Diffstat (limited to 'drivers/gpu/drm/amd')
588 files changed, 16523 insertions, 12219 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 87080c06e5fc..64e7acff8f18 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -66,7 +66,7 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \ amdgpu_fw_attestation.o amdgpu_securedisplay.o \ amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \ amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o amdgpu_dev_coredump.o \ - amdgpu_cper.o amdgpu_userq_fence.o amdgpu_eviction_fence.o + amdgpu_cper.o amdgpu_userq_fence.o amdgpu_eviction_fence.o amdgpu_ip.o amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o @@ -84,7 +84,8 @@ amdgpu-y += \ vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \ nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o soc24.o \ sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o \ - nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o lsdma_v7_0.o hdp_v7_0.o nbif_v6_3_1.o + nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o lsdma_v7_0.o hdp_v7_0.o nbif_v6_3_1.o \ + cyan_skillfish_reg_init.o # add DF block amdgpu-y += \ @@ -137,7 +138,6 @@ amdgpu-y += \ # add DCE block amdgpu-y += \ dce_v10_0.o \ - dce_v11_0.o \ amdgpu_vkms.o # add GFX block diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c index e13fbd974141..9569dc16dd3d 100644 --- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c +++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c @@ -71,18 +71,29 @@ aldebaran_get_reset_handler(struct amdgpu_reset_control *reset_ctl, return NULL; } +static inline uint32_t aldebaran_get_ip_block_mask(struct amdgpu_device *adev) +{ + uint32_t ip_block_mask = BIT(AMD_IP_BLOCK_TYPE_GFX) | + BIT(AMD_IP_BLOCK_TYPE_SDMA); + + if (adev->aid_mask) + ip_block_mask |= BIT(AMD_IP_BLOCK_TYPE_IH); + + return ip_block_mask; +} + static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev) { + uint32_t ip_block_mask = aldebaran_get_ip_block_mask(adev); + uint32_t ip_block; int r, i; amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); for (i = adev->num_ip_blocks - 1; i >= 0; i--) { - if (!(adev->ip_blocks[i].version->type == - AMD_IP_BLOCK_TYPE_GFX || - adev->ip_blocks[i].version->type == - AMD_IP_BLOCK_TYPE_SDMA)) + ip_block = BIT(adev->ip_blocks[i].version->type); + if (!(ip_block_mask & ip_block)) continue; r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]); @@ -200,8 +211,10 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl, static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev) { struct amdgpu_firmware_info *ucode_list[AMDGPU_UCODE_ID_MAXIMUM]; + uint32_t ip_block_mask = aldebaran_get_ip_block_mask(adev); struct amdgpu_firmware_info *ucode; struct amdgpu_ip_block *cmn_block; + struct amdgpu_ip_block *ih_block; int ucode_count = 0; int i, r; @@ -243,6 +256,18 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev) if (r) return r; + if (ip_block_mask & BIT(AMD_IP_BLOCK_TYPE_IH)) { + ih_block = amdgpu_device_ip_get_ip_block(adev, + AMD_IP_BLOCK_TYPE_IH); + if (unlikely(!ih_block)) { + dev_err(adev->dev, "Failed to get IH handle\n"); + return -EINVAL; + } + r = amdgpu_ip_block_resume(ih_block); + if (r) + return r; + } + /* Reinit GFXHUB */ adev->gfxhub.funcs->init(adev); r = adev->gfxhub.funcs->gart_enable(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index a5ccd0ada16a..2a0df4cabb99 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -63,6 +63,7 @@ #include "kgd_pp_interface.h" #include "amd_shared.h" +#include "amdgpu_utils.h" #include "amdgpu_mode.h" #include "amdgpu_ih.h" #include "amdgpu_irq.h" @@ -434,7 +435,6 @@ struct amdgpu_clock { uint32_t default_mclk; uint32_t default_sclk; uint32_t default_dispclk; - uint32_t current_dispclk; uint32_t dp_extclk; uint32_t max_pixel_clock; }; @@ -470,9 +470,6 @@ struct amdgpu_sa_manager { void *cpu_ptr; }; -int amdgpu_fence_slab_init(void); -void amdgpu_fence_slab_fini(void); - /* * IRQS. */ @@ -548,7 +545,7 @@ struct amdgpu_wb { * this value can be accessed directly by using the offset as an index. * For the GPU address, it is necessary to use gpu_addr and the offset. */ - volatile uint32_t *wb; + uint32_t *wb; /** * @gpu_addr: @@ -724,7 +721,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data, /* VRAM scratch page for HDP bug, default vram page */ struct amdgpu_mem_scratch { struct amdgpu_bo *robj; - volatile uint32_t *ptr; + uint32_t *ptr; u64 gpu_addr; }; @@ -755,6 +752,7 @@ typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, u struct amdgpu_mmio_remap { u32 reg_offset; resource_size_t bus_addr; + struct amdgpu_bo *bo; }; /* Define the HW IP blocks will be used in driver , add more if necessary */ @@ -822,6 +820,20 @@ struct amdgpu_ip_map_info { uint32_t mask); }; +enum amdgpu_uid_type { + AMDGPU_UID_TYPE_XCD, + AMDGPU_UID_TYPE_AID, + AMDGPU_UID_TYPE_SOC, + AMDGPU_UID_TYPE_MAX +}; + +#define AMDGPU_UID_INST_MAX 8 /* max number of instances for each UID type */ + +struct amdgpu_uid { + uint64_t uid[AMDGPU_UID_TYPE_MAX][AMDGPU_UID_INST_MAX]; + struct amdgpu_device *adev; +}; + struct amd_powerplay { void *pp_handle; const struct amd_pm_funcs *pp_funcs; @@ -886,6 +898,7 @@ struct amdgpu_mqd_prop { uint64_t csa_addr; uint64_t fence_address; bool tmz_queue; + bool kernel_queue; }; struct amdgpu_mqd { @@ -898,6 +911,9 @@ struct amdgpu_pcie_reset_ctx { bool in_link_reset; bool occurs_dpc; bool audio_suspended; + struct pci_dev *swus; + struct pci_saved_state *swus_pcistate; + struct pci_saved_state *swds_pcistate; }; /* @@ -931,12 +947,6 @@ enum amdgpu_enforce_isolation_mode { AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER = 3, }; - -/* - * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise. - */ -#define AMDGPU_HAS_VRAM(_adev) ((_adev)->gmc.real_vram_size) - struct amdgpu_device { struct device *dev; struct pci_dev *pdev; @@ -1140,9 +1150,6 @@ struct amdgpu_device { /* for userq and VM fences */ struct amdgpu_seq64 seq64; - /* KFD */ - struct amdgpu_kfd_dev kfd; - /* UMC */ struct amdgpu_umc umc; @@ -1282,6 +1289,7 @@ struct amdgpu_device { bool debug_exp_resets; bool debug_disable_gpu_ring_reset; bool debug_vm_userptr; + bool debug_disable_ce_logs; /* Protection for the following isolation structure */ struct mutex enforce_isolation_mutex; @@ -1303,6 +1311,12 @@ struct amdgpu_device { struct list_head userq_mgr_list; struct mutex userq_mutex; bool userq_halt_for_enforce_isolation; + struct amdgpu_uid *uid_info; + + /* KFD + * Must be last --ends in a flexible-array member. 
+ */ + struct amdgpu_kfd_dev kfd; }; static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev, @@ -1336,6 +1350,11 @@ static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev) return container_of(bdev, struct amdgpu_device, mman.bdev); } +static inline bool amdgpu_is_multi_aid(struct amdgpu_device *adev) +{ + return !!adev->aid_mask; +} + int amdgpu_device_init(struct amdgpu_device *adev, uint32_t flags); void amdgpu_device_fini_hw(struct amdgpu_device *adev); @@ -1387,7 +1406,8 @@ void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev, void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev, u64 reg_addr, u64 reg_data); u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev); -bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type); +bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev, + enum amd_asic_type asic_type); bool amdgpu_device_has_dc_support(struct amdgpu_device *adev); void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev); @@ -1558,16 +1578,16 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev, int amdgpu_device_mode1_reset(struct amdgpu_device *adev); int amdgpu_device_link_reset(struct amdgpu_device *adev); -bool amdgpu_device_supports_atpx(struct drm_device *dev); -bool amdgpu_device_supports_px(struct drm_device *dev); -bool amdgpu_device_supports_boco(struct drm_device *dev); -bool amdgpu_device_supports_smart_shift(struct drm_device *dev); -int amdgpu_device_supports_baco(struct drm_device *dev); +bool amdgpu_device_supports_atpx(struct amdgpu_device *adev); +bool amdgpu_device_supports_px(struct amdgpu_device *adev); +bool amdgpu_device_supports_boco(struct amdgpu_device *adev); +bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev); +int amdgpu_device_supports_baco(struct amdgpu_device *adev); void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev); bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, struct amdgpu_device *peer_adev); -int amdgpu_device_baco_enter(struct drm_device *dev); -int amdgpu_device_baco_exit(struct drm_device *dev); +int amdgpu_device_baco_enter(struct amdgpu_device *adev); +int amdgpu_device_baco_exit(struct amdgpu_device *adev); void amdgpu_device_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring); @@ -1619,6 +1639,7 @@ void amdgpu_driver_release_kms(struct drm_device *dev); int amdgpu_device_ip_suspend(struct amdgpu_device *adev); int amdgpu_device_prepare(struct drm_device *dev); +void amdgpu_device_complete(struct drm_device *dev); int amdgpu_device_suspend(struct drm_device *dev, bool fbcon); int amdgpu_device_resume(struct drm_device *dev, bool fbcon); u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc); @@ -1669,7 +1690,8 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev, u8 perf_req, bool advertise); int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev, u8 dev_state, bool drv_state); -int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state); +int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev, + enum amdgpu_ss ss_state); int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev); int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset, u64 *tmr_size); @@ -1700,8 +1722,11 @@ static inline void amdgpu_acpi_release(void) { } static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; } static inline int amdgpu_acpi_power_shift_control(struct 
amdgpu_device *adev, u8 dev_state, bool drv_state) { return 0; } -static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev, - enum amdgpu_ss ss_state) { return 0; } +static inline int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev, + enum amdgpu_ss ss_state) +{ + return 0; +} static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps) { } #endif @@ -1714,7 +1739,7 @@ static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return #endif #if defined(CONFIG_DRM_AMD_ISP) -int amdgpu_acpi_get_isp4_dev_hid(u8 (*hid)[ACPI_ID_LEN]); +int amdgpu_acpi_get_isp4_dev(struct acpi_device **dev); #endif void amdgpu_register_gpu_instance(struct amdgpu_device *adev); @@ -1760,4 +1785,24 @@ extern const struct attribute_group amdgpu_flash_attr_group; void amdgpu_set_init_level(struct amdgpu_device *adev, enum amdgpu_init_lvl_id lvl); + +static inline int amdgpu_device_bus_status_check(struct amdgpu_device *adev) +{ + u32 status; + int r; + + r = pci_read_config_dword(adev->pdev, PCI_COMMAND, &status); + if (r || PCI_POSSIBLE_ERROR(status)) { + dev_err(adev->dev, "device lost from bus!"); + return -ENODEV; + } + + return 0; +} + +void amdgpu_device_set_uid(struct amdgpu_uid *uid_info, + enum amdgpu_uid_type type, uint8_t inst, + uint64_t uid); +uint64_t amdgpu_device_get_uid(struct amdgpu_uid *uid_info, + enum amdgpu_uid_type type, uint8_t inst); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c index 3835f2592914..9b3180449150 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c @@ -76,6 +76,7 @@ static void aca_banks_release(struct aca_banks *banks) list_for_each_entry_safe(node, tmp, &banks->list, node) { list_del(&node->node); kvfree(node); + banks->nr_banks--; } } @@ -115,6 +116,11 @@ static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, st u64 event_id = qctx ? qctx->evid.event_id : RAS_EVENT_INVALID_ID; int i; + if (adev->debug_disable_ce_logs && + bank->smu_err_type == ACA_SMU_TYPE_CE && + !ACA_BANK_ERR_IS_DEFFERED(bank)) + return; + RAS_EVENT_LOG(adev, event_id, HW_ERR "Accelerator Check Architecture events logged\n"); /* plus 1 for output format, e.g: ACA[08/08]: xxxx */ for (i = 0; i < ARRAY_SIZE(aca_regs); i++) @@ -125,6 +131,27 @@ static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, st RAS_EVENT_LOG(adev, event_id, HW_ERR "hardware error logged by the scrubber\n"); } +static bool aca_bank_hwip_is_matched(struct aca_bank *bank, enum aca_hwip_type type) +{ + + struct aca_hwip *hwip; + int hwid, mcatype; + u64 ipid; + + if (!bank || type == ACA_HWIP_TYPE_UNKNOW) + return false; + + hwip = &aca_hwid_mcatypes[type]; + if (!hwip->hwid) + return false; + + ipid = bank->regs[ACA_REG_IDX_IPID]; + hwid = ACA_REG__IPID__HARDWAREID(ipid); + mcatype = ACA_REG__IPID__MCATYPE(ipid); + + return hwip->hwid == hwid && hwip->mcatype == mcatype; +} + static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_type type, int start, int count, struct aca_banks *banks, struct ras_query_context *qctx) @@ -163,6 +190,15 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_ bank.smu_err_type = type; + /* + * Poison being consumed when injecting a UE while running background workloads, + * which are unexpected. 
+ */ + if (type == ACA_SMU_TYPE_UE && + ACA_REG__STATUS__POISON(bank.regs[ACA_REG_IDX_STATUS]) && + !aca_bank_hwip_is_matched(&bank, ACA_HWIP_TYPE_UMC)) + continue; + aca_smu_bank_dump(adev, i, count, &bank, qctx); ret = aca_banks_add_bank(banks, &bank); @@ -173,27 +209,6 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_ return 0; } -static bool aca_bank_hwip_is_matched(struct aca_bank *bank, enum aca_hwip_type type) -{ - - struct aca_hwip *hwip; - int hwid, mcatype; - u64 ipid; - - if (!bank || type == ACA_HWIP_TYPE_UNKNOW) - return false; - - hwip = &aca_hwid_mcatypes[type]; - if (!hwip->hwid) - return false; - - ipid = bank->regs[ACA_REG_IDX_IPID]; - hwid = ACA_REG__IPID__HARDWAREID(ipid); - mcatype = ACA_REG__IPID__MCATYPE(ipid); - - return hwip->hwid == hwid && hwip->mcatype == mcatype; -} - static bool aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type) { const struct aca_bank_ops *bank_ops = handle->bank_ops; @@ -224,6 +239,7 @@ static struct aca_bank_error *new_bank_error(struct aca_error *aerr, struct aca_ mutex_lock(&aerr->lock); list_add_tail(&bank_error->node, &aerr->list); + aerr->nr_errors++; mutex_unlock(&aerr->lock); return bank_error; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index f5466c592d94..6c62e27b9800 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -811,18 +811,18 @@ int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev, /** * amdgpu_acpi_smart_shift_update - update dGPU device state to SBIOS * - * @dev: drm_device pointer + * @adev: amdgpu device pointer * @ss_state: current smart shift event * * returns 0 on success, * otherwise return error number. */ -int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state) +int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev, + enum amdgpu_ss ss_state) { - struct amdgpu_device *adev = drm_to_adev(dev); int r; - if (!amdgpu_device_supports_smart_shift(dev)) + if (!amdgpu_device_supports_smart_shift(adev)) return 0; switch (ss_state) { @@ -1545,7 +1545,7 @@ static int isp_match_acpi_device_ids(struct device *dev, const void *data) return acpi_match_device(data, dev) ? 
1 : 0; } -int amdgpu_acpi_get_isp4_dev_hid(u8 (*hid)[ACPI_ID_LEN]) +int amdgpu_acpi_get_isp4_dev(struct acpi_device **dev) { struct device *pdev __free(put_device) = NULL; struct acpi_device *acpi_pdev; @@ -1559,7 +1559,7 @@ int amdgpu_acpi_get_isp4_dev_hid(u8 (*hid)[ACPI_ID_LEN]) if (!acpi_pdev) return -ENODEV; - strscpy(*hid, acpi_device_hid(acpi_pdev)); + *dev = acpi_pdev; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index d8ac4b1051a8..a2879d2b7c8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -248,18 +248,42 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev, kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry); } -void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm) +void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc) +{ + if (adev->kfd.dev) { + if (adev->in_s0ix) + kgd2kfd_stop_sched_all_nodes(adev->kfd.dev); + else + kgd2kfd_suspend(adev->kfd.dev, suspend_proc); + } +} + +int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc) +{ + int r = 0; + + if (adev->kfd.dev) { + if (adev->in_s0ix) + r = kgd2kfd_start_sched_all_nodes(adev->kfd.dev); + else + r = kgd2kfd_resume(adev->kfd.dev, resume_proc); + } + + return r; +} + +void amdgpu_amdkfd_suspend_process(struct amdgpu_device *adev) { if (adev->kfd.dev) - kgd2kfd_suspend(adev->kfd.dev, run_pm); + kgd2kfd_suspend_process(adev->kfd.dev); } -int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm) +int amdgpu_amdkfd_resume_process(struct amdgpu_device *adev) { int r = 0; if (adev->kfd.dev) - r = kgd2kfd_resume(adev->kfd.dev, run_pm); + r = kgd2kfd_resume_process(adev->kfd.dev); return r; } @@ -642,7 +666,7 @@ int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev, goto err; } - ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job); + ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job, 0); if (ret) goto err; @@ -749,12 +773,12 @@ int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev, int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev) { - return kgd2kfd_check_and_lock_kfd(); + return kgd2kfd_check_and_lock_kfd(adev->kfd.dev); } void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev) { - kgd2kfd_unlock_kfd(); + kgd2kfd_unlock_kfd(adev->kfd.dev); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index b6ca41859b53..9e120c934cc1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -107,11 +107,13 @@ struct amdgpu_kfd_dev { bool init_complete; struct work_struct reset_work; - /* HMM page migration MEMORY_DEVICE_PRIVATE mapping */ - struct dev_pagemap pgmap; - /* Client for KFD BO GEM handle allocations */ struct drm_client_dev client; + + /* HMM page migration MEMORY_DEVICE_PRIVATE mapping + * Must be last --ends in a flexible-array member. 
+ */ + struct dev_pagemap pgmap; }; enum kgd_engine_type { @@ -154,8 +156,10 @@ struct amdkfd_process_info { int amdgpu_amdkfd_init(void); void amdgpu_amdkfd_fini(void); -void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm); -int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm); +void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc); +int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc); +void amdgpu_amdkfd_suspend_process(struct amdgpu_device *adev); +int amdgpu_amdkfd_resume_process(struct amdgpu_device *adev); void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev, const void *ih_ring_entry); void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev); @@ -411,18 +415,22 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf); bool kgd2kfd_device_init(struct kfd_dev *kfd, const struct kgd2kfd_shared_resources *gpu_resources); void kgd2kfd_device_exit(struct kfd_dev *kfd); -void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm); -int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm); +void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc); +int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc); +void kgd2kfd_suspend_process(struct kfd_dev *kfd); +int kgd2kfd_resume_process(struct kfd_dev *kfd); int kgd2kfd_pre_reset(struct kfd_dev *kfd, struct amdgpu_reset_context *reset_context); int kgd2kfd_post_reset(struct kfd_dev *kfd); void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry); void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd); void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask); -int kgd2kfd_check_and_lock_kfd(void); -void kgd2kfd_unlock_kfd(void); +int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd); +void kgd2kfd_unlock_kfd(struct kfd_dev *kfd); int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id); +int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd); int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id); +int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd); bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id); bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry, bool retry_fault); @@ -454,11 +462,20 @@ static inline void kgd2kfd_device_exit(struct kfd_dev *kfd) { } -static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) +static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc) { } -static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) +static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc) +{ + return 0; +} + +static inline void kgd2kfd_suspend_process(struct kfd_dev *kfd) +{ +} + +static inline int kgd2kfd_resume_process(struct kfd_dev *kfd) { return 0; } @@ -489,12 +506,12 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask) { } -static inline int kgd2kfd_check_and_lock_kfd(void) +static inline int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd) { return 0; } -static inline void kgd2kfd_unlock_kfd(void) +static inline void kgd2kfd_unlock_kfd(struct kfd_dev *kfd) { } @@ -503,11 +520,21 @@ static inline int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id) return 0; } +static inline int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd) +{ + return 0; +} + static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id) { return 0; } +static inline int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd) +{ + return 0; +} + static inline bool 
kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id) { return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index ffbaa8bc5eea..1105a09e55dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -320,7 +320,7 @@ static void set_barrier_auto_waitcnt(struct amdgpu_device *adev, bool enable_wai if (!down_read_trylock(&adev->reset_domain->sem)) return; - amdgpu_amdkfd_suspend(adev, false); + amdgpu_amdkfd_suspend(adev, true); if (suspend_resume_compute_scheduler(adev, true)) goto out; @@ -333,7 +333,7 @@ static void set_barrier_auto_waitcnt(struct amdgpu_device *adev, bool enable_wai out: suspend_resume_compute_scheduler(adev, false); - amdgpu_amdkfd_resume(adev, false); + amdgpu_amdkfd_resume(adev, true); up_read(&adev->reset_domain->sem); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index 04ef0ca10541..0239114fb6c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -352,7 +352,7 @@ static int kgd_hqd_dump(struct amdgpu_device *adev, (*dump)[i++][1] = RREG32_SOC15_IP(GC, addr); \ } while (0) - *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); + *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL); if (*dump == NULL) return -ENOMEM; @@ -449,7 +449,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev, #undef HQD_N_REGS #define HQD_N_REGS (19+6+7+10) - *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); + *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL); if (*dump == NULL) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c index 6d08bc2781a3..f2278a0937ff 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c @@ -338,7 +338,7 @@ static int hqd_dump_v10_3(struct amdgpu_device *adev, (*dump)[i++][1] = RREG32_SOC15_IP(GC, addr); \ } while (0) - *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); + *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL); if (*dump == NULL) return -ENOMEM; @@ -435,7 +435,7 @@ static int hqd_sdma_dump_v10_3(struct amdgpu_device *adev, #undef HQD_N_REGS #define HQD_N_REGS (19+6+7+12) - *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); + *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL); if (*dump == NULL) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c index e0e6a6a49d90..aaccf0b9947d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c @@ -323,7 +323,7 @@ static int hqd_dump_v11(struct amdgpu_device *adev, (*dump)[i++][1] = RREG32(addr); \ } while (0) - *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); + *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL); if (*dump == NULL) return -ENOMEM; @@ -420,7 +420,7 @@ static int hqd_sdma_dump_v11(struct amdgpu_device *adev, #undef HQD_N_REGS #define HQD_N_REGS (7+11+1+12+12) - *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); + *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL); if (*dump == NULL) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c index 6f0dc23c901b..e0ceab400b2d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c @@ -115,7 +115,7 @@ static int hqd_dump_v12(struct amdgpu_device *adev, (*dump)[i++][1] = RREG32(addr); \ } while (0) - *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); + *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL); if (*dump == NULL) return -ENOMEM; @@ -146,7 +146,7 @@ static int hqd_sdma_dump_v12(struct amdgpu_device *adev, #undef HQD_N_REGS #define HQD_N_REGS (last_reg - first_reg + 1) - *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); + *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL); if (*dump == NULL) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 260165bbe373..7c54fe6b0f5d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -213,19 +213,35 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, spin_lock(&kfd_mem_limit.mem_limit_lock); if (kfd_mem_limit.system_mem_used + system_mem_needed > - kfd_mem_limit.max_system_mem_limit) + kfd_mem_limit.max_system_mem_limit) { pr_debug("Set no_system_mem_limit=1 if using shared memory\n"); + if (!no_system_mem_limit) { + ret = -ENOMEM; + goto release; + } + } - if ((kfd_mem_limit.system_mem_used + system_mem_needed > - kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) || - (kfd_mem_limit.ttm_mem_used + ttm_mem_needed > - kfd_mem_limit.max_ttm_mem_limit) || - (adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed > - vram_size - reserved_for_pt - reserved_for_ras - atomic64_read(&adev->vram_pin_size))) { + if (kfd_mem_limit.ttm_mem_used + ttm_mem_needed > + kfd_mem_limit.max_ttm_mem_limit) { ret = -ENOMEM; goto release; } + /*if is_app_apu is false and apu_prefer_gtt is true, it is an APU with + * carve out < gtt. 
In that case, VRAM allocation will go to gtt domain, skip + * VRAM check since ttm_mem_limit check already cover this allocation + */ + + if (adev && xcp_id >= 0 && (!adev->apu_prefer_gtt || adev->gmc.is_app_apu)) { + uint64_t vram_available = + vram_size - reserved_for_pt - reserved_for_ras - + atomic64_read(&adev->vram_pin_size); + if (adev->kfd.vram_used[xcp_id] + vram_needed > vram_available) { + ret = -ENOMEM; + goto release; + } + } + /* Update memory accounting by decreasing available system * memory, TTM memory and GPU memory as computed above */ @@ -494,7 +510,8 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync) return amdgpu_sync_fence(sync, vm->last_update, GFP_KERNEL); } -static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) +static uint64_t get_pte_flags(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct kgd_mem *mem) { uint32_t mapping_flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_MTYPE_DEFAULT; @@ -504,7 +521,7 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE) mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE; - return amdgpu_gem_va_map_flags(adev, mapping_flags); + return mapping_flags; } /** @@ -961,7 +978,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, goto unwind; } attachment[i]->va = va; - attachment[i]->pte_flags = get_pte_flags(adev, mem); + attachment[i]->pte_flags = get_pte_flags(adev, vm, mem); attachment[i]->adev = adev; list_add(&attachment[i]->list, &mem->attachments); @@ -1072,7 +1089,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, return 0; } - ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range); + ret = amdgpu_ttm_tt_get_user_pages(bo, &range); if (ret) { if (ret == -EAGAIN) pr_debug("Failed to get user pages, try again\n"); @@ -1086,6 +1103,9 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, pr_err("%s: Failed to reserve BO\n", __func__); goto release_out; } + + amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range); + amdgpu_bo_placement_from_domain(bo, mem->domain); ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); if (ret) @@ -1626,11 +1646,15 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev, uint64_t vram_available, system_mem_available, ttm_mem_available; spin_lock(&kfd_mem_limit.mem_limit_lock); - vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id) - - adev->kfd.vram_used_aligned[xcp_id] - - atomic64_read(&adev->vram_pin_size) - - reserved_for_pt - - reserved_for_ras; + if (adev->apu_prefer_gtt && !adev->gmc.is_app_apu) + vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id) + - adev->kfd.vram_used_aligned[xcp_id]; + else + vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id) + - adev->kfd.vram_used_aligned[xcp_id] + - atomic64_read(&adev->vram_pin_size) + - reserved_for_pt + - reserved_for_ras; if (adev->apu_prefer_gtt) { system_mem_available = no_system_mem_limit ? 
@@ -2544,8 +2568,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, } /* Get updated user pages */ - ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, - &mem->range); + ret = amdgpu_ttm_tt_get_user_pages(bo, &mem->range); if (ret) { pr_debug("Failed %d to get user pages\n", ret); @@ -2574,6 +2597,8 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, ret = 0; } + amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, mem->range); + mutex_lock(&process_info->notifier_lock); /* Mark the BO as valid unless it was invalidated @@ -2969,9 +2994,22 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu * struct amdgpu_device *adev = amdgpu_ttm_adev( peer_vm->root.bo->tbo.bdev); + struct amdgpu_fpriv *fpriv = + container_of(peer_vm, struct amdgpu_fpriv, vm); + + ret = amdgpu_vm_bo_update(adev, fpriv->prt_va, false); + if (ret) { + dev_dbg(adev->dev, + "Memory eviction: handle PRT moved failed, pid %8d. Try again.\n", + pid_nr(process_info->pid)); + goto validate_map_fail; + } + ret = amdgpu_vm_handle_moved(adev, peer_vm, &exec.ticket); if (ret) { - pr_debug("Memory eviction: handle moved failed. Try again\n"); + dev_dbg(adev->dev, + "Memory eviction: handle moved failed, pid %8d. Try again.\n", + pid_nr(process_info->pid)); goto validate_map_fail; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index e476e45b996a..763f2b8dcf13 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -706,7 +706,6 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev) } adev->clock.dp_extclk = le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); - adev->clock.current_dispclk = adev->clock.default_dispclk; adev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock); if (adev->clock.max_pixel_clock == 0) @@ -1816,16 +1815,43 @@ static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev, return sysfs_emit(buf, "%s\n", ctx->vbios_pn); } +static ssize_t amdgpu_atombios_get_vbios_build(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + struct atom_context *ctx = adev->mode_info.atom_context; + + return sysfs_emit(buf, "%s\n", ctx->build_num); +} + static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version, NULL); +static DEVICE_ATTR(vbios_build, 0444, amdgpu_atombios_get_vbios_build, NULL); static struct attribute *amdgpu_vbios_version_attrs[] = { - &dev_attr_vbios_version.attr, - NULL + &dev_attr_vbios_version.attr, &dev_attr_vbios_build.attr, NULL }; +static umode_t amdgpu_vbios_version_attrs_is_visible(struct kobject *kobj, + struct attribute *attr, + int index) +{ + struct device *dev = kobj_to_dev(kobj); + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + struct atom_context *ctx = adev->mode_info.atom_context; + + if (attr == &dev_attr_vbios_build.attr && !strlen(ctx->build_num)) + return 0; + + return attr->mode; +} + const struct attribute_group amdgpu_vbios_version_attr_group = { - .attrs = amdgpu_vbios_version_attrs + .attrs = amdgpu_vbios_version_attrs, + .is_visible = amdgpu_vbios_version_attrs_is_visible, }; int amdgpu_atombios_sysfs_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index 
702f6610d024..66fb37b64388 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -184,43 +184,36 @@ void amdgpu_bo_list_put(struct amdgpu_bo_list *list) int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in, struct drm_amdgpu_bo_list_entry **info_param) { - const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr); const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry); + const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr); + const uint32_t bo_info_size = in->bo_info_size; + const uint32_t bo_number = in->bo_number; struct drm_amdgpu_bo_list_entry *info; - int r; - - info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL); - if (!info) - return -ENOMEM; /* copy the handle array from userspace to a kernel buffer */ - r = -EFAULT; - if (likely(info_size == in->bo_info_size)) { - unsigned long bytes = in->bo_number * - in->bo_info_size; - - if (copy_from_user(info, uptr, bytes)) - goto error_free; - + if (likely(info_size == bo_info_size)) { + info = vmemdup_array_user(uptr, bo_number, info_size); + if (IS_ERR(info)) + return PTR_ERR(info); } else { - unsigned long bytes = min(in->bo_info_size, info_size); + const uint32_t bytes = min(bo_info_size, info_size); unsigned i; - memset(info, 0, in->bo_number * info_size); - for (i = 0; i < in->bo_number; ++i) { - if (copy_from_user(&info[i], uptr, bytes)) - goto error_free; + info = kvmalloc_array(bo_number, info_size, GFP_KERNEL); + if (!info) + return -ENOMEM; - uptr += in->bo_info_size; + memset(info, 0, bo_number * info_size); + for (i = 0; i < bo_number; ++i, uptr += bo_info_size) { + if (copy_from_user(&info[i], uptr, bytes)) { + kvfree(info); + return -EFAULT; + } } } *info_param = info; return 0; - -error_free: - kvfree(info); - return r; } int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h index 555cd6d877c3..a716c9886c74 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h @@ -38,7 +38,6 @@ struct amdgpu_bo_list_entry { struct amdgpu_bo *bo; struct amdgpu_bo_va *bo_va; uint32_t priority; - struct page **user_pages; struct hmm_range *range; bool user_invalidated; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 5e375e9c4f5d..47e9bfba0642 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -398,30 +398,28 @@ static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder, struct drm_display_mode *mode = NULL; struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; int i; - static const struct mode_size { + int n; + struct mode_size { + char name[DRM_DISPLAY_MODE_LEN]; int w; int h; - } common_modes[17] = { - { 640, 480}, - { 720, 480}, - { 800, 600}, - { 848, 480}, - {1024, 768}, - {1152, 768}, - {1280, 720}, - {1280, 800}, - {1280, 854}, - {1280, 960}, - {1280, 1024}, - {1440, 900}, - {1400, 1050}, - {1680, 1050}, - {1600, 1200}, - {1920, 1080}, - {1920, 1200} + } common_modes[] = { + { "640x480", 640, 480}, + { "800x600", 800, 600}, + { "1024x768", 1024, 768}, + { "1280x720", 1280, 720}, + { "1280x800", 1280, 800}, + {"1280x1024", 1280, 1024}, + { "1440x900", 1440, 900}, + {"1680x1050", 1680, 1050}, + {"1600x1200", 1600, 1200}, + {"1920x1080", 1920, 1080}, + {"1920x1200", 1920, 1200} }; - for (i = 0; i < 17; i++) { + n = 
ARRAY_SIZE(common_modes); + + for (i = 0; i < n; i++) { if (amdgpu_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) { if (common_modes[i].w > 1024 || common_modes[i].h > 768) @@ -434,12 +432,11 @@ static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder, common_modes[i].h == native_mode->vdisplay)) continue; } - if (common_modes[i].w < 320 || common_modes[i].h < 200) - continue; mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false); if (!mode) return; + strscpy(mode->name, common_modes[i].name, DRM_DISPLAY_MODE_LEN); drm_mode_probed_add(connector, mode); } @@ -1195,29 +1192,69 @@ static void amdgpu_connector_dvi_force(struct drm_connector *connector) amdgpu_connector->use_digital = true; } +/** + * amdgpu_max_hdmi_pixel_clock - Return max supported HDMI (TMDS) pixel clock + * @adev: pointer to amdgpu_device + * + * Return: maximum supported HDMI (TMDS) pixel clock in KHz. + */ +static int amdgpu_max_hdmi_pixel_clock(const struct amdgpu_device *adev) +{ + if (adev->asic_type >= CHIP_POLARIS10) + return 600000; + else if (adev->asic_type >= CHIP_TONGA) + return 300000; + else + return 297000; +} + +/** + * amdgpu_connector_dvi_mode_valid - Validate a mode on DVI/HDMI connectors + * @connector: DRM connector to validate the mode on + * @mode: display mode to validate + * + * Validate the given display mode on DVI and HDMI connectors, including + * analog signals on DVI-I. + * + * Return: drm_mode_status indicating whether the mode is valid. + */ static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector *connector, const struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); + const int max_hdmi_pixel_clock = amdgpu_max_hdmi_pixel_clock(adev); + const int max_dvi_single_link_pixel_clock = 165000; + int max_digital_pixel_clock_khz; /* XXX check mode bandwidth */ - if (amdgpu_connector->use_digital && (mode->clock > 165000)) { - if ((amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) || - (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) || - (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B)) { - return MODE_OK; - } else if (connector->display_info.is_hdmi) { - /* HDMI 1.3+ supports max clock of 340 Mhz */ - if (mode->clock > 340000) - return MODE_CLOCK_HIGH; - else - return MODE_OK; - } else { - return MODE_CLOCK_HIGH; + if (amdgpu_connector->use_digital) { + switch (amdgpu_connector->connector_object_id) { + case CONNECTOR_OBJECT_ID_HDMI_TYPE_A: + max_digital_pixel_clock_khz = max_hdmi_pixel_clock; + break; + case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I: + case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D: + max_digital_pixel_clock_khz = max_dvi_single_link_pixel_clock; + break; + case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I: + case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D: + case CONNECTOR_OBJECT_ID_HDMI_TYPE_B: + max_digital_pixel_clock_khz = max_dvi_single_link_pixel_clock * 2; + break; } + + /* When the display EDID claims that it's an HDMI display, + * we use the HDMI encoder mode of the display HW, + * so we should verify against the max HDMI clock here. 
+ */ + if (connector->display_info.is_hdmi) + max_digital_pixel_clock_khz = max_hdmi_pixel_clock; + + if (mode->clock > max_digital_pixel_clock_khz) + return MODE_CLOCK_HIGH; } /* check against the max pixel clock */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c index 5a234eadae8b..ef996493115f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c @@ -68,7 +68,6 @@ void amdgpu_cper_entry_fill_hdr(struct amdgpu_device *adev, hdr->error_severity = sev; hdr->valid_bits.platform_id = 1; - hdr->valid_bits.partition_id = 1; hdr->valid_bits.timestamp = 1; amdgpu_cper_get_timestamp(&hdr->timestamp); @@ -174,7 +173,7 @@ int amdgpu_cper_entry_fill_runtime_section(struct amdgpu_device *adev, struct cper_sec_nonstd_err *section; bool poison; - poison = (sev == CPER_SEV_NON_FATAL_CORRECTED) ? false : true; + poison = sev != CPER_SEV_NON_FATAL_CORRECTED; section_desc = (struct cper_sec_desc *)((uint8_t *)hdr + SEC_DESC_OFFSET(idx)); section = (struct cper_sec_nonstd_err *)((uint8_t *)hdr + NONSTD_SEC_OFFSET(hdr->sec_cnt, idx)); @@ -206,24 +205,31 @@ int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev { struct cper_sec_desc *section_desc; struct cper_sec_nonstd_err *section; + uint32_t socket_id; section_desc = (struct cper_sec_desc *)((uint8_t *)hdr + SEC_DESC_OFFSET(idx)); section = (struct cper_sec_nonstd_err *)((uint8_t *)hdr + NONSTD_SEC_OFFSET(hdr->sec_cnt, idx)); amdgpu_cper_entry_fill_section_desc(adev, section_desc, true, false, - CPER_SEV_NUM, RUNTIME, NONSTD_SEC_LEN, + CPER_SEV_FATAL, RUNTIME, NONSTD_SEC_LEN, NONSTD_SEC_OFFSET(hdr->sec_cnt, idx)); section->hdr.valid_bits.err_info_cnt = 1; section->hdr.valid_bits.err_context_cnt = 1; section->info.error_type = RUNTIME; + section->info.valid_bits.ms_chk = 1; section->info.ms_chk_bits.err_type_valid = 1; + section->info.ms_chk_bits.err_type = 1; + section->info.ms_chk_bits.pcc = 1; section->ctx.reg_ctx_type = CPER_CTX_TYPE_CRASH; section->ctx.reg_arr_size = sizeof(section->ctx.reg_dump); /* Hardcoded Reg dump for bad page threshold CPER */ + socket_id = (adev->smuio.funcs && adev->smuio.funcs->get_socket_id) ? 
+ adev->smuio.funcs->get_socket_id(adev) : + 0; section->ctx.reg_dump[CPER_ACA_REG_CTL_LO] = 0x1; section->ctx.reg_dump[CPER_ACA_REG_CTL_HI] = 0x0; section->ctx.reg_dump[CPER_ACA_REG_STATUS_LO] = 0x137; @@ -234,8 +240,8 @@ int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev section->ctx.reg_dump[CPER_ACA_REG_MISC0_HI] = 0x0; section->ctx.reg_dump[CPER_ACA_REG_CONFIG_LO] = 0x2; section->ctx.reg_dump[CPER_ACA_REG_CONFIG_HI] = 0x1ff; - section->ctx.reg_dump[CPER_ACA_REG_IPID_LO] = 0x0; - section->ctx.reg_dump[CPER_ACA_REG_IPID_HI] = 0x96; + section->ctx.reg_dump[CPER_ACA_REG_IPID_LO] = (socket_id / 4) & 0x01; + section->ctx.reg_dump[CPER_ACA_REG_IPID_HI] = 0x096 | (((socket_id % 4) & 0x3) << 12); section->ctx.reg_dump[CPER_ACA_REG_SYND_LO] = 0x0; section->ctx.reg_dump[CPER_ACA_REG_SYND_HI] = 0x0; @@ -326,7 +332,9 @@ int amdgpu_cper_generate_bp_threshold_record(struct amdgpu_device *adev) return -ENOMEM; } - amdgpu_cper_entry_fill_hdr(adev, bp_threshold, AMDGPU_CPER_TYPE_BP_THRESHOLD, CPER_SEV_NUM); + amdgpu_cper_entry_fill_hdr(adev, bp_threshold, + AMDGPU_CPER_TYPE_BP_THRESHOLD, + CPER_SEV_FATAL); ret = amdgpu_cper_entry_fill_bad_page_threshold_section(adev, bp_threshold, 0); if (ret) return ret; @@ -457,7 +465,7 @@ calc: void amdgpu_cper_ring_write(struct amdgpu_ring *ring, void *src, int count) { - u64 pos, wptr_old, rptr = *ring->rptr_cpu_addr & ring->ptr_mask; + u64 pos, wptr_old, rptr; int rec_cnt_dw = count >> 2; u32 chunk, ent_sz; u8 *s = (u8 *)src; @@ -470,9 +478,11 @@ void amdgpu_cper_ring_write(struct amdgpu_ring *ring, void *src, int count) return; } + mutex_lock(&ring->adev->cper.ring_lock); + wptr_old = ring->wptr; + rptr = *ring->rptr_cpu_addr & ring->ptr_mask; - mutex_lock(&ring->adev->cper.ring_lock); while (count) { ent_sz = amdgpu_cper_ring_get_ent_sz(ring, ring->wptr); chunk = umin(ent_sz, count); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 9ea0d9b71f48..9cd7741d2254 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -29,6 +29,7 @@ #include <linux/pagemap.h> #include <linux/sync_file.h> #include <linux/dma-buf.h> +#include <linux/hmm.h> #include <drm/amdgpu_drm.h> #include <drm/drm_syncobj.h> @@ -178,25 +179,17 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p, struct amdgpu_fpriv *fpriv = p->filp->driver_priv; unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { }; struct amdgpu_vm *vm = &fpriv->vm; - uint64_t *chunk_array_user; uint64_t *chunk_array; uint32_t uf_offset = 0; size_t size; int ret; int i; - chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), - GFP_KERNEL); - if (!chunk_array) - return -ENOMEM; - - /* get chunks */ - chunk_array_user = u64_to_user_ptr(cs->in.chunks); - if (copy_from_user(chunk_array, chunk_array_user, - sizeof(uint64_t)*cs->in.num_chunks)) { - ret = -EFAULT; - goto free_chunk; - } + chunk_array = memdup_array_user(u64_to_user_ptr(cs->in.chunks), + cs->in.num_chunks, + sizeof(uint64_t)); + if (IS_ERR(chunk_array)) + return PTR_ERR(chunk_array); p->nchunks = cs->in.num_chunks; p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk), @@ -209,7 +202,6 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p, for (i = 0; i < p->nchunks; i++) { struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL; struct drm_amdgpu_cs_chunk user_chunk; - uint32_t __user *cdata; chunk_ptr = u64_to_user_ptr(chunk_array[i]); if (copy_from_user(&user_chunk, chunk_ptr, @@ -222,20 +214,16 @@ static int 
amdgpu_cs_pass1(struct amdgpu_cs_parser *p, p->chunks[i].length_dw = user_chunk.length_dw; size = p->chunks[i].length_dw; - cdata = u64_to_user_ptr(user_chunk.chunk_data); - p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), - GFP_KERNEL); - if (p->chunks[i].kdata == NULL) { - ret = -ENOMEM; + p->chunks[i].kdata = vmemdup_array_user(u64_to_user_ptr(user_chunk.chunk_data), + size, + sizeof(uint32_t)); + if (IS_ERR(p->chunks[i].kdata)) { + ret = PTR_ERR(p->chunks[i].kdata); i--; goto free_partial_kdata; } size *= sizeof(uint32_t); - if (copy_from_user(p->chunks[i].kdata, cdata, size)) { - ret = -EFAULT; - goto free_partial_kdata; - } /* Assume the worst on the following checks */ ret = -EINVAL; @@ -286,14 +274,15 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p, } } - if (!p->gang_size) { + if (!p->gang_size || (amdgpu_sriov_vf(p->adev) && p->gang_size > 1)) { ret = -EINVAL; goto free_all_kdata; } for (i = 0; i < p->gang_size; ++i) { ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm, - num_ibs[i], &p->jobs[i]); + num_ibs[i], &p->jobs[i], + p->filp->client_id); if (ret) goto free_all_kdata; switch (p->adev->enforce_isolation[fpriv->xcp_id]) { @@ -395,7 +384,7 @@ static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p, chunk_ib->ib_bytes : 0, AMDGPU_IB_POOL_DELAYED, ib); if (r) { - DRM_ERROR("Failed to get ib !\n"); + drm_err(adev_to_drm(p->adev), "Failed to get ib !\n"); return r; } @@ -467,7 +456,7 @@ static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p, r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence); if (r) { - DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n", + drm_err(adev_to_drm(p->adev), "syncobj %u failed to find fence @ %llu (%d)!\n", handle, point, r); return r; } @@ -895,26 +884,13 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { bool userpage_invalidated = false; struct amdgpu_bo *bo = e->bo; - int i; - - e->user_pages = kvcalloc(bo->tbo.ttm->num_pages, - sizeof(struct page *), - GFP_KERNEL); - if (!e->user_pages) { - DRM_ERROR("kvmalloc_array failure\n"); - r = -ENOMEM; - goto out_free_user_pages; - } - r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range); - if (r) { - kvfree(e->user_pages); - e->user_pages = NULL; + r = amdgpu_ttm_tt_get_user_pages(bo, &e->range); + if (r) goto out_free_user_pages; - } for (i = 0; i < bo->tbo.ttm->num_pages; i++) { - if (bo->tbo.ttm->pages[i] != e->user_pages[i]) { + if (bo->tbo.ttm->pages[i] != hmm_pfn_to_page(e->range->hmm_pfns[i])) { userpage_invalidated = true; break; } @@ -958,7 +934,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, } if (amdgpu_ttm_tt_is_userptr(e->bo->tbo.ttm) && - e->user_invalidated && e->user_pages) { + e->user_invalidated) { amdgpu_bo_placement_from_domain(e->bo, AMDGPU_GEM_DOMAIN_CPU); r = ttm_bo_validate(&e->bo->tbo, &e->bo->placement, @@ -967,11 +943,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, goto out_free_user_pages; amdgpu_ttm_tt_set_user_pages(e->bo->tbo.ttm, - e->user_pages); + e->range); } - - kvfree(e->user_pages); - e->user_pages = NULL; } amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold, @@ -982,7 +955,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, r = amdgpu_vm_validate(p->adev, &fpriv->vm, NULL, amdgpu_cs_bo_validate, p); if (r) { - DRM_ERROR("amdgpu_vm_validate() failed.\n"); + drm_err(adev_to_drm(p->adev), "amdgpu_vm_validate() failed.\n"); goto out_free_user_pages; } @@ -1013,11 +986,7 @@ 
out_free_user_pages: amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { struct amdgpu_bo *bo = e->bo; - if (!e->user_pages) - continue; amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range); - kvfree(e->user_pages); - e->user_pages = NULL; e->range = NULL; } mutex_unlock(&p->bo_list->bo_list_mutex); @@ -1060,13 +1029,13 @@ static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p, va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK; r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m); if (r) { - DRM_ERROR("IB va_start is invalid\n"); + drm_err(adev_to_drm(p->adev), "IB va_start is invalid\n"); return r; } if ((va_start + ib->length_dw * 4) > (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) { - DRM_ERROR("IB va_start+ib_bytes is invalid\n"); + drm_err(adev_to_drm(p->adev), "IB va_start+ib_bytes is invalid\n"); return -EINVAL; } @@ -1138,6 +1107,9 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) } } + if (!amdgpu_vm_ready(vm)) + return -EINVAL; + r = amdgpu_vm_clear_freed(adev, vm, NULL); if (r) return r; @@ -1234,7 +1206,7 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]); if (r) { if (r != -ERESTARTSYS) - DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n"); + drm_err(adev_to_drm(p->adev), "amdgpu_ctx_wait_prev_fence failed.\n"); return r; } @@ -1447,7 +1419,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) r = amdgpu_cs_parser_init(&parser, adev, filp, data); if (r) { - DRM_ERROR_RATELIMITED("Failed to initialize parser %d!\n", r); + drm_err_ratelimited(dev, "Failed to initialize parser %d!\n", r); return r; } @@ -1462,9 +1434,9 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) r = amdgpu_cs_parser_bos(&parser, data); if (r) { if (r == -ENOMEM) - DRM_ERROR("Not enough memory for command submission!\n"); + drm_err(dev, "Not enough memory for command submission!\n"); else if (r != -ERESTARTSYS && r != -EAGAIN) - DRM_DEBUG("Failed to process the buffer list %d!\n", r); + drm_dbg(dev, "Failed to process the buffer list %d!\n", r); goto error_fini; } @@ -1763,30 +1735,21 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data, { struct amdgpu_device *adev = drm_to_adev(dev); union drm_amdgpu_wait_fences *wait = data; - uint32_t fence_count = wait->in.fence_count; - struct drm_amdgpu_fence *fences_user; struct drm_amdgpu_fence *fences; int r; /* Get the fences from userspace */ - fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence), - GFP_KERNEL); - if (fences == NULL) - return -ENOMEM; - - fences_user = u64_to_user_ptr(wait->in.fences); - if (copy_from_user(fences, fences_user, - sizeof(struct drm_amdgpu_fence) * fence_count)) { - r = -EFAULT; - goto err_free_fences; - } + fences = memdup_array_user(u64_to_user_ptr(wait->in.fences), + wait->in.fence_count, + sizeof(struct drm_amdgpu_fence)); + if (IS_ERR(fences)) + return PTR_ERR(fences); if (wait->in.wait_all) r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences); else r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences); -err_free_fences: kfree(fences); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index f81608330a3d..a70651050acf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -1786,7 +1786,7 @@ static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused) ti = amdgpu_vm_get_task_info_vm(vm); if (ti) { - seq_printf(m, 
"pid:%d\tProcess:%s ----------\n", ti->pid, ti->process_name); + seq_printf(m, "pid:%d\tProcess:%s ----------\n", ti->task.pid, ti->process_name); amdgpu_vm_put_task_info(ti); } @@ -2131,6 +2131,61 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) return 0; } +static int amdgpu_pt_info_read(struct seq_file *m, void *unused) +{ + struct drm_file *file; + struct amdgpu_fpriv *fpriv; + struct amdgpu_bo *root_bo; + struct amdgpu_device *adev; + int r; + + file = m->private; + if (!file) + return -EINVAL; + + adev = drm_to_adev(file->minor->dev); + fpriv = file->driver_priv; + if (!fpriv || !fpriv->vm.root.bo) + return -ENODEV; + + root_bo = amdgpu_bo_ref(fpriv->vm.root.bo); + r = amdgpu_bo_reserve(root_bo, true); + if (r) { + amdgpu_bo_unref(&root_bo); + return -EINVAL; + } + + seq_printf(m, "pd_address: 0x%llx\n", amdgpu_gmc_pd_addr(fpriv->vm.root.bo)); + seq_printf(m, "max_pfn: 0x%llx\n", adev->vm_manager.max_pfn); + seq_printf(m, "num_level: 0x%x\n", adev->vm_manager.num_level); + seq_printf(m, "block_size: 0x%x\n", adev->vm_manager.block_size); + seq_printf(m, "fragment_size: 0x%x\n", adev->vm_manager.fragment_size); + + amdgpu_bo_unreserve(root_bo); + amdgpu_bo_unref(&root_bo); + + return 0; +} + +static int amdgpu_pt_info_open(struct inode *inode, struct file *file) +{ + return single_open(file, amdgpu_pt_info_read, inode->i_private); +} + +static const struct file_operations amdgpu_pt_info_fops = { + .owner = THIS_MODULE, + .open = amdgpu_pt_info_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +void amdgpu_debugfs_vm_init(struct drm_file *file) +{ + debugfs_create_file("vm_pagetable_info", 0444, file->debugfs_client, file, + &amdgpu_pt_info_fops); +} + #else int amdgpu_debugfs_init(struct amdgpu_device *adev) { @@ -2140,4 +2195,7 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) { return 0; } +void amdgpu_debugfs_vm_init(struct drm_file *file) +{ +} #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h index 0425432d8659..e7b3c38e5186 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h @@ -33,4 +33,5 @@ void amdgpu_debugfs_fence_init(struct amdgpu_device *adev); void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev); void amdgpu_debugfs_gem_init(struct amdgpu_device *adev); void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev); +void amdgpu_debugfs_vm_init(struct drm_file *file); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c index 7b50741dc097..8a026bc9ea44 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c @@ -220,10 +220,10 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count, drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec, coredump->reset_time.tv_nsec); - if (coredump->reset_task_info.pid) + if (coredump->reset_task_info.task.pid) drm_printf(&p, "process_name: %s PID: %d\n", coredump->reset_task_info.process_name, - coredump->reset_task_info.pid); + coredump->reset_task_info.task.pid); /* SOC Information */ drm_printf(&p, "\nSOC Information\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index aa32df7e2fb2..a77000c2e0bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -95,6 +95,7 @@ 
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin"); +MODULE_FIRMWARE("amdgpu/cyan_skillfish_gpu_info.bin"); #define AMDGPU_RESUME_MS 2000 #define AMDGPU_MAX_RETRY_LIMIT 2 @@ -178,6 +179,8 @@ struct amdgpu_init_level amdgpu_init_minimal_xgmi = { BIT(AMD_IP_BLOCK_TYPE_PSP) }; +static void amdgpu_device_load_switch_state(struct amdgpu_device *adev); + static inline bool amdgpu_ip_member_of_hwini(struct amdgpu_device *adev, enum amd_ip_block_type block) { @@ -232,7 +235,7 @@ static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev) { int ret = 0; - if (!amdgpu_sriov_vf(adev)) + if (amdgpu_nbio_is_replay_cnt_supported(adev)) ret = sysfs_create_file(&adev->dev->kobj, &dev_attr_pcie_replay_count.attr); @@ -241,7 +244,7 @@ static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev) static void amdgpu_device_attr_sysfs_fini(struct amdgpu_device *adev) { - if (!amdgpu_sriov_vf(adev)) + if (amdgpu_nbio_is_replay_cnt_supported(adev)) sysfs_remove_file(&adev->dev->kobj, &dev_attr_pcie_replay_count.attr); } @@ -411,19 +414,16 @@ static const struct attribute_group amdgpu_board_attrs_group = { static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev); - /** * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control * - * @dev: drm_device pointer + * @adev: amdgpu device pointer * * Returns true if the device is a dGPU with ATPX power control, * otherwise return false. */ -bool amdgpu_device_supports_px(struct drm_device *dev) +bool amdgpu_device_supports_px(struct amdgpu_device *adev) { - struct amdgpu_device *adev = drm_to_adev(dev); - if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid()) return true; return false; @@ -432,15 +432,13 @@ bool amdgpu_device_supports_px(struct drm_device *dev) /** * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources * - * @dev: drm_device pointer + * @adev: amdgpu device pointer * * Returns true if the device is a dGPU with ACPI power control, * otherwise return false. */ -bool amdgpu_device_supports_boco(struct drm_device *dev) +bool amdgpu_device_supports_boco(struct amdgpu_device *adev) { - struct amdgpu_device *adev = drm_to_adev(dev); - if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) return false; @@ -453,29 +451,24 @@ bool amdgpu_device_supports_boco(struct drm_device *dev) /** * amdgpu_device_supports_baco - Does the device support BACO * - * @dev: drm_device pointer + * @adev: amdgpu device pointer * * Return: * 1 if the device supports BACO; * 3 if the device supports MACO (only works if BACO is supported) * otherwise return 0. 
*/ -int amdgpu_device_supports_baco(struct drm_device *dev) +int amdgpu_device_supports_baco(struct amdgpu_device *adev) { - struct amdgpu_device *adev = drm_to_adev(dev); - return amdgpu_asic_supports_baco(adev); } void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev) { - struct drm_device *dev; int bamaco_support; - dev = adev_to_drm(adev); - adev->pm.rpm_mode = AMDGPU_RUNPM_NONE; - bamaco_support = amdgpu_device_supports_baco(dev); + bamaco_support = amdgpu_device_supports_baco(adev); switch (amdgpu_runtime_pm) { case 2: @@ -495,10 +488,12 @@ void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev) break; case -1: case -2: - if (amdgpu_device_supports_px(dev)) { /* enable PX as runtime mode */ + if (amdgpu_device_supports_px(adev)) { + /* enable PX as runtime mode */ adev->pm.rpm_mode = AMDGPU_RUNPM_PX; dev_info(adev->dev, "Using ATPX for runtime pm\n"); - } else if (amdgpu_device_supports_boco(dev)) { /* enable boco as runtime mode */ + } else if (amdgpu_device_supports_boco(adev)) { + /* enable boco as runtime mode */ adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO; dev_info(adev->dev, "Using BOCO for runtime pm\n"); } else { @@ -547,14 +542,14 @@ no_runtime_pm: * amdgpu_device_supports_smart_shift - Is the device dGPU with * smart shift support * - * @dev: drm_device pointer + * @adev: amdgpu device pointer * * Returns true if the device is a dGPU with Smart Shift support, * otherwise returns false. */ -bool amdgpu_device_supports_smart_shift(struct drm_device *dev) +bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev) { - return (amdgpu_device_supports_boco(dev) && + return (amdgpu_device_supports_boco(adev) && amdgpu_acpi_is_power_shift_control_supported()); } @@ -1288,14 +1283,14 @@ u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev) */ static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg) { - DRM_ERROR("Invalid callback to read register 0x%04X\n", reg); + dev_err(adev->dev, "Invalid callback to read register 0x%04X\n", reg); BUG(); return 0; } static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg) { - DRM_ERROR("Invalid callback to read register 0x%llX\n", reg); + dev_err(adev->dev, "Invalid callback to read register 0x%llX\n", reg); BUG(); return 0; } @@ -1312,15 +1307,17 @@ static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg */ static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) { - DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n", - reg, v); + dev_err(adev->dev, + "Invalid callback to write register 0x%04X with 0x%08X\n", reg, + v); BUG(); } static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v) { - DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n", - reg, v); + dev_err(adev->dev, + "Invalid callback to write register 0x%llX with 0x%08X\n", reg, + v); BUG(); } @@ -1336,14 +1333,15 @@ static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, ui */ static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg) { - DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg); + dev_err(adev->dev, "Invalid callback to read 64 bit register 0x%04X\n", + reg); BUG(); return 0; } static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg) { - DRM_ERROR("Invalid callback to read register 0x%llX\n", reg); + dev_err(adev->dev, "Invalid callback to read register 0x%llX\n", reg); BUG(); return 0; } @@ 
-1360,15 +1358,17 @@ static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t r */ static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v) { - DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n", - reg, v); + dev_err(adev->dev, + "Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n", + reg, v); BUG(); } static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v) { - DRM_ERROR("Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n", - reg, v); + dev_err(adev->dev, + "Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n", + reg, v); BUG(); } @@ -1386,8 +1386,9 @@ static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev, uint32_t block, uint32_t reg) { - DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n", - reg, block); + dev_err(adev->dev, + "Invalid callback to read register 0x%04X in block 0x%04X\n", + reg, block); BUG(); return 0; } @@ -1407,8 +1408,9 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev, uint32_t block, uint32_t reg, uint32_t v) { - DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n", - reg, block, v); + dev_err(adev->dev, + "Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n", + reg, block, v); BUG(); } @@ -1694,7 +1696,9 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) /* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */ if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR)) - DRM_WARN("System can't access extended configuration space, please check!!\n"); + dev_warn( + adev->dev, + "System can't access extended configuration space, please check!!\n"); /* skip if the bios has already enabled large BAR */ if (adev->gmc.real_vram_size && @@ -1734,9 +1738,10 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) r = pci_resize_resource(adev->pdev, 0, rbar_size); if (r == -ENOSPC) - DRM_INFO("Not enough PCI address space for a large BAR."); + dev_info(adev->dev, + "Not enough PCI address space for a large BAR."); else if (r && r != -ENOTSUPP) - DRM_ERROR("Problem resizing BAR0 (%d).", r); + dev_err(adev->dev, "Problem resizing BAR0 (%d).", r); pci_assign_unassigned_bus_resources(adev->pdev->bus); @@ -1838,8 +1843,8 @@ bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev) case 0: return false; default: - DRM_ERROR("Invalid value for amdgpu.seamless: %d\n", - amdgpu_seamless); + dev_err(adev->dev, "Invalid value for amdgpu.seamless: %d\n", + amdgpu_seamless); return false; } @@ -2015,7 +2020,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev) return; if (!is_os_64) { - DRM_WARN("Not 64-bit OS, feature not supported\n"); + dev_warn(adev->dev, "Not 64-bit OS, feature not supported\n"); goto def_value; } si_meminfo(&si); @@ -2030,7 +2035,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev) if (total_memory < dram_size_seven_GB) goto def_value1; } else { - DRM_WARN("Smu memory pool size not supported\n"); + dev_warn(adev->dev, "Smu memory pool size not supported\n"); goto def_value; } adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28; @@ -2038,7 +2043,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev) return; def_value1: - DRM_WARN("No enough system memory\n"); + 
dev_warn(adev->dev, "No enough system memory\n"); def_value: adev->pm.smu_prv_buffer_size = 0; } @@ -2190,7 +2195,8 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, struct drm_device *dev = pci_get_drvdata(pdev); int r; - if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF) + if (amdgpu_device_supports_px(drm_to_adev(dev)) && + state == VGA_SWITCHEROO_OFF) return; if (state == VGA_SWITCHEROO_ON) { @@ -2202,12 +2208,13 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, amdgpu_device_load_pci_state(pdev); r = pci_enable_device(pdev); if (r) - DRM_WARN("pci_enable_device failed (%d)\n", r); + dev_warn(&pdev->dev, "pci_enable_device failed (%d)\n", + r); amdgpu_device_resume(dev, true); dev->switch_power_state = DRM_SWITCH_POWER_ON; } else { - pr_info("switched off\n"); + dev_info(&pdev->dev, "switched off\n"); dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; amdgpu_device_prepare(dev); amdgpu_device_suspend(dev, true); @@ -2274,8 +2281,9 @@ int amdgpu_device_ip_set_clockgating_state(void *dev, r = adev->ip_blocks[i].version->funcs->set_clockgating_state( &adev->ip_blocks[i], state); if (r) - DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + dev_err(adev->dev, + "set_clockgating_state of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); } return r; } @@ -2308,8 +2316,9 @@ int amdgpu_device_ip_set_powergating_state(void *dev, r = adev->ip_blocks[i].version->funcs->set_powergating_state( &adev->ip_blocks[i], state); if (r) - DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + dev_err(adev->dev, + "set_powergating_state of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); } return r; } @@ -2439,6 +2448,33 @@ int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev, return 1; } +static const char *ip_block_names[] = { + [AMD_IP_BLOCK_TYPE_COMMON] = "common", + [AMD_IP_BLOCK_TYPE_GMC] = "gmc", + [AMD_IP_BLOCK_TYPE_IH] = "ih", + [AMD_IP_BLOCK_TYPE_SMC] = "smu", + [AMD_IP_BLOCK_TYPE_PSP] = "psp", + [AMD_IP_BLOCK_TYPE_DCE] = "dce", + [AMD_IP_BLOCK_TYPE_GFX] = "gfx", + [AMD_IP_BLOCK_TYPE_SDMA] = "sdma", + [AMD_IP_BLOCK_TYPE_UVD] = "uvd", + [AMD_IP_BLOCK_TYPE_VCE] = "vce", + [AMD_IP_BLOCK_TYPE_ACP] = "acp", + [AMD_IP_BLOCK_TYPE_VCN] = "vcn", + [AMD_IP_BLOCK_TYPE_MES] = "mes", + [AMD_IP_BLOCK_TYPE_JPEG] = "jpeg", + [AMD_IP_BLOCK_TYPE_VPE] = "vpe", + [AMD_IP_BLOCK_TYPE_UMSCH_MM] = "umsch_mm", + [AMD_IP_BLOCK_TYPE_ISP] = "isp", +}; + +static const char *ip_block_name(struct amdgpu_device *adev, enum amd_ip_block_type type) +{ + int idx = (int)type; + + return idx < ARRAY_SIZE(ip_block_names) ? 
ip_block_names[idx] : "unknown"; +} + /** * amdgpu_device_ip_block_add * @@ -2467,8 +2503,13 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev, break; } - dev_info(adev->dev, "detected ip block number %d <%s>\n", - adev->num_ip_blocks, ip_block_version->funcs->name); + dev_info(adev->dev, "detected ip block number %d <%s_v%d_%d_%d> (%s)\n", + adev->num_ip_blocks, + ip_block_name(adev, ip_block_version->type), + ip_block_version->major, + ip_block_version->minor, + ip_block_version->rev, + ip_block_version->funcs->name); adev->ip_blocks[adev->num_ip_blocks].adev = adev; @@ -2525,9 +2566,11 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev) } } - DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n", - amdgpu_virtual_display, pci_address_name, - adev->enable_virtual_display, adev->mode_info.num_crtc); + dev_info( + adev->dev, + "virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n", + amdgpu_virtual_display, pci_address_name, + adev->enable_virtual_display, adev->mode_info.num_crtc); kfree(pciaddstr); } @@ -2538,8 +2581,9 @@ void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) { adev->mode_info.num_crtc = 1; adev->enable_virtual_display = true; - DRM_INFO("virtual_display:%d, num_crtc:%d\n", - adev->enable_virtual_display, adev->mode_info.num_crtc); + dev_info(adev->dev, "virtual_display:%d, num_crtc:%d\n", + adev->enable_virtual_display, + adev->mode_info.num_crtc); } } @@ -2561,9 +2605,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) adev->firmware.gpu_info_fw = NULL; - if (adev->mman.discovery_bin) - return 0; - switch (adev->asic_type) { default: return 0; @@ -2585,8 +2626,13 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) chip_name = "arcturus"; break; case CHIP_NAVI12: + if (adev->mman.discovery_bin) + return 0; chip_name = "navi12"; break; + case CHIP_CYAN_SKILLFISH: + chip_name = "cyan_skillfish"; + break; } err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, @@ -2666,6 +2712,24 @@ out: return err; } +static void amdgpu_uid_init(struct amdgpu_device *adev) +{ + /* Initialize the UID for the device */ + adev->uid_info = kzalloc(sizeof(struct amdgpu_uid), GFP_KERNEL); + if (!adev->uid_info) { + dev_warn(adev->dev, "Failed to allocate memory for UID\n"); + return; + } + adev->uid_info->adev = adev; +} + +static void amdgpu_uid_fini(struct amdgpu_device *adev) +{ + /* Free the UID memory */ + kfree(adev->uid_info); + adev->uid_info = NULL; +} + /** * amdgpu_device_ip_early_init - run early init for hardware IPs * @@ -2773,21 +2837,29 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) if (!amdgpu_device_pcie_dynamic_switching_supported(adev)) adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK; + adev->virt.is_xgmi_node_migrate_enabled = false; + if (amdgpu_sriov_vf(adev)) { + adev->virt.is_xgmi_node_migrate_enabled = + amdgpu_ip_version((adev), GC_HWIP, 0) == IP_VERSION(9, 4, 4); + } + total = true; for (i = 0; i < adev->num_ip_blocks; i++) { ip_block = &adev->ip_blocks[i]; if ((amdgpu_ip_block_mask & (1 << i)) == 0) { - DRM_WARN("disabled ip block: %d <%s>\n", - i, adev->ip_blocks[i].version->funcs->name); + dev_warn(adev->dev, "disabled ip block: %d <%s>\n", i, + adev->ip_blocks[i].version->funcs->name); adev->ip_blocks[i].status.valid = false; } else if (ip_block->version->funcs->early_init) { r = ip_block->version->funcs->early_init(ip_block); if (r == -ENOENT) { 
adev->ip_blocks[i].status.valid = false; } else if (r) { - DRM_ERROR("early_init of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + dev_err(adev->dev, + "early_init of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, + r); total = false; } else { adev->ip_blocks[i].status.valid = true; @@ -2841,6 +2913,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) if (adev->gmc.xgmi.supported) amdgpu_xgmi_early_init(adev); + if (amdgpu_is_multi_aid(adev)) + amdgpu_uid_init(adev); ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX); if (ip_block->status.valid != false) amdgpu_amdkfd_device_probe(adev); @@ -2868,8 +2942,10 @@ static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev) adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) { r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); if (r) { - DRM_ERROR("hw_init of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + dev_err(adev->dev, + "hw_init of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, + r); return r; } adev->ip_blocks[i].status.hw = true; @@ -2893,8 +2969,9 @@ static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev) continue; r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); if (r) { - DRM_ERROR("hw_init of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + dev_err(adev->dev, + "hw_init of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); return r; } adev->ip_blocks[i].status.hw = true; @@ -2932,8 +3009,11 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev) } else { r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); if (r) { - DRM_ERROR("hw_init of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + dev_err(adev->dev, + "hw_init of IP block <%s> failed %d\n", + adev->ip_blocks[i] + .version->funcs->name, + r); return r; } adev->ip_blocks[i].status.hw = true; @@ -2988,25 +3068,29 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev) r = drm_sched_init(&ring->sched, &args); if (r) { - DRM_ERROR("Failed to create scheduler on ring %s.\n", - ring->name); + dev_err(adev->dev, + "Failed to create scheduler on ring %s.\n", + ring->name); return r; } r = amdgpu_uvd_entity_init(adev, ring); if (r) { - DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n", - ring->name); + dev_err(adev->dev, + "Failed to create UVD scheduling entity on ring %s.\n", + ring->name); return r; } r = amdgpu_vce_entity_init(adev, ring); if (r) { - DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n", - ring->name); + dev_err(adev->dev, + "Failed to create VCE scheduling entity on ring %s.\n", + ring->name); return r; } } - amdgpu_xcp_update_partition_sched_list(adev); + if (adev->xcp_mgr) + amdgpu_xcp_update_partition_sched_list(adev); return 0; } @@ -3038,8 +3122,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) if (adev->ip_blocks[i].version->funcs->sw_init) { r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]); if (r) { - DRM_ERROR("sw_init of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + dev_err(adev->dev, + "sw_init of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, + r); goto init_failed; } } @@ -3053,7 +3139,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) /* need to do common hw init early so everything is set 
up for gmc */ r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); if (r) { - DRM_ERROR("hw_init %d failed %d\n", i, r); + dev_err(adev->dev, "hw_init %d failed %d\n", i, + r); goto init_failed; } adev->ip_blocks[i].status.hw = true; @@ -3065,17 +3152,21 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) r = amdgpu_device_mem_scratch_init(adev); if (r) { - DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r); + dev_err(adev->dev, + "amdgpu_mem_scratch_init failed %d\n", + r); goto init_failed; } r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); if (r) { - DRM_ERROR("hw_init %d failed %d\n", i, r); + dev_err(adev->dev, "hw_init %d failed %d\n", i, + r); goto init_failed; } r = amdgpu_device_wb_init(adev); if (r) { - DRM_ERROR("amdgpu_device_wb_init failed %d\n", r); + dev_err(adev->dev, + "amdgpu_device_wb_init failed %d\n", r); goto init_failed; } adev->ip_blocks[i].status.hw = true; @@ -3087,14 +3178,16 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) AMDGPU_GEM_DOMAIN_GTT, AMDGPU_CSA_SIZE); if (r) { - DRM_ERROR("allocate CSA failed %d\n", r); + dev_err(adev->dev, + "allocate CSA failed %d\n", r); goto init_failed; } } r = amdgpu_seq64_init(adev); if (r) { - DRM_ERROR("allocate seq64 failed %d\n", r); + dev_err(adev->dev, "allocate seq64 failed %d\n", + r); goto init_failed; } } @@ -3235,6 +3328,7 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev) * always assumed to be lost. */ switch (amdgpu_asic_reset_method(adev)) { + case AMD_RESET_METHOD_LEGACY: case AMD_RESET_METHOD_LINK: case AMD_RESET_METHOD_BACO: case AMD_RESET_METHOD_MODE1: @@ -3284,8 +3378,10 @@ int amdgpu_device_set_cg_state(struct amdgpu_device *adev, r = adev->ip_blocks[i].version->funcs->set_clockgating_state(&adev->ip_blocks[i], state); if (r) { - DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + dev_err(adev->dev, + "set_clockgating_state(gate) of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, + r); return r; } } @@ -3321,8 +3417,10 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev, r = adev->ip_blocks[i].version->funcs->set_powergating_state(&adev->ip_blocks[i], state); if (r) { - DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + dev_err(adev->dev, + "set_powergating_state(gate) of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, + r); return r; } } @@ -3349,7 +3447,7 @@ static int amdgpu_device_enable_mgpu_fan_boost(void) for (i = 0; i < mgpu_info.num_dgpu; i++) { gpu_ins = &(mgpu_info.gpu_ins[i]); adev = gpu_ins->adev; - if (!(adev->flags & AMD_IS_APU) && + if (!(adev->flags & AMD_IS_APU || amdgpu_sriov_multi_vf_mode(adev)) && !gpu_ins->mgpu_fan_enabled) { ret = amdgpu_dpm_enable_mgpu_fan_boost(adev); if (ret) @@ -3388,8 +3486,10 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) if (adev->ip_blocks[i].version->funcs->late_init) { r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]); if (r) { - DRM_ERROR("late_init of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + dev_err(adev->dev, + "late_init of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, + r); return r; } } @@ -3398,7 +3498,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) r = amdgpu_ras_late_init(adev); if (r) { - DRM_ERROR("amdgpu_ras_late_init failed %d", r); + 
dev_err(adev->dev, "amdgpu_ras_late_init failed %d", r); return r; } @@ -3412,7 +3512,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) r = amdgpu_device_enable_mgpu_fan_boost(); if (r) - DRM_ERROR("enable mgpu fan boost failed (%d).\n", r); + dev_err(adev->dev, "enable mgpu fan boost failed (%d).\n", r); /* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */ if (amdgpu_passthrough(adev) && @@ -3445,7 +3545,9 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) r = amdgpu_xgmi_set_pstate(gpu_instance->adev, AMDGPU_XGMI_PSTATE_MIN); if (r) { - DRM_ERROR("pstate setting failed (%d).\n", r); + dev_err(adev->dev, + "pstate setting failed (%d).\n", + r); break; } } @@ -3459,17 +3561,19 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) static void amdgpu_ip_block_hw_fini(struct amdgpu_ip_block *ip_block) { + struct amdgpu_device *adev = ip_block->adev; int r; if (!ip_block->version->funcs->hw_fini) { - DRM_ERROR("hw_fini of IP block <%s> not defined\n", - ip_block->version->funcs->name); + dev_err(adev->dev, "hw_fini of IP block <%s> not defined\n", + ip_block->version->funcs->name); } else { r = ip_block->version->funcs->hw_fini(ip_block); /* XXX handle errors */ if (r) { - DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", - ip_block->version->funcs->name, r); + dev_dbg(adev->dev, + "hw_fini of IP block <%s> failed %d\n", + ip_block->version->funcs->name, r); } } @@ -3510,15 +3614,16 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]); if (r) { - DRM_DEBUG("early_fini of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + dev_dbg(adev->dev, + "early_fini of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); } } amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); - amdgpu_amdkfd_suspend(adev, false); + amdgpu_amdkfd_suspend(adev, true); amdgpu_userq_suspend(adev); /* Workaround for ASICs need to disable SMC first */ @@ -3533,7 +3638,8 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev)) { if (amdgpu_virt_release_full_gpu(adev, false)) - DRM_ERROR("failed to release exclusive mode on fini\n"); + dev_err(adev->dev, + "failed to release exclusive mode on fini\n"); } return 0; @@ -3581,8 +3687,10 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]); /* XXX handle errors */ if (r) { - DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + dev_dbg(adev->dev, + "sw_fini of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, + r); } } adev->ip_blocks[i].status.sw = false; @@ -3598,6 +3706,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) } amdgpu_ras_fini(adev); + amdgpu_uid_fini(adev); return 0; } @@ -3615,7 +3724,7 @@ static void amdgpu_device_delayed_init_work_handler(struct work_struct *work) r = amdgpu_ib_ring_tests(adev); if (r) - DRM_ERROR("ib ring test failed (%d).\n", r); + dev_err(adev->dev, "ib ring test failed (%d).\n", r); } static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work) @@ -3756,8 +3865,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state); if (r) 
{ - DRM_ERROR("SMC failed to set mp1 state %d, %d\n", - adev->mp1_state, r); + dev_err(adev->dev, + "SMC failed to set mp1 state %d, %d\n", + adev->mp1_state, r); return r; } } @@ -4041,12 +4151,14 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev) /** * amdgpu_device_asic_has_dc_support - determine if DC supports the asic * + * @pdev : pci device context * @asic_type: AMD asic type * * Check if there is DC (new modesetting infrastructre) support for an asic. * returns true if DC has support, false if not. */ -bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) +bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev, + enum amd_asic_type asic_type) { switch (asic_type) { #ifdef CONFIG_DRM_AMDGPU_SI @@ -4089,7 +4201,9 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) #else default: if (amdgpu_dc > 0) - DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n"); + dev_info_once( + &pdev->dev, + "Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n"); return false; #endif } @@ -4108,7 +4222,7 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev) (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)) return false; - return amdgpu_device_asic_has_dc_support(adev->asic_type); + return amdgpu_device_asic_has_dc_support(adev->pdev, adev->asic_type); } static void amdgpu_device_xgmi_reset_func(struct work_struct *__work) @@ -4130,13 +4244,13 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work) if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { task_barrier_enter(&hive->tb); - adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev)); + adev->asic_reset_res = amdgpu_device_baco_enter(adev); if (adev->asic_reset_res) goto fail; task_barrier_exit(&hive->tb); - adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev)); + adev->asic_reset_res = amdgpu_device_baco_exit(adev); if (adev->asic_reset_res) goto fail; @@ -4150,7 +4264,8 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work) fail: if (adev->asic_reset_res) - DRM_WARN("ASIC reset failed with error, %d for drm dev, %s", + dev_warn(adev->dev, + "ASIC reset failed with error, %d for drm dev, %s", adev->asic_reset_res, adev_to_drm(adev)->unique); amdgpu_put_xgmi_hive(hive); } @@ -4164,18 +4279,10 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) int ret = 0; /* - * By default timeout for non compute jobs is 10000 - * and 60000 for compute jobs. - * In SR-IOV or passthrough mode, timeout for compute - * jobs are 60000 by default. + * By default timeout for jobs is 10 sec */ - adev->gfx_timeout = msecs_to_jiffies(10000); + adev->compute_timeout = adev->gfx_timeout = msecs_to_jiffies(10000); adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; - if (amdgpu_sriov_vf(adev)) - adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ? 
- msecs_to_jiffies(60000) : msecs_to_jiffies(10000); - else - adev->compute_timeout = msecs_to_jiffies(60000); if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { while ((timeout_setting = strsep(&input, ",")) && @@ -4274,7 +4381,7 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev) adev->gfx.mcbp = true; if (adev->gfx.mcbp) - DRM_INFO("MCBP is enabled\n"); + dev_info(adev->dev, "MCBP is enabled\n"); } /** @@ -4290,7 +4397,6 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev) int amdgpu_device_init(struct amdgpu_device *adev, uint32_t flags) { - struct drm_device *ddev = adev_to_drm(adev); struct pci_dev *pdev = adev->pdev; int r, i; bool px = false; @@ -4342,9 +4448,11 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg; adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg; - DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", - amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, - pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); + dev_info( + adev->dev, + "initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", + amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, + pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); /* mutex initialization are all done here so we * can recall function without having locking issues @@ -4461,8 +4569,10 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (!adev->rmmio) return -ENOMEM; - DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); - DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size); + dev_info(adev->dev, "register mmio base: 0x%08X\n", + (uint32_t)adev->rmmio_base); + dev_info(adev->dev, "register mmio size: %u\n", + (unsigned int)adev->rmmio_size); /* * Reset domain needs to be present early, before XGMI hive discovered @@ -4599,7 +4709,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = -EINVAL; goto failed; } - DRM_INFO("GPU posting now...\n"); + dev_info(adev->dev, "GPU posting now...\n"); r = amdgpu_device_asic_init(adev); if (r) { dev_err(adev->dev, "gpu post error!\n"); @@ -4709,12 +4819,12 @@ fence_driver_init: r = amdgpu_pm_sysfs_init(adev); if (r) - DRM_ERROR("registering pm sysfs failed (%d).\n", r); + dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r); r = amdgpu_ucode_sysfs_init(adev); if (r) { adev->ucode_sysfs_en = false; - DRM_ERROR("Creating firmware sysfs failed (%d).\n", r); + dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r); } else adev->ucode_sysfs_en = true; @@ -4747,7 +4857,7 @@ fence_driver_init: if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) vga_client_register(adev->pdev, amdgpu_device_vga_set_decode); - px = amdgpu_device_supports_px(ddev); + px = amdgpu_device_supports_px(adev); if (px || (!dev_is_removable(&adev->pdev->dev) && apple_gmux_detect(NULL, NULL))) @@ -4913,7 +5023,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) kfree(adev->xcp_mgr); adev->xcp_mgr = NULL; - px = amdgpu_device_supports_px(adev_to_drm(adev)); + px = amdgpu_device_supports_px(adev); if (px || (!dev_is_removable(&adev->pdev->dev) && apple_gmux_detect(NULL, NULL))) @@ -4941,7 +5051,8 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) adev->reset_domain = NULL; kfree(adev->pci_state); - + kfree(adev->pcie_reset_ctx.swds_pcistate); + kfree(adev->pcie_reset_ctx.swus_pcistate); } /** @@ -4961,9 +5072,21 @@ static int amdgpu_device_evict_resources(struct 
amdgpu_device *adev) if (!adev->in_s4 && (adev->flags & AMD_IS_APU)) return 0; + /* No need to evict when going to S5 through S4 callbacks */ + if (system_state == SYSTEM_POWER_OFF) + return 0; + ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM); - if (ret) - DRM_WARN("evicting device resources failed\n"); + if (ret) { + dev_warn(adev->dev, "evicting device resources failed\n"); + return ret; + } + + if (adev->in_s4) { + ret = ttm_device_prepare_hibernation(&adev->mman.bdev); + if (ret) + dev_err(adev->dev, "prepare hibernation failed, %d\n", ret); + } return ret; } @@ -5035,6 +5158,28 @@ int amdgpu_device_prepare(struct drm_device *dev) } /** + * amdgpu_device_complete - complete power state transition + * + * @dev: drm dev pointer + * + * Undo the changes from amdgpu_device_prepare. This will be + * called on all resume transitions, including those that failed. + */ +void amdgpu_device_complete(struct drm_device *dev) +{ + struct amdgpu_device *adev = drm_to_adev(dev); + int i; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_blocks[i].status.valid) + continue; + if (!adev->ip_blocks[i].version->funcs->complete) + continue; + adev->ip_blocks[i].version->funcs->complete(&adev->ip_blocks[i]); + } +} + +/** * amdgpu_device_suspend - initiate device suspend * * @dev: drm dev pointer @@ -5055,14 +5200,16 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients) adev->in_suspend = true; if (amdgpu_sriov_vf(adev)) { + if (!adev->in_runpm) + amdgpu_amdkfd_suspend_process(adev); amdgpu_virt_fini_data_exchange(adev); r = amdgpu_virt_request_full_gpu(adev, false); if (r) return r; } - if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3)) - DRM_WARN("smart shift update failed\n"); + if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D3)) + dev_warn(adev->dev, "smart shift update failed\n"); if (notify_clients) drm_client_dev_suspend(adev_to_drm(adev), false); @@ -5073,10 +5220,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients) amdgpu_device_ip_suspend_phase1(adev); - if (!adev->in_s0ix) { - amdgpu_amdkfd_suspend(adev, adev->in_runpm); - amdgpu_userq_suspend(adev); - } + amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm); + amdgpu_userq_suspend(adev); r = amdgpu_device_evict_resources(adev); if (r) @@ -5098,6 +5243,32 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients) return 0; } +static inline int amdgpu_virt_resume(struct amdgpu_device *adev) +{ + int r; + unsigned int prev_physical_node_id = adev->gmc.xgmi.physical_node_id; + + /* During VM resume, QEMU programming of VF MSIX table (register GFXMSIX_VECT0_ADDR_LO) + * may not work. The access could be blocked by nBIF protection as VF isn't in + * exclusive access mode. Exclusive access is enabled now, disable/enable MSIX + * so that QEMU reprograms MSIX table. 
+ */ + amdgpu_restore_msix(adev); + + r = adev->gfxhub.funcs->get_xgmi_info(adev); + if (r) + return r; + + dev_info(adev->dev, "xgmi node, old id %d, new id %d\n", + prev_physical_node_id, adev->gmc.xgmi.physical_node_id); + + adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev); + adev->vm_manager.vram_base_offset += + adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; + + return 0; +} + /** * amdgpu_device_resume - initiate device resume * @@ -5119,6 +5290,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients) return r; } + if (amdgpu_virt_xgmi_migrate_enabled(adev)) { + r = amdgpu_virt_resume(adev); + if (r) + goto exit; + } + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; @@ -5139,15 +5316,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients) goto exit; } - if (!adev->in_s0ix) { - r = amdgpu_amdkfd_resume(adev, adev->in_runpm); - if (r) - goto exit; + r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm); + if (r) + goto exit; - r = amdgpu_userq_resume(adev); - if (r) - goto exit; - } + r = amdgpu_userq_resume(adev); + if (r) + goto exit; r = amdgpu_device_ip_late_init(adev); if (r) @@ -5159,6 +5334,9 @@ exit: if (amdgpu_sriov_vf(adev)) { amdgpu_virt_init_data_exchange(adev); amdgpu_virt_release_full_gpu(adev, true); + + if (!r && !adev->in_runpm) + r = amdgpu_amdkfd_resume_process(adev); } if (r) @@ -5197,8 +5375,8 @@ exit: amdgpu_vram_mgr_clear_reset_blocks(adev); adev->in_suspend = false; - if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0)) - DRM_WARN("smart shift update failed\n"); + if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D0)) + dev_warn(adev->dev, "smart shift update failed\n"); return 0; } @@ -5583,7 +5761,7 @@ int amdgpu_device_link_reset(struct amdgpu_device *adev) dev_info(adev->dev, "GPU link reset\n"); - if (!adev->pcie_reset_ctx.occurs_dpc) + if (!amdgpu_reset_in_dpc(adev)) ret = amdgpu_dpm_link_reset(adev); if (ret) @@ -5712,6 +5890,7 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context) amdgpu_set_init_level(tmp_adev, init_level); if (full_reset) { /* post card */ + amdgpu_reset_set_dpc_status(tmp_adev, false); amdgpu_ras_clear_err_state(tmp_adev); r = amdgpu_device_asic_init(tmp_adev); if (r) { @@ -5729,7 +5908,9 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context) amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job); if (vram_lost) { - DRM_INFO("VRAM is lost due to GPU reset!\n"); + dev_info( + tmp_adev->dev, + "VRAM is lost due to GPU reset!\n"); amdgpu_inc_vram_lost(tmp_adev); } @@ -6008,25 +6189,19 @@ static int amdgpu_device_health_check(struct list_head *device_list_handle) { struct amdgpu_device *tmp_adev; int ret = 0; - u32 status; list_for_each_entry(tmp_adev, device_list_handle, reset_list) { - pci_read_config_dword(tmp_adev->pdev, PCI_COMMAND, &status); - if (PCI_POSSIBLE_ERROR(status)) { - dev_err(tmp_adev->dev, "device lost from bus!"); - ret = -ENODEV; - } + ret |= amdgpu_device_bus_status_check(tmp_adev); } return ret; } -static int amdgpu_device_recovery_prepare(struct amdgpu_device *adev, +static void amdgpu_device_recovery_prepare(struct amdgpu_device *adev, struct list_head *device_list, struct amdgpu_hive_info *hive) { struct amdgpu_device *tmp_adev = NULL; - int r; /* * Build list of devices to reset. 
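/* Editor's aside (not part of the patch): amdgpu_device_bus_status_check() is
 * added elsewhere in this series and its body is not shown in this excerpt.
 * Judging from the open-coded check it replaces in amdgpu_device_health_check()
 * above, a minimal sketch of such a helper might look like this; the exact
 * name, signature and return convention are assumptions.
 */
static int amdgpu_device_bus_status_check(struct amdgpu_device *adev)
{
	u32 status;

	/* A device that fell off the bus reads back all 1s from config space. */
	pci_read_config_dword(adev->pdev, PCI_COMMAND, &status);
	if (PCI_POSSIBLE_ERROR(status)) {
		dev_err(adev->dev, "device lost from bus!");
		return -ENODEV;
	}

	return 0;
}
/* Note that the caller above now accumulates results with "ret |=", so every
 * device in the hive is checked even after one of them fails; the combined
 * value is only meaningful as "zero or non-zero", not as a single errno.
 */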
@@ -6038,7 +6213,7 @@ static int amdgpu_device_recovery_prepare(struct amdgpu_device *adev, list_add_tail(&tmp_adev->reset_list, device_list); if (adev->shutdown) tmp_adev->shutdown = true; - if (adev->pcie_reset_ctx.occurs_dpc) + if (amdgpu_reset_in_dpc(adev)) tmp_adev->pcie_reset_ctx.in_link_reset = true; } if (!list_is_first(&adev->reset_list, device_list)) @@ -6046,14 +6221,6 @@ static int amdgpu_device_recovery_prepare(struct amdgpu_device *adev, } else { list_add_tail(&adev->reset_list, device_list); } - - if (!amdgpu_sriov_vf(adev) && (!adev->pcie_reset_ctx.occurs_dpc)) { - r = amdgpu_device_health_check(device_list); - if (r) - return r; - } - - return 0; } static void amdgpu_device_recovery_get_reset_lock(struct amdgpu_device *adev, @@ -6080,14 +6247,15 @@ static void amdgpu_device_recovery_put_reset_lock(struct amdgpu_device *adev, amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain); } -static int amdgpu_device_halt_activities( - struct amdgpu_device *adev, struct amdgpu_job *job, - struct amdgpu_reset_context *reset_context, - struct list_head *device_list, struct amdgpu_hive_info *hive, - bool need_emergency_restart) +static void amdgpu_device_halt_activities(struct amdgpu_device *adev, + struct amdgpu_job *job, + struct amdgpu_reset_context *reset_context, + struct list_head *device_list, + struct amdgpu_hive_info *hive, + bool need_emergency_restart) { struct amdgpu_device *tmp_adev = NULL; - int i, r = 0; + int i; /* block all schedulers and reset given job's ring */ list_for_each_entry(tmp_adev, device_list, reset_list) { @@ -6121,9 +6289,8 @@ static int amdgpu_device_halt_activities( drm_client_dev_suspend(adev_to_drm(tmp_adev), false); /* disable ras on ALL IPs */ - if (!need_emergency_restart && - (!adev->pcie_reset_ctx.occurs_dpc) && - amdgpu_device_ip_need_full_reset(tmp_adev)) + if (!need_emergency_restart && !amdgpu_reset_in_dpc(adev) && + amdgpu_device_ip_need_full_reset(tmp_adev)) amdgpu_ras_suspend(tmp_adev); for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { @@ -6139,8 +6306,6 @@ static int amdgpu_device_halt_activities( } atomic_inc(&tmp_adev->gpu_reset_counter); } - - return r; } static int amdgpu_device_asic_reset(struct amdgpu_device *adev, @@ -6153,11 +6318,7 @@ static int amdgpu_device_asic_reset(struct amdgpu_device *adev, retry: /* Rest of adevs pre asic reset from XGMI hive. 
*/ list_for_each_entry(tmp_adev, device_list, reset_list) { - if (adev->pcie_reset_ctx.occurs_dpc) - tmp_adev->no_hw_access = true; r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context); - if (adev->pcie_reset_ctx.occurs_dpc) - tmp_adev->no_hw_access = false; /*TODO Should we stop ?*/ if (r) { dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ", @@ -6245,8 +6406,10 @@ static int amdgpu_device_sched_resume(struct list_head *device_list, amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); } else { dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter)); - if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0)) - DRM_WARN("smart shift update failed\n"); + if (amdgpu_acpi_smart_shift_update(tmp_adev, + AMDGPU_SS_DEV_D0)) + dev_warn(tmp_adev->dev, + "smart shift update failed\n"); } } @@ -6327,14 +6490,15 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, */ if (need_emergency_restart && amdgpu_ras_get_context(adev) && amdgpu_ras_get_context(adev)->reboot) { - DRM_WARN("Emergency reboot."); + dev_warn(adev->dev, "Emergency reboot."); ksys_sync_helper(); emergency_restart(); } - dev_info(adev->dev, "GPU %s begin!\n", - need_emergency_restart ? "jobs stop":"reset"); + dev_info(adev->dev, "GPU %s begin!. Source: %d\n", + need_emergency_restart ? "jobs stop" : "reset", + reset_context->src); if (!amdgpu_sriov_vf(adev)) hive = amdgpu_get_xgmi_hive(adev); @@ -6345,17 +6509,19 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, reset_context->hive = hive; INIT_LIST_HEAD(&device_list); - if (amdgpu_device_recovery_prepare(adev, &device_list, hive)) - goto end_reset; + amdgpu_device_recovery_prepare(adev, &device_list, hive); + + if (!amdgpu_sriov_vf(adev)) { + r = amdgpu_device_health_check(&device_list); + if (r) + goto end_reset; + } /* We need to lock reset domain only once both for XGMI and single device */ amdgpu_device_recovery_get_reset_lock(adev, &device_list); - r = amdgpu_device_halt_activities(adev, job, reset_context, &device_list, - hive, need_emergency_restart); - if (r) - goto reset_unlock; - + amdgpu_device_halt_activities(adev, job, reset_context, &device_list, + hive, need_emergency_restart); if (need_emergency_restart) goto skip_sched_resume; /* @@ -6392,8 +6558,17 @@ end_reset: atomic_set(&adev->reset_domain->reset_res, r); - if (!r) - drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE); + if (!r) { + struct amdgpu_task_info *ti = NULL; + + if (job) + ti = amdgpu_vm_get_task_info_pasid(adev, job->pasid); + + drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, + ti ? 
&ti->task : NULL); + + amdgpu_vm_put_task_info(ti); + } return r; } @@ -6712,12 +6887,11 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, #endif } -int amdgpu_device_baco_enter(struct drm_device *dev) +int amdgpu_device_baco_enter(struct amdgpu_device *adev) { - struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); - if (!amdgpu_device_supports_baco(dev)) + if (!amdgpu_device_supports_baco(adev)) return -ENOTSUPP; if (ras && adev->ras_enabled && @@ -6727,13 +6901,12 @@ int amdgpu_device_baco_enter(struct drm_device *dev) return amdgpu_dpm_baco_enter(adev); } -int amdgpu_device_baco_exit(struct drm_device *dev) +int amdgpu_device_baco_exit(struct amdgpu_device *adev) { - struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); int ret = 0; - if (!amdgpu_device_supports_baco(dev)) + if (!amdgpu_device_supports_baco(adev)) return -ENOTSUPP; ret = amdgpu_dpm_baco_exit(adev); @@ -6764,18 +6937,13 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta { struct drm_device *dev = pci_get_drvdata(pdev); struct amdgpu_device *adev = drm_to_adev(dev); - struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); + struct amdgpu_hive_info *hive __free(xgmi_put_hive) = + amdgpu_get_xgmi_hive(adev); struct amdgpu_reset_context reset_context; struct list_head device_list; - int r = 0; dev_info(adev->dev, "PCI error: detected callback!!\n"); - if (!amdgpu_dpm_is_link_reset_supported(adev)) { - dev_warn(adev->dev, "No support for XGMI hive yet...\n"); - return PCI_ERS_RESULT_DISCONNECT; - } - adev->pci_channel_state = state; switch (state) { @@ -6785,23 +6953,32 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta case pci_channel_io_frozen: /* Fatal error, prepare for slot reset */ dev_info(adev->dev, "pci_channel_io_frozen: state(%d)!!\n", state); + if (hive) { + /* Hive devices should be able to support FW based + * link reset on other devices, if not return. + */ + if (!amdgpu_dpm_is_link_reset_supported(adev)) { + dev_warn(adev->dev, + "No support for XGMI hive yet...\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + /* Set dpc status only if device is part of hive + * Non-hive devices should be able to recover after + * link reset. 
+ */ + amdgpu_reset_set_dpc_status(adev, true); - if (hive) mutex_lock(&hive->hive_lock); - adev->pcie_reset_ctx.occurs_dpc = true; + } memset(&reset_context, 0, sizeof(reset_context)); INIT_LIST_HEAD(&device_list); amdgpu_device_recovery_prepare(adev, &device_list, hive); amdgpu_device_recovery_get_reset_lock(adev, &device_list); - r = amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list, - hive, false); - if (hive) { + amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list, + hive, false); + if (hive) mutex_unlock(&hive->hive_lock); - amdgpu_put_xgmi_hive(hive); - } - if (r) - return PCI_ERS_RESULT_DISCONNECT; return PCI_ERS_RESULT_NEED_RESET; case pci_channel_io_perm_failure: /* Permanent error, prepare for device removal */ @@ -6849,22 +7026,34 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev) struct amdgpu_device *tmp_adev; struct amdgpu_hive_info *hive; struct list_head device_list; - int r = 0, i; + struct pci_dev *link_dev; + int r = 0, i, timeout; u32 memsize; - - /* PCI error slot reset should be skipped During RAS recovery */ - if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) || - amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) && - amdgpu_ras_in_recovery(adev)) - return PCI_ERS_RESULT_RECOVERED; + u16 status; dev_info(adev->dev, "PCI error: slot reset callback!!\n"); memset(&reset_context, 0, sizeof(reset_context)); - /* wait for asic to come out of reset */ - msleep(700); + if (adev->pcie_reset_ctx.swus) + link_dev = adev->pcie_reset_ctx.swus; + else + link_dev = adev->pdev; + /* wait for asic to come out of reset, timeout = 10s */ + timeout = 10000; + do { + usleep_range(10000, 10500); + r = pci_read_config_word(link_dev, PCI_VENDOR_ID, &status); + timeout -= 10; + } while (timeout > 0 && (status != PCI_VENDOR_ID_ATI) && + (status != PCI_VENDOR_ID_AMD)); + + if ((status != PCI_VENDOR_ID_ATI) && (status != PCI_VENDOR_ID_AMD)) { + r = -ETIME; + goto out; + } + amdgpu_device_load_switch_state(adev); /* Restore PCI confspace */ amdgpu_device_load_pci_state(pdev); @@ -6959,7 +7148,6 @@ void amdgpu_pci_resume(struct pci_dev *pdev) amdgpu_device_sched_resume(&device_list, NULL, NULL); amdgpu_device_gpu_resume(adev, &device_list, false); amdgpu_device_recovery_put_reset_lock(adev, &device_list); - adev->pcie_reset_ctx.occurs_dpc = false; if (hive) { mutex_unlock(&hive->hive_lock); @@ -6967,6 +7155,58 @@ void amdgpu_pci_resume(struct pci_dev *pdev) } } +static void amdgpu_device_cache_switch_state(struct amdgpu_device *adev) +{ + struct pci_dev *parent = pci_upstream_bridge(adev->pdev); + int r; + + if (!parent || parent->vendor != PCI_VENDOR_ID_ATI) + return; + + /* If already saved, return */ + if (adev->pcie_reset_ctx.swus) + return; + /* Upstream bridge is ATI, assume it's SWUS/DS architecture */ + r = pci_save_state(parent); + if (r) + return; + adev->pcie_reset_ctx.swds_pcistate = pci_store_saved_state(parent); + + parent = pci_upstream_bridge(parent); + r = pci_save_state(parent); + if (r) + return; + adev->pcie_reset_ctx.swus_pcistate = pci_store_saved_state(parent); + + adev->pcie_reset_ctx.swus = parent; +} + +static void amdgpu_device_load_switch_state(struct amdgpu_device *adev) +{ + struct pci_dev *pdev; + int r; + + if (!adev->pcie_reset_ctx.swds_pcistate || + !adev->pcie_reset_ctx.swus_pcistate) + return; + + pdev = adev->pcie_reset_ctx.swus; + r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swus_pcistate); + if (!r) { + pci_restore_state(pdev); + } else { + dev_warn(adev->dev, "Failed to 
load SWUS state, err:%d\n", r); + return; + } + + pdev = pci_upstream_bridge(adev->pdev); + r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swds_pcistate); + if (!r) + pci_restore_state(pdev); + else + dev_warn(adev->dev, "Failed to load SWDS state, err:%d\n", r); +} + bool amdgpu_device_cache_pci_state(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); @@ -6983,14 +7223,16 @@ bool amdgpu_device_cache_pci_state(struct pci_dev *pdev) adev->pci_state = pci_store_saved_state(pdev); if (!adev->pci_state) { - DRM_ERROR("Failed to store PCI saved state"); + dev_err(adev->dev, "Failed to store PCI saved state"); return false; } } else { - DRM_WARN("Failed to save PCI state, err:%d\n", r); + dev_warn(adev->dev, "Failed to save PCI state, err:%d\n", r); return false; } + amdgpu_device_cache_switch_state(adev); + return true; } @@ -7008,7 +7250,7 @@ bool amdgpu_device_load_pci_state(struct pci_dev *pdev) if (!r) { pci_restore_state(pdev); } else { - DRM_WARN("Failed to load PCI state, err:%d\n", r); + dev_warn(adev->dev, "Failed to load PCI state, err:%d\n", r); return false; } @@ -7254,7 +7496,7 @@ struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev, dep = amdgpu_sync_peek_fence(&isolation->prev, ring); r = amdgpu_sync_fence(&isolation->active, &f->finished, GFP_NOWAIT); if (r) - DRM_WARN("OOM tracking isolation\n"); + dev_warn(adev->dev, "OOM tracking isolation\n"); out_grab_ref: dma_fence_get(dep); @@ -7322,9 +7564,11 @@ uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev, tmp_ = RREG32(reg_addr); loop--; if (!loop) { - DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08xn", - inst, reg_name, (uint32_t)expected_value, - (uint32_t)(tmp_ & (mask))); + dev_warn( + adev->dev, + "Register(%d) [%s] failed to reach value 0x%08x != 0x%08xn", + inst, reg_name, (uint32_t)expected_value, + (uint32_t)(tmp_ & (mask))); ret = -ETIMEDOUT; break; } @@ -7375,3 +7619,53 @@ ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset) size += sysfs_emit_at(buf, size, "\n"); return size; } + +void amdgpu_device_set_uid(struct amdgpu_uid *uid_info, + enum amdgpu_uid_type type, uint8_t inst, + uint64_t uid) +{ + if (!uid_info) + return; + + if (type >= AMDGPU_UID_TYPE_MAX) { + dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n", + type); + return; + } + + if (inst >= AMDGPU_UID_INST_MAX) { + dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n", + inst); + return; + } + + if (uid_info->uid[type][inst] != 0) { + dev_warn_once( + uid_info->adev->dev, + "Overwriting existing UID %llu for type %d instance %d\n", + uid_info->uid[type][inst], type, inst); + } + + uid_info->uid[type][inst] = uid; +} + +u64 amdgpu_device_get_uid(struct amdgpu_uid *uid_info, + enum amdgpu_uid_type type, uint8_t inst) +{ + if (!uid_info) + return 0; + + if (type >= AMDGPU_UID_TYPE_MAX) { + dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n", + type); + return 0; + } + + if (inst >= AMDGPU_UID_INST_MAX) { + dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n", + inst); + return 0; + } + + return uid_info->uid[type][inst]; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 81b3443c8d7f..73401f0aeb34 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -276,7 +276,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, u32 msg; if (!amdgpu_sriov_vf(adev)) { - /* It can take up to a second 
for IFWI init to complete on some dGPUs, + /* It can take up to two second for IFWI init to complete on some dGPUs, * but generally it should be in the 60-100ms range. Normally this starts * as soon as the device gets power so by the time the OS loads this has long * completed. However, when a card is hotplugged via e.g., USB4, we need to @@ -284,7 +284,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, * continue. */ - for (i = 0; i < 1000; i++) { + for (i = 0; i < 2000; i++) { msg = RREG32(mmMP0_SMN_C2PMSG_33); if (msg & 0x80000000) break; @@ -2124,7 +2124,6 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(11, 0, 5): case IP_VERSION(11, 0, 9): case IP_VERSION(11, 0, 7): - case IP_VERSION(11, 0, 8): case IP_VERSION(11, 0, 11): case IP_VERSION(11, 0, 12): case IP_VERSION(11, 0, 13): @@ -2132,6 +2131,10 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(11, 5, 2): amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); break; + case IP_VERSION(11, 0, 8): + if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) + amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); + break; case IP_VERSION(12, 0, 0): case IP_VERSION(12, 0, 1): amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block); @@ -2555,40 +2558,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_RAVEN: - case CHIP_VEGA20: - case CHIP_ARCTURUS: - case CHIP_ALDEBARAN: - /* this is not fatal. We have a fallback below - * if the new firmwares are not present. some of - * this will be overridden below to keep things - * consistent with the current behavior. + /* This is not fatal. We only need the discovery + * binary for sysfs. We don't need it for a + * functional system. */ - r = amdgpu_discovery_reg_base_init(adev); - if (!r) { - amdgpu_discovery_harvest_ip(adev); - amdgpu_discovery_get_gfx_info(adev); - amdgpu_discovery_get_mall_info(adev); - amdgpu_discovery_get_vcn_info(adev); - } - break; - default: - r = amdgpu_discovery_reg_base_init(adev); - if (r) { - drm_err(&adev->ddev, "discovery failed: %d\n", r); - return r; - } - - amdgpu_discovery_harvest_ip(adev); - amdgpu_discovery_get_gfx_info(adev); - amdgpu_discovery_get_mall_info(adev); - amdgpu_discovery_get_vcn_info(adev); - break; - } - - switch (adev->asic_type) { - case CHIP_VEGA10: + amdgpu_discovery_init(adev); vega10_reg_base_init(adev); adev->sdma.num_instances = 2; adev->gmc.num_umc = 4; @@ -2611,6 +2585,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0); break; case CHIP_VEGA12: + /* This is not fatal. We only need the discovery + * binary for sysfs. We don't need it for a + * functional system. + */ + amdgpu_discovery_init(adev); vega10_reg_base_init(adev); adev->sdma.num_instances = 2; adev->gmc.num_umc = 4; @@ -2633,6 +2612,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1); break; case CHIP_RAVEN: + /* This is not fatal. We only need the discovery + * binary for sysfs. We don't need it for a + * functional system. + */ + amdgpu_discovery_init(adev); vega10_reg_base_init(adev); adev->sdma.num_instances = 1; adev->vcn.num_vcn_inst = 1; @@ -2674,6 +2658,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) } break; case CHIP_VEGA20: + /* This is not fatal. We only need the discovery + * binary for sysfs. 
We don't need it for a + * functional system. + */ + amdgpu_discovery_init(adev); vega20_reg_base_init(adev); adev->sdma.num_instances = 2; adev->gmc.num_umc = 8; @@ -2697,6 +2686,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0); break; case CHIP_ARCTURUS: + /* This is not fatal. We only need the discovery + * binary for sysfs. We don't need it for a + * functional system. + */ + amdgpu_discovery_init(adev); arct_reg_base_init(adev); adev->sdma.num_instances = 8; adev->vcn.num_vcn_inst = 2; @@ -2725,6 +2719,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0); break; case CHIP_ALDEBARAN: + /* This is not fatal. We only need the discovery + * binary for sysfs. We don't need it for a + * functional system. + */ + amdgpu_discovery_init(adev); aldebaran_reg_base_init(adev); adev->sdma.num_instances = 5; adev->vcn.num_vcn_inst = 2; @@ -2750,7 +2749,47 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0); adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0); break; + case CHIP_CYAN_SKILLFISH: + if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) { + r = amdgpu_discovery_reg_base_init(adev); + if (r) + return -EINVAL; + + amdgpu_discovery_harvest_ip(adev); + amdgpu_discovery_get_gfx_info(adev); + amdgpu_discovery_get_mall_info(adev); + amdgpu_discovery_get_vcn_info(adev); + } else { + cyan_skillfish_reg_base_init(adev); + adev->sdma.num_instances = 2; + adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(2, 0, 3); + adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(2, 0, 3); + adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(5, 0, 1); + adev->ip_versions[HDP_HWIP][0] = IP_VERSION(5, 0, 1); + adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(5, 0, 1); + adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(5, 0, 1); + adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 5, 0); + adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(2, 1, 1); + adev->ip_versions[UMC_HWIP][0] = IP_VERSION(8, 1, 1); + adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 8); + adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 8); + adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 1); + adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 8); + adev->ip_versions[GC_HWIP][0] = IP_VERSION(10, 1, 3); + adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 0, 3); + } + break; default: + r = amdgpu_discovery_reg_base_init(adev); + if (r) { + drm_err(&adev->ddev, "discovery failed: %d\n", r); + return r; + } + + amdgpu_discovery_harvest_ip(adev); + amdgpu_discovery_get_gfx_info(adev); + amdgpu_discovery_get_mall_info(adev); + amdgpu_discovery_get_vcn_info(adev); break; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 35c778426a7c..51bab32fd8c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -1196,13 +1196,14 @@ static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev, struct amdgpu_framebuffer *rfb, struct drm_file *file_priv, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { int ret; rfb->base.obj[0] = obj; - drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, &rfb->base, info, mode_cmd); /* Verify that the modifier is supported. 
*/ if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format, mode_cmd->modifier[0])) { @@ -1297,6 +1298,7 @@ static int amdgpu_display_framebuffer_init(struct drm_device *dev, struct drm_framebuffer * amdgpu_display_user_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { struct amdgpu_framebuffer *amdgpu_fb; @@ -1317,7 +1319,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev, /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */ bo = gem_to_amdgpu_bo(obj); domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags); - if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) { + if (drm_gem_is_imported(obj) && !(domains & AMDGPU_GEM_DOMAIN_GTT)) { drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n"); drm_gem_object_put(obj); return ERR_PTR(-EINVAL); @@ -1330,7 +1332,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev, } ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv, - mode_cmd, obj); + info, mode_cmd, obj); if (ret) { kfree(amdgpu_fb); drm_gem_object_put(obj); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h index dfa0d642ac16..930c171473b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h @@ -44,6 +44,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev, struct drm_framebuffer * amdgpu_display_user_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd); const struct drm_format_info * amdgpu_lookup_format_info(u32 format, uint64_t modifier); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 44e120f9f764..8561ad7f6180 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -285,6 +285,36 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf, return ret; } +static int amdgpu_dma_buf_vmap(struct dma_buf *dma_buf, struct iosys_map *map) +{ + struct drm_gem_object *obj = dma_buf->priv; + struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + int ret; + + /* + * Pin to keep buffer in place while it's vmap'ed. The actual + * domain is not that important as long as it's mapable. Using + * GTT and VRAM should be compatible with most use cases. 
+ */ + ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM); + if (ret) + return ret; + ret = drm_gem_dmabuf_vmap(dma_buf, map); + if (ret) + amdgpu_bo_unpin(bo); + + return ret; +} + +static void amdgpu_dma_buf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map) +{ + struct drm_gem_object *obj = dma_buf->priv; + struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + + drm_gem_dmabuf_vunmap(dma_buf, map); + amdgpu_bo_unpin(bo); +} + const struct dma_buf_ops amdgpu_dmabuf_ops = { .attach = amdgpu_dma_buf_attach, .pin = amdgpu_dma_buf_pin, @@ -294,8 +324,8 @@ const struct dma_buf_ops amdgpu_dmabuf_ops = { .release = drm_gem_dmabuf_release, .begin_cpu_access = amdgpu_dma_buf_begin_cpu_access, .mmap = drm_gem_dmabuf_mmap, - .vmap = drm_gem_dmabuf_vmap, - .vunmap = drm_gem_dmabuf_vunmap, + .vmap = amdgpu_dma_buf_vmap, + .vunmap = amdgpu_dma_buf_vunmap, }; /** @@ -313,11 +343,23 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj, { struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj); struct dma_buf *buf; + struct ttm_operation_ctx ctx = { + .interruptible = true, + .no_wait_gpu = true, + /* We opt to avoid OOM on system pages allocations */ + .gfp_retry_mayfail = true, + .allow_res_evict = false, + }; + int ret; if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) || bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) return ERR_PTR(-EPERM); + ret = ttm_bo_setup_export(&bo->tbo, &ctx); + if (ret) + return ERR_PTR(ret); + buf = drm_gem_prime_export(gobj, flags); if (!IS_ERR(buf)) buf->ops = &amdgpu_dmabuf_ops; @@ -513,7 +555,7 @@ bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev, if (!adev) return false; - if (obj->import_attach) { + if (drm_gem_is_imported(obj)) { struct dma_buf *dma_buf = obj->import_attach->dmabuf; if (dma_buf->ops != &amdgpu_dmabuf_ops) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c index 3f3662e8b871..3040437d99c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c @@ -41,7 +41,8 @@ u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index) if (index < adev->doorbell.num_kernel_doorbells) return readl(adev->doorbell.cpu_addr + index); - DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); + dev_err(adev->dev, "reading beyond doorbell aperture: 0x%08x!\n", + index); return 0; } @@ -63,7 +64,8 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v) if (index < adev->doorbell.num_kernel_doorbells) writel(v, adev->doorbell.cpu_addr + index); else - DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); + dev_err(adev->dev, + "writing beyond doorbell aperture: 0x%08x!\n", index); } /** @@ -83,7 +85,8 @@ u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index) if (index < adev->doorbell.num_kernel_doorbells) return atomic64_read((atomic64_t *)(adev->doorbell.cpu_addr + index)); - DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); + dev_err(adev->dev, "reading beyond doorbell aperture: 0x%08x!\n", + index); return 0; } @@ -105,7 +108,8 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v) if (index < adev->doorbell.num_kernel_doorbells) atomic64_set((atomic64_t *)(adev->doorbell.cpu_addr + index), v); else - DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); + dev_err(adev->dev, + "writing beyond doorbell aperture: 0x%08x!\n", index); } /** @@ -166,7 +170,8 @@ int amdgpu_doorbell_create_kernel_doorbells(struct amdgpu_device 
*adev) NULL, (void **)&adev->doorbell.cpu_addr); if (r) { - DRM_ERROR("Failed to allocate kernel doorbells, err=%d\n", r); + dev_err(adev->dev, + "Failed to allocate kernel doorbells, err=%d\n", r); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 4db92e0a60da..bff25ef3e2d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -144,6 +144,7 @@ enum AMDGPU_DEBUG_MASK { AMDGPU_DEBUG_DISABLE_GPU_RING_RESET = BIT(6), AMDGPU_DEBUG_SMU_POOL = BIT(7), AMDGPU_DEBUG_VM_USERPTR = BIT(8), + AMDGPU_DEBUG_DISABLE_RAS_CE_LOG = BIT(9) }; unsigned int amdgpu_vram_limit = UINT_MAX; @@ -361,12 +362,12 @@ module_param_named(svm_default_granularity, amdgpu_svm_default_granularity, uint * The second one is for Compute. The third and fourth ones are * for SDMA and Video. * - * By default(with no lockup_timeout settings), the timeout for all non-compute(GFX, SDMA and Video) - * jobs is 10000. The timeout for compute is 60000. + * By default(with no lockup_timeout settings), the timeout for all jobs is 10000. */ -MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and 60000 for compute jobs; " - "for passthrough or sriov, 10000 for all jobs. 0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; " - "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video]."); +MODULE_PARM_DESC(lockup_timeout, + "GPU lockup timeout in ms (default: 10000 for all jobs. " + "0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; " + "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video]."); module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444); /** @@ -885,7 +886,7 @@ module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444); /** * DOC: dcdebugmask (uint) - * Override display features enabled. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h. + * Display debug options. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h. 
*/ MODULE_PARM_DESC(dcdebugmask, "all debug options disabled (default))"); module_param_named(dcdebugmask, amdgpu_dc_debug_mask, uint, 0444); @@ -959,7 +960,7 @@ module_param_named(tmz, amdgpu_tmz, int, 0444); */ MODULE_PARM_DESC( freesync_video, - "Enable freesync modesetting optimization feature (0 = off (default), 1 = on)"); + "Adds additional modes via VRR for refresh changes without a full modeset (0 = off (default), 1 = on)"); module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444); /** @@ -2171,6 +2172,11 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN}, /* CYAN_SKILLFISH */ + {0x1002, 0x13DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU}, + {0x1002, 0x13F9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU}, + {0x1002, 0x13FA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU}, + {0x1002, 0x13FB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU}, + {0x1002, 0x13FC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU}, {0x1002, 0x13FE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU}, {0x1002, 0x143F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU}, @@ -2278,6 +2284,11 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev) pr_info("debug: VM mode debug for userptr is enabled\n"); adev->debug_vm_userptr = true; } + + if (amdgpu_debug_mask & AMDGPU_DEBUG_DISABLE_RAS_CE_LOG) { + pr_info("debug: disable kernel logs of correctable errors\n"); + adev->debug_disable_ce_logs = true; + } } static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags) @@ -2321,7 +2332,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, amdgpu_aspm = 0; if (amdgpu_virtual_display || - amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK)) + amdgpu_device_asic_has_dc_support(pdev, flags & AMD_ASIC_MASK)) supports_atomic = true; if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) { @@ -2451,10 +2462,10 @@ retry_init: if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) { /* only need to skip on ATPX */ - if (amdgpu_device_supports_px(ddev)) + if (amdgpu_device_supports_px(adev)) dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); /* we want direct complete for BOCO */ - if (amdgpu_device_supports_boco(ddev)) + if (amdgpu_device_supports_boco(adev)) dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_SMART_PREPARE | DPM_FLAG_SMART_SUSPEND | DPM_FLAG_MAY_SKIP_RESUME); @@ -2487,9 +2498,9 @@ retry_init: * into D0 state. Then there will be a PMFW-aware D-state * transition(D0->D3) on runpm suspend. */ - if (amdgpu_device_supports_baco(ddev) && + if (amdgpu_device_supports_baco(adev) && !(adev->flags & AMD_IS_APU) && - (adev->asic_type >= CHIP_NAVI10)) + adev->asic_type >= CHIP_NAVI10) amdgpu_get_secondary_funcs(adev); } @@ -2506,6 +2517,7 @@ amdgpu_pci_remove(struct pci_dev *pdev) struct drm_device *dev = pci_get_drvdata(pdev); struct amdgpu_device *adev = drm_to_adev(dev); + amdgpu_ras_eeprom_check_and_recover(adev); amdgpu_xcp_dev_unplug(adev); amdgpu_gmc_prepare_nps_mode_change(adev); drm_dev_unplug(dev); @@ -2535,6 +2547,10 @@ amdgpu_pci_shutdown(struct pci_dev *pdev) if (amdgpu_ras_intr_triggered()) return; + /* device may not be resumed here, return immediately in this case */ + if (adev->in_s4 && adev->in_suspend) + return; + /* if we are running in a VM, make sure the device * torn down properly on reboot/shutdown. 
* unfortunately we can't detect certain @@ -2551,11 +2567,14 @@ static int amdgpu_pmops_prepare(struct device *dev) struct drm_device *drm_dev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(drm_dev); + /* device may not be resumed here, return immediately in this case */ + if (adev->in_s4 && adev->in_suspend) + return 0; + /* Return a positive number here so * DPM_FLAG_SMART_SUSPEND works properly */ - if (amdgpu_device_supports_boco(drm_dev) && - pm_runtime_suspended(dev)) + if (amdgpu_device_supports_boco(adev) && pm_runtime_suspended(dev)) return 1; /* if we will not support s3 or s2i for the device @@ -2570,7 +2589,7 @@ static int amdgpu_pmops_prepare(struct device *dev) static void amdgpu_pmops_complete(struct device *dev) { - /* nothing to do */ + amdgpu_device_complete(dev_get_drvdata(dev)); } static int amdgpu_pmops_suspend(struct device *dev) @@ -2583,6 +2602,7 @@ static int amdgpu_pmops_suspend(struct device *dev) else if (amdgpu_acpi_is_s3_active(adev)) adev->in_s3 = true; if (!adev->in_s0ix && !adev->in_s3) { +#if IS_ENABLED(CONFIG_SUSPEND) /* don't allow going deep first time followed by s2idle the next time */ if (adev->last_suspend_state != PM_SUSPEND_ON && adev->last_suspend_state != pm_suspend_target_state) { @@ -2590,11 +2610,14 @@ static int amdgpu_pmops_suspend(struct device *dev) pm_suspend_target_state); return -EINVAL; } +#endif return 0; } +#if IS_ENABLED(CONFIG_SUSPEND) /* cache the state last used for suspend */ adev->last_suspend_state = pm_suspend_target_state; +#endif return amdgpu_device_suspend(drm_dev, true); } @@ -2650,12 +2673,21 @@ static int amdgpu_pmops_thaw(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + /* do not resume device if it's normal hibernation */ + if (!pm_hibernate_is_recovering() && !pm_hibernation_mode_is_suspend()) + return 0; + return amdgpu_device_resume(drm_dev, true); } static int amdgpu_pmops_poweroff(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(drm_dev); + + /* device may not be resumed here, return immediately in this case */ + if (adev->in_s4 && adev->in_suspend) + return 0; return amdgpu_device_suspend(drm_dev, true); } @@ -2828,7 +2860,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) /* nothing to do */ } else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) { - amdgpu_device_baco_enter(drm_dev); + amdgpu_device_baco_enter(adev); } dev_dbg(&pdev->dev, "asic/device is runtime suspended\n"); @@ -2869,7 +2901,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev) pci_set_master(pdev); } else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) { - amdgpu_device_baco_exit(drm_dev); + amdgpu_device_baco_exit(adev); } ret = amdgpu_device_resume(drm_dev, false); if (ret) { @@ -2910,11 +2942,14 @@ static int amdgpu_drm_release(struct inode *inode, struct file *filp) { struct drm_file *file_priv = filp->private_data; struct amdgpu_fpriv *fpriv = file_priv->driver_priv; + struct drm_device *dev = file_priv->minor->dev; + int idx; - if (fpriv) { + if (fpriv && drm_dev_enter(dev, &idx)) { fpriv->evf_mgr.fd_closing = true; amdgpu_eviction_fence_destroy(&fpriv->evf_mgr); amdgpu_userq_mgr_fini(&fpriv->userq_mgr); + drm_dev_exit(idx); } return drm_release(inode, filp); @@ -2941,15 +2976,15 @@ out: } static const struct dev_pm_ops amdgpu_pm_ops = { - .prepare = amdgpu_pmops_prepare, - .complete = amdgpu_pmops_complete, - 
.suspend = amdgpu_pmops_suspend, - .suspend_noirq = amdgpu_pmops_suspend_noirq, - .resume = amdgpu_pmops_resume, - .freeze = amdgpu_pmops_freeze, - .thaw = amdgpu_pmops_thaw, - .poweroff = amdgpu_pmops_poweroff, - .restore = amdgpu_pmops_restore, + .prepare = pm_sleep_ptr(amdgpu_pmops_prepare), + .complete = pm_sleep_ptr(amdgpu_pmops_complete), + .suspend = pm_sleep_ptr(amdgpu_pmops_suspend), + .suspend_noirq = pm_sleep_ptr(amdgpu_pmops_suspend_noirq), + .resume = pm_sleep_ptr(amdgpu_pmops_resume), + .freeze = pm_sleep_ptr(amdgpu_pmops_freeze), + .thaw = pm_sleep_ptr(amdgpu_pmops_thaw), + .poweroff = pm_sleep_ptr(amdgpu_pmops_poweroff), + .restore = pm_sleep_ptr(amdgpu_pmops_restore), .runtime_suspend = amdgpu_pmops_runtime_suspend, .runtime_resume = amdgpu_pmops_runtime_resume, .runtime_idle = amdgpu_pmops_runtime_idle, @@ -3021,6 +3056,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = { DRM_IOCTL_DEF_DRV(AMDGPU_USERQ, amdgpu_userq_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_SIGNAL, amdgpu_userq_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_WAIT, amdgpu_userq_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_LIST_HANDLES, amdgpu_gem_list_handles_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), }; static const struct drm_driver amdgpu_kms_driver = { @@ -3094,7 +3130,7 @@ static struct pci_driver amdgpu_kms_pci_driver = { .probe = amdgpu_pci_probe, .remove = amdgpu_pci_remove, .shutdown = amdgpu_pci_shutdown, - .driver.pm = &amdgpu_pm_ops, + .driver.pm = pm_ptr(&amdgpu_pm_ops), .err_handler = &amdgpu_pci_err_handler, .dev_groups = amdgpu_sysfs_groups, }; @@ -3107,10 +3143,6 @@ static int __init amdgpu_init(void) if (r) goto error_sync; - r = amdgpu_fence_slab_init(); - if (r) - goto error_fence; - r = amdgpu_userq_fence_slab_init(); if (r) goto error_fence; @@ -3145,7 +3177,6 @@ static void __exit amdgpu_exit(void) amdgpu_unregister_atpx_handler(); amdgpu_acpi_release(); amdgpu_sync_fini(); - amdgpu_fence_slab_fini(); amdgpu_userq_fence_slab_fini(); mmu_notifier_synchronize(); amdgpu_xcp_drv_release(); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c index 8b919ad3af29..23d7d0b0d625 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c @@ -143,7 +143,6 @@ static bool amdgpu_eviction_fence_enable_signaling(struct dma_fence *f) } static const struct dma_fence_ops amdgpu_eviction_fence_ops = { - .use_64bit_seqno = true, .get_driver_name = amdgpu_eviction_fence_get_driver_name, .get_timeline_name = amdgpu_eviction_fence_get_timeline_name, .enable_signaling = amdgpu_eviction_fence_enable_signaling, @@ -169,9 +168,9 @@ amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr) ev_fence->evf_mgr = evf_mgr; get_task_comm(ev_fence->timeline_name, current); spin_lock_init(&ev_fence->lock); - dma_fence_init(&ev_fence->base, &amdgpu_eviction_fence_ops, - &ev_fence->lock, evf_mgr->ev_fence_ctx, - atomic_inc_return(&evf_mgr->ev_fence_seq)); + dma_fence_init64(&ev_fence->base, &amdgpu_eviction_fence_ops, + &ev_fence->lock, evf_mgr->ev_fence_ctx, + atomic_inc_return(&evf_mgr->ev_fence_seq)); return ev_fence; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c index 91d638098889..b349bb3676d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c @@ -70,6 +70,7 @@ void amdgpu_show_fdinfo(struct 
drm_printer *p, struct drm_file *file) [AMDGPU_PL_GWS] = "gws", [AMDGPU_PL_OA] = "oa", [AMDGPU_PL_DOORBELL] = "doorbell", + [AMDGPU_PL_MMIO_REMAP] = "mmioremap", }; unsigned int hw_ip, i; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 5fec808d7f54..fd8cca241da6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -41,21 +41,6 @@ #include "amdgpu_trace.h" #include "amdgpu_reset.h" -static struct kmem_cache *amdgpu_fence_slab; - -int amdgpu_fence_slab_init(void) -{ - amdgpu_fence_slab = KMEM_CACHE(amdgpu_fence, SLAB_HWCACHE_ALIGN); - if (!amdgpu_fence_slab) - return -ENOMEM; - return 0; -} - -void amdgpu_fence_slab_fini(void) -{ - rcu_barrier(); - kmem_cache_destroy(amdgpu_fence_slab); -} /* * Cast helper */ @@ -114,14 +99,14 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring) * * @ring: ring the fence is associated with * @f: resulting fence object - * @job: job the fence is embedded in + * @af: amdgpu fence input * @flags: flags to pass into the subordinate .emit_fence() call * * Emits a fence command on the requested ring (all asics). * Returns 0 on success, -ENOMEM on failure. */ -int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job, - unsigned int flags) +int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, + struct amdgpu_fence *af, unsigned int flags) { struct amdgpu_device *adev = ring->adev; struct dma_fence *fence; @@ -130,40 +115,34 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd uint32_t seq; int r; - if (job == NULL) { - /* create a sperate hw fence */ - am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC); - if (am_fence == NULL) + if (!af) { + /* create a separate hw fence */ + am_fence = kzalloc(sizeof(*am_fence), GFP_KERNEL); + if (!am_fence) return -ENOMEM; } else { - /* take use of job-embedded fence */ - am_fence = &job->hw_fence; + am_fence = af; } fence = &am_fence->base; am_fence->ring = ring; seq = ++ring->fence_drv.sync_seq; - if (job && job->job_run_counter) { - /* reinit seq for resubmitted jobs */ - fence->seqno = seq; - /* TO be inline with external fence creation and other drivers */ + am_fence->seq = seq; + if (af) { + dma_fence_init(fence, &amdgpu_job_fence_ops, + &ring->fence_drv.lock, + adev->fence_context + ring->idx, seq); + /* Against remove in amdgpu_job_{free, free_cb} */ dma_fence_get(fence); } else { - if (job) { - dma_fence_init(fence, &amdgpu_job_fence_ops, - &ring->fence_drv.lock, - adev->fence_context + ring->idx, seq); - /* Against remove in amdgpu_job_{free, free_cb} */ - dma_fence_get(fence); - } else { - dma_fence_init(fence, &amdgpu_fence_ops, - &ring->fence_drv.lock, - adev->fence_context + ring->idx, seq); - } + dma_fence_init(fence, &amdgpu_fence_ops, + &ring->fence_drv.lock, + adev->fence_context + ring->idx, seq); } amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, seq, flags | AMDGPU_FENCE_FLAG_INT); + amdgpu_fence_save_wptr(fence); pm_runtime_get_noresume(adev_to_drm(adev)->dev); ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; if (unlikely(rcu_dereference_protected(*ptr, 1))) { @@ -276,6 +255,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring) do { struct dma_fence *fence, **ptr; + struct amdgpu_fence *am_fence; ++last_seq; last_seq &= drv->num_fences_mask; @@ -288,6 +268,12 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring) if (!fence) continue; + /* Save the wptr in the fence driver so 
we know what the last processed + * wptr was. This is required for re-emitting the ring state for + * queues that are reset but are not guilty and thus have no guilty fence. + */ + am_fence = container_of(fence, struct amdgpu_fence, base); + drv->signalled_wptr = am_fence->wptr; dma_fence_signal(fence); dma_fence_put(fence); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); @@ -310,7 +296,9 @@ static void amdgpu_fence_fallback(struct timer_list *t) fence_drv.fallback_timer); if (amdgpu_fence_process(ring)) - DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name); + dev_warn(ring->adev->dev, + "Fence fallback timer expired on ring %s\n", + ring->name); } /** @@ -748,6 +736,86 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring) amdgpu_fence_process(ring); } + +/* + * Kernel queue reset handling + * + * The driver can reset individual queues for most engines, but those queues + * may contain work from multiple contexts. Resetting the queue will + * lose all of that state. In order to minimize the collateral damage, the + * driver will save the ring contents which are not associated with the guilty + * context prior to resetting the queue. After resetting the queue the queue + * contents from the other contexts are re-emitted to the rings so that they can + * be processed by the engine. To handle this, we save the queue's write + * pointer (wptr) in the fences associated with each context. If we get a + * queue timeout, we can then use the wptrs from the fences to determine + * which data needs to be saved out of the queue's ring buffer. + */ + +/** + * amdgpu_fence_driver_guilty_force_completion - force signal of specified sequence + * + * @fence: fence of the ring to signal + * + */ +void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence) +{ + dma_fence_set_error(&fence->base, -ETIME); + amdgpu_fence_write(fence->ring, fence->seq); + amdgpu_fence_process(fence->ring); +} + +void amdgpu_fence_save_wptr(struct dma_fence *fence) +{ + struct amdgpu_fence *am_fence = container_of(fence, struct amdgpu_fence, base); + + am_fence->wptr = am_fence->ring->wptr; +} + +static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring, + u64 start_wptr, u32 end_wptr) +{ + unsigned int first_idx = start_wptr & ring->buf_mask; + unsigned int last_idx = end_wptr & ring->buf_mask; + unsigned int i; + + /* Backup the contents of the ring buffer. */ + for (i = first_idx; i != last_idx; ++i, i &= ring->buf_mask) + ring->ring_backup[ring->ring_backup_entries_to_copy++] = ring->ring[i]; +} + +void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring, + struct amdgpu_fence *guilty_fence) +{ + struct dma_fence *unprocessed; + struct dma_fence __rcu **ptr; + struct amdgpu_fence *fence; + u64 wptr, i, seqno; + + seqno = amdgpu_fence_read(ring); + wptr = ring->fence_drv.signalled_wptr; + ring->ring_backup_entries_to_copy = 0; + + for (i = seqno + 1; i <= ring->fence_drv.sync_seq; ++i) { + ptr = &ring->fence_drv.fences[i & ring->fence_drv.num_fences_mask]; + rcu_read_lock(); + unprocessed = rcu_dereference(*ptr); + + if (unprocessed && !dma_fence_is_signaled(unprocessed)) { + fence = container_of(unprocessed, struct amdgpu_fence, base); + + /* save everything if the ring is not guilty, otherwise + * just save the content from other contexts. 
+ */ + if (!guilty_fence || (fence->context != guilty_fence->context)) + amdgpu_ring_backup_unprocessed_command(ring, wptr, + fence->wptr); + wptr = fence->wptr; + } + rcu_read_unlock(); + } +} + /* * Common fence implementation */ @@ -814,7 +882,7 @@ static void amdgpu_fence_free(struct rcu_head *rcu) struct dma_fence *f = container_of(rcu, struct dma_fence, rcu); /* free fence_slab if it's separated fence*/ - kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f)); + kfree(to_amdgpu_fence(f)); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c index 1ae88c459da5..b0082aa7f3c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c @@ -144,7 +144,8 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev) /* If algo exists, it means that the i2c_adapter's initialized */ if (!adev->pm.fru_eeprom_i2c_bus || !adev->pm.fru_eeprom_i2c_bus->algo) { - DRM_WARN("Cannot access FRU, EEPROM accessor not initialized"); + dev_warn(adev->dev, + "Cannot access FRU, EEPROM accessor not initialized"); return -ENODEV; } @@ -152,19 +153,22 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev) len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, fru_addr, buf, sizeof(buf)); if (len != 8) { - DRM_ERROR("Couldn't read the IPMI Common Header: %d", len); + dev_err(adev->dev, "Couldn't read the IPMI Common Header: %d", + len); return len < 0 ? len : -EIO; } if (buf[0] != 1) { - DRM_ERROR("Bad IPMI Common Header version: 0x%02x", buf[0]); + dev_err(adev->dev, "Bad IPMI Common Header version: 0x%02x", + buf[0]); return -EIO; } for (csum = 0; len > 0; len--) csum += buf[len - 1]; if (csum) { - DRM_ERROR("Bad IPMI Common Header checksum: 0x%02x", csum); + dev_err(adev->dev, "Bad IPMI Common Header checksum: 0x%02x", + csum); return -EIO; } @@ -179,12 +183,14 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev) /* Read the header of the PIA. */ len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addr, buf, 3); if (len != 3) { - DRM_ERROR("Couldn't read the Product Info Area header: %d", len); + dev_err(adev->dev, + "Couldn't read the Product Info Area header: %d", len); return len < 0 ? len : -EIO; } if (buf[0] != 1) { - DRM_ERROR("Bad IPMI Product Info Area version: 0x%02x", buf[0]); + dev_err(adev->dev, "Bad IPMI Product Info Area version: 0x%02x", + buf[0]); return -EIO; } @@ -197,14 +203,16 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev) len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addr, pia, size); if (len != size) { kfree(pia); - DRM_ERROR("Couldn't read the Product Info Area: %d", len); + dev_err(adev->dev, "Couldn't read the Product Info Area: %d", + len); return len < 0 ? 
len : -EIO; } for (csum = 0; size > 0; size--) csum += pia[size - 1]; if (csum) { - DRM_ERROR("Bad Product Info Area checksum: 0x%02x", csum); + dev_err(adev->dev, "Bad Product Info Area checksum: 0x%02x", + csum); kfree(pia); return -EIO; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 0ecc88df7208..b7ebae289bea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -317,7 +317,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj, */ if (!vm->is_compute_context || !vm->process_info) return 0; - if (!obj->import_attach || + if (!drm_gem_is_imported(obj) || !dma_buf_is_dynamic(obj->import_attach->dmabuf)) return 0; mutex_lock_nested(&vm->process_info->lock, 1); @@ -329,7 +329,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj, dev_warn(adev->dev, "validate_and_fence failed: %d\n", r); if (ti) { - dev_warn(adev->dev, "pid %d\n", ti->pid); + dev_warn(adev->dev, "pid %d\n", ti->task.pid); amdgpu_vm_put_task_info(ti); } } @@ -443,15 +443,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, int r; /* reject invalid gem flags */ - if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_NO_CPU_ACCESS | - AMDGPU_GEM_CREATE_CPU_GTT_USWC | - AMDGPU_GEM_CREATE_VRAM_CLEARED | - AMDGPU_GEM_CREATE_VM_ALWAYS_VALID | - AMDGPU_GEM_CREATE_EXPLICIT_SYNC | - AMDGPU_GEM_CREATE_ENCRYPTED | - AMDGPU_GEM_CREATE_GFX12_DCC | - AMDGPU_GEM_CREATE_DISCARDABLE)) + if (flags & ~AMDGPU_GEM_CREATE_SETTABLE_MASK) return -EINVAL; /* reject invalid gem domains */ @@ -466,6 +458,9 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, /* always clear VRAM */ flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED; + if (args->in.domains & AMDGPU_GEM_DOMAIN_MMIO_REMAP) + return -EINVAL; + /* create a gem object to contain this object in */ if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) { @@ -577,8 +572,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, goto release_object; if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) { - r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, - &range); + r = amdgpu_ttm_tt_get_user_pages(bo, &range); if (r) goto release_object; @@ -586,6 +580,8 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, if (r) goto user_pages_done; + amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range); + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); amdgpu_bo_unreserve(bo); @@ -791,36 +787,6 @@ error: return fence; } -/** - * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags - * - * @adev: amdgpu_device pointer - * @flags: GEM UAPI flags - * - * Returns the GEM UAPI flags mapped into hardware for the ASIC. 
- */ -uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags) -{ - uint64_t pte_flag = 0; - - if (flags & AMDGPU_VM_PAGE_EXECUTABLE) - pte_flag |= AMDGPU_PTE_EXECUTABLE; - if (flags & AMDGPU_VM_PAGE_READABLE) - pte_flag |= AMDGPU_PTE_READABLE; - if (flags & AMDGPU_VM_PAGE_WRITEABLE) - pte_flag |= AMDGPU_PTE_WRITEABLE; - if (flags & AMDGPU_VM_PAGE_PRT) - pte_flag |= AMDGPU_PTE_PRT_FLAG(adev); - if (flags & AMDGPU_VM_PAGE_NOALLOC) - pte_flag |= AMDGPU_PTE_NOALLOC; - - if (adev->gmc.gmc_funcs->map_mtype) - pte_flag |= amdgpu_gmc_map_mtype(adev, - flags & AMDGPU_VM_MTYPE_MASK); - - return pte_flag; -} - int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { @@ -841,7 +807,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, struct dma_fence_chain *timeline_chain = NULL; struct dma_fence *fence; struct drm_exec exec; - uint64_t va_flags; uint64_t vm_size; int r = 0; @@ -945,10 +910,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, switch (args->operation) { case AMDGPU_VA_OP_MAP: - va_flags = amdgpu_gem_va_map_flags(adev, args->flags); r = amdgpu_vm_bo_map(adev, bo_va, args->va_address, args->offset_in_bo, args->map_size, - va_flags); + args->flags); break; case AMDGPU_VA_OP_UNMAP: r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address); @@ -960,10 +924,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, args->map_size); break; case AMDGPU_VA_OP_REPLACE: - va_flags = amdgpu_gem_va_map_flags(adev, args->flags); r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address, args->offset_in_bo, args->map_size, - va_flags); + args->flags); break; default: break; @@ -997,17 +960,34 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, struct drm_gem_object *gobj; struct amdgpu_vm_bo_base *base; struct amdgpu_bo *robj; + struct drm_exec exec; + struct amdgpu_fpriv *fpriv = filp->driver_priv; int r; + if (args->padding) + return -EINVAL; + gobj = drm_gem_object_lookup(filp, args->handle); if (!gobj) return -ENOENT; robj = gem_to_amdgpu_bo(gobj); - r = amdgpu_bo_reserve(robj, false); - if (unlikely(r)) - goto out; + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT | + DRM_EXEC_IGNORE_DUPLICATES, 0); + drm_exec_until_all_locked(&exec) { + r = drm_exec_lock_obj(&exec, gobj); + drm_exec_retry_on_contention(&exec); + if (r) + goto out_exec; + + if (args->op == AMDGPU_GEM_OP_GET_MAPPING_INFO) { + r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 0); + drm_exec_retry_on_contention(&exec); + if (r) + goto out_exec; + } + } switch (args->op) { case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: { @@ -1018,29 +998,26 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, info.alignment = robj->tbo.page_alignment << PAGE_SHIFT; info.domains = robj->preferred_domains; info.domain_flags = robj->flags; - amdgpu_bo_unreserve(robj); + drm_exec_fini(&exec); if (copy_to_user(out, &info, sizeof(info))) r = -EFAULT; break; } case AMDGPU_GEM_OP_SET_PLACEMENT: - if (robj->tbo.base.import_attach && + if (drm_gem_is_imported(&robj->tbo.base) && args->value & AMDGPU_GEM_DOMAIN_VRAM) { r = -EINVAL; - amdgpu_bo_unreserve(robj); - break; + goto out_exec; } if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) { r = -EPERM; - amdgpu_bo_unreserve(robj); - break; + goto out_exec; } for (base = robj->vm_bo; base; base = base->next) if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev), amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) { r = -EINVAL; - amdgpu_bo_unreserve(robj); - goto out; + goto out_exec; } @@ -1053,17 +1030,146 @@ int 
amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) amdgpu_vm_bo_invalidate(robj, true); + drm_exec_fini(&exec); + break; + case AMDGPU_GEM_OP_GET_MAPPING_INFO: { + struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_find(&fpriv->vm, robj); + struct drm_amdgpu_gem_vm_entry *vm_entries; + struct amdgpu_bo_va_mapping *mapping; + int num_mappings = 0; + /* + * num_entries is set as an input to the size of the user-allocated array of + * drm_amdgpu_gem_vm_entry stored at args->value. + * num_entries is sent back as output as the number of mappings the bo has. + * If that number is larger than the size of the array, the ioctl must + * be retried. + */ + vm_entries = kvcalloc(args->num_entries, sizeof(*vm_entries), GFP_KERNEL); + if (!vm_entries) + return -ENOMEM; + + amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) { + if (num_mappings < args->num_entries) { + vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE; + vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE; + vm_entries[num_mappings].offset = mapping->offset; + vm_entries[num_mappings].flags = mapping->flags; + } + num_mappings += 1; + } + + amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) { + if (num_mappings < args->num_entries) { + vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE; + vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE; + vm_entries[num_mappings].offset = mapping->offset; + vm_entries[num_mappings].flags = mapping->flags; + } + num_mappings += 1; + } + + drm_exec_fini(&exec); + + if (num_mappings > 0 && num_mappings <= args->num_entries) + if (copy_to_user(u64_to_user_ptr(args->value), vm_entries, num_mappings * sizeof(*vm_entries))) + r = -EFAULT; + + args->num_entries = num_mappings; - amdgpu_bo_unreserve(robj); + kvfree(vm_entries); break; + } default: - amdgpu_bo_unreserve(robj); + drm_exec_fini(&exec); r = -EINVAL; } -out: drm_gem_object_put(gobj); return r; +out_exec: + drm_exec_fini(&exec); + drm_gem_object_put(gobj); + return r; +} + +/** + * amdgpu_gem_list_handles_ioctl - get information about a process' buffer objects + * + * @dev: drm device pointer + * @data: drm_amdgpu_gem_list_handles + * @filp: drm file pointer + * + * num_entries is set as an input to the size of the entries array. + * num_entries is sent back as output as the number of bos in the process. + * If that number is larger than the size of the array, the ioctl must + * be retried. + * + * Returns: + * 0 for success, -errno for errors. 
+ */ +int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp) +{ + struct drm_amdgpu_gem_list_handles *args = data; + struct drm_amdgpu_gem_list_handles_entry *bo_entries; + struct drm_gem_object *gobj; + int id, ret = 0; + int bo_index = 0; + int num_bos = 0; + + spin_lock(&filp->table_lock); + idr_for_each_entry(&filp->object_idr, gobj, id) + num_bos += 1; + spin_unlock(&filp->table_lock); + + if (args->num_entries < num_bos) { + args->num_entries = num_bos; + return 0; + } + + if (num_bos == 0) { + args->num_entries = 0; + return 0; + } + + bo_entries = kvcalloc(num_bos, sizeof(*bo_entries), GFP_KERNEL); + if (!bo_entries) + return -ENOMEM; + + spin_lock(&filp->table_lock); + idr_for_each_entry(&filp->object_idr, gobj, id) { + struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj); + struct drm_amdgpu_gem_list_handles_entry *bo_entry; + + if (bo_index >= num_bos) { + ret = -EAGAIN; + break; + } + + bo_entry = &bo_entries[bo_index]; + + bo_entry->size = amdgpu_bo_size(bo); + bo_entry->alloc_flags = bo->flags & AMDGPU_GEM_CREATE_SETTABLE_MASK; + bo_entry->preferred_domains = bo->preferred_domains; + bo_entry->gem_handle = id; + bo_entry->alignment = bo->tbo.page_alignment; + + if (bo->tbo.base.import_attach) + bo_entry->flags |= AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT; + + bo_index += 1; + } + spin_unlock(&filp->table_lock); + + args->num_entries = bo_index; + + if (!ret) + if (copy_to_user(u64_to_user_ptr(args->entries), bo_entries, num_bos * sizeof(*bo_entries))) + ret = -EFAULT; + + kvfree(bo_entries); + + return ret; } static int amdgpu_gem_align_pitch(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h index 3a8f57900a3a..b558336bc4c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h @@ -63,13 +63,28 @@ int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); -uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags); int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); +int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); +#define AMDGPU_GEM_CREATE_SETTABLE_MASK (AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | \ + AMDGPU_GEM_CREATE_NO_CPU_ACCESS | \ + AMDGPU_GEM_CREATE_CPU_GTT_USWC | \ + AMDGPU_GEM_CREATE_VRAM_CLEARED | \ + AMDGPU_GEM_CREATE_VM_ALWAYS_VALID | \ + AMDGPU_GEM_CREATE_EXPLICIT_SYNC | \ + AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE | \ + AMDGPU_GEM_CREATE_ENCRYPTED | \ + AMDGPU_GEM_CREATE_GFX12_DCC | \ + AMDGPU_GEM_CREATE_DISCARDABLE | \ + AMDGPU_GEM_CREATE_COHERENT | \ + AMDGPU_GEM_CREATE_UNCACHED | \ + AMDGPU_GEM_CREATE_EXT_COHERENT) + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index c5646af055ab..a09ccf7d8aa2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -149,7 +149,7 @@ static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev) static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev) { if (amdgpu_compute_multipipe != -1) { - DRM_INFO("amdgpu: forcing compute pipe policy %d\n", + 
dev_info(adev->dev, "amdgpu: forcing compute pipe policy %d\n", amdgpu_compute_multipipe); return amdgpu_compute_multipipe == 1; } @@ -674,7 +674,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id) * generation exposes more than 64 queues. If so, the * definition of queue_mask needs updating */ if (WARN_ON(i > (sizeof(queue_mask)*8))) { - DRM_ERROR("Invalid KCQ enabled: %d\n", i); + dev_err(adev->dev, "Invalid KCQ enabled: %d\n", i); break; } @@ -683,15 +683,15 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id) amdgpu_device_flush_hdp(adev, NULL); - DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe, - kiq_ring->queue); + dev_info(adev->dev, "kiq ring mec %d pipe %d q %d\n", kiq_ring->me, + kiq_ring->pipe, kiq_ring->queue); spin_lock(&kiq->ring_lock); r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * adev->gfx.num_compute_rings + kiq->pmf->set_resources_size); if (r) { - DRM_ERROR("Failed to lock KIQ (%d).\n", r); + dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r); spin_unlock(&kiq->ring_lock); return r; } @@ -712,7 +712,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id) r = amdgpu_ring_test_helper(kiq_ring); spin_unlock(&kiq->ring_lock); if (r) - DRM_ERROR("KCQ enable failed\n"); + dev_err(adev->dev, "KCQ enable failed\n"); return r; } @@ -734,7 +734,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id) r = amdgpu_mes_map_legacy_queue(adev, &adev->gfx.gfx_ring[j]); if (r) { - DRM_ERROR("failed to map gfx queue\n"); + dev_err(adev->dev, "failed to map gfx queue\n"); return r; } } @@ -748,7 +748,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id) r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * adev->gfx.num_gfx_rings); if (r) { - DRM_ERROR("Failed to lock KIQ (%d).\n", r); + dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r); spin_unlock(&kiq->ring_lock); return r; } @@ -769,7 +769,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id) r = amdgpu_ring_test_helper(kiq_ring); spin_unlock(&kiq->ring_lock); if (r) - DRM_ERROR("KGQ enable failed\n"); + dev_err(adev->dev, "KGQ enable failed\n"); return r; } @@ -1030,7 +1030,7 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, ih_data.head = *ras_if; - DRM_ERROR("CP ECC ERROR IRQ\n"); + dev_err(adev->dev, "CP ECC ERROR IRQ\n"); amdgpu_ras_interrupt_dispatch(adev, &ih_data); return 0; } @@ -1474,7 +1474,8 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring) owner = (void *)(unsigned long)atomic_inc_return(&counter); r = amdgpu_job_alloc_with_ib(ring->adev, &entity, owner, - 64, 0, &job); + 64, 0, &job, + AMDGPU_KERNEL_JOB_ID_CLEANER_SHADER); if (r) goto err; @@ -2279,7 +2280,7 @@ void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring) * Return: * return the latest index. */ -u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer) +u32 amdgpu_gfx_csb_preamble_start(u32 *buffer) { u32 count = 0; @@ -2303,7 +2304,7 @@ u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer) * Return: * return the latest index. */ -u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count) +u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, u32 *buffer, u32 count) { const struct cs_section_def *sect = NULL; const struct cs_extent_def *ext = NULL; @@ -2330,7 +2331,7 @@ u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, * @buffer: This is an output variable that gets the PACKET3 preamble end. 
* @count: Index to start set the preemble end. */ -void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count) +void amdgpu_gfx_csb_preamble_end(u32 *buffer, u32 count) { buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 08f268dab8f5..fb5f7a0ee029 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -642,9 +642,9 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring); void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work); void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring); void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring); -u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer); -u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count); -void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count); +u32 amdgpu_gfx_csb_preamble_start(u32 *buffer); +u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, u32 *buffer, u32 count); +void amdgpu_gfx_csb_preamble_end(u32 *buffer, u32 count); void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev); void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 6b0fbbb91e57..9dcf51991b5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -38,6 +38,13 @@ #include <drm/drm_drv.h> #include <drm/ttm/ttm_tt.h> +static const u64 four_gb = 0x100000000ULL; + +bool amdgpu_gmc_is_pdb0_enabled(struct amdgpu_device *adev) +{ + return adev->gmc.xgmi.connected_to_cpu || amdgpu_virt_xgmi_migrate_enabled(adev); +} + /** * amdgpu_gmc_pdb0_alloc - allocate vram for pdb0 * @@ -251,10 +258,20 @@ void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc u64 hive_vram_end = mc->xgmi.node_segment_size * mc->xgmi.num_physical_nodes - 1; mc->vram_start = mc->xgmi.node_segment_size * mc->xgmi.physical_node_id; mc->vram_end = mc->vram_start + mc->xgmi.node_segment_size - 1; - mc->gart_start = hive_vram_end + 1; + /* node_segment_size may not be 4GB aligned on SRIOV, align up is needed. */ + mc->gart_start = ALIGN(hive_vram_end + 1, four_gb); mc->gart_end = mc->gart_start + mc->gart_size - 1; - mc->fb_start = hive_vram_start; - mc->fb_end = hive_vram_end; + if (amdgpu_virt_xgmi_migrate_enabled(adev)) { + /* set mc->vram_start to 0 to switch the returned GPU address of + * amdgpu_bo_create_reserved() from FB aperture to GART aperture. 
+ */ + mc->vram_start = 0; + mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; + mc->visible_vram_size = min(mc->visible_vram_size, mc->real_vram_size); + } else { + mc->fb_start = hive_vram_start; + mc->fb_end = hive_vram_end; + } dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", mc->mc_vram_size >> 20, mc->vram_start, mc->vram_end, mc->real_vram_size >> 20); @@ -276,7 +293,6 @@ void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc, enum amdgpu_gart_placement gart_placement) { - const uint64_t four_gb = 0x100000000ULL; u64 size_af, size_bf; /*To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START*/ u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1); @@ -674,7 +690,7 @@ void amdgpu_gmc_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.high_pr, AMDGPU_FENCE_OWNER_UNDEFINED, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE, - &job); + &job, AMDGPU_KERNEL_JOB_ID_FLUSH_GPU_TLB); if (r) goto error_alloc; @@ -1041,9 +1057,7 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev) */ u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes; u64 pde0_page_size = (1ULL<<adev->gmc.vmid0_page_table_block_size)<<21; - u64 vram_addr = adev->vm_manager.vram_base_offset - - adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; - u64 vram_end = vram_addr + vram_size; + u64 vram_addr, vram_end; u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo); int idx; @@ -1056,6 +1070,11 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev) flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1)); flags |= AMDGPU_PDE_PTE_FLAG(adev); + vram_addr = adev->vm_manager.vram_base_offset; + if (!amdgpu_virt_xgmi_migrate_enabled(adev)) + vram_addr -= adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; + vram_end = vram_addr + vram_size; + /* The first n PDE0 entries are used as PTE, * pointing to vram */ @@ -1429,3 +1448,232 @@ bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev) return false; } + +enum amdgpu_memory_partition +amdgpu_gmc_get_vf_memory_partition(struct amdgpu_device *adev) +{ + switch (adev->gmc.num_mem_partitions) { + case 0: + return UNKNOWN_MEMORY_PARTITION_MODE; + case 1: + return AMDGPU_NPS1_PARTITION_MODE; + case 2: + return AMDGPU_NPS2_PARTITION_MODE; + case 4: + return AMDGPU_NPS4_PARTITION_MODE; + case 8: + return AMDGPU_NPS8_PARTITION_MODE; + default: + return AMDGPU_NPS1_PARTITION_MODE; + } +} + +enum amdgpu_memory_partition +amdgpu_gmc_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes) +{ + enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE; + + if (adev->nbio.funcs && + adev->nbio.funcs->get_memory_partition_mode) + mode = adev->nbio.funcs->get_memory_partition_mode(adev, + supp_modes); + else + dev_warn(adev->dev, "memory partition mode query is not supported\n"); + + return mode; +} + +enum amdgpu_memory_partition +amdgpu_gmc_query_memory_partition(struct amdgpu_device *adev) +{ + if (amdgpu_sriov_vf(adev)) + return amdgpu_gmc_get_vf_memory_partition(adev); + else + return amdgpu_gmc_get_memory_partition(adev, NULL); +} + +static bool amdgpu_gmc_validate_partition_info(struct amdgpu_device *adev) +{ + enum amdgpu_memory_partition mode; + u32 supp_modes; + bool valid; + + mode = amdgpu_gmc_get_memory_partition(adev, &supp_modes); + + /* Mode detected by hardware not 
present in supported modes */ + if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) && + !(BIT(mode - 1) & supp_modes)) + return false; + + switch (mode) { + case UNKNOWN_MEMORY_PARTITION_MODE: + case AMDGPU_NPS1_PARTITION_MODE: + valid = (adev->gmc.num_mem_partitions == 1); + break; + case AMDGPU_NPS2_PARTITION_MODE: + valid = (adev->gmc.num_mem_partitions == 2); + break; + case AMDGPU_NPS4_PARTITION_MODE: + valid = (adev->gmc.num_mem_partitions == 3 || + adev->gmc.num_mem_partitions == 4); + break; + case AMDGPU_NPS8_PARTITION_MODE: + valid = (adev->gmc.num_mem_partitions == 8); + break; + default: + valid = false; + } + + return valid; +} + +static bool amdgpu_gmc_is_node_present(int *node_ids, int num_ids, int nid) +{ + int i; + + /* Check if node with id 'nid' is present in 'node_ids' array */ + for (i = 0; i < num_ids; ++i) + if (node_ids[i] == nid) + return true; + + return false; +} + +static void +amdgpu_gmc_init_acpi_mem_ranges(struct amdgpu_device *adev, + struct amdgpu_mem_partition_info *mem_ranges) +{ + struct amdgpu_numa_info numa_info; + int node_ids[AMDGPU_MAX_MEM_RANGES]; + int num_ranges = 0, ret; + int num_xcc, xcc_id; + uint32_t xcc_mask; + + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + xcc_mask = (1U << num_xcc) - 1; + + for_each_inst(xcc_id, xcc_mask) { + ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info); + if (ret) + continue; + + if (numa_info.nid == NUMA_NO_NODE) { + mem_ranges[0].size = numa_info.size; + mem_ranges[0].numa.node = numa_info.nid; + num_ranges = 1; + break; + } + + if (amdgpu_gmc_is_node_present(node_ids, num_ranges, + numa_info.nid)) + continue; + + node_ids[num_ranges] = numa_info.nid; + mem_ranges[num_ranges].numa.node = numa_info.nid; + mem_ranges[num_ranges].size = numa_info.size; + ++num_ranges; + } + + adev->gmc.num_mem_partitions = num_ranges; +} + +void amdgpu_gmc_init_sw_mem_ranges(struct amdgpu_device *adev, + struct amdgpu_mem_partition_info *mem_ranges) +{ + enum amdgpu_memory_partition mode; + u32 start_addr = 0, size; + int i, r, l; + + mode = amdgpu_gmc_query_memory_partition(adev); + + switch (mode) { + case UNKNOWN_MEMORY_PARTITION_MODE: + adev->gmc.num_mem_partitions = 0; + break; + case AMDGPU_NPS1_PARTITION_MODE: + adev->gmc.num_mem_partitions = 1; + break; + case AMDGPU_NPS2_PARTITION_MODE: + adev->gmc.num_mem_partitions = 2; + break; + case AMDGPU_NPS4_PARTITION_MODE: + if (adev->flags & AMD_IS_APU) + adev->gmc.num_mem_partitions = 3; + else + adev->gmc.num_mem_partitions = 4; + break; + case AMDGPU_NPS8_PARTITION_MODE: + adev->gmc.num_mem_partitions = 8; + break; + default: + adev->gmc.num_mem_partitions = 1; + break; + } + + /* Use NPS range info, if populated */ + r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges, + &adev->gmc.num_mem_partitions); + if (!r) { + l = 0; + for (i = 1; i < adev->gmc.num_mem_partitions; ++i) { + if (mem_ranges[i].range.lpfn > + mem_ranges[i - 1].range.lpfn) + l = i; + } + + } else { + if (!adev->gmc.num_mem_partitions) { + dev_warn(adev->dev, + "Not able to detect NPS mode, fall back to NPS1\n"); + adev->gmc.num_mem_partitions = 1; + } + /* Fallback to sw based calculation */ + size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT; + size /= adev->gmc.num_mem_partitions; + + for (i = 0; i < adev->gmc.num_mem_partitions; ++i) { + mem_ranges[i].range.fpfn = start_addr; + mem_ranges[i].size = + ((u64)size << AMDGPU_GPU_PAGE_SHIFT); + mem_ranges[i].range.lpfn = start_addr + size - 1; + start_addr += size; + } + + l = adev->gmc.num_mem_partitions - 1; + } + + /* Adjust the last one */ + 
mem_ranges[l].range.lpfn = + (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1; + mem_ranges[l].size = + adev->gmc.real_vram_size - + ((u64)mem_ranges[l].range.fpfn << AMDGPU_GPU_PAGE_SHIFT); +} + +int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev) +{ + bool valid; + + adev->gmc.mem_partitions = kcalloc(AMDGPU_MAX_MEM_RANGES, + sizeof(struct amdgpu_mem_partition_info), + GFP_KERNEL); + if (!adev->gmc.mem_partitions) + return -ENOMEM; + + if (adev->gmc.is_app_apu) + amdgpu_gmc_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions); + else + amdgpu_gmc_init_sw_mem_ranges(adev, adev->gmc.mem_partitions); + + if (amdgpu_sriov_vf(adev)) + valid = true; + else + valid = amdgpu_gmc_validate_partition_info(adev); + if (!valid) { + /* TODO: handle invalid case */ + dev_warn(adev->dev, + "Mem ranges not matching with hardware config\n"); + } + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index 80fa29c26e9e..55097ca10738 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -84,6 +84,8 @@ enum amdgpu_memory_partition { #define AMDGPU_GMC_INIT_RESET_NPS BIT(0) +#define AMDGPU_MAX_MEM_RANGES 8 + /* * GMC page fault information */ @@ -152,15 +154,15 @@ struct amdgpu_gmc_funcs { unsigned pasid); /* enable/disable PRT support */ void (*set_prt)(struct amdgpu_device *adev, bool enable); - /* map mtype to hardware flags */ - uint64_t (*map_mtype)(struct amdgpu_device *adev, uint32_t flags); /* get the pde for a given mc addr */ void (*get_vm_pde)(struct amdgpu_device *adev, int level, u64 *dst, u64 *flags); - /* get the pte flags to use for a BO VA mapping */ + /* get the pte flags to use for PTEs */ void (*get_vm_pte)(struct amdgpu_device *adev, - struct amdgpu_bo_va_mapping *mapping, - uint64_t *flags); + struct amdgpu_vm *vm, + struct amdgpu_bo *bo, + uint32_t vm_flags, + uint64_t *pte_flags); /* override per-page pte flags */ void (*override_vm_pte_flags)(struct amdgpu_device *dev, struct amdgpu_vm *vm, @@ -354,9 +356,10 @@ struct amdgpu_gmc { #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr)) #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid)) -#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags)) #define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags)) -#define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags)) +#define amdgpu_gmc_get_vm_pte(adev, vm, bo, vm_flags, pte_flags) \ + ((adev)->gmc.gmc_funcs->get_vm_pte((adev), (vm), (bo), (vm_flags), \ + (pte_flags))) #define amdgpu_gmc_override_vm_pte_flags(adev, vm, addr, pte_flags) \ (adev)->gmc.gmc_funcs->override_vm_pte_flags \ ((adev), (vm), (addr), (pte_flags)) @@ -394,6 +397,7 @@ static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr) return addr; } +bool amdgpu_gmc_is_pdb0_enabled(struct amdgpu_device *adev); int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev); void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level, uint64_t *addr, uint64_t *flags); @@ -455,5 +459,13 @@ int amdgpu_gmc_request_memory_partition(struct amdgpu_device *adev, int nps_mode); void amdgpu_gmc_prepare_nps_mode_change(struct amdgpu_device *adev); bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev); - +enum amdgpu_memory_partition 
+amdgpu_gmc_get_vf_memory_partition(struct amdgpu_device *adev); +enum amdgpu_memory_partition +amdgpu_gmc_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes); +enum amdgpu_memory_partition +amdgpu_gmc_query_memory_partition(struct amdgpu_device *adev); +int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev); +void amdgpu_gmc_init_sw_mem_ranges(struct amdgpu_device *adev, + struct amdgpu_mem_partition_info *mem_ranges); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c index e36fede7f74c..2c6a6b858112 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c @@ -167,13 +167,12 @@ void amdgpu_hmm_unregister(struct amdgpu_bo *bo) int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, uint64_t start, uint64_t npages, bool readonly, - void *owner, struct page **pages, + void *owner, struct hmm_range **phmm_range) { struct hmm_range *hmm_range; unsigned long end; unsigned long timeout; - unsigned long i; unsigned long *pfns; int r = 0; @@ -222,14 +221,6 @@ retry: hmm_range->start = start; hmm_range->hmm_pfns = pfns; - /* - * Due to default_flags, all pages are HMM_PFN_VALID or - * hmm_range_fault() fails. FIXME: The pages cannot be touched outside - * the notifier_lock, and mmu_interval_read_retry() must be done first. - */ - for (i = 0; pages && i < npages; i++) - pages[i] = hmm_pfn_to_page(pfns[i]); - *phmm_range = hmm_range; return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h index e2edcd010ccc..953e1d06de20 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h @@ -33,7 +33,7 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, uint64_t start, uint64_t npages, bool readonly, - void *owner, struct page **pages, + void *owner, struct hmm_range **phmm_range); bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c index 8179d0814db9..9cb72f0c5277 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c @@ -24,7 +24,6 @@ * Alex Deucher */ -#include <linux/export.h> #include <linux/pci.h> #include <drm/drm_edid.h> @@ -185,7 +184,7 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev, snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), "AMDGPU i2c hw bus %s", name); i2c->adapter.algo = &amdgpu_atombios_i2c_algo; - ret = i2c_add_adapter(&i2c->adapter); + ret = devm_i2c_add_adapter(dev->dev, &i2c->adapter); if (ret) goto out_free; } else { @@ -216,15 +215,6 @@ out_free: } -void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c) -{ - if (!i2c) - return; - WARN_ON(i2c->has_aux); - i2c_del_adapter(&i2c->adapter); - kfree(i2c); -} - void amdgpu_i2c_init(struct amdgpu_device *adev) { if (!adev->is_atom_fw) { @@ -249,12 +239,9 @@ void amdgpu_i2c_fini(struct amdgpu_device *adev) { int i; - for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) { - if (adev->i2c_bus[i]) { - amdgpu_i2c_destroy(adev->i2c_bus[i]); + for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) + if (adev->i2c_bus[i]) adev->i2c_bus[i] = NULL; - } - } } /* looks up bus based on id */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 802743efa3b3..7d9bcb72e8dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -128,6 +128,7 @@ int amdgpu_ib_schedule(struct 
amdgpu_ring *ring, unsigned int num_ibs, struct amdgpu_device *adev = ring->adev; struct amdgpu_ib *ib = &ibs[0]; struct dma_fence *tmp = NULL; + struct amdgpu_fence *af; bool need_ctx_switch; struct amdgpu_vm *vm; uint64_t fence_ctx; @@ -138,7 +139,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, int vmid = AMDGPU_JOB_GET_VMID(job); bool need_pipe_sync = false; unsigned int cond_exec; - unsigned int i; int r = 0; @@ -154,6 +154,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, csa_va = job->csa_va; gds_va = job->gds_va; init_shadow = job->init_shadow; + af = &job->hw_fence; + /* Save the context of the job for reset handling. + * The driver needs this so it can skip the ring + * contents for guilty contexts. + */ + af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0; } else { vm = NULL; fence_ctx = 0; @@ -161,6 +167,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, csa_va = 0; gds_va = 0; init_shadow = false; + af = NULL; } if (!ring->sched.ready) { @@ -282,7 +289,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr); } - r = amdgpu_fence_emit(ring, f, job, fence_flags); + r = amdgpu_fence_emit(ring, f, af, fence_flags); if (r) { dev_err(adev->dev, "failed to emit fence (%d)\n", r); if (job && job->vmid) @@ -304,8 +311,17 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH) ring->funcs->emit_wave_limit(ring, false); + /* Save the wptr associated with this fence. + * This must be last for resets to work properly + * as we need to save the wptr associated with this + * fence so we know what rings contents to backup + * after we reset the queue. + */ + amdgpu_fence_save_wptr(*f); + amdgpu_ring_ib_end(ring); amdgpu_ring_commit(ring); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 5dd78a9cb12d..3ef5bc95642c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -275,13 +275,12 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, { struct amdgpu_device *adev = ring->adev; unsigned vmhub = ring->vm_hub; - struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; uint64_t fence_context = adev->fence_context + ring->idx; bool needs_flush = vm->use_cpu_for_update; uint64_t updates = amdgpu_vm_tlb_seq(vm); int r; - *id = id_mgr->reserved; + *id = vm->reserved_vmid[vmhub]; if ((*id)->owner != vm->immediate.fence_context || !amdgpu_vmid_compatible(*id, job) || (*id)->flushed_updates < updates || @@ -474,40 +473,61 @@ bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub) return vm->reserved_vmid[vmhub]; } -int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, +/* + * amdgpu_vmid_alloc_reserved - reserve a specific VMID for this vm + * @adev: amdgpu device structure + * @vm: the VM to reserve an ID for + * @vmhub: the VMHUB which should be used + * + * Mostly used to have a reserved VMID for debugging and SPM. + * + * Returns: 0 for success, -ENOENT if an ID is already reserved. 
+ */ +int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned vmhub) { struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; + struct amdgpu_vmid *id; + int r = 0; mutex_lock(&id_mgr->lock); - - ++id_mgr->reserved_use_count; - if (!id_mgr->reserved) { - struct amdgpu_vmid *id; - - id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, - list); - /* Remove from normal round robin handling */ - list_del_init(&id->list); - id_mgr->reserved = id; + if (vm->reserved_vmid[vmhub]) + goto unlock; + if (id_mgr->reserved_vmid) { + r = -ENOENT; + goto unlock; } - + /* Remove from normal round robin handling */ + id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list); + list_del_init(&id->list); + vm->reserved_vmid[vmhub] = id; + id_mgr->reserved_vmid = true; mutex_unlock(&id_mgr->lock); + return 0; +unlock: + mutex_unlock(&id_mgr->lock); + return r; } -void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, +/* + * amdgpu_vmid_free_reserved - free up a reserved VMID again + * @adev: amdgpu device structure + * @vm: the VM with the reserved ID + * @vmhub: the VMHUB which should be used + */ +void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned vmhub) { struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; mutex_lock(&id_mgr->lock); - if (!--id_mgr->reserved_use_count) { - /* give the reserved ID back to normal round robin */ - list_add(&id_mgr->reserved->list, &id_mgr->ids_lru); - id_mgr->reserved = NULL; + if (vm->reserved_vmid[vmhub]) { + list_add(&vm->reserved_vmid[vmhub]->list, + &id_mgr->ids_lru); + vm->reserved_vmid[vmhub] = NULL; + id_mgr->reserved_vmid = false; } - mutex_unlock(&id_mgr->lock); } @@ -574,7 +594,6 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev) mutex_init(&id_mgr->lock); INIT_LIST_HEAD(&id_mgr->ids_lru); - id_mgr->reserved_use_count = 0; /* for GC <10, SDMA uses MMHUB so use first_kfd_vmid for both GC and MM */ if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 0, 0)) @@ -594,11 +613,6 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev) list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru); } } - /* alloc a default reserved vmid to enforce isolation */ - for (i = 0; i < (adev->xcp_mgr ? 
adev->xcp_mgr->num_xcps : 1); i++) { - if (adev->enforce_isolation[i] != AMDGPU_ENFORCE_ISOLATION_DISABLE) - amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i)); - } } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h index 240fa6751260..b3649cd3af56 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h @@ -67,8 +67,7 @@ struct amdgpu_vmid_mgr { unsigned num_ids; struct list_head ids_lru; struct amdgpu_vmid ids[AMDGPU_NUM_VMID]; - struct amdgpu_vmid *reserved; - unsigned int reserved_use_count; + bool reserved_vmid; }; int amdgpu_pasid_alloc(unsigned int bits); @@ -79,10 +78,10 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv, bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev, struct amdgpu_vmid *id); bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub); -int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, - unsigned vmhub); -void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, - unsigned vmhub); +int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm, + unsigned vmhub); +void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm, + unsigned vmhub); int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_job *job, struct dma_fence **fence); void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index 30f16968b578..a6419246e9c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -218,7 +218,7 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) restart_ih: count = AMDGPU_IH_MAX_NUM_IVS; - DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr); + dev_dbg(adev->dev, "%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr); /* Order reading of wptr vs. reading of IH ring data */ rmb(); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h index 7f7ea046e209..f58b6be7fccc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h @@ -56,14 +56,14 @@ struct amdgpu_ih_ring { bool use_bus_addr; struct amdgpu_bo *ring_obj; - volatile uint32_t *ring; + uint32_t *ring; uint64_t gpu_addr; uint64_t wptr_addr; - volatile uint32_t *wptr_cpu; + uint32_t *wptr_cpu; uint64_t rptr_addr; - volatile uint32_t *rptr_cpu; + uint32_t *rptr_cpu; bool enabled; unsigned rptr; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c new file mode 100644 index 000000000000..99e1cf4fc955 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c @@ -0,0 +1,96 @@ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu.h" +#include "amdgpu_ip.h" + +static int8_t amdgpu_logical_to_dev_inst(struct amdgpu_device *adev, + enum amd_hw_ip_block_type block, + int8_t inst) +{ + int8_t dev_inst; + + switch (block) { + case GC_HWIP: + case SDMA0_HWIP: + /* Both JPEG and VCN as JPEG is only alias of VCN */ + case VCN_HWIP: + dev_inst = adev->ip_map.dev_inst[block][inst]; + break; + default: + /* For rest of the IPs, no look up required. + * Assume 'logical instance == physical instance' for all configs. */ + dev_inst = inst; + break; + } + + return dev_inst; +} + +static uint32_t amdgpu_logical_to_dev_mask(struct amdgpu_device *adev, + enum amd_hw_ip_block_type block, + uint32_t mask) +{ + uint32_t dev_mask = 0; + int8_t log_inst, dev_inst; + + while (mask) { + log_inst = ffs(mask) - 1; + dev_inst = amdgpu_logical_to_dev_inst(adev, block, log_inst); + dev_mask |= (1 << dev_inst); + mask &= ~(1 << log_inst); + } + + return dev_mask; +} + +static void amdgpu_populate_ip_map(struct amdgpu_device *adev, + enum amd_hw_ip_block_type ip_block, + uint32_t inst_mask) +{ + int l = 0, i; + + while (inst_mask) { + i = ffs(inst_mask) - 1; + adev->ip_map.dev_inst[ip_block][l++] = i; + inst_mask &= ~(1 << i); + } + for (; l < HWIP_MAX_INSTANCE; l++) + adev->ip_map.dev_inst[ip_block][l] = -1; +} + +void amdgpu_ip_map_init(struct amdgpu_device *adev) +{ + u32 ip_map[][2] = { + { GC_HWIP, adev->gfx.xcc_mask }, + { SDMA0_HWIP, adev->sdma.sdma_mask }, + { VCN_HWIP, adev->vcn.inst_mask }, + }; + int i; + + for (i = 0; i < ARRAY_SIZE(ip_map); ++i) + amdgpu_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]); + + adev->ip_map.logical_to_dev_inst = amdgpu_logical_to_dev_inst; + adev->ip_map.logical_to_dev_mask = amdgpu_logical_to_dev_mask; +} diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h index 0d878ca3acba..2490fd322aec 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h @@ -1,5 +1,5 @@ /* - * Copyright 2014 Advanced Micro Devices, Inc. + * Copyright 2025 Advanced Micro Devices, Inc. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -21,12 +21,9 @@ * */ -#ifndef __DCE_V11_0_H__ -#define __DCE_V11_0_H__ +#ifndef __AMDGPU_IP_H__ +#define __AMDGPU_IP_H__ -extern const struct amdgpu_ip_block_version dce_v11_0_ip_block; -extern const struct amdgpu_ip_block_version dce_v11_2_ip_block; +void amdgpu_ip_map_init(struct amdgpu_device *adev); -void dce_v11_0_disable_dce(struct amdgpu_device *adev); - -#endif +#endif /* __AMDGPU_IP_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 13c60cac4261..8112ffc85995 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -142,8 +142,9 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev) r = src->funcs->set(adev, src, k, AMDGPU_IRQ_STATE_DISABLE); if (r) - DRM_ERROR("error disabling interrupt (%d)\n", - r); + dev_err(adev->dev, + "error disabling interrupt (%d)\n", + r); } } } @@ -242,7 +243,7 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev) return true; } -static void amdgpu_restore_msix(struct amdgpu_device *adev) +void amdgpu_restore_msix(struct amdgpu_device *adev) { u16 ctrl; @@ -315,7 +316,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev) adev->irq.irq = irq; adev_to_drm(adev)->max_vblank_count = 0x00ffffff; - DRM_DEBUG("amdgpu: irq initialized.\n"); + dev_dbg(adev->dev, "amdgpu: irq initialized.\n"); return 0; free_vectors: @@ -461,10 +462,10 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev, src_id = entry.src_id; if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) { - DRM_DEBUG("Invalid client_id in IV: %d\n", client_id); + dev_dbg(adev->dev, "Invalid client_id in IV: %d\n", client_id); } else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) { - DRM_DEBUG("Invalid src_id in IV: %d\n", src_id); + dev_dbg(adev->dev, "Invalid src_id in IV: %d\n", src_id); } else if (((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) || (client_id == SOC15_IH_CLIENTID_ISP)) && @@ -472,18 +473,21 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev, generic_handle_domain_irq(adev->irq.domain, src_id); } else if (!adev->irq.client[client_id].sources) { - DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n", - client_id, src_id); + dev_dbg(adev->dev, + "Unregistered interrupt client_id: %d src_id: %d\n", + client_id, src_id); } else if ((src = adev->irq.client[client_id].sources[src_id])) { r = src->funcs->process(adev, src, &entry); if (r < 0) - DRM_ERROR("error processing interrupt (%d)\n", r); + dev_err(adev->dev, "error processing interrupt (%d)\n", + r); else if (r) handled = true; } else { - DRM_DEBUG("Unregistered interrupt src_id: %d of client_id:%d\n", + dev_dbg(adev->dev, + "Unregistered interrupt src_id: %d of client_id:%d\n", src_id, client_id); } @@ -620,7 +624,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned int type) { /* When the threshold is reached,the interrupt source may not be enabled.return -EINVAL */ - if (amdgpu_ras_is_rma(adev)) + if (amdgpu_ras_is_rma(adev) && !amdgpu_irq_enabled(adev, src, type)) return -EINVAL; if (!adev->irq.installed) @@ -732,7 +736,7 @@ int amdgpu_irq_add_domain(struct amdgpu_device *adev) adev->irq.domain = irq_domain_create_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID, &amdgpu_hw_irqdomain_ops, adev); if (!adev->irq.domain) { - DRM_ERROR("GPU irq add domain failed\n"); + dev_err(adev->dev, "GPU irq add domain failed\n"); return -ENODEV; } diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h index 04c0b4fa17a4..9f0417456abd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h @@ -146,5 +146,6 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev); int amdgpu_irq_add_domain(struct amdgpu_device *adev); void amdgpu_irq_remove_domain(struct amdgpu_device *adev); unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id); +void amdgpu_restore_msix(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c index 43fc941dfa57..9cddbf50442a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c @@ -33,6 +33,8 @@ #include "isp_v4_1_0.h" #include "isp_v4_1_1.h" +#define ISP_MC_ADDR_ALIGN (1024 * 32) + /** * isp_hw_init - start and test isp block * @@ -141,6 +143,179 @@ static int isp_set_powergating_state(struct amdgpu_ip_block *ip_block, return 0; } +static int is_valid_isp_device(struct device *isp_parent, struct device *amdgpu_dev) +{ + if (isp_parent != amdgpu_dev) + return -EINVAL; + + return 0; +} + +/** + * isp_user_buffer_alloc - create user buffer object (BO) for isp + * + * @dev: isp device handle + * @dmabuf: DMABUF handle for isp buffer allocated in system memory + * @buf_obj: GPU buffer object handle to initialize + * @buf_addr: GPU addr of the pinned BO to initialize + * + * Imports isp DMABUF to allocate and pin a user BO for isp internal use. It does + * GART alloc to generate GPU addr for BO to make it accessible through the + * GART aperture for ISP HW. + * + * This function is exported to allow the V4L2 isp device external to drm device + * to create and access the isp user BO. + * + * Returns: + * 0 on success, negative error code otherwise. + */ +int isp_user_buffer_alloc(struct device *dev, void *dmabuf, + void **buf_obj, u64 *buf_addr) +{ + struct platform_device *ispdev = to_platform_device(dev); + const struct isp_platform_data *isp_pdata; + struct amdgpu_device *adev; + struct mfd_cell *mfd_cell; + struct amdgpu_bo *bo; + u64 gpu_addr; + int ret; + + if (WARN_ON(!ispdev)) + return -ENODEV; + + if (WARN_ON(!buf_obj)) + return -EINVAL; + + if (WARN_ON(!buf_addr)) + return -EINVAL; + + mfd_cell = &ispdev->mfd_cell[0]; + if (!mfd_cell) + return -ENODEV; + + isp_pdata = mfd_cell->platform_data; + adev = isp_pdata->adev; + + ret = is_valid_isp_device(ispdev->dev.parent, adev->dev); + if (ret) + return ret; + + ret = amdgpu_bo_create_isp_user(adev, dmabuf, + AMDGPU_GEM_DOMAIN_GTT, &bo, &gpu_addr); + if (ret) { + drm_err(&adev->ddev, "failed to alloc gart user buffer (%d)", ret); + return ret; + } + + *buf_obj = (void *)bo; + *buf_addr = gpu_addr; + + return 0; +} +EXPORT_SYMBOL(isp_user_buffer_alloc); + +/** + * isp_user_buffer_free - free isp user buffer object (BO) + * + * @buf_obj: amdgpu isp user BO to free + * + * unpin and unref BO for isp internal use. + * + * This function is exported to allow the V4L2 isp device + * external to drm device to free the isp user BO. 
+ */ +void isp_user_buffer_free(void *buf_obj) +{ + amdgpu_bo_free_isp_user(buf_obj); +} +EXPORT_SYMBOL(isp_user_buffer_free); + +/** + * isp_kernel_buffer_alloc - create kernel buffer object (BO) for isp + * + * @dev: isp device handle + * @size: size for the new BO + * @buf_obj: GPU BO handle to initialize + * @gpu_addr: GPU addr of the pinned BO + * @cpu_addr: CPU address mapping of BO + * + * Allocates and pins a kernel BO for internal isp firmware use. + * + * This function is exported to allow the V4L2 isp device + * external to drm device to create and access the kernel BO. + * + * Returns: + * 0 on success, negative error code otherwise. + */ +int isp_kernel_buffer_alloc(struct device *dev, u64 size, + void **buf_obj, u64 *gpu_addr, void **cpu_addr) +{ + struct platform_device *ispdev = to_platform_device(dev); + struct amdgpu_bo **bo = (struct amdgpu_bo **)buf_obj; + const struct isp_platform_data *isp_pdata; + struct amdgpu_device *adev; + struct mfd_cell *mfd_cell; + int ret; + + if (WARN_ON(!ispdev)) + return -ENODEV; + + if (WARN_ON(!buf_obj)) + return -EINVAL; + + if (WARN_ON(!gpu_addr)) + return -EINVAL; + + if (WARN_ON(!cpu_addr)) + return -EINVAL; + + mfd_cell = &ispdev->mfd_cell[0]; + if (!mfd_cell) + return -ENODEV; + + isp_pdata = mfd_cell->platform_data; + adev = isp_pdata->adev; + + ret = is_valid_isp_device(ispdev->dev.parent, adev->dev); + if (ret) + return ret; + + ret = amdgpu_bo_create_kernel(adev, + size, + ISP_MC_ADDR_ALIGN, + AMDGPU_GEM_DOMAIN_GTT, + bo, + gpu_addr, + cpu_addr); + if (!cpu_addr || ret) { + drm_err(&adev->ddev, "failed to alloc gart kernel buffer (%d)", ret); + return ret; + } + + return 0; +} +EXPORT_SYMBOL(isp_kernel_buffer_alloc); + +/** + * isp_kernel_buffer_free - free isp kernel buffer object (BO) + * + * @buf_obj: amdgpu isp kernel BO to free + * @gpu_addr: GPU addr of isp kernel BO + * @cpu_addr: CPU addr of isp kernel BO + * + * Unmaps and unpins an isp kernel BO. + * + * This function is exported to allow the V4L2 isp device + * external to drm device to free the kernel BO. 
+ */ +void isp_kernel_buffer_free(void **buf_obj, u64 *gpu_addr, void **cpu_addr) +{ + struct amdgpu_bo **bo = (struct amdgpu_bo **)buf_obj; + + amdgpu_bo_free_kernel(bo, gpu_addr, cpu_addr); +} +EXPORT_SYMBOL(isp_kernel_buffer_free); + static const struct amd_ip_funcs isp_ip_funcs = { .name = "isp_ip", .early_init = isp_early_init, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h index 4f3b7b5d9c1f..d6f4ffa4c97c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h @@ -28,16 +28,13 @@ #ifndef __AMDGPU_ISP_H__ #define __AMDGPU_ISP_H__ +#include <drm/amd/isp.h> +#include <linux/pm_domain.h> + #define ISP_REGS_OFFSET_END 0x629A4 struct amdgpu_isp; -struct isp_platform_data { - void *adev; - u32 asic_type; - resource_size_t base_rmmio_size; -}; - struct isp_funcs { int (*hw_init)(struct amdgpu_isp *isp); int (*hw_fini)(struct amdgpu_isp *isp); @@ -54,6 +51,7 @@ struct amdgpu_isp { struct isp_platform_data *isp_pdata; unsigned int harvest_config; const struct firmware *fw; + struct generic_pm_domain ispgpd; }; extern const struct amdgpu_ip_block_version isp_v4_1_0_ip_block; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index ddb9d3269357..d020a890a0ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -89,10 +89,10 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) { struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); struct amdgpu_job *job = to_amdgpu_job(s_job); - struct amdgpu_task_info *ti; + struct drm_wedge_task_info *info = NULL; + struct amdgpu_task_info *ti = NULL; struct amdgpu_device *adev = ring->adev; - int idx; - int r; + int idx, r; if (!drm_dev_enter(adev_to_drm(adev), &idx)) { dev_info(adev->dev, "%s - device unplugged skipping recovery on scheduler:%s", @@ -112,6 +112,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) amdgpu_job_core_dump(adev, job); if (amdgpu_gpu_recovery && + amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_SOFT_RESET) && amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { dev_err(adev->dev, "ring %s timeout, but soft recovered\n", s_job->sched->name); @@ -124,53 +125,30 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid); if (ti) { - dev_err(adev->dev, - "Process information: process %s pid %d thread %s pid %d\n", - ti->process_name, ti->tgid, ti->task_name, ti->pid); - amdgpu_vm_put_task_info(ti); + amdgpu_vm_print_task_info(adev, ti); + info = &ti->task; } /* attempt a per ring reset */ if (unlikely(adev->debug_disable_gpu_ring_reset)) { dev_err(adev->dev, "Ring reset disabled by debug mask\n"); - } else if (amdgpu_gpu_recovery && ring->funcs->reset) { - bool is_guilty; - - dev_err(adev->dev, "Starting %s ring reset\n", s_job->sched->name); - /* stop the scheduler, but don't mess with the - * bad job yet because if ring reset fails - * we'll fall back to full GPU reset. - */ - drm_sched_wqueue_stop(&ring->sched); - - /* for engine resets, we need to reset the engine, - * but individual queues may be unaffected. - * check here to make sure the accounting is correct. 
- */ - if (ring->funcs->is_guilty) - is_guilty = ring->funcs->is_guilty(ring); - else - is_guilty = true; - - if (is_guilty) - dma_fence_set_error(&s_job->s_fence->finished, -ETIME); - - r = amdgpu_ring_reset(ring, job->vmid); + } else if (amdgpu_gpu_recovery && + amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_PER_QUEUE) && + ring->funcs->reset) { + dev_err(adev->dev, "Starting %s ring reset\n", + s_job->sched->name); + r = amdgpu_ring_reset(ring, job->vmid, &job->hw_fence); if (!r) { - if (amdgpu_ring_sched_ready(ring)) - drm_sched_stop(&ring->sched, s_job); - if (is_guilty) { - atomic_inc(&ring->adev->gpu_reset_counter); - amdgpu_fence_driver_force_completion(ring); - } - if (amdgpu_ring_sched_ready(ring)) - drm_sched_start(&ring->sched, 0); - dev_err(adev->dev, "Ring %s reset succeeded\n", ring->sched.name); - drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE); + atomic_inc(&ring->adev->gpu_reset_counter); + dev_err(adev->dev, "Ring %s reset succeeded\n", + ring->sched.name); + drm_dev_wedged_event(adev_to_drm(adev), + DRM_WEDGE_RECOVERY_NONE, info); goto exit; } - dev_err(adev->dev, "Ring %s reset failure\n", ring->sched.name); + dev_err(adev->dev, "Ring %s reset failed\n", ring->sched.name); } + dma_fence_set_error(&s_job->s_fence->finished, -ETIME); if (amdgpu_device_should_recover_gpu(ring->adev)) { @@ -198,13 +176,15 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) } exit: + amdgpu_vm_put_task_info(ti); drm_dev_exit(idx); - return DRM_GPU_SCHED_STAT_NOMINAL; + return DRM_GPU_SCHED_STAT_RESET; } int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct drm_sched_entity *entity, void *owner, - unsigned int num_ibs, struct amdgpu_job **job) + unsigned int num_ibs, struct amdgpu_job **job, + u64 drm_client_id) { if (num_ibs == 0) return -EINVAL; @@ -222,17 +202,19 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (!entity) return 0; - return drm_sched_job_init(&(*job)->base, entity, 1, owner); + return drm_sched_job_init(&(*job)->base, entity, 1, owner, + drm_client_id); } int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, struct drm_sched_entity *entity, void *owner, size_t size, enum amdgpu_ib_pool_type pool_type, - struct amdgpu_job **job) + struct amdgpu_job **job, u64 k_job_id) { int r; - r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job); + r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job, + k_job_id); if (r) return r; @@ -384,13 +366,6 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job, dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r); goto error; } - /* - * The VM structure might be released after the VMID is - * assigned, we had multiple problems with people trying to use - * the VM pointer so better set it to NULL. - */ - if (!fence) - job->vm = NULL; return fence; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h index 931fed8892cc..4a6487eb6cb5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h @@ -44,6 +44,22 @@ struct amdgpu_fence; enum amdgpu_ib_pool_type; +/* Internal kernel job ids. (decreasing values, starting from U64_MAX). 
*/ +#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE (18446744073709551615ULL) +#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES (18446744073709551614ULL) +#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE (18446744073709551613ULL) +#define AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR (18446744073709551612ULL) +#define AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER (18446744073709551611ULL) +#define AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA (18446744073709551610ULL) +#define AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER (18446744073709551609ULL) +#define AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE (18446744073709551608ULL) +#define AMDGPU_KERNEL_JOB_ID_MOVE_BLIT (18446744073709551607ULL) +#define AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER (18446744073709551606ULL) +#define AMDGPU_KERNEL_JOB_ID_CLEANER_SHADER (18446744073709551605ULL) +#define AMDGPU_KERNEL_JOB_ID_FLUSH_GPU_TLB (18446744073709551604ULL) +#define AMDGPU_KERNEL_JOB_ID_KFD_GART_MAP (18446744073709551603ULL) +#define AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST (18446744073709551602ULL) + struct amdgpu_job { struct drm_sched_job base; struct amdgpu_vm *vm; @@ -91,11 +107,13 @@ static inline struct amdgpu_ring *amdgpu_job_ring(struct amdgpu_job *job) int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct drm_sched_entity *entity, void *owner, - unsigned int num_ibs, struct amdgpu_job **job); + unsigned int num_ibs, struct amdgpu_job **job, + u64 drm_client_id); int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, struct drm_sched_entity *entity, void *owner, size_t size, enum amdgpu_ib_pool_type pool_type, - struct amdgpu_job **job); + struct amdgpu_job **job, + u64 k_job_id); void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds, struct amdgpu_bo *gws, struct amdgpu_bo *oa); void amdgpu_job_free_resources(struct amdgpu_job *job); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c index dda29132dfb2..6b7d66b6d4cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c @@ -121,10 +121,12 @@ static void amdgpu_jpeg_idle_work_handler(struct work_struct *work) fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec[j]); } - if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt)) + if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt)) { + mutex_lock(&adev->jpeg.jpeg_pg_lock); amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG, AMD_PG_STATE_GATE); - else + mutex_unlock(&adev->jpeg.jpeg_pg_lock); + } else schedule_delayed_work(&adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT); } @@ -194,7 +196,8 @@ static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle, int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + AMDGPU_IB_POOL_DIRECT, &job, + AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); if (r) return r; @@ -368,7 +371,7 @@ static int amdgpu_debugfs_jpeg_sched_mask_set(void *data, u64 val) for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { ring = &adev->jpeg.inst[i].ring_dec[j]; - if (val & (1 << ((i * adev->jpeg.num_jpeg_rings) + j))) + if (val & (BIT_ULL(1) << ((i * adev->jpeg.num_jpeg_rings) + j))) ring->sched.ready = true; else ring->sched.ready = false; @@ -463,7 +466,8 @@ int amdgpu_jpeg_reg_dump_init(struct amdgpu_device *adev, adev->jpeg.ip_dump = kcalloc(adev->jpeg.num_jpeg_inst * count, sizeof(uint32_t), GFP_KERNEL); if (!adev->jpeg.ip_dump) { - DRM_ERROR("Failed to allocate memory for JPEG IP Dump\n"); + 
dev_err(adev->dev, + "Failed to allocate memory for JPEG IP Dump\n"); return -ENOMEM; } adev->jpeg.reg_list = reg; @@ -536,3 +540,68 @@ void amdgpu_jpeg_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_pri drm_printf(p, "\nInactive Instance:JPEG%d\n", i); } } + +static inline bool amdgpu_jpeg_reg_valid(u32 reg) +{ + if (reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END || + (reg >= JPEG_ATOMIC_RANGE_START && reg <= JPEG_ATOMIC_RANGE_END)) + return false; + else + return true; +} + +/** + * amdgpu_jpeg_dec_parse_cs - command submission parser + * + * @parser: Command submission parser context + * @job: the job to parse + * @ib: the IB to parse + * + * Parse the command stream, return -EINVAL for invalid packet, + * 0 otherwise + */ + +int amdgpu_jpeg_dec_parse_cs(struct amdgpu_cs_parser *parser, + struct amdgpu_job *job, + struct amdgpu_ib *ib) +{ + u32 i, reg, res, cond, type; + struct amdgpu_device *adev = parser->adev; + + for (i = 0; i < ib->length_dw ; i += 2) { + reg = CP_PACKETJ_GET_REG(ib->ptr[i]); + res = CP_PACKETJ_GET_RES(ib->ptr[i]); + cond = CP_PACKETJ_GET_COND(ib->ptr[i]); + type = CP_PACKETJ_GET_TYPE(ib->ptr[i]); + + if (res) /* only support 0 at the moment */ + return -EINVAL; + + switch (type) { + case PACKETJ_TYPE0: + if (cond != PACKETJ_CONDITION_CHECK0 || + !amdgpu_jpeg_reg_valid(reg)) { + dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]); + return -EINVAL; + } + break; + case PACKETJ_TYPE3: + if (cond != PACKETJ_CONDITION_CHECK3 || + !amdgpu_jpeg_reg_valid(reg)) { + dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]); + return -EINVAL; + } + break; + case PACKETJ_TYPE6: + if (ib->ptr[i] == CP_PACKETJ_NOP) + continue; + dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]); + return -EINVAL; + default: + dev_err(adev->dev, "Unknown packet type %d !\n", type); + return -EINVAL; + } + } + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h index 4f0775e39b54..346ae0ab09d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h @@ -25,11 +25,18 @@ #define __AMDGPU_JPEG_H__ #include "amdgpu_ras.h" +#include "amdgpu_cs.h" #define AMDGPU_MAX_JPEG_INSTANCES 4 #define AMDGPU_MAX_JPEG_RINGS 10 #define AMDGPU_MAX_JPEG_RINGS_4_0_3 8 +#define JPEG_REG_RANGE_START 0x4000 +#define JPEG_REG_RANGE_END 0x41c2 +#define JPEG_ATOMIC_RANGE_START 0x4120 +#define JPEG_ATOMIC_RANGE_END 0x412A + + #define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0) #define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1) @@ -170,5 +177,8 @@ int amdgpu_jpeg_reg_dump_init(struct amdgpu_device *adev, const struct amdgpu_hwip_reg_entry *reg, u32 count); void amdgpu_jpeg_dump_ip_state(struct amdgpu_ip_block *ip_block); void amdgpu_jpeg_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p); +int amdgpu_jpeg_dec_parse_cs(struct amdgpu_cs_parser *parser, + struct amdgpu_job *job, + struct amdgpu_ib *ib); #endif /*__AMDGPU_JPEG_H__*/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index d2ce7d86dbc8..8676400834fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -91,7 +91,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev) if (adev->rmmio == NULL) return; - if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_UNLOAD)) + if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DRV_UNLOAD)) DRM_WARN("smart shift update failed\n"); amdgpu_acpi_fini(adev); @@ -161,7 +161,7 @@ int 
amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) if (acpi_status) dev_dbg(dev->dev, "Error during ACPI methods call\n"); - if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD)) + if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DRV_LOAD)) DRM_WARN("smart shift update failed\n"); out: @@ -399,6 +399,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, uint32_t ib_size_alignment = 0; enum amd_ip_block_type type; unsigned int num_rings = 0; + uint32_t num_slots = 0; unsigned int i, j; if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT) @@ -411,6 +412,12 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, if (adev->gfx.gfx_ring[i].sched.ready && !adev->gfx.gfx_ring[i].no_user_submission) ++num_rings; + + if (!adev->gfx.disable_uq) { + for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++) + num_slots += hweight32(adev->mes.gfx_hqd_mask[i]); + } + ib_start_alignment = 32; ib_size_alignment = 32; break; @@ -420,6 +427,12 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, if (adev->gfx.compute_ring[i].sched.ready && !adev->gfx.compute_ring[i].no_user_submission) ++num_rings; + + if (!adev->sdma.disable_uq) { + for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) + num_slots += hweight32(adev->mes.compute_hqd_mask[i]); + } + ib_start_alignment = 32; ib_size_alignment = 32; break; @@ -429,6 +442,12 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, if (adev->sdma.instance[i].ring.sched.ready && !adev->sdma.instance[i].ring.no_user_submission) ++num_rings; + + if (!adev->gfx.disable_uq) { + for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) + num_slots += hweight32(adev->mes.sdma_hqd_mask[i]); + } + ib_start_alignment = 256; ib_size_alignment = 4; break; @@ -570,6 +589,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, } result->capabilities_flags = 0; result->available_rings = (1 << num_rings) - 1; + result->userq_num_slots = num_slots; result->ib_start_alignment = ib_start_alignment; result->ib_size_alignment = ib_size_alignment; return 0; @@ -919,6 +939,10 @@ out: if (adev->gfx.config.ta_cntl2_truncate_coord_mode) dev_info->ids_flags |= AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD; + /* Gang submit is not supported under SRIOV currently */ + if (!amdgpu_sriov_vf(adev)) + dev_info->ids_flags |= AMDGPU_IDS_FLAGS_GANG_SUBMIT; + if (amdgpu_passthrough(adev)) dev_info->ids_flags |= (AMDGPU_IDS_FLAGS_MODE_PT << AMDGPU_IDS_FLAGS_MODE_SHIFT) & @@ -1395,6 +1419,8 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) if (r) goto error_pasid; + amdgpu_debugfs_vm_init(file_priv); + r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id); if (r) goto error_pasid; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 6fa9fa11c8f3..5bf9be073cdd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -47,7 +47,7 @@ static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev) /* Bitmap for dynamic allocation of kernel doorbells */ mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL); if (!mes->doorbell_bitmap) { - DRM_ERROR("Failed to allocate MES doorbell bitmap\n"); + dev_err(adev->dev, "Failed to allocate MES doorbell bitmap\n"); return -ENOMEM; } @@ -191,6 +191,20 @@ int amdgpu_mes_init(struct amdgpu_device *adev) if (r) goto error_doorbell; + if (adev->mes.hung_queue_db_array_size) { + r = amdgpu_bo_create_kernel(adev, + adev->mes.hung_queue_db_array_size * sizeof(u32), + PAGE_SIZE, + 
AMDGPU_GEM_DOMAIN_GTT, + &adev->mes.hung_queue_db_array_gpu_obj, + &adev->mes.hung_queue_db_array_gpu_addr, + &adev->mes.hung_queue_db_array_cpu_addr); + if (r) { + dev_warn(adev->dev, "failed to create MES hung db array buffer (%d)", r); + goto error_doorbell; + } + } + return 0; error_doorbell: @@ -216,6 +230,10 @@ void amdgpu_mes_fini(struct amdgpu_device *adev) { int i; + amdgpu_bo_free_kernel(&adev->mes.hung_queue_db_array_gpu_obj, + &adev->mes.hung_queue_db_array_gpu_addr, + &adev->mes.hung_queue_db_array_cpu_addr); + amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj, &adev->mes.event_log_gpu_addr, &adev->mes.event_log_cpu_addr); @@ -256,7 +274,7 @@ int amdgpu_mes_suspend(struct amdgpu_device *adev) r = adev->mes.funcs->suspend_gang(&adev->mes, &input); amdgpu_mes_unlock(&adev->mes); if (r) - DRM_ERROR("failed to suspend all gangs"); + dev_err(adev->dev, "failed to suspend all gangs"); return r; } @@ -280,7 +298,7 @@ int amdgpu_mes_resume(struct amdgpu_device *adev) r = adev->mes.funcs->resume_gang(&adev->mes, &input); amdgpu_mes_unlock(&adev->mes); if (r) - DRM_ERROR("failed to resume all gangs"); + dev_err(adev->dev, "failed to resume all gangs"); return r; } @@ -304,7 +322,7 @@ int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev, r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input); amdgpu_mes_unlock(&adev->mes); if (r) - DRM_ERROR("failed to map legacy queue\n"); + dev_err(adev->dev, "failed to map legacy queue\n"); return r; } @@ -329,7 +347,7 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev, r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input); amdgpu_mes_unlock(&adev->mes); if (r) - DRM_ERROR("failed to unmap legacy queue\n"); + dev_err(adev->dev, "failed to unmap legacy queue\n"); return r; } @@ -361,7 +379,54 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev, r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input); amdgpu_mes_unlock(&adev->mes); if (r) - DRM_ERROR("failed to reset legacy queue\n"); + dev_err(adev->dev, "failed to reset legacy queue\n"); + + return r; +} + +int amdgpu_mes_get_hung_queue_db_array_size(struct amdgpu_device *adev) +{ + return adev->mes.hung_queue_db_array_size; +} + +int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev, + int queue_type, + bool detect_only, + unsigned int *hung_db_num, + u32 *hung_db_array) + +{ + struct mes_detect_and_reset_queue_input input; + u32 *db_array = adev->mes.hung_queue_db_array_cpu_addr; + int r, i; + + if (!hung_db_num || !hung_db_array) + return -EINVAL; + + if ((queue_type != AMDGPU_RING_TYPE_GFX) && + (queue_type != AMDGPU_RING_TYPE_COMPUTE) && + (queue_type != AMDGPU_RING_TYPE_SDMA)) + return -EINVAL; + + /* Clear the doorbell array before detection */ + memset(adev->mes.hung_queue_db_array_cpu_addr, 0, + adev->mes.hung_queue_db_array_size * sizeof(u32)); + input.queue_type = queue_type; + input.detect_only = detect_only; + + r = adev->mes.funcs->detect_and_reset_hung_queues(&adev->mes, + &input); + if (r) { + dev_err(adev->dev, "failed to detect and reset\n"); + } else { + *hung_db_num = 0; + for (i = 0; i < adev->mes.hung_queue_db_array_size; i++) { + if (db_array[i] != AMDGPU_MES_INVALID_DB_OFFSET) { + hung_db_array[i] = db_array[i]; + *hung_db_num += 1; + } + } + } return r; } @@ -469,7 +534,8 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev, int r; if (!adev->mes.funcs->misc_op) { - DRM_ERROR("mes set shader debugger is not supported!\n"); + dev_err(adev->dev, + "mes set shader debugger is not 
supported!\n"); return -EINVAL; } @@ -493,7 +559,7 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev, r = adev->mes.funcs->misc_op(&adev->mes, &op_input); if (r) - DRM_ERROR("failed to set_shader_debugger\n"); + dev_err(adev->dev, "failed to set_shader_debugger\n"); amdgpu_mes_unlock(&adev->mes); @@ -507,7 +573,8 @@ int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev, int r; if (!adev->mes.funcs->misc_op) { - DRM_ERROR("mes flush shader debugger is not supported!\n"); + dev_err(adev->dev, + "mes flush shader debugger is not supported!\n"); return -EINVAL; } @@ -519,7 +586,7 @@ int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev, r = adev->mes.funcs->misc_op(&adev->mes, &op_input); if (r) - DRM_ERROR("failed to set_shader_debugger\n"); + dev_err(adev->dev, "failed to set_shader_debugger\n"); amdgpu_mes_unlock(&adev->mes); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index c0d2c195fe2e..6b506fc72f58 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -41,6 +41,7 @@ #define AMDGPU_MES_API_VERSION_MASK 0x00fff000 #define AMDGPU_MES_FEAT_VERSION_MASK 0xff000000 #define AMDGPU_MES_MSCRATCH_SIZE 0x40000 +#define AMDGPU_MES_INVALID_DB_OFFSET 0xffffffff enum amdgpu_mes_priority_level { AMDGPU_MES_PRIORITY_LEVEL_LOW = 0, @@ -147,6 +148,10 @@ struct amdgpu_mes { uint64_t resource_1_gpu_addr[AMDGPU_MAX_MES_PIPES]; void *resource_1_addr[AMDGPU_MAX_MES_PIPES]; + int hung_queue_db_array_size; + struct amdgpu_bo *hung_queue_db_array_gpu_obj; + uint64_t hung_queue_db_array_gpu_addr; + void *hung_queue_db_array_cpu_addr; }; struct amdgpu_mes_gang { @@ -280,6 +285,18 @@ struct mes_reset_queue_input { bool is_kq; }; +struct mes_detect_and_reset_queue_input { + uint32_t queue_type; + bool detect_only; +}; + +struct mes_inv_tlbs_pasid_input { + uint32_t xcc_id; + uint16_t pasid; + uint8_t hub_id; + uint8_t flush_type; +}; + enum mes_misc_opcode { MES_MISC_OP_WRITE_REG, MES_MISC_OP_READ_REG, @@ -367,6 +384,13 @@ struct amdgpu_mes_funcs { int (*reset_hw_queue)(struct amdgpu_mes *mes, struct mes_reset_queue_input *input); + + int (*detect_and_reset_hung_queues)(struct amdgpu_mes *mes, + struct mes_detect_and_reset_queue_input *input); + + + int (*invalidate_tlbs_pasid)(struct amdgpu_mes *mes, + struct mes_inv_tlbs_pasid_input *input); }; #define amdgpu_mes_kiq_hw_init(adev) (adev)->mes.kiq_hw_init((adev)) @@ -390,6 +414,13 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev, unsigned int vmid, bool use_mmio); +int amdgpu_mes_get_hung_queue_db_array_size(struct amdgpu_device *adev); +int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev, + int queue_type, + bool detect_only, + unsigned int *hung_db_num, + u32 *hung_db_array); + uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg); int amdgpu_mes_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t val); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 6da4f946cac0..20460cfd09bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -496,8 +496,6 @@ struct amdgpu_crtc { struct drm_connector *connector; /* for dpm */ u32 line_time; - u32 wm_low; - u32 wm_high; u32 lb_vblank_lead_lines; struct drm_display_mode hw_mode; /* for virtual dce */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c index d085687a47ea..a974265837f0 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c @@ -53,6 +53,16 @@ u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev) return 0; } +bool amdgpu_nbio_is_replay_cnt_supported(struct amdgpu_device *adev) +{ + if (amdgpu_sriov_vf(adev) || !adev->asic_funcs || + !adev->asic_funcs->get_pcie_replay_count || + (!adev->nbio.funcs || !adev->nbio.funcs->get_pcie_replay_count)) + return false; + + return true; +} + int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) { int r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h index 79c2f807b9fe..b528de6a01f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h @@ -119,4 +119,6 @@ int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev); int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block); u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev); +bool amdgpu_nbio_is_replay_cnt_supported(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 73403744331a..e08f58de4b17 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -32,6 +32,7 @@ #include <linux/list.h> #include <linux/slab.h> #include <linux/dma-buf.h> +#include <linux/export.h> #include <drm/drm_drv.h> #include <drm/amdgpu_drm.h> @@ -62,7 +63,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo) amdgpu_bo_kunmap(bo); - if (bo->tbo.base.import_attach) + if (drm_gem_is_imported(&bo->tbo.base)) drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg); drm_gem_object_release(&bo->tbo.base); amdgpu_bo_unref(&bo->parent); @@ -152,6 +153,14 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) c++; } + if (domain & AMDGPU_GEM_DOMAIN_MMIO_REMAP) { + places[c].fpfn = 0; + places[c].lpfn = 0; + places[c].mem_type = AMDGPU_PL_MMIO_REMAP; + places[c].flags = 0; + c++; + } + if (domain & AMDGPU_GEM_DOMAIN_GTT) { places[c].fpfn = 0; places[c].lpfn = 0; @@ -351,7 +360,6 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev, return 0; } -EXPORT_SYMBOL(amdgpu_bo_create_kernel); /** * amdgpu_bo_create_isp_user - create user BO for isp @@ -420,7 +428,6 @@ error_unreserve: return r; } -EXPORT_SYMBOL(amdgpu_bo_create_isp_user); /** * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location @@ -524,7 +531,6 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr, if (cpu_addr) *cpu_addr = NULL; } -EXPORT_SYMBOL(amdgpu_bo_free_kernel); /** * amdgpu_bo_free_isp_user - free BO for isp use @@ -547,7 +553,6 @@ void amdgpu_bo_free_isp_user(struct amdgpu_bo *bo) } amdgpu_bo_unref(&bo); } -EXPORT_SYMBOL(amdgpu_bo_free_isp_user); /* Validate bo size is bit bigger than the request domain */ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev, @@ -939,7 +944,7 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain) domain = bo->preferred_domains & domain; /* A shared bo cannot be migrated to VRAM */ - if (bo->tbo.base.import_attach) { + if (drm_gem_is_imported(&bo->tbo.base)) { if (domain & AMDGPU_GEM_DOMAIN_GTT) domain = AMDGPU_GEM_DOMAIN_GTT; else @@ -967,7 +972,7 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain) */ domain = amdgpu_bo_get_preferred_domain(adev, domain); - if (bo->tbo.base.import_attach) + if (drm_gem_is_imported(&bo->tbo.base)) 
dma_buf_pin(bo->tbo.base.import_attach); /* force to pin into visible video ram */ @@ -1018,7 +1023,7 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo) if (bo->tbo.pin_count) return; - if (bo->tbo.base.import_attach) + if (drm_gem_is_imported(&bo->tbo.base)) dma_buf_unpin(bo->tbo.base.import_attach); if (bo->tbo.resource->mem_type == TTM_PL_VRAM) { @@ -1263,7 +1268,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, amdgpu_bo_kunmap(abo); - if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach && + if (abo->tbo.base.dma_buf && !drm_gem_is_imported(&abo->tbo.base) && old_mem && old_mem->mem_type != TTM_PL_SYSTEM) dma_buf_move_notify(abo->tbo.base.dma_buf); @@ -1316,7 +1321,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo) if (r) goto out; - r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true); + r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true, + AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE); if (WARN_ON(r)) goto out; @@ -1473,6 +1479,26 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) } /** + * amdgpu_bo_fb_aper_addr - return FB aperture GPU offset of the VRAM bo + * @bo: amdgpu VRAM buffer object for which we query the offset + * + * Returns: + * current FB aperture GPU offset of the object. + */ +u64 amdgpu_bo_fb_aper_addr(struct amdgpu_bo *bo) +{ + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + uint64_t offset, fb_base; + + WARN_ON_ONCE(bo->tbo.resource->mem_type != TTM_PL_VRAM); + + fb_base = adev->gmc.fb_start; + fb_base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; + offset = (bo->tbo.resource->start << PAGE_SHIFT) + fb_base; + return amdgpu_gmc_sign_extend(offset); +} + +/** * amdgpu_bo_gpu_offset_no_check - return GPU offset of bo * @bo: amdgpu object for which we query the offset * @@ -1528,6 +1554,8 @@ uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo) return AMDGPU_PL_OA; case AMDGPU_GEM_DOMAIN_DOORBELL: return AMDGPU_PL_DOORBELL; + case AMDGPU_GEM_DOMAIN_MMIO_REMAP: + return AMDGPU_PL_MMIO_REMAP; default: return TTM_PL_SYSTEM; } @@ -1611,6 +1639,9 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m) case AMDGPU_PL_DOORBELL: placement = "DOORBELL"; break; + case AMDGPU_PL_MMIO_REMAP: + placement = "MMIO REMAP"; + break; case TTM_PL_SYSTEM: default: placement = "CPU"; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 375448627f7b..656b8a931dae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -69,7 +69,7 @@ struct amdgpu_bo_va_mapping { uint64_t last; uint64_t __subtree_last; uint64_t offset; - uint64_t flags; + uint32_t flags; }; /* User space allocated BO in a VM */ @@ -167,6 +167,8 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type) return AMDGPU_GEM_DOMAIN_OA; case AMDGPU_PL_DOORBELL: return AMDGPU_GEM_DOMAIN_DOORBELL; + case AMDGPU_PL_MMIO_REMAP: + return AMDGPU_GEM_DOMAIN_MMIO_REMAP; default: break; } @@ -304,6 +306,7 @@ int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv, bool intr); int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr); u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo); +u64 amdgpu_bo_fb_aper_addr(struct amdgpu_bo *bo); u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo); uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo); uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index c14f63cefe67..1578e4e2bf84 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -252,6 +252,7 @@ static int psp_early_init(struct amdgpu_ip_block *ip_block) break; case IP_VERSION(14, 0, 2): case IP_VERSION(14, 0, 3): + adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev); psp_v14_0_set_psp_funcs(psp); break; case IP_VERSION(14, 0, 5): @@ -447,7 +448,7 @@ static int psp_sw_init(struct amdgpu_ip_block *ip_block) psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); if (!psp->cmd) { dev_err(adev->dev, "Failed to allocate memory to command buffer!\n"); - ret = -ENOMEM; + return -ENOMEM; } adev->psp.xgmi_context.supports_extended_data = @@ -574,9 +575,11 @@ static int psp_sw_fini(struct amdgpu_ip_block *ip_block) return 0; } -int psp_wait_for(struct psp_context *psp, uint32_t reg_index, - uint32_t reg_val, uint32_t mask, bool check_changed) +int psp_wait_for(struct psp_context *psp, uint32_t reg_index, uint32_t reg_val, + uint32_t mask, uint32_t flags) { + bool check_changed = flags & PSP_WAITREG_CHANGED; + bool verbose = !(flags & PSP_WAITREG_NOVERBOSE); uint32_t val; int i; struct amdgpu_device *adev = psp->adev; @@ -596,6 +599,11 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index, udelay(1); } + if (verbose) + dev_err(adev->dev, + "psp reg (0x%x) wait timed out, mask: %x, read: %x exp: %x", + reg_index, mask, val, reg_val); + return -ETIME; } @@ -654,6 +662,14 @@ static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id) return "BOOT_CFG"; case GFX_CMD_ID_CONFIG_SQ_PERFMON: return "CONFIG_SQ_PERFMON"; + case GFX_CMD_ID_FB_FW_RESERV_ADDR: + return "FB_FW_RESERV_ADDR"; + case GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR: + return "FB_FW_RESERV_EXT_ADDR"; + case GFX_CMD_ID_SRIOV_SPATIAL_PART: + return "SPATIAL_PARTITION"; + case GFX_CMD_ID_FB_NPS_MODE: + return "NPS_MODE_CHANGE"; default: return "UNKNOWN CMD"; } @@ -865,12 +881,12 @@ static int psp_tmr_init(struct psp_context *psp) pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT, - AMDGPU_HAS_VRAM(psp->adev) ? 
- AMDGPU_GEM_DOMAIN_VRAM : - AMDGPU_GEM_DOMAIN_GTT, + AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM, &psp->tmr_bo, &psp->tmr_mc_addr, pptr); } + if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && psp->tmr_bo) + psp->tmr_mc_addr = amdgpu_bo_fb_aper_addr(psp->tmr_bo); return ret; } @@ -984,6 +1000,106 @@ int psp_get_fw_attestation_records_addr(struct psp_context *psp, return ret; } +static int psp_get_fw_reservation_info(struct psp_context *psp, + uint32_t cmd_id, + uint64_t *addr, + uint32_t *size) +{ + int ret; + uint32_t status; + struct psp_gfx_cmd_resp *cmd; + + cmd = acquire_psp_cmd_buf(psp); + + cmd->cmd_id = cmd_id; + + ret = psp_cmd_submit_buf(psp, NULL, cmd, + psp->fence_buf_mc_addr); + if (ret) { + release_psp_cmd_buf(psp); + return ret; + } + + status = cmd->resp.status; + if (status == PSP_ERR_UNKNOWN_COMMAND) { + release_psp_cmd_buf(psp); + *addr = 0; + *size = 0; + return 0; + } + + *addr = (uint64_t)cmd->resp.uresp.fw_reserve_info.reserve_base_address_hi << 32 | + cmd->resp.uresp.fw_reserve_info.reserve_base_address_lo; + *size = cmd->resp.uresp.fw_reserve_info.reserve_size; + + release_psp_cmd_buf(psp); + + return 0; +} + +int psp_update_fw_reservation(struct psp_context *psp) +{ + int ret; + uint64_t reserv_addr, reserv_addr_ext; + uint32_t reserv_size, reserv_size_ext, mp0_ip_ver; + struct amdgpu_device *adev = psp->adev; + + mp0_ip_ver = amdgpu_ip_version(adev, MP0_HWIP, 0); + + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + switch (mp0_ip_ver) { + case IP_VERSION(14, 0, 2): + if (adev->psp.sos.fw_version < 0x3b0e0d) + return 0; + break; + + case IP_VERSION(14, 0, 3): + if (adev->psp.sos.fw_version < 0x3a0e14) + return 0; + break; + + default: + return 0; + } + + ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size); + if (ret) + return ret; + ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR, &reserv_addr_ext, &reserv_size_ext); + if (ret) + return ret; + + if (reserv_addr != adev->gmc.real_vram_size - reserv_size) { + dev_warn(adev->dev, "reserve fw region is not valid!\n"); + return 0; + } + + amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL); + + reserv_size = roundup(reserv_size, SZ_1M); + + ret = amdgpu_bo_create_kernel_at(adev, reserv_addr, reserv_size, &adev->mman.fw_reserved_memory, NULL); + if (ret) { + dev_err(adev->dev, "reserve fw region failed(%d)!\n", ret); + amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL); + return ret; + } + + reserv_size_ext = roundup(reserv_size_ext, SZ_1M); + + ret = amdgpu_bo_create_kernel_at(adev, reserv_addr_ext, reserv_size_ext, + &adev->mman.fw_reserved_memory_extend, NULL); + if (ret) { + dev_err(adev->dev, "reserve extend fw region failed(%d)!\n", ret); + amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL, NULL); + return ret; + } + + return 0; +} + static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg) { struct psp_context *psp = &adev->psp; @@ -1270,6 +1386,11 @@ int psp_ta_load(struct psp_context *psp, struct ta_context *context) psp_copy_fw(psp, context->bin_desc.start_addr, context->bin_desc.size_bytes); + if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && + context->mem_context.shared_bo) + context->mem_context.shared_mc_addr = + amdgpu_bo_fb_aper_addr(context->mem_context.shared_bo); + psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context); ret = psp_cmd_submit_buf(psp, NULL, cmd, @@ -2337,11 +2458,27 @@ bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev) return false; 
} +static void psp_update_gpu_addresses(struct amdgpu_device *adev) +{ + struct psp_context *psp = &adev->psp; + + if (psp->cmd_buf_bo && psp->cmd_buf_mem) { + psp->fw_pri_mc_addr = amdgpu_bo_fb_aper_addr(psp->fw_pri_bo); + psp->fence_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->fence_buf_bo); + psp->cmd_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->cmd_buf_bo); + } + if (adev->firmware.rbuf && psp->km_ring.ring_mem) + psp->km_ring.ring_mem_mc_addr = amdgpu_bo_fb_aper_addr(adev->firmware.rbuf); +} + static int psp_hw_start(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; int ret; + if (amdgpu_virt_xgmi_migrate_enabled(adev)) + psp_update_gpu_addresses(adev); + if (!amdgpu_sriov_vf(adev)) { if ((is_psp_fw_valid(psp->kdb)) && (psp->funcs->bootloader_load_kdb != NULL)) { @@ -2440,6 +2577,14 @@ static int psp_hw_start(struct psp_context *psp) return ret; } + if (!amdgpu_in_reset(adev) && !adev->in_suspend) { + ret = psp_update_fw_reservation(psp); + if (ret) { + dev_err(adev->dev, "update fw reservation failed!\n"); + return ret; + } + } + if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) goto skip_pin_bo; @@ -4125,8 +4270,8 @@ rel_buf: static const struct bin_attribute psp_vbflash_bin_attr = { .attr = {.name = "psp_vbflash", .mode = 0660}, .size = 0, - .write_new = amdgpu_psp_vbflash_write, - .read_new = amdgpu_psp_vbflash_read, + .write = amdgpu_psp_vbflash_write, + .read = amdgpu_psp_vbflash_read, }; /** @@ -4189,7 +4334,7 @@ static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj, const struct attribute_group amdgpu_flash_attr_group = { .attrs = flash_attrs, - .bin_attrs_new = bin_flash_attrs, + .bin_attrs = bin_flash_attrs, .is_bin_visible = amdgpu_bin_flash_attr_is_visible, .is_visible = amdgpu_flash_attr_is_visible, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 428adc7f741d..237b624aa51c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -51,6 +51,17 @@ #define C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_HI 0x10 #define C2PMSG_CMD_SPI_GET_FLASH_IMAGE 0x11 +/* Command register bit 31 set to indicate readiness */ +#define MBOX_TOS_READY_FLAG (GFX_FLAG_RESPONSE) +#define MBOX_TOS_READY_MASK (GFX_CMD_RESPONSE_MASK | GFX_CMD_STATUS_MASK) + +/* Values to check for a successful GFX_CMD response wait. 
Check against + * both status bits and response state - helps to detect a command failure + * or other unexpected cases like a device drop reading all 0xFFs + */ +#define MBOX_TOS_RESP_FLAG (GFX_FLAG_RESPONSE) +#define MBOX_TOS_RESP_MASK (GFX_CMD_RESPONSE_MASK | GFX_CMD_STATUS_MASK) + extern const struct attribute_group amdgpu_flash_attr_group; enum psp_shared_mem_size { @@ -123,6 +134,9 @@ enum psp_reg_prog_id { PSP_REG_LAST }; +#define PSP_WAITREG_CHANGED BIT(0) /* check if the value has changed */ +#define PSP_WAITREG_NOVERBOSE BIT(1) /* No error verbose */ + struct psp_funcs { int (*init_microcode)(struct psp_context *psp); int (*wait_for_bootloader)(struct psp_context *psp); @@ -521,8 +535,8 @@ extern const struct amdgpu_ip_block_version psp_v13_0_ip_block; extern const struct amdgpu_ip_block_version psp_v13_0_4_ip_block; extern const struct amdgpu_ip_block_version psp_v14_0_ip_block; -extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index, - uint32_t field_val, uint32_t mask, bool check_changed); +int psp_wait_for(struct psp_context *psp, uint32_t reg_index, + uint32_t field_val, uint32_t mask, uint32_t flags); extern int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index, uint32_t field_val, uint32_t mask, uint32_t msec_timeout); @@ -588,7 +602,7 @@ int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name); int psp_get_fw_attestation_records_addr(struct psp_context *psp, uint64_t *output_ptr); - +int psp_update_fw_reservation(struct psp_context *psp); int psp_load_fw_list(struct psp_context *psp, struct amdgpu_firmware_info **ucode_list, int ucode_count); void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c index 38face981c3e..6e8aad91bcd3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c @@ -171,13 +171,9 @@ static ssize_t ta_if_load_debugfs_write(struct file *fp, const char *buf, size_t copy_pos += sizeof(uint32_t); - ta_bin = kzalloc(ta_bin_len, GFP_KERNEL); - if (!ta_bin) - return -ENOMEM; - if (copy_from_user((void *)ta_bin, &buf[copy_pos], ta_bin_len)) { - ret = -EFAULT; - goto err_free_bin; - } + ta_bin = memdup_user(&buf[copy_pos], ta_bin_len); + if (IS_ERR(ta_bin)) + return PTR_ERR(ta_bin); /* Set TA context and functions */ set_ta_context_funcs(psp, ta_type, &context); @@ -327,13 +323,9 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size return -EFAULT; copy_pos += sizeof(uint32_t); - shared_buf = kzalloc(shared_buf_len, GFP_KERNEL); - if (!shared_buf) - return -ENOMEM; - if (copy_from_user((void *)shared_buf, &buf[copy_pos], shared_buf_len)) { - ret = -EFAULT; - goto err_free_shared_buf; - } + shared_buf = memdup_user(&buf[copy_pos], shared_buf_len); + if (IS_ERR(shared_buf)) + return PTR_ERR(shared_buf); set_ta_context_funcs(psp, ta_type, &context); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index de0944947eaf..e0ee21150860 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -122,12 +122,15 @@ const char *get_ras_block_str(struct ras_common_if *ras_block) /* typical ECC bad page rate is 1 bad page per 100MB VRAM */ #define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL) -#define MAX_UMC_POISON_POLLING_TIME_ASYNC 300 //ms +#define MAX_UMC_POISON_POLLING_TIME_ASYNC 10 #define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100 //ms 
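The psp_wait_for() rework above folds the old check_changed boolean into a flags word (PSP_WAITREG_CHANGED, PSP_WAITREG_NOVERBOSE). A minimal sketch of what converted call sites could look like; the register index SOME_C2PMSG_REG is invented for illustration and is not taken from this patch:

	/* Old form: wait until the masked register equals field_val and
	 * warn on timeout:
	 *	psp_wait_for(psp, SOME_C2PMSG_REG, 0x80000000, 0x80000000, false);
	 * New form: flags == 0 keeps that behaviour.
	 */
	psp_wait_for(psp, SOME_C2PMSG_REG, 0x80000000, 0x80000000, 0);

	/* Wait only until the masked value changes, and suppress the timeout
	 * message (PSP_WAITREG_NOVERBOSE maps to verbose == false in the
	 * psp_wait_for() body above).
	 */
	psp_wait_for(psp, SOME_C2PMSG_REG, 0, 0xFFFFFFFF,
		     PSP_WAITREG_CHANGED | PSP_WAITREG_NOVERBOSE);
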
#define MAX_FLUSH_RETIRE_DWORK_TIMES 100 +#define BYPASS_ALLOCATED_ADDRESS 0x0 +#define BYPASS_INITIALIZATION_ADDRESS 0x1 + enum amdgpu_ras_retire_page_reservation { AMDGPU_RAS_RETIRE_PAGE_RESERVED, AMDGPU_RAS_RETIRE_PAGE_PENDING, @@ -136,10 +139,14 @@ enum amdgpu_ras_retire_page_reservation { atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0); -static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, +static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, uint64_t addr); -static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, +static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev, uint64_t addr); + +static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev); +static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev); + #ifdef CONFIG_X86_MCE_AMD static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev); struct mce_notifier_adev_list { @@ -169,18 +176,16 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre struct eeprom_table_record err_rec; int ret; - if ((address >= adev->gmc.mc_vram_size) || - (address >= RAS_UMC_INJECT_ADDR_LIMIT)) { + ret = amdgpu_ras_check_bad_page(adev, address); + if (ret == -EINVAL) { dev_warn(adev->dev, - "RAS WARN: input address 0x%llx is invalid.\n", - address); + "RAS WARN: input address 0x%llx is invalid.\n", + address); return -EINVAL; - } - - if (amdgpu_ras_check_bad_page(adev, address)) { + } else if (ret == 1) { dev_warn(adev->dev, - "RAS WARN: 0x%llx has already been marked as bad page!\n", - address); + "RAS WARN: 0x%llx has already been marked as bad page!\n", + address); return 0; } @@ -207,6 +212,56 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre return 0; } +static int amdgpu_check_address_validity(struct amdgpu_device *adev, + uint64_t address, uint64_t flags) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct amdgpu_vram_block_info blk_info; + uint64_t page_pfns[32] = {0}; + int i, ret, count; + bool hit = false; + + if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) + return 0; + + if (amdgpu_sriov_vf(adev)) { + if (amdgpu_virt_check_vf_critical_region(adev, address, &hit)) + return -EPERM; + return hit ? -EACCES : 0; + } + + if ((address >= adev->gmc.mc_vram_size) || + (address >= RAS_UMC_INJECT_ADDR_LIMIT)) + return -EFAULT; + + count = amdgpu_umc_lookup_bad_pages_in_a_row(adev, + address, page_pfns, ARRAY_SIZE(page_pfns)); + if (count <= 0) + return -EPERM; + + for (i = 0; i < count; i++) { + memset(&blk_info, 0, sizeof(blk_info)); + ret = amdgpu_vram_mgr_query_address_block_info(&adev->mman.vram_mgr, + page_pfns[i] << AMDGPU_GPU_PAGE_SHIFT, &blk_info); + if (!ret) { + /* The input address that needs to be checked is allocated by + * current calling process, so it is necessary to exclude + * the calling process. 
+ */ + if ((flags == BYPASS_ALLOCATED_ADDRESS) && + ((blk_info.task.pid != task_pid_nr(current)) || + strncmp(blk_info.task.comm, current->comm, TASK_COMM_LEN))) + return -EACCES; + else if ((flags == BYPASS_INITIALIZATION_ADDRESS) && + (blk_info.task.pid == con->init_task_pid) && + !strncmp(blk_info.task.comm, con->init_task_comm, TASK_COMM_LEN)) + return -EACCES; + } + } + + return 0; +} + static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -297,6 +352,8 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, op = 2; else if (strstr(str, "retire_page") != NULL) op = 3; + else if (strstr(str, "check_address") != NULL) + op = 4; else if (str[0] && str[1] && str[2] && str[3]) /* ascii string, but commands are not matched. */ return -EINVAL; @@ -311,6 +368,15 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, data->inject.address = address; return 0; + } else if (op == 4) { + if (sscanf(str, "%*s 0x%llx 0x%llx", &address, &value) != 2 && + sscanf(str, "%*s %llu %llu", &address, &value) != 2) + return -EINVAL; + + data->op = op; + data->inject.address = address; + data->inject.value = value; + return 0; } if (amdgpu_ras_find_block_id_by_name(block_name, &block_id)) @@ -500,6 +566,9 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, return size; else return ret; + } else if (data.op == 4) { + ret = amdgpu_check_address_validity(adev, data.inject.address, data.inject.value); + return ret ? ret : size; } if (!amdgpu_ras_is_supported(adev, data.head.block)) @@ -513,22 +582,16 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, ret = amdgpu_ras_feature_enable(adev, &data.head, 1); break; case 2: - if ((data.inject.address >= adev->gmc.mc_vram_size && - adev->gmc.mc_vram_size) || - (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) { - dev_warn(adev->dev, "RAS WARN: input address " - "0x%llx is invalid.", + /* umc ce/ue error injection for a bad page is not allowed */ + if (data.head.block == AMDGPU_RAS_BLOCK__UMC) + ret = amdgpu_ras_check_bad_page(adev, data.inject.address); + if (ret == -EINVAL) { + dev_warn(adev->dev, "RAS WARN: input address 0x%llx is invalid.", data.inject.address); - ret = -EINVAL; break; - } - - /* umc ce/ue error injection for a bad page is not allowed */ - if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) && - amdgpu_ras_check_bad_page(adev, data.inject.address)) { - dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has " - "already been marked as bad!\n", - data.inject.address); + } else if (ret == 1) { + dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has already been marked as bad!\n", + data.inject.address); break; } @@ -1107,6 +1170,9 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev, err_info->de_count, blk_name); } } else { + if (adev->debug_disable_ce_logs) + return; + for_each_ras_error(err_node, err_data) { err_info = &err_node->err_info; mcm_info = &err_info->mcm_info; @@ -2124,7 +2190,7 @@ static int amdgpu_ras_fs_init(struct amdgpu_device *adev) con->badpages_attr = bin_attr_gpu_vram_bad_pages; sysfs_bin_attr_init(&con->badpages_attr); bin_attrs[0] = &con->badpages_attr; - group.bin_attrs_new = bin_attrs; + group.bin_attrs = bin_attrs; } r = sysfs_create_group(&adev->dev->kobj, &group); @@ -2563,18 +2629,26 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, goto out; } - *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL); + *bps = kmalloc_array(data->count, sizeof(struct ras_badpage), GFP_KERNEL); if (!*bps) { 
ret = -ENOMEM; goto out; } for (; i < data->count; i++) { + if (!data->bps[i].ts) + continue; + (*bps)[i] = (struct ras_badpage){ .bp = data->bps[i].retired_page, .size = AMDGPU_GPU_PAGE_SIZE, .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED, }; + + if (amdgpu_ras_check_critical_address(adev, + data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT)) + continue; + status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr, data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT); if (status == -EBUSY) @@ -2583,7 +2657,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT; } - *count = data->count; + *count = con->bad_page_num; out: mutex_unlock(&con->recovery_lock); return ret; @@ -2635,6 +2709,7 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) struct amdgpu_device *adev = ras->adev; struct list_head device_list, *device_list_handle = NULL; struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); + unsigned int error_query_mode; enum ras_event_type type; if (hive) { @@ -2663,6 +2738,13 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) device_list_handle = &device_list; } + if (amdgpu_ras_get_error_query_mode(adev, &error_query_mode)) { + if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY) { + /* wait 500ms to ensure pmfw polling mca bank info done */ + msleep(500); + } + } + type = amdgpu_ras_get_fatal_error_event(adev); list_for_each_entry(remote_adev, device_list_handle, gmc.xgmi.head) { @@ -2719,7 +2801,7 @@ static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev, unsigned int old_space = data->count + data->space_left; unsigned int new_space = old_space + pages; unsigned int align_space = ALIGN(new_space, 512); - void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL); + void *bps = kmalloc_array(align_space, sizeof(*data->bps), GFP_KERNEL); if (!bps) { return -ENOMEM; @@ -2811,8 +2893,11 @@ static int __amdgpu_ras_restore_bad_pages(struct amdgpu_device *adev, for (j = 0; j < count; j++) { if (amdgpu_ras_check_bad_page_unlock(con, - bps[j].retired_page << AMDGPU_GPU_PAGE_SHIFT)) + bps[j].retired_page << AMDGPU_GPU_PAGE_SHIFT)) { + data->count++; + data->space_left--; continue; + } if (!data->space_left && amdgpu_ras_realloc_eh_data_space(adev, data, 256)) { @@ -2825,6 +2910,7 @@ static int __amdgpu_ras_restore_bad_pages(struct amdgpu_device *adev, sizeof(struct eeprom_table_record)); data->count++; data->space_left--; + con->bad_page_num++; } return 0; @@ -2854,6 +2940,13 @@ static int __amdgpu_ras_convert_rec_array_from_rom(struct amdgpu_device *adev, if (amdgpu_umc_pages_in_a_row(adev, err_data, bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT)) return -EINVAL; + for (i = 0; i < adev->umc.retire_unit; i++) { + err_data->err_addr[i].address = bps[0].address; + err_data->err_addr[i].mem_channel = bps[0].mem_channel; + err_data->err_addr[i].bank = bps[0].bank; + err_data->err_addr[i].err_type = bps[0].err_type; + err_data->err_addr[i].mcumc_id = bps[0].mcumc_id; + } } else { if (amdgpu_ras_mca2pa_by_idx(adev, &bps[0], err_data)) return -EINVAL; @@ -2885,6 +2978,7 @@ static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev, struct eeprom_table_record *bps, struct ras_err_data *err_data, enum amdgpu_memory_partition nps) { + int i = 0; enum amdgpu_memory_partition save_nps; save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK; @@ -2894,6 +2988,13 @@ static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev, if (amdgpu_umc_pages_in_a_row(adev, 
err_data, bps->retired_page << AMDGPU_GPU_PAGE_SHIFT)) return -EINVAL; + for (i = 0; i < adev->umc.retire_unit; i++) { + err_data->err_addr[i].address = bps->address; + err_data->err_addr[i].mem_channel = bps->mem_channel; + err_data->err_addr[i].bank = bps->bank; + err_data->err_addr[i].err_type = bps->err_type; + err_data->err_addr[i].mcumc_id = bps->mcumc_id; + } } else { if (bps->address) { if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data)) @@ -2956,7 +3057,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, ret = __amdgpu_ras_convert_rec_array_from_rom(adev, &bps[i], &err_data, nps); if (ret) - control->ras_num_bad_pages -= adev->umc.retire_unit; + con->bad_page_num -= adev->umc.retire_unit; i += (adev->umc.retire_unit - 1); } else { break; @@ -2970,8 +3071,10 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, ret = __amdgpu_ras_convert_rec_from_rom(adev, &bps[i], &err_data, nps); if (ret) - control->ras_num_bad_pages -= adev->umc.retire_unit; + con->bad_page_num -= adev->umc.retire_unit; } + + con->eh_data->count_saved = con->eh_data->count; } else { ret = __amdgpu_ras_restore_bad_pages(adev, bps, pages); } @@ -2994,7 +3097,7 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev, struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_err_handler_data *data; struct amdgpu_ras_eeprom_control *control; - int save_count, unit_num, bad_page_num, i; + int save_count, unit_num, i; if (!con || !con->eh_data) { if (new_cnt) @@ -3003,30 +3106,38 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev, return 0; } + if (!con->eeprom_control.is_eeprom_valid) { + dev_warn(adev->dev, + "Failed to save EEPROM table data because of EEPROM data corruption!"); + if (new_cnt) + *new_cnt = 0; + + return 0; + } + mutex_lock(&con->recovery_lock); control = &con->eeprom_control; data = con->eh_data; - bad_page_num = control->ras_num_bad_pages; - save_count = data->count - bad_page_num; + unit_num = data->count / adev->umc.retire_unit - control->ras_num_recs; + save_count = con->bad_page_num - control->ras_num_bad_pages; mutex_unlock(&con->recovery_lock); - unit_num = save_count / adev->umc.retire_unit; if (new_cnt) *new_cnt = unit_num; /* only new entries are saved */ - if (save_count > 0) { + if (unit_num > 0) { /*old asics only save pa to eeprom like before*/ if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) { if (amdgpu_ras_eeprom_append(control, - &data->bps[bad_page_num], save_count)) { + &data->bps[data->count_saved], unit_num)) { dev_err(adev->dev, "Failed to save EEPROM table data!"); return -EIO; } } else { for (i = 0; i < unit_num; i++) { if (amdgpu_ras_eeprom_append(control, - &data->bps[bad_page_num + + &data->bps[data->count_saved + i * adev->umc.retire_unit], 1)) { dev_err(adev->dev, "Failed to save EEPROM table data!"); return -EIO; @@ -3035,6 +3146,7 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev, } dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count); + data->count_saved = data->count; } return 0; @@ -3089,17 +3201,17 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev) } } + ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true); + if (ret) + goto out; + ret = amdgpu_ras_eeprom_check(control); if (ret) goto out; /* HW not usable */ - if (amdgpu_ras_is_rma(adev)) { + if (amdgpu_ras_is_rma(adev)) ret = -EHWPOISON; - goto out; - } - - ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true); } out: @@ -3107,18 +3219,24 @@ out: return ret; } -static bool 
amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, +static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, uint64_t addr) { struct ras_err_handler_data *data = con->eh_data; + struct amdgpu_device *adev = con->adev; int i; + if ((addr >= adev->gmc.mc_vram_size && + adev->gmc.mc_vram_size) || + (addr >= RAS_UMC_INJECT_ADDR_LIMIT)) + return -EINVAL; + addr >>= AMDGPU_GPU_PAGE_SHIFT; for (i = 0; i < data->count; i++) if (addr == data->bps[i].retired_page) - return true; + return 1; - return false; + return 0; } /* @@ -3126,11 +3244,11 @@ static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, * * Note: this check is only for umc block */ -static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, +static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev, uint64_t addr) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - bool ret = false; + int ret = 0; if (!con || !con->eh_data) return ret; @@ -3214,7 +3332,7 @@ static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log) INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL); ecc_log->de_queried_count = 0; - ecc_log->prev_de_queried_count = 0; + ecc_log->consumption_q_count = 0; } static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log) @@ -3234,7 +3352,7 @@ static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log) mutex_destroy(&ecc_log->lock); ecc_log->de_queried_count = 0; - ecc_log->prev_de_queried_count = 0; + ecc_log->consumption_q_count = 0; } static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con, @@ -3260,7 +3378,6 @@ static void amdgpu_ras_do_page_retirement(struct work_struct *work) page_retirement_dwork.work); struct amdgpu_device *adev = con->adev; struct ras_err_data err_data; - unsigned long err_cnt; /* If gpu reset is ongoing, delay retiring the bad pages */ if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) { @@ -3272,13 +3389,9 @@ static void amdgpu_ras_do_page_retirement(struct work_struct *work) amdgpu_ras_error_data_init(&err_data); amdgpu_umc_handle_bad_pages(adev, &err_data); - err_cnt = err_data.err_addr_cnt; amdgpu_ras_error_data_fini(&err_data); - if (err_cnt && amdgpu_ras_is_rma(adev)) - amdgpu_ras_reset_gpu(adev); - amdgpu_ras_schedule_retirement_dwork(con, AMDGPU_RAS_RETIRE_PAGE_INTERVAL); } @@ -3289,58 +3402,39 @@ static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev, int ret = 0; struct ras_ecc_log_info *ecc_log; struct ras_query_if info; - uint32_t timeout = 0; + u32 timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC; struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); - uint64_t de_queried_count; - uint32_t new_detect_count, total_detect_count; - uint32_t need_query_count = poison_creation_count; - bool query_data_timeout = false; + u64 de_queried_count; + u64 consumption_q_count; enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION; memset(&info, 0, sizeof(info)); info.head.block = AMDGPU_RAS_BLOCK__UMC; ecc_log = &ras->umc_ecc_log; - total_detect_count = 0; + ecc_log->de_queried_count = 0; + ecc_log->consumption_q_count = 0; + do { ret = amdgpu_ras_query_error_status_with_event(adev, &info, type); if (ret) return ret; de_queried_count = ecc_log->de_queried_count; - if (de_queried_count > ecc_log->prev_de_queried_count) { - new_detect_count = de_queried_count - ecc_log->prev_de_queried_count; - ecc_log->prev_de_queried_count = de_queried_count; - timeout = 0; - } else { - new_detect_count = 0; - } + consumption_q_count = ecc_log->consumption_q_count; - if (new_detect_count) { - 
total_detect_count += new_detect_count; - } else { - if (!timeout && need_query_count) - timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC; - - if (timeout) { - if (!--timeout) { - query_data_timeout = true; - break; - } - msleep(1); - } - } - } while (total_detect_count < need_query_count); + if (de_queried_count && consumption_q_count) + break; - if (query_data_timeout) { - dev_warn(adev->dev, "Can't find deferred error! count: %u\n", - (need_query_count - total_detect_count)); - return -ENOENT; - } + msleep(100); + } while (--timeout); - if (total_detect_count) + if (de_queried_count) schedule_delayed_work(&ras->page_retirement_dwork, 0); + if (amdgpu_ras_is_rma(adev) && atomic_cmpxchg(&ras->rma_in_recovery, 0, 1) == 0) + amdgpu_ras_reset_gpu(adev); + return 0; } @@ -3376,6 +3470,12 @@ static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev, reset_flags |= msg.reset; } + /* + * Try to ensure poison creation handler is completed first + * to set rma if bad page exceed threshold. + */ + flush_delayed_work(&con->page_retirement_dwork); + /* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */ if (reset_flags && !amdgpu_ras_is_rma(adev)) { if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) @@ -3385,8 +3485,6 @@ static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev, else reset = reset_flags; - flush_delayed_work(&con->page_retirement_dwork); - con->gpu_reset_flags |= reset; amdgpu_ras_reset_gpu(adev); @@ -3416,6 +3514,7 @@ static int amdgpu_ras_page_retirement_thread(void *param) if (kthread_should_stop()) break; + mutex_lock(&con->poison_lock); gpu_reset = 0; do { @@ -3428,7 +3527,8 @@ static int amdgpu_ras_page_retirement_thread(void *param) atomic_sub(poison_creation_count, &con->poison_creation_count); atomic_sub(poison_creation_count, &con->page_retirement_req_cnt); } - } while (atomic_read(&con->poison_creation_count)); + } while (atomic_read(&con->poison_creation_count) && + !atomic_read(&con->poison_consumption_count)); if (ret != -EIO) { msg_count = kfifo_len(&con->poison_fifo); @@ -3445,6 +3545,7 @@ static int amdgpu_ras_page_retirement_thread(void *param) /* gpu mode-1 reset is ongoing or just completed ras mode-1 reset */ /* Clear poison creation request */ atomic_set(&con->poison_creation_count, 0); + atomic_set(&con->poison_consumption_count, 0); /* Clear poison fifo */ amdgpu_ras_clear_poison_fifo(adev); @@ -3469,9 +3570,12 @@ static int amdgpu_ras_page_retirement_thread(void *param) atomic_sub(msg_count, &con->page_retirement_req_cnt); } + atomic_set(&con->poison_consumption_count, 0); + /* Wake up work to save bad pages to eeprom */ schedule_delayed_work(&con->page_retirement_dwork, 0); } + mutex_unlock(&con->poison_lock); } return 0; @@ -3488,8 +3592,7 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev) control = &con->eeprom_control; ret = amdgpu_ras_eeprom_init(control); - if (ret) - return ret; + control->is_eeprom_valid = !ret; if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr) control->ras_num_pa_recs = control->ras_num_recs; @@ -3498,10 +3601,12 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev) adev->umc.ras->get_retire_flip_bits) adev->umc.ras->get_retire_flip_bits(adev); - if (control->ras_num_recs) { + if (control->ras_num_recs && control->is_eeprom_valid) { ret = amdgpu_ras_load_bad_pages(adev); - if (ret) - return ret; + if (ret) { + control->is_eeprom_valid = false; + return 0; + } amdgpu_dpm_send_hbm_bad_pages_num( adev, control->ras_num_bad_pages); @@ -3520,7 +3625,7 @@ 
int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev) dev_warn(adev->dev, "Failed to format RAS EEPROM data in V3 version!\n"); } - return ret; + return 0; } int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info) @@ -3551,8 +3656,10 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info) } mutex_init(&con->recovery_lock); + mutex_init(&con->poison_lock); INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery); atomic_set(&con->in_recovery, 0); + atomic_set(&con->rma_in_recovery, 0); con->eeprom_control.bad_channel_bitmap = 0; max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control); @@ -3570,6 +3677,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info) init_waitqueue_head(&con->page_retirement_wq); atomic_set(&con->page_retirement_req_cnt, 0); atomic_set(&con->poison_creation_count, 0); + atomic_set(&con->poison_consumption_count, 0); con->page_retirement_thread = kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement"); if (IS_ERR(con->page_retirement_thread)) { @@ -3642,6 +3750,8 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) kfree(data); mutex_unlock(&con->recovery_lock); + amdgpu_ras_critical_region_init(adev); + return 0; } /* recovery end */ @@ -4068,6 +4178,12 @@ int amdgpu_ras_init(struct amdgpu_device *adev) goto release_con; } + con->init_task_pid = task_pid_nr(current); + get_task_comm(con->init_task_comm, current); + + mutex_init(&con->critical_region_lock); + INIT_LIST_HEAD(&con->critical_region_head); + dev_info(adev->dev, "RAS INFO: ras initialized successfully, " "hardware ability[%x] ras_mask[%x]\n", adev->ras_hw_enabled, adev->ras_enabled); @@ -4347,6 +4463,9 @@ int amdgpu_ras_fini(struct amdgpu_device *adev) if (!adev->ras_enabled || !con) return 0; + amdgpu_ras_critical_region_fini(adev); + mutex_destroy(&con->critical_region_lock); + list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) { if (ras_node->ras_obj) { obj = ras_node->ras_obj; @@ -4414,8 +4533,10 @@ void amdgpu_ras_clear_err_state(struct amdgpu_device *adev) struct amdgpu_ras *ras; ras = amdgpu_ras_get_context(adev); - if (ras) + if (ras) { ras->ras_err_state = 0; + ras->gpu_reset_flags = 0; + } } void amdgpu_ras_set_err_poison(struct amdgpu_device *adev, @@ -5253,6 +5374,9 @@ int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn) uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT; int ret = 0; + if (amdgpu_ras_check_critical_address(adev, start)) + return 0; + mutex_lock(&con->page_rsv_lock); ret = amdgpu_vram_mgr_query_page_status(mgr, start); if (ret == -ENOENT) @@ -5289,3 +5413,80 @@ bool amdgpu_ras_is_rma(struct amdgpu_device *adev) return con->is_rma; } + +int amdgpu_ras_add_critical_region(struct amdgpu_device *adev, + struct amdgpu_bo *bo) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct amdgpu_vram_mgr_resource *vres; + struct ras_critical_region *region; + struct drm_buddy_block *block; + int ret = 0; + + if (!bo || !bo->tbo.resource) + return -EINVAL; + + vres = to_amdgpu_vram_mgr_resource(bo->tbo.resource); + + mutex_lock(&con->critical_region_lock); + + /* Check if the bo had been recorded */ + list_for_each_entry(region, &con->critical_region_head, node) + if (region->bo == bo) + goto out; + + /* Record new critical amdgpu bo */ + list_for_each_entry(block, &vres->blocks, link) { + region = kzalloc(sizeof(*region), GFP_KERNEL); + if (!region) { + ret = -ENOMEM; + goto out; + } + region->bo = bo; + region->start = 
amdgpu_vram_mgr_block_start(block); + region->size = amdgpu_vram_mgr_block_size(block); + list_add_tail(®ion->node, &con->critical_region_head); + } + +out: + mutex_unlock(&con->critical_region_lock); + + return ret; +} + +static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev) +{ + amdgpu_ras_add_critical_region(adev, adev->mman.fw_reserved_memory); +} + +static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct ras_critical_region *region, *tmp; + + mutex_lock(&con->critical_region_lock); + list_for_each_entry_safe(region, tmp, &con->critical_region_head, node) { + list_del(®ion->node); + kfree(region); + } + mutex_unlock(&con->critical_region_lock); +} + +bool amdgpu_ras_check_critical_address(struct amdgpu_device *adev, uint64_t addr) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct ras_critical_region *region; + bool ret = false; + + mutex_lock(&con->critical_region_lock); + list_for_each_entry(region, &con->critical_region_head, node) { + if ((region->start <= addr) && + (addr < (region->start + region->size))) { + ret = true; + break; + } + } + mutex_unlock(&con->critical_region_lock); + + return ret; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 927d6bff734a..6cf0dfd38be8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -492,8 +492,15 @@ struct ras_ecc_err { struct ras_ecc_log_info { struct mutex lock; struct radix_tree_root de_page_tree; - uint64_t de_queried_count; - uint64_t prev_de_queried_count; + uint64_t de_queried_count; + uint64_t consumption_q_count; +}; + +struct ras_critical_region { + struct list_head node; + struct amdgpu_bo *bo; + uint64_t start; + uint64_t size; }; struct amdgpu_ras { @@ -515,6 +522,7 @@ struct amdgpu_ras { /* gpu recovery */ struct work_struct recovery_work; atomic_t in_recovery; + atomic_t rma_in_recovery; struct amdgpu_device *adev; /* error handler data */ struct ras_err_handler_data *eh_data; @@ -557,6 +565,7 @@ struct amdgpu_ras { struct mutex page_retirement_lock; atomic_t page_retirement_req_cnt; atomic_t poison_creation_count; + atomic_t poison_consumption_count; struct mutex page_rsv_lock; DECLARE_KFIFO(poison_fifo, struct ras_poison_msg, 128); struct ras_ecc_log_info umc_ecc_log; @@ -570,6 +579,17 @@ struct amdgpu_ras { struct ras_event_manager *event_mgr; uint64_t reserved_pages_in_bytes; + + pid_t init_task_pid; + char init_task_comm[TASK_COMM_LEN]; + + int bad_page_num; + + struct list_head critical_region_head; + struct mutex critical_region_lock; + + /* Protect poison injection */ + struct mutex poison_lock; }; struct ras_fs_data { @@ -608,6 +628,7 @@ struct ras_err_handler_data { struct eeprom_table_record *bps; /* the count of entries */ int count; + int count_saved; /* the space can place new entries */ int space_left; }; @@ -973,6 +994,9 @@ int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_ int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn); +int amdgpu_ras_add_critical_region(struct amdgpu_device *adev, struct amdgpu_bo *bo); +bool amdgpu_ras_check_critical_address(struct amdgpu_device *adev, uint64_t addr); + int amdgpu_ras_put_poison_req(struct amdgpu_device *adev, enum amdgpu_ras_block block, uint16_t pasid, pasid_notify pasid_fn, void *data, uint32_t reset); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index 2c58e09e56f9..3eb3fb55ccb0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -277,10 +277,11 @@ static int __write_table_header(struct amdgpu_ras_eeprom_control *control) up_read(&adev->reset_domain->sem); if (res < 0) { - DRM_ERROR("Failed to write EEPROM table header:%d", res); + dev_err(adev->dev, "Failed to write EEPROM table header:%d", + res); } else if (res < RAS_TABLE_HEADER_SIZE) { - DRM_ERROR("Short write:%d out of %d\n", - res, RAS_TABLE_HEADER_SIZE); + dev_err(adev->dev, "Short write:%d out of %d\n", res, + RAS_TABLE_HEADER_SIZE); res = -EIO; } else { res = 0; @@ -323,7 +324,8 @@ static int __write_table_ras_info(struct amdgpu_ras_eeprom_control *control) buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL); if (!buf) { - DRM_ERROR("Failed to alloc buf to write table ras info\n"); + dev_err(adev->dev, + "Failed to alloc buf to write table ras info\n"); return -ENOMEM; } @@ -338,10 +340,11 @@ static int __write_table_ras_info(struct amdgpu_ras_eeprom_control *control) up_read(&adev->reset_domain->sem); if (res < 0) { - DRM_ERROR("Failed to write EEPROM table ras info:%d", res); + dev_err(adev->dev, "Failed to write EEPROM table ras info:%d", + res); } else if (res < RAS_TABLE_V2_1_INFO_SIZE) { - DRM_ERROR("Short write:%d out of %d\n", - res, RAS_TABLE_V2_1_INFO_SIZE); + dev_err(adev->dev, "Short write:%d out of %d\n", res, + RAS_TABLE_V2_1_INFO_SIZE); res = -EIO; } else { res = 0; @@ -476,6 +479,8 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control) control->ras_num_recs = 0; control->ras_num_bad_pages = 0; + control->ras_num_mca_recs = 0; + control->ras_num_pa_recs = 0; control->ras_fri = 0; amdgpu_dpm_send_hbm_bad_pages_num(adev, control->ras_num_bad_pages); @@ -607,13 +612,13 @@ static int __amdgpu_ras_eeprom_write(struct amdgpu_ras_eeprom_control *control, buf, buf_size); up_read(&adev->reset_domain->sem); if (res < 0) { - DRM_ERROR("Writing %d EEPROM table records error:%d", - num, res); + dev_err(adev->dev, "Writing %d EEPROM table records error:%d", + num, res); } else if (res < buf_size) { /* Short write, return error. 
*/ - DRM_ERROR("Wrote %d records out of %d", - res / RAS_TABLE_RECORD_SIZE, num); + dev_err(adev->dev, "Wrote %d records out of %d", + res / RAS_TABLE_RECORD_SIZE, num); res = -EIO; } else { res = 0; @@ -738,8 +743,7 @@ amdgpu_ras_eeprom_append_table(struct amdgpu_ras_eeprom_control *control, else control->ras_num_mca_recs += num; - control->ras_num_bad_pages = control->ras_num_pa_recs + - control->ras_num_mca_recs * adev->umc.retire_unit; + control->ras_num_bad_pages = con->bad_page_num; Out: kfree(buf); return res; @@ -761,15 +765,19 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control) dev_warn(adev->dev, "Saved bad pages %d reaches threshold value %d\n", control->ras_num_bad_pages, ras->bad_page_cnt_threshold); - control->tbl_hdr.header = RAS_TABLE_HDR_BAD; - if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) { - control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD; - control->tbl_rai.health_percent = 0; - } + + if (adev->cper.enabled && amdgpu_cper_generate_bp_threshold_record(adev)) + dev_warn(adev->dev, "fail to generate bad page threshold cper records\n"); if ((amdgpu_bad_page_threshold != -1) && - (amdgpu_bad_page_threshold != -2)) + (amdgpu_bad_page_threshold != -2)) { + control->tbl_hdr.header = RAS_TABLE_HDR_BAD; + if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) { + control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD; + control->tbl_rai.health_percent = 0; + } ras->is_rma = true; + } /* ignore the -ENOTSUPP return value */ amdgpu_dpm_send_rma_reason(adev); @@ -787,8 +795,9 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control) buf_size = control->ras_num_recs * RAS_TABLE_RECORD_SIZE; buf = kcalloc(control->ras_num_recs, RAS_TABLE_RECORD_SIZE, GFP_KERNEL); if (!buf) { - DRM_ERROR("allocating memory for table of size %d bytes failed\n", - control->tbl_hdr.tbl_size); + dev_err(adev->dev, + "allocating memory for table of size %d bytes failed\n", + control->tbl_hdr.tbl_size); res = -ENOMEM; goto Out; } @@ -800,12 +809,11 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control) buf, buf_size); up_read(&adev->reset_domain->sem); if (res < 0) { - DRM_ERROR("EEPROM failed reading records:%d\n", - res); + dev_err(adev->dev, "EEPROM failed reading records:%d\n", res); goto Out; } else if (res < buf_size) { - DRM_ERROR("EEPROM read %d out of %d bytes\n", - res, buf_size); + dev_err(adev->dev, "EEPROM read %d out of %d bytes\n", res, + buf_size); res = -EIO; goto Out; } @@ -866,11 +874,12 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control, return 0; if (num == 0) { - DRM_ERROR("will not append 0 records\n"); + dev_err(adev->dev, "will not append 0 records\n"); return -EINVAL; } else if (num > control->ras_max_record_count) { - DRM_ERROR("cannot append %d records than the size of table %d\n", - num, control->ras_max_record_count); + dev_err(adev->dev, + "cannot append %d records than the size of table %d\n", + num, control->ras_max_record_count); return -EINVAL; } @@ -924,13 +933,13 @@ static int __amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control, buf, buf_size); up_read(&adev->reset_domain->sem); if (res < 0) { - DRM_ERROR("Reading %d EEPROM table records error:%d", - num, res); + dev_err(adev->dev, "Reading %d EEPROM table records error:%d", + num, res); } else if (res < buf_size) { /* Short read, return error. 
*/ - DRM_ERROR("Read %d records out of %d", - res / RAS_TABLE_RECORD_SIZE, num); + dev_err(adev->dev, "Read %d records out of %d", + res / RAS_TABLE_RECORD_SIZE, num); res = -EIO; } else { res = 0; @@ -964,11 +973,11 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control, return 0; if (num == 0) { - DRM_ERROR("will not read 0 records\n"); + dev_err(adev->dev, "will not read 0 records\n"); return -EINVAL; } else if (num > control->ras_num_recs) { - DRM_ERROR("too many records to read:%d available:%d\n", - num, control->ras_num_recs); + dev_err(adev->dev, "too many records to read:%d available:%d\n", + num, control->ras_num_recs); return -EINVAL; } @@ -1300,7 +1309,8 @@ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) { - DRM_ERROR("Out of memory checking RAS table checksum.\n"); + dev_err(adev->dev, + "Out of memory checking RAS table checksum.\n"); return -ENOMEM; } @@ -1309,7 +1319,7 @@ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control control->ras_header_offset, buf, buf_size); if (res < buf_size) { - DRM_ERROR("Partial read for checksum, res:%d\n", res); + dev_err(adev->dev, "Partial read for checksum, res:%d\n", res); /* On partial reads, return -EIO. */ if (res >= 0) @@ -1334,7 +1344,8 @@ static int __read_table_ras_info(struct amdgpu_ras_eeprom_control *control) buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL); if (!buf) { - DRM_ERROR("Failed to alloc buf to read EEPROM table ras info\n"); + dev_err(adev->dev, + "Failed to alloc buf to read EEPROM table ras info\n"); return -ENOMEM; } @@ -1346,7 +1357,8 @@ static int __read_table_ras_info(struct amdgpu_ras_eeprom_control *control) control->i2c_address + control->ras_info_offset, buf, RAS_TABLE_V2_1_INFO_SIZE); if (res < RAS_TABLE_V2_1_INFO_SIZE) { - DRM_ERROR("Failed to read EEPROM table ras info, res:%d", res); + dev_err(adev->dev, + "Failed to read EEPROM table ras info, res:%d", res); res = res >= 0 ? -EIO : res; goto Out; } @@ -1387,7 +1399,8 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control) control->i2c_address + control->ras_header_offset, buf, RAS_TABLE_HEADER_SIZE); if (res < RAS_TABLE_HEADER_SIZE) { - DRM_ERROR("Failed to read EEPROM table header, res:%d", res); + dev_err(adev->dev, "Failed to read EEPROM table header, res:%d", + res); return res >= 0 ? -EIO : res; } @@ -1448,12 +1461,12 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control) if (!__get_eeprom_i2c_addr(adev, control)) return -EINVAL; - control->ras_num_bad_pages = control->ras_num_pa_recs + - control->ras_num_mca_recs * adev->umc.retire_unit; + control->ras_num_bad_pages = ras->bad_page_num; if (hdr->header == RAS_TABLE_HDR_VAL) { - DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records", - control->ras_num_bad_pages); + dev_dbg(adev->dev, + "Found existing EEPROM table with %d records", + control->ras_num_bad_pages); if (hdr->version >= RAS_TABLE_VER_V2_1) { res = __read_table_ras_info(control); @@ -1521,3 +1534,31 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control) return res < 0 ? 
res : 0; } + +void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev) +{ + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + struct amdgpu_ras_eeprom_control *control; + int res; + + if (!__is_ras_eeprom_supported(adev) || !ras) + return; + control = &ras->eeprom_control; + if (!control->is_eeprom_valid) + return; + res = __verify_ras_table_checksum(control); + if (res) { + dev_warn(adev->dev, + "RAS table incorrect checksum or error:%d, try to recover\n", + res); + if (!amdgpu_ras_eeprom_reset_table(control)) + if (!amdgpu_ras_save_bad_pages(adev, NULL)) + if (!__verify_ras_table_checksum(control)) { + dev_info(adev->dev, "RAS table recovery succeed\n"); + return; + } + dev_err(adev->dev, "RAS table recovery failed\n"); + control->is_eeprom_valid = false; + } + return; +}
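A rough usage sketch, not taken from this patch, of the new amdgpu_ras_eeprom_check_and_recover() helper together with the is_eeprom_valid flag added to the control structure below; the wrapper function name is invented for illustration and assumes the usual amdgpu.h, amdgpu_ras.h and amdgpu_ras_eeprom.h headers:

	static void example_revalidate_ras_eeprom(struct amdgpu_device *adev)
	{
		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

		if (!ras)
			return;

		/* Re-verify the table checksum; on mismatch the helper resets
		 * the table and rewrites it from the bad pages already cached
		 * by the driver. */
		amdgpu_ras_eeprom_check_and_recover(adev);

		/* If recovery failed, the helper clears is_eeprom_valid and
		 * later amdgpu_ras_save_bad_pages() calls skip EEPROM writes. */
		if (!ras->eeprom_control.is_eeprom_valid)
			dev_warn(adev->dev, "RAS EEPROM table is unusable\n");
	}
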
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h index ec6d7ea37ad0..ebfca4cb5688 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h @@ -114,6 +114,8 @@ struct amdgpu_ras_eeprom_control { /* Record channel info which occurred bad pages */ u32 bad_channel_bitmap; + + bool is_eeprom_valid; }; /* @@ -159,6 +161,8 @@ void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control); int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control); +void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev); + extern const struct file_operations amdgpu_ras_debugfs_eeprom_size_ops; extern const struct file_operations amdgpu_ras_debugfs_eeprom_table_ops; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h index 50fcd86e1033..be2e56ce1355 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h @@ -91,6 +91,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res, break; case TTM_PL_TT: case AMDGPU_PL_DOORBELL: + case AMDGPU_PL_MMIO_REMAP: node = to_ttm_range_mgr_node(res)->mm_nodes; while (start >= node->size << PAGE_SHIFT) start -= node++->size << PAGE_SHIFT; @@ -153,6 +154,7 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size) break; case TTM_PL_TT: case AMDGPU_PL_DOORBELL: + case AMDGPU_PL_MMIO_REMAP: node = cur->node; cur->node = ++node; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c index dabfbdf6f1ce..28c4ad62f50e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c @@ -340,6 +340,9 @@ void amdgpu_reset_get_desc(struct amdgpu_reset_context *rst_ctxt, char *buf, case AMDGPU_RESET_SRC_USER: strscpy(buf, "user trigger", len); break; + case AMDGPU_RESET_SRC_USERQ: + strscpy(buf, "user queue trigger", len); + break; default: strscpy(buf, "unknown", len); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h index 4d9b9701139b..07b4d37f1db6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h @@ -43,6 +43,7 @@ enum AMDGPU_RESET_SRCS { AMDGPU_RESET_SRC_MES, AMDGPU_RESET_SRC_HWS, AMDGPU_RESET_SRC_USER, + AMDGPU_RESET_SRC_USERQ, }; struct amdgpu_reset_context { @@ -160,4 +161,16 @@ int amdgpu_reset_do_xgmi_reset_on_init( bool amdgpu_reset_in_recovery(struct amdgpu_device *adev); +static inline void amdgpu_reset_set_dpc_status(struct amdgpu_device *adev, + bool status) +{ + adev->pcie_reset_ctx.occurs_dpc = status; + adev->no_hw_access = status; +} + +static inline bool amdgpu_reset_in_dpc(struct amdgpu_device *adev) +{ + return adev->pcie_reset_ctx.occurs_dpc; +} + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 6ac0ce361a2d..8f6ce948c684 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -99,6 +99,29 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw) return 0; } +/** + * amdgpu_ring_alloc_reemit - allocate space on the ring buffer for reemit + * + * @ring: amdgpu_ring structure holding ring information + * @ndw: number of dwords to allocate in the ring buffer + * + * Allocate @ndw dwords in the ring buffer (all asics). 
+ * doesn't check the max_dw limit as we may be reemitting + * several submissions. + */ +static void amdgpu_ring_alloc_reemit(struct amdgpu_ring *ring, unsigned int ndw) +{ + /* Align requested size with padding so unlock_commit can + * pad safely */ + ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask; + + ring->count_dw = ndw; + ring->wptr_old = ring->wptr; + + if (ring->funcs->begin_use) + ring->funcs->begin_use(ring); +} + /** amdgpu_ring_insert_nop - insert NOP packets * * @ring: amdgpu_ring structure holding ring information @@ -333,15 +356,23 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, /* Initialize cached_rptr to 0 */ ring->cached_rptr = 0; + if (!ring->ring_backup) { + ring->ring_backup = kvzalloc(ring->ring_size, GFP_KERNEL); + if (!ring->ring_backup) + return -ENOMEM; + } + /* Allocate ring buffer */ if (ring->ring_obj == NULL) { - r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE, + r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_bytes, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, &ring->ring_obj, &ring->gpu_addr, (void **)&ring->ring); if (r) { dev_err(adev->dev, "(%d) ring create failed\n", r); + kvfree(ring->ring_backup); return r; } amdgpu_ring_clear_ring(ring); @@ -385,12 +416,12 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) amdgpu_bo_free_kernel(&ring->ring_obj, &ring->gpu_addr, (void **)&ring->ring); + kvfree(ring->ring_backup); + ring->ring_backup = NULL; dma_fence_put(ring->vmid_wait); ring->vmid_wait = NULL; ring->me = 0; - - ring->adev->rings[ring->idx] = NULL; } /** @@ -687,6 +718,7 @@ static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring, prop->eop_gpu_addr = ring->eop_gpu_addr; prop->use_doorbell = ring->use_doorbell; prop->doorbell_index = ring->doorbell_index; + prop->kernel_queue = true; /* map_queues packet doesn't need activate the queue, * so only kiq need set this field. @@ -758,3 +790,69 @@ bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring) return true; } + +void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring, + struct amdgpu_fence *guilty_fence) +{ + /* Stop the scheduler to prevent anybody else from touching the ring buffer. 
*/ + drm_sched_wqueue_stop(&ring->sched); + /* back up the non-guilty commands */ + amdgpu_ring_backup_unprocessed_commands(ring, guilty_fence); +} + +int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring, + struct amdgpu_fence *guilty_fence) +{ + unsigned int i; + int r; + + /* verify that the ring is functional */ + r = amdgpu_ring_test_ring(ring); + if (r) + return r; + + /* signal the fence of the bad job */ + if (guilty_fence) + amdgpu_fence_driver_guilty_force_completion(guilty_fence); + /* Re-emit the non-guilty commands */ + if (ring->ring_backup_entries_to_copy) { + amdgpu_ring_alloc_reemit(ring, ring->ring_backup_entries_to_copy); + for (i = 0; i < ring->ring_backup_entries_to_copy; i++) + amdgpu_ring_write(ring, ring->ring_backup[i]); + amdgpu_ring_commit(ring); + } + /* Start the scheduler again */ + drm_sched_wqueue_start(&ring->sched); + return 0; +} + +bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring, + u32 reset_type) +{ + switch (ring->funcs->type) { + case AMDGPU_RING_TYPE_GFX: + if (ring->adev->gfx.gfx_supported_reset & reset_type) + return true; + break; + case AMDGPU_RING_TYPE_COMPUTE: + if (ring->adev->gfx.compute_supported_reset & reset_type) + return true; + break; + case AMDGPU_RING_TYPE_SDMA: + if (ring->adev->sdma.supported_reset & reset_type) + return true; + break; + case AMDGPU_RING_TYPE_VCN_DEC: + case AMDGPU_RING_TYPE_VCN_ENC: + if (ring->adev->vcn.supported_reset & reset_type) + return true; + break; + case AMDGPU_RING_TYPE_VCN_JPEG: + if (ring->adev->jpeg.supported_reset & reset_type) + return true; + break; + default: + break; + } + return false; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index e1f25218943a..b6b649179776 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -114,10 +114,11 @@ struct amdgpu_sched { */ struct amdgpu_fence_driver { uint64_t gpu_addr; - volatile uint32_t *cpu_addr; + uint32_t *cpu_addr; /* sync_seq is protected by ring emission lock */ uint32_t sync_seq; atomic_t last_seq; + u64 signalled_wptr; bool initialized; struct amdgpu_irq_src *irq_src; unsigned irq_type; @@ -141,6 +142,12 @@ struct amdgpu_fence { /* RB, DMA, etc. 
*/ struct amdgpu_ring *ring; ktime_t start_timestamp; + + /* wptr for the fence for resets */ + u64 wptr; + /* fence context for resets */ + u64 context; + uint32_t seq; }; extern const struct drm_sched_backend_ops amdgpu_sched_ops; @@ -148,6 +155,8 @@ extern const struct drm_sched_backend_ops amdgpu_sched_ops; void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring); void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error); void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring); +void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence); +void amdgpu_fence_save_wptr(struct dma_fence *fence); int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring); int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, @@ -157,8 +166,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev); void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev); int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev); void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev); -int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence, struct amdgpu_job *job, - unsigned flags); +int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, + struct amdgpu_fence *af, unsigned int flags); int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s, uint32_t timeout); bool amdgpu_fence_process(struct amdgpu_ring *ring); @@ -202,7 +211,18 @@ struct amdgpu_ring_funcs { bool support_64bit_ptrs; bool no_user_fence; bool secure_submission_supported; - unsigned extra_dw; + + /** + * @extra_bytes: + * + * Optional extra space in bytes that is added to the ring size + * when allocating the BO that holds the contents of the ring. + * This space isn't used for command submission to the ring, + * but is just there to satisfy some hardware requirements or + * implement workarounds. It's up to the implementation of each + * specific ring to initialize this space. + */ + unsigned extra_bytes; /* ring read/write ptr handling */ u64 (*get_rptr)(struct amdgpu_ring *ring); @@ -268,9 +288,9 @@ struct amdgpu_ring_funcs { void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset); void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset); void (*patch_de)(struct amdgpu_ring *ring, unsigned offset); - int (*reset)(struct amdgpu_ring *ring, unsigned int vmid); + int (*reset)(struct amdgpu_ring *ring, unsigned int vmid, + struct amdgpu_fence *timedout_fence); void (*emit_cleaner_shader)(struct amdgpu_ring *ring); - bool (*is_guilty)(struct amdgpu_ring *ring); }; /** @@ -284,9 +304,12 @@ struct amdgpu_ring { struct amdgpu_bo *ring_obj; uint32_t *ring; + /* backups for resets */ + uint32_t *ring_backup; + unsigned int ring_backup_entries_to_copy; unsigned rptr_offs; u64 rptr_gpu_addr; - volatile u32 *rptr_cpu_addr; + u32 *rptr_cpu_addr; /** * @wptr: @@ -366,19 +389,19 @@ struct amdgpu_ring { * This is the CPU address pointer in the writeback slot. This is used * to commit changes to the GPU. 
*/ - volatile u32 *wptr_cpu_addr; + u32 *wptr_cpu_addr; unsigned fence_offs; u64 fence_gpu_addr; - volatile u32 *fence_cpu_addr; + u32 *fence_cpu_addr; uint64_t current_ctx; char name[16]; u32 trail_seq; unsigned trail_fence_offs; u64 trail_fence_gpu_addr; - volatile u32 *trail_fence_cpu_addr; + u32 *trail_fence_cpu_addr; unsigned cond_exe_offs; u64 cond_exe_gpu_addr; - volatile u32 *cond_exe_cpu_addr; + u32 *cond_exe_cpu_addr; unsigned int set_q_mode_offs; u32 *set_q_mode_ptr; u64 set_q_mode_token; @@ -425,7 +448,7 @@ struct amdgpu_ring { #define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o))) #define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o))) #define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o))) -#define amdgpu_ring_reset(r, v) (r)->funcs->reset((r), (v)) +#define amdgpu_ring_reset(r, v, f) (r)->funcs->reset((r), (v), (f)) unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type); int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw); @@ -458,10 +481,7 @@ static inline void amdgpu_ring_set_preempt_cond_exec(struct amdgpu_ring *ring, static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring) { - int i = 0; - while (i <= ring->buf_mask) - ring->ring[i++] = ring->funcs->nop; - + memset32(ring->ring, ring->funcs->nop, ring->buf_mask + 1); } static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v) @@ -550,4 +570,12 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev); void amdgpu_ib_pool_fini(struct amdgpu_device *adev); int amdgpu_ib_ring_tests(struct amdgpu_device *adev); bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring); +void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring, + struct amdgpu_fence *guilty_fence); +void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring, + struct amdgpu_fence *guilty_fence); +int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring, + struct amdgpu_fence *guilty_fence); +bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring, + u32 reset_type); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c index db5791e1a7ce..5aa830a02d80 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c @@ -89,7 +89,7 @@ void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev, int xcc_id) int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws) { const u32 *src_ptr; - volatile u32 *dst_ptr; + u32 *dst_ptr; u32 i; int r; @@ -189,7 +189,7 @@ int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev) void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev) { const __le32 *fw_data; - volatile u32 *dst_ptr; + u32 *dst_ptr; int me, i, max_me; u32 bo_offset = 0; u32 table_offset, table_size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h index c210625be220..2ce310b31942 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h @@ -251,7 +251,7 @@ struct amdgpu_rlc_funcs { * and it also provides a pointer to it which is used by the firmware * to load the clear state in some cases. 
*/ - void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer); + void (*get_csb_buffer)(struct amdgpu_device *adev, u32 *buffer); int (*get_cp_table_num)(struct amdgpu_device *adev); int (*resume)(struct amdgpu_device *adev); void (*stop)(struct amdgpu_device *adev); @@ -275,19 +275,19 @@ struct amdgpu_rlc { /* for power gating */ struct amdgpu_bo *save_restore_obj; uint64_t save_restore_gpu_addr; - volatile uint32_t *sr_ptr; + uint32_t *sr_ptr; const u32 *reg_list; u32 reg_list_size; /* for clear state */ struct amdgpu_bo *clear_state_obj; uint64_t clear_state_gpu_addr; - volatile uint32_t *cs_ptr; + uint32_t *cs_ptr; const struct cs_section_def *cs_data; u32 clear_state_size; /* for cp tables */ struct amdgpu_bo *cp_table_obj; uint64_t cp_table_gpu_addr; - volatile uint32_t *cp_table_ptr; + uint32_t *cp_table_ptr; u32 cp_table_size; /* safe mode for updating CG/PG state */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index 9b54a1ece447..8b8a04138711 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -534,71 +534,48 @@ bool amdgpu_sdma_is_shared_inv_eng(struct amdgpu_device *adev, struct amdgpu_rin static int amdgpu_sdma_soft_reset(struct amdgpu_device *adev, u32 instance_id) { struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id]; - int r = -EOPNOTSUPP; - - switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { - case IP_VERSION(4, 4, 2): - case IP_VERSION(4, 4, 4): - case IP_VERSION(4, 4, 5): - /* For SDMA 4.x, use the existing DPM interface for backward compatibility, - * we need to convert the logical instance ID to physical instance ID before reset. - */ - r = amdgpu_dpm_reset_sdma(adev, 1 << GET_INST(SDMA0, instance_id)); - break; - case IP_VERSION(5, 0, 0): - case IP_VERSION(5, 0, 1): - case IP_VERSION(5, 0, 2): - case IP_VERSION(5, 0, 5): - case IP_VERSION(5, 2, 0): - case IP_VERSION(5, 2, 2): - case IP_VERSION(5, 2, 4): - case IP_VERSION(5, 2, 5): - case IP_VERSION(5, 2, 6): - case IP_VERSION(5, 2, 3): - case IP_VERSION(5, 2, 1): - case IP_VERSION(5, 2, 7): - if (sdma_instance->funcs->soft_reset_kernel_queue) - r = sdma_instance->funcs->soft_reset_kernel_queue(adev, instance_id); - break; - default: - break; - } - return r; + if (sdma_instance->funcs->soft_reset_kernel_queue) + return sdma_instance->funcs->soft_reset_kernel_queue(adev, instance_id); + + return -EOPNOTSUPP; } /** * amdgpu_sdma_reset_engine - Reset a specific SDMA engine * @adev: Pointer to the AMDGPU device * @instance_id: Logical ID of the SDMA engine instance to reset + * @caller_handles_kernel_queues: Skip kernel queue processing. Caller + * will handle it. * * Returns: 0 on success, or a negative error code on failure. */ -int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id) +int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id, + bool caller_handles_kernel_queues) { int ret = 0; struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id]; struct amdgpu_ring *gfx_ring = &sdma_instance->ring; struct amdgpu_ring *page_ring = &sdma_instance->page; - bool gfx_sched_stopped = false, page_sched_stopped = false; mutex_lock(&sdma_instance->engine_reset_mutex); - /* Stop the scheduler's work queue for the GFX and page rings if they are running. - * This ensures that no new tasks are submitted to the queues while - * the reset is in progress. 
- */ - if (!amdgpu_ring_sched_ready(gfx_ring)) { + + if (!caller_handles_kernel_queues) { + /* Stop the scheduler's work queue for the GFX and page rings if they are running. + * This ensures that no new tasks are submitted to the queues while + * the reset is in progress. + */ drm_sched_wqueue_stop(&gfx_ring->sched); - gfx_sched_stopped = true; - } - if (adev->sdma.has_page_queue && !amdgpu_ring_sched_ready(page_ring)) { - drm_sched_wqueue_stop(&page_ring->sched); - page_sched_stopped = true; + if (adev->sdma.has_page_queue) + drm_sched_wqueue_stop(&page_ring->sched); } - if (sdma_instance->funcs->stop_kernel_queue) + if (sdma_instance->funcs->stop_kernel_queue) { sdma_instance->funcs->stop_kernel_queue(gfx_ring); + if (adev->sdma.has_page_queue) + sdma_instance->funcs->stop_kernel_queue(page_ring); + } /* Perform the SDMA reset for the specified instance */ ret = amdgpu_sdma_soft_reset(adev, instance_id); @@ -607,20 +584,25 @@ int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id) goto exit; } - if (sdma_instance->funcs->start_kernel_queue) + if (sdma_instance->funcs->start_kernel_queue) { sdma_instance->funcs->start_kernel_queue(gfx_ring); + if (adev->sdma.has_page_queue) + sdma_instance->funcs->start_kernel_queue(page_ring); + } exit: - /* Restart the scheduler's work queue for the GFX and page rings - * if they were stopped by this function. This allows new tasks - * to be submitted to the queues after the reset is complete. - */ - if (!ret) { - if (gfx_sched_stopped && amdgpu_ring_sched_ready(gfx_ring)) { + if (!caller_handles_kernel_queues) { + /* Restart the scheduler's work queue for the GFX and page rings + * if they were stopped by this function. This allows new tasks + * to be submitted to the queues after the reset is complete. 
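+	 * Callers that already manage the kernel queues themselves (for
+	 * example a per-ring reset path) are expected to pass
+	 * caller_handles_kernel_queues == true so that both this restart and
+	 * the earlier stop are skipped, roughly:
+	 *
+	 *   r = amdgpu_sdma_reset_engine(adev, instance_id, true);
+	 *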
+ */ + if (!ret) { + amdgpu_fence_driver_force_completion(gfx_ring); drm_sched_wqueue_start(&gfx_ring->sched); - } - if (page_sched_stopped && amdgpu_ring_sched_ready(page_ring)) { - drm_sched_wqueue_start(&page_ring->sched); + if (adev->sdma.has_page_queue) { + amdgpu_fence_driver_force_completion(page_ring); + drm_sched_wqueue_start(&page_ring->sched); + } } } mutex_unlock(&sdma_instance->engine_reset_mutex); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index e5f8951bbb6f..34311f32be4c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -172,7 +172,8 @@ struct amdgpu_buffer_funcs { uint32_t byte_count); }; -int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id); +int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id, + bool caller_handles_kernel_queues); #define amdgpu_emit_copy_buffer(adev, ib, s, d, b, t) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b), (t)) #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c index d45ebfb642ca..a0b479d5fff1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c @@ -67,9 +67,9 @@ static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev) int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo_va **bo_va) { - u64 seq64_addr, va_flags; struct amdgpu_bo *bo; struct drm_exec exec; + u64 seq64_addr; int r; bo = adev->seq64.sbo; @@ -94,9 +94,9 @@ int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm, seq64_addr = amdgpu_seq64_get_va_base(adev) & AMDGPU_GMC_HOLE_MASK; - va_flags = amdgpu_gem_va_map_flags(adev, AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_MTYPE_UC); - r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE, - va_flags); + r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, + AMDGPU_VA_RESERVED_SEQ64_SIZE, + AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_MTYPE_UC); if (r) { DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r); amdgpu_vm_bo_del(adev, *bo_va); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 11dd2e0f7979..d13e64a69e25 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -167,25 +167,23 @@ TRACE_EVENT(amdgpu_cs_ioctl, TP_PROTO(struct amdgpu_job *job), TP_ARGS(job), TP_STRUCT__entry( - __field(uint64_t, sched_job_id) __string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)) - __field(unsigned int, context) - __field(unsigned int, seqno) + __field(u64, context) + __field(u64, seqno) __field(struct dma_fence *, fence) __string(ring, to_amdgpu_ring(job->base.sched)->name) __field(u32, num_ibs) ), TP_fast_assign( - __entry->sched_job_id = job->base.id; __assign_str(timeline); __entry->context = job->base.s_fence->finished.context; __entry->seqno = job->base.s_fence->finished.seqno; __assign_str(ring); __entry->num_ibs = job->num_ibs; ), - TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", - __entry->sched_job_id, __get_str(timeline), __entry->context, + TP_printk("timeline=%s, fence=%llu:%llu, ring_name=%s, num_ibs=%u", + __get_str(timeline), __entry->context, __entry->seqno, __get_str(ring), __entry->num_ibs) ); @@ -193,24 +191,22 @@ 
TRACE_EVENT(amdgpu_sched_run_job, TP_PROTO(struct amdgpu_job *job), TP_ARGS(job), TP_STRUCT__entry( - __field(uint64_t, sched_job_id) __string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)) - __field(unsigned int, context) - __field(unsigned int, seqno) + __field(u64, context) + __field(u64, seqno) __string(ring, to_amdgpu_ring(job->base.sched)->name) __field(u32, num_ibs) ), TP_fast_assign( - __entry->sched_job_id = job->base.id; __assign_str(timeline); __entry->context = job->base.s_fence->finished.context; __entry->seqno = job->base.s_fence->finished.seqno; __assign_str(ring); __entry->num_ibs = job->num_ibs; ), - TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", - __entry->sched_job_id, __get_str(timeline), __entry->context, + TP_printk("timeline=%s, fence=%llu:%llu, ring_name=%s, num_ibs=%u", + __get_str(timeline), __entry->context, __entry->seqno, __get_str(ring), __entry->num_ibs) ); @@ -551,23 +547,19 @@ TRACE_EVENT(amdgpu_ib_pipe_sync, TP_ARGS(sched_job, fence), TP_STRUCT__entry( __string(ring, sched_job->base.sched->name) - __field(uint64_t, id) __field(struct dma_fence *, fence) - __field(uint64_t, ctx) - __field(unsigned, seqno) + __field(u64, ctx) + __field(u64, seqno) ), TP_fast_assign( __assign_str(ring); - __entry->id = sched_job->base.id; __entry->fence = fence; __entry->ctx = fence->context; __entry->seqno = fence->seqno; ), - TP_printk("job ring=%s, id=%llu, need pipe sync to fence=%p, context=%llu, seq=%u", - __get_str(ring), __entry->id, - __entry->fence, __entry->ctx, - __entry->seqno) + TP_printk("job ring=%s need pipe sync to fence=%llu:%llu", + __get_str(ring), __entry->ctx, __entry->seqno) ); TRACE_EVENT(amdgpu_reset_reg_dumps, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 9c5df35f05b7..aa9ee5dffa45 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -123,6 +123,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, case AMDGPU_PL_GWS: case AMDGPU_PL_OA: case AMDGPU_PL_DOORBELL: + case AMDGPU_PL_MMIO_REMAP: placement->num_placement = 0; return; @@ -226,7 +227,8 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr, AMDGPU_FENCE_OWNER_UNDEFINED, num_dw * 4 + num_bytes, - AMDGPU_IB_POOL_DELAYED, &job); + AMDGPU_IB_POOL_DELAYED, &job, + AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER); if (r) return r; @@ -299,7 +301,8 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, struct amdgpu_bo *abo_src, *abo_dst; if (!adev->mman.buffer_funcs_enabled) { - DRM_ERROR("Trying to move memory with ring turned off.\n"); + dev_err(adev->dev, + "Trying to move memory with ring turned off.\n"); return -EINVAL; } @@ -405,7 +408,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, struct dma_fence *wipe_fence = NULL; r = amdgpu_fill_buffer(abo, 0, NULL, &wipe_fence, - false); + false, AMDGPU_KERNEL_JOB_ID_MOVE_BLIT); if (r) { goto error; } else if (wipe_fence) { @@ -446,7 +449,8 @@ bool amdgpu_res_cpu_visible(struct amdgpu_device *adev, return false; if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT || - res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL) + res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL || + res->mem_type == AMDGPU_PL_MMIO_REMAP) return true; if (res->mem_type != TTM_PL_VRAM) @@ -537,10 +541,12 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, old_mem->mem_type 
== AMDGPU_PL_GWS || old_mem->mem_type == AMDGPU_PL_OA || old_mem->mem_type == AMDGPU_PL_DOORBELL || + old_mem->mem_type == AMDGPU_PL_MMIO_REMAP || new_mem->mem_type == AMDGPU_PL_GDS || new_mem->mem_type == AMDGPU_PL_GWS || new_mem->mem_type == AMDGPU_PL_OA || - new_mem->mem_type == AMDGPU_PL_DOORBELL) { + new_mem->mem_type == AMDGPU_PL_DOORBELL || + new_mem->mem_type == AMDGPU_PL_MMIO_REMAP) { /* Nothing to save here */ amdgpu_bo_move_notify(bo, evict, new_mem); ttm_bo_move_null(bo, new_mem); @@ -628,6 +634,12 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, mem->bus.is_iomem = true; mem->bus.caching = ttm_uncached; break; + case AMDGPU_PL_MMIO_REMAP: + mem->bus.offset = mem->start << PAGE_SHIFT; + mem->bus.offset += adev->rmmio_remap.bus_addr; + mem->bus.is_iomem = true; + mem->bus.caching = ttm_uncached; + break; default: return -EINVAL; } @@ -645,6 +657,8 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo, if (bo->resource->mem_type == AMDGPU_PL_DOORBELL) return ((uint64_t)(adev->doorbell.base + cursor.start)) >> PAGE_SHIFT; + else if (bo->resource->mem_type == AMDGPU_PL_MMIO_REMAP) + return ((uint64_t)(adev->rmmio_remap.bus_addr + cursor.start)) >> PAGE_SHIFT; return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT; } @@ -694,7 +708,7 @@ struct amdgpu_ttm_tt { * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only * once afterwards to stop HMM tracking */ -int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages, +int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct hmm_range **range) { struct ttm_tt *ttm = bo->tbo.ttm; @@ -731,7 +745,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages, readonly = amdgpu_ttm_tt_is_readonly(ttm); r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages, - readonly, NULL, pages, range); + readonly, NULL, range); out_unlock: mmap_read_unlock(mm); if (r) @@ -783,12 +797,12 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, * that backs user memory and will ultimately be mapped into the device * address space. */ -void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages) +void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct hmm_range *range) { unsigned long i; for (i = 0; i < ttm->num_pages; ++i) - ttm->pages[i] = pages ? pages[i] : NULL; + ttm->pages[i] = range ? 
hmm_pfn_to_page(range->hmm_pfns[i]) : NULL; } /* @@ -934,7 +948,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev, if (gtt->userptr) { r = amdgpu_ttm_tt_pin_userptr(bdev, ttm); if (r) { - DRM_ERROR("failed to pin userptr\n"); + dev_err(adev->dev, "failed to pin userptr\n"); return r; } } else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) { @@ -1060,7 +1074,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev, /* if the pages have userptr pinning then clear that first */ if (gtt->userptr) { amdgpu_ttm_tt_unpin_userptr(bdev, ttm); - } else if (ttm->sg && gtt->gobj->import_attach) { + } else if (ttm->sg && drm_gem_is_imported(gtt->gobj)) { struct dma_buf_attachment *attach; attach = gtt->gobj->import_attach; @@ -1354,7 +1368,8 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem) if (mem && (mem->mem_type == TTM_PL_TT || mem->mem_type == AMDGPU_PL_DOORBELL || - mem->mem_type == AMDGPU_PL_PREEMPT)) { + mem->mem_type == AMDGPU_PL_PREEMPT || + mem->mem_type == AMDGPU_PL_MMIO_REMAP)) { flags |= AMDGPU_PTE_SYSTEM; if (ttm->caching == ttm_cached) @@ -1509,7 +1524,8 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo, r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr, AMDGPU_FENCE_OWNER_UNDEFINED, num_dw * 4, AMDGPU_IB_POOL_DELAYED, - &job); + &job, + AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA); if (r) goto out; @@ -1781,7 +1797,7 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) &ctx->c2p_bo, NULL); if (ret) { - DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret); + dev_err(adev->dev, "alloc c2p_bo failed(%d)!\n", ret); amdgpu_ttm_training_reserve_vram_fini(adev); return ret; } @@ -1793,7 +1809,7 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) adev, adev->gmc.real_vram_size - reserve_size, reserve_size, &adev->mman.fw_reserved_memory, NULL); if (ret) { - DRM_ERROR("alloc tmr failed(%d)!\n", ret); + dev_err(adev->dev, "alloc tmr failed(%d)!\n", ret); amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL); return ret; @@ -1840,6 +1856,59 @@ static void amdgpu_ttm_pools_fini(struct amdgpu_device *adev) adev->mman.ttm_pools = NULL; } +/** + * amdgpu_ttm_mmio_remap_bo_init - Allocate the singleton 4K MMIO_REMAP BO + * @adev: amdgpu device + * + * Allocates a one-page (4K) GEM BO in AMDGPU_GEM_DOMAIN_MMIO_REMAP when the + * hardware exposes a remap base (adev->rmmio_remap.bus_addr) and the host + * PAGE_SIZE is <= AMDGPU_GPU_PAGE_SIZE (4K). The BO is created as a regular + * GEM object (amdgpu_bo_create). + * + * Return: + * * 0 on success or intentional skip (feature not present/unsupported) + * * negative errno on allocation failure + */ +static int amdgpu_ttm_mmio_remap_bo_init(struct amdgpu_device *adev) +{ + struct amdgpu_bo_param bp; + int r; + + /* Skip if HW doesn't expose remap, or if PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE (4K). */ + if (!adev->rmmio_remap.bus_addr || PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE) + return 0; + + memset(&bp, 0, sizeof(bp)); + + /* Create exactly one GEM BO in the MMIO_REMAP domain. 
*/ + bp.type = ttm_bo_type_device; /* userspace-mappable GEM */ + bp.size = AMDGPU_GPU_PAGE_SIZE; /* 4K */ + bp.byte_align = AMDGPU_GPU_PAGE_SIZE; + bp.domain = AMDGPU_GEM_DOMAIN_MMIO_REMAP; + bp.flags = 0; + bp.resv = NULL; + bp.bo_ptr_size = sizeof(struct amdgpu_bo); + + r = amdgpu_bo_create(adev, &bp, &adev->rmmio_remap.bo); + if (r) + return r; + + return 0; +} + +/** + * amdgpu_ttm_mmio_remap_bo_fini - Free the singleton MMIO_REMAP BO + * @adev: amdgpu device + * + * Frees the kernel-owned MMIO_REMAP BO if it was allocated by + * amdgpu_ttm_mmio_remap_bo_init(). + */ +static void amdgpu_ttm_mmio_remap_bo_fini(struct amdgpu_device *adev) +{ + amdgpu_bo_unref(&adev->rmmio_remap.bo); + adev->rmmio_remap.bo = NULL; +} + /* * amdgpu_ttm_init - Init the memory management (ttm) as well as various * gtt/vram related fields. @@ -1864,22 +1933,25 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) adev->need_swiotlb, dma_addressing_limited(adev->dev)); if (r) { - DRM_ERROR("failed initializing buffer object driver(%d).\n", r); + dev_err(adev->dev, + "failed initializing buffer object driver(%d).\n", r); return r; } r = amdgpu_ttm_pools_init(adev); if (r) { - DRM_ERROR("failed to init ttm pools(%d).\n", r); + dev_err(adev->dev, "failed to init ttm pools(%d).\n", r); return r; } adev->mman.initialized = true; - /* Initialize VRAM pool with all of VRAM divided into pages */ - r = amdgpu_vram_mgr_init(adev); - if (r) { - DRM_ERROR("Failed initializing VRAM heap.\n"); - return r; + if (!adev->gmc.is_app_apu) { + /* Initialize VRAM pool with all of VRAM divided into pages */ + r = amdgpu_vram_mgr_init(adev); + if (r) { + dev_err(adev->dev, "Failed initializing VRAM heap.\n"); + return r; + } } /* Change the size here instead of the init above so only lpfn is affected */ @@ -1958,7 +2030,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) DRM_DEBUG_DRIVER("Skipped stolen memory reservation\n"); } - DRM_INFO("amdgpu: %uM of VRAM memory ready\n", + dev_info(adev->dev, "amdgpu: %uM of VRAM memory ready\n", (unsigned int)(adev->gmc.real_vram_size / (1024 * 1024))); /* Compute GTT size, either based on TTM limit @@ -1981,10 +2053,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) /* Initialize GTT memory pool */ r = amdgpu_gtt_mgr_init(adev, gtt_size); if (r) { - DRM_ERROR("Failed initializing GTT heap.\n"); + dev_err(adev->dev, "Failed initializing GTT heap.\n"); return r; } - DRM_INFO("amdgpu: %uM of GTT memory ready.\n", + dev_info(adev->dev, "amdgpu: %uM of GTT memory ready.\n", (unsigned int)(gtt_size / (1024 * 1024))); if (adev->flags & AMD_IS_APU) { @@ -1995,40 +2067,52 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) /* Initialize doorbell pool on PCI BAR */ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE); if (r) { - DRM_ERROR("Failed initializing doorbell heap.\n"); + dev_err(adev->dev, "Failed initializing doorbell heap.\n"); return r; } /* Create a boorbell page for kernel usages */ r = amdgpu_doorbell_create_kernel_doorbells(adev); if (r) { - DRM_ERROR("Failed to initialize kernel doorbells.\n"); + dev_err(adev->dev, "Failed to initialize kernel doorbells.\n"); + return r; + } + + /* Initialize MMIO-remap pool (single page 4K) */ + r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_MMIO_REMAP, 1); + if (r) { + dev_err(adev->dev, "Failed initializing MMIO-remap heap.\n"); return r; } + /* Allocate the singleton MMIO_REMAP BO (4K) if supported */ + r = amdgpu_ttm_mmio_remap_bo_init(adev); + if (r) + return r; + /* Initialize preemptible memory pool */ r = 
amdgpu_preempt_mgr_init(adev); if (r) { - DRM_ERROR("Failed initializing PREEMPT heap.\n"); + dev_err(adev->dev, "Failed initializing PREEMPT heap.\n"); return r; } /* Initialize various on-chip memory pools */ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size); if (r) { - DRM_ERROR("Failed initializing GDS heap.\n"); + dev_err(adev->dev, "Failed initializing GDS heap.\n"); return r; } r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size); if (r) { - DRM_ERROR("Failed initializing gws heap.\n"); + dev_err(adev->dev, "Failed initializing gws heap.\n"); return r; } r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size); if (r) { - DRM_ERROR("Failed initializing oa heap.\n"); + dev_err(adev->dev, "Failed initializing oa heap.\n"); return r; } if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE, @@ -2060,12 +2144,16 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) /* return the FW reserved memory back to VRAM */ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL); + amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL, + NULL); if (adev->mman.stolen_reserved_size) amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory, NULL, NULL); } amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL, &adev->mman.sdma_access_ptr); + + amdgpu_ttm_mmio_remap_bo_fini(adev); amdgpu_ttm_fw_reserve_vram_fini(adev); amdgpu_ttm_drv_reserve_vram_fini(adev); @@ -2078,7 +2166,8 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) drm_dev_exit(idx); } - amdgpu_vram_mgr_fini(adev); + if (!adev->gmc.is_app_apu) + amdgpu_vram_mgr_fini(adev); amdgpu_gtt_mgr_fini(adev); amdgpu_preempt_mgr_fini(adev); amdgpu_doorbell_fini(adev); @@ -2087,9 +2176,10 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS); ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA); ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL); + ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_MMIO_REMAP); ttm_device_fini(&adev->mman.bdev); adev->mman.initialized = false; - DRM_INFO("amdgpu: ttm finalized\n"); + dev_info(adev->dev, "amdgpu: ttm finalized\n"); } /** @@ -2121,8 +2211,9 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) DRM_SCHED_PRIORITY_KERNEL, &sched, 1, NULL); if (r) { - DRM_ERROR("Failed setting up TTM BO move entity (%d)\n", - r); + dev_err(adev->dev, + "Failed setting up TTM BO move entity (%d)\n", + r); return; } @@ -2130,8 +2221,9 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) DRM_SCHED_PRIORITY_NORMAL, &sched, 1, NULL); if (r) { - DRM_ERROR("Failed setting up TTM BO move entity (%d)\n", - r); + dev_err(adev->dev, + "Failed setting up TTM BO move entity (%d)\n", + r); goto error_free_entity; } } else { @@ -2161,7 +2253,7 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev, struct dma_resv *resv, bool vm_needs_flush, struct amdgpu_job **job, - bool delayed) + bool delayed, u64 k_job_id) { enum amdgpu_ib_pool_type pool = direct_submit ? 
AMDGPU_IB_POOL_DIRECT : @@ -2171,7 +2263,7 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev, &adev->mman.high_pr; r = amdgpu_job_alloc_with_ib(adev, entity, AMDGPU_FENCE_OWNER_UNDEFINED, - num_dw * 4, pool, job); + num_dw * 4, pool, job, k_job_id); if (r) return r; @@ -2202,7 +2294,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, int r; if (!direct_submit && !ring->sched.ready) { - DRM_ERROR("Trying to move memory with ring turned off.\n"); + dev_err(adev->dev, + "Trying to move memory with ring turned off.\n"); return -EINVAL; } @@ -2210,7 +2303,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, num_loops = DIV_ROUND_UP(byte_count, max_bytes); num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8); r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw, - resv, vm_needs_flush, &job, false); + resv, vm_needs_flush, &job, false, + AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER); if (r) return r; @@ -2237,7 +2331,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, error_free: amdgpu_job_free(job); - DRM_ERROR("Error scheduling IBs (%d)\n", r); + dev_err(adev->dev, "Error scheduling IBs (%d)\n", r); return r; } @@ -2245,7 +2339,8 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data, uint64_t dst_addr, uint32_t byte_count, struct dma_resv *resv, struct dma_fence **fence, - bool vm_needs_flush, bool delayed) + bool vm_needs_flush, bool delayed, + u64 k_job_id) { struct amdgpu_device *adev = ring->adev; unsigned int num_loops, num_dw; @@ -2258,7 +2353,7 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data, num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes); num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8); r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush, - &job, delayed); + &job, delayed, k_job_id); if (r) return r; @@ -2328,7 +2423,8 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo, goto err; r = amdgpu_ttm_fill_mem(ring, 0, addr, size, resv, - &next, true, true); + &next, true, true, + AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER); if (r) goto err; @@ -2347,7 +2443,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, uint32_t src_data, struct dma_resv *resv, struct dma_fence **f, - bool delayed) + bool delayed, + u64 k_job_id) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; @@ -2356,7 +2453,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, int r; if (!adev->mman.buffer_funcs_enabled) { - DRM_ERROR("Trying to clear memory with ring turned off.\n"); + dev_err(adev->dev, + "Trying to clear memory with ring turned off.\n"); return -EINVAL; } @@ -2376,7 +2474,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, goto error; r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv, - &next, true, delayed); + &next, true, delayed, k_job_id); if (r) goto error; @@ -2416,7 +2514,7 @@ int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type) man = ttm_manager_type(&adev->mman.bdev, mem_type); break; default: - DRM_ERROR("Trying to evict invalid memory type\n"); + dev_err(adev->dev, "Trying to evict invalid memory type\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 450e4bf093b7..0be2728aa872 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -34,7 +34,8 @@ #define AMDGPU_PL_OA (TTM_PL_PRIV + 2) #define AMDGPU_PL_PREEMPT 
(TTM_PL_PRIV + 3) #define AMDGPU_PL_DOORBELL (TTM_PL_PRIV + 4) -#define __AMDGPU_PL_NUM (TTM_PL_PRIV + 5) +#define AMDGPU_PL_MMIO_REMAP (TTM_PL_PRIV + 5) +#define __AMDGPU_PL_NUM (TTM_PL_PRIV + 6) #define AMDGPU_GTT_MAX_TRANSFER_SIZE 512 #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2 @@ -86,6 +87,7 @@ struct amdgpu_mman { uint32_t discovery_tmr_size; /* fw reserved memory */ struct amdgpu_bo *fw_reserved_memory; + struct amdgpu_bo *fw_reserved_memory_extend; /* firmware VRAM reservation */ u64 fw_vram_usage_start_offset; @@ -181,14 +183,15 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, uint32_t src_data, struct dma_resv *resv, struct dma_fence **fence, - bool delayed); + bool delayed, + u64 k_job_id); int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo); void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo); uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type); #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) -int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages, +int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct hmm_range **range); void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm, struct hmm_range *range); @@ -196,7 +199,6 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, struct hmm_range *range); #else static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, - struct page **pages, struct hmm_range **range) { return -EPERM; @@ -212,7 +214,7 @@ static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, } #endif -void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages); +void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct hmm_range *range); int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo, uint64_t *user_addr); int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index eaddc441c51a..e96f24e9ad57 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -32,6 +32,7 @@ static const struct kicker_device kicker_device_list[] = { {0x744B, 0x00}, + {0x7551, 0xC8} }; static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr) @@ -1159,6 +1160,9 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM; } + if (amdgpu_virt_xgmi_migrate_enabled(adev) && adev->firmware.fw_buf) + adev->firmware.fw_buf_mc = amdgpu_bo_fb_aper_addr(adev->firmware.fw_buf); + for (i = 0; i < adev->firmware.max_ucodes; i++) { ucode = &adev->firmware.ucode[i]; if (ucode->fw) { @@ -1397,8 +1401,8 @@ bool amdgpu_is_kicker_fw(struct amdgpu_device *adev) for (i = 0; i < ARRAY_SIZE(kicker_device_list); i++) { if (adev->pdev->device == kicker_device_list[i].device && - adev->pdev->revision == kicker_device_list[i].revision) - return true; + adev->pdev->revision == kicker_device_list[i].revision) + return true; } return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index c92b8794aa73..2e039fb778ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -252,6 +252,7 @@ int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev, block, pasid, pasid_fn, data, reset); if (!ret) { atomic_inc(&con->page_retirement_req_cnt); + atomic_inc(&con->poison_consumption_count); wake_up(&con->page_retirement_wq); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c index 295e7186e156..48e0932f5b62 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c @@ -44,8 +44,41 @@ u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev) return userq_ip_mask; } +int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr, + u64 expected_size) +{ + struct amdgpu_bo_va_mapping *va_map; + u64 user_addr; + u64 size; + int r = 0; + + user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT; + size = expected_size >> AMDGPU_GPU_PAGE_SHIFT; + + r = amdgpu_bo_reserve(vm->root.bo, false); + if (r) + return r; + + va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr); + if (!va_map) { + r = -EINVAL; + goto out_err; + } + /* Only validate the userq whether resident in the VM mapping range */ + if (user_addr >= va_map->start && + va_map->last - user_addr + 1 >= size) { + amdgpu_bo_unreserve(vm->root.bo); + return 0; + } + + r = -EINVAL; +out_err: + amdgpu_bo_unreserve(vm->root.bo); + return r; +} + static int -amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr, +amdgpu_userq_preempt_helper(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_queue *queue) { struct amdgpu_device *adev = uq_mgr->adev; @@ -54,6 +87,49 @@ amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr, int r = 0; if (queue->state == AMDGPU_USERQ_STATE_MAPPED) { + r = userq_funcs->preempt(uq_mgr, queue); + if (r) { + queue->state = AMDGPU_USERQ_STATE_HUNG; + } else { + queue->state = AMDGPU_USERQ_STATE_PREEMPTED; + } + } + + return r; +} + +static int +amdgpu_userq_restore_helper(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_usermode_queue *queue) +{ + struct amdgpu_device *adev = uq_mgr->adev; + const struct amdgpu_userq_funcs *userq_funcs = + adev->userq_funcs[queue->queue_type]; + int r = 0; + + if (queue->state == AMDGPU_USERQ_STATE_PREEMPTED) { + r = userq_funcs->restore(uq_mgr, queue); + if (r) { + queue->state = AMDGPU_USERQ_STATE_HUNG; + } else { + queue->state = AMDGPU_USERQ_STATE_MAPPED; + } + } + + return r; +} + +static int +amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_usermode_queue *queue) +{ + struct amdgpu_device *adev = uq_mgr->adev; + const struct amdgpu_userq_funcs *userq_funcs = + adev->userq_funcs[queue->queue_type]; + int r = 0; + + if ((queue->state == AMDGPU_USERQ_STATE_MAPPED) || + (queue->state == AMDGPU_USERQ_STATE_PREEMPTED)) { r = userq_funcs->unmap(uq_mgr, queue); if (r) queue->state = AMDGPU_USERQ_STATE_HUNG; @@ -112,22 +188,6 @@ amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr, kfree(queue); } -int -amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr) -{ - struct amdgpu_usermode_queue *queue; - int queue_id; - int ret = 0; - - mutex_lock(&uq_mgr->userq_mutex); - /* Resume all the queues for this process */ - idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) - ret += queue->state == AMDGPU_USERQ_STATE_MAPPED; - - mutex_unlock(&uq_mgr->userq_mutex); - return ret; -} - static struct amdgpu_usermode_queue * amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid) { @@ -318,7 +378,16 @@ amdgpu_userq_destroy(struct drm_file *filp, int queue_id) amdgpu_bo_unreserve(queue->db_obj.obj); } amdgpu_bo_unref(&queue->db_obj.obj); + +#if defined(CONFIG_DEBUG_FS) + debugfs_remove_recursive(queue->debugfs_queue); +#endif r = amdgpu_userq_unmap_helper(uq_mgr, queue); + /*TODO: It requires a reset for userq hw unmap error*/ + if (unlikely(r != AMDGPU_USERQ_STATE_UNMAPPED)) { + drm_warn(adev_to_drm(uq_mgr->adev), "trying 
to destroy a HW mapping userq\n"); + queue->state = AMDGPU_USERQ_STATE_HUNG; + } amdgpu_userq_cleanup(uq_mgr, queue, queue_id); mutex_unlock(&uq_mgr->userq_mutex); @@ -343,6 +412,46 @@ static int amdgpu_userq_priority_permit(struct drm_file *filp, return -EACCES; } +#if defined(CONFIG_DEBUG_FS) +static int amdgpu_mqd_info_read(struct seq_file *m, void *unused) +{ + struct amdgpu_usermode_queue *queue = m->private; + struct amdgpu_bo *bo; + int r; + + if (!queue || !queue->mqd.obj) + return -EINVAL; + + bo = amdgpu_bo_ref(queue->mqd.obj); + r = amdgpu_bo_reserve(bo, true); + if (r) { + amdgpu_bo_unref(&bo); + return -EINVAL; + } + + seq_printf(m, "queue_type: %d\n", queue->queue_type); + seq_printf(m, "mqd_gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(queue->mqd.obj)); + + amdgpu_bo_unreserve(bo); + amdgpu_bo_unref(&bo); + + return 0; +} + +static int amdgpu_mqd_info_open(struct inode *inode, struct file *file) +{ + return single_open(file, amdgpu_mqd_info_read, inode->i_private); +} + +static const struct file_operations amdgpu_mqd_info_fops = { + .owner = THIS_MODULE, + .open = amdgpu_mqd_info_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + static int amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) { @@ -352,6 +461,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) const struct amdgpu_userq_funcs *uq_funcs; struct amdgpu_usermode_queue *queue; struct amdgpu_db_info db_info; + char *queue_name; bool skip_map_queue; uint64_t index; int qid, r = 0; @@ -359,27 +469,10 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) (args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >> AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT; - /* Usermode queues are only supported for GFX IP as of now */ - if (args->in.ip_type != AMDGPU_HW_IP_GFX && - args->in.ip_type != AMDGPU_HW_IP_DMA && - args->in.ip_type != AMDGPU_HW_IP_COMPUTE) { - drm_file_err(uq_mgr->file, "Usermode queue doesn't support IP type %u\n", - args->in.ip_type); - return -EINVAL; - } - r = amdgpu_userq_priority_permit(filp, priority); if (r) return r; - if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) && - (args->in.ip_type != AMDGPU_HW_IP_GFX) && - (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) && - !amdgpu_is_tmz(adev)) { - drm_file_err(uq_mgr->file, "Secure only supported on GFX/Compute queues\n"); - return -EINVAL; - } - r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n"); @@ -411,6 +504,15 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) r = -ENOMEM; goto unlock; } + + /* Validate the userq virtual address.*/ + if (amdgpu_userq_input_va_validate(&fpriv->vm, args->in.queue_va, args->in.queue_size) || + amdgpu_userq_input_va_validate(&fpriv->vm, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) || + amdgpu_userq_input_va_validate(&fpriv->vm, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) { + r = -EINVAL; + kfree(queue); + goto unlock; + } queue->doorbell_handle = args->in.doorbell_handle; queue->queue_type = args->in.ip_type; queue->vm = &fpriv->vm; @@ -426,6 +528,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) if (index == (uint64_t)-EINVAL) { drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n"); kfree(queue); + r = -EINVAL; goto unlock; } @@ -475,6 +578,18 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) } } + queue_name = 
kasprintf(GFP_KERNEL, "queue-%d", qid); + if (!queue_name) { + r = -ENOMEM; + goto unlock; + } + +#if defined(CONFIG_DEBUG_FS) + /* Queue dentry per client to hold MQD information */ + queue->debugfs_queue = debugfs_create_dir(queue_name, filp->debugfs_client); + debugfs_create_file("mqd_info", 0444, queue->debugfs_queue, queue, &amdgpu_mqd_info_fops); +#endif + kfree(queue_name); args->out.queue_id = qid; @@ -485,22 +600,45 @@ unlock: return r; } -int amdgpu_userq_ioctl(struct drm_device *dev, void *data, - struct drm_file *filp) +static int amdgpu_userq_input_args_validate(struct drm_device *dev, + union drm_amdgpu_userq *args, + struct drm_file *filp) { - union drm_amdgpu_userq *args = data; - int r; + struct amdgpu_device *adev = drm_to_adev(dev); switch (args->in.op) { case AMDGPU_USERQ_OP_CREATE: if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK | AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE)) return -EINVAL; - r = amdgpu_userq_create(filp, args); - if (r) - drm_file_err(filp, "Failed to create usermode queue\n"); - break; + /* Usermode queues are only supported for GFX IP as of now */ + if (args->in.ip_type != AMDGPU_HW_IP_GFX && + args->in.ip_type != AMDGPU_HW_IP_DMA && + args->in.ip_type != AMDGPU_HW_IP_COMPUTE) { + drm_file_err(filp, "Usermode queue doesn't support IP type %u\n", + args->in.ip_type); + return -EINVAL; + } + + if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) && + (args->in.ip_type != AMDGPU_HW_IP_GFX) && + (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) && + !amdgpu_is_tmz(adev)) { + drm_file_err(filp, "Secure only supported on GFX/Compute queues\n"); + return -EINVAL; + } + if (args->in.queue_va == AMDGPU_BO_INVALID_OFFSET || + args->in.queue_va == 0 || + args->in.queue_size == 0) { + drm_file_err(filp, "invalidate userq queue va or size\n"); + return -EINVAL; + } + if (!args->in.wptr_va || !args->in.rptr_va) { + drm_file_err(filp, "invalidate userq queue rptr or wptr\n"); + return -EINVAL; + } + break; case AMDGPU_USERQ_OP_FREE: if (args->in.ip_type || args->in.doorbell_handle || @@ -510,10 +648,34 @@ int amdgpu_userq_ioctl(struct drm_device *dev, void *data, args->in.queue_size || args->in.rptr_va || args->in.wptr_va || - args->in.wptr_va || args->in.mqd || args->in.mqd_size) return -EINVAL; + break; + default: + return -EINVAL; + } + + return 0; +} + +int amdgpu_userq_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp) +{ + union drm_amdgpu_userq *args = data; + int r; + + if (amdgpu_userq_input_args_validate(dev, args, filp) < 0) + return -EINVAL; + + switch (args->in.op) { + case AMDGPU_USERQ_OP_CREATE: + r = amdgpu_userq_create(filp, args); + if (r) + drm_file_err(filp, "Failed to create usermode queue\n"); + break; + + case AMDGPU_USERQ_OP_FREE: r = amdgpu_userq_destroy(filp, args->in.queue_id); if (r) drm_file_err(filp, "Failed to destroy usermode queue\n"); @@ -536,7 +698,7 @@ amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr) /* Resume all the queues for this process */ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) { - r = amdgpu_userq_map_helper(uq_mgr, queue); + r = amdgpu_userq_restore_helper(uq_mgr, queue); if (r) ret = r; } @@ -546,108 +708,106 @@ amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr) return ret; } +static int amdgpu_userq_validate_vm(void *param, struct amdgpu_bo *bo) +{ + struct ttm_operation_ctx ctx = { false, false }; + + amdgpu_bo_placement_from_domain(bo, bo->allowed_domains); + return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); +} + +/* Handle all BOs on the 
invalidated list, validate them and update the PTs */ static int -amdgpu_userq_validate_vm_bo(void *_unused, struct amdgpu_bo *bo) +amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec, + struct amdgpu_vm *vm) { struct ttm_operation_ctx ctx = { false, false }; + struct amdgpu_bo_va *bo_va; + struct amdgpu_bo *bo; int ret; - amdgpu_bo_placement_from_domain(bo, bo->allowed_domains); + spin_lock(&vm->invalidated_lock); + while (!list_empty(&vm->invalidated)) { + bo_va = list_first_entry(&vm->invalidated, + struct amdgpu_bo_va, + base.vm_status); + spin_unlock(&vm->invalidated_lock); - ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - if (ret) - DRM_ERROR("Fail to validate\n"); + bo = bo_va->base.bo; + ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 2); + if (unlikely(ret)) + return ret; - return ret; + amdgpu_bo_placement_from_domain(bo, bo->allowed_domains); + ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (ret) + return ret; + + /* This moves the bo_va to the done list */ + ret = amdgpu_vm_bo_update(adev, bo_va, false); + if (ret) + return ret; + + spin_lock(&vm->invalidated_lock); + } + spin_unlock(&vm->invalidated_lock); + + return 0; } +/* Make sure the whole VM is ready to be used */ static int -amdgpu_userq_validate_bos(struct amdgpu_userq_mgr *uq_mgr) +amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr) { struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr); - struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_device *adev = uq_mgr->adev; + struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_bo_va *bo_va; - struct ww_acquire_ctx *ticket; struct drm_exec exec; - struct amdgpu_bo *bo; - struct dma_resv *resv; - bool clear, unlock; - int ret = 0; + int ret; drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0); drm_exec_until_all_locked(&exec) { - ret = amdgpu_vm_lock_pd(vm, &exec, 2); + ret = amdgpu_vm_lock_pd(vm, &exec, 1); drm_exec_retry_on_contention(&exec); - if (unlikely(ret)) { - drm_file_err(uq_mgr->file, "Failed to lock PD\n"); + if (unlikely(ret)) goto unlock_all; - } - - /* Lock the done list */ - list_for_each_entry(bo_va, &vm->done, base.vm_status) { - bo = bo_va->base.bo; - if (!bo) - continue; - - ret = drm_exec_lock_obj(&exec, &bo->tbo.base); - drm_exec_retry_on_contention(&exec); - if (unlikely(ret)) - goto unlock_all; - } - } - - spin_lock(&vm->status_lock); - while (!list_empty(&vm->moved)) { - bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va, - base.vm_status); - spin_unlock(&vm->status_lock); - /* Per VM BOs never need to bo cleared in the page tables */ - ret = amdgpu_vm_bo_update(adev, bo_va, false); - if (ret) + ret = amdgpu_vm_lock_done_list(vm, &exec, 1); + drm_exec_retry_on_contention(&exec); + if (unlikely(ret)) goto unlock_all; - spin_lock(&vm->status_lock); - } - - ticket = &exec.ticket; - while (!list_empty(&vm->invalidated)) { - bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, - base.vm_status); - resv = bo_va->base.bo->tbo.base.resv; - spin_unlock(&vm->status_lock); - bo = bo_va->base.bo; - ret = amdgpu_userq_validate_vm_bo(NULL, bo); - if (ret) { - drm_file_err(uq_mgr->file, "Failed to validate BO\n"); + /* This validates PDs, PTs and per VM BOs */ + ret = amdgpu_vm_validate(adev, vm, NULL, + amdgpu_userq_validate_vm, + NULL); + if (unlikely(ret)) goto unlock_all; - } - /* Try to reserve the BO to avoid clearing its ptes */ - if (!adev->debug_vm && dma_resv_trylock(resv)) { - clear = false; - unlock = true; - /* The caller is already holding the reservation lock */ - } else if 
(dma_resv_locking_ctx(resv) == ticket) { - clear = false; - unlock = false; - /* Somebody else is using the BO right now */ - } else { - clear = true; - unlock = false; - } + /* This locks and validates the remaining evicted BOs */ + ret = amdgpu_userq_bo_validate(adev, &exec, vm); + drm_exec_retry_on_contention(&exec); + if (unlikely(ret)) + goto unlock_all; + } - ret = amdgpu_vm_bo_update(adev, bo_va, clear); + ret = amdgpu_vm_handle_moved(adev, vm, NULL); + if (ret) + goto unlock_all; - if (unlock) - dma_resv_unlock(resv); - if (ret) - goto unlock_all; + ret = amdgpu_vm_update_pdes(adev, vm, false); + if (ret) + goto unlock_all; - spin_lock(&vm->status_lock); - } - spin_unlock(&vm->status_lock); + /* + * We need to wait for all VM updates to finish before restarting the + * queues. Using the done list like that is now ok since everything is + * locked in place. + */ + list_for_each_entry(bo_va, &vm->done, base.vm_status) + dma_fence_wait(bo_va->last_pt_update, false); + dma_fence_wait(vm->last_update, false); ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec); if (ret) @@ -664,11 +824,11 @@ static void amdgpu_userq_restore_worker(struct work_struct *work) struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr); int ret; - flush_work(&fpriv->evf_mgr.suspend_work.work); + flush_delayed_work(&fpriv->evf_mgr.suspend_work); mutex_lock(&uq_mgr->userq_mutex); - ret = amdgpu_userq_validate_bos(uq_mgr); + ret = amdgpu_userq_vm_validate(uq_mgr); if (ret) { drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n"); goto unlock; @@ -693,7 +853,7 @@ amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr) /* Try to unmap all the queues in this process ctx */ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) { - r = amdgpu_userq_unmap_helper(uq_mgr, queue); + r = amdgpu_userq_preempt_helper(uq_mgr, queue); if (r) ret = r; } @@ -819,7 +979,10 @@ int amdgpu_userq_suspend(struct amdgpu_device *adev) cancel_delayed_work_sync(&uqm->resume_work); mutex_lock(&uqm->userq_mutex); idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { - r = amdgpu_userq_unmap_helper(uqm, queue); + if (adev->in_s0ix) + r = amdgpu_userq_preempt_helper(uqm, queue); + else + r = amdgpu_userq_unmap_helper(uqm, queue); if (r) ret = r; } @@ -844,7 +1007,10 @@ int amdgpu_userq_resume(struct amdgpu_device *adev) list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { mutex_lock(&uqm->userq_mutex); idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { - r = amdgpu_userq_map_helper(uqm, queue); + if (adev->in_s0ix) + r = amdgpu_userq_restore_helper(uqm, queue); + else + r = amdgpu_userq_map_helper(uqm, queue); if (r) ret = r; } @@ -878,7 +1044,7 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev, if (((queue->queue_type == AMDGPU_HW_IP_GFX) || (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) && (queue->xcp_id == idx)) { - r = amdgpu_userq_unmap_helper(uqm, queue); + r = amdgpu_userq_preempt_helper(uqm, queue); if (r) ret = r; } @@ -912,7 +1078,7 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev, if (((queue->queue_type == AMDGPU_HW_IP_GFX) || (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) && (queue->xcp_id == idx)) { - r = amdgpu_userq_map_helper(uqm, queue); + r = amdgpu_userq_restore_helper(uqm, queue); if (r) ret = r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h index ec040c2fd6c9..c027dd916672 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h @@ -65,6 +65,7 @@ struct amdgpu_usermode_queue { struct dma_fence *last_fence; u32 xcp_id; int priority; + struct dentry *debugfs_queue; }; struct amdgpu_userq_funcs { @@ -77,6 +78,12 @@ struct amdgpu_userq_funcs { struct amdgpu_usermode_queue *queue); int (*map)(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_queue *queue); + int (*preempt)(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_usermode_queue *queue); + int (*restore)(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_usermode_queue *queue); + int (*detect_and_reset)(struct amdgpu_device *adev, + int queue_type); }; /* Usermode queues for gfx */ @@ -113,8 +120,6 @@ void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr, void amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_eviction_fence *ev_fence); -int amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr); - void amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *userq_mgr, struct amdgpu_eviction_fence_mgr *evf_mgr); @@ -132,4 +137,6 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev, int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev, u32 idx); +int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr, + u64 expected_size); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c index a86616c6deef..761bad98da3e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c @@ -67,6 +67,14 @@ static u64 amdgpu_userq_fence_read(struct amdgpu_userq_fence_driver *fence_drv) return le64_to_cpu(*fence_drv->cpu_addr); } +static void +amdgpu_userq_fence_write(struct amdgpu_userq_fence_driver *fence_drv, + u64 seq) +{ + if (fence_drv->cpu_addr) + *fence_drv->cpu_addr = cpu_to_le64(seq); +} + int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev, struct amdgpu_usermode_queue *userq) { @@ -239,8 +247,8 @@ static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq, fence = &userq_fence->base; userq_fence->fence_drv = fence_drv; - dma_fence_init(fence, &amdgpu_userq_fence_ops, &userq_fence->lock, - fence_drv->context, seq); + dma_fence_init64(fence, &amdgpu_userq_fence_ops, &userq_fence->lock, + fence_drv->context, seq); amdgpu_userq_fence_driver_get(fence_drv); dma_fence_get(fence); @@ -276,7 +284,7 @@ static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq, /* Check if hardware has already processed the job */ spin_lock_irqsave(&fence_drv->fence_list_lock, flags); - if (!dma_fence_is_signaled_locked(fence)) + if (!dma_fence_is_signaled(fence)) list_add_tail(&userq_fence->link, &fence_drv->fences); else dma_fence_put(fence); @@ -334,7 +342,6 @@ static void amdgpu_userq_fence_release(struct dma_fence *f) } static const struct dma_fence_ops amdgpu_userq_fence_ops = { - .use_64bit_seqno = true, .get_driver_name = amdgpu_userq_fence_get_driver_name, .get_timeline_name = amdgpu_userq_fence_get_timeline_name, .signaled = amdgpu_userq_fence_signaled, @@ -409,6 +416,40 @@ static void amdgpu_userq_fence_cleanup(struct dma_fence *fence) dma_fence_put(fence); } +static void +amdgpu_userq_fence_driver_set_error(struct amdgpu_userq_fence *fence, + int error) +{ + struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv; + unsigned long flags; + struct dma_fence *f; + + spin_lock_irqsave(&fence_drv->fence_list_lock, flags); + + f = rcu_dereference_protected(&fence->base, + 
lockdep_is_held(&fence_drv->fence_list_lock)); + if (f && !dma_fence_is_signaled_locked(f)) + dma_fence_set_error(f, error); + spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags); +} + +void +amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq) +{ + struct dma_fence *f = userq->last_fence; + + if (f) { + struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f); + struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv; + u64 wptr = fence->base.seqno; + + amdgpu_userq_fence_driver_set_error(fence, -ECANCELED); + amdgpu_userq_fence_write(fence_drv, wptr); + amdgpu_userq_fence_driver_process(fence_drv); + + } +} + int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h index 97a125ab8a78..d76add2afc77 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h @@ -67,6 +67,7 @@ int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev, struct amdgpu_usermode_queue *userq); void amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq); void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv); +void amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq); void amdgpu_userq_fence_driver_destroy(struct kref *ref); int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h new file mode 100644 index 000000000000..1e40ca3b1584 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef AMDGPU_UTILS_H_ +#define AMDGPU_UTILS_H_ + +/* ---------- Generic 2‑bit capability attribute encoding ---------- + * 00 INVALID, 01 RO, 10 WO, 11 RW + */ +enum amdgpu_cap_attr { + AMDGPU_CAP_ATTR_INVALID = 0, + AMDGPU_CAP_ATTR_RO = 1 << 0, + AMDGPU_CAP_ATTR_WO = 1 << 1, + AMDGPU_CAP_ATTR_RW = (AMDGPU_CAP_ATTR_RO | AMDGPU_CAP_ATTR_WO), +}; + +#define AMDGPU_CAP_ATTR_BITS 2 +#define AMDGPU_CAP_ATTR_MAX ((1U << AMDGPU_CAP_ATTR_BITS) - 1) + +/* Internal helper to build helpers for a given enum NAME */ +#define DECLARE_ATTR_CAP_CLASS_HELPERS(NAME) \ +enum { NAME##_BITMAP_BITS = NAME##_COUNT * AMDGPU_CAP_ATTR_BITS }; \ +struct NAME##_caps { \ + DECLARE_BITMAP(bmap, NAME##_BITMAP_BITS); \ +}; \ +static inline unsigned int NAME##_ATTR_START(enum NAME##_cap_id cap) \ +{ return (unsigned int)cap * AMDGPU_CAP_ATTR_BITS; } \ +static inline void NAME##_attr_init(struct NAME##_caps *c) \ +{ if (c) bitmap_zero(c->bmap, NAME##_BITMAP_BITS); } \ +static inline int NAME##_attr_set(struct NAME##_caps *c, \ + enum NAME##_cap_id cap, enum amdgpu_cap_attr attr) \ +{ \ + if (!c) \ + return -EINVAL; \ + if (cap >= NAME##_COUNT) \ + return -EINVAL; \ + if ((unsigned int)attr > AMDGPU_CAP_ATTR_MAX) \ + return -EINVAL; \ + bitmap_write(c->bmap, (unsigned long)attr, \ + NAME##_ATTR_START(cap), AMDGPU_CAP_ATTR_BITS); \ + return 0; \ +} \ +static inline int NAME##_attr_get(const struct NAME##_caps *c, \ + enum NAME##_cap_id cap, enum amdgpu_cap_attr *out) \ +{ \ + unsigned long v; \ + if (!c || !out) \ + return -EINVAL; \ + if (cap >= NAME##_COUNT) \ + return -EINVAL; \ + v = bitmap_read(c->bmap, NAME##_ATTR_START(cap), AMDGPU_CAP_ATTR_BITS); \ + *out = (enum amdgpu_cap_attr)v; \ + return 0; \ +} \ +static inline bool NAME##_cap_is_ro(const struct NAME##_caps *c, enum NAME##_cap_id id) \ +{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_RO; } \ +static inline bool NAME##_cap_is_wo(const struct NAME##_caps *c, enum NAME##_cap_id id) \ +{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_WO; } \ +static inline bool NAME##_cap_is_rw(const struct NAME##_caps *c, enum NAME##_cap_id id) \ +{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_RW; } + +/* Element expander for enum creation */ +#define _CAP_ENUM_ELEM(x) x, + +/* Public macro: declare enum + helpers from an X‑macro list */ +#define DECLARE_ATTR_CAP_CLASS(NAME, LIST_MACRO) \ + enum NAME##_cap_id { LIST_MACRO(_CAP_ENUM_ELEM) NAME##_COUNT }; \ + DECLARE_ATTR_CAP_CLASS_HELPERS(NAME) + +#endif /* AMDGPU_UTILS_H_ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 74758b5ffc6c..5c38f0d30c87 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1136,7 +1136,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity, AMDGPU_FENCE_OWNER_UNDEFINED, 64, direct ? 
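[Editor's illustrative sketch, not part of the patch: a usage example for the DECLARE_ATTR_CAP_CLASS() helpers defined in amdgpu_utils.h above. It uses the amdgpu_virt instantiation that appears later in this series (AMDGPU_VIRT_CAPS_LIST / AMDGPU_VIRT_CAP_POWER_LIMIT); the function example_power_limit_is_writable() is hypothetical.]

/*
 * DECLARE_ATTR_CAP_CLASS(amdgpu_virt, AMDGPU_VIRT_CAPS_LIST) generates
 * struct amdgpu_virt_caps plus amdgpu_virt_attr_init(),
 * amdgpu_virt_attr_set(), amdgpu_virt_attr_get() and the
 * amdgpu_virt_cap_is_ro/wo/rw() predicates used below.
 */
static bool example_power_limit_is_writable(struct amdgpu_virt_caps *caps)
{
	amdgpu_virt_attr_init(caps);

	/* mark the power-limit capability as read-write (2-bit attr 11) */
	if (amdgpu_virt_attr_set(caps, AMDGPU_VIRT_CAP_POWER_LIMIT,
				 AMDGPU_CAP_ATTR_RW))
		return false;

	return amdgpu_virt_cap_is_rw(caps, AMDGPU_VIRT_CAP_POWER_LIMIT);
}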
AMDGPU_IB_POOL_DIRECT : - AMDGPU_IB_POOL_DELAYED, &job); + AMDGPU_IB_POOL_DELAYED, &job, + AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index b9060bcd4806..ce318f5de047 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -449,7 +449,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity, AMDGPU_FENCE_OWNER_UNDEFINED, ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT, - &job); + &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); if (r) return r; @@ -540,7 +540,8 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, AMDGPU_FENCE_OWNER_UNDEFINED, ib_size_dw * 4, direct ? AMDGPU_IB_POOL_DIRECT : - AMDGPU_IB_POOL_DELAYED, &job); + AMDGPU_IB_POOL_DELAYED, &job, + AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index c8885c3d54b3..5e0786ea911b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -92,6 +92,7 @@ MODULE_FIRMWARE(FIRMWARE_VCN5_0_0); MODULE_FIRMWARE(FIRMWARE_VCN5_0_1); static void amdgpu_vcn_idle_work_handler(struct work_struct *work); +static void amdgpu_vcn_reg_dump_fini(struct amdgpu_device *adev); int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i) { @@ -134,6 +135,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i) mutex_init(&adev->vcn.inst[i].vcn1_jpeg1_workaround); mutex_init(&adev->vcn.inst[i].vcn_pg_lock); + mutex_init(&adev->vcn.inst[i].engine_reset_mutex); atomic_set(&adev->vcn.inst[i].total_submission_cnt, 0); INIT_DELAYED_WORK(&adev->vcn.inst[i].idle_work, amdgpu_vcn_idle_work_handler); atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0); @@ -183,16 +185,16 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i) dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf; vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf; dev_info(adev->dev, - "Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n", - enc_major, enc_minor, dec_ver, vep, fw_rev); + "[VCN instance %d] Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n", + i, enc_major, enc_minor, dec_ver, vep, fw_rev); } else { unsigned int version_major, version_minor, family_id; family_id = le32_to_cpu(hdr->ucode_version) & 0xff; version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff; version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff; - dev_info(adev->dev, "Found VCN firmware Version: %u.%u Family ID: %u\n", - version_major, version_minor, family_id); + dev_info(adev->dev, "[VCN instance %d] Found VCN firmware Version: %u.%u Family ID: %u\n", + i, version_major, version_minor, family_id); } bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE; @@ -255,12 +257,12 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i) return 0; } -int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i) +void amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i) { int j; if (adev->vcn.harvest_config & (1 << i)) - return 0; + return; amdgpu_bo_free_kernel( &adev->vcn.inst[i].dpg_sram_bo, @@ -284,10 +286,12 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i) amdgpu_ucode_release(&adev->vcn.inst[0].fw); adev->vcn.inst[i].fw = NULL; } + + if (adev->vcn.reg_list) + amdgpu_vcn_reg_dump_fini(adev); + 
mutex_destroy(&adev->vcn.inst[i].vcn_pg_lock); mutex_destroy(&adev->vcn.inst[i].vcn1_jpeg1_workaround); - - return 0; } bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance) @@ -351,8 +355,6 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i) if (adev->vcn.harvest_config & (1 << i)) return 0; - cancel_delayed_work_sync(&adev->vcn.inst[i].idle_work); - /* err_event_athub and dpc recovery will corrupt VCPU buffer, so we need to * restore fw data and clear buffer in amdgpu_vcn_resume() */ if (in_ras_intr || adev->pcie_reset_ctx.in_link_reset) @@ -404,6 +406,54 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev, int i) return 0; } +void amdgpu_vcn_get_profile(struct amdgpu_device *adev) +{ + int r; + + mutex_lock(&adev->vcn.workload_profile_mutex); + + if (adev->vcn.workload_profile_active) { + mutex_unlock(&adev->vcn.workload_profile_mutex); + return; + } + r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO, + true); + if (r) + dev_warn(adev->dev, + "(%d) failed to enable video power profile mode\n", r); + else + adev->vcn.workload_profile_active = true; + mutex_unlock(&adev->vcn.workload_profile_mutex); +} + +void amdgpu_vcn_put_profile(struct amdgpu_device *adev) +{ + bool pg = true; + int r, i; + + mutex_lock(&adev->vcn.workload_profile_mutex); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.inst[i].cur_state != AMD_PG_STATE_GATE) { + pg = false; + break; + } + } + + if (pg) { + r = amdgpu_dpm_switch_power_profile( + adev, PP_SMC_POWER_PROFILE_VIDEO, false); + if (r) + dev_warn( + adev->dev, + "(%d) failed to disable video power profile mode\n", + r); + else + adev->vcn.workload_profile_active = false; + } + + mutex_unlock(&adev->vcn.workload_profile_mutex); +} + static void amdgpu_vcn_idle_work_handler(struct work_struct *work) { struct amdgpu_vcn_inst *vcn_inst = @@ -411,7 +461,6 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) struct amdgpu_device *adev = vcn_inst->adev; unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0}; unsigned int i = vcn_inst->inst, j; - int r = 0; if (adev->vcn.harvest_config & (1 << i)) return; @@ -437,16 +486,11 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) fences += fence[i]; if (!fences && !atomic_read(&vcn_inst->total_submission_cnt)) { + mutex_lock(&vcn_inst->vcn_pg_lock); vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_GATE); - mutex_lock(&adev->vcn.workload_profile_mutex); - if (adev->vcn.workload_profile_active) { - r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO, - false); - if (r) - dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r); - adev->vcn.workload_profile_active = false; - } - mutex_unlock(&adev->vcn.workload_profile_mutex); + mutex_unlock(&vcn_inst->vcn_pg_lock); + amdgpu_vcn_put_profile(adev); + } else { schedule_delayed_work(&vcn_inst->idle_work, VCN_IDLE_TIMEOUT); } @@ -456,30 +500,11 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; struct amdgpu_vcn_inst *vcn_inst = &adev->vcn.inst[ring->me]; - int r = 0; atomic_inc(&vcn_inst->total_submission_cnt); cancel_delayed_work_sync(&vcn_inst->idle_work); - /* We can safely return early here because we've cancelled the - * the delayed work so there is no one else to set it to false - * and we don't care if someone else sets it to true. 
- */ - if (adev->vcn.workload_profile_active) - goto pg_lock; - - mutex_lock(&adev->vcn.workload_profile_mutex); - if (!adev->vcn.workload_profile_active) { - r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO, - true); - if (r) - dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r); - adev->vcn.workload_profile_active = true; - } - mutex_unlock(&adev->vcn.workload_profile_mutex); - -pg_lock: mutex_lock(&vcn_inst->vcn_pg_lock); vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_UNGATE); @@ -507,6 +532,7 @@ pg_lock: vcn_inst->pause_dpg_mode(vcn_inst, &new_state); } mutex_unlock(&vcn_inst->vcn_pg_lock); + amdgpu_vcn_get_profile(adev); } void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring) @@ -600,7 +626,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, 64, AMDGPU_IB_POOL_DIRECT, - &job); + &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); if (r) goto err; @@ -780,7 +806,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring, r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT, - &job); + &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); if (r) goto err; @@ -910,7 +936,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT, - &job); + &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); if (r) return r; @@ -977,7 +1003,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT, - &job); + &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); if (r) return r; @@ -1131,7 +1157,7 @@ static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf, { struct amdgpu_vcn_inst *vcn; void *log_buf; - volatile struct amdgpu_vcn_fwlog *plog; + struct amdgpu_vcn_fwlog *plog; unsigned int read_pos, write_pos, available, i, read_bytes = 0; unsigned int read_num[2] = {0}; @@ -1144,7 +1170,7 @@ static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf, log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size; - plog = (volatile struct amdgpu_vcn_fwlog *)log_buf; + plog = (struct amdgpu_vcn_fwlog *)log_buf; read_pos = plog->rptr; write_pos = plog->wptr; @@ -1211,11 +1237,11 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i, void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn) { #if defined(CONFIG_DEBUG_FS) - volatile uint32_t *flag = vcn->fw_shared.cpu_addr; + uint32_t *flag = vcn->fw_shared.cpu_addr; void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size; uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size; - volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr; - volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr + struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr; + struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr + vcn->fw_shared.log_offset; *flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG); fw_log->is_enabled = 1; @@ -1451,3 +1477,161 @@ int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block, return ret; } + +/** + * amdgpu_vcn_reset_engine - Reset a specific VCN engine + * @adev: Pointer to the AMDGPU device + * @instance_id: VCN engine instance to reset + * + * Returns: 0 on success, or a negative error code on failure. 
+ */ +static int amdgpu_vcn_reset_engine(struct amdgpu_device *adev, + uint32_t instance_id) +{ + struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[instance_id]; + int r, i; + + mutex_lock(&vinst->engine_reset_mutex); + /* Stop the scheduler's work queue for the dec and enc rings if they are running. + * This ensures that no new tasks are submitted to the queues while + * the reset is in progress. + */ + drm_sched_wqueue_stop(&vinst->ring_dec.sched); + for (i = 0; i < vinst->num_enc_rings; i++) + drm_sched_wqueue_stop(&vinst->ring_enc[i].sched); + + /* Perform the VCN reset for the specified instance */ + r = vinst->reset(vinst); + if (r) + goto unlock; + r = amdgpu_ring_test_ring(&vinst->ring_dec); + if (r) + goto unlock; + for (i = 0; i < vinst->num_enc_rings; i++) { + r = amdgpu_ring_test_ring(&vinst->ring_enc[i]); + if (r) + goto unlock; + } + amdgpu_fence_driver_force_completion(&vinst->ring_dec); + for (i = 0; i < vinst->num_enc_rings; i++) + amdgpu_fence_driver_force_completion(&vinst->ring_enc[i]); + + /* Restart the scheduler's work queue for the dec and enc rings + * if they were stopped by this function. This allows new tasks + * to be submitted to the queues after the reset is complete. + */ + drm_sched_wqueue_start(&vinst->ring_dec.sched); + for (i = 0; i < vinst->num_enc_rings; i++) + drm_sched_wqueue_start(&vinst->ring_enc[i].sched); + +unlock: + mutex_unlock(&vinst->engine_reset_mutex); + + return r; +} + +/** + * amdgpu_vcn_ring_reset - Reset a VCN ring + * @ring: ring to reset + * @vmid: vmid of guilty job + * @timedout_fence: fence of timed out job + * + * This helper is for VCN blocks without unified queues because + * resetting the engine resets all queues in that case. With + * unified queues we have one queue per engine. + * Returns: 0 on success, or a negative error code on failure. 
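[Editor's illustrative sketch, not part of the patch: how a version-specific VCN file is expected to hook into this reset path, based only on the vinst->reset callback and amdgpu_vcn_ring_reset() added in this series. The vcn_vX_0_* names are hypothetical.]

static int vcn_vX_0_reset_instance(struct amdgpu_vcn_inst *vinst)
{
	/* stop, re-program and restart this VCN instance */
	return 0;
}

static void vcn_vX_0_setup_reset(struct amdgpu_device *adev, int i)
{
	/* called back by amdgpu_vcn_reset_engine() via vinst->reset() */
	adev->vcn.inst[i].reset = vcn_vX_0_reset_instance;

	/*
	 * Rings of instances without unified queues can route their reset
	 * through amdgpu_vcn_ring_reset(); it returns -EINVAL for
	 * unified-queue instances, which reset per queue instead.
	 */
}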
+ */ +int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) +{ + struct amdgpu_device *adev = ring->adev; + + if (adev->vcn.inst[ring->me].using_unified_queue) + return -EINVAL; + + return amdgpu_vcn_reset_engine(adev, ring->me); +} + +int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev, + const struct amdgpu_hwip_reg_entry *reg, u32 count) +{ + adev->vcn.ip_dump = kcalloc(adev->vcn.num_vcn_inst * count, + sizeof(uint32_t), GFP_KERNEL); + if (!adev->vcn.ip_dump) + return -ENOMEM; + adev->vcn.reg_list = reg; + adev->vcn.reg_count = count; + + return 0; +} + +static void amdgpu_vcn_reg_dump_fini(struct amdgpu_device *adev) +{ + kfree(adev->vcn.ip_dump); + adev->vcn.ip_dump = NULL; + adev->vcn.reg_list = NULL; + adev->vcn.reg_count = 0; +} + +void amdgpu_vcn_dump_ip_state(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int i, j; + bool is_powered; + u32 inst_off; + + if (!adev->vcn.ip_dump) + return; + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + inst_off = i * adev->vcn.reg_count; + /* mmUVD_POWER_STATUS is always readable and is the first in reg_list */ + adev->vcn.ip_dump[inst_off] = + RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->vcn.reg_list[0], i)); + is_powered = (adev->vcn.ip_dump[inst_off] & + UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF) != + UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF; + + if (is_powered) + for (j = 1; j < adev->vcn.reg_count; j++) + adev->vcn.ip_dump[inst_off + j] = + RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->vcn.reg_list[j], i)); + } +} + +void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) +{ + struct amdgpu_device *adev = ip_block->adev; + int i, j; + bool is_powered; + u32 inst_off; + + if (!adev->vcn.ip_dump) + return; + + drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) { + drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i); + continue; + } + + inst_off = i * adev->vcn.reg_count; + is_powered = (adev->vcn.ip_dump[inst_off] & + UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF) != + UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF; + + if (is_powered) { + drm_printf(p, "\nActive Instance:VCN%d\n", i); + for (j = 0; j < adev->vcn.reg_count; j++) + drm_printf(p, "%-50s \t 0x%08x\n", adev->vcn.reg_list[j].reg_name, + adev->vcn.ip_dump[inst_off + j]); + } else { + drm_printf(p, "\nInactive Instance:VCN%d\n", i); + } + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index 83adf81defc7..dc8a17bcc3c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -237,6 +237,8 @@ #define AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING 2 +struct amdgpu_hwip_reg_entry; + enum amdgpu_vcn_caps { AMDGPU_VCN_RRMT_ENABLED, }; @@ -330,7 +332,9 @@ struct amdgpu_vcn_inst { struct dpg_pause_state *new_state); int (*set_pg_state)(struct amdgpu_vcn_inst *vinst, enum amd_powergating_state state); + int (*reset)(struct amdgpu_vcn_inst *vinst); bool using_unified_queue; + struct mutex engine_reset_mutex; }; struct amdgpu_vcn_ras { @@ -360,6 +364,8 @@ struct amdgpu_vcn { bool workload_profile_active; struct mutex workload_profile_mutex; + u32 reg_count; + const struct amdgpu_hwip_reg_entry *reg_list; }; struct amdgpu_fw_shared_rb_ptrs_struct { @@ -495,7 +501,7 @@ struct amdgpu_vcn5_fw_shared 
{ struct amdgpu_fw_shared_rb_setup rb_setup; struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface; struct amdgpu_fw_shared_drm_key_wa drm_key_wa; - uint8_t pad3[9]; + uint8_t pad3[404]; }; #define VCN_BLOCK_ENCODE_DISABLE_MASK 0x80 @@ -510,7 +516,7 @@ enum vcn_ring_type { int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i); int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i); -int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i); +void amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i); int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i); int amdgpu_vcn_resume(struct amdgpu_device *adev, int i); void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring); @@ -552,5 +558,14 @@ void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev); int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block, enum amd_powergating_state state); +int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *guilty_fence); +int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev, + const struct amdgpu_hwip_reg_entry *reg, u32 count); +void amdgpu_vcn_dump_ip_state(struct amdgpu_ip_block *ip_block); +void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p); +void amdgpu_vcn_get_profile(struct amdgpu_device *adev); +void amdgpu_vcn_put_profile(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 13f0cdeb59c4..3328ab63376b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -828,11 +828,14 @@ static void amdgpu_virt_init_ras(struct amdgpu_device *adev) { ratelimit_state_init(&adev->virt.ras.ras_error_cnt_rs, 5 * HZ, 1); ratelimit_state_init(&adev->virt.ras.ras_cper_dump_rs, 5 * HZ, 1); + ratelimit_state_init(&adev->virt.ras.ras_chk_criti_rs, 5 * HZ, 1); ratelimit_set_flags(&adev->virt.ras.ras_error_cnt_rs, RATELIMIT_MSG_ON_RELEASE); ratelimit_set_flags(&adev->virt.ras.ras_cper_dump_rs, RATELIMIT_MSG_ON_RELEASE); + ratelimit_set_flags(&adev->virt.ras.ras_chk_criti_rs, + RATELIMIT_MSG_ON_RELEASE); mutex_init(&adev->virt.ras.ras_telemetry_mutex); @@ -1501,3 +1504,55 @@ void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev) if (virt->ops && virt->ops->req_bad_pages) virt->ops->req_bad_pages(adev); } + +static int amdgpu_virt_cache_chk_criti_hit(struct amdgpu_device *adev, + struct amdsriov_ras_telemetry *host_telemetry, + bool *hit) +{ + struct amd_sriov_ras_chk_criti *tmp = NULL; + uint32_t checksum, used_size; + + checksum = host_telemetry->header.checksum; + used_size = host_telemetry->header.used_size; + + if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10)) + return 0; + + tmp = kmemdup(&host_telemetry->body.chk_criti, used_size, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + if (checksum != amd_sriov_msg_checksum(tmp, used_size, 0, 0)) + goto out; + + if (hit) + *hit = tmp->hit ? true : false; + +out: + kfree(tmp); + + return 0; +} + +int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit) +{ + struct amdgpu_virt *virt = &adev->virt; + int r = -EPERM; + + if (!virt->ops || !virt->ops->req_ras_chk_criti) + return -EOPNOTSUPP; + + /* Host allows 15 ras telemetry requests per 60 seconds. Afterwhich, the Host + * will ignore incoming guest messages. Ratelimit the guest messages to + * prevent guest self DOS. 
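[Editor's worked example, not part of the patch: the numbers come from the ratelimit_state_init(&...ras_chk_criti_rs, 5 * HZ, 1) call earlier in this file and the host budget stated in the comment above; example_send_chk_criti() is a hypothetical, simplified caller.]

/*
 * ratelimit_state_init(&rs, 5 * HZ, 1) allows at most one message per
 * 5-second window, i.e. ~12 requests per 60 seconds, which stays below
 * the host budget of 15 requests per 60 seconds.
 */
static int example_send_chk_criti(struct amdgpu_virt *virt,
				  struct amdgpu_device *adev, u64 addr)
{
	if (!__ratelimit(&virt->ras.ras_chk_criti_rs))
		return -EPERM;	/* over budget, drop the request */

	/* the real helper additionally serializes with ras_telemetry_mutex */
	return virt->ops->req_ras_chk_criti(adev, addr);
}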
+ */ + if (__ratelimit(&virt->ras.ras_chk_criti_rs)) { + mutex_lock(&virt->ras.ras_telemetry_mutex); + if (!virt->ops->req_ras_chk_criti(adev, addr)) + r = amdgpu_virt_cache_chk_criti_hit( + adev, virt->fw_reserve.ras_telemetry, hit); + mutex_unlock(&virt->ras.ras_telemetry_mutex); + } + + return r; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 577c6194db78..d1172c8e58c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -98,6 +98,7 @@ struct amdgpu_virt_ops { int (*req_ras_err_count)(struct amdgpu_device *adev); int (*req_ras_cper_dump)(struct amdgpu_device *adev, u64 vf_rptr); int (*req_bad_pages)(struct amdgpu_device *adev); + int (*req_ras_chk_criti)(struct amdgpu_device *adev, u64 addr); }; /* @@ -152,8 +153,10 @@ enum AMDGIM_REG_ACCESS_FLAG { AMDGIM_FEATURE_MMHUB_REG_RLC_EN = (1 << 1), /* Use RLC to program GC regs */ AMDGIM_FEATURE_GC_REG_RLC_EN = (1 << 2), - /* Use PSP to program L1_TLB_CNTL*/ + /* Use PSP to program L1_TLB_CNTL */ AMDGIM_FEATURE_L1_TLB_CNTL_PSP_EN = (1 << 3), + /* Use RLCG to program SQ_CONFIG1 */ + AMDGIM_FEATURE_REG_ACCESS_SQ_CONFIG = (1 << 4), }; struct amdgim_pf2vf_info_v1 { @@ -250,10 +253,15 @@ struct amdgpu_virt_ras_err_handler_data { struct amdgpu_virt_ras { struct ratelimit_state ras_error_cnt_rs; struct ratelimit_state ras_cper_dump_rs; + struct ratelimit_state ras_chk_criti_rs; struct mutex ras_telemetry_mutex; uint64_t cper_rptr; }; +#define AMDGPU_VIRT_CAPS_LIST(X) X(AMDGPU_VIRT_CAP_POWER_LIMIT) + +DECLARE_ATTR_CAP_CLASS(amdgpu_virt, AMDGPU_VIRT_CAPS_LIST); + /* GPU virtualization */ struct amdgpu_virt { uint32_t caps; @@ -265,12 +273,14 @@ struct amdgpu_virt { struct amdgpu_irq_src rcv_irq; struct work_struct flr_work; - struct work_struct bad_pages_work; + struct work_struct req_bad_pages_work; + struct work_struct handle_bad_pages_work; struct amdgpu_mm_table mm_table; const struct amdgpu_virt_ops *ops; struct amdgpu_vf_error_buffer vf_errors; struct amdgpu_virt_fw_reserve fw_reserve; + struct amdgpu_virt_caps virt_caps; uint32_t gim_feature; uint32_t reg_access_mode; int req_init_data_ver; @@ -301,6 +311,9 @@ struct amdgpu_virt { union amd_sriov_ras_caps ras_telemetry_en_caps; struct amdgpu_virt_ras ras; struct amd_sriov_ras_telemetry_error_count count_cache; + + /* hibernate and resume with different VF feature for xgmi enabled system */ + bool is_xgmi_node_migrate_enabled; }; struct amdgpu_video_codec_info; @@ -343,6 +356,10 @@ struct amdgpu_video_codec_info; #define amdgpu_sriov_rlcg_error_report_enabled(adev) \ (amdgpu_sriov_reg_indirect_mmhub(adev) || amdgpu_sriov_reg_indirect_gc(adev)) +#define amdgpu_sriov_reg_access_sq_config(adev) \ +(amdgpu_sriov_vf((adev)) && \ + ((adev)->virt.reg_access & (AMDGIM_FEATURE_REG_ACCESS_SQ_CONFIG))) + #define amdgpu_passthrough(adev) \ ((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE) @@ -386,6 +403,10 @@ static inline bool is_virtual_machine(void) ((adev)->virt.gim_feature & AMDGIM_FEATURE_VCN_RB_DECOUPLE) #define amdgpu_sriov_is_mes_info_enable(adev) \ ((adev)->virt.gim_feature & AMDGIM_FEATURE_MES_INFO_ENABLE) + +#define amdgpu_virt_xgmi_migrate_enabled(adev) \ + ((adev)->virt.is_xgmi_node_migrate_enabled && (adev)->gmc.xgmi.node_segment_size != 0) + bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); void amdgpu_virt_init_setting(struct amdgpu_device *adev); int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init); @@ -434,4 +455,5 @@ int 
amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev); bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev, enum amdgpu_ras_block block); void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev); +int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index 155bb9891a17..79bad9cbe2ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -14,7 +14,6 @@ #include "dce_v8_0.h" #endif #include "dce_v10_0.h" -#include "dce_v11_0.h" #include "ivsrcid/ivsrcid_vislands30.h" #include "amdgpu_vkms.h" #include "amdgpu_display.h" @@ -581,13 +580,6 @@ static int amdgpu_vkms_hw_init(struct amdgpu_ip_block *ip_block) case CHIP_TONGA: dce_v10_0_disable_dce(adev); break; - case CHIP_CARRIZO: - case CHIP_STONEY: - case CHIP_POLARIS10: - case CHIP_POLARIS11: - case CHIP_VEGAM: - dce_v11_0_disable_dce(adev); - break; case CHIP_TOPAZ: #ifdef CONFIG_DRM_AMDGPU_SI case CHIP_HAINAN: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 3911c78f8282..8c28e8923f02 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -128,6 +128,17 @@ struct amdgpu_vm_tlb_seq_struct { }; /** + * amdgpu_vm_assert_locked - check if VM is correctly locked + * @vm: the VM which schould be tested + * + * Asserts that the VM root PD is locked. + */ +static void amdgpu_vm_assert_locked(struct amdgpu_vm *vm) +{ + dma_resv_assert_held(vm->root.bo->tbo.base.resv); +} + +/** * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping * * @adev: amdgpu_device pointer @@ -143,6 +154,8 @@ int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm, { int r; + amdgpu_vm_assert_locked(vm); + if (vm->pasid == pasid) return 0; @@ -181,12 +194,11 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo) struct amdgpu_bo *bo = vm_bo->bo; vm_bo->moved = true; - spin_lock(&vm_bo->vm->status_lock); + amdgpu_vm_assert_locked(vm); if (bo->tbo.type == ttm_bo_type_kernel) list_move(&vm_bo->vm_status, &vm->evicted); else list_move_tail(&vm_bo->vm_status, &vm->evicted); - spin_unlock(&vm_bo->vm->status_lock); } /** * amdgpu_vm_bo_moved - vm_bo is moved @@ -198,9 +210,8 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo) */ static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo) { - spin_lock(&vm_bo->vm->status_lock); + amdgpu_vm_assert_locked(vm_bo->vm); list_move(&vm_bo->vm_status, &vm_bo->vm->moved); - spin_unlock(&vm_bo->vm->status_lock); } /** @@ -213,9 +224,8 @@ static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo) */ static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo) { - spin_lock(&vm_bo->vm->status_lock); + amdgpu_vm_assert_locked(vm_bo->vm); list_move(&vm_bo->vm_status, &vm_bo->vm->idle); - spin_unlock(&vm_bo->vm->status_lock); vm_bo->moved = false; } @@ -229,9 +239,9 @@ static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo) */ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo) { - spin_lock(&vm_bo->vm->status_lock); + spin_lock(&vm_bo->vm->invalidated_lock); list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated); - spin_unlock(&vm_bo->vm->status_lock); + spin_unlock(&vm_bo->vm->invalidated_lock); } /** @@ -244,10 +254,9 @@ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo) */ static void amdgpu_vm_bo_evicted_user(struct 
amdgpu_vm_bo_base *vm_bo) { + amdgpu_vm_assert_locked(vm_bo->vm); vm_bo->moved = true; - spin_lock(&vm_bo->vm->status_lock); list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user); - spin_unlock(&vm_bo->vm->status_lock); } /** @@ -260,13 +269,11 @@ static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo) */ static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo) { - if (vm_bo->bo->parent) { - spin_lock(&vm_bo->vm->status_lock); + amdgpu_vm_assert_locked(vm_bo->vm); + if (vm_bo->bo->parent) list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); - spin_unlock(&vm_bo->vm->status_lock); - } else { + else amdgpu_vm_bo_idle(vm_bo); - } } /** @@ -279,9 +286,8 @@ static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo) */ static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo) { - spin_lock(&vm_bo->vm->status_lock); + amdgpu_vm_assert_locked(vm_bo->vm); list_move(&vm_bo->vm_status, &vm_bo->vm->done); - spin_unlock(&vm_bo->vm->status_lock); } /** @@ -295,10 +301,13 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm) { struct amdgpu_vm_bo_base *vm_bo, *tmp; - spin_lock(&vm->status_lock); + spin_lock(&vm->invalidated_lock); list_splice_init(&vm->done, &vm->invalidated); list_for_each_entry(vm_bo, &vm->invalidated, vm_status) vm_bo->moved = true; + spin_unlock(&vm->invalidated_lock); + + amdgpu_vm_assert_locked(vm_bo->vm); list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) { struct amdgpu_bo *bo = vm_bo->bo; @@ -308,14 +317,13 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm) else if (bo->parent) list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); } - spin_unlock(&vm->status_lock); } /** * amdgpu_vm_update_shared - helper to update shared memory stat * @base: base structure for tracking BO usage in a VM * - * Takes the vm status_lock and updates the shared memory stat. If the basic + * Takes the vm stats_lock and updates the shared memory stat. If the basic * stat changed (e.g. buffer was moved) amdgpu_vm_update_stats need to be called * as well. */ @@ -327,7 +335,8 @@ static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base) uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo); bool shared; - spin_lock(&vm->status_lock); + dma_resv_assert_held(bo->tbo.base.resv); + spin_lock(&vm->stats_lock); shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base); if (base->shared != shared) { base->shared = shared; @@ -339,7 +348,7 @@ static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base) vm->stats[bo_memtype].drm.private += size; } } - spin_unlock(&vm->status_lock); + spin_unlock(&vm->stats_lock); } /** @@ -364,11 +373,11 @@ void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo) * be bo->tbo.resource * @sign: if we should add (+1) or subtract (-1) from the stat * - * Caller need to have the vm status_lock held. Useful for when multiple update + * Caller need to have the vm stats_lock held. Useful for when multiple update * need to happen at the same time. 
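[Editor's illustrative sketch, not part of the patch: the caller pattern the comment above describes, batching both halves of a placement change under one stats_lock hold as amdgpu_vm_bo_move() in this file does; example_account_move() and its old_res/new_res parameters are placeholders.]

static void example_account_move(struct amdgpu_vm_bo_base *base,
				 struct ttm_resource *old_res,
				 struct ttm_resource *new_res)
{
	struct amdgpu_vm *vm = base->vm;

	/* one lock hold keeps the two stat updates atomic */
	spin_lock(&vm->stats_lock);
	amdgpu_vm_update_stats_locked(base, old_res, -1);
	amdgpu_vm_update_stats_locked(base, new_res, +1);
	spin_unlock(&vm->stats_lock);
}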
*/ static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base, - struct ttm_resource *res, int sign) + struct ttm_resource *res, int sign) { struct amdgpu_vm *vm = base->vm; struct amdgpu_bo *bo = base->bo; @@ -392,7 +401,8 @@ static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base, */ if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) vm->stats[res_memtype].drm.purgeable += size; - if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype))) + if (!(bo->preferred_domains & + amdgpu_mem_type_to_domain(res_memtype))) vm->stats[bo_memtype].evicted += size; } } @@ -411,9 +421,9 @@ void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base, { struct amdgpu_vm *vm = base->vm; - spin_lock(&vm->status_lock); + spin_lock(&vm->stats_lock); amdgpu_vm_update_stats_locked(base, res, sign); - spin_unlock(&vm->status_lock); + spin_unlock(&vm->stats_lock); } /** @@ -439,10 +449,10 @@ void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, base->next = bo->vm_bo; bo->vm_bo = base; - spin_lock(&vm->status_lock); + spin_lock(&vm->stats_lock); base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base); amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1); - spin_unlock(&vm->status_lock); + spin_unlock(&vm->stats_lock); if (!amdgpu_vm_is_bo_always_valid(vm, bo)) return; @@ -485,6 +495,42 @@ int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec, } /** + * amdgpu_vm_lock_done_list - lock all BOs on the done list + * @vm: vm providing the BOs + * @exec: drm execution context + * @num_fences: number of extra fences to reserve + * + * Lock the BOs on the done list in the DRM execution context. + */ +int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec, + unsigned int num_fences) +{ + struct list_head *prev = &vm->done; + struct amdgpu_bo_va *bo_va; + struct amdgpu_bo *bo; + int ret; + + /* We can only trust prev->next while holding the lock */ + spin_lock(&vm->invalidated_lock); + while (!list_is_head(prev->next, &vm->done)) { + bo_va = list_entry(prev->next, typeof(*bo_va), base.vm_status); + spin_unlock(&vm->invalidated_lock); + + bo = bo_va->base.bo; + if (bo) { + ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 1); + if (unlikely(ret)) + return ret; + } + spin_lock(&vm->invalidated_lock); + prev = prev->next; + } + spin_unlock(&vm->invalidated_lock); + + return 0; +} + +/** * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU * * @adev: amdgpu device pointer @@ -575,7 +621,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, void *param) { uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm); - struct amdgpu_vm_bo_base *bo_base; + struct amdgpu_vm_bo_base *bo_base, *tmp; struct amdgpu_bo *bo; int r; @@ -588,13 +634,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, return r; } - spin_lock(&vm->status_lock); - while (!list_empty(&vm->evicted)) { - bo_base = list_first_entry(&vm->evicted, - struct amdgpu_vm_bo_base, - vm_status); - spin_unlock(&vm->status_lock); - + list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) { bo = bo_base->bo; r = validate(param, bo); @@ -607,37 +647,21 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->update_funcs->map_table(to_amdgpu_bo_vm(bo)); amdgpu_vm_bo_relocated(bo_base); } - spin_lock(&vm->status_lock); } - while (ticket && !list_empty(&vm->evicted_user)) { - bo_base = list_first_entry(&vm->evicted_user, - struct amdgpu_vm_bo_base, - vm_status); - spin_unlock(&vm->status_lock); - 
- bo = bo_base->bo; - if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) { - struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm); + if (ticket) { + list_for_each_entry_safe(bo_base, tmp, &vm->evicted_user, + vm_status) { + bo = bo_base->bo; + dma_resv_assert_held(bo->tbo.base.resv); - pr_warn_ratelimited("Evicted user BO is not reserved\n"); - if (ti) { - pr_warn_ratelimited("pid %d\n", ti->pid); - amdgpu_vm_put_task_info(ti); - } + r = validate(param, bo); + if (r) + return r; - return -EINVAL; + amdgpu_vm_bo_invalidated(bo_base); } - - r = validate(param, bo); - if (r) - return r; - - amdgpu_vm_bo_invalidated(bo_base); - - spin_lock(&vm->status_lock); } - spin_unlock(&vm->status_lock); amdgpu_vm_eviction_lock(vm); vm->evicting = false; @@ -654,22 +678,29 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, * Check if all VM PDs/PTs are ready for updates * * Returns: - * True if VM is not evicting. + * True if VM is not evicting and all VM entities are not stopped */ bool amdgpu_vm_ready(struct amdgpu_vm *vm) { - bool empty; bool ret; + amdgpu_vm_assert_locked(vm); + amdgpu_vm_eviction_lock(vm); ret = !vm->evicting; amdgpu_vm_eviction_unlock(vm); - spin_lock(&vm->status_lock); - empty = list_empty(&vm->evicted); - spin_unlock(&vm->status_lock); + ret &= list_empty(&vm->evicted); - return ret && empty; + spin_lock(&vm->immediate.lock); + ret &= !vm->immediate.stopped; + spin_unlock(&vm->immediate.lock); + + spin_lock(&vm->delayed.lock); + ret &= !vm->delayed.stopped; + spin_unlock(&vm->delayed.lock); + + return ret; } /** @@ -765,6 +796,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool cleaner_shader_needed = false; bool pasid_mapping_needed = false; struct dma_fence *fence = NULL; + struct amdgpu_fence *af; unsigned int patch; int r; @@ -830,6 +862,9 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, r = amdgpu_fence_emit(ring, &fence, NULL, 0); if (r) return r; + /* this is part of the job's context */ + af = container_of(fence, struct amdgpu_fence, base); + af->context = job->base.s_fence ? 
job->base.s_fence->finished.context : 0; } if (vm_flush_needed) { @@ -946,16 +981,13 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, struct amdgpu_vm *vm, bool immediate) { struct amdgpu_vm_update_params params; - struct amdgpu_vm_bo_base *entry; + struct amdgpu_vm_bo_base *entry, *tmp; bool flush_tlb_needed = false; - LIST_HEAD(relocated); int r, idx; - spin_lock(&vm->status_lock); - list_splice_init(&vm->relocated, &relocated); - spin_unlock(&vm->status_lock); + amdgpu_vm_assert_locked(vm); - if (list_empty(&relocated)) + if (list_empty(&vm->relocated)) return 0; if (!drm_dev_enter(adev_to_drm(adev), &idx)) @@ -966,11 +998,12 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, params.vm = vm; params.immediate = immediate; - r = vm->update_funcs->prepare(¶ms, NULL); + r = vm->update_funcs->prepare(¶ms, NULL, + AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES); if (r) goto error; - list_for_each_entry(entry, &relocated, vm_status) { + list_for_each_entry(entry, &vm->relocated, vm_status) { /* vm_flush_needed after updating moved PDEs */ flush_tlb_needed |= entry->moved; @@ -986,9 +1019,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, if (flush_tlb_needed) atomic64_inc(&vm->tlb_seq); - while (!list_empty(&relocated)) { - entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base, - vm_status); + list_for_each_entry_safe(entry, tmp, &vm->relocated, vm_status) { amdgpu_vm_bo_idle(entry); } @@ -1135,7 +1166,8 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, dma_fence_put(tmp); } - r = vm->update_funcs->prepare(¶ms, sync); + r = vm->update_funcs->prepare(¶ms, sync, + AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE); if (r) goto error_free; @@ -1214,9 +1246,9 @@ error_free: void amdgpu_vm_get_memory(struct amdgpu_vm *vm, struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM]) { - spin_lock(&vm->status_lock); + spin_lock(&vm->stats_lock); memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM); - spin_unlock(&vm->status_lock); + spin_unlock(&vm->stats_lock); } /** @@ -1271,7 +1303,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, } else { struct drm_gem_object *obj = &bo->tbo.base; - if (obj->import_attach && bo_va->is_xgmi) { + if (drm_gem_is_imported(obj) && bo_va->is_xgmi) { struct dma_buf *dma_buf = obj->import_attach->dmabuf; struct drm_gem_object *gobj = dma_buf->priv; struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj); @@ -1328,13 +1360,14 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, /* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here * but in case of something, we filter the flags in first place */ - if (!(mapping->flags & AMDGPU_PTE_READABLE)) + if (!(mapping->flags & AMDGPU_VM_PAGE_READABLE)) update_flags &= ~AMDGPU_PTE_READABLE; - if (!(mapping->flags & AMDGPU_PTE_WRITEABLE)) + if (!(mapping->flags & AMDGPU_VM_PAGE_WRITEABLE)) update_flags &= ~AMDGPU_PTE_WRITEABLE; /* Apply ASIC specific mapping flags */ - amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags); + amdgpu_gmc_get_vm_pte(adev, vm, bo, mapping->flags, + &update_flags); trace_amdgpu_vm_bo_update(mapping); @@ -1475,7 +1508,7 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping *mapping, struct dma_fence *fence) { - if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev)) + if (mapping->flags & AMDGPU_VM_PAGE_PRT) amdgpu_vm_add_prt_cb(adev, fence); kfree(mapping); } @@ -1582,29 +1615,24 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct 
ww_acquire_ctx *ticket) { - struct amdgpu_bo_va *bo_va; + struct amdgpu_bo_va *bo_va, *tmp; struct dma_resv *resv; bool clear, unlock; int r; - spin_lock(&vm->status_lock); - while (!list_empty(&vm->moved)) { - bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va, - base.vm_status); - spin_unlock(&vm->status_lock); - + list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { /* Per VM BOs never need to bo cleared in the page tables */ r = amdgpu_vm_bo_update(adev, bo_va, false); if (r) return r; - spin_lock(&vm->status_lock); } + spin_lock(&vm->invalidated_lock); while (!list_empty(&vm->invalidated)) { bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, base.vm_status); resv = bo_va->base.bo->tbo.base.resv; - spin_unlock(&vm->status_lock); + spin_unlock(&vm->invalidated_lock); /* Try to reserve the BO to avoid clearing its ptes */ if (!adev->debug_vm && dma_resv_trylock(resv)) { @@ -1631,14 +1659,14 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, * validation */ if (vm->is_compute_context && - bo_va->base.bo->tbo.base.import_attach && + drm_gem_is_imported(&bo_va->base.bo->tbo.base) && (!bo_va->base.bo->tbo.resource || bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM)) amdgpu_vm_bo_evicted_user(&bo_va->base); - spin_lock(&vm->status_lock); + spin_lock(&vm->invalidated_lock); } - spin_unlock(&vm->status_lock); + spin_unlock(&vm->invalidated_lock); return 0; } @@ -1754,7 +1782,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev, list_add(&mapping->list, &bo_va->invalids); amdgpu_vm_it_insert(mapping, &vm->va); - if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev)) + if (mapping->flags & AMDGPU_VM_PAGE_PRT) amdgpu_vm_prt_get(adev); if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved) @@ -1814,7 +1842,7 @@ static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev, int amdgpu_vm_bo_map(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, uint64_t saddr, uint64_t offset, - uint64_t size, uint64_t flags) + uint64_t size, uint32_t flags) { struct amdgpu_bo_va_mapping *mapping, *tmp; struct amdgpu_bo *bo = bo_va->base.bo; @@ -1873,7 +1901,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, uint64_t saddr, uint64_t offset, - uint64_t size, uint64_t flags) + uint64_t size, uint32_t flags) { struct amdgpu_bo_va_mapping *mapping; struct amdgpu_bo *bo = bo_va->base.bo; @@ -2167,9 +2195,9 @@ void amdgpu_vm_bo_del(struct amdgpu_device *adev, } } - spin_lock(&vm->status_lock); + spin_lock(&vm->invalidated_lock); list_del(&bo_va->base.vm_status); - spin_unlock(&vm->status_lock); + spin_unlock(&vm->invalidated_lock); list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { list_del(&mapping->list); @@ -2277,10 +2305,10 @@ void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem, for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) { struct amdgpu_vm *vm = bo_base->vm; - spin_lock(&vm->status_lock); + spin_lock(&vm->stats_lock); amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1); amdgpu_vm_update_stats_locked(bo_base, new_mem, +1); - spin_unlock(&vm->status_lock); + spin_unlock(&vm->stats_lock); } amdgpu_vm_bo_invalidate(bo, evicted); @@ -2395,10 +2423,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, else adev->vm_manager.fragment_size = amdgpu_vm_fragment_size; - DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n", - vm_size, 
adev->vm_manager.num_level + 1, - adev->vm_manager.block_size, - adev->vm_manager.fragment_size); + dev_info( + adev->dev, + "vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n", + vm_size, adev->vm_manager.num_level + 1, + adev->vm_manager.block_size, adev->vm_manager.fragment_size); } /** @@ -2409,13 +2438,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, */ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) { - timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, - DMA_RESV_USAGE_BOOKKEEP, - true, timeout); + timeout = drm_sched_entity_flush(&vm->immediate, timeout); if (timeout <= 0) return timeout; - return dma_fence_wait_timeout(vm->last_unlocked, true, timeout); + return drm_sched_entity_flush(&vm->delayed, timeout); } static void amdgpu_vm_destroy_task_info(struct kref *kref) @@ -2447,7 +2474,8 @@ amdgpu_vm_get_vm_from_pasid(struct amdgpu_device *adev, u32 pasid) */ void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info) { - kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info); + if (task_info) + kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info); } /** @@ -2507,11 +2535,11 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) if (!vm->task_info) return; - if (vm->task_info->pid == current->pid) + if (vm->task_info->task.pid == current->pid) return; - vm->task_info->pid = current->pid; - get_task_comm(vm->task_info->task_name, current); + vm->task_info->task.pid = current->pid; + get_task_comm(vm->task_info->task.comm, current); if (current->group_leader->mm != current->mm) return; @@ -2547,11 +2575,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, INIT_LIST_HEAD(&vm->relocated); INIT_LIST_HEAD(&vm->moved); INIT_LIST_HEAD(&vm->idle); + spin_lock_init(&vm->invalidated_lock); INIT_LIST_HEAD(&vm->invalidated); - spin_lock_init(&vm->status_lock); INIT_LIST_HEAD(&vm->freed); INIT_LIST_HEAD(&vm->done); INIT_KFIFO(vm->faults); + spin_lock_init(&vm->stats_lock); r = amdgpu_vm_init_entities(adev, vm); if (r) @@ -2564,8 +2593,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & AMDGPU_VM_USE_CPU_FOR_GFX); - DRM_DEBUG_DRIVER("VM update mode is %s\n", - vm->use_cpu_for_update ? "CPU" : "SDMA"); + dev_dbg(adev->dev, "VM update mode is %s\n", + vm->use_cpu_for_update ? "CPU" : "SDMA"); WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)), "CPU update of VM recommended only for large BAR system\n"); @@ -2607,7 +2636,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, r = amdgpu_vm_create_task_info(vm); if (r) - DRM_DEBUG("Failed to create task info for VM\n"); + dev_dbg(adev->dev, "Failed to create task info for VM\n"); amdgpu_bo_unreserve(vm->root.bo); amdgpu_bo_unref(&root_bo); @@ -2658,8 +2687,8 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) /* Update VM state */ vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & AMDGPU_VM_USE_CPU_FOR_COMPUTE); - DRM_DEBUG_DRIVER("VM update mode is %s\n", - vm->use_cpu_for_update ? "CPU" : "SDMA"); + dev_dbg(adev->dev, "VM update mode is %s\n", + vm->use_cpu_for_update ? 
"CPU" : "SDMA"); WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)), "CPU update of VM recommended only for large BAR system\n"); @@ -2730,7 +2759,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) dma_fence_put(vm->last_tlb_flush); list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { - if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev) && prt_fini_needed) { + if (mapping->flags & AMDGPU_VM_PAGE_PRT && prt_fini_needed) { amdgpu_vm_prt_fini(adev, vm); prt_fini_needed = false; } @@ -2761,10 +2790,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) dma_fence_put(vm->last_update); for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) { - if (vm->reserved_vmid[i]) { - amdgpu_vmid_free_reserved(adev, i); - vm->reserved_vmid[i] = false; - } + amdgpu_vmid_free_reserved(adev, vm, i); } ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move); @@ -2774,7 +2800,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) dev_warn(adev->dev, "VM memory stats for proc %s(%d) task %s(%d) is non-zero when fini\n", - ti->process_name, ti->pid, ti->task_name, ti->tgid); + ti->process_name, ti->task.pid, ti->task.comm, ti->tgid); } amdgpu_vm_put_task_info(vm->task_info); @@ -2860,6 +2886,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) union drm_amdgpu_vm *args = data; struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_fpriv *fpriv = filp->driver_priv; + struct amdgpu_vm *vm = &fpriv->vm; /* No valid flags defined yet */ if (args->in.flags) @@ -2868,17 +2895,10 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) switch (args->in.op) { case AMDGPU_VM_OP_RESERVE_VMID: /* We only have requirement to reserve vmid from gfxhub */ - if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) { - amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0)); - fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true; - } - + amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0)); break; case AMDGPU_VM_OP_UNRESERVE_VMID: - if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) { - amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0)); - fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false; - } + amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB(0)); break; default: return -EINVAL; @@ -2982,7 +3002,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, error_unlock: amdgpu_bo_unreserve(root); if (r < 0) - DRM_ERROR("Can't handle page fault (%d)\n", r); + dev_err(adev->dev, "Can't handle page fault (%d)\n", r); error_unref: amdgpu_bo_unref(&root); @@ -3016,7 +3036,8 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m) unsigned int total_done_objs = 0; unsigned int id = 0; - spin_lock(&vm->status_lock); + amdgpu_vm_assert_locked(vm); + seq_puts(m, "\tIdle BOs:\n"); list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) { if (!bo_va->base.bo) @@ -3054,11 +3075,13 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m) id = 0; seq_puts(m, "\tInvalidated BOs:\n"); + spin_lock(&vm->invalidated_lock); list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) { if (!bo_va->base.bo) continue; total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m); } + spin_unlock(&vm->invalidated_lock); total_invalidated_objs = id; id = 0; @@ -3068,7 +3091,6 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m) continue; total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m); } - spin_unlock(&vm->status_lock); 
total_done_objs = id; seq_printf(m, "\tTotal idle size: %12lld\tobjs:\t%d\n", total_idle, @@ -3156,3 +3178,12 @@ bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo) { return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv; } + +void amdgpu_vm_print_task_info(struct amdgpu_device *adev, + struct amdgpu_task_info *task_info) +{ + dev_err(adev->dev, + " Process %s pid %d thread %s pid %d\n", + task_info->process_name, task_info->tgid, + task_info->task.comm, task_info->task.pid); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index f3ad687125ad..adc5c9161fa8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -203,11 +203,11 @@ struct amdgpu_vm_bo_base { /* protected by bo being reserved */ struct amdgpu_vm_bo_base *next; - /* protected by vm status_lock */ + /* protected by vm reservation and invalidated_lock */ struct list_head vm_status; /* if the bo is counted as shared in mem stats - * protected by vm status_lock */ + * protected by vm BO being reserved */ bool shared; /* protected by the BO being reserved */ @@ -236,9 +236,8 @@ struct amdgpu_vm_pte_funcs { }; struct amdgpu_task_info { + struct drm_wedge_task_info task; char process_name[TASK_COMM_LEN]; - char task_name[TASK_COMM_LEN]; - pid_t pid; pid_t tgid; struct kref refcount; }; @@ -309,7 +308,7 @@ struct amdgpu_vm_update_params { struct amdgpu_vm_update_funcs { int (*map_table)(struct amdgpu_bo_vm *bo); int (*prepare)(struct amdgpu_vm_update_params *p, - struct amdgpu_sync *sync); + struct amdgpu_sync *sync, u64 k_job_id); int (*update)(struct amdgpu_vm_update_params *p, struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint64_t flags); @@ -344,18 +343,22 @@ struct amdgpu_vm { bool evicting; unsigned int saved_flags; - /* Lock to protect vm_bo add/del/move on all lists of vm */ - spinlock_t status_lock; - - /* Memory statistics for this vm, protected by status_lock */ + /* Memory statistics for this vm, protected by stats_lock */ + spinlock_t stats_lock; struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM]; + /* + * The following lists contain amdgpu_vm_bo_base objects for either + * PDs, PTs or per VM BOs. The state transits are: + * + * evicted -> relocated (PDs, PTs) or moved (per VM BOs) -> idle + * + * Lists are protected by the root PD dma_resv lock. + */ + /* Per-VM and PT BOs who needs a validation */ struct list_head evicted; - /* BOs for user mode queues that need a validation */ - struct list_head evicted_user; - /* PT BOs which relocated and their parent need an update */ struct list_head relocated; @@ -365,15 +368,32 @@ struct amdgpu_vm { /* All BOs of this VM not currently in the state machine */ struct list_head idle; + /* + * The following lists contain amdgpu_vm_bo_base objects for BOs which + * have their own dma_resv object and not depend on the root PD. Their + * state transits are: + * + * evicted_user or invalidated -> done + * + * Lists are protected by the invalidated_lock. 
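[Editor's illustrative sketch, not part of the patch: the two protection domains described in the struct comments above, mirroring amdgpu_vm_bo_idle() and amdgpu_vm_bo_done() in amdgpu_vm.c; the example_* helpers are hypothetical.]

static void example_move_to_idle(struct amdgpu_vm *vm,
				 struct amdgpu_vm_bo_base *vm_bo)
{
	/* evicted/relocated/moved/idle: root PD reservation required */
	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
	list_move(&vm_bo->vm_status, &vm->idle);
}

static void example_move_to_done(struct amdgpu_vm *vm,
				 struct amdgpu_vm_bo_base *vm_bo)
{
	/* evicted_user/invalidated/done: covered by invalidated_lock */
	spin_lock(&vm->invalidated_lock);
	list_move(&vm_bo->vm_status, &vm->done);
	spin_unlock(&vm->invalidated_lock);
}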
+ */ + spinlock_t invalidated_lock; + + /* BOs for user mode queues that need a validation */ + struct list_head evicted_user; + /* regular invalidated BOs, but not yet updated in the PT */ struct list_head invalidated; - /* BO mappings freed, but not yet updated in the PT */ - struct list_head freed; - /* BOs which are invalidated, has been updated in the PTs */ struct list_head done; + /* + * This list contains amdgpu_bo_va_mapping objects which have been freed + * but not updated in the PTs + */ + struct list_head freed; + /* contains the page directory */ struct amdgpu_vm_bo_base root; struct dma_fence *last_update; @@ -395,7 +415,7 @@ struct amdgpu_vm { struct dma_fence *last_unlocked; unsigned int pasid; - bool reserved_vmid[AMDGPU_MAX_VMHUBS]; + struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS]; /* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */ bool use_cpu_for_update; @@ -492,6 +512,8 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec, unsigned int num_fences); +int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec, + unsigned int num_fences); bool amdgpu_vm_ready(struct amdgpu_vm *vm); uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm); int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, @@ -539,11 +561,11 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, int amdgpu_vm_bo_map(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, uint64_t addr, uint64_t offset, - uint64_t size, uint64_t flags); + uint64_t size, uint32_t flags); int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, uint64_t addr, uint64_t offset, - uint64_t size, uint64_t flags); + uint64_t size, uint32_t flags); int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, uint64_t addr); @@ -668,4 +690,12 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct dma_fence **fence); +void amdgpu_vm_print_task_info(struct amdgpu_device *adev, + struct amdgpu_task_info *task_info); + +#define amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) \ + list_for_each_entry(mapping, &(bo_va)->valids, list) +#define amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) \ + list_for_each_entry(mapping, &(bo_va)->invalids, list) + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c index 0c1ef5850a5e..22e2e5b47341 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c @@ -40,12 +40,14 @@ static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table) * * @p: see amdgpu_vm_update_params definition * @sync: sync obj with fences to wait on + * @k_job_id: the id for tracing/debug purposes * * Returns: * Negativ errno, 0 for success. 
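The two mapping iterators added at the end of the amdgpu_vm.h hunk above are thin wrappers around list_for_each_entry() over bo_va->valids and bo_va->invalids. A minimal usage sketch, assuming the caller already holds the reservation that protects the bo_va (the tracepoint is only an illustrative consumer, not something this patch adds):

	struct amdgpu_bo_va_mapping *mapping;

	amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping)
		trace_amdgpu_vm_bo_mapping(mapping);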
*/ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, - struct amdgpu_sync *sync) + struct amdgpu_sync *sync, + u64 k_job_id) { if (!sync) return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index 30022123b0bf..7a4c12ff9b18 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -26,6 +26,7 @@ #include "amdgpu.h" #include "amdgpu_trace.h" #include "amdgpu_vm.h" +#include "amdgpu_job.h" /* * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt @@ -395,7 +396,8 @@ int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm, params.vm = vm; params.immediate = immediate; - r = vm->update_funcs->prepare(&params, NULL); + r = vm->update_funcs->prepare(&params, NULL, + AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR); if (r) goto exit; @@ -541,9 +543,7 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry) entry->bo->vm_bo = NULL; ttm_bo_set_bulk_move(&entry->bo->tbo, NULL); - spin_lock(&entry->vm->status_lock); list_del(&entry->vm_status); - spin_unlock(&entry->vm->status_lock); amdgpu_bo_unref(&entry->bo); } @@ -587,7 +587,6 @@ static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params, struct amdgpu_vm_pt_cursor seek; struct amdgpu_vm_bo_base *entry; - spin_lock(&params->vm->status_lock); for_each_amdgpu_vm_pt_dfs_safe(params->adev, params->vm, cursor, seek, entry) { if (entry && entry->bo) list_move(&entry->vm_status, &params->tlb_flush_waitlist); @@ -595,7 +594,6 @@ static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params, /* enter start node now */ list_move(&cursor->entry->vm_status, &params->tlb_flush_waitlist); - spin_unlock(&params->vm->status_lock); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c index 46d9fb433ab2..36805dcfa159 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c @@ -40,7 +40,7 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table) /* Allocate a new job for @count PTE updates */ static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p, - unsigned int count) + unsigned int count, u64 k_job_id) { enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE : AMDGPU_IB_POOL_DELAYED; @@ -56,7 +56,7 @@ static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p, ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW); r = amdgpu_job_alloc_with_ib(p->adev, entity, AMDGPU_FENCE_OWNER_VM, - ndw * 4, pool, &p->job); + ndw * 4, pool, &p->job, k_job_id); if (r) return r; @@ -69,16 +69,17 @@ static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p, * * @p: see amdgpu_vm_update_params definition * @sync: amdgpu_sync object with fences to wait for + * @k_job_id: identifier of the job, for tracing purpose * * Returns: * Negativ errno, 0 for success. 
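The status_lock removals in amdgpu_vm_pt_free() and amdgpu_vm_pt_add_list() above follow the new locking scheme of this series: PD/PT state lists are serialized by the root PD reservation rather than by a VM-wide spinlock. A caller-side sketch of that expectation, reusing amdgpu_vm_assert_locked() as seen in the amdgpu_vm.c hunks earlier (the inner comment marks where the list manipulation would go):

	int r = amdgpu_bo_reserve(vm->root.bo, true);

	if (!r) {
		amdgpu_vm_assert_locked(vm);	/* replaces the old vm->status_lock */
		/* ... free PTs / move entries to tlb_flush_waitlist ... */
		amdgpu_bo_unreserve(vm->root.bo);
	}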
*/ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p, - struct amdgpu_sync *sync) + struct amdgpu_sync *sync, u64 k_job_id) { int r; - r = amdgpu_vm_sdma_alloc_job(p, 0); + r = amdgpu_vm_sdma_alloc_job(p, 0, k_job_id); if (r) return r; @@ -249,7 +250,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p, if (r) return r; - r = amdgpu_vm_sdma_alloc_job(p, count); + r = amdgpu_vm_sdma_alloc_job(p, count, + AMDGPU_KERNEL_JOB_ID_VM_UPDATE); if (r) return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c index 51cddfa3f1e8..5d26797356a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c @@ -71,7 +71,6 @@ static void amdgpu_tlb_fence_work(struct work_struct *work) } static const struct dma_fence_ops amdgpu_tlb_fence_ops = { - .use_64bit_seqno = true, .get_driver_name = amdgpu_tlb_fence_get_driver_name, .get_timeline_name = amdgpu_tlb_fence_get_timeline_name }; @@ -101,8 +100,8 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev, struct amdgpu_vm *vm INIT_WORK(&f->work, amdgpu_tlb_fence_work); spin_lock_init(&f->lock); - dma_fence_init(&f->base, &amdgpu_tlb_fence_ops, &f->lock, - vm->tlb_fence_context, atomic64_read(&vm->tlb_seq)); + dma_fence_init64(&f->base, &amdgpu_tlb_fence_ops, &f->lock, + vm->tlb_fence_context, atomic64_read(&vm->tlb_seq)); /* TODO: We probably need a separate wq here */ dma_fence_get(&f->base); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c index 121ee17b522b..474bfe36c0c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c @@ -379,9 +379,10 @@ static int vpe_sw_init(struct amdgpu_ip_block *ip_block) if (ret) goto out; - /* TODO: Add queue reset mask when FW fully supports it */ adev->vpe.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->vpe.ring); + if (!amdgpu_sriov_vf(adev)) + adev->vpe.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; ret = amdgpu_vpe_sysfs_reset_mask_init(adev); if (ret) goto out; @@ -435,6 +436,8 @@ static int vpe_hw_fini(struct amdgpu_ip_block *ip_block) struct amdgpu_device *adev = ip_block->adev; struct amdgpu_vpe *vpe = &adev->vpe; + cancel_delayed_work_sync(&adev->vpe.idle_work); + vpe_ring_stop(vpe); /* Power off VPE */ @@ -445,10 +448,6 @@ static int vpe_hw_fini(struct amdgpu_ip_block *ip_block) static int vpe_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = ip_block->adev; - - cancel_delayed_work_sync(&adev->vpe.idle_work); - return vpe_hw_fini(ip_block); } @@ -874,6 +873,27 @@ static void vpe_ring_end_use(struct amdgpu_ring *ring) schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT); } +static int vpe_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) +{ + struct amdgpu_device *adev = ring->adev; + int r; + + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + + r = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, + AMD_PG_STATE_GATE); + if (r) + return r; + r = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, + AMD_PG_STATE_UNGATE); + if (r) + return r; + + return amdgpu_ring_reset_helper_end(ring, timedout_fence); +} + static ssize_t amdgpu_get_vpe_reset_mask(struct device *dev, struct device_attribute *attr, char *buf) @@ -942,6 +962,7 @@ static const struct amdgpu_ring_funcs vpe_ring_funcs = { .preempt_ib = vpe_ring_preempt_ib, .begin_use = 
vpe_ring_begin_use, .end_use = vpe_ring_end_use, + .reset = vpe_ring_reset, }; static void vpe_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 07c936e90d8e..a5adb2ed9b3c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -396,43 +396,33 @@ out: return ret; } -static void amdgpu_dummy_vram_mgr_debug(struct ttm_resource_manager *man, - struct drm_printer *printer) +int amdgpu_vram_mgr_query_address_block_info(struct amdgpu_vram_mgr *mgr, + uint64_t address, struct amdgpu_vram_block_info *info) { - DRM_DEBUG_DRIVER("Dummy vram mgr debug\n"); -} - -static bool amdgpu_dummy_vram_mgr_compatible(struct ttm_resource_manager *man, - struct ttm_resource *res, - const struct ttm_place *place, - size_t size) -{ - DRM_DEBUG_DRIVER("Dummy vram mgr compatible\n"); - return false; -} + struct amdgpu_vram_mgr_resource *vres; + struct drm_buddy_block *block; + u64 start, size; + int ret = -ENOENT; -static bool amdgpu_dummy_vram_mgr_intersects(struct ttm_resource_manager *man, - struct ttm_resource *res, - const struct ttm_place *place, - size_t size) -{ - DRM_DEBUG_DRIVER("Dummy vram mgr intersects\n"); - return true; -} + mutex_lock(&mgr->lock); + list_for_each_entry(vres, &mgr->allocated_vres_list, vres_node) { + list_for_each_entry(block, &vres->blocks, link) { + start = amdgpu_vram_mgr_block_start(block); + size = amdgpu_vram_mgr_block_size(block); + if ((start <= address) && (address < (start + size))) { + info->start = start; + info->size = size; + memcpy(&info->task, &vres->task, sizeof(vres->task)); + ret = 0; + goto out; + } + } + } -static void amdgpu_dummy_vram_mgr_del(struct ttm_resource_manager *man, - struct ttm_resource *res) -{ - DRM_DEBUG_DRIVER("Dummy vram mgr deleted\n"); -} +out: + mutex_unlock(&mgr->lock); -static int amdgpu_dummy_vram_mgr_new(struct ttm_resource_manager *man, - struct ttm_buffer_object *tbo, - const struct ttm_place *place, - struct ttm_resource **res) -{ - DRM_DEBUG_DRIVER("Dummy vram mgr new\n"); - return -ENOSPC; + return ret; } /** @@ -568,6 +558,10 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, remaining_size -= size; } + vres->task.pid = task_pid_nr(current); + get_task_comm(vres->task.comm, current); + list_add_tail(&vres->vres_node, &mgr->allocated_vres_list); + if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) { struct drm_buddy_block *dcc_block; unsigned long dcc_start; @@ -645,12 +639,15 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man, uint64_t vis_usage = 0; mutex_lock(&mgr->lock); + + list_del(&vres->vres_node); + memset(&vres->task, 0, sizeof(vres->task)); + list_for_each_entry(block, &vres->blocks, link) vis_usage += amdgpu_vram_mgr_vis_size(adev, block); - amdgpu_vram_mgr_do_reserve(man); - drm_buddy_free_list(mm, &vres->blocks, vres->flags); + amdgpu_vram_mgr_do_reserve(man); mutex_unlock(&mgr->lock); atomic64_sub(vis_usage, &mgr->vis_usage); @@ -896,14 +893,6 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man, mutex_unlock(&mgr->lock); } -static const struct ttm_resource_manager_func amdgpu_dummy_vram_mgr_func = { - .alloc = amdgpu_dummy_vram_mgr_new, - .free = amdgpu_dummy_vram_mgr_del, - .intersects = amdgpu_dummy_vram_mgr_intersects, - .compatible = amdgpu_dummy_vram_mgr_compatible, - .debug = amdgpu_dummy_vram_mgr_debug -}; - static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = { .alloc = 
amdgpu_vram_mgr_new, .free = amdgpu_vram_mgr_del, @@ -934,18 +923,13 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev) mutex_init(&mgr->lock); INIT_LIST_HEAD(&mgr->reservations_pending); INIT_LIST_HEAD(&mgr->reserved_pages); + INIT_LIST_HEAD(&mgr->allocated_vres_list); mgr->default_page_size = PAGE_SIZE; - if (!adev->gmc.is_app_apu) { - man->func = &amdgpu_vram_mgr_func; - - err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE); - if (err) - return err; - } else { - man->func = &amdgpu_dummy_vram_mgr_func; - DRM_INFO("Setup dummy vram mgr\n"); - } + man->func = &amdgpu_vram_mgr_func; + err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE); + if (err) + return err; ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager); ttm_resource_manager_set_used(man, true); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h index b256cbc2bc27..5f5fd9a911c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h @@ -35,12 +35,26 @@ struct amdgpu_vram_mgr { struct list_head reserved_pages; atomic64_t vis_usage; u64 default_page_size; + struct list_head allocated_vres_list; +}; + +struct amdgpu_vres_task { + pid_t pid; + char comm[TASK_COMM_LEN]; +}; + +struct amdgpu_vram_block_info { + u64 start; + u64 size; + struct amdgpu_vres_task task; }; struct amdgpu_vram_mgr_resource { struct ttm_resource base; struct list_head blocks; unsigned long flags; + struct list_head vres_node; + struct amdgpu_vres_task task; }; static inline u64 amdgpu_vram_mgr_block_start(struct drm_buddy_block *block) @@ -66,7 +80,13 @@ to_amdgpu_vram_mgr_resource(struct ttm_resource *res) static inline void amdgpu_vram_mgr_set_cleared(struct ttm_resource *res) { - to_amdgpu_vram_mgr_resource(res)->flags |= DRM_BUDDY_CLEARED; + struct amdgpu_vram_mgr_resource *ares = to_amdgpu_vram_mgr_resource(res); + + WARN_ON(ares->flags & DRM_BUDDY_CLEARED); + ares->flags |= DRM_BUDDY_CLEARED; } +int amdgpu_vram_mgr_query_address_block_info(struct amdgpu_vram_mgr *mgr, + uint64_t address, struct amdgpu_vram_block_info *info); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index 322816805bfb..1083db8cea2e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -120,6 +120,25 @@ static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, xcp->valid = true; } +static void __amdgpu_xcp_set_unique_id(struct amdgpu_xcp_mgr *xcp_mgr, + int xcp_id) +{ + struct amdgpu_xcp *xcp = &xcp_mgr->xcp[xcp_id]; + struct amdgpu_device *adev = xcp_mgr->adev; + uint32_t inst_mask; + uint64_t uid; + int i; + + if (!amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask) && + inst_mask) { + i = GET_INST(GC, (ffs(inst_mask) - 1)); + uid = amdgpu_device_get_uid(xcp_mgr->adev->uid_info, + AMDGPU_UID_TYPE_XCD, i); + if (uid) + xcp->unique_id = uid; + } +} + int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode) { struct amdgpu_device *adev = xcp_mgr->adev; @@ -158,6 +177,7 @@ int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode) else xcp_mgr->xcp[i].mem_id = mem_id; } + __amdgpu_xcp_set_unique_id(xcp_mgr, i); } xcp_mgr->num_xcps = num_xcps; @@ -218,15 +238,27 @@ int amdgpu_xcp_restore_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr) return __amdgpu_xcp_switch_partition_mode(xcp_mgr, xcp_mgr->mode); } -int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) +static 
bool __amdgpu_xcp_is_cached_mode_valid(struct amdgpu_xcp_mgr *xcp_mgr) { - int mode; + if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode) + return true; if (!amdgpu_sriov_vf(xcp_mgr->adev) && xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) - return xcp_mgr->mode; + return true; - if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode) + if (xcp_mgr->mode != AMDGPU_XCP_MODE_NONE && + xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS) + return true; + + return false; +} + +int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) +{ + int mode; + + if (__amdgpu_xcp_is_cached_mode_valid(xcp_mgr)) return xcp_mgr->mode; if (!(flags & AMDGPU_XCP_FL_LOCKED)) @@ -394,6 +426,7 @@ void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev) p_ddev->primary->dev = adev->xcp_mgr->xcp[i].pdev; p_ddev->driver = adev->xcp_mgr->xcp[i].driver; p_ddev->vma_offset_manager = adev->xcp_mgr->xcp[i].vma_offset_manager; + amdgpu_xcp_drm_dev_free(p_ddev); } } @@ -445,6 +478,222 @@ void amdgpu_xcp_release_sched(struct amdgpu_device *adev, } } +int amdgpu_xcp_select_scheds(struct amdgpu_device *adev, + u32 hw_ip, u32 hw_prio, + struct amdgpu_fpriv *fpriv, + unsigned int *num_scheds, + struct drm_gpu_scheduler ***scheds) +{ + u32 sel_xcp_id; + int i; + struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr; + + if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) { + u32 least_ref_cnt = ~0; + + fpriv->xcp_id = 0; + for (i = 0; i < xcp_mgr->num_xcps; i++) { + u32 total_ref_cnt; + + total_ref_cnt = atomic_read(&xcp_mgr->xcp[i].ref_cnt); + if (total_ref_cnt < least_ref_cnt) { + fpriv->xcp_id = i; + least_ref_cnt = total_ref_cnt; + } + } + } + sel_xcp_id = fpriv->xcp_id; + + if (xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) { + *num_scheds = + xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds; + *scheds = + xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched; + atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt); + dev_dbg(adev->dev, "Selected partition #%d", sel_xcp_id); + } else { + dev_err(adev->dev, "Failed to schedule partition #%d.", sel_xcp_id); + return -ENOENT; + } + + return 0; +} + +static void amdgpu_set_xcp_id(struct amdgpu_device *adev, + uint32_t inst_idx, + struct amdgpu_ring *ring) +{ + int xcp_id; + enum AMDGPU_XCP_IP_BLOCK ip_blk; + uint32_t inst_mask; + + ring->xcp_id = AMDGPU_XCP_NO_PARTITION; + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) + adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id; + if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) || + (ring->funcs->type == AMDGPU_RING_TYPE_CPER)) + return; + + inst_mask = 1 << inst_idx; + + switch (ring->funcs->type) { + case AMDGPU_HW_IP_GFX: + case AMDGPU_RING_TYPE_COMPUTE: + case AMDGPU_RING_TYPE_KIQ: + ip_blk = AMDGPU_XCP_GFX; + break; + case AMDGPU_RING_TYPE_SDMA: + ip_blk = AMDGPU_XCP_SDMA; + break; + case AMDGPU_RING_TYPE_VCN_ENC: + case AMDGPU_RING_TYPE_VCN_JPEG: + ip_blk = AMDGPU_XCP_VCN; + break; + default: + dev_err(adev->dev, "Not support ring type %d!", ring->funcs->type); + return; + } + + for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) { + if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) { + ring->xcp_id = xcp_id; + dev_dbg(adev->dev, "ring:%s xcp_id :%u", ring->name, + ring->xcp_id); + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) + adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id; + break; + } + } +} + +static void amdgpu_xcp_gpu_sched_update(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + unsigned int sel_xcp_id) +{ + unsigned int 
*num_gpu_sched; + + num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id] + .gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds; + adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio] + .sched[(*num_gpu_sched)++] = &ring->sched; + dev_dbg(adev->dev, "%s :[%d] gpu_sched[%d][%d] = %d", + ring->name, sel_xcp_id, ring->funcs->type, + ring->hw_prio, *num_gpu_sched); +} + +static int amdgpu_xcp_sched_list_update(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + int i; + + for (i = 0; i < MAX_XCP; i++) { + atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0); + memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched)); + } + + if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) + return 0; + + for (i = 0; i < AMDGPU_MAX_RINGS; i++) { + ring = adev->rings[i]; + if (!ring || !ring->sched.ready || ring->no_scheduler) + continue; + + amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id); + + /* VCN may be shared by two partitions under CPX MODE in certain + * configs. + */ + if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC || + ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) && + (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst)) + amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1); + } + + return 0; +} + +int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->num_rings; i++) { + struct amdgpu_ring *ring = adev->rings[i]; + + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE || + ring->funcs->type == AMDGPU_RING_TYPE_KIQ) + amdgpu_set_xcp_id(adev, ring->xcc_id, ring); + else + amdgpu_set_xcp_id(adev, ring->me, ring); + } + + return amdgpu_xcp_sched_list_update(adev); +} + +void amdgpu_xcp_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr) +{ + struct amdgpu_device *adev = xcp_mgr->adev; + + xcp_mgr->supp_xcp_modes = 0; + + switch (NUM_XCC(adev->gfx.xcc_mask)) { + case 8: + xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | + BIT(AMDGPU_DPX_PARTITION_MODE) | + BIT(AMDGPU_QPX_PARTITION_MODE) | + BIT(AMDGPU_CPX_PARTITION_MODE); + break; + case 6: + xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | + BIT(AMDGPU_TPX_PARTITION_MODE) | + BIT(AMDGPU_CPX_PARTITION_MODE); + break; + case 4: + xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | + BIT(AMDGPU_DPX_PARTITION_MODE) | + BIT(AMDGPU_CPX_PARTITION_MODE); + break; + case 2: + xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | + BIT(AMDGPU_CPX_PARTITION_MODE); + break; + case 1: + xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | + BIT(AMDGPU_CPX_PARTITION_MODE); + break; + + default: + break; + } +} + +int amdgpu_xcp_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) +{ + /* TODO: + * Stop user queues and threads, and make sure GPU is empty of work. 
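amdgpu_xcp_select_scheds() above, now common XCP code instead of an aqua_vanjaram callback, parks a file with no fixed partition on the XCP with the lowest reference count and returns that partition's scheduler array for the requested hw_ip/hw_prio pair. A simplified sketch of how a consumer could feed the result into a DRM scheduler entity (the entity wiring is illustrative, not copied verbatim from the driver):

	struct drm_gpu_scheduler **scheds;
	struct drm_sched_entity entity;
	unsigned int num_scheds;
	int r;

	r = amdgpu_xcp_select_scheds(adev, AMDGPU_HW_IP_COMPUTE,
				     AMDGPU_RING_PRIO_DEFAULT, fpriv,
				     &num_scheds, &scheds);
	if (!r)
		r = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
					  scheds, num_scheds, NULL);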
+ */ + + if (flags & AMDGPU_XCP_OPS_KFD) + amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev); + + return 0; +} + +int amdgpu_xcp_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) +{ + int ret = 0; + + if (flags & AMDGPU_XCP_OPS_KFD) { + amdgpu_amdkfd_device_probe(xcp_mgr->adev); + amdgpu_amdkfd_device_init(xcp_mgr->adev); + /* If KFD init failed, return failure */ + if (!xcp_mgr->adev->kfd.init_complete) + ret = -EIO; + } + + return ret; +} + /*====================== xcp sysfs - configuration ======================*/ #define XCP_CFG_SYSFS_RES_ATTR_SHOW(_name) \ static ssize_t amdgpu_xcp_res_sysfs_##_name##_show( \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h index 454b33f889fb..1928d9e224fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h @@ -39,6 +39,8 @@ #define AMDGPU_XCP_NO_PARTITION (~0) +#define AMDGPU_XCP_OPS_KFD (1 << 0) + struct amdgpu_fpriv; enum AMDGPU_XCP_IP_BLOCK { @@ -110,6 +112,7 @@ struct amdgpu_xcp { struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX]; struct amdgpu_xcp_mgr *xcp_mgr; struct kobject kobj; + uint64_t unique_id; }; struct amdgpu_xcp_mgr { @@ -144,10 +147,6 @@ struct amdgpu_xcp_mgr_funcs { int (*suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); int (*prepare_resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); int (*resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); - int (*select_scheds)(struct amdgpu_device *adev, - u32 hw_ip, u32 hw_prio, struct amdgpu_fpriv *fpriv, - unsigned int *num_scheds, struct drm_gpu_scheduler ***scheds); - int (*update_partition_sched_list)(struct amdgpu_device *adev); }; int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); @@ -176,19 +175,18 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev, struct drm_file *file_priv); void amdgpu_xcp_release_sched(struct amdgpu_device *adev, struct amdgpu_ctx_entity *entity); - +int amdgpu_xcp_select_scheds(struct amdgpu_device *adev, + u32 hw_ip, u32 hw_prio, + struct amdgpu_fpriv *fpriv, + unsigned int *num_scheds, + struct drm_gpu_scheduler ***scheds); +void amdgpu_xcp_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr); +int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev); +int amdgpu_xcp_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags); +int amdgpu_xcp_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags); void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev); void amdgpu_xcp_sysfs_fini(struct amdgpu_device *adev); -#define amdgpu_xcp_select_scheds(adev, e, c, d, x, y) \ - ((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \ - (adev)->xcp_mgr->funcs->select_scheds ? \ - (adev)->xcp_mgr->funcs->select_scheds((adev), (e), (c), (d), (x), (y)) : -ENOENT) -#define amdgpu_xcp_update_partition_sched_list(adev) \ - ((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \ - (adev)->xcp_mgr->funcs->update_partition_sched_list ? 
\ - (adev)->xcp_mgr->funcs->update_partition_sched_list(adev) : 0) - static inline int amdgpu_xcp_get_num_xcp(struct amdgpu_xcp_mgr *xcp_mgr) { if (!xcp_mgr) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index d9ad37711c3e..1ede308a7c67 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -1771,16 +1771,25 @@ void amdgpu_xgmi_early_init(struct amdgpu_device *adev) case IP_VERSION(9, 4, 0): case IP_VERSION(9, 4, 1): case IP_VERSION(9, 4, 2): - adev->gmc.xgmi.max_speed = XGMI_SPEED_25GT; + /* 25 GT/s */ + adev->gmc.xgmi.max_speed = 25; adev->gmc.xgmi.max_width = 16; break; case IP_VERSION(9, 4, 3): case IP_VERSION(9, 4, 4): case IP_VERSION(9, 5, 0): - adev->gmc.xgmi.max_speed = XGMI_SPEED_32GT; + /* 32 GT/s */ + adev->gmc.xgmi.max_speed = 32; adev->gmc.xgmi.max_width = 16; break; default: break; } } + +void amgpu_xgmi_set_max_speed_width(struct amdgpu_device *adev, + uint16_t max_speed, uint8_t max_width) +{ + adev->gmc.xgmi.max_speed = max_speed; + adev->gmc.xgmi.max_width = max_width; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h index f994be985f42..5f36aff17e79 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h @@ -25,12 +25,6 @@ #include <drm/task_barrier.h> #include "amdgpu_ras.h" -enum amdgpu_xgmi_link_speed { - XGMI_SPEED_16GT = 16, - XGMI_SPEED_25GT = 25, - XGMI_SPEED_32GT = 32 -}; - struct amdgpu_hive_info { struct kobject kobj; uint64_t hive_id; @@ -97,7 +91,7 @@ struct amdgpu_xgmi { struct ras_common_if *ras_if; bool connected_to_cpu; struct amdgpu_xgmi_ras *ras; - enum amdgpu_xgmi_link_speed max_speed; + uint16_t max_speed; uint8_t max_width; }; @@ -130,4 +124,10 @@ int amdgpu_xgmi_get_ext_link(struct amdgpu_device *adev, int link_num); void amdgpu_xgmi_early_init(struct amdgpu_device *adev); uint32_t amdgpu_xgmi_get_max_bandwidth(struct amdgpu_device *adev); +void amgpu_xgmi_set_max_speed_width(struct amdgpu_device *adev, + uint16_t max_speed, uint8_t max_width); + +/* Cleanup macro for use with __free(xgmi_put_hive) */ +DEFINE_FREE(xgmi_put_hive, struct amdgpu_hive_info *, if (_T) amdgpu_put_xgmi_hive(_T)) + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h index 92ca13097aaa..3a79ed7d8031 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h @@ -113,7 +113,8 @@ union amd_sriov_reg_access_flags { uint32_t vf_reg_access_mmhub : 1; uint32_t vf_reg_access_gc : 1; uint32_t vf_reg_access_l1_tlb_cntl : 1; - uint32_t reserved : 28; + uint32_t vf_reg_access_sq_config : 1; + uint32_t reserved : 27; } flags; uint32_t all; }; @@ -404,12 +405,17 @@ struct amd_sriov_ras_cper_dump { uint32_t buf[]; }; +struct amd_sriov_ras_chk_criti { + uint32_t hit; +}; + struct amdsriov_ras_telemetry { struct amd_sriov_ras_telemetry_header header; union { struct amd_sriov_ras_telemetry_error_count error_count; struct amd_sriov_ras_cper_dump cper_dump; + struct amd_sriov_ras_chk_criti chk_criti; } body; }; diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c index 1c083304ae77..811124ff88a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c @@ -29,12 +29,11 @@ #include "gfx_v9_4_3.h" #include "gfxhub_v1_2.h" #include "sdma_v4_4_2.h" +#include "amdgpu_ip.h" #define XCP_INST_MASK(num_inst, 
xcp_id) \ (num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0) -#define AMDGPU_XCP_OPS_KFD (1 << 0) - void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev) { int i; @@ -62,234 +61,6 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev) adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1; } -static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev) -{ - return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst); -} - -static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev, - uint32_t inst_idx, struct amdgpu_ring *ring) -{ - int xcp_id; - enum AMDGPU_XCP_IP_BLOCK ip_blk; - uint32_t inst_mask; - - ring->xcp_id = AMDGPU_XCP_NO_PARTITION; - if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) - adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id; - if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) || - (ring->funcs->type == AMDGPU_RING_TYPE_CPER)) - return; - - inst_mask = 1 << inst_idx; - - switch (ring->funcs->type) { - case AMDGPU_HW_IP_GFX: - case AMDGPU_RING_TYPE_COMPUTE: - case AMDGPU_RING_TYPE_KIQ: - ip_blk = AMDGPU_XCP_GFX; - break; - case AMDGPU_RING_TYPE_SDMA: - ip_blk = AMDGPU_XCP_SDMA; - break; - case AMDGPU_RING_TYPE_VCN_ENC: - case AMDGPU_RING_TYPE_VCN_JPEG: - ip_blk = AMDGPU_XCP_VCN; - break; - default: - DRM_ERROR("Not support ring type %d!", ring->funcs->type); - return; - } - - for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) { - if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) { - ring->xcp_id = xcp_id; - dev_dbg(adev->dev, "ring:%s xcp_id :%u", ring->name, - ring->xcp_id); - if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) - adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id; - break; - } - } -} - -static void aqua_vanjaram_xcp_gpu_sched_update( - struct amdgpu_device *adev, - struct amdgpu_ring *ring, - unsigned int sel_xcp_id) -{ - unsigned int *num_gpu_sched; - - num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id] - .gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds; - adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio] - .sched[(*num_gpu_sched)++] = &ring->sched; - DRM_DEBUG("%s :[%d] gpu_sched[%d][%d] = %d", ring->name, - sel_xcp_id, ring->funcs->type, - ring->hw_prio, *num_gpu_sched); -} - -static int aqua_vanjaram_xcp_sched_list_update( - struct amdgpu_device *adev) -{ - struct amdgpu_ring *ring; - int i; - - for (i = 0; i < MAX_XCP; i++) { - atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0); - memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched)); - } - - if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) - return 0; - - for (i = 0; i < AMDGPU_MAX_RINGS; i++) { - ring = adev->rings[i]; - if (!ring || !ring->sched.ready || ring->no_scheduler) - continue; - - aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id); - - /* VCN may be shared by two partitions under CPX MODE in certain - * configs. 
- */ - if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC || - ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) && - aqua_vanjaram_xcp_vcn_shared(adev)) - aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1); - } - - return 0; -} - -static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev) -{ - int i; - - for (i = 0; i < adev->num_rings; i++) { - struct amdgpu_ring *ring = adev->rings[i]; - - if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE || - ring->funcs->type == AMDGPU_RING_TYPE_KIQ) - aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring); - else - aqua_vanjaram_set_xcp_id(adev, ring->me, ring); - } - - return aqua_vanjaram_xcp_sched_list_update(adev); -} - -static int aqua_vanjaram_select_scheds( - struct amdgpu_device *adev, - u32 hw_ip, - u32 hw_prio, - struct amdgpu_fpriv *fpriv, - unsigned int *num_scheds, - struct drm_gpu_scheduler ***scheds) -{ - u32 sel_xcp_id; - int i; - - if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) { - u32 least_ref_cnt = ~0; - - fpriv->xcp_id = 0; - for (i = 0; i < adev->xcp_mgr->num_xcps; i++) { - u32 total_ref_cnt; - - total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt); - if (total_ref_cnt < least_ref_cnt) { - fpriv->xcp_id = i; - least_ref_cnt = total_ref_cnt; - } - } - } - sel_xcp_id = fpriv->xcp_id; - - if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) { - *num_scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds; - *scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched; - atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt); - DRM_DEBUG("Selected partition #%d", sel_xcp_id); - } else { - DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id); - return -ENOENT; - } - - return 0; -} - -static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev, - enum amd_hw_ip_block_type block, - int8_t inst) -{ - int8_t dev_inst; - - switch (block) { - case GC_HWIP: - case SDMA0_HWIP: - /* Both JPEG and VCN as JPEG is only alias of VCN */ - case VCN_HWIP: - dev_inst = adev->ip_map.dev_inst[block][inst]; - break; - default: - /* For rest of the IPs, no look up required. - * Assume 'logical instance == physical instance' for all configs. 
*/ - dev_inst = inst; - break; - } - - return dev_inst; -} - -static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev, - enum amd_hw_ip_block_type block, - uint32_t mask) -{ - uint32_t dev_mask = 0; - int8_t log_inst, dev_inst; - - while (mask) { - log_inst = ffs(mask) - 1; - dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst); - dev_mask |= (1 << dev_inst); - mask &= ~(1 << log_inst); - } - - return dev_mask; -} - -static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev, - enum amd_hw_ip_block_type ip_block, - uint32_t inst_mask) -{ - int l = 0, i; - - while (inst_mask) { - i = ffs(inst_mask) - 1; - adev->ip_map.dev_inst[ip_block][l++] = i; - inst_mask &= ~(1 << i); - } - for (; l < HWIP_MAX_INSTANCE; l++) - adev->ip_map.dev_inst[ip_block][l] = -1; -} - -void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev) -{ - u32 ip_map[][2] = { - { GC_HWIP, adev->gfx.xcc_mask }, - { SDMA0_HWIP, adev->sdma.sdma_mask }, - { VCN_HWIP, adev->vcn.inst_mask }, - }; - int i; - - for (i = 0; i < ARRAY_SIZE(ip_map); ++i) - aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]); - - adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst; - adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask; -} - /* Fixed pattern for smn addressing on different AIDs: * bit[34]: indicate cross AID access * bit[33:32]: indicate target AID id @@ -353,11 +124,14 @@ static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr) if (adev->nbio.funcs->get_compute_partition_mode) { mode = adev->nbio.funcs->get_compute_partition_mode(adev); - if (mode != derv_mode) + if (mode != derv_mode) { dev_warn( adev->dev, "Mismatch in compute partition mode - reported : %d derived : %d", mode, derv_mode); + if (derv_mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) + amdgpu_device_bus_status_check(adev); + } } return mode; @@ -453,6 +227,7 @@ static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr, uint16_t *nps_modes) { struct amdgpu_device *adev = xcp_mgr->adev; + uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0); if (!num_xcp || !nps_modes || !(xcp_mgr->supp_xcp_modes & BIT(px_mode))) return -EINVAL; @@ -476,12 +251,14 @@ static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr, *num_xcp = 4; *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | BIT(AMDGPU_NPS4_PARTITION_MODE); + if (gc_ver == IP_VERSION(9, 5, 0)) + *nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE); break; case AMDGPU_CPX_PARTITION_MODE: *num_xcp = NUM_XCC(adev->gfx.xcc_mask); *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | BIT(AMDGPU_NPS4_PARTITION_MODE); - if (amdgpu_sriov_vf(adev)) + if (gc_ver == IP_VERSION(9, 5, 0)) *nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE); break; default: @@ -593,72 +370,6 @@ static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr, return false; } -static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) -{ - /* TODO: - * Stop user queues and threads, and make sure GPU is empty of work. 
- */ - - if (flags & AMDGPU_XCP_OPS_KFD) - amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev); - - return 0; -} - -static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) -{ - int ret = 0; - - if (flags & AMDGPU_XCP_OPS_KFD) { - amdgpu_amdkfd_device_probe(xcp_mgr->adev); - amdgpu_amdkfd_device_init(xcp_mgr->adev); - /* If KFD init failed, return failure */ - if (!xcp_mgr->adev->kfd.init_complete) - ret = -EIO; - } - - return ret; -} - -static void -__aqua_vanjaram_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr) -{ - struct amdgpu_device *adev = xcp_mgr->adev; - - xcp_mgr->supp_xcp_modes = 0; - - switch (NUM_XCC(adev->gfx.xcc_mask)) { - case 8: - xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | - BIT(AMDGPU_DPX_PARTITION_MODE) | - BIT(AMDGPU_QPX_PARTITION_MODE) | - BIT(AMDGPU_CPX_PARTITION_MODE); - break; - case 6: - xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | - BIT(AMDGPU_TPX_PARTITION_MODE) | - BIT(AMDGPU_CPX_PARTITION_MODE); - break; - case 4: - xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | - BIT(AMDGPU_DPX_PARTITION_MODE) | - BIT(AMDGPU_CPX_PARTITION_MODE); - break; - /* this seems only existing in emulation phase */ - case 2: - xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | - BIT(AMDGPU_CPX_PARTITION_MODE); - break; - case 1: - xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | - BIT(AMDGPU_CPX_PARTITION_MODE); - break; - - default: - break; - } -} - static void __aqua_vanjaram_update_available_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr) { int mode; @@ -705,7 +416,7 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, goto out; } - ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags); + ret = amdgpu_xcp_pre_partition_switch(xcp_mgr, flags); if (ret) goto unlock; @@ -718,7 +429,7 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, *num_xcps = num_xcc / num_xcc_per_xcp; amdgpu_xcp_init(xcp_mgr, *num_xcps, mode); - ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags); + ret = amdgpu_xcp_post_partition_switch(xcp_mgr, flags); if (!ret) __aqua_vanjaram_update_available_partition_mode(xcp_mgr); unlock: @@ -801,9 +512,6 @@ struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = { .get_ip_details = &aqua_vanjaram_get_xcp_ip_details, .get_xcp_res_info = &aqua_vanjaram_get_xcp_res_info, .get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id, - .select_scheds = &aqua_vanjaram_select_scheds, - .update_partition_sched_list = - &aqua_vanjaram_update_partition_sched_list }; static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) @@ -818,7 +526,7 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) if (ret) return ret; - __aqua_vanjaram_update_supported_modes(adev->xcp_mgr); + amdgpu_xcp_update_supported_modes(adev->xcp_mgr); /* TODO: Default memory node affinity init */ return ret; @@ -858,7 +566,7 @@ int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) if (ret) return ret; - aqua_vanjaram_ip_map_init(adev); + amdgpu_ip_map_init(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c index 427b073de2fc..7a063e44d429 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -1246,6 +1246,10 @@ static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, ectx.last_jump_jiffies = 0; if (ws) { ectx.ws = kcalloc(4, ws, GFP_KERNEL); + if (!ectx.ws) { + ret = -ENOMEM; + goto free; + } ectx.ws_size = ws; } else { ectx.ws 
= NULL; @@ -1494,6 +1498,28 @@ static void atom_get_vbios_version(struct atom_context *ctx) } } +static void atom_get_vbios_build(struct atom_context *ctx) +{ + unsigned char *atom_rom_hdr; + unsigned char *str; + uint16_t base, len; + + base = CU16(ATOM_ROM_TABLE_PTR); + atom_rom_hdr = CSTR(base); + + str = CSTR(CU16(base + ATOM_ROM_CFG_PTR)); + /* Skip config string */ + while (str < atom_rom_hdr && *str++) + ; + /* Skip change list string */ + while (str < atom_rom_hdr && *str++) + ; + + len = min(atom_rom_hdr - str, STRLEN_NORMAL); + if (len) + strscpy(ctx->build_num, str, len); +} + struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios) { int base; @@ -1554,6 +1580,7 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios) atom_get_vbios_pn(ctx); atom_get_vbios_date(ctx); atom_get_vbios_version(ctx); + atom_get_vbios_build(ctx); return ctx; } diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h index b807f6639a4c..825ff28731f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.h +++ b/drivers/gpu/drm/amd/amdgpu/atom.h @@ -37,6 +37,7 @@ struct drm_device; #define ATOM_ROM_MAGIC "ATOM" #define ATOM_ROM_MAGIC_PTR 4 +#define ATOM_ROM_CFG_PTR 0xC #define ATOM_ROM_MSG_PTR 0x10 #define ATOM_ROM_CMD_PTR 0x1E #define ATOM_ROM_DATA_PTR 0x20 @@ -151,6 +152,7 @@ struct atom_context { uint32_t version; uint8_t vbios_ver_str[STRLEN_NORMAL]; uint8_t date[STRLEN_NORMAL]; + uint8_t build_num[STRLEN_NORMAL]; }; extern int amdgpu_atom_debug; diff --git a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c new file mode 100644 index 000000000000..96616a865aac --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "amdgpu.h" +#include "nv.h" + +#include "soc15_common.h" +#include "soc15_hw_ip.h" +#include "cyan_skillfish_ip_offset.h" + +int cyan_skillfish_reg_base_init(struct amdgpu_device *adev) +{ + /* HW has more IP blocks, only initialized the blocke needed by driver */ + uint32_t i; + + adev->gfx.xcc_mask = 1; + for (i = 0 ; i < MAX_INSTANCE ; ++i) { + adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); + adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i])); + adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i])); + adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i])); + adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i])); + adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i])); + adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i])); + adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i])); + adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i])); + adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i])); + adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i])); + adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); + adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); + adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); + adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); + adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i])); + } + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index bf7c22f81cda..72ca6538b2e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -1141,8 +1141,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, /* save values for DPM */ amdgpu_crtc->line_time = line_time; - amdgpu_crtc->wm_high = latency_watermark_a; - amdgpu_crtc->wm_low = latency_watermark_b; + /* Save number of lines the linebuffer leads before the scanout */ amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines; } @@ -1462,17 +1461,12 @@ static int dce_v10_0_audio_init(struct amdgpu_device *adev) static void dce_v10_0_audio_fini(struct amdgpu_device *adev) { - int i; - if (!amdgpu_audio) return; if (!adev->mode_info.audio.enabled) return; - for (i = 0; i < adev->mode_info.audio.num_pins; i++) - dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); - adev->mode_info.audio.enabled = false; } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c deleted file mode 100644 index 47e05783c4a0..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ /dev/null @@ -1,3823 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include <drm/drm_edid.h> -#include <drm/drm_fourcc.h> -#include <drm/drm_modeset_helper.h> -#include <drm/drm_modeset_helper_vtables.h> -#include <drm/drm_vblank.h> - -#include "amdgpu.h" -#include "amdgpu_pm.h" -#include "amdgpu_i2c.h" -#include "vid.h" -#include "atom.h" -#include "amdgpu_atombios.h" -#include "atombios_crtc.h" -#include "atombios_encoders.h" -#include "amdgpu_pll.h" -#include "amdgpu_connectors.h" -#include "amdgpu_display.h" -#include "dce_v11_0.h" - -#include "dce/dce_11_0_d.h" -#include "dce/dce_11_0_sh_mask.h" -#include "dce/dce_11_0_enum.h" -#include "oss/oss_3_0_d.h" -#include "oss/oss_3_0_sh_mask.h" -#include "gmc/gmc_8_1_d.h" -#include "gmc/gmc_8_1_sh_mask.h" - -#include "ivsrcid/ivsrcid_vislands30.h" - -static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev); -static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev); -static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev, int hpd); - -static const u32 crtc_offsets[] = -{ - CRTC0_REGISTER_OFFSET, - CRTC1_REGISTER_OFFSET, - CRTC2_REGISTER_OFFSET, - CRTC3_REGISTER_OFFSET, - CRTC4_REGISTER_OFFSET, - CRTC5_REGISTER_OFFSET, - CRTC6_REGISTER_OFFSET -}; - -static const u32 hpd_offsets[] = -{ - HPD0_REGISTER_OFFSET, - HPD1_REGISTER_OFFSET, - HPD2_REGISTER_OFFSET, - HPD3_REGISTER_OFFSET, - HPD4_REGISTER_OFFSET, - HPD5_REGISTER_OFFSET -}; - -static const uint32_t dig_offsets[] = { - DIG0_REGISTER_OFFSET, - DIG1_REGISTER_OFFSET, - DIG2_REGISTER_OFFSET, - DIG3_REGISTER_OFFSET, - DIG4_REGISTER_OFFSET, - DIG5_REGISTER_OFFSET, - DIG6_REGISTER_OFFSET, - DIG7_REGISTER_OFFSET, - DIG8_REGISTER_OFFSET -}; - -static const struct { - uint32_t reg; - uint32_t vblank; - uint32_t vline; - uint32_t hpd; - -} interrupt_status_offsets[] = { { - .reg = mmDISP_INTERRUPT_STATUS, - .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK, - .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK, - .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK -}, { - .reg = mmDISP_INTERRUPT_STATUS_CONTINUE, - .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK, - .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK, - .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK -}, { - .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2, - .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK, - .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK, - .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK -}, { - .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3, - .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK, - .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK, - .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK -}, { - .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4, - .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK, - .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK, - .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK -}, { - .reg = 
mmDISP_INTERRUPT_STATUS_CONTINUE5, - .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK, - .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK, - .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK -} }; - -static const u32 cz_golden_settings_a11[] = -{ - mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000, - mmFBC_MISC, 0x1f311fff, 0x14300000, -}; - -static const u32 cz_mgcg_cgcg_init[] = -{ - mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100, - mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000, -}; - -static const u32 stoney_golden_settings_a11[] = -{ - mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000, - mmFBC_MISC, 0x1f311fff, 0x14302000, -}; - -static const u32 polaris11_golden_settings_a11[] = -{ - mmDCI_CLK_CNTL, 0x00000080, 0x00000000, - mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, - mmFBC_DEBUG1, 0xffffffff, 0x00000008, - mmFBC_MISC, 0x9f313fff, 0x14302008, - mmHDMI_CONTROL, 0x313f031f, 0x00000011, -}; - -static const u32 polaris10_golden_settings_a11[] = -{ - mmDCI_CLK_CNTL, 0x00000080, 0x00000000, - mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, - mmFBC_MISC, 0x9f313fff, 0x14302008, - mmHDMI_CONTROL, 0x313f031f, 0x00000011, -}; - -static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev) -{ - switch (adev->asic_type) { - case CHIP_CARRIZO: - amdgpu_device_program_register_sequence(adev, - cz_mgcg_cgcg_init, - ARRAY_SIZE(cz_mgcg_cgcg_init)); - amdgpu_device_program_register_sequence(adev, - cz_golden_settings_a11, - ARRAY_SIZE(cz_golden_settings_a11)); - break; - case CHIP_STONEY: - amdgpu_device_program_register_sequence(adev, - stoney_golden_settings_a11, - ARRAY_SIZE(stoney_golden_settings_a11)); - break; - case CHIP_POLARIS11: - case CHIP_POLARIS12: - amdgpu_device_program_register_sequence(adev, - polaris11_golden_settings_a11, - ARRAY_SIZE(polaris11_golden_settings_a11)); - break; - case CHIP_POLARIS10: - case CHIP_VEGAM: - amdgpu_device_program_register_sequence(adev, - polaris10_golden_settings_a11, - ARRAY_SIZE(polaris10_golden_settings_a11)); - break; - default: - break; - } -} - -static u32 dce_v11_0_audio_endpt_rreg(struct amdgpu_device *adev, - u32 block_offset, u32 reg) -{ - unsigned long flags; - u32 r; - - spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags); - WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); - r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset); - spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags); - - return r; -} - -static void dce_v11_0_audio_endpt_wreg(struct amdgpu_device *adev, - u32 block_offset, u32 reg, u32 v) -{ - unsigned long flags; - - spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags); - WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); - WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v); - spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags); -} - -static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) -{ - if (crtc < 0 || crtc >= adev->mode_info.num_crtc) - return 0; - else - return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); -} - -static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev) -{ - unsigned i; - - /* Enable pflip interrupts */ - for (i = 0; i < adev->mode_info.num_crtc; i++) - amdgpu_irq_get(adev, &adev->pageflip_irq, i); -} - -static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev) -{ - unsigned i; - - /* Disable pflip interrupts */ - for (i = 0; i < adev->mode_info.num_crtc; i++) - amdgpu_irq_put(adev, &adev->pageflip_irq, i); -} - -/** - * 
dce_v11_0_page_flip - pageflip callback. - * - * @adev: amdgpu_device pointer - * @crtc_id: crtc to cleanup pageflip on - * @crtc_base: new address of the crtc (GPU MC address) - * @async: asynchronous flip - * - * Triggers the actual pageflip by updating the primary - * surface base address. - */ -static void dce_v11_0_page_flip(struct amdgpu_device *adev, - int crtc_id, u64 crtc_base, bool async) -{ - struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; - struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb; - u32 tmp; - - /* flip immediate for async, default is vsync */ - tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL, - GRPH_SURFACE_UPDATE_IMMEDIATE_EN, async ? 1 : 0); - WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); - /* update pitch */ - WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, - fb->pitches[0] / fb->format->cpp[0]); - /* update the scanout addresses */ - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, - upper_32_bits(crtc_base)); - /* writing to the low address triggers the update */ - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, - lower_32_bits(crtc_base)); - /* post the write */ - RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset); -} - -static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, - u32 *vbl, u32 *position) -{ - if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) - return -EINVAL; - - *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]); - *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]); - - return 0; -} - -/** - * dce_v11_0_hpd_sense - hpd sense callback. - * - * @adev: amdgpu_device pointer - * @hpd: hpd (hotplug detect) pin - * - * Checks if a digital monitor is connected (evergreen+). - * Returns true if connected, false if not connected. - */ -static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev, - enum amdgpu_hpd_id hpd) -{ - bool connected = false; - - if (hpd >= adev->mode_info.num_hpd) - return connected; - - if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) & - DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK) - connected = true; - - return connected; -} - -/** - * dce_v11_0_hpd_set_polarity - hpd set polarity callback. - * - * @adev: amdgpu_device pointer - * @hpd: hpd (hotplug detect) pin - * - * Set the polarity of the hpd pin (evergreen+). - */ -static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev, - enum amdgpu_hpd_id hpd) -{ - u32 tmp; - bool connected = dce_v11_0_hpd_sense(adev, hpd); - - if (hpd >= adev->mode_info.num_hpd) - return; - - tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); - if (connected) - tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0); - else - tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1); - WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); -} - -/** - * dce_v11_0_hpd_init - hpd setup callback. - * - * @adev: amdgpu_device pointer - * - * Setup the hpd pins used by the card (evergreen+). - * Enable the pin, set the polarity, and enable the hpd interrupts. 
- */ -static void dce_v11_0_hpd_init(struct amdgpu_device *adev) -{ - struct drm_device *dev = adev_to_drm(adev); - struct drm_connector *connector; - struct drm_connector_list_iter iter; - u32 tmp; - - drm_connector_list_iter_begin(dev, &iter); - drm_for_each_connector_iter(connector, &iter) { - struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); - - if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) - continue; - - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || - connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { - /* don't try to enable hpd on eDP or LVDS avoid breaking the - * aux dp channel on imac and help (but not completely fix) - * https://bugzilla.redhat.com/show_bug.cgi?id=726143 - * also avoid interrupt storms during dpms. - */ - tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); - tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0); - WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); - continue; - } - - tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); - tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1); - WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); - - tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]); - tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, - DC_HPD_CONNECT_INT_DELAY, - AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS); - tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, - DC_HPD_DISCONNECT_INT_DELAY, - AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS); - WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); - - dce_v11_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd); - dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); - amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); - } - drm_connector_list_iter_end(&iter); -} - -/** - * dce_v11_0_hpd_fini - hpd tear down callback. - * - * @adev: amdgpu_device pointer - * - * Tear down the hpd pins used by the card (evergreen+). - * Disable the hpd interrupts. 
- */ -static void dce_v11_0_hpd_fini(struct amdgpu_device *adev) -{ - struct drm_device *dev = adev_to_drm(adev); - struct drm_connector *connector; - struct drm_connector_list_iter iter; - u32 tmp; - - drm_connector_list_iter_begin(dev, &iter); - drm_for_each_connector_iter(connector, &iter) { - struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); - - if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) - continue; - - tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); - tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0); - WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); - - amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); - } - drm_connector_list_iter_end(&iter); -} - -static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev) -{ - return mmDC_GPIO_HPD_A; -} - -static bool dce_v11_0_is_display_hung(struct amdgpu_device *adev) -{ - u32 crtc_hung = 0; - u32 crtc_status[6]; - u32 i, j, tmp; - - for (i = 0; i < adev->mode_info.num_crtc; i++) { - tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) { - crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]); - crtc_hung |= (1 << i); - } - } - - for (j = 0; j < 10; j++) { - for (i = 0; i < adev->mode_info.num_crtc; i++) { - if (crtc_hung & (1 << i)) { - tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]); - if (tmp != crtc_status[i]) - crtc_hung &= ~(1 << i); - } - } - if (crtc_hung == 0) - return false; - udelay(100); - } - - return true; -} - -static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev, - bool render) -{ - u32 tmp; - - /* Lockout access through VGA aperture*/ - tmp = RREG32(mmVGA_HDP_CONTROL); - if (render) - tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0); - else - tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); - WREG32(mmVGA_HDP_CONTROL, tmp); - - /* disable VGA render */ - tmp = RREG32(mmVGA_RENDER_CONTROL); - if (render) - tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1); - else - tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); - WREG32(mmVGA_RENDER_CONTROL, tmp); -} - -static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev) -{ - int num_crtc = 0; - - switch (adev->asic_type) { - case CHIP_CARRIZO: - num_crtc = 3; - break; - case CHIP_STONEY: - num_crtc = 2; - break; - case CHIP_POLARIS10: - case CHIP_VEGAM: - num_crtc = 6; - break; - case CHIP_POLARIS11: - case CHIP_POLARIS12: - num_crtc = 5; - break; - default: - num_crtc = 0; - } - return num_crtc; -} - -void dce_v11_0_disable_dce(struct amdgpu_device *adev) -{ - /*Disable VGA render and enabled crtc, if has DCE engine*/ - if (amdgpu_atombios_has_dce_engine_info(adev)) { - u32 tmp; - int crtc_enabled, i; - - dce_v11_0_set_vga_render_state(adev, false); - - /*Disable crtc*/ - for (i = 0; i < dce_v11_0_get_num_crtc(adev); i++) { - crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), - CRTC_CONTROL, CRTC_MASTER_EN); - if (crtc_enabled) { - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); - tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); - tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); - WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - } - } - } -} - -static void dce_v11_0_program_fmt(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - struct amdgpu_encoder *amdgpu_encoder 
= to_amdgpu_encoder(encoder); - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); - struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); - int bpc = 0; - u32 tmp = 0; - enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE; - - if (connector) { - struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); - bpc = amdgpu_connector_get_monitor_bpc(connector); - dither = amdgpu_connector->dither; - } - - /* LVDS/eDP FMT is set up by atom */ - if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT) - return; - - /* not needed for analog */ - if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) || - (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2)) - return; - - if (bpc == 0) - return; - - switch (bpc) { - case 6: - if (dither == AMDGPU_FMT_DITHER_ENABLE) { - /* XXX sort out optimal dither settings */ - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0); - } else { - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0); - } - break; - case 8: - if (dither == AMDGPU_FMT_DITHER_ENABLE) { - /* XXX sort out optimal dither settings */ - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1); - } else { - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1); - } - break; - case 10: - if (dither == AMDGPU_FMT_DITHER_ENABLE) { - /* XXX sort out optimal dither settings */ - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2); - } else { - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2); - } - break; - default: - /* not needed */ - break; - } - - WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp); -} - - -/* display watermark setup */ -/** - * dce_v11_0_line_buffer_adjust - Set up the line buffer - * - * @adev: amdgpu_device pointer - * @amdgpu_crtc: the selected display controller - * @mode: the current display mode on the selected display - * controller - * - * Setup up the line buffer allocation for - * the selected display controller (CIK). - * Returns the line buffer size in pixels. - */ -static u32 dce_v11_0_line_buffer_adjust(struct amdgpu_device *adev, - struct amdgpu_crtc *amdgpu_crtc, - struct drm_display_mode *mode) -{ - u32 tmp, buffer_alloc, i, mem_cfg; - u32 pipe_offset = amdgpu_crtc->crtc_id; - /* - * Line Buffer Setup - * There are 6 line buffers, one for each display controllers. 
- * There are 3 partitions per LB. Select the number of partitions - * to enable based on the display width. For display widths larger - * than 4096, you need use to use 2 display controllers and combine - * them using the stereo blender. - */ - if (amdgpu_crtc->base.enabled && mode) { - if (mode->crtc_hdisplay < 1920) { - mem_cfg = 1; - buffer_alloc = 2; - } else if (mode->crtc_hdisplay < 2560) { - mem_cfg = 2; - buffer_alloc = 2; - } else if (mode->crtc_hdisplay < 4096) { - mem_cfg = 0; - buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4; - } else { - DRM_DEBUG_KMS("Mode too big for LB!\n"); - mem_cfg = 0; - buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4; - } - } else { - mem_cfg = 1; - buffer_alloc = 0; - } - - tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg); - WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp); - - tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset); - tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc); - WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp); - - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset); - if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED)) - break; - udelay(1); - } - - if (amdgpu_crtc->base.enabled && mode) { - switch (mem_cfg) { - case 0: - default: - return 4096 * 2; - case 1: - return 1920 * 2; - case 2: - return 2560 * 2; - } - } - - /* controller not enabled, so no lb used */ - return 0; -} - -/** - * cik_get_number_of_dram_channels - get the number of dram channels - * - * @adev: amdgpu_device pointer - * - * Look up the number of video ram channels (CIK). - * Used for display watermark bandwidth calculations - * Returns the number of dram channels - */ -static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev) -{ - u32 tmp = RREG32(mmMC_SHARED_CHMAP); - - switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) { - case 0: - default: - return 1; - case 1: - return 2; - case 2: - return 4; - case 3: - return 8; - case 4: - return 3; - case 5: - return 6; - case 6: - return 10; - case 7: - return 12; - case 8: - return 16; - } -} - -struct dce10_wm_params { - u32 dram_channels; /* number of dram channels */ - u32 yclk; /* bandwidth per dram data pin in kHz */ - u32 sclk; /* engine clock in kHz */ - u32 disp_clk; /* display clock in kHz */ - u32 src_width; /* viewport width */ - u32 active_time; /* active display time in ns */ - u32 blank_time; /* blank time in ns */ - bool interlaced; /* mode is interlaced */ - fixed20_12 vsc; /* vertical scale ratio */ - u32 num_heads; /* number of active crtcs */ - u32 bytes_per_pixel; /* bytes per pixel display + overlay */ - u32 lb_size; /* line buffer allocated to pipe */ - u32 vtaps; /* vertical scaler taps */ -}; - -/** - * dce_v11_0_dram_bandwidth - get the dram bandwidth - * - * @wm: watermark calculation data - * - * Calculate the raw dram bandwidth (CIK). 
- * Used for display watermark bandwidth calculations - * Returns the dram bandwidth in MBytes/s - */ -static u32 dce_v11_0_dram_bandwidth(struct dce10_wm_params *wm) -{ - /* Calculate raw DRAM Bandwidth */ - fixed20_12 dram_efficiency; /* 0.7 */ - fixed20_12 yclk, dram_channels, bandwidth; - fixed20_12 a; - - a.full = dfixed_const(1000); - yclk.full = dfixed_const(wm->yclk); - yclk.full = dfixed_div(yclk, a); - dram_channels.full = dfixed_const(wm->dram_channels * 4); - a.full = dfixed_const(10); - dram_efficiency.full = dfixed_const(7); - dram_efficiency.full = dfixed_div(dram_efficiency, a); - bandwidth.full = dfixed_mul(dram_channels, yclk); - bandwidth.full = dfixed_mul(bandwidth, dram_efficiency); - - return dfixed_trunc(bandwidth); -} - -/** - * dce_v11_0_dram_bandwidth_for_display - get the dram bandwidth for display - * - * @wm: watermark calculation data - * - * Calculate the dram bandwidth used for display (CIK). - * Used for display watermark bandwidth calculations - * Returns the dram bandwidth for display in MBytes/s - */ -static u32 dce_v11_0_dram_bandwidth_for_display(struct dce10_wm_params *wm) -{ - /* Calculate DRAM Bandwidth and the part allocated to display. */ - fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */ - fixed20_12 yclk, dram_channels, bandwidth; - fixed20_12 a; - - a.full = dfixed_const(1000); - yclk.full = dfixed_const(wm->yclk); - yclk.full = dfixed_div(yclk, a); - dram_channels.full = dfixed_const(wm->dram_channels * 4); - a.full = dfixed_const(10); - disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */ - disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a); - bandwidth.full = dfixed_mul(dram_channels, yclk); - bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation); - - return dfixed_trunc(bandwidth); -} - -/** - * dce_v11_0_data_return_bandwidth - get the data return bandwidth - * - * @wm: watermark calculation data - * - * Calculate the data return bandwidth used for display (CIK). - * Used for display watermark bandwidth calculations - * Returns the data return bandwidth in MBytes/s - */ -static u32 dce_v11_0_data_return_bandwidth(struct dce10_wm_params *wm) -{ - /* Calculate the display Data return Bandwidth */ - fixed20_12 return_efficiency; /* 0.8 */ - fixed20_12 sclk, bandwidth; - fixed20_12 a; - - a.full = dfixed_const(1000); - sclk.full = dfixed_const(wm->sclk); - sclk.full = dfixed_div(sclk, a); - a.full = dfixed_const(10); - return_efficiency.full = dfixed_const(8); - return_efficiency.full = dfixed_div(return_efficiency, a); - a.full = dfixed_const(32); - bandwidth.full = dfixed_mul(a, sclk); - bandwidth.full = dfixed_mul(bandwidth, return_efficiency); - - return dfixed_trunc(bandwidth); -} - -/** - * dce_v11_0_dmif_request_bandwidth - get the dmif bandwidth - * - * @wm: watermark calculation data - * - * Calculate the dmif bandwidth used for display (CIK). 
- * Used for display watermark bandwidth calculations - * Returns the dmif bandwidth in MBytes/s - */ -static u32 dce_v11_0_dmif_request_bandwidth(struct dce10_wm_params *wm) -{ - /* Calculate the DMIF Request Bandwidth */ - fixed20_12 disp_clk_request_efficiency; /* 0.8 */ - fixed20_12 disp_clk, bandwidth; - fixed20_12 a, b; - - a.full = dfixed_const(1000); - disp_clk.full = dfixed_const(wm->disp_clk); - disp_clk.full = dfixed_div(disp_clk, a); - a.full = dfixed_const(32); - b.full = dfixed_mul(a, disp_clk); - - a.full = dfixed_const(10); - disp_clk_request_efficiency.full = dfixed_const(8); - disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a); - - bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency); - - return dfixed_trunc(bandwidth); -} - -/** - * dce_v11_0_available_bandwidth - get the min available bandwidth - * - * @wm: watermark calculation data - * - * Calculate the min available bandwidth used for display (CIK). - * Used for display watermark bandwidth calculations - * Returns the min available bandwidth in MBytes/s - */ -static u32 dce_v11_0_available_bandwidth(struct dce10_wm_params *wm) -{ - /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */ - u32 dram_bandwidth = dce_v11_0_dram_bandwidth(wm); - u32 data_return_bandwidth = dce_v11_0_data_return_bandwidth(wm); - u32 dmif_req_bandwidth = dce_v11_0_dmif_request_bandwidth(wm); - - return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth)); -} - -/** - * dce_v11_0_average_bandwidth - get the average available bandwidth - * - * @wm: watermark calculation data - * - * Calculate the average available bandwidth used for display (CIK). - * Used for display watermark bandwidth calculations - * Returns the average available bandwidth in MBytes/s - */ -static u32 dce_v11_0_average_bandwidth(struct dce10_wm_params *wm) -{ - /* Calculate the display mode Average Bandwidth - * DisplayMode should contain the source and destination dimensions, - * timing, etc. - */ - fixed20_12 bpp; - fixed20_12 line_time; - fixed20_12 src_width; - fixed20_12 bandwidth; - fixed20_12 a; - - a.full = dfixed_const(1000); - line_time.full = dfixed_const(wm->active_time + wm->blank_time); - line_time.full = dfixed_div(line_time, a); - bpp.full = dfixed_const(wm->bytes_per_pixel); - src_width.full = dfixed_const(wm->src_width); - bandwidth.full = dfixed_mul(src_width, bpp); - bandwidth.full = dfixed_mul(bandwidth, wm->vsc); - bandwidth.full = dfixed_div(bandwidth, line_time); - - return dfixed_trunc(bandwidth); -} - -/** - * dce_v11_0_latency_watermark - get the latency watermark - * - * @wm: watermark calculation data - * - * Calculate the latency watermark (CIK). - * Used for display watermark bandwidth calculations - * Returns the latency watermark in ns - */ -static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm) -{ - /* First calculate the latency in ns */ - u32 mc_latency = 2000; /* 2000 ns. 
*/ - u32 available_bandwidth = dce_v11_0_available_bandwidth(wm); - u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth; - u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth; - u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */ - u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) + - (wm->num_heads * cursor_line_pair_return_time); - u32 latency = mc_latency + other_heads_data_return_time + dc_latency; - u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time; - u32 tmp, dmif_size = 12288; - fixed20_12 a, b, c; - - if (wm->num_heads == 0) - return 0; - - a.full = dfixed_const(2); - b.full = dfixed_const(1); - if ((wm->vsc.full > a.full) || - ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) || - (wm->vtaps >= 5) || - ((wm->vsc.full >= a.full) && wm->interlaced)) - max_src_lines_per_dst_line = 4; - else - max_src_lines_per_dst_line = 2; - - a.full = dfixed_const(available_bandwidth); - b.full = dfixed_const(wm->num_heads); - a.full = dfixed_div(a, b); - tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512); - tmp = min(dfixed_trunc(a), tmp); - - lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000); - - a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); - b.full = dfixed_const(1000); - c.full = dfixed_const(lb_fill_bw); - b.full = dfixed_div(c, b); - a.full = dfixed_div(a, b); - line_fill_time = dfixed_trunc(a); - - if (line_fill_time < wm->active_time) - return latency; - else - return latency + (line_fill_time - wm->active_time); - -} - -/** - * dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display - check - * average and available dram bandwidth - * - * @wm: watermark calculation data - * - * Check if the display average bandwidth fits in the display - * dram bandwidth (CIK). - * Used for display watermark bandwidth calculations - * Returns true if the display fits, false if not. - */ -static bool dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm) -{ - if (dce_v11_0_average_bandwidth(wm) <= - (dce_v11_0_dram_bandwidth_for_display(wm) / wm->num_heads)) - return true; - else - return false; -} - -/** - * dce_v11_0_average_bandwidth_vs_available_bandwidth - check - * average and available bandwidth - * - * @wm: watermark calculation data - * - * Check if the display average bandwidth fits in the display - * available bandwidth (CIK). - * Used for display watermark bandwidth calculations - * Returns true if the display fits, false if not. - */ -static bool dce_v11_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm) -{ - if (dce_v11_0_average_bandwidth(wm) <= - (dce_v11_0_available_bandwidth(wm) / wm->num_heads)) - return true; - else - return false; -} - -/** - * dce_v11_0_check_latency_hiding - check latency hiding - * - * @wm: watermark calculation data - * - * Check latency hiding (CIK). - * Used for display watermark bandwidth calculations - * Returns true if the display fits, false if not. 
- */ -static bool dce_v11_0_check_latency_hiding(struct dce10_wm_params *wm) -{ - u32 lb_partitions = wm->lb_size / wm->src_width; - u32 line_time = wm->active_time + wm->blank_time; - u32 latency_tolerant_lines; - u32 latency_hiding; - fixed20_12 a; - - a.full = dfixed_const(1); - if (wm->vsc.full > a.full) - latency_tolerant_lines = 1; - else { - if (lb_partitions <= (wm->vtaps + 1)) - latency_tolerant_lines = 1; - else - latency_tolerant_lines = 2; - } - - latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time); - - if (dce_v11_0_latency_watermark(wm) <= latency_hiding) - return true; - else - return false; -} - -/** - * dce_v11_0_program_watermarks - program display watermarks - * - * @adev: amdgpu_device pointer - * @amdgpu_crtc: the selected display controller - * @lb_size: line buffer size - * @num_heads: number of display controllers in use - * - * Calculate and program the display watermarks for the - * selected display controller (CIK). - */ -static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, - struct amdgpu_crtc *amdgpu_crtc, - u32 lb_size, u32 num_heads) -{ - struct drm_display_mode *mode = &amdgpu_crtc->base.mode; - struct dce10_wm_params wm_low, wm_high; - u32 active_time; - u32 line_time = 0; - u32 latency_watermark_a = 0, latency_watermark_b = 0; - u32 tmp, wm_mask, lb_vblank_lead_lines = 0; - - if (amdgpu_crtc->base.enabled && num_heads && mode) { - active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, - (u32)mode->clock); - line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, - (u32)mode->clock); - line_time = min_t(u32, line_time, 65535); - - /* watermark for high clocks */ - if (adev->pm.dpm_enabled) { - wm_high.yclk = - amdgpu_dpm_get_mclk(adev, false) * 10; - wm_high.sclk = - amdgpu_dpm_get_sclk(adev, false) * 10; - } else { - wm_high.yclk = adev->pm.current_mclk * 10; - wm_high.sclk = adev->pm.current_sclk * 10; - } - - wm_high.disp_clk = mode->clock; - wm_high.src_width = mode->crtc_hdisplay; - wm_high.active_time = active_time; - wm_high.blank_time = line_time - wm_high.active_time; - wm_high.interlaced = false; - if (mode->flags & DRM_MODE_FLAG_INTERLACE) - wm_high.interlaced = true; - wm_high.vsc = amdgpu_crtc->vsc; - wm_high.vtaps = 1; - if (amdgpu_crtc->rmx_type != RMX_OFF) - wm_high.vtaps = 2; - wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */ - wm_high.lb_size = lb_size; - wm_high.dram_channels = cik_get_number_of_dram_channels(adev); - wm_high.num_heads = num_heads; - - /* set for high clocks */ - latency_watermark_a = min_t(u32, dce_v11_0_latency_watermark(&wm_high), 65535); - - /* possibly force display priority to high */ - /* should really do this at mode validation time... 
*/ - if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) || - !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_high) || - !dce_v11_0_check_latency_hiding(&wm_high) || - (adev->mode_info.disp_priority == 2)) { - DRM_DEBUG_KMS("force priority to high\n"); - } - - /* watermark for low clocks */ - if (adev->pm.dpm_enabled) { - wm_low.yclk = - amdgpu_dpm_get_mclk(adev, true) * 10; - wm_low.sclk = - amdgpu_dpm_get_sclk(adev, true) * 10; - } else { - wm_low.yclk = adev->pm.current_mclk * 10; - wm_low.sclk = adev->pm.current_sclk * 10; - } - - wm_low.disp_clk = mode->clock; - wm_low.src_width = mode->crtc_hdisplay; - wm_low.active_time = active_time; - wm_low.blank_time = line_time - wm_low.active_time; - wm_low.interlaced = false; - if (mode->flags & DRM_MODE_FLAG_INTERLACE) - wm_low.interlaced = true; - wm_low.vsc = amdgpu_crtc->vsc; - wm_low.vtaps = 1; - if (amdgpu_crtc->rmx_type != RMX_OFF) - wm_low.vtaps = 2; - wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */ - wm_low.lb_size = lb_size; - wm_low.dram_channels = cik_get_number_of_dram_channels(adev); - wm_low.num_heads = num_heads; - - /* set for low clocks */ - latency_watermark_b = min_t(u32, dce_v11_0_latency_watermark(&wm_low), 65535); - - /* possibly force display priority to high */ - /* should really do this at mode validation time... */ - if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) || - !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_low) || - !dce_v11_0_check_latency_hiding(&wm_low) || - (adev->mode_info.disp_priority == 2)) { - DRM_DEBUG_KMS("force priority to high\n"); - } - lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); - } - - /* select wm A */ - wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1); - WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp); - tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a); - tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time); - WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp); - /* select wm B */ - tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2); - WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp); - tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b); - tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time); - WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp); - /* restore original selection */ - WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask); - - /* save values for DPM */ - amdgpu_crtc->line_time = line_time; - amdgpu_crtc->wm_high = latency_watermark_a; - amdgpu_crtc->wm_low = latency_watermark_b; - /* Save number of lines the linebuffer leads before the scanout */ - amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines; -} - -/** - * dce_v11_0_bandwidth_update - program display watermarks - * - * @adev: amdgpu_device pointer - * - * Calculate and program the display watermarks and line - * buffer allocation (CIK). 
- */ -static void dce_v11_0_bandwidth_update(struct amdgpu_device *adev) -{ - struct drm_display_mode *mode = NULL; - u32 num_heads = 0, lb_size; - int i; - - amdgpu_display_update_priority(adev); - - for (i = 0; i < adev->mode_info.num_crtc; i++) { - if (adev->mode_info.crtcs[i]->base.enabled) - num_heads++; - } - for (i = 0; i < adev->mode_info.num_crtc; i++) { - mode = &adev->mode_info.crtcs[i]->base.mode; - lb_size = dce_v11_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode); - dce_v11_0_program_watermarks(adev, adev->mode_info.crtcs[i], - lb_size, num_heads); - } -} - -static void dce_v11_0_audio_get_connected_pins(struct amdgpu_device *adev) -{ - int i; - u32 offset, tmp; - - for (i = 0; i < adev->mode_info.audio.num_pins; i++) { - offset = adev->mode_info.audio.pin[i].offset; - tmp = RREG32_AUDIO_ENDPT(offset, - ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT); - if (((tmp & - AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >> - AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1) - adev->mode_info.audio.pin[i].connected = false; - else - adev->mode_info.audio.pin[i].connected = true; - } -} - -static struct amdgpu_audio_pin *dce_v11_0_audio_get_pin(struct amdgpu_device *adev) -{ - int i; - - dce_v11_0_audio_get_connected_pins(adev); - - for (i = 0; i < adev->mode_info.audio.num_pins; i++) { - if (adev->mode_info.audio.pin[i].connected) - return &adev->mode_info.audio.pin[i]; - } - DRM_ERROR("No connected audio pins found!\n"); - return NULL; -} - -static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder) -{ - struct amdgpu_device *adev = drm_to_adev(encoder->dev); - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; - u32 tmp; - - if (!dig || !dig->afmt || !dig->afmt->pin) - return; - - tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset); - tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id); - WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp); -} - -static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder, - struct drm_display_mode *mode) -{ - struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; - struct drm_connector *connector; - struct drm_connector_list_iter iter; - struct amdgpu_connector *amdgpu_connector = NULL; - u32 tmp; - int interlace = 0; - - if (!dig || !dig->afmt || !dig->afmt->pin) - return; - - drm_connector_list_iter_begin(dev, &iter); - drm_for_each_connector_iter(connector, &iter) { - if (connector->encoder == encoder) { - amdgpu_connector = to_amdgpu_connector(connector); - break; - } - } - drm_connector_list_iter_end(&iter); - - if (!amdgpu_connector) { - DRM_ERROR("Couldn't find encoder's connector\n"); - return; - } - - if (mode->flags & DRM_MODE_FLAG_INTERLACE) - interlace = 1; - if (connector->latency_present[interlace]) { - tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, - VIDEO_LIPSYNC, connector->video_latency[interlace]); - tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, - AUDIO_LIPSYNC, connector->audio_latency[interlace]); - } else { - tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, - VIDEO_LIPSYNC, 0); - tmp = REG_SET_FIELD(0, 
AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, - AUDIO_LIPSYNC, 0); - } - WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, - ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp); -} - -static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; - struct drm_connector *connector; - struct drm_connector_list_iter iter; - struct amdgpu_connector *amdgpu_connector = NULL; - u32 tmp; - u8 *sadb = NULL; - int sad_count; - - if (!dig || !dig->afmt || !dig->afmt->pin) - return; - - drm_connector_list_iter_begin(dev, &iter); - drm_for_each_connector_iter(connector, &iter) { - if (connector->encoder == encoder) { - amdgpu_connector = to_amdgpu_connector(connector); - break; - } - } - drm_connector_list_iter_end(&iter); - - if (!amdgpu_connector) { - DRM_ERROR("Couldn't find encoder's connector\n"); - return; - } - - sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb); - if (sad_count < 0) { - DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); - sad_count = 0; - } - - /* program the speaker allocation */ - tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset, - ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, - DP_CONNECTION, 0); - /* set HDMI mode */ - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, - HDMI_CONNECTION, 1); - if (sad_count) - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, - SPEAKER_ALLOCATION, sadb[0]); - else - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, - SPEAKER_ALLOCATION, 5); /* stereo */ - WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, - ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); - - kfree(sadb); -} - -static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; - struct drm_connector *connector; - struct drm_connector_list_iter iter; - struct amdgpu_connector *amdgpu_connector = NULL; - struct cea_sad *sads; - int i, sad_count; - - static const u16 eld_reg_to_type[][2] = { - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM }, - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 }, - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 }, - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 }, - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 }, - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC }, - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS }, - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC }, - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 }, - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD }, - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP }, - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, - }; - - if (!dig || !dig->afmt || 
!dig->afmt->pin) - return; - - drm_connector_list_iter_begin(dev, &iter); - drm_for_each_connector_iter(connector, &iter) { - if (connector->encoder == encoder) { - amdgpu_connector = to_amdgpu_connector(connector); - break; - } - } - drm_connector_list_iter_end(&iter); - - if (!amdgpu_connector) { - DRM_ERROR("Couldn't find encoder's connector\n"); - return; - } - - sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads); - if (sad_count < 0) - DRM_ERROR("Couldn't read SADs: %d\n", sad_count); - if (sad_count <= 0) - return; - BUG_ON(!sads); - - for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { - u32 tmp = 0; - u8 stereo_freqs = 0; - int max_channels = -1; - int j; - - for (j = 0; j < sad_count; j++) { - struct cea_sad *sad = &sads[j]; - - if (sad->format == eld_reg_to_type[i][1]) { - if (sad->channels > max_channels) { - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, - MAX_CHANNELS, sad->channels); - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, - DESCRIPTOR_BYTE_2, sad->byte2); - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, - SUPPORTED_FREQUENCIES, sad->freq); - max_channels = sad->channels; - } - - if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM) - stereo_freqs |= sad->freq; - else - break; - } - } - - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, - SUPPORTED_FREQUENCIES_STEREO, stereo_freqs); - WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp); - } - - kfree(sads); -} - -static void dce_v11_0_audio_enable(struct amdgpu_device *adev, - struct amdgpu_audio_pin *pin, - bool enable) -{ - if (!pin) - return; - - WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, - enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0); -} - -static const u32 pin_offsets[] = -{ - AUD0_REGISTER_OFFSET, - AUD1_REGISTER_OFFSET, - AUD2_REGISTER_OFFSET, - AUD3_REGISTER_OFFSET, - AUD4_REGISTER_OFFSET, - AUD5_REGISTER_OFFSET, - AUD6_REGISTER_OFFSET, - AUD7_REGISTER_OFFSET, -}; - -static int dce_v11_0_audio_init(struct amdgpu_device *adev) -{ - int i; - - if (!amdgpu_audio) - return 0; - - adev->mode_info.audio.enabled = true; - - switch (adev->asic_type) { - case CHIP_CARRIZO: - case CHIP_STONEY: - adev->mode_info.audio.num_pins = 7; - break; - case CHIP_POLARIS10: - case CHIP_VEGAM: - adev->mode_info.audio.num_pins = 8; - break; - case CHIP_POLARIS11: - case CHIP_POLARIS12: - adev->mode_info.audio.num_pins = 6; - break; - default: - return -EINVAL; - } - - for (i = 0; i < adev->mode_info.audio.num_pins; i++) { - adev->mode_info.audio.pin[i].channels = -1; - adev->mode_info.audio.pin[i].rate = -1; - adev->mode_info.audio.pin[i].bits_per_sample = -1; - adev->mode_info.audio.pin[i].status_bits = 0; - adev->mode_info.audio.pin[i].category_code = 0; - adev->mode_info.audio.pin[i].connected = false; - adev->mode_info.audio.pin[i].offset = pin_offsets[i]; - adev->mode_info.audio.pin[i].id = i; - /* disable audio. 
it will be set up later */ - /* XXX remove once we switch to ip funcs */ - dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); - } - - return 0; -} - -static void dce_v11_0_audio_fini(struct amdgpu_device *adev) -{ - int i; - - if (!amdgpu_audio) - return; - - if (!adev->mode_info.audio.enabled) - return; - - for (i = 0; i < adev->mode_info.audio.num_pins; i++) - dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); - - adev->mode_info.audio.enabled = false; -} - -/* - * update the N and CTS parameters for a given pixel clock rate - */ -static void dce_v11_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock) -{ - struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock); - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; - u32 tmp; - - tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset); - tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz); - WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp); - tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset); - tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz); - WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp); - - tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset); - tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz); - WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp); - tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset); - tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz); - WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp); - - tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset); - tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz); - WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp); - tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset); - tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz); - WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp); - -} - -/* - * build a HDMI Video Info Frame - */ -static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder, - void *buffer, size_t size) -{ - struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; - uint8_t *frame = buffer + 3; - uint8_t *header = buffer; - - WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset, - frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24)); - WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset, - frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24)); - WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset, - frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24)); - WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset, - frame[0xC] | (frame[0xD] << 8) | (header[1] << 24)); -} - -static void dce_v11_0_audio_set_dto(struct drm_encoder *encoder, u32 clock) -{ - struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); - u32 dto_phase = 24 * 1000; - u32 dto_modulo = clock; - u32 tmp; - - if (!dig || !dig->afmt) - return; - - /* XXX two dtos; generally use dto0 for hdmi */ - /* Express [24MHz / target pixel clock] as an exact rational - * 
number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE - * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator - */ - tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE); - tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, - amdgpu_crtc->crtc_id); - WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp); - WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase); - WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo); -} - -/* - * update the info frames with the data from the current display mode - */ -static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder, - struct drm_display_mode *mode) -{ - struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; - struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); - u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; - struct hdmi_avi_infoframe frame; - ssize_t err; - u32 tmp; - int bpc = 8; - - if (!dig || !dig->afmt) - return; - - /* Silent, r600_hdmi_enable will raise WARN for us */ - if (!dig->afmt->enabled) - return; - - /* hdmi deep color mode general control packets setup, if bpc > 8 */ - if (encoder->crtc) { - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); - bpc = amdgpu_crtc->bpc; - } - - /* disable audio prior to setting up hw */ - dig->afmt->pin = dce_v11_0_audio_get_pin(adev); - dce_v11_0_audio_enable(adev, dig->afmt->pin, false); - - dce_v11_0_audio_set_dto(encoder, mode->clock); - - tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset); - tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); - WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */ - - WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000); - - tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset); - switch (bpc) { - case 0: - case 6: - case 8: - case 16: - default: - tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0); - tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0); - DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n", - connector->name, bpc); - break; - case 10: - tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1); - tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1); - DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n", - connector->name); - break; - case 12: - tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1); - tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2); - DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n", - connector->name); - break; - } - WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp); - - tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset); - tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */ - tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */ - tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */ - WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); - - tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset); - /* enable audio info frames (frames won't be set until audio is enabled) */ - tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1); - /* required for audio info values to be updated */ - tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, 
HDMI_AUDIO_INFO_CONT, 1); - WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); - - tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset); - /* required for audio info values to be updated */ - tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1); - WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); - - tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset); - /* anything other than 0 */ - tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2); - WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp); - - WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */ - - tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset); - /* set the default audio delay */ - tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1); - /* should be suffient for all audio modes and small enough for all hblanks */ - tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3); - WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); - - tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset); - /* allow 60958 channel status fields to be updated */ - tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1); - WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); - - tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset); - if (bpc > 8) - /* clear SW CTS value */ - tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0); - else - /* select SW CTS value */ - tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1); - /* allow hw to sent ACR packets when required */ - tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1); - WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp); - - dce_v11_0_afmt_update_ACR(encoder, mode->clock); - - tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset); - tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1); - WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp); - - tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset); - tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2); - WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp); - - tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset); - tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3); - tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4); - tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5); - tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6); - tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7); - tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8); - WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp); - - dce_v11_0_audio_write_speaker_allocation(encoder); - - WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, - (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT)); - - dce_v11_0_afmt_audio_select_pin(encoder); - dce_v11_0_audio_write_sad_regs(encoder); - dce_v11_0_audio_write_latency_fields(encoder, mode); - - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode); - if (err < 0) { - DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); - return; - } - - err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer)); - if (err < 0) { - DRM_ERROR("failed to pack AVI infoframe: %zd\n", err); - return; - } - - dce_v11_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer)); - - tmp = 
RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset); - /* enable AVI info frames */ - tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1); - /* required for audio info values to be updated */ - tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1); - WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); - - tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset); - tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2); - WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp); - - tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset); - /* send audio packets */ - tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1); - WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); - - WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF); - WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF); - WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001); - WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001); - - /* enable audio after to setting up hw */ - dce_v11_0_audio_enable(adev, dig->afmt->pin, true); -} - -static void dce_v11_0_afmt_enable(struct drm_encoder *encoder, bool enable) -{ - struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; - - if (!dig || !dig->afmt) - return; - - /* Silent, r600_hdmi_enable will raise WARN for us */ - if (enable && dig->afmt->enabled) - return; - if (!enable && !dig->afmt->enabled) - return; - - if (!enable && dig->afmt->pin) { - dce_v11_0_audio_enable(adev, dig->afmt->pin, false); - dig->afmt->pin = NULL; - } - - dig->afmt->enabled = enable; - - DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n", - enable ? 
"En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id); -} - -static int dce_v11_0_afmt_init(struct amdgpu_device *adev) -{ - int i; - - for (i = 0; i < adev->mode_info.num_dig; i++) - adev->mode_info.afmt[i] = NULL; - - /* DCE11 has audio blocks tied to DIG encoders */ - for (i = 0; i < adev->mode_info.num_dig; i++) { - adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL); - if (adev->mode_info.afmt[i]) { - adev->mode_info.afmt[i]->offset = dig_offsets[i]; - adev->mode_info.afmt[i]->id = i; - } else { - int j; - for (j = 0; j < i; j++) { - kfree(adev->mode_info.afmt[j]); - adev->mode_info.afmt[j] = NULL; - } - return -ENOMEM; - } - } - return 0; -} - -static void dce_v11_0_afmt_fini(struct amdgpu_device *adev) -{ - int i; - - for (i = 0; i < adev->mode_info.num_dig; i++) { - kfree(adev->mode_info.afmt[i]); - adev->mode_info.afmt[i] = NULL; - } -} - -static const u32 vga_control_regs[6] = -{ - mmD1VGA_CONTROL, - mmD2VGA_CONTROL, - mmD3VGA_CONTROL, - mmD4VGA_CONTROL, - mmD5VGA_CONTROL, - mmD6VGA_CONTROL, -}; - -static void dce_v11_0_vga_enable(struct drm_crtc *crtc, bool enable) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - u32 vga_control; - - vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1; - if (enable) - WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1); - else - WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control); -} - -static void dce_v11_0_grph_enable(struct drm_crtc *crtc, bool enable) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - - if (enable) - WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1); - else - WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0); -} - -static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int x, int y, int atomic) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - struct drm_framebuffer *target_fb; - struct drm_gem_object *obj; - struct amdgpu_bo *abo; - uint64_t fb_location, tiling_flags; - uint32_t fb_format, fb_pitch_pixels; - u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE); - u32 pipe_config; - u32 tmp, viewport_w, viewport_h; - int r; - bool bypass_lut = false; - - /* no fb bound */ - if (!atomic && !crtc->primary->fb) { - DRM_DEBUG_KMS("No FB bound\n"); - return 0; - } - - if (atomic) - target_fb = fb; - else - target_fb = crtc->primary->fb; - - /* If atomic, assume fb object is pinned & idle & fenced and - * just update base pointers - */ - obj = target_fb->obj[0]; - abo = gem_to_amdgpu_bo(obj); - r = amdgpu_bo_reserve(abo, false); - if (unlikely(r != 0)) - return r; - - if (!atomic) { - abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; - r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM); - if (unlikely(r != 0)) { - amdgpu_bo_unreserve(abo); - return -EINVAL; - } - } - fb_location = amdgpu_bo_gpu_offset(abo); - - amdgpu_bo_get_tiling_flags(abo, &tiling_flags); - amdgpu_bo_unreserve(abo); - - pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); - - switch (target_fb->format->format) { - case DRM_FORMAT_C8: - fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); - break; - case DRM_FORMAT_XRGB4444: - case DRM_FORMAT_ARGB4444: - fb_format 
= REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2); -#ifdef __BIG_ENDIAN - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, - ENDIAN_8IN16); -#endif - break; - case DRM_FORMAT_XRGB1555: - case DRM_FORMAT_ARGB1555: - fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); -#ifdef __BIG_ENDIAN - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, - ENDIAN_8IN16); -#endif - break; - case DRM_FORMAT_BGRX5551: - case DRM_FORMAT_BGRA5551: - fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5); -#ifdef __BIG_ENDIAN - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, - ENDIAN_8IN16); -#endif - break; - case DRM_FORMAT_RGB565: - fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1); -#ifdef __BIG_ENDIAN - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, - ENDIAN_8IN16); -#endif - break; - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_ARGB8888: - fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); -#ifdef __BIG_ENDIAN - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, - ENDIAN_8IN32); -#endif - break; - case DRM_FORMAT_XRGB2101010: - case DRM_FORMAT_ARGB2101010: - fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1); -#ifdef __BIG_ENDIAN - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, - ENDIAN_8IN32); -#endif - /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ - bypass_lut = true; - break; - case DRM_FORMAT_BGRX1010102: - case DRM_FORMAT_BGRA1010102: - fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4); -#ifdef __BIG_ENDIAN - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, - ENDIAN_8IN32); -#endif - /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ - bypass_lut = true; - break; - case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_ABGR8888: - fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, 2); - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, 2); -#ifdef __BIG_ENDIAN - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, - ENDIAN_8IN32); -#endif - break; - default: - DRM_ERROR("Unsupported screen format %p4cc\n", - &target_fb->format->format); - return -EINVAL; - } - - if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) { - unsigned bankw, bankh, mtaspect, tile_split, num_banks; - - bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); - bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); - mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); - tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); - num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); - - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE, - ARRAY_2D_TILED_THIN1); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT, - tile_split); - fb_format = 
REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT, - mtaspect); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE, - ADDR_SURF_MICRO_TILING_DISPLAY); - } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) { - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE, - ARRAY_1D_TILED_THIN1); - } - - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG, - pipe_config); - - dce_v11_0_vga_enable(crtc, false); - - /* Make sure surface address is updated at vertical blank rather than - * horizontal blank - */ - tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL, - GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0); - WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); - - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, - upper_32_bits(fb_location)); - WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, - upper_32_bits(fb_location)); - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, - (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK); - WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, - (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK); - WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format); - WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap); - - /* - * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT - * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to - * retain the full precision throughout the pipeline. 
- */ - tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset); - if (bypass_lut) - tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1); - else - tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0); - WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp); - - if (bypass_lut) - DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n"); - - WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0); - WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0); - WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0); - WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0); - WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width); - WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height); - - fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0]; - WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels); - - dce_v11_0_grph_enable(crtc, true); - - WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset, - target_fb->height); - - x &= ~3; - y &= ~1; - WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset, - (x << 16) | y); - viewport_w = crtc->mode.hdisplay; - viewport_h = (crtc->mode.vdisplay + 1) & ~1; - WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, - (viewport_w << 16) | viewport_h); - - /* set pageflip to happen anywhere in vblank interval */ - WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); - - if (!atomic && fb && fb != crtc->primary->fb) { - abo = gem_to_amdgpu_bo(fb->obj[0]); - r = amdgpu_bo_reserve(abo, true); - if (unlikely(r != 0)) - return r; - amdgpu_bo_unpin(abo); - amdgpu_bo_unreserve(abo); - } - - /* Bytes per pixel may have changed */ - dce_v11_0_bandwidth_update(adev); - - return 0; -} - -static void dce_v11_0_set_interleave(struct drm_crtc *crtc, - struct drm_display_mode *mode) -{ - struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - u32 tmp; - - tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset); - if (mode->flags & DRM_MODE_FLAG_INTERLACE) - tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1); - else - tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0); - WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp); -} - -static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - u16 *r, *g, *b; - int i; - u32 tmp; - - DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id); - - tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0); - WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp); - - tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1); - WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp); - - tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0); - WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); - - WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0); - - WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0); - WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0); - WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0); - - WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + 
amdgpu_crtc->crtc_offset, 0xffff); - WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff); - WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff); - - WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0); - WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007); - - WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0); - r = crtc->gamma_store; - g = r + crtc->gamma_size; - b = g + crtc->gamma_size; - for (i = 0; i < 256; i++) { - WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset, - ((*r++ & 0xffc0) << 14) | - ((*g++ & 0xffc0) << 4) | - (*b++ >> 6)); - } - - tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0); - tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0); - tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR2_DEGAMMA_MODE, 0); - WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); - - tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0); - WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp); - - tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0); - WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); - - tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0); - WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp); - - /* XXX match this to the depth of the crtc fmt block, move to modeset? */ - WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0); - /* XXX this only needs to be programmed once per crtc at startup, - * not sure where the best place for it is - */ - tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1); - WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp); -} - -static int dce_v11_0_pick_dig_encoder(struct drm_encoder *encoder) -{ - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; - - switch (amdgpu_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - if (dig->linkb) - return 1; - else - return 0; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - if (dig->linkb) - return 3; - else - return 2; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - if (dig->linkb) - return 5; - else - return 4; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: - return 6; - default: - DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); - return 0; - } -} - -/** - * dce_v11_0_pick_pll - Allocate a PPLL for use by the crtc. - * - * @crtc: drm crtc - * - * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors - * a single PPLL can be used for all DP crtcs/encoders. For non-DP - * monitors a dedicated PPLL must be used. If a particular board has - * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming - * as there is no need to program the PLL itself. If we are not able to - * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to - * avoid messing up an existing monitor. 
- * - * Asic specific PLL information - * - * DCE 10.x - * Tonga - * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) - * CI - * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC - * - */ -static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - u32 pll_in_use; - int pll; - - if ((adev->asic_type == CHIP_POLARIS10) || - (adev->asic_type == CHIP_POLARIS11) || - (adev->asic_type == CHIP_POLARIS12) || - (adev->asic_type == CHIP_VEGAM)) { - struct amdgpu_encoder *amdgpu_encoder = - to_amdgpu_encoder(amdgpu_crtc->encoder); - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; - - if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) - return ATOM_DP_DTO; - - switch (amdgpu_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - if (dig->linkb) - return ATOM_COMBOPHY_PLL1; - else - return ATOM_COMBOPHY_PLL0; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - if (dig->linkb) - return ATOM_COMBOPHY_PLL3; - else - return ATOM_COMBOPHY_PLL2; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - if (dig->linkb) - return ATOM_COMBOPHY_PLL5; - else - return ATOM_COMBOPHY_PLL4; - default: - DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); - return ATOM_PPLL_INVALID; - } - } - - if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) { - if (adev->clock.dp_extclk) - /* skip PPLL programming if using ext clock */ - return ATOM_PPLL_INVALID; - else { - /* use the same PPLL for all DP monitors */ - pll = amdgpu_pll_get_shared_dp_ppll(crtc); - if (pll != ATOM_PPLL_INVALID) - return pll; - } - } else { - /* use the same PPLL for all monitors with the same clock */ - pll = amdgpu_pll_get_shared_nondp_ppll(crtc); - if (pll != ATOM_PPLL_INVALID) - return pll; - } - - /* XXX need to determine what plls are available on each DCE11 part */ - pll_in_use = amdgpu_pll_get_use_mask(crtc); - if (adev->flags & AMD_IS_APU) { - if (!(pll_in_use & (1 << ATOM_PPLL1))) - return ATOM_PPLL1; - if (!(pll_in_use & (1 << ATOM_PPLL0))) - return ATOM_PPLL0; - DRM_ERROR("unable to allocate a PPLL\n"); - return ATOM_PPLL_INVALID; - } else { - if (!(pll_in_use & (1 << ATOM_PPLL2))) - return ATOM_PPLL2; - if (!(pll_in_use & (1 << ATOM_PPLL1))) - return ATOM_PPLL1; - if (!(pll_in_use & (1 << ATOM_PPLL0))) - return ATOM_PPLL0; - DRM_ERROR("unable to allocate a PPLL\n"); - return ATOM_PPLL_INVALID; - } - return ATOM_PPLL_INVALID; -} - -static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock) -{ - struct amdgpu_device *adev = drm_to_adev(crtc->dev); - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - uint32_t cur_lock; - - cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset); - if (lock) - cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1); - else - cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0); - WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock); -} - -static void dce_v11_0_hide_cursor(struct drm_crtc *crtc) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = drm_to_adev(crtc->dev); - u32 tmp; - - tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0); - WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); -} - -static void dce_v11_0_show_cursor(struct drm_crtc *crtc) -{ - struct 
amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = drm_to_adev(crtc->dev); - u32 tmp; - - WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, - upper_32_bits(amdgpu_crtc->cursor_addr)); - WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, - lower_32_bits(amdgpu_crtc->cursor_addr)); - - tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1); - tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2); - WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); -} - -static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc, - int x, int y) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = drm_to_adev(crtc->dev); - int xorigin = 0, yorigin = 0; - - amdgpu_crtc->cursor_x = x; - amdgpu_crtc->cursor_y = y; - - /* avivo cursor are offset into the total surface */ - x += crtc->x; - y += crtc->y; - DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); - - if (x < 0) { - xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); - x = 0; - } - if (y < 0) { - yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); - y = 0; - } - - WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); - WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, - ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); - - return 0; -} - -static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc, - int x, int y) -{ - int ret; - - dce_v11_0_lock_cursor(crtc, true); - ret = dce_v11_0_cursor_move_locked(crtc, x, y); - dce_v11_0_lock_cursor(crtc, false); - - return ret; -} - -static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc, - struct drm_file *file_priv, - uint32_t handle, - uint32_t width, - uint32_t height, - int32_t hot_x, - int32_t hot_y) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct drm_gem_object *obj; - struct amdgpu_bo *aobj; - int ret; - - if (!handle) { - /* turn off cursor */ - dce_v11_0_hide_cursor(crtc); - obj = NULL; - goto unpin; - } - - if ((width > amdgpu_crtc->max_cursor_width) || - (height > amdgpu_crtc->max_cursor_height)) { - DRM_ERROR("bad cursor width or height %d x %d\n", width, height); - return -EINVAL; - } - - obj = drm_gem_object_lookup(file_priv, handle); - if (!obj) { - DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id); - return -ENOENT; - } - - aobj = gem_to_amdgpu_bo(obj); - ret = amdgpu_bo_reserve(aobj, false); - if (ret != 0) { - drm_gem_object_put(obj); - return ret; - } - - aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; - ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); - amdgpu_bo_unreserve(aobj); - if (ret) { - DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); - drm_gem_object_put(obj); - return ret; - } - amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj); - - dce_v11_0_lock_cursor(crtc, true); - - if (width != amdgpu_crtc->cursor_width || - height != amdgpu_crtc->cursor_height || - hot_x != amdgpu_crtc->cursor_hot_x || - hot_y != amdgpu_crtc->cursor_hot_y) { - int x, y; - - x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x; - y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y; - - dce_v11_0_cursor_move_locked(crtc, x, y); - - amdgpu_crtc->cursor_width = width; - amdgpu_crtc->cursor_height = height; - amdgpu_crtc->cursor_hot_x = hot_x; - amdgpu_crtc->cursor_hot_y = hot_y; - } - - dce_v11_0_show_cursor(crtc); - dce_v11_0_lock_cursor(crtc, 
false); - -unpin: - if (amdgpu_crtc->cursor_bo) { - struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); - ret = amdgpu_bo_reserve(aobj, true); - if (likely(ret == 0)) { - amdgpu_bo_unpin(aobj); - amdgpu_bo_unreserve(aobj); - } - drm_gem_object_put(amdgpu_crtc->cursor_bo); - } - - amdgpu_crtc->cursor_bo = obj; - return 0; -} - -static void dce_v11_0_cursor_reset(struct drm_crtc *crtc) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - - if (amdgpu_crtc->cursor_bo) { - dce_v11_0_lock_cursor(crtc, true); - - dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, - amdgpu_crtc->cursor_y); - - dce_v11_0_show_cursor(crtc); - - dce_v11_0_lock_cursor(crtc, false); - } -} - -static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t size, - struct drm_modeset_acquire_ctx *ctx) -{ - dce_v11_0_crtc_load_lut(crtc); - - return 0; -} - -static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - - drm_crtc_cleanup(crtc); - kfree(amdgpu_crtc); -} - -static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = { - .cursor_set2 = dce_v11_0_crtc_cursor_set2, - .cursor_move = dce_v11_0_crtc_cursor_move, - .gamma_set = dce_v11_0_crtc_gamma_set, - .set_config = amdgpu_display_crtc_set_config, - .destroy = dce_v11_0_crtc_destroy, - .page_flip_target = amdgpu_display_crtc_page_flip_target, - .get_vblank_counter = amdgpu_get_vblank_counter_kms, - .enable_vblank = amdgpu_enable_vblank_kms, - .disable_vblank = amdgpu_disable_vblank_kms, - .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, -}; - -static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) -{ - struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - unsigned type; - - switch (mode) { - case DRM_MODE_DPMS_ON: - amdgpu_crtc->enabled = true; - amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE); - dce_v11_0_vga_enable(crtc, true); - amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); - dce_v11_0_vga_enable(crtc, false); - /* Make sure VBLANK and PFLIP interrupts are still enabled */ - type = amdgpu_display_crtc_idx_to_irq_type(adev, - amdgpu_crtc->crtc_id); - amdgpu_irq_update(adev, &adev->crtc_irq, type); - amdgpu_irq_update(adev, &adev->pageflip_irq, type); - drm_crtc_vblank_on(crtc); - dce_v11_0_crtc_load_lut(crtc); - break; - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - case DRM_MODE_DPMS_OFF: - drm_crtc_vblank_off(crtc); - if (amdgpu_crtc->enabled) { - dce_v11_0_vga_enable(crtc, true); - amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); - dce_v11_0_vga_enable(crtc, false); - } - amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE); - amdgpu_crtc->enabled = false; - break; - } - /* adjust pm to dpms */ - amdgpu_dpm_compute_clocks(adev); -} - -static void dce_v11_0_crtc_prepare(struct drm_crtc *crtc) -{ - /* disable crtc pair power gating before programming */ - amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE); - amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE); - dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); -} - -static void dce_v11_0_crtc_commit(struct drm_crtc *crtc) -{ - dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON); - amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE); -} - -static void dce_v11_0_crtc_disable(struct drm_crtc *crtc) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - struct amdgpu_atom_ss ss; - int i; - 
- dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); - if (crtc->primary->fb) { - int r; - struct amdgpu_bo *abo; - - abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]); - r = amdgpu_bo_reserve(abo, true); - if (unlikely(r)) - DRM_ERROR("failed to reserve abo before unpin\n"); - else { - amdgpu_bo_unpin(abo); - amdgpu_bo_unreserve(abo); - } - } - /* disable the GRPH */ - dce_v11_0_grph_enable(crtc, false); - - amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE); - - for (i = 0; i < adev->mode_info.num_crtc; i++) { - if (adev->mode_info.crtcs[i] && - adev->mode_info.crtcs[i]->enabled && - i != amdgpu_crtc->crtc_id && - amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) { - /* one other crtc is using this pll don't turn - * off the pll - */ - goto done; - } - } - - switch (amdgpu_crtc->pll_id) { - case ATOM_PPLL0: - case ATOM_PPLL1: - case ATOM_PPLL2: - /* disable the ppll */ - amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id, - 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); - break; - case ATOM_COMBOPHY_PLL0: - case ATOM_COMBOPHY_PLL1: - case ATOM_COMBOPHY_PLL2: - case ATOM_COMBOPHY_PLL3: - case ATOM_COMBOPHY_PLL4: - case ATOM_COMBOPHY_PLL5: - /* disable the ppll */ - amdgpu_atombios_crtc_program_pll(crtc, ATOM_CRTC_INVALID, amdgpu_crtc->pll_id, - 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); - break; - default: - break; - } -done: - amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; - amdgpu_crtc->adjusted_clock = 0; - amdgpu_crtc->encoder = NULL; - amdgpu_crtc->connector = NULL; -} - -static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, - int x, int y, struct drm_framebuffer *old_fb) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - - if (!amdgpu_crtc->adjusted_clock) - return -EINVAL; - - if ((adev->asic_type == CHIP_POLARIS10) || - (adev->asic_type == CHIP_POLARIS11) || - (adev->asic_type == CHIP_POLARIS12) || - (adev->asic_type == CHIP_VEGAM)) { - struct amdgpu_encoder *amdgpu_encoder = - to_amdgpu_encoder(amdgpu_crtc->encoder); - int encoder_mode = - amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder); - - /* SetPixelClock calculates the plls and ss values now */ - amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, - amdgpu_crtc->pll_id, - encoder_mode, amdgpu_encoder->encoder_id, - adjusted_mode->clock, 0, 0, 0, 0, - amdgpu_crtc->bpc, amdgpu_crtc->ss_enabled, &amdgpu_crtc->ss); - } else { - amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode); - } - amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode); - dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0); - amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode); - amdgpu_atombios_crtc_scaler_setup(crtc); - dce_v11_0_cursor_reset(crtc); - /* update the hw version fpr dpm */ - amdgpu_crtc->hw_mode = *adjusted_mode; - - return 0; -} - -static bool dce_v11_0_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct drm_device *dev = crtc->dev; - struct drm_encoder *encoder; - - /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - if (encoder->crtc == crtc) { - amdgpu_crtc->encoder = encoder; - amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder); - break; - } - } - if 
((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) { - amdgpu_crtc->encoder = NULL; - amdgpu_crtc->connector = NULL; - return false; - } - if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) - return false; - if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode)) - return false; - /* pick pll */ - amdgpu_crtc->pll_id = dce_v11_0_pick_pll(crtc); - /* if we can't get a PPLL for a non-DP encoder, fail */ - if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) && - !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) - return false; - - return true; -} - -static int dce_v11_0_crtc_set_base(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb) -{ - return dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0); -} - -static int dce_v11_0_crtc_set_base_atomic(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int x, int y, enum mode_set_atomic state) -{ - return dce_v11_0_crtc_do_set_base(crtc, fb, x, y, 1); -} - -static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = { - .dpms = dce_v11_0_crtc_dpms, - .mode_fixup = dce_v11_0_crtc_mode_fixup, - .mode_set = dce_v11_0_crtc_mode_set, - .mode_set_base = dce_v11_0_crtc_set_base, - .mode_set_base_atomic = dce_v11_0_crtc_set_base_atomic, - .prepare = dce_v11_0_crtc_prepare, - .commit = dce_v11_0_crtc_commit, - .disable = dce_v11_0_crtc_disable, - .get_scanout_position = amdgpu_crtc_get_scanout_position, -}; - -static void dce_v11_0_panic_flush(struct drm_plane *plane) -{ - struct drm_framebuffer *fb; - struct amdgpu_crtc *amdgpu_crtc; - struct amdgpu_device *adev; - uint32_t fb_format; - - if (!plane->fb) - return; - - fb = plane->fb; - amdgpu_crtc = to_amdgpu_crtc(plane->crtc); - adev = drm_to_adev(fb->dev); - - /* Disable DC tiling */ - fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset); - fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK; - WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format); - -} - -static const struct drm_plane_helper_funcs dce_v11_0_drm_primary_plane_helper_funcs = { - .get_scanout_buffer = amdgpu_display_get_scanout_buffer, - .panic_flush = dce_v11_0_panic_flush, -}; - -static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index) -{ - struct amdgpu_crtc *amdgpu_crtc; - - amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) + - (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); - if (amdgpu_crtc == NULL) - return -ENOMEM; - - drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v11_0_crtc_funcs); - - drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); - amdgpu_crtc->crtc_id = index; - adev->mode_info.crtcs[index] = amdgpu_crtc; - - amdgpu_crtc->max_cursor_width = 128; - amdgpu_crtc->max_cursor_height = 128; - adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; - adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; - - switch (amdgpu_crtc->crtc_id) { - case 0: - default: - amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET; - break; - case 1: - amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET; - break; - case 2: - amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET; - break; - case 3: - amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET; - break; - case 4: - amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET; - break; - case 5: - amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET; - break; - } - - amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; - amdgpu_crtc->adjusted_clock = 0; - amdgpu_crtc->encoder = NULL; - amdgpu_crtc->connector = NULL; - 
drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v11_0_crtc_helper_funcs); - drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v11_0_drm_primary_plane_helper_funcs); - - return 0; -} - -static int dce_v11_0_early_init(struct amdgpu_ip_block *ip_block) -{ - struct amdgpu_device *adev = ip_block->adev; - - adev->audio_endpt_rreg = &dce_v11_0_audio_endpt_rreg; - adev->audio_endpt_wreg = &dce_v11_0_audio_endpt_wreg; - - dce_v11_0_set_display_funcs(adev); - - adev->mode_info.num_crtc = dce_v11_0_get_num_crtc(adev); - - switch (adev->asic_type) { - case CHIP_CARRIZO: - adev->mode_info.num_hpd = 6; - adev->mode_info.num_dig = 9; - break; - case CHIP_STONEY: - adev->mode_info.num_hpd = 6; - adev->mode_info.num_dig = 9; - break; - case CHIP_POLARIS10: - case CHIP_VEGAM: - adev->mode_info.num_hpd = 6; - adev->mode_info.num_dig = 6; - break; - case CHIP_POLARIS11: - case CHIP_POLARIS12: - adev->mode_info.num_hpd = 5; - adev->mode_info.num_dig = 5; - break; - default: - /* FIXME: not supported yet */ - return -EINVAL; - } - - dce_v11_0_set_irq_funcs(adev); - - return 0; -} - -static int dce_v11_0_sw_init(struct amdgpu_ip_block *ip_block) -{ - int r, i; - struct amdgpu_device *adev = ip_block->adev; - - for (i = 0; i < adev->mode_info.num_crtc; i++) { - r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq); - if (r) - return r; - } - - for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) { - r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq); - if (r) - return r; - } - - /* HPD hotplug */ - r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); - if (r) - return r; - - adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs; - - adev_to_drm(adev)->mode_config.async_page_flip = true; - - adev_to_drm(adev)->mode_config.max_width = 16384; - adev_to_drm(adev)->mode_config.max_height = 16384; - - adev_to_drm(adev)->mode_config.preferred_depth = 24; - adev_to_drm(adev)->mode_config.prefer_shadow = 1; - - adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true; - - r = amdgpu_display_modeset_create_props(adev); - if (r) - return r; - - adev_to_drm(adev)->mode_config.max_width = 16384; - adev_to_drm(adev)->mode_config.max_height = 16384; - - - /* allocate crtcs */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - r = dce_v11_0_crtc_init(adev, i); - if (r) - return r; - } - - if (amdgpu_atombios_get_connector_info_from_object_table(adev)) - amdgpu_display_print_display_setup(adev_to_drm(adev)); - else - return -EINVAL; - - /* setup afmt */ - r = dce_v11_0_afmt_init(adev); - if (r) - return r; - - r = dce_v11_0_audio_init(adev); - if (r) - return r; - - /* Disable vblank IRQs aggressively for power-saving */ - /* XXX: can this be enabled for DC? 
*/ - adev_to_drm(adev)->vblank_disable_immediate = true; - - r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc); - if (r) - return r; - - INIT_DELAYED_WORK(&adev->hotplug_work, - amdgpu_display_hotplug_work_func); - - drm_kms_helper_poll_init(adev_to_drm(adev)); - - adev->mode_info.mode_config_initialized = true; - return 0; -} - -static int dce_v11_0_sw_fini(struct amdgpu_ip_block *ip_block) -{ - struct amdgpu_device *adev = ip_block->adev; - - drm_edid_free(adev->mode_info.bios_hardcoded_edid); - - drm_kms_helper_poll_fini(adev_to_drm(adev)); - - dce_v11_0_audio_fini(adev); - - dce_v11_0_afmt_fini(adev); - - drm_mode_config_cleanup(adev_to_drm(adev)); - adev->mode_info.mode_config_initialized = false; - - return 0; -} - -static int dce_v11_0_hw_init(struct amdgpu_ip_block *ip_block) -{ - int i; - struct amdgpu_device *adev = ip_block->adev; - - dce_v11_0_init_golden_registers(adev); - - /* disable vga render */ - dce_v11_0_set_vga_render_state(adev, false); - /* init dig PHYs, disp eng pll */ - amdgpu_atombios_crtc_powergate_init(adev); - amdgpu_atombios_encoder_init_dig(adev); - if ((adev->asic_type == CHIP_POLARIS10) || - (adev->asic_type == CHIP_POLARIS11) || - (adev->asic_type == CHIP_POLARIS12) || - (adev->asic_type == CHIP_VEGAM)) { - amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk, - DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS); - amdgpu_atombios_crtc_set_dce_clock(adev, 0, - DCE_CLOCK_TYPE_DPREFCLK, ATOM_GCK_DFS); - } else { - amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); - } - - /* initialize hpd */ - dce_v11_0_hpd_init(adev); - - for (i = 0; i < adev->mode_info.audio.num_pins; i++) { - dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); - } - - dce_v11_0_pageflip_interrupt_init(adev); - - return 0; -} - -static int dce_v11_0_hw_fini(struct amdgpu_ip_block *ip_block) -{ - int i; - struct amdgpu_device *adev = ip_block->adev; - - dce_v11_0_hpd_fini(adev); - - for (i = 0; i < adev->mode_info.audio.num_pins; i++) { - dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); - } - - dce_v11_0_pageflip_interrupt_fini(adev); - - flush_delayed_work(&adev->hotplug_work); - - return 0; -} - -static int dce_v11_0_suspend(struct amdgpu_ip_block *ip_block) -{ - struct amdgpu_device *adev = ip_block->adev; - int r; - - r = amdgpu_display_suspend_helper(adev); - if (r) - return r; - - adev->mode_info.bl_level = - amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); - - return dce_v11_0_hw_fini(ip_block); -} - -static int dce_v11_0_resume(struct amdgpu_ip_block *ip_block) -{ - struct amdgpu_device *adev = ip_block->adev; - int ret; - - amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, - adev->mode_info.bl_level); - - ret = dce_v11_0_hw_init(ip_block); - - /* turn on the BL */ - if (adev->mode_info.bl_encoder) { - u8 bl_level = amdgpu_display_backlight_get_level(adev, - adev->mode_info.bl_encoder); - amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, - bl_level); - } - if (ret) - return ret; - - return amdgpu_display_resume_helper(adev); -} - -static bool dce_v11_0_is_idle(struct amdgpu_ip_block *ip_block) -{ - return true; -} - -static int dce_v11_0_soft_reset(struct amdgpu_ip_block *ip_block) -{ - u32 srbm_soft_reset = 0, tmp; - struct amdgpu_device *adev = ip_block->adev; - - if (dce_v11_0_is_display_hung(adev)) - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; - - if (srbm_soft_reset) { - tmp = RREG32(mmSRBM_SOFT_RESET); - tmp |= srbm_soft_reset; - dev_info(adev->dev, 
"SRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); - - udelay(50); - - tmp &= ~srbm_soft_reset; - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); - - /* Wait a little for things to settle down */ - udelay(50); - } - return 0; -} - -static void dce_v11_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, - int crtc, - enum amdgpu_interrupt_state state) -{ - u32 lb_interrupt_mask; - - if (crtc >= adev->mode_info.num_crtc) { - DRM_DEBUG("invalid crtc %d\n", crtc); - return; - } - - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); - lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, - VBLANK_INTERRUPT_MASK, 0); - WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); - break; - case AMDGPU_IRQ_STATE_ENABLE: - lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); - lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, - VBLANK_INTERRUPT_MASK, 1); - WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); - break; - default: - break; - } -} - -static void dce_v11_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev, - int crtc, - enum amdgpu_interrupt_state state) -{ - u32 lb_interrupt_mask; - - if (crtc >= adev->mode_info.num_crtc) { - DRM_DEBUG("invalid crtc %d\n", crtc); - return; - } - - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); - lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, - VLINE_INTERRUPT_MASK, 0); - WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); - break; - case AMDGPU_IRQ_STATE_ENABLE: - lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); - lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, - VLINE_INTERRUPT_MASK, 1); - WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); - break; - default: - break; - } -} - -static int dce_v11_0_set_hpd_irq_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - unsigned hpd, - enum amdgpu_interrupt_state state) -{ - u32 tmp; - - if (hpd >= adev->mode_info.num_hpd) { - DRM_DEBUG("invalid hpd %d\n", hpd); - return 0; - } - - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); - tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0); - WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); - break; - case AMDGPU_IRQ_STATE_ENABLE: - tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); - tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1); - WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); - break; - default: - break; - } - - return 0; -} - -static int dce_v11_0_set_crtc_irq_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - unsigned type, - enum amdgpu_interrupt_state state) -{ - switch (type) { - case AMDGPU_CRTC_IRQ_VBLANK1: - dce_v11_0_set_crtc_vblank_interrupt_state(adev, 0, state); - break; - case AMDGPU_CRTC_IRQ_VBLANK2: - dce_v11_0_set_crtc_vblank_interrupt_state(adev, 1, state); - break; - case AMDGPU_CRTC_IRQ_VBLANK3: - dce_v11_0_set_crtc_vblank_interrupt_state(adev, 2, state); - break; - case AMDGPU_CRTC_IRQ_VBLANK4: - dce_v11_0_set_crtc_vblank_interrupt_state(adev, 3, state); - break; - case AMDGPU_CRTC_IRQ_VBLANK5: - dce_v11_0_set_crtc_vblank_interrupt_state(adev, 4, state); - break; - case AMDGPU_CRTC_IRQ_VBLANK6: - 
dce_v11_0_set_crtc_vblank_interrupt_state(adev, 5, state); - break; - case AMDGPU_CRTC_IRQ_VLINE1: - dce_v11_0_set_crtc_vline_interrupt_state(adev, 0, state); - break; - case AMDGPU_CRTC_IRQ_VLINE2: - dce_v11_0_set_crtc_vline_interrupt_state(adev, 1, state); - break; - case AMDGPU_CRTC_IRQ_VLINE3: - dce_v11_0_set_crtc_vline_interrupt_state(adev, 2, state); - break; - case AMDGPU_CRTC_IRQ_VLINE4: - dce_v11_0_set_crtc_vline_interrupt_state(adev, 3, state); - break; - case AMDGPU_CRTC_IRQ_VLINE5: - dce_v11_0_set_crtc_vline_interrupt_state(adev, 4, state); - break; - case AMDGPU_CRTC_IRQ_VLINE6: - dce_v11_0_set_crtc_vline_interrupt_state(adev, 5, state); - break; - default: - break; - } - return 0; -} - -static int dce_v11_0_set_pageflip_irq_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *src, - unsigned type, - enum amdgpu_interrupt_state state) -{ - u32 reg; - - if (type >= adev->mode_info.num_crtc) { - DRM_ERROR("invalid pageflip crtc %d\n", type); - return -EINVAL; - } - - reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]); - if (state == AMDGPU_IRQ_STATE_DISABLE) - WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type], - reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); - else - WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type], - reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); - - return 0; -} - -static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry) -{ - unsigned long flags; - unsigned crtc_id; - struct amdgpu_crtc *amdgpu_crtc; - struct amdgpu_flip_work *works; - - crtc_id = (entry->src_id - 8) >> 1; - amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; - - if (crtc_id >= adev->mode_info.num_crtc) { - DRM_ERROR("invalid pageflip crtc %d\n", crtc_id); - return -EINVAL; - } - - if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) & - GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK) - WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id], - GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK); - - /* IRQ could occur when in initial stage */ - if(amdgpu_crtc == NULL) - return 0; - - spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); - works = amdgpu_crtc->pflip_works; - if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){ - DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " - "AMDGPU_FLIP_SUBMITTED(%d)\n", - amdgpu_crtc->pflip_status, - AMDGPU_FLIP_SUBMITTED); - spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); - return 0; - } - - /* page flip completed. 
clean up */ - amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; - amdgpu_crtc->pflip_works = NULL; - - /* wakeup usersapce */ - if(works->event) - drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event); - - spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); - - drm_crtc_vblank_put(&amdgpu_crtc->base); - schedule_work(&works->unpin_work); - - return 0; -} - -static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev, - int hpd) -{ - u32 tmp; - - if (hpd >= adev->mode_info.num_hpd) { - DRM_DEBUG("invalid hpd %d\n", hpd); - return; - } - - tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); - tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1); - WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); -} - -static void dce_v11_0_crtc_vblank_int_ack(struct amdgpu_device *adev, - int crtc) -{ - u32 tmp; - - if (crtc < 0 || crtc >= adev->mode_info.num_crtc) { - DRM_DEBUG("invalid crtc %d\n", crtc); - return; - } - - tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]); - tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1); - WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp); -} - -static void dce_v11_0_crtc_vline_int_ack(struct amdgpu_device *adev, - int crtc) -{ - u32 tmp; - - if (crtc < 0 || crtc >= adev->mode_info.num_crtc) { - DRM_DEBUG("invalid crtc %d\n", crtc); - return; - } - - tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]); - tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1); - WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp); -} - -static int dce_v11_0_crtc_irq(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry) -{ - unsigned crtc = entry->src_id - 1; - uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg); - unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, - crtc); - - switch (entry->src_data[0]) { - case 0: /* vblank */ - if (disp_int & interrupt_status_offsets[crtc].vblank) - dce_v11_0_crtc_vblank_int_ack(adev, crtc); - else - DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); - - if (amdgpu_irq_enabled(adev, source, irq_type)) { - drm_handle_vblank(adev_to_drm(adev), crtc); - } - DRM_DEBUG("IH: D%d vblank\n", crtc + 1); - - break; - case 1: /* vline */ - if (disp_int & interrupt_status_offsets[crtc].vline) - dce_v11_0_crtc_vline_int_ack(adev, crtc); - else - DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); - - DRM_DEBUG("IH: D%d vline\n", crtc + 1); - - break; - default: - DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); - break; - } - - return 0; -} - -static int dce_v11_0_hpd_irq(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry) -{ - uint32_t disp_int, mask; - unsigned hpd; - - if (entry->src_data[0] >= adev->mode_info.num_hpd) { - DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); - return 0; - } - - hpd = entry->src_data[0]; - disp_int = RREG32(interrupt_status_offsets[hpd].reg); - mask = interrupt_status_offsets[hpd].hpd; - - if (disp_int & mask) { - dce_v11_0_hpd_int_ack(adev, hpd); - schedule_delayed_work(&adev->hotplug_work, 0); - DRM_DEBUG("IH: HPD%d\n", hpd + 1); - } - - return 0; -} - -static int dce_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block, - enum amd_clockgating_state state) -{ - return 0; -} - -static int dce_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block, - enum amd_powergating_state state) -{ - return 0; -} - -static const struct amd_ip_funcs dce_v11_0_ip_funcs = { - .name = "dce_v11_0", - .early_init = 
dce_v11_0_early_init, - .sw_init = dce_v11_0_sw_init, - .sw_fini = dce_v11_0_sw_fini, - .hw_init = dce_v11_0_hw_init, - .hw_fini = dce_v11_0_hw_fini, - .suspend = dce_v11_0_suspend, - .resume = dce_v11_0_resume, - .is_idle = dce_v11_0_is_idle, - .soft_reset = dce_v11_0_soft_reset, - .set_clockgating_state = dce_v11_0_set_clockgating_state, - .set_powergating_state = dce_v11_0_set_powergating_state, -}; - -static void dce_v11_0_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - - amdgpu_encoder->pixel_clock = adjusted_mode->clock; - - /* need to call this here rather than in prepare() since we need some crtc info */ - amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); - - /* set scaler clears this on some chips */ - dce_v11_0_set_interleave(encoder->crtc, mode); - - if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { - dce_v11_0_afmt_enable(encoder, true); - dce_v11_0_afmt_setmode(encoder, adjusted_mode); - } -} - -static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder) -{ - struct amdgpu_device *adev = drm_to_adev(encoder->dev); - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); - - if ((amdgpu_encoder->active_device & - (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || - (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != - ENCODER_OBJECT_ID_NONE)) { - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; - if (dig) { - dig->dig_encoder = dce_v11_0_pick_dig_encoder(encoder); - if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) - dig->afmt = adev->mode_info.afmt[dig->dig_encoder]; - } - } - - amdgpu_atombios_scratch_regs_lock(adev, true); - - if (connector) { - struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); - - /* select the clock/data port if it uses a router */ - if (amdgpu_connector->router.cd_valid) - amdgpu_i2c_router_select_cd_port(amdgpu_connector); - - /* turn eDP panel on for mode set */ - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) - amdgpu_atombios_encoder_set_edp_panel_power(connector, - ATOM_TRANSMITTER_ACTION_POWER_ON); - } - - /* this is needed for the pll/ss setup to work correctly in some cases */ - amdgpu_atombios_encoder_set_crtc_source(encoder); - /* set up the FMT blocks */ - dce_v11_0_program_fmt(encoder); -} - -static void dce_v11_0_encoder_commit(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - - /* need to call this here as we need the crtc set up */ - amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON); - amdgpu_atombios_scratch_regs_lock(adev, false); -} - -static void dce_v11_0_encoder_disable(struct drm_encoder *encoder) -{ - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct amdgpu_encoder_atom_dig *dig; - - amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); - - if (amdgpu_atombios_encoder_is_digital(encoder)) { - if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) - dce_v11_0_afmt_enable(encoder, false); - dig = amdgpu_encoder->enc_priv; - dig->dig_encoder = -1; - } - amdgpu_encoder->active_device = 0; -} - -/* these are handled by the primary encoders */ -static void dce_v11_0_ext_prepare(struct drm_encoder *encoder) -{ - -} - -static void 
dce_v11_0_ext_commit(struct drm_encoder *encoder) -{ - -} - -static void -dce_v11_0_ext_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - -} - -static void dce_v11_0_ext_disable(struct drm_encoder *encoder) -{ - -} - -static void -dce_v11_0_ext_dpms(struct drm_encoder *encoder, int mode) -{ - -} - -static const struct drm_encoder_helper_funcs dce_v11_0_ext_helper_funcs = { - .dpms = dce_v11_0_ext_dpms, - .prepare = dce_v11_0_ext_prepare, - .mode_set = dce_v11_0_ext_mode_set, - .commit = dce_v11_0_ext_commit, - .disable = dce_v11_0_ext_disable, - /* no detect for TMDS/LVDS yet */ -}; - -static const struct drm_encoder_helper_funcs dce_v11_0_dig_helper_funcs = { - .dpms = amdgpu_atombios_encoder_dpms, - .mode_fixup = amdgpu_atombios_encoder_mode_fixup, - .prepare = dce_v11_0_encoder_prepare, - .mode_set = dce_v11_0_encoder_mode_set, - .commit = dce_v11_0_encoder_commit, - .disable = dce_v11_0_encoder_disable, - .detect = amdgpu_atombios_encoder_dig_detect, -}; - -static const struct drm_encoder_helper_funcs dce_v11_0_dac_helper_funcs = { - .dpms = amdgpu_atombios_encoder_dpms, - .mode_fixup = amdgpu_atombios_encoder_mode_fixup, - .prepare = dce_v11_0_encoder_prepare, - .mode_set = dce_v11_0_encoder_mode_set, - .commit = dce_v11_0_encoder_commit, - .detect = amdgpu_atombios_encoder_dac_detect, -}; - -static void dce_v11_0_encoder_destroy(struct drm_encoder *encoder) -{ - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder); - kfree(amdgpu_encoder->enc_priv); - drm_encoder_cleanup(encoder); - kfree(amdgpu_encoder); -} - -static const struct drm_encoder_funcs dce_v11_0_encoder_funcs = { - .destroy = dce_v11_0_encoder_destroy, -}; - -static void dce_v11_0_encoder_add(struct amdgpu_device *adev, - uint32_t encoder_enum, - uint32_t supported_device, - u16 caps) -{ - struct drm_device *dev = adev_to_drm(adev); - struct drm_encoder *encoder; - struct amdgpu_encoder *amdgpu_encoder; - - /* see if we already added it */ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - amdgpu_encoder = to_amdgpu_encoder(encoder); - if (amdgpu_encoder->encoder_enum == encoder_enum) { - amdgpu_encoder->devices |= supported_device; - return; - } - - } - - /* add a new one */ - amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL); - if (!amdgpu_encoder) - return; - - encoder = &amdgpu_encoder->base; - switch (adev->mode_info.num_crtc) { - case 1: - encoder->possible_crtcs = 0x1; - break; - case 2: - default: - encoder->possible_crtcs = 0x3; - break; - case 3: - encoder->possible_crtcs = 0x7; - break; - case 4: - encoder->possible_crtcs = 0xf; - break; - case 5: - encoder->possible_crtcs = 0x1f; - break; - case 6: - encoder->possible_crtcs = 0x3f; - break; - } - - amdgpu_encoder->enc_priv = NULL; - - amdgpu_encoder->encoder_enum = encoder_enum; - amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; - amdgpu_encoder->devices = supported_device; - amdgpu_encoder->rmx_type = RMX_OFF; - amdgpu_encoder->underscan_type = UNDERSCAN_OFF; - amdgpu_encoder->is_ext_encoder = false; - amdgpu_encoder->caps = caps; - - switch (amdgpu_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: - drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - DRM_MODE_ENCODER_DAC, NULL); - drm_encoder_helper_add(encoder, 
&dce_v11_0_dac_helper_funcs); - break; - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: - if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - amdgpu_encoder->rmx_type = RMX_FULL; - drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - DRM_MODE_ENCODER_LVDS, NULL); - amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder); - } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { - drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - DRM_MODE_ENCODER_DAC, NULL); - amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); - } else { - drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); - amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); - } - drm_encoder_helper_add(encoder, &dce_v11_0_dig_helper_funcs); - break; - case ENCODER_OBJECT_ID_SI170B: - case ENCODER_OBJECT_ID_CH7303: - case ENCODER_OBJECT_ID_EXTERNAL_SDVOA: - case ENCODER_OBJECT_ID_EXTERNAL_SDVOB: - case ENCODER_OBJECT_ID_TITFP513: - case ENCODER_OBJECT_ID_VT1623: - case ENCODER_OBJECT_ID_HDMI_SI1930: - case ENCODER_OBJECT_ID_TRAVIS: - case ENCODER_OBJECT_ID_NUTMEG: - /* these are handled by the primary encoders */ - amdgpu_encoder->is_ext_encoder = true; - if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - DRM_MODE_ENCODER_LVDS, NULL); - else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) - drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - DRM_MODE_ENCODER_DAC, NULL); - else - drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); - drm_encoder_helper_add(encoder, &dce_v11_0_ext_helper_funcs); - break; - } -} - -static const struct amdgpu_display_funcs dce_v11_0_display_funcs = { - .bandwidth_update = &dce_v11_0_bandwidth_update, - .vblank_get_counter = &dce_v11_0_vblank_get_counter, - .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level, - .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level, - .hpd_sense = &dce_v11_0_hpd_sense, - .hpd_set_polarity = &dce_v11_0_hpd_set_polarity, - .hpd_get_gpio_reg = &dce_v11_0_hpd_get_gpio_reg, - .page_flip = &dce_v11_0_page_flip, - .page_flip_get_scanoutpos = &dce_v11_0_crtc_get_scanoutpos, - .add_encoder = &dce_v11_0_encoder_add, - .add_connector = &amdgpu_connector_add, -}; - -static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev) -{ - adev->mode_info.funcs = &dce_v11_0_display_funcs; -} - -static const struct amdgpu_irq_src_funcs dce_v11_0_crtc_irq_funcs = { - .set = dce_v11_0_set_crtc_irq_state, - .process = dce_v11_0_crtc_irq, -}; - -static const struct amdgpu_irq_src_funcs dce_v11_0_pageflip_irq_funcs = { - .set = dce_v11_0_set_pageflip_irq_state, - .process = dce_v11_0_pageflip_irq, -}; - -static const struct amdgpu_irq_src_funcs dce_v11_0_hpd_irq_funcs = { - .set = dce_v11_0_set_hpd_irq_state, - .process = dce_v11_0_hpd_irq, -}; - -static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev) -{ - if (adev->mode_info.num_crtc > 0) - adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc; - else - adev->crtc_irq.num_types = 0; - adev->crtc_irq.funcs = &dce_v11_0_crtc_irq_funcs; - - adev->pageflip_irq.num_types = adev->mode_info.num_crtc; - adev->pageflip_irq.funcs = &dce_v11_0_pageflip_irq_funcs; 
- - adev->hpd_irq.num_types = adev->mode_info.num_hpd; - adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs; -} - -const struct amdgpu_ip_block_version dce_v11_0_ip_block = -{ - .type = AMD_IP_BLOCK_TYPE_DCE, - .major = 11, - .minor = 0, - .rev = 0, - .funcs = &dce_v11_0_ip_funcs, -}; - -const struct amdgpu_ip_block_version dce_v11_2_ip_block = -{ - .type = AMD_IP_BLOCK_TYPE_DCE, - .major = 11, - .minor = 2, - .rev = 0, - .funcs = &dce_v11_0_ip_funcs, -}; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 276c025c4c03..acc887a58518 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -1034,7 +1034,6 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev, /* save values for DPM */ amdgpu_crtc->line_time = line_time; - amdgpu_crtc->wm_high = latency_watermark_a; /* Save number of lines the linebuffer leads before the scanout */ amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines; @@ -1451,17 +1450,12 @@ static int dce_v6_0_audio_init(struct amdgpu_device *adev) static void dce_v6_0_audio_fini(struct amdgpu_device *adev) { - int i; - if (!amdgpu_audio) return; if (!adev->mode_info.audio.enabled) return; - for (i = 0; i < adev->mode_info.audio.num_pins; i++) - dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); - adev->mode_info.audio.enabled = false; } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index e62ccf9eb73d..2ccd6aad8dd6 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -1096,8 +1096,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, /* save values for DPM */ amdgpu_crtc->line_time = line_time; - amdgpu_crtc->wm_high = latency_watermark_a; - amdgpu_crtc->wm_low = latency_watermark_b; + /* Save number of lines the linebuffer leads before the scanout */ amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines; } @@ -1443,17 +1442,12 @@ static int dce_v8_0_audio_init(struct amdgpu_device *adev) static void dce_v8_0_audio_fini(struct amdgpu_device *adev) { - int i; - if (!amdgpu_audio) return; if (!adev->mode_info.audio.enabled) return; - for (i = 0; i < adev->mode_info.audio.num_pins; i++) - dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); - adev->mode_info.audio.enabled = false; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 75ea071744eb..8841d7213de4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4075,7 +4075,7 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) struct dma_fence *f = NULL; unsigned int index; uint64_t gpu_addr; - volatile uint32_t *cpu_ptr; + uint32_t *cpu_ptr; long r; memset(&ib, 0, sizeof(ib)); @@ -4322,8 +4322,7 @@ static u32 gfx_v10_0_get_csb_size(struct amdgpu_device *adev) return count; } -static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev, - volatile u32 *buffer) +static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer) { u32 count = 0; int ctx_reg_offset; @@ -4952,11 +4951,15 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block) } } } - /* TODO: Add queue reset mask when FW fully supports it */ + adev->gfx.gfx_supported_reset = amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); adev->gfx.compute_supported_reset = amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); + if (!amdgpu_sriov_vf(adev)) { + 
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + } r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE, 0); if (r) { @@ -7664,19 +7667,17 @@ static int gfx_v10_0_soft_reset(struct amdgpu_ip_block *ip_block) /* Disable MEC parsing/prefetching */ gfx_v10_0_cp_compute_enable(adev, false); - if (grbm_soft_reset) { - tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); - tmp |= grbm_soft_reset; - dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); - tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); + tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); + tmp |= grbm_soft_reset; + dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); + tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); - udelay(50); + udelay(50); - tmp &= ~grbm_soft_reset; - WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); - tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); - } + tmp &= ~grbm_soft_reset; + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); + tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); /* Wait a little for things to settle down */ udelay(50); @@ -9046,21 +9047,6 @@ static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, ref, mask); } -static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring, - unsigned int vmid) -{ - struct amdgpu_device *adev = ring->adev; - uint32_t value = 0; - - value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); - value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); - value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); - value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); - amdgpu_gfx_rlc_enter_safe_mode(adev, 0); - WREG32_SOC15(GC, 0, mmSQ_CMD, value); - amdgpu_gfx_rlc_exit_safe_mode(adev, 0); -} - static void gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, uint32_t me, uint32_t pipe, @@ -9522,7 +9508,9 @@ static void gfx_v10_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) amdgpu_ring_insert_nop(ring, num_nop - 1); } -static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) +static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; @@ -9532,15 +9520,14 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) u64 addr; int r; - if (amdgpu_sriov_vf(adev)) - return -EINVAL; - if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + spin_lock_irqsave(&kiq->ring_lock, flags); - if (amdgpu_ring_alloc(kiq_ring, 5 + 7 + 7 + kiq->pmf->map_queues_size)) { + if (amdgpu_ring_alloc(kiq_ring, 5 + 7 + 7)) { spin_unlock_irqrestore(&kiq->ring_lock, flags); return -ENOMEM; } @@ -9560,12 +9547,9 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) 0, 1, 0x20); gfx_v10_0_ring_emit_reg_wait(kiq_ring, SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffffffff); - kiq->pmf->kiq_map_queues(kiq_ring, ring); amdgpu_ring_commit(kiq_ring); - - spin_unlock_irqrestore(&kiq->ring_lock, flags); - r = amdgpu_ring_test_ring(kiq_ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); if (r) return r; @@ -9575,11 +9559,25 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) return r; } - return amdgpu_ring_test_ring(ring); + spin_lock_irqsave(&kiq->ring_lock, flags); + + if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size)) { + 
spin_unlock_irqrestore(&kiq->ring_lock, flags); + return -ENOMEM; + } + kiq->pmf->kiq_map_queues(kiq_ring, ring); + amdgpu_ring_commit(kiq_ring); + r = amdgpu_ring_test_ring(kiq_ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + if (r) + return r; + + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring, - unsigned int vmid) + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; @@ -9587,12 +9585,11 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring, unsigned long flags; int i, r; - if (amdgpu_sriov_vf(adev)) - return -EINVAL; - if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + spin_lock_irqsave(&kiq->ring_lock, flags); if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { @@ -9603,9 +9600,8 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring, kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 0, 0); amdgpu_ring_commit(kiq_ring); - spin_unlock_irqrestore(&kiq->ring_lock, flags); - r = amdgpu_ring_test_ring(kiq_ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); if (r) return r; @@ -9641,13 +9637,12 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring, } kiq->pmf->kiq_map_queues(kiq_ring, ring); amdgpu_ring_commit(kiq_ring); - spin_unlock_irqrestore(&kiq->ring_lock, flags); - r = amdgpu_ring_test_ring(kiq_ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); if (r) return r; - return amdgpu_ring_test_ring(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static void gfx_v10_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) @@ -9882,7 +9877,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = { .emit_wreg = gfx_v10_0_ring_emit_wreg, .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait, - .soft_recovery = gfx_v10_0_ring_soft_recovery, .emit_mem_sync = gfx_v10_0_emit_mem_sync, .reset = gfx_v10_0_reset_kgq, .emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader, @@ -9923,7 +9917,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = { .emit_wreg = gfx_v10_0_ring_emit_wreg, .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait, - .soft_recovery = gfx_v10_0_ring_soft_recovery, .emit_mem_sync = gfx_v10_0_emit_mem_sync, .reset = gfx_v10_0_reset_kcq, .emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index ec9b84f92d46..66c47c466532 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -603,7 +603,7 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) struct dma_fence *f = NULL; unsigned index; uint64_t gpu_addr; - volatile uint32_t *cpu_ptr; + uint32_t *cpu_ptr; long r; /* MES KIQ fw hasn't indirect buffer support for now */ @@ -850,8 +850,7 @@ static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev) return count; } -static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev, - volatile u32 *buffer) +static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer) { u32 count = 0; int ctx_reg_offset; @@ -1612,9 +1611,9 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(11, 0, 2): case IP_VERSION(11, 0, 3): if 
(!adev->gfx.disable_uq && - adev->gfx.me_fw_version >= 2390 && - adev->gfx.pfp_fw_version >= 2530 && - adev->gfx.mec_fw_version >= 2600 && + adev->gfx.me_fw_version >= 2420 && + adev->gfx.pfp_fw_version >= 2580 && + adev->gfx.mec_fw_version >= 2650 && adev->mes.fw_version[0] >= 120) { adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs; adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs; @@ -1654,6 +1653,21 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block) } } break; + case IP_VERSION(11, 0, 1): + case IP_VERSION(11, 0, 4): + adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex; + adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex); + if (adev->gfx.pfp_fw_version >= 102 && + adev->gfx.mec_fw_version >= 66 && + adev->mes.fw_version[0] >= 128) { + adev->gfx.enable_cleaner_shader = true; + r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); + if (r) { + adev->gfx.enable_cleaner_shader = false; + dev_err(adev->dev, "Failed to initialize cleaner shader\n"); + } + } + break; case IP_VERSION(11, 5, 0): case IP_VERSION(11, 5, 1): adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex; @@ -1806,12 +1820,17 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(11, 0, 2): case IP_VERSION(11, 0, 3): if ((adev->gfx.me_fw_version >= 2280) && - (adev->gfx.mec_fw_version >= 2410)) { - adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; - adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + (adev->gfx.mec_fw_version >= 2410) && + !amdgpu_sriov_vf(adev)) { + adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; } break; default: + if (!amdgpu_sriov_vf(adev)) { + adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + } break; } @@ -4124,6 +4143,8 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, #endif if (prop->tmz_queue) tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1); + if (!prop->kernel_queue) + tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_NON_PRIV, 1); mqd->cp_gfx_hqd_cntl = tmp; /* set up cp_doorbell_control */ @@ -4276,8 +4297,10 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m, tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, prop->allow_tunneling); - tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); - tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); + if (prop->kernel_queue) { + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); + } if (prop->tmz_queue) tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1); mqd->cp_hqd_pq_control = tmp; @@ -4634,8 +4657,7 @@ static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev) amdgpu_device_flush_hdp(adev, NULL); - value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? 
- false : true; + value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS; adev->gfxhub.funcs->set_fault_enable_default(adev, value); /* TODO investigate why this and the hdp flush above is needed, @@ -6283,21 +6305,6 @@ static void gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, ref, mask, 0x20); } -static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring, - unsigned vmid) -{ - struct amdgpu_device *adev = ring->adev; - uint32_t value = 0; - - value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); - value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); - value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); - value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); - amdgpu_gfx_rlc_enter_safe_mode(adev, 0); - WREG32_SOC15(GC, 0, regSQ_CMD, value); - amdgpu_gfx_rlc_exit_safe_mode(adev, 0); -} - static void gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, uint32_t me, uint32_t pipe, @@ -6811,13 +6818,14 @@ static int gfx_v11_reset_gfx_pipe(struct amdgpu_ring *ring) return 0; } -static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) +static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; int r; - if (amdgpu_sriov_vf(adev)) - return -EINVAL; + amdgpu_ring_reset_helper_begin(ring, timedout_fence); r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false); if (r) { @@ -6840,7 +6848,7 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) return r; } - return amdgpu_ring_test_ring(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static int gfx_v11_0_reset_compute_pipe(struct amdgpu_ring *ring) @@ -6973,13 +6981,14 @@ static int gfx_v11_0_reset_compute_pipe(struct amdgpu_ring *ring) return 0; } -static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid) +static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; int r = 0; - if (amdgpu_sriov_vf(adev)) - return -EINVAL; + amdgpu_ring_reset_helper_begin(ring, timedout_fence); r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true); if (r) { @@ -7000,7 +7009,7 @@ static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid) return r; } - return amdgpu_ring_test_ring(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static void gfx_v11_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) @@ -7236,7 +7245,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = { .emit_wreg = gfx_v11_0_ring_emit_wreg, .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait, - .soft_recovery = gfx_v11_0_ring_soft_recovery, .emit_mem_sync = gfx_v11_0_emit_mem_sync, .reset = gfx_v11_0_reset_kgq, .emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader, @@ -7278,7 +7286,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = { .emit_wreg = gfx_v11_0_ring_emit_wreg, .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait, - .soft_recovery = gfx_v11_0_ring_soft_recovery, .emit_mem_sync = gfx_v11_0_emit_mem_sync, .reset = gfx_v11_0_reset_kcq, .emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c index 1234c8d64e20..710ec9c34e43 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c @@ -79,6 +79,7 @@ MODULE_FIRMWARE("amdgpu/gc_12_0_1_pfp.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_1_me.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_1_mec.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_1_rlc.bin"); +MODULE_FIRMWARE("amdgpu/gc_12_0_1_rlc_kicker.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_1_toc.bin"); static const struct amdgpu_hwip_reg_entry gc_reg_list_12_0[] = { @@ -496,7 +497,7 @@ static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) struct dma_fence *f = NULL; unsigned index; uint64_t gpu_addr; - volatile uint32_t *cpu_ptr; + uint32_t *cpu_ptr; long r; /* MES KIQ fw hasn't indirect buffer support for now */ @@ -586,7 +587,7 @@ out: static int gfx_v12_0_init_microcode(struct amdgpu_device *adev) { - char ucode_prefix[15]; + char ucode_prefix[30]; int err; const struct rlc_firmware_header_v2_0 *rlc_hdr; uint16_t version_major; @@ -613,9 +614,14 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev) amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK); if (!amdgpu_sriov_vf(adev)) { - err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, - AMDGPU_UCODE_REQUIRED, - "amdgpu/%s_rlc.bin", ucode_prefix); + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, + AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_rlc_kicker.bin", ucode_prefix); + else + err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, + AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_rlc.bin", ucode_prefix); if (err) goto out; rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; @@ -679,8 +685,7 @@ static u32 gfx_v12_0_get_csb_size(struct amdgpu_device *adev) return count; } -static void gfx_v12_0_get_csb_buffer(struct amdgpu_device *adev, - volatile u32 *buffer) +static void gfx_v12_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer) { u32 count = 0, clustercount = 0, i; const struct cs_section_def *sect = NULL; @@ -1542,10 +1547,14 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(12, 0, 0): case IP_VERSION(12, 0, 1): if ((adev->gfx.me_fw_version >= 2660) && - (adev->gfx.mec_fw_version >= 2920)) { - adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; - adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + (adev->gfx.mec_fw_version >= 2920) && + !amdgpu_sriov_vf(adev)) { + adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; } + break; + default: + break; } if (!adev->enable_mes_kiq) { @@ -3016,6 +3025,8 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, #endif if (prop->tmz_queue) tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1); + if (!prop->kernel_queue) + tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_NON_PRIV, 1); mqd->cp_gfx_hqd_cntl = tmp; /* set up cp_doorbell_control */ @@ -3165,8 +3176,10 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m, (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1)); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0); - tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); - tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); + if (prop->kernel_queue) { + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); + } if (prop->tmz_queue) tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1); 
mqd->cp_hqd_pq_control = tmp; @@ -3510,8 +3523,7 @@ static int gfx_v12_0_gfxhub_enable(struct amdgpu_device *adev) amdgpu_device_flush_hdp(adev, NULL); - value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? - false : true; + value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS; adev->gfxhub.funcs->set_fault_enable_default(adev, value); /* TODO investigate why this and the hdp flush above is needed, @@ -4690,21 +4702,6 @@ static void gfx_v12_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, ref, mask, 0x20); } -static void gfx_v12_0_ring_soft_recovery(struct amdgpu_ring *ring, - unsigned vmid) -{ - struct amdgpu_device *adev = ring->adev; - uint32_t value = 0; - - value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); - value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); - value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); - value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); - amdgpu_gfx_rlc_enter_safe_mode(adev, 0); - WREG32_SOC15(GC, 0, regSQ_CMD, value); - amdgpu_gfx_rlc_exit_safe_mode(adev, 0); -} - static void gfx_v12_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, uint32_t me, uint32_t pipe, @@ -5307,13 +5304,14 @@ static int gfx_v12_reset_gfx_pipe(struct amdgpu_ring *ring) return 0; } -static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) +static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; int r; - if (amdgpu_sriov_vf(adev)) - return -EINVAL; + amdgpu_ring_reset_helper_begin(ring, timedout_fence); r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false); if (r) { @@ -5335,7 +5333,7 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) return r; } - return amdgpu_ring_test_ring(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static int gfx_v12_0_reset_compute_pipe(struct amdgpu_ring *ring) @@ -5421,13 +5419,14 @@ static int gfx_v12_0_reset_compute_pipe(struct amdgpu_ring *ring) return 0; } -static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid) +static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; int r; - if (amdgpu_sriov_vf(adev)) - return -EINVAL; + amdgpu_ring_reset_helper_begin(ring, timedout_fence); r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true); if (r) { @@ -5448,7 +5447,7 @@ static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid) return r; } - return amdgpu_ring_test_ring(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static void gfx_v12_0_ring_begin_use(struct amdgpu_ring *ring) @@ -5526,7 +5525,6 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = { .emit_wreg = gfx_v12_0_ring_emit_wreg, .emit_reg_wait = gfx_v12_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait, - .soft_recovery = gfx_v12_0_ring_soft_recovery, .emit_mem_sync = gfx_v12_0_emit_mem_sync, .reset = gfx_v12_0_reset_kgq, .emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader, @@ -5565,7 +5563,6 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = { .emit_wreg = gfx_v12_0_ring_emit_wreg, .emit_reg_wait = gfx_v12_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait, - .soft_recovery = gfx_v12_0_ring_soft_recovery, .emit_mem_sync = gfx_v12_0_emit_mem_sync, .reset = gfx_v12_0_reset_kcq, .emit_cleaner_shader = 
gfx_v12_0_ring_emit_cleaner_shader, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 70d7a1f434c4..7693b7953426 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -86,7 +86,7 @@ MODULE_FIRMWARE("amdgpu/hainan_ce.bin"); MODULE_FIRMWARE("amdgpu/hainan_rlc.bin"); static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev); -static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer); +static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer); //static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev); static void gfx_v6_0_init_pg(struct amdgpu_device *adev); @@ -2354,7 +2354,7 @@ static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring, static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) { const u32 *src_ptr; - volatile u32 *dst_ptr; + u32 *dst_ptr; u32 dws; u64 reg_list_mc_addr; const struct cs_section_def *cs_data; @@ -2855,8 +2855,7 @@ static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev) return count; } -static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, - volatile u32 *buffer) +static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer) { u32 count = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index da0534ff1271..5976ed55d9db 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -883,7 +883,7 @@ static const u32 kalindi_rlc_save_restore_register_list[] = { }; static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev); -static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer); +static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer); static void gfx_v7_0_init_pg(struct amdgpu_device *adev); static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev); @@ -3882,8 +3882,7 @@ static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev) return count; } -static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, - volatile u32 *buffer) +static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer) { u32 count = 0; @@ -4884,76 +4883,6 @@ static void gfx_v7_0_emit_mem_sync_compute(struct amdgpu_ring *ring) amdgpu_ring_write(ring, 0x0000000A); /* poll interval */ } -static void gfx_v7_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel, - int mem_space, int opt, uint32_t addr0, - uint32_t addr1, uint32_t ref, uint32_t mask, - uint32_t inv) -{ - amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); - amdgpu_ring_write(ring, - /* memory (1) or register (0) */ - (WAIT_REG_MEM_MEM_SPACE(mem_space) | - WAIT_REG_MEM_OPERATION(opt) | /* wait */ - WAIT_REG_MEM_FUNCTION(3) | /* equal */ - WAIT_REG_MEM_ENGINE(eng_sel))); - - if (mem_space) - BUG_ON(addr0 & 0x3); /* Dword align */ - amdgpu_ring_write(ring, addr0); - amdgpu_ring_write(ring, addr1); - amdgpu_ring_write(ring, ref); - amdgpu_ring_write(ring, mask); - amdgpu_ring_write(ring, inv); /* poll interval */ -} - -static void gfx_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, - uint32_t val, uint32_t mask) -{ - gfx_v7_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); -} - -static int gfx_v7_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) -{ - struct amdgpu_device *adev = ring->adev; - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; - struct amdgpu_ring *kiq_ring = &kiq->ring; - unsigned long flags; - u32 tmp; - int r; - - if (amdgpu_sriov_vf(adev)) - return 
-EINVAL; - - if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) - return -EINVAL; - - spin_lock_irqsave(&kiq->ring_lock, flags); - - if (amdgpu_ring_alloc(kiq_ring, 5)) { - spin_unlock_irqrestore(&kiq->ring_lock, flags); - return -ENOMEM; - } - - tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid); - gfx_v7_0_ring_emit_wreg(kiq_ring, mmCP_VMID_RESET, tmp); - amdgpu_ring_commit(kiq_ring); - - spin_unlock_irqrestore(&kiq->ring_lock, flags); - - r = amdgpu_ring_test_ring(kiq_ring); - if (r) - return r; - - if (amdgpu_ring_alloc(ring, 7 + 12 + 5)) - return -ENOMEM; - gfx_v7_0_ring_emit_fence_gfx(ring, ring->fence_drv.gpu_addr, - ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC); - gfx_v7_0_ring_emit_reg_wait(ring, mmCP_VMID_RESET, 0, 0xffff); - gfx_v7_0_ring_emit_wreg(ring, mmCP_VMID_RESET, 0); - - return amdgpu_ring_test_ring(ring); -} - static const struct amd_ip_funcs gfx_v7_0_ip_funcs = { .name = "gfx_v7_0", .early_init = gfx_v7_0_early_init, @@ -5003,7 +4932,6 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { .emit_wreg = gfx_v7_0_ring_emit_wreg, .soft_recovery = gfx_v7_0_ring_soft_recovery, .emit_mem_sync = gfx_v7_0_emit_mem_sync, - .reset = gfx_v7_0_reset_kgq, }; static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index bc983ecf3d99..0856ff65288c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1220,8 +1220,7 @@ out: return err; } -static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev, - volatile u32 *buffer) +static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer) { u32 count = 0; @@ -6340,34 +6339,6 @@ static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, amdgpu_ring_write(ring, val); } -static void gfx_v8_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel, - int mem_space, int opt, uint32_t addr0, - uint32_t addr1, uint32_t ref, uint32_t mask, - uint32_t inv) -{ - amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); - amdgpu_ring_write(ring, - /* memory (1) or register (0) */ - (WAIT_REG_MEM_MEM_SPACE(mem_space) | - WAIT_REG_MEM_OPERATION(opt) | /* wait */ - WAIT_REG_MEM_FUNCTION(3) | /* equal */ - WAIT_REG_MEM_ENGINE(eng_sel))); - - if (mem_space) - BUG_ON(addr0 & 0x3); /* Dword align */ - amdgpu_ring_write(ring, addr0); - amdgpu_ring_write(ring, addr1); - amdgpu_ring_write(ring, ref); - amdgpu_ring_write(ring, mask); - amdgpu_ring_write(ring, inv); /* poll interval */ -} - -static void gfx_v8_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, - uint32_t val, uint32_t mask) -{ - gfx_v8_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); -} - static void gfx_v8_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid) { struct amdgpu_device *adev = ring->adev; @@ -6844,48 +6815,6 @@ static void gfx_v8_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable) } -static int gfx_v8_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) -{ - struct amdgpu_device *adev = ring->adev; - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; - struct amdgpu_ring *kiq_ring = &kiq->ring; - unsigned long flags; - u32 tmp; - int r; - - if (amdgpu_sriov_vf(adev)) - return -EINVAL; - - if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) - return -EINVAL; - - spin_lock_irqsave(&kiq->ring_lock, flags); - - if (amdgpu_ring_alloc(kiq_ring, 5)) { - spin_unlock_irqrestore(&kiq->ring_lock, flags); - return -ENOMEM; - } - - tmp = REG_SET_FIELD(0, CP_VMID_RESET, 
RESET_REQUEST, 1 << vmid); - gfx_v8_0_ring_emit_wreg(kiq_ring, mmCP_VMID_RESET, tmp); - amdgpu_ring_commit(kiq_ring); - - spin_unlock_irqrestore(&kiq->ring_lock, flags); - - r = amdgpu_ring_test_ring(kiq_ring); - if (r) - return r; - - if (amdgpu_ring_alloc(ring, 7 + 12 + 5)) - return -ENOMEM; - gfx_v8_0_ring_emit_fence_gfx(ring, ring->fence_drv.gpu_addr, - ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC); - gfx_v8_0_ring_emit_reg_wait(ring, mmCP_VMID_RESET, 0, 0xffff); - gfx_v8_0_ring_emit_wreg(ring, mmCP_VMID_RESET, 0); - - return amdgpu_ring_test_ring(ring); -} - static const struct amd_ip_funcs gfx_v8_0_ip_funcs = { .name = "gfx_v8_0", .early_init = gfx_v8_0_early_init, @@ -6951,7 +6880,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { .emit_wreg = gfx_v8_0_ring_emit_wreg, .soft_recovery = gfx_v8_0_ring_soft_recovery, .emit_mem_sync = gfx_v8_0_emit_mem_sync, - .reset = gfx_v8_0_reset_kgq, }; static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index ad9be3656653..dd19a97436db 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1648,8 +1648,7 @@ static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev) return count; } -static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev, - volatile u32 *buffer) +static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer) { u32 count = 0; @@ -2410,6 +2409,8 @@ static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block) amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); adev->gfx.compute_supported_reset = amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); + if (!amdgpu_sriov_vf(adev)) + adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0); if (r) { @@ -2648,6 +2649,9 @@ static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev) !READ_ONCE(adev->barrier_has_auto_waitcnt)); WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp); break; + case IP_VERSION(9, 4, 2): + gfx_v9_4_2_init_sq(adev); + break; default: break; } @@ -4170,19 +4174,17 @@ static int gfx_v9_0_soft_reset(struct amdgpu_ip_block *ip_block) /* Disable MEC parsing/prefetching */ gfx_v9_0_cp_compute_enable(adev, false); - if (grbm_soft_reset) { - tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); - tmp |= grbm_soft_reset; - dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); - tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); + tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); + tmp |= grbm_soft_reset; + dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); + tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); - udelay(50); + udelay(50); - tmp &= ~grbm_soft_reset; - WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); - tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); - } + tmp &= ~grbm_soft_reset; + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); + tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); /* Wait a little for things to settle down */ udelay(50); @@ -7171,53 +7173,9 @@ static void gfx_v9_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) amdgpu_ring_insert_nop(ring, num_nop - 1); } -static int gfx_v9_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) -{ - struct amdgpu_device *adev = ring->adev; - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; - struct amdgpu_ring *kiq_ring = &kiq->ring; - unsigned long flags; - u32 tmp; - int r; - - if 
(amdgpu_sriov_vf(adev)) - return -EINVAL; - - if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) - return -EINVAL; - - spin_lock_irqsave(&kiq->ring_lock, flags); - - if (amdgpu_ring_alloc(kiq_ring, 5)) { - spin_unlock_irqrestore(&kiq->ring_lock, flags); - return -ENOMEM; - } - - tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid); - gfx_v9_0_ring_emit_wreg(kiq_ring, - SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), tmp); - amdgpu_ring_commit(kiq_ring); - - spin_unlock_irqrestore(&kiq->ring_lock, flags); - - r = amdgpu_ring_test_ring(kiq_ring); - if (r) - return r; - - if (amdgpu_ring_alloc(ring, 7 + 7 + 5)) - return -ENOMEM; - gfx_v9_0_ring_emit_fence(ring, ring->fence_drv.gpu_addr, - ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC); - gfx_v9_0_ring_emit_reg_wait(ring, - SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffff); - gfx_v9_0_ring_emit_wreg(ring, - SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0); - - return amdgpu_ring_test_ring(ring); -} - static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring, - unsigned int vmid) + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; @@ -7225,12 +7183,11 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring, unsigned long flags; int i, r; - if (amdgpu_sriov_vf(adev)) - return -EINVAL; - if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + spin_lock_irqsave(&kiq->ring_lock, flags); if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { @@ -7280,13 +7237,13 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring, } kiq->pmf->kiq_map_queues(kiq_ring, ring); amdgpu_ring_commit(kiq_ring); - spin_unlock_irqrestore(&kiq->ring_lock, flags); r = amdgpu_ring_test_ring(kiq_ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); if (r) { DRM_ERROR("fail to remap queue\n"); return r; } - return amdgpu_ring_test_ring(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static void gfx_v9_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) @@ -7496,7 +7453,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = { .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, .soft_recovery = gfx_v9_0_ring_soft_recovery, .emit_mem_sync = gfx_v9_0_emit_mem_sync, - .reset = gfx_v9_0_reset_kgq, .emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader, .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, .end_use = amdgpu_gfx_enforce_isolation_ring_end_use, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c index c48cd47b531f..8058ea91ecaf 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c @@ -748,6 +748,18 @@ void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev, } } +void gfx_v9_4_2_init_sq(struct amdgpu_device *adev) +{ + uint32_t data; + + if (adev->gfx.mec_fw_version >= 98) { + adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN; + data = RREG32_SOC15(GC, 0, regSQ_CONFIG1); + data = REG_SET_FIELD(data, SQ_CONFIG1, DISABLE_XNACK_CHECK_IN_RETRY_DISABLE, 1); + WREG32_SOC15(GC, 0, regSQ_CONFIG1, data); + } +} + void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev, uint32_t first_vmid, uint32_t last_vmid) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h index 7584624b641c..a603724c1dfc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h 
@@ -28,6 +28,7 @@ void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev, uint32_t first_vmid, uint32_t last_vmid); void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev, uint32_t die_id); +void gfx_v9_4_2_init_sq(struct amdgpu_device *adev); void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev); int gfx_v9_4_2_do_edc_gpr_workarounds(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index c233edf60569..77f9d5b9a556 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1148,13 +1148,15 @@ static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block) switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 4, 3): case IP_VERSION(9, 4, 4): - if (adev->gfx.mec_fw_version >= 155) { + if ((adev->gfx.mec_fw_version >= 155) && + !amdgpu_sriov_vf(adev)) { adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE; } break; case IP_VERSION(9, 5, 0): - if (adev->gfx.mec_fw_version >= 21) { + if ((adev->gfx.mec_fw_version >= 21) && + !amdgpu_sriov_vf(adev)) { adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE; } @@ -1349,7 +1351,9 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { /* ToDo: GC 9.4.4 */ case IP_VERSION(9, 4, 3): - if (adev->gfx.mec_fw_version >= 184) + if (adev->gfx.mec_fw_version >= 184 && + (amdgpu_sriov_reg_access_sq_config(adev) || + !amdgpu_sriov_vf(adev))) adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN; break; case IP_VERSION(9, 5, 0): @@ -2457,19 +2461,17 @@ static int gfx_v9_4_3_soft_reset(struct amdgpu_ip_block *ip_block) /* Disable MEC parsing/prefetching */ gfx_v9_4_3_xcc_cp_compute_enable(adev, false, 0); - if (grbm_soft_reset) { - tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET); - tmp |= grbm_soft_reset; - dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp); - tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET); + tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET); + tmp |= grbm_soft_reset; + dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp); + tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET); - udelay(50); + udelay(50); - tmp &= ~grbm_soft_reset; - WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp); - tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET); - } + tmp &= ~grbm_soft_reset; + WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp); + tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET); /* Wait a little for things to settle down */ udelay(50); @@ -3552,20 +3554,21 @@ static int gfx_v9_4_3_reset_hw_pipe(struct amdgpu_ring *ring) } static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring, - unsigned int vmid) + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id]; struct amdgpu_ring *kiq_ring = &kiq->ring; + int reset_mode = AMDGPU_RESET_TYPE_PER_QUEUE; unsigned long flags; int r; - if (amdgpu_sriov_vf(adev)) - return -EINVAL; - if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + spin_lock_irqsave(&kiq->ring_lock, flags); 
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { @@ -3591,8 +3594,11 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring, dev_err(adev->dev, "fail to wait on hqd deactive and will try pipe reset\n"); pipe_reset: - if(r) { + if (r) { + if (!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE)) + return -EOPNOTSUPP; r = gfx_v9_4_3_reset_hw_pipe(ring); + reset_mode = AMDGPU_RESET_TYPE_PER_PIPE; dev_info(adev->dev, "ring: %s pipe reset :%s\n", ring->name, r ? "failed" : "successfully"); if (r) @@ -3612,14 +3618,24 @@ pipe_reset: } kiq->pmf->kiq_map_queues(kiq_ring, ring); amdgpu_ring_commit(kiq_ring); - spin_unlock_irqrestore(&kiq->ring_lock, flags); - r = amdgpu_ring_test_ring(kiq_ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); if (r) { + if (reset_mode == AMDGPU_RESET_TYPE_PER_QUEUE) + goto pipe_reset; + dev_err(adev->dev, "fail to remap queue\n"); return r; } - return amdgpu_ring_test_ring(ring); + + if (reset_mode == AMDGPU_RESET_TYPE_PER_QUEUE) { + r = amdgpu_ring_test_ring(ring); + if (r) + goto pipe_reset; + } + + + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } enum amdgpu_gfx_cp_ras_mem_id { diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index cb25f7f0dfc1..6c03bf9f1ae8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -74,6 +74,8 @@ static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev, static void gfxhub_v1_2_xcc_init_gart_aperture_regs(struct amdgpu_device *adev, uint32_t xcc_mask) { + uint64_t gart_start = amdgpu_virt_xgmi_migrate_enabled(adev) ? + adev->gmc.vram_start : adev->gmc.fb_start; uint64_t pt_base; int i; @@ -91,10 +93,10 @@ static void gfxhub_v1_2_xcc_init_gart_aperture_regs(struct amdgpu_device *adev, if (adev->gmc.pdb0_bo) { WREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, - (u32)(adev->gmc.fb_start >> 12)); + (u32)(gart_start >> 12)); WREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, - (u32)(adev->gmc.fb_start >> 44)); + (u32)(gart_start >> 44)); WREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, @@ -180,7 +182,7 @@ gfxhub_v1_2_xcc_init_system_aperture_regs(struct amdgpu_device *adev, /* In the case squeezing vram into GART aperture, we don't use * FB aperture and AGP aperture. Disable them. 
*/ - if (adev->gmc.pdb0_bo) { + if (adev->gmc.pdb0_bo && adev->gmc.xgmi.connected_to_cpu) { WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_TOP, 0); WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF); WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_AGP_TOP, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index a3e2787501f1..d7499be8c4bf 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -164,10 +164,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev, entry->src_id, entry->ring_id, entry->vmid, entry->pasid); task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid); if (task_info) { - dev_err(adev->dev, - " in process %s pid %d thread %s pid %d\n", - task_info->process_name, task_info->tgid, - task_info->task_name, task_info->pid); + amdgpu_vm_print_task_info(adev, task_info); amdgpu_vm_put_task_info(task_info); } @@ -469,24 +466,6 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int * 0 valid */ -static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags) -{ - switch (flags) { - case AMDGPU_VM_MTYPE_DEFAULT: - return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC); - case AMDGPU_VM_MTYPE_NC: - return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC); - case AMDGPU_VM_MTYPE_WC: - return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_WC); - case AMDGPU_VM_MTYPE_CC: - return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_CC); - case AMDGPU_VM_MTYPE_UC: - return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_UC); - default: - return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC); - } -} - static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level, uint64_t *addr, uint64_t *flags) { @@ -511,21 +490,39 @@ static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level, } static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev, - struct amdgpu_bo_va_mapping *mapping, + struct amdgpu_vm *vm, + struct amdgpu_bo *bo, + uint32_t vm_flags, uint64_t *flags) { - struct amdgpu_bo *bo = mapping->bo_va->base.bo; - - *flags &= ~AMDGPU_PTE_EXECUTABLE; - *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; + if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE) + *flags |= AMDGPU_PTE_EXECUTABLE; + else + *flags &= ~AMDGPU_PTE_EXECUTABLE; - *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK; - *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK); + switch (vm_flags & AMDGPU_VM_MTYPE_MASK) { + case AMDGPU_VM_MTYPE_DEFAULT: + case AMDGPU_VM_MTYPE_NC: + default: + *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_NC); + break; + case AMDGPU_VM_MTYPE_WC: + *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_WC); + break; + case AMDGPU_VM_MTYPE_CC: + *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_CC); + break; + case AMDGPU_VM_MTYPE_UC: + *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_UC); + break; + } - *flags &= ~AMDGPU_PTE_NOALLOC; - *flags |= (mapping->flags & AMDGPU_PTE_NOALLOC); + if (vm_flags & AMDGPU_VM_PAGE_NOALLOC) + *flags |= AMDGPU_PTE_NOALLOC; + else + *flags &= ~AMDGPU_PTE_NOALLOC; - if (mapping->flags & AMDGPU_PTE_PRT) { + if (vm_flags & AMDGPU_VM_PAGE_PRT) { *flags |= AMDGPU_PTE_PRT; *flags |= AMDGPU_PTE_SNOOPED; *flags |= AMDGPU_PTE_LOG; @@ -566,7 +563,6 @@ static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = { .flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid, .emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb, .emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping, - .map_mtype = gmc_v10_0_map_mtype, .get_vm_pde = gmc_v10_0_get_vm_pde, .get_vm_pte = gmc_v10_0_get_vm_pte, 
.get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size, @@ -967,8 +963,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) /* Flush HDP after it is initialized */ amdgpu_device_flush_hdp(adev, NULL); - value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? - false : true; + value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS; if (!adev->in_s0ix) adev->gfxhub.funcs->set_fault_enable_default(adev, value); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index 72211409227b..7bc389d9f5c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -134,10 +134,7 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev, entry->src_id, entry->ring_id, entry->vmid, entry->pasid); task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid); if (task_info) { - dev_err(adev->dev, - " in process %s pid %d thread %s pid %d)\n", - task_info->process_name, task_info->tgid, - task_info->task_name, task_info->pid); + amdgpu_vm_print_task_info(adev, task_info); amdgpu_vm_put_task_info(task_info); } @@ -433,24 +430,6 @@ static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int * 0 valid */ -static uint64_t gmc_v11_0_map_mtype(struct amdgpu_device *adev, uint32_t flags) -{ - switch (flags) { - case AMDGPU_VM_MTYPE_DEFAULT: - return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC); - case AMDGPU_VM_MTYPE_NC: - return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC); - case AMDGPU_VM_MTYPE_WC: - return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_WC); - case AMDGPU_VM_MTYPE_CC: - return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_CC); - case AMDGPU_VM_MTYPE_UC: - return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_UC); - default: - return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC); - } -} - static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level, uint64_t *addr, uint64_t *flags) { @@ -475,21 +454,39 @@ static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level, } static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev, - struct amdgpu_bo_va_mapping *mapping, + struct amdgpu_vm *vm, + struct amdgpu_bo *bo, + uint32_t vm_flags, uint64_t *flags) { - struct amdgpu_bo *bo = mapping->bo_va->base.bo; - - *flags &= ~AMDGPU_PTE_EXECUTABLE; - *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; + if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE) + *flags |= AMDGPU_PTE_EXECUTABLE; + else + *flags &= ~AMDGPU_PTE_EXECUTABLE; - *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK; - *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK); + switch (vm_flags & AMDGPU_VM_MTYPE_MASK) { + case AMDGPU_VM_MTYPE_DEFAULT: + case AMDGPU_VM_MTYPE_NC: + default: + *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_NC); + break; + case AMDGPU_VM_MTYPE_WC: + *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_WC); + break; + case AMDGPU_VM_MTYPE_CC: + *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_CC); + break; + case AMDGPU_VM_MTYPE_UC: + *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_UC); + break; + } - *flags &= ~AMDGPU_PTE_NOALLOC; - *flags |= (mapping->flags & AMDGPU_PTE_NOALLOC); + if (vm_flags & AMDGPU_VM_PAGE_NOALLOC) + *flags |= AMDGPU_PTE_NOALLOC; + else + *flags &= ~AMDGPU_PTE_NOALLOC; - if (mapping->flags & AMDGPU_PTE_PRT) { + if (vm_flags & AMDGPU_VM_PAGE_PRT) { *flags |= AMDGPU_PTE_PRT; *flags |= AMDGPU_PTE_SNOOPED; *flags |= AMDGPU_PTE_LOG; @@ -530,7 +527,6 @@ static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = { .flush_gpu_tlb_pasid = gmc_v11_0_flush_gpu_tlb_pasid, .emit_flush_gpu_tlb = gmc_v11_0_emit_flush_gpu_tlb, .emit_pasid_mapping = 
gmc_v11_0_emit_pasid_mapping, - .map_mtype = gmc_v11_0_map_mtype, .get_vm_pde = gmc_v11_0_get_vm_pde, .get_vm_pte = gmc_v11_0_get_vm_pte, .get_vbios_fb_size = gmc_v11_0_get_vbios_fb_size, @@ -909,8 +905,7 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev) /* Flush HDP after it is initialized */ amdgpu_device_flush_hdp(adev, NULL); - value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? - false : true; + value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS; adev->mmhub.funcs->set_fault_enable_default(adev, value); gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c index b645d3e6a6c8..404cc8c2ff2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c @@ -127,10 +127,7 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev, entry->src_id, entry->ring_id, entry->vmid, entry->pasid); task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid); if (task_info) { - dev_err(adev->dev, - " in process %s pid %d thread %s pid %d)\n", - task_info->process_name, task_info->tgid, - task_info->task_name, task_info->pid); + amdgpu_vm_print_task_info(adev, task_info); amdgpu_vm_put_task_info(task_info); } @@ -339,6 +336,22 @@ static void gmc_v12_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t queried; int vmid, i; + if (adev->enable_uni_mes && adev->mes.ring[AMDGPU_MES_SCHED_PIPE].sched.ready && + (adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x83) { + struct mes_inv_tlbs_pasid_input input = {0}; + input.pasid = pasid; + input.flush_type = flush_type; + input.hub_id = AMDGPU_GFXHUB(0); + /* MES will invalidate all gc_hub for the device from master */ + adev->mes.funcs->invalidate_tlbs_pasid(&adev->mes, &input); + if (all_hub) { + /* Only need to invalidate mm_hub now, gfx12 only support one mmhub */ + input.hub_id = AMDGPU_MMHUB0(0); + adev->mes.funcs->invalidate_tlbs_pasid(&adev->mes, &input); + } + return; + } + for (vmid = 1; vmid < 16; vmid++) { bool valid; @@ -456,20 +469,6 @@ static void gmc_v12_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid * 0 valid */ -static uint64_t gmc_v12_0_map_mtype(struct amdgpu_device *adev, uint32_t flags) -{ - switch (flags) { - case AMDGPU_VM_MTYPE_DEFAULT: - return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC); - case AMDGPU_VM_MTYPE_NC: - return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC); - case AMDGPU_VM_MTYPE_UC: - return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_UC); - default: - return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC); - } -} - static void gmc_v12_0_get_vm_pde(struct amdgpu_device *adev, int level, uint64_t *addr, uint64_t *flags) { @@ -493,18 +492,35 @@ static void gmc_v12_0_get_vm_pde(struct amdgpu_device *adev, int level, } static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev, - struct amdgpu_bo_va_mapping *mapping, + struct amdgpu_vm *vm, + struct amdgpu_bo *bo, + uint32_t vm_flags, uint64_t *flags) { - struct amdgpu_bo *bo = mapping->bo_va->base.bo; + if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE) + *flags |= AMDGPU_PTE_EXECUTABLE; + else + *flags &= ~AMDGPU_PTE_EXECUTABLE; - *flags &= ~AMDGPU_PTE_EXECUTABLE; - *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; + switch (vm_flags & AMDGPU_VM_MTYPE_MASK) { + case AMDGPU_VM_MTYPE_DEFAULT: + *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC); + break; + case AMDGPU_VM_MTYPE_NC: + default: + *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC); + break; + case AMDGPU_VM_MTYPE_UC: + *flags = 
AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC); + break; + } - *flags &= ~AMDGPU_PTE_MTYPE_GFX12_MASK; - *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_GFX12_MASK); + if (vm_flags & AMDGPU_VM_PAGE_NOALLOC) + *flags |= AMDGPU_PTE_NOALLOC; + else + *flags &= ~AMDGPU_PTE_NOALLOC; - if (mapping->flags & AMDGPU_PTE_PRT_GFX12) { + if (vm_flags & AMDGPU_VM_PAGE_PRT) { *flags |= AMDGPU_PTE_PRT_GFX12; *flags |= AMDGPU_PTE_SNOOPED; *flags |= AMDGPU_PTE_SYSTEM; @@ -546,7 +562,6 @@ static const struct amdgpu_gmc_funcs gmc_v12_0_gmc_funcs = { .flush_gpu_tlb_pasid = gmc_v12_0_flush_gpu_tlb_pasid, .emit_flush_gpu_tlb = gmc_v12_0_emit_flush_gpu_tlb, .emit_pasid_mapping = gmc_v12_0_emit_pasid_mapping, - .map_mtype = gmc_v12_0_map_mtype, .get_vm_pde = gmc_v12_0_get_vm_pde, .get_vm_pte = gmc_v12_0_get_vm_pte, .get_vbios_fb_size = gmc_v12_0_get_vbios_fb_size, @@ -879,8 +894,7 @@ static int gmc_v12_0_gart_enable(struct amdgpu_device *adev) /* Flush HDP after it is initialized */ amdgpu_device_flush_hdp(adev, NULL); - value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? - false : true; + value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS; adev->mmhub.funcs->set_fault_enable_default(adev, value); gmc_v12_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 8030fcd64210..f6ad7911f1e6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -382,7 +382,9 @@ static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level, } static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev, - struct amdgpu_bo_va_mapping *mapping, + struct amdgpu_vm *vm, + struct amdgpu_bo *bo, + uint32_t vm_flags, uint64_t *flags) { *flags &= ~AMDGPU_PTE_EXECUTABLE; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index a8d5795084fc..93d7ccb7d013 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -504,7 +504,9 @@ static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level, } static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev, - struct amdgpu_bo_va_mapping *mapping, + struct amdgpu_vm *vm, + struct amdgpu_bo *bo, + uint32_t vm_flags, uint64_t *flags) { *flags &= ~AMDGPU_PTE_EXECUTABLE; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 99ca08e9bdb5..c5e2a2c41e06 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -716,11 +716,15 @@ static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level, } static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev, - struct amdgpu_bo_va_mapping *mapping, + struct amdgpu_vm *vm, + struct amdgpu_bo *bo, + uint32_t vm_flags, uint64_t *flags) { - *flags &= ~AMDGPU_PTE_EXECUTABLE; - *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; + if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE) + *flags |= AMDGPU_PTE_EXECUTABLE; + else + *flags &= ~AMDGPU_PTE_EXECUTABLE; *flags &= ~AMDGPU_PTE_PRT; } @@ -1458,9 +1462,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid); if (task_info) { - dev_err(adev->dev, " for process %s pid %d thread %s pid %d\n", - task_info->process_name, task_info->tgid, - task_info->task_name, task_info->pid); + amdgpu_vm_print_task_info(adev, task_info); amdgpu_vm_put_task_info(task_info); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 282197f4ffb1..0d1dd587db5f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -78,8 +78,6 @@ #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2 0x05ea #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX 2 -#define MAX_MEM_RANGES 8 - static const char * const gfxhub_client_ids[] = { "CB", "DB", @@ -411,11 +409,6 @@ static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = { (0x001d43e0 + 0x00001800), }; -static inline bool gmc_v9_0_is_multi_chiplet(struct amdgpu_device *adev) -{ - return !!adev->aid_mask; -} - static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned int type, @@ -641,10 +634,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid); if (task_info) { - dev_err(adev->dev, - " for process %s pid %d thread %s pid %d)\n", - task_info->process_name, task_info->tgid, - task_info->task_name, task_info->pid); + amdgpu_vm_print_task_info(adev, task_info); amdgpu_vm_put_task_info(task_info); } @@ -652,7 +642,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, addr, entry->client_id, soc15_ih_clientid_name[entry->client_id]); - if (gmc_v9_0_is_multi_chiplet(adev)) + if (amdgpu_is_multi_aid(adev)) dev_err(adev->dev, " cookie node_id %d fault from die %s%d%s\n", node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4, node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? ".XCD1" : ""); @@ -801,7 +791,7 @@ static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev, uint32_t vmhub) { if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) || - gmc_v9_0_is_multi_chiplet(adev)) + amdgpu_is_multi_aid(adev)) return false; return ((vmhub == AMDGPU_MMHUB0(0) || @@ -1083,27 +1073,6 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int v * 0 valid */ -static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags) - -{ - switch (flags) { - case AMDGPU_VM_MTYPE_DEFAULT: - return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC); - case AMDGPU_VM_MTYPE_NC: - return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC); - case AMDGPU_VM_MTYPE_WC: - return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_WC); - case AMDGPU_VM_MTYPE_RW: - return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_RW); - case AMDGPU_VM_MTYPE_CC: - return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_CC); - case AMDGPU_VM_MTYPE_UC: - return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_UC); - default: - return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC); - } -} - static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level, uint64_t *addr, uint64_t *flags) { @@ -1131,8 +1100,9 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level, } static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, + struct amdgpu_vm *vm, struct amdgpu_bo *bo, - struct amdgpu_bo_va_mapping *mapping, + uint32_t vm_flags, uint64_t *flags) { struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev); @@ -1142,7 +1112,6 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, AMDGPU_GEM_CREATE_EXT_COHERENT); bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT; bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED; - struct amdgpu_vm *vm = mapping->bo_va->base.vm; unsigned int mtype_local, mtype; uint32_t gc_ip_version = amdgpu_ip_version(adev, GC_HWIP, 0); bool snoop = false; @@ -1172,7 +1141,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, mtype = 
MTYPE_UC; else mtype = MTYPE_NC; - if (mapping->bo_va->is_xgmi) + if (amdgpu_xgmi_same_hive(adev, bo_adev)) snoop = true; } } else { @@ -1247,24 +1216,43 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, } static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev, - struct amdgpu_bo_va_mapping *mapping, + struct amdgpu_vm *vm, + struct amdgpu_bo *bo, + uint32_t vm_flags, uint64_t *flags) { - struct amdgpu_bo *bo = mapping->bo_va->base.bo; - - *flags &= ~AMDGPU_PTE_EXECUTABLE; - *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; + if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE) + *flags |= AMDGPU_PTE_EXECUTABLE; + else + *flags &= ~AMDGPU_PTE_EXECUTABLE; - *flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK; - *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK; + switch (vm_flags & AMDGPU_VM_MTYPE_MASK) { + case AMDGPU_VM_MTYPE_DEFAULT: + case AMDGPU_VM_MTYPE_NC: + default: + *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_NC); + break; + case AMDGPU_VM_MTYPE_WC: + *flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_WC); + break; + case AMDGPU_VM_MTYPE_RW: + *flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_RW); + break; + case AMDGPU_VM_MTYPE_CC: + *flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC); + break; + case AMDGPU_VM_MTYPE_UC: + *flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_UC); + break; + } - if (mapping->flags & AMDGPU_PTE_PRT) { + if (vm_flags & AMDGPU_VM_PAGE_PRT) { *flags |= AMDGPU_PTE_PRT; *flags &= ~AMDGPU_PTE_VALID; } if ((*flags & AMDGPU_PTE_VALID) && bo) - gmc_v9_0_get_coherence_flags(adev, bo, mapping, flags); + gmc_v9_0_get_coherence_flags(adev, vm, bo, vm_flags, flags); } static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev, @@ -1385,46 +1373,6 @@ static unsigned int gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) return size; } -static enum amdgpu_memory_partition -gmc_v9_0_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes) -{ - enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE; - - if (adev->nbio.funcs->get_memory_partition_mode) - mode = adev->nbio.funcs->get_memory_partition_mode(adev, - supp_modes); - - return mode; -} - -static enum amdgpu_memory_partition -gmc_v9_0_query_vf_memory_partition(struct amdgpu_device *adev) -{ - switch (adev->gmc.num_mem_partitions) { - case 0: - return UNKNOWN_MEMORY_PARTITION_MODE; - case 1: - return AMDGPU_NPS1_PARTITION_MODE; - case 2: - return AMDGPU_NPS2_PARTITION_MODE; - case 4: - return AMDGPU_NPS4_PARTITION_MODE; - default: - return AMDGPU_NPS1_PARTITION_MODE; - } - - return AMDGPU_NPS1_PARTITION_MODE; -} - -static enum amdgpu_memory_partition -gmc_v9_0_query_memory_partition(struct amdgpu_device *adev) -{ - if (amdgpu_sriov_vf(adev)) - return gmc_v9_0_query_vf_memory_partition(adev); - - return gmc_v9_0_get_memory_partition(adev, NULL); -} - static bool gmc_v9_0_need_reset_on_init(struct amdgpu_device *adev) { if (adev->nbio.funcs && adev->nbio.funcs->is_nps_switch_requested && @@ -1441,12 +1389,11 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = { .flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid, .emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb, .emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping, - .map_mtype = gmc_v9_0_map_mtype, .get_vm_pde = gmc_v9_0_get_vm_pde, .get_vm_pte = gmc_v9_0_get_vm_pte, .override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags, .get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size, - .query_mem_partition_mode = &gmc_v9_0_query_memory_partition, + .query_mem_partition_mode = &amdgpu_gmc_query_memory_partition, .request_mem_partition_mode = 
&amdgpu_gmc_request_memory_partition, .need_reset_on_init = &gmc_v9_0_need_reset_on_init, }; @@ -1553,7 +1500,7 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev) static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev) { - if (gmc_v9_0_is_multi_chiplet(adev)) + if (amdgpu_is_multi_aid(adev)) adev->gfxhub.funcs = &gfxhub_v1_2_funcs; else adev->gfxhub.funcs = &gfxhub_v1_0_funcs; @@ -1599,7 +1546,7 @@ static void gmc_v9_0_init_nps_details(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU)) return; - mode = gmc_v9_0_get_memory_partition(adev, &supp_modes); + mode = amdgpu_gmc_get_memory_partition(adev, &supp_modes); /* Mode detected by hardware and supported modes available */ if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) && supp_modes) { @@ -1635,7 +1582,7 @@ static int gmc_v9_0_early_init(struct amdgpu_ip_block *ip_block) */ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) || amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) || - gmc_v9_0_is_multi_chiplet(adev)) + amdgpu_is_multi_aid(adev)) adev->gmc.xgmi.supported = true; if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(6, 1, 0)) { @@ -1722,7 +1669,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, /* add the xgmi offset of the physical node */ base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; - if (adev->gmc.xgmi.connected_to_cpu) { + if (amdgpu_gmc_is_pdb0_enabled(adev)) { amdgpu_gmc_sysvm_location(adev, mc); } else { amdgpu_gmc_vram_location(adev, mc, base); @@ -1837,7 +1784,7 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev) return 0; } - if (adev->gmc.xgmi.connected_to_cpu) { + if (amdgpu_gmc_is_pdb0_enabled(adev)) { adev->gmc.vmid0_page_table_depth = 1; adev->gmc.vmid0_page_table_block_size = 12; } else { @@ -1863,7 +1810,7 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev) if (r) return r; - if (adev->gmc.xgmi.connected_to_cpu) + if (amdgpu_gmc_is_pdb0_enabled(adev)) r = amdgpu_gmc_pdb0_alloc(adev); } @@ -1885,195 +1832,21 @@ static void gmc_v9_0_save_registers(struct amdgpu_device *adev) adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0); } -static bool gmc_v9_0_validate_partition_info(struct amdgpu_device *adev) -{ - enum amdgpu_memory_partition mode; - u32 supp_modes; - bool valid; - - mode = gmc_v9_0_get_memory_partition(adev, &supp_modes); - - /* Mode detected by hardware not present in supported modes */ - if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) && - !(BIT(mode - 1) & supp_modes)) - return false; - - switch (mode) { - case UNKNOWN_MEMORY_PARTITION_MODE: - case AMDGPU_NPS1_PARTITION_MODE: - valid = (adev->gmc.num_mem_partitions == 1); - break; - case AMDGPU_NPS2_PARTITION_MODE: - valid = (adev->gmc.num_mem_partitions == 2); - break; - case AMDGPU_NPS4_PARTITION_MODE: - valid = (adev->gmc.num_mem_partitions == 3 || - adev->gmc.num_mem_partitions == 4); - break; - default: - valid = false; - } - - return valid; -} - -static bool gmc_v9_0_is_node_present(int *node_ids, int num_ids, int nid) -{ - int i; - - /* Check if node with id 'nid' is present in 'node_ids' array */ - for (i = 0; i < num_ids; ++i) - if (node_ids[i] == nid) - return true; - - return false; -} - -static void -gmc_v9_0_init_acpi_mem_ranges(struct amdgpu_device *adev, - struct amdgpu_mem_partition_info *mem_ranges) -{ - struct amdgpu_numa_info numa_info; - int node_ids[MAX_MEM_RANGES]; - int num_ranges = 0, ret; - int num_xcc, xcc_id; - uint32_t xcc_mask; - - num_xcc = 
NUM_XCC(adev->gfx.xcc_mask); - xcc_mask = (1U << num_xcc) - 1; - - for_each_inst(xcc_id, xcc_mask) { - ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info); - if (ret) - continue; - - if (numa_info.nid == NUMA_NO_NODE) { - mem_ranges[0].size = numa_info.size; - mem_ranges[0].numa.node = numa_info.nid; - num_ranges = 1; - break; - } - - if (gmc_v9_0_is_node_present(node_ids, num_ranges, - numa_info.nid)) - continue; - - node_ids[num_ranges] = numa_info.nid; - mem_ranges[num_ranges].numa.node = numa_info.nid; - mem_ranges[num_ranges].size = numa_info.size; - ++num_ranges; - } - - adev->gmc.num_mem_partitions = num_ranges; -} - -static void -gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev, - struct amdgpu_mem_partition_info *mem_ranges) -{ - enum amdgpu_memory_partition mode; - u32 start_addr = 0, size; - int i, r, l; - - mode = gmc_v9_0_query_memory_partition(adev); - - switch (mode) { - case UNKNOWN_MEMORY_PARTITION_MODE: - adev->gmc.num_mem_partitions = 0; - break; - case AMDGPU_NPS1_PARTITION_MODE: - adev->gmc.num_mem_partitions = 1; - break; - case AMDGPU_NPS2_PARTITION_MODE: - adev->gmc.num_mem_partitions = 2; - break; - case AMDGPU_NPS4_PARTITION_MODE: - if (adev->flags & AMD_IS_APU) - adev->gmc.num_mem_partitions = 3; - else - adev->gmc.num_mem_partitions = 4; - break; - default: - adev->gmc.num_mem_partitions = 1; - break; - } - - /* Use NPS range info, if populated */ - r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges, - &adev->gmc.num_mem_partitions); - if (!r) { - l = 0; - for (i = 1; i < adev->gmc.num_mem_partitions; ++i) { - if (mem_ranges[i].range.lpfn > - mem_ranges[i - 1].range.lpfn) - l = i; - } - - } else { - if (!adev->gmc.num_mem_partitions) { - dev_err(adev->dev, - "Not able to detect NPS mode, fall back to NPS1"); - adev->gmc.num_mem_partitions = 1; - } - /* Fallback to sw based calculation */ - size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT; - size /= adev->gmc.num_mem_partitions; - - for (i = 0; i < adev->gmc.num_mem_partitions; ++i) { - mem_ranges[i].range.fpfn = start_addr; - mem_ranges[i].size = - ((u64)size << AMDGPU_GPU_PAGE_SHIFT); - mem_ranges[i].range.lpfn = start_addr + size - 1; - start_addr += size; - } - - l = adev->gmc.num_mem_partitions - 1; - } - - /* Adjust the last one */ - mem_ranges[l].range.lpfn = - (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1; - mem_ranges[l].size = - adev->gmc.real_vram_size - - ((u64)mem_ranges[l].range.fpfn << AMDGPU_GPU_PAGE_SHIFT); -} - -static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev) -{ - bool valid; - - adev->gmc.mem_partitions = kcalloc(MAX_MEM_RANGES, - sizeof(struct amdgpu_mem_partition_info), - GFP_KERNEL); - if (!adev->gmc.mem_partitions) - return -ENOMEM; - - /* TODO : Get the range from PSP/Discovery for dGPU */ - if (adev->gmc.is_app_apu) - gmc_v9_0_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions); - else - gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions); - - if (amdgpu_sriov_vf(adev)) - valid = true; - else - valid = gmc_v9_0_validate_partition_info(adev); - if (!valid) { - /* TODO: handle invalid case */ - dev_WARN(adev->dev, - "Mem ranges not matching with hardware config"); - } - - return 0; -} - static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev) { + static const u32 regBIF_BIOS_SCRATCH_4 = 0x50; + u32 vram_info; + adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; adev->gmc.vram_width = 128 * 64; if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E; + + if (!(adev->flags & 
AMD_IS_APU) && !amdgpu_sriov_vf(adev)) { + vram_info = RREG32(regBIF_BIOS_SCRATCH_4); + adev->gmc.vram_vendor = vram_info & 0xF; + } } static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block) @@ -2088,7 +1861,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block) spin_lock_init(&adev->gmc.invalidate_lock); - if (gmc_v9_0_is_multi_chiplet(adev)) { + if (amdgpu_is_multi_aid(adev)) { gmc_v9_4_3_init_vram_info(adev); } else if (!adev->bios) { if (adev->flags & AMD_IS_APU) { @@ -2238,8 +2011,8 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block) amdgpu_gmc_get_vbios_allocations(adev); - if (gmc_v9_0_is_multi_chiplet(adev)) { - r = gmc_v9_0_init_mem_ranges(adev); + if (amdgpu_is_multi_aid(adev)) { + r = amdgpu_gmc_init_mem_ranges(adev); if (r) return r; } @@ -2267,7 +2040,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block) adev->vm_manager.first_kfd_vmid = (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) || amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) || - gmc_v9_0_is_multi_chiplet(adev)) ? + amdgpu_is_multi_aid(adev)) ? 3 : 8; @@ -2279,7 +2052,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - if (gmc_v9_0_is_multi_chiplet(adev)) + if (amdgpu_is_multi_aid(adev)) amdgpu_gmc_sysfs_init(adev); return 0; @@ -2289,7 +2062,7 @@ static int gmc_v9_0_sw_fini(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; - if (gmc_v9_0_is_multi_chiplet(adev)) + if (amdgpu_is_multi_aid(adev)) amdgpu_gmc_sysfs_fini(adev); amdgpu_gmc_ras_fini(adev); @@ -2363,7 +2136,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) { int r; - if (adev->gmc.xgmi.connected_to_cpu) + if (amdgpu_gmc_is_pdb0_enabled(adev)) amdgpu_gmc_init_pdb0(adev); if (adev->gart.bo == NULL) { @@ -2521,7 +2294,7 @@ static int gmc_v9_0_resume(struct amdgpu_ip_block *ip_block) * information again. */ if (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS) { - gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions); + amdgpu_gmc_init_sw_mem_ranges(adev, adev->gmc.mem_partitions); adev->gmc.reset_flags &= ~AMDGPU_GMC_INIT_RESET_NPS; } diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c index 5900b560b7de..333e9c30c091 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c @@ -587,8 +587,7 @@ static int ih_v6_0_sw_init(struct amdgpu_ip_block *ip_block) /* use gpu virtual address for ih ring * until ih_checken is programmed to allow * use bus address for ih ring by psp bl */ - use_bus_addr = - (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true; + use_bus_addr = adev->firmware.load_type != AMDGPU_FW_LOAD_PSP; r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c index 068ed849dbad..95b3f4e55ec3 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c @@ -562,8 +562,7 @@ static int ih_v6_1_sw_init(struct amdgpu_ip_block *ip_block) /* use gpu virtual address for ih ring * until ih_checken is programmed to allow * use bus address for ih ring by psp bl */ - use_bus_addr = - (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? 
false : true; + use_bus_addr = adev->firmware.load_type != AMDGPU_FW_LOAD_PSP; r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c index 40a3530e0453..b32ea4129c61 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c @@ -552,8 +552,7 @@ static int ih_v7_0_sw_init(struct amdgpu_ip_block *ip_block) /* use gpu virtual address for ih ring * until ih_checken is programmed to allow * use bus address for ih ring by psp bl */ - use_bus_addr = - (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true; + use_bus_addr = adev->firmware.load_type != AMDGPU_FW_LOAD_PSP; r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c index df898dbb746e..58cd87db8061 100644 --- a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c @@ -34,12 +34,13 @@ MODULE_FIRMWARE("amdgpu/gc_12_0_0_imu.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_1_imu.bin"); +MODULE_FIRMWARE("amdgpu/gc_12_0_1_imu_kicker.bin"); #define TRANSFER_RAM_MASK 0x001c0000 static int imu_v12_0_init_microcode(struct amdgpu_device *adev) { - char ucode_prefix[15]; + char ucode_prefix[30]; int err; const struct imu_firmware_header_v1_0 *imu_hdr; struct amdgpu_firmware_info *info = NULL; @@ -47,8 +48,12 @@ static int imu_v12_0_init_microcode(struct amdgpu_device *adev) DRM_DEBUG("\n"); amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); - err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED, - "amdgpu/%s_imu.bin", ucode_prefix); + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_imu_kicker.bin", ucode_prefix); + else + err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_imu.bin", ucode_prefix); if (err) goto out; @@ -362,7 +367,7 @@ static void program_imu_rlc_ram(struct amdgpu_device *adev, static void imu_v12_0_program_rlc_ram(struct amdgpu_device *adev) { u32 reg_data, size = 0; - const u32 *data; + const u32 *data = NULL; int r = -EINVAL; WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_INDEX, 0x2); diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c index 574880d67009..4258d3e0b706 100644 --- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c +++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c @@ -29,6 +29,14 @@ #include "amdgpu.h" #include "isp_v4_1_1.h" +MODULE_FIRMWARE("amdgpu/isp_4_1_1.bin"); + +#define ISP_PERFORMANCE_STATE_LOW 0 +#define ISP_PERFORMANCE_STATE_HIGH 1 + +#define ISP_HIGH_PERFORMANC_XCLK 788 +#define ISP_HIGH_PERFORMANC_ICLK 788 + static const unsigned int isp_4_1_1_int_srcid[MAX_ISP411_INT_SRC] = { ISP_4_1__SRCID__ISP_RINGBUFFER_WPT9, ISP_4_1__SRCID__ISP_RINGBUFFER_WPT10, @@ -56,17 +64,137 @@ static struct gpiod_lookup_table isp_sensor_gpio_table = { }, }; +static int isp_poweroff(struct generic_pm_domain *genpd) +{ + struct amdgpu_isp *isp = container_of(genpd, struct amdgpu_isp, ispgpd); + struct amdgpu_device *adev = isp->adev; + + return amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ISP, true, 0); +} + +static int isp_poweron(struct generic_pm_domain *genpd) +{ + struct amdgpu_isp *isp = container_of(genpd, struct amdgpu_isp, ispgpd); + struct amdgpu_device *adev = isp->adev; + + return 
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ISP, false, 0); +} + +static int isp_set_performance_state(struct generic_pm_domain *genpd, + unsigned int state) +{ + struct amdgpu_isp *isp = container_of(genpd, struct amdgpu_isp, ispgpd); + struct amdgpu_device *adev = isp->adev; + u32 iclk, xclk; + int ret; + + switch (state) { + case ISP_PERFORMANCE_STATE_HIGH: + xclk = ISP_HIGH_PERFORMANC_XCLK; + iclk = ISP_HIGH_PERFORMANC_ICLK; + break; + case ISP_PERFORMANCE_STATE_LOW: + /* isp runs at default lowest clock-rate on power-on, do nothing */ + return 0; + default: + return -EINVAL; + } + + ret = amdgpu_dpm_set_soft_freq_range(adev, PP_ISPXCLK, xclk, 0); + if (ret) { + drm_err(&adev->ddev, "failed to set xclk %u to %u: %d\n", + xclk, state, ret); + return ret; + } + + ret = amdgpu_dpm_set_soft_freq_range(adev, PP_ISPICLK, iclk, 0); + if (ret) { + drm_err(&adev->ddev, "failed to set iclk %u to %u: %d\n", + iclk, state, ret); + return ret; + } + + return 0; +} + +static int isp_genpd_add_device(struct device *dev, void *data) +{ + struct generic_pm_domain *gpd = data; + struct platform_device *pdev = container_of(dev, struct platform_device, dev); + struct amdgpu_isp *isp = container_of(gpd, struct amdgpu_isp, ispgpd); + struct amdgpu_device *adev = isp->adev; + int ret; + + if (!pdev) + return -EINVAL; + + if (!dev->type->name) { + drm_dbg(&adev->ddev, "Invalid device type to add\n"); + goto exit; + } + + if (strcmp(dev->type->name, "mfd_device")) { + drm_dbg(&adev->ddev, "Invalid isp mfd device %s to add\n", pdev->mfd_cell->name); + goto exit; + } + + ret = pm_genpd_add_device(gpd, dev); + if (ret) { + drm_err(&adev->ddev, "Failed to add dev %s to genpd %d\n", + pdev->mfd_cell->name, ret); + return -ENODEV; + } + +exit: + /* Continue to add */ + return 0; +} + +static int isp_genpd_remove_device(struct device *dev, void *data) +{ + struct generic_pm_domain *gpd = data; + struct platform_device *pdev = container_of(dev, struct platform_device, dev); + struct amdgpu_isp *isp = container_of(gpd, struct amdgpu_isp, ispgpd); + struct amdgpu_device *adev = isp->adev; + int ret; + + if (!pdev) + return -EINVAL; + + if (!dev->type->name) { + drm_dbg(&adev->ddev, "Invalid device type to remove\n"); + goto exit; + } + + if (strcmp(dev->type->name, "mfd_device")) { + drm_dbg(&adev->ddev, "Invalid isp mfd device %s to remove\n", + pdev->mfd_cell->name); + goto exit; + } + + ret = pm_genpd_remove_device(dev); + if (ret) { + drm_err(&adev->ddev, "Failed to remove dev from genpd %d\n", ret); + return -ENODEV; + } + +exit: + /* Continue to remove */ + return 0; +} + static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp) { + const struct software_node *amd_camera_node, *isp4_node; struct amdgpu_device *adev = isp->adev; + struct acpi_device *acpi_dev; int idx, int_idx, num_res, r; - u8 isp_dev_hid[ACPI_ID_LEN]; u64 isp_base; if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289) return -EINVAL; - r = amdgpu_acpi_get_isp4_dev_hid(&isp_dev_hid); + r = amdgpu_acpi_get_isp4_dev(&acpi_dev); if (r) { drm_dbg(&adev->ddev, "Invalid isp platform detected (%d)", r); /* allow GPU init to progress */ @@ -74,18 +202,28 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp) } /* add GPIO resources required for OMNI5C10 sensor */ - if (!strcmp("OMNI5C10", isp_dev_hid)) { + if (!strcmp("OMNI5C10", acpi_device_hid(acpi_dev))) { gpiod_add_lookup_table(&isp_gpio_table); gpiod_add_lookup_table(&isp_sensor_gpio_table); } isp_base = adev->rmmio_base; + isp->ispgpd.name = "ISP_v_4_1_1"; + isp->ispgpd.power_off = 
isp_poweroff; + isp->ispgpd.power_on = isp_poweron; + isp->ispgpd.set_performance_state = isp_set_performance_state; + + r = pm_genpd_init(&isp->ispgpd, NULL, true); + if (r) { + drm_err(&adev->ddev, "failed to initialize genpd (%d)\n", r); + return -EINVAL; + } + isp->isp_cell = kcalloc(3, sizeof(struct mfd_cell), GFP_KERNEL); if (!isp->isp_cell) { r = -ENOMEM; - drm_err(&adev->ddev, - "%s: isp mfd cell alloc failed\n", __func__); + drm_err(&adev->ddev, "isp mfd cell alloc failed (%d)\n", r); goto failure; } @@ -95,19 +233,20 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp) GFP_KERNEL); if (!isp->isp_res) { r = -ENOMEM; - drm_err(&adev->ddev, - "%s: isp mfd res alloc failed\n", __func__); + drm_err(&adev->ddev, "isp mfd resource alloc failed (%d)\n", r); goto failure; } isp->isp_pdata = kzalloc(sizeof(*isp->isp_pdata), GFP_KERNEL); if (!isp->isp_pdata) { r = -ENOMEM; - drm_err(&adev->ddev, - "%s: isp platform data alloc failed\n", __func__); + drm_err(&adev->ddev, "isp platform data alloc failed (%d)\n", r); goto failure; } + amd_camera_node = (const struct software_node *)acpi_dev->driver_data; + isp4_node = software_node_find_by_name(amd_camera_node, "isp4"); + /* initialize isp platform data */ isp->isp_pdata->adev = (void *)adev; isp->isp_pdata->asic_type = adev->asic_type; @@ -136,14 +275,14 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp) isp->isp_cell[0].num_resources = num_res; isp->isp_cell[0].resources = &isp->isp_res[0]; isp->isp_cell[0].platform_data = isp->isp_pdata; + isp->isp_cell[0].swnode = isp4_node; isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data); /* initialize isp i2c platform data */ isp->isp_i2c_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL); if (!isp->isp_i2c_res) { r = -ENOMEM; - drm_err(&adev->ddev, - "%s: isp mfd res alloc failed\n", __func__); + drm_err(&adev->ddev, "isp mfd res alloc failed (%d)\n", r); goto failure; } @@ -162,8 +301,7 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp) isp->isp_gpio_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL); if (!isp->isp_gpio_res) { r = -ENOMEM; - drm_err(&adev->ddev, - "%s: isp gpio res alloc failed\n", __func__); + drm_err(&adev->ddev, "isp gpio resource alloc failed (%d)\n", r); goto failure; } @@ -179,10 +317,23 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp) isp->isp_cell[2].platform_data = isp->isp_pdata; isp->isp_cell[2].pdata_size = sizeof(struct isp_platform_data); - r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 3); + /* add only amd_isp_capture and amd_isp_i2c_designware to genpd */ + r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 2); if (r) { - drm_err(&adev->ddev, - "%s: add mfd hotplug device failed\n", __func__); + drm_err(&adev->ddev, "add mfd hotplug device failed (%d)\n", r); + goto failure; + } + + r = device_for_each_child(isp->parent, &isp->ispgpd, + isp_genpd_add_device); + if (r) { + drm_err(&adev->ddev, "failed to add devices to genpd (%d)\n", r); + goto failure; + } + + r = mfd_add_hotplug_devices(isp->parent, &isp->isp_cell[2], 1); + if (r) { + drm_err(&adev->ddev, "add pinctl hotplug device failed (%d)\n", r); goto failure; } @@ -201,6 +352,9 @@ failure: static int isp_v4_1_1_hw_fini(struct amdgpu_isp *isp) { + device_for_each_child(isp->parent, NULL, + isp_genpd_remove_device); + mfd_remove_devices(isp->parent); kfree(isp->isp_res); diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c index 9e428e669ada..b5bb7f4d607c 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c +++ 
b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c @@ -557,7 +557,7 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = { .nop = PACKET0(0x81ff, 0), .support_64bit_ptrs = false, .no_user_fence = true, - .extra_dw = 64, + .extra_bytes = 256, .get_rptr = jpeg_v1_0_decode_ring_get_rptr, .get_wptr = jpeg_v1_0_decode_ring_get_wptr, .set_wptr = jpeg_v1_0_decode_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index 4cde8a8bcc83..27c76bd424cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -23,7 +23,6 @@ #include "amdgpu.h" #include "amdgpu_jpeg.h" -#include "amdgpu_cs.h" #include "amdgpu_pm.h" #include "soc15.h" #include "soc15d.h" @@ -118,7 +117,10 @@ static int jpeg_v2_0_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE; + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec); + if (!amdgpu_sriov_vf(adev)) + adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_jpeg_sysfs_reset_mask_init(adev); return r; @@ -764,11 +766,20 @@ static int jpeg_v2_0_process_interrupt(struct amdgpu_device *adev, return 0; } -static int jpeg_v2_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) +static int jpeg_v2_0_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { - jpeg_v2_0_stop(ring->adev); - jpeg_v2_0_start(ring->adev); - return amdgpu_ring_test_helper(ring); + int r; + + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + r = jpeg_v2_0_stop(ring->adev); + if (r) + return r; + r = jpeg_v2_0_start(ring->adev); + if (r) + return r; + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = { @@ -794,7 +805,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = { .get_rptr = jpeg_v2_0_dec_ring_get_rptr, .get_wptr = jpeg_v2_0_dec_ring_get_wptr, .set_wptr = jpeg_v2_0_dec_ring_set_wptr, - .parse_cs = jpeg_v2_dec_ring_parse_cs, + .parse_cs = amdgpu_jpeg_dec_parse_cs, .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + @@ -842,58 +853,3 @@ const struct amdgpu_ip_block_version jpeg_v2_0_ip_block = { .rev = 0, .funcs = &jpeg_v2_0_ip_funcs, }; - -/** - * jpeg_v2_dec_ring_parse_cs - command submission parser - * - * @parser: Command submission parser context - * @job: the job to parse - * @ib: the IB to parse - * - * Parse the command stream, return -EINVAL for invalid packet, - * 0 otherwise - */ -int jpeg_v2_dec_ring_parse_cs(struct amdgpu_cs_parser *parser, - struct amdgpu_job *job, - struct amdgpu_ib *ib) -{ - u32 i, reg, res, cond, type; - struct amdgpu_device *adev = parser->adev; - - for (i = 0; i < ib->length_dw ; i += 2) { - reg = CP_PACKETJ_GET_REG(ib->ptr[i]); - res = CP_PACKETJ_GET_RES(ib->ptr[i]); - cond = CP_PACKETJ_GET_COND(ib->ptr[i]); - type = CP_PACKETJ_GET_TYPE(ib->ptr[i]); - - if (res) /* only support 0 at the moment */ - return -EINVAL; - - switch (type) { - case PACKETJ_TYPE0: - if (cond != PACKETJ_CONDITION_CHECK0 || reg < JPEG_REG_RANGE_START || - reg > JPEG_REG_RANGE_END) { - dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]); - return -EINVAL; - } - break; - case PACKETJ_TYPE3: - if (cond != PACKETJ_CONDITION_CHECK3 || reg < JPEG_REG_RANGE_START || - reg > JPEG_REG_RANGE_END) { - dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]); - return -EINVAL; - } - break; - 
case PACKETJ_TYPE6: - if (ib->ptr[i] == CP_PACKETJ_NOP) - continue; - dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]); - return -EINVAL; - default: - dev_err(adev->dev, "Unknown packet type %d !\n", type); - return -EINVAL; - } - } - - return 0; -} diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h index 63fadda7a673..654e43e83e2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h @@ -45,9 +45,6 @@ #define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000 -#define JPEG_REG_RANGE_START 0x4000 -#define JPEG_REG_RANGE_END 0x41c2 - void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring); void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring); void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, @@ -60,9 +57,6 @@ void jpeg_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr); void jpeg_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count); -int jpeg_v2_dec_ring_parse_cs(struct amdgpu_cs_parser *parser, - struct amdgpu_job *job, - struct amdgpu_ib *ib); extern const struct amdgpu_ip_block_version jpeg_v2_0_ip_block; diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index 8b39e114f3be..20983f126b49 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -167,7 +167,10 @@ static int jpeg_v2_5_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE; + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec); + if (!amdgpu_sriov_vf(adev)) + adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_jpeg_sysfs_reset_mask_init(adev); return r; @@ -643,11 +646,14 @@ static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev, return 0; } -static int jpeg_v2_5_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) +static int jpeg_v2_5_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { + amdgpu_ring_reset_helper_begin(ring, timedout_fence); jpeg_v2_5_stop_inst(ring->adev, ring->me); jpeg_v2_5_start_inst(ring->adev, ring->me); - return amdgpu_ring_test_helper(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = { @@ -690,7 +696,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = { .get_rptr = jpeg_v2_5_dec_ring_get_rptr, .get_wptr = jpeg_v2_5_dec_ring_get_wptr, .set_wptr = jpeg_v2_5_dec_ring_set_wptr, - .parse_cs = jpeg_v2_dec_ring_parse_cs, + .parse_cs = amdgpu_jpeg_dec_parse_cs, .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + @@ -721,7 +727,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = { .get_rptr = jpeg_v2_5_dec_ring_get_rptr, .get_wptr = jpeg_v2_5_dec_ring_get_wptr, .set_wptr = jpeg_v2_5_dec_ring_set_wptr, - .parse_cs = jpeg_v2_dec_ring_parse_cs, + .parse_cs = amdgpu_jpeg_dec_parse_cs, .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c index 2f8510c2986b..d1a011c40ba2 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c @@ -132,7 +132,10 @@ static int 
jpeg_v3_0_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE; + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec); + if (!amdgpu_sriov_vf(adev)) + adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_jpeg_sysfs_reset_mask_init(adev); return r; @@ -555,11 +558,20 @@ static int jpeg_v3_0_process_interrupt(struct amdgpu_device *adev, return 0; } -static int jpeg_v3_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) +static int jpeg_v3_0_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { - jpeg_v3_0_stop(ring->adev); - jpeg_v3_0_start(ring->adev); - return amdgpu_ring_test_helper(ring); + int r; + + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + r = jpeg_v3_0_stop(ring->adev); + if (r) + return r; + r = jpeg_v3_0_start(ring->adev); + if (r) + return r; + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = { @@ -585,7 +597,7 @@ static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = { .get_rptr = jpeg_v3_0_dec_ring_get_rptr, .get_wptr = jpeg_v3_0_dec_ring_get_wptr, .set_wptr = jpeg_v3_0_dec_ring_set_wptr, - .parse_cs = jpeg_v2_dec_ring_parse_cs, + .parse_cs = amdgpu_jpeg_dec_parse_cs, .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c index f17ec5414fd6..33db2c1ae6cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c @@ -143,7 +143,10 @@ static int jpeg_v4_0_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE; + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec); + if (!amdgpu_sriov_vf(adev)) + adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_jpeg_sysfs_reset_mask_init(adev); return r; @@ -720,14 +723,20 @@ static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev, return 0; } -static int jpeg_v4_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) +static int jpeg_v4_0_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { - if (amdgpu_sriov_vf(ring->adev)) - return -EINVAL; + int r; - jpeg_v4_0_stop(ring->adev); - jpeg_v4_0_start(ring->adev); - return amdgpu_ring_test_helper(ring); + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + r = jpeg_v4_0_stop(ring->adev); + if (r) + return r; + r = jpeg_v4_0_start(ring->adev); + if (r) + return r; + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = { @@ -753,7 +762,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = { .get_rptr = jpeg_v4_0_dec_ring_get_rptr, .get_wptr = jpeg_v4_0_dec_ring_get_wptr, .set_wptr = jpeg_v4_0_dec_ring_set_wptr, - .parse_cs = jpeg_v2_dec_ring_parse_cs, + .parse_cs = amdgpu_jpeg_dec_parse_cs, .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c index 79e342d5ab28..aae7328973d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c @@ -216,12 +216,11 @@ static int jpeg_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - 
if (!amdgpu_sriov_vf(adev)) { - adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE; - r = amdgpu_jpeg_sysfs_reset_mask_init(adev); - if (r) - return r; - } + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec); + if (!amdgpu_sriov_vf(adev)) + adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + r = amdgpu_jpeg_sysfs_reset_mask_init(adev); return 0; } @@ -242,8 +241,7 @@ static int jpeg_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block) if (r) return r; - if (!amdgpu_sriov_vf(adev)) - amdgpu_jpeg_sysfs_reset_mask_fini(adev); + amdgpu_jpeg_sysfs_reset_mask_fini(adev); r = amdgpu_jpeg_sw_fini(adev); @@ -446,7 +444,7 @@ static int jpeg_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block) ret = jpeg_v4_0_3_set_powergating_state(ip_block, AMD_PG_STATE_GATE); } - if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG) && !amdgpu_sriov_vf(adev)) amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0); return ret; @@ -1143,14 +1141,17 @@ static void jpeg_v4_0_3_core_stall_reset(struct amdgpu_ring *ring) WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 0x00); } -static int jpeg_v4_0_3_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) +static int jpeg_v4_0_3_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { if (amdgpu_sriov_vf(ring->adev)) return -EOPNOTSUPP; + amdgpu_ring_reset_helper_begin(ring, timedout_fence); jpeg_v4_0_3_core_stall_reset(ring); jpeg_v4_0_3_start_jrbc(ring); - return amdgpu_ring_test_helper(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = { @@ -1176,7 +1177,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = { .get_rptr = jpeg_v4_0_3_dec_ring_get_rptr, .get_wptr = jpeg_v4_0_3_dec_ring_get_wptr, .set_wptr = jpeg_v4_0_3_dec_ring_set_wptr, - .parse_cs = jpeg_v2_dec_ring_parse_cs, + .parse_cs = amdgpu_jpeg_dec_parse_cs, .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c index 974030a5c03c..54fd9c800c40 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c @@ -174,9 +174,10 @@ static int jpeg_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - /* TODO: Add queue reset mask when FW fully supports it */ adev->jpeg.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]); + if (!amdgpu_sriov_vf(adev)) + adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_jpeg_sysfs_reset_mask_init(adev); if (r) return r; @@ -685,7 +686,7 @@ static int jpeg_v4_0_5_set_clockgating_state(struct amdgpu_ip_block *ip_block, enum amd_clockgating_state state) { struct amdgpu_device *adev = ip_block->adev; - bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; + bool enable = state == AMD_CG_STATE_GATE; int i; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { @@ -767,6 +768,22 @@ static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev, return 0; } +static int jpeg_v4_0_5_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) +{ + int r; + + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + r = jpeg_v4_0_5_stop(ring->adev); + if (r) + return r; + r = jpeg_v4_0_5_start(ring->adev); + if (r) + return r; + return amdgpu_ring_reset_helper_end(ring, timedout_fence); +} + static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = { .name = "jpeg_v4_0_5", .early_init = jpeg_v4_0_5_early_init, @@ -790,7 +807,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = { .get_rptr = jpeg_v4_0_5_dec_ring_get_rptr, .get_wptr = jpeg_v4_0_5_dec_ring_get_wptr, .set_wptr = jpeg_v4_0_5_dec_ring_set_wptr, - .parse_cs = jpeg_v2_dec_ring_parse_cs, + .parse_cs = amdgpu_jpeg_dec_parse_cs, .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + @@ -812,6 +829,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = { .emit_wreg = jpeg_v2_0_dec_ring_emit_wreg, .emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, + .reset = jpeg_v4_0_5_ring_reset, }; static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c index 31d213ccbe0a..46bf15dce2bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c @@ -120,13 +120,13 @@ static int jpeg_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - /* TODO: Add queue reset mask when FW fully supports it */ adev->jpeg.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]); + if (!amdgpu_sriov_vf(adev)) + adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_jpeg_sysfs_reset_mask_init(adev); - if (r) - return r; - return 0; + + return r; } /** @@ -584,7 +584,7 @@ static int jpeg_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block, enum amd_clockgating_state state) { struct amdgpu_device *adev = ip_block->adev; - bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; + bool enable = state == AMD_CG_STATE_GATE; if (enable) { if (!jpeg_v5_0_0_is_idle(ip_block)) @@ -644,6 +644,22 @@ static int jpeg_v5_0_0_process_interrupt(struct amdgpu_device *adev, return 0; } +static int jpeg_v5_0_0_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) +{ + int r; + + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + r = jpeg_v5_0_0_stop(ring->adev); + if (r) + return r; + r = jpeg_v5_0_0_start(ring->adev); + if (r) + return r; + return amdgpu_ring_reset_helper_end(ring, timedout_fence); +} + static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = { .name = "jpeg_v5_0_0", .early_init = jpeg_v5_0_0_early_init, @@ -667,7 +683,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = { .get_rptr = jpeg_v5_0_0_dec_ring_get_rptr, .get_wptr = jpeg_v5_0_0_dec_ring_get_wptr, .set_wptr = jpeg_v5_0_0_dec_ring_set_wptr, - .parse_cs = jpeg_v2_dec_ring_parse_cs, + .parse_cs = amdgpu_jpeg_dec_parse_cs, .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + @@ -689,6 +705,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = { .emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg, .emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, + .reset = jpeg_v5_0_0_ring_reset, }; static void jpeg_v5_0_0_set_dec_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c index 3b6f65a25646..baf097d2e1ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c @@ -196,18 +196,25 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block) } } + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) { + r = amdgpu_jpeg_ras_sw_init(adev); + if (r) { + dev_err(adev->dev, "Failed to initialize jpeg ras block!\n"); + return r; + } + } + r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_5_0_1, ARRAY_SIZE(jpeg_reg_list_5_0_1)); if (r) return r; - if (!amdgpu_sriov_vf(adev)) { - adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE; - r = amdgpu_jpeg_sysfs_reset_mask_init(adev); - if (r) - return r; - } + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]); + if (!amdgpu_sriov_vf(adev)) + adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + r = amdgpu_jpeg_sysfs_reset_mask_init(adev); - return 0; + return r; } /** @@ -226,8 +233,7 @@ static int jpeg_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block) if (r) return r; - if (!amdgpu_sriov_vf(adev)) - amdgpu_jpeg_sysfs_reset_mask_fini(adev); + amdgpu_jpeg_sysfs_reset_mask_fini(adev); r = amdgpu_jpeg_sw_fini(adev); @@ -309,7 +315,7 @@ static int jpeg_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block) ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE); } - if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG) && !amdgpu_sriov_vf(adev)) amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0); return ret; @@ -691,7 +697,7 @@ static int jpeg_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block, enum amd_clockgating_state state) { struct amdgpu_device *adev = ip_block->adev; - bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; + bool enable = state == AMD_CG_STATE_GATE; int i; @@ -834,14 +840,14 @@ static void jpeg_v5_0_1_core_stall_reset(struct amdgpu_ring *ring) WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 0x00); } -static int jpeg_v5_0_1_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) +static int jpeg_v5_0_1_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { - if (amdgpu_sriov_vf(ring->adev)) - return -EOPNOTSUPP; - + amdgpu_ring_reset_helper_begin(ring, timedout_fence); jpeg_v5_0_1_core_stall_reset(ring); jpeg_v5_0_1_init_jrbc(ring); - return amdgpu_ring_test_helper(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static const struct amd_ip_funcs jpeg_v5_0_1_ip_funcs = { @@ -1018,8 +1024,9 @@ static int jpeg_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_ban /* reference to smu driver if header file */ static int jpeg_v5_0_1_err_codes[] = { - 16, 17, 18, 19, 20, 21, 22, 23, /* JPEG[0-7][S|D] */ - 24, 25, 26, 27, 28, 29, 30, 31 + 16, 17, 18, 19, 20, 21, 22, 23, /* JPEG[0-9][S|D] */ + 24, 25, 26, 27, 28, 29, 30, 31, + 48, 49, 50, 51, }; static bool jpeg_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, @@ -1060,6 +1067,11 @@ static int jpeg_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_comm if (r) return r; + r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG, + &jpeg_v5_0_1_aca_info, NULL); + if (r) + goto late_fini; + if (amdgpu_ras_is_supported(adev, ras_block->block) && adev->jpeg.inst->ras_poison_irq.funcs) { r = amdgpu_irq_get(adev, &adev->jpeg.inst->ras_poison_irq, 0); @@ -1067,11 +1079,6 @@ static int jpeg_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_comm goto late_fini; } - r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG, - &jpeg_v5_0_1_aca_info, NULL); - if (r) - goto late_fini; - return 0; late_fini: diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c index d6f50b13e2ba..2db9b2c63693 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c @@ -21,6 +21,7 @@ * OTHER DEALINGS IN THE SOFTWARE. 
* */ +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_gfx.h" #include "mes_userqueue.h" @@ -198,6 +199,53 @@ static int mes_userq_create_ctx_space(struct amdgpu_userq_mgr *uq_mgr, return 0; } +static int mes_userq_detect_and_reset(struct amdgpu_device *adev, + int queue_type) +{ + int db_array_size = amdgpu_mes_get_hung_queue_db_array_size(adev); + struct mes_detect_and_reset_queue_input input; + struct amdgpu_usermode_queue *queue; + struct amdgpu_userq_mgr *uqm, *tmp; + unsigned int hung_db_num = 0; + int queue_id, r, i; + u32 db_array[4]; + + if (db_array_size > 4) { + dev_err(adev->dev, "DB array size (%d vs 4) too small\n", + db_array_size); + return -EINVAL; + } + + memset(&input, 0x0, sizeof(struct mes_detect_and_reset_queue_input)); + + input.queue_type = queue_type; + + amdgpu_mes_lock(&adev->mes); + r = amdgpu_mes_detect_and_reset_hung_queues(adev, queue_type, false, + &hung_db_num, db_array); + amdgpu_mes_unlock(&adev->mes); + if (r) { + dev_err(adev->dev, "Failed to detect and reset queues, err (%d)\n", r); + } else if (hung_db_num) { + list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { + idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { + if (queue->queue_type == queue_type) { + for (i = 0; i < hung_db_num; i++) { + if (queue->doorbell_index == db_array[i]) { + queue->state = AMDGPU_USERQ_STATE_HUNG; + atomic_inc(&adev->gpu_reset_counter); + amdgpu_userq_fence_driver_force_completion(queue); + drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL); + } + } + } + } + } + } + + return r; +} + static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, struct drm_amdgpu_userq_in *args_in, struct amdgpu_usermode_queue *queue) @@ -206,6 +254,7 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type]; struct drm_amdgpu_userq_in *mqd_user = args_in; struct amdgpu_mqd_prop *userq_props; + struct amdgpu_gfx_shadow_info shadow_info; int r; /* Structure to initialize MQD for userqueue using generic MQD init function */ @@ -215,13 +264,6 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, return -ENOMEM; } - if (!mqd_user->wptr_va || !mqd_user->rptr_va || - !mqd_user->queue_va || mqd_user->queue_size == 0) { - DRM_ERROR("Invalid MQD parameters for userqueue\n"); - r = -EINVAL; - goto free_props; - } - r = amdgpu_userq_create_object(uq_mgr, &queue->mqd, mqd_hw_default->mqd_size); if (r) { DRM_ERROR("Failed to create MQD object for userqueue\n"); @@ -238,6 +280,8 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, userq_props->doorbell_index = queue->doorbell_index; userq_props->fence_address = queue->fence_drv->gpu_addr; + if (adev->gfx.funcs->get_gfx_shadow_info) + adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true); if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) { struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd; @@ -254,6 +298,10 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, goto free_mqd; } + if (amdgpu_userq_input_va_validate(queue->vm, compute_mqd->eop_va, + max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE))) + goto free_mqd; + userq_props->eop_gpu_addr = compute_mqd->eop_va; userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL; userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM; @@ -281,6 +329,11 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, userq_props->csa_addr = mqd_gfx_v11->csa_va; userq_props->tmz_queue = mqd_user->flags & 
AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE; + + if (amdgpu_userq_input_va_validate(queue->vm, mqd_gfx_v11->shadow_va, + shadow_info.shadow_size)) + goto free_mqd; + kfree(mqd_gfx_v11); } else if (queue->queue_type == AMDGPU_HW_IP_DMA) { struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11; @@ -298,6 +351,10 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, goto free_mqd; } + if (amdgpu_userq_input_va_validate(queue->vm, mqd_sdma_v11->csa_va, + shadow_info.csa_size)) + goto free_mqd; + userq_props->csa_addr = mqd_sdma_v11->csa_va; kfree(mqd_sdma_v11); } @@ -347,9 +404,82 @@ mes_userq_mqd_destroy(struct amdgpu_userq_mgr *uq_mgr, amdgpu_userq_destroy_object(uq_mgr, &queue->mqd); } +static int mes_userq_preempt(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_usermode_queue *queue) +{ + struct amdgpu_device *adev = uq_mgr->adev; + struct mes_suspend_gang_input queue_input; + struct amdgpu_userq_obj *ctx = &queue->fw_obj; + signed long timeout = 2100000; /* 2100 ms */ + u64 fence_gpu_addr; + u32 fence_offset; + u64 *fence_ptr; + int i, r; + + if (queue->state != AMDGPU_USERQ_STATE_MAPPED) + return 0; + r = amdgpu_device_wb_get(adev, &fence_offset); + if (r) + return r; + + fence_gpu_addr = adev->wb.gpu_addr + (fence_offset * 4); + fence_ptr = (u64 *)&adev->wb.wb[fence_offset]; + *fence_ptr = 0; + + memset(&queue_input, 0x0, sizeof(struct mes_suspend_gang_input)); + queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ; + queue_input.suspend_fence_addr = fence_gpu_addr; + queue_input.suspend_fence_value = 1; + amdgpu_mes_lock(&adev->mes); + r = adev->mes.funcs->suspend_gang(&adev->mes, &queue_input); + amdgpu_mes_unlock(&adev->mes); + if (r) { + DRM_ERROR("Failed to suspend gang: %d\n", r); + goto out; + } + + for (i = 0; i < timeout; i++) { + if (*fence_ptr == 1) + goto out; + udelay(1); + } + r = -ETIMEDOUT; + +out: + amdgpu_device_wb_free(adev, fence_offset); + return r; +} + +static int mes_userq_restore(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_usermode_queue *queue) +{ + struct amdgpu_device *adev = uq_mgr->adev; + struct mes_resume_gang_input queue_input; + struct amdgpu_userq_obj *ctx = &queue->fw_obj; + int r; + + if (queue->state == AMDGPU_USERQ_STATE_HUNG) + return -EINVAL; + if (queue->state != AMDGPU_USERQ_STATE_PREEMPTED) + return 0; + + memset(&queue_input, 0x0, sizeof(struct mes_resume_gang_input)); + queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ; + + amdgpu_mes_lock(&adev->mes); + r = adev->mes.funcs->resume_gang(&adev->mes, &queue_input); + amdgpu_mes_unlock(&adev->mes); + if (r) + dev_err(adev->dev, "Failed to resume queue, err (%d)\n", r); + return r; +} + const struct amdgpu_userq_funcs userq_mes_funcs = { .mqd_create = mes_userq_mqd_create, .mqd_destroy = mes_userq_mqd_destroy, .unmap = mes_userq_unmap, .map = mes_userq_map, + .detect_and_reset = mes_userq_detect_and_reset, + .preempt = mes_userq_preempt, + .restore = mes_userq_restore, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 28eb846280dd..e82188431f79 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -66,6 +66,8 @@ static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev); #define GFX_MES_DRAM_SIZE 0x80000 #define MES11_HW_RESOURCE_1_SIZE (128 * AMDGPU_GPU_PAGE_SIZE) +#define MES11_HUNG_DB_OFFSET_ARRAY_SIZE 4 + static void mes_v11_0_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; @@ -641,8 +643,9 @@ static int 
mes_v11_0_misc_op(struct amdgpu_mes *mes, break; case MES_MISC_OP_CHANGE_CONFIG: if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) < 0x63) { - dev_err(mes->adev->dev, "MES FW version must be larger than 0x63 to support limit single process feature.\n"); - return -EINVAL; + dev_warn_once(mes->adev->dev, + "MES FW version must be larger than 0x63 to support limit single process feature.\n"); + return 0; } misc_pkt.opcode = MESAPI_MISC__CHANGE_CONFIG; misc_pkt.change_config.opcode = @@ -710,6 +713,12 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes) mes_set_hw_res_pkt.enable_reg_active_poll = 1; mes_set_hw_res_pkt.enable_level_process_quantum_check = 1; mes_set_hw_res_pkt.oversubscription_timer = 50; + if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x7f) + mes_set_hw_res_pkt.enable_lr_compute_wa = 1; + else + dev_info_once(mes->adev->dev, + "MES FW version must be >= 0x7f to enable LR compute workaround.\n"); + if (amdgpu_mes_log_enable) { mes_set_hw_res_pkt.enable_mes_event_int_logging = 1; mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = @@ -783,6 +792,32 @@ static int mes_v11_0_reset_hw_queue(struct amdgpu_mes *mes, offsetof(union MESAPI__RESET, api_status)); } +static int mes_v11_0_detect_and_reset_hung_queues(struct amdgpu_mes *mes, + struct mes_detect_and_reset_queue_input *input) +{ + union MESAPI__RESET mes_reset_queue_pkt; + + memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt)); + + mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER; + mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET; + mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; + + mes_reset_queue_pkt.queue_type = + convert_to_mes_queue_type(input->queue_type); + mes_reset_queue_pkt.doorbell_offset_addr = + mes->hung_queue_db_array_gpu_addr; + + if (input->detect_only) + mes_reset_queue_pkt.hang_detect_only = 1; + else + mes_reset_queue_pkt.hang_detect_then_reset = 1; + + return mes_v11_0_submit_pkt_and_poll_completion(mes, + &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt), + offsetof(union MESAPI__RESET, api_status)); +} + static const struct amdgpu_mes_funcs mes_v11_0_funcs = { .add_hw_queue = mes_v11_0_add_hw_queue, .remove_hw_queue = mes_v11_0_remove_hw_queue, @@ -792,6 +827,7 @@ static const struct amdgpu_mes_funcs mes_v11_0_funcs = { .resume_gang = mes_v11_0_resume_gang, .misc_op = mes_v11_0_misc_op, .reset_hw_queue = mes_v11_0_reset_hw_queue, + .detect_and_reset_hung_queues = mes_v11_0_detect_and_reset_hung_queues, }; static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev, @@ -1684,6 +1720,8 @@ static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block) struct amdgpu_device *adev = ip_block->adev; int pipe, r; + adev->mes.hung_queue_db_array_size = + MES11_HUNG_DB_OFFSET_ARRAY_SIZE; for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) { if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE) continue; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c index 6b222630f3fa..aff06f06aeee 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c @@ -47,6 +47,8 @@ static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev); #define MES_EOP_SIZE 2048 +#define MES12_HUNG_DB_OFFSET_ARRAY_SIZE 4 + static void mes_v12_0_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; @@ -108,6 +110,7 @@ static const char *mes_v12_0_opcodes[] = { "SET_SE_MODE", "SET_GANG_SUBMIT", "SET_HW_RSRC_1", + "INVALIDATE_TLBS", }; static const 
char *mes_v12_0_misc_opcodes[] = { @@ -567,13 +570,41 @@ static int mes_v12_0_unmap_legacy_queue(struct amdgpu_mes *mes, static int mes_v12_0_suspend_gang(struct amdgpu_mes *mes, struct mes_suspend_gang_input *input) { - return 0; + union MESAPI__SUSPEND mes_suspend_gang_pkt; + + memset(&mes_suspend_gang_pkt, 0, sizeof(mes_suspend_gang_pkt)); + + mes_suspend_gang_pkt.header.type = MES_API_TYPE_SCHEDULER; + mes_suspend_gang_pkt.header.opcode = MES_SCH_API_SUSPEND; + mes_suspend_gang_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; + + mes_suspend_gang_pkt.suspend_all_gangs = input->suspend_all_gangs; + mes_suspend_gang_pkt.gang_context_addr = input->gang_context_addr; + mes_suspend_gang_pkt.suspend_fence_addr = input->suspend_fence_addr; + mes_suspend_gang_pkt.suspend_fence_value = input->suspend_fence_value; + + return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_SCHED_PIPE, + &mes_suspend_gang_pkt, sizeof(mes_suspend_gang_pkt), + offsetof(union MESAPI__SUSPEND, api_status)); } static int mes_v12_0_resume_gang(struct amdgpu_mes *mes, struct mes_resume_gang_input *input) { - return 0; + union MESAPI__RESUME mes_resume_gang_pkt; + + memset(&mes_resume_gang_pkt, 0, sizeof(mes_resume_gang_pkt)); + + mes_resume_gang_pkt.header.type = MES_API_TYPE_SCHEDULER; + mes_resume_gang_pkt.header.opcode = MES_SCH_API_RESUME; + mes_resume_gang_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; + + mes_resume_gang_pkt.resume_all_gangs = input->resume_all_gangs; + mes_resume_gang_pkt.gang_context_addr = input->gang_context_addr; + + return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_SCHED_PIPE, + &mes_resume_gang_pkt, sizeof(mes_resume_gang_pkt), + offsetof(union MESAPI__RESUME, api_status)); } static int mes_v12_0_query_sched_status(struct amdgpu_mes *mes, int pipe) @@ -738,6 +769,11 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe) mes_set_hw_res_pkt.use_different_vmid_compute = 1; mes_set_hw_res_pkt.enable_reg_active_poll = 1; mes_set_hw_res_pkt.enable_level_process_quantum_check = 1; + if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x82) + mes_set_hw_res_pkt.enable_lr_compute_wa = 1; + else + dev_info_once(adev->dev, + "MES FW version must be >= 0x82 to enable LR compute workaround.\n"); /* * Keep oversubscribe timer for sdma . 
When we have unmapped doorbell @@ -879,6 +915,74 @@ static int mes_v12_0_reset_hw_queue(struct amdgpu_mes *mes, offsetof(union MESAPI__RESET, api_status)); } +static int mes_v12_0_detect_and_reset_hung_queues(struct amdgpu_mes *mes, + struct mes_detect_and_reset_queue_input *input) +{ + union MESAPI__RESET mes_reset_queue_pkt; + + memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt)); + + mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER; + mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET; + mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; + + mes_reset_queue_pkt.queue_type = + convert_to_mes_queue_type(input->queue_type); + mes_reset_queue_pkt.doorbell_offset_addr = + mes->hung_queue_db_array_gpu_addr; + + if (input->detect_only) + mes_reset_queue_pkt.hang_detect_only = 1; + else + mes_reset_queue_pkt.hang_detect_then_reset = 1; + + return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_SCHED_PIPE, + &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt), + offsetof(union MESAPI__RESET, api_status)); +} + +static int mes_v12_inv_tlb_convert_hub_id(uint8_t id) +{ + /* + * MES doesn't support invalidate gc_hub on slave xcc individually + * master xcc will invalidate all gc_hub for the partition + */ + if (AMDGPU_IS_GFXHUB(id)) + return 0; + else if (AMDGPU_IS_MMHUB0(id)) + return 1; + else + return -EINVAL; + +} + +static int mes_v12_0_inv_tlbs_pasid(struct amdgpu_mes *mes, + struct mes_inv_tlbs_pasid_input *input) +{ + union MESAPI__INV_TLBS mes_inv_tlbs; + int ret; + + memset(&mes_inv_tlbs, 0, sizeof(mes_inv_tlbs)); + + mes_inv_tlbs.header.type = MES_API_TYPE_SCHEDULER; + mes_inv_tlbs.header.opcode = MES_SCH_API_INV_TLBS; + mes_inv_tlbs.header.dwsize = API_FRAME_SIZE_IN_DWORDS; + + mes_inv_tlbs.invalidate_tlbs.inv_sel = 0; + mes_inv_tlbs.invalidate_tlbs.flush_type = input->flush_type; + mes_inv_tlbs.invalidate_tlbs.inv_sel_id = input->pasid; + + /*convert amdgpu_mes_hub_id to mes expected hub_id */ + ret = mes_v12_inv_tlb_convert_hub_id(input->hub_id); + if (ret < 0) + return -EINVAL; + mes_inv_tlbs.invalidate_tlbs.hub_id = ret; + return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_KIQ_PIPE, + &mes_inv_tlbs, sizeof(mes_inv_tlbs), + offsetof(union MESAPI__INV_TLBS, api_status)); + +} + static const struct amdgpu_mes_funcs mes_v12_0_funcs = { .add_hw_queue = mes_v12_0_add_hw_queue, .remove_hw_queue = mes_v12_0_remove_hw_queue, @@ -888,6 +992,8 @@ static const struct amdgpu_mes_funcs mes_v12_0_funcs = { .resume_gang = mes_v12_0_resume_gang, .misc_op = mes_v12_0_misc_op, .reset_hw_queue = mes_v12_0_reset_hw_queue, + .invalidate_tlbs_pasid = mes_v12_0_inv_tlbs_pasid, + .detect_and_reset_hung_queues = mes_v12_0_detect_and_reset_hung_queues, }; static int mes_v12_0_allocate_ucode_buffer(struct amdgpu_device *adev, @@ -1793,6 +1899,8 @@ static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block) struct amdgpu_device *adev = ip_block->adev; int pipe, r; + adev->mes.hung_queue_db_array_size = + MES12_HUNG_DB_OFFSET_ARRAY_SIZE; for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) { r = amdgpu_mes_init_microcode(adev, pipe); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index 76167fadb292..cc688ae79e84 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -76,6 +76,8 @@ static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmi static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev) { + uint64_t gart_start 
= amdgpu_virt_xgmi_migrate_enabled(adev) ? + adev->gmc.vram_start : adev->gmc.fb_start; uint64_t pt_base; u32 inst_mask; int i; @@ -95,10 +97,10 @@ static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev) if (adev->gmc.pdb0_bo) { WREG32_SOC15(MMHUB, i, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, - (u32)(adev->gmc.fb_start >> 12)); + (u32)(gart_start >> 12)); WREG32_SOC15(MMHUB, i, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, - (u32)(adev->gmc.fb_start >> 44)); + (u32)(gart_start >> 44)); WREG32_SOC15(MMHUB, i, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c index 134c4ec10887..910337dc28d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c @@ -36,40 +36,47 @@ static const char *mmhub_client_ids_v3_0_1[][2] = { [0][0] = "VMC", + [1][0] = "ISPXT", + [2][0] = "ISPIXT", [4][0] = "DCEDMC", [5][0] = "DCEVGA", [6][0] = "MP0", [7][0] = "MP1", - [8][0] = "MPIO", - [16][0] = "HDP", - [17][0] = "LSDMA", - [18][0] = "JPEG", - [19][0] = "VCNU0", - [21][0] = "VSCH", - [22][0] = "VCNU1", - [23][0] = "VCN1", - [32+20][0] = "VCN0", - [2][1] = "DBGUNBIO", + [8][0] = "MPM", + [12][0] = "ISPTNR", + [14][0] = "ISPCRD0", + [15][0] = "ISPCRD1", + [16][0] = "ISPCRD2", + [22][0] = "HDP", + [23][0] = "LSDMA", + [24][0] = "JPEG", + [27][0] = "VSCH", + [28][0] = "VCNU", + [29][0] = "VCN", + [1][1] = "ISPXT", + [2][1] = "ISPIXT", [3][1] = "DCEDWB", [4][1] = "DCEDMC", [5][1] = "DCEVGA", [6][1] = "MP0", [7][1] = "MP1", - [8][1] = "MPIO", - [10][1] = "DBGU0", - [11][1] = "DBGU1", - [12][1] = "DBGU2", - [13][1] = "DBGU3", - [14][1] = "XDP", - [15][1] = "OSSSYS", - [16][1] = "HDP", - [17][1] = "LSDMA", - [18][1] = "JPEG", - [19][1] = "VCNU0", - [20][1] = "VCN0", - [21][1] = "VSCH", - [22][1] = "VCNU1", - [23][1] = "VCN1", + [8][1] = "MPM", + [10][1] = "ISPMWR0", + [11][1] = "ISPMWR1", + [12][1] = "ISPTNR", + [13][1] = "ISPSWR", + [14][1] = "ISPCWR0", + [15][1] = "ISPCWR1", + [16][1] = "ISPCWR2", + [17][1] = "ISPCWR3", + [18][1] = "XDP", + [21][1] = "OSSSYS", + [22][1] = "HDP", + [23][1] = "LSDMA", + [24][1] = "JPEG", + [27][1] = "VSCH", + [28][1] = "VCNU", + [29][1] = "VCN", }; static uint32_t mmhub_v3_0_1_get_invalidate_req(unsigned int vmid, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c index bc3d6c2fc87a..f6fc9778bc30 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c @@ -40,30 +40,129 @@ static const char *mmhub_client_ids_v3_3[][2] = { [0][0] = "VMC", + [1][0] = "ISPXT", + [2][0] = "ISPIXT", [4][0] = "DCEDMC", [6][0] = "MP0", [7][0] = "MP1", [8][0] = "MPM", + [9][0] = "ISPPDPRD", + [10][0] = "ISPCSTATRD", + [11][0] = "ISPBYRPRD", + [12][0] = "ISPRGBPRD", + [13][0] = "ISPMCFPRD", + [14][0] = "ISPMCFPRD1", + [15][0] = "ISPYUVPRD", + [16][0] = "ISPMCSCRD", + [17][0] = "ISPGDCRD", + [18][0] = "ISPLMERD", + [22][0] = "ISPXT1", + [23][0] = "ISPIXT1", [24][0] = "HDP", [25][0] = "LSDMA", [26][0] = "JPEG", [27][0] = "VPE", + [28][0] = "VSCH", [29][0] = "VCNU", [30][0] = "VCN", + [1][1] = "ISPXT", + [2][1] = "ISPIXT", [3][1] = "DCEDWB", [4][1] = "DCEDMC", + [5][1] = "ISPCSISWR", [6][1] = "MP0", [7][1] = "MP1", [8][1] = "MPM", + [9][1] = "ISPPDPWR", + [10][1] = "ISPCSTATWR", + [11][1] = "ISPBYRPWR", + [12][1] = "ISPRGBPWR", + [13][1] = "ISPMCFPWR", + [14][1] = "ISPMWR0", + [15][1] = "ISPYUVPWR", + [16][1] = "ISPMCSCWR", + [17][1] = "ISPGDCWR", + [18][1] = "ISPLMEWR", + [20][1] 
= "ISPMWR2", [21][1] = "OSSSYS", + [22][1] = "ISPXT1", + [23][1] = "ISPIXT1", [24][1] = "HDP", [25][1] = "LSDMA", [26][1] = "JPEG", [27][1] = "VPE", + [28][1] = "VSCH", [29][1] = "VCNU", [30][1] = "VCN", }; +static const char *mmhub_client_ids_v3_3_1[][2] = { + [0][0] = "VMC", + [4][0] = "DCEDMC", + [6][0] = "MP0", + [7][0] = "MP1", + [8][0] = "MPM", + [24][0] = "HDP", + [25][0] = "LSDMA", + [26][0] = "JPEG0", + [27][0] = "VPE0", + [28][0] = "VSCH", + [29][0] = "VCNU0", + [30][0] = "VCN0", + [32+1][0] = "ISPXT", + [32+2][0] = "ISPIXT", + [32+9][0] = "ISPPDPRD", + [32+10][0] = "ISPCSTATRD", + [32+11][0] = "ISPBYRPRD", + [32+12][0] = "ISPRGBPRD", + [32+13][0] = "ISPMCFPRD", + [32+14][0] = "ISPMCFPRD1", + [32+15][0] = "ISPYUVPRD", + [32+16][0] = "ISPMCSCRD", + [32+17][0] = "ISPGDCRD", + [32+18][0] = "ISPLMERD", + [32+22][0] = "ISPXT1", + [32+23][0] = "ISPIXT1", + [32+26][0] = "JPEG1", + [32+27][0] = "VPE1", + [32+29][0] = "VCNU1", + [32+30][0] = "VCN1", + [3][1] = "DCEDWB", + [4][1] = "DCEDMC", + [6][1] = "MP0", + [7][1] = "MP1", + [8][1] = "MPM", + [21][1] = "OSSSYS", + [24][1] = "HDP", + [25][1] = "LSDMA", + [26][1] = "JPEG0", + [27][1] = "VPE0", + [28][1] = "VSCH", + [29][1] = "VCNU0", + [30][1] = "VCN0", + [32+1][1] = "ISPXT", + [32+2][1] = "ISPIXT", + [32+5][1] = "ISPCSISWR", + [32+9][1] = "ISPPDPWR", + [32+10][1] = "ISPCSTATWR", + [32+11][1] = "ISPBYRPWR", + [32+12][1] = "ISPRGBPWR", + [32+13][1] = "ISPMCFPWR", + [32+14][1] = "ISPMWR0", + [32+15][1] = "ISPYUVPWR", + [32+16][1] = "ISPMCSCWR", + [32+17][1] = "ISPGDCWR", + [32+18][1] = "ISPLMEWR", + [32+19][1] = "ISPMWR1", + [32+20][1] = "ISPMWR2", + [32+22][1] = "ISPXT1", + [32+23][1] = "ISPIXT1", + [32+26][1] = "JPEG1", + [32+27][1] = "VPE1", + [32+29][1] = "VCNU1", + [32+30][1] = "VCN1", +}; + static uint32_t mmhub_v3_3_get_invalidate_req(unsigned int vmid, uint32_t flush_type) { @@ -102,12 +201,16 @@ mmhub_v3_3_print_l2_protection_fault_status(struct amdgpu_device *adev, switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { case IP_VERSION(3, 3, 0): - case IP_VERSION(3, 3, 1): case IP_VERSION(3, 3, 2): mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3) ? mmhub_client_ids_v3_3[cid][rw] : cid == 0x140 ? "UMSCH" : NULL; break; + case IP_VERSION(3, 3, 1): + mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3_1) ? + mmhub_client_ids_v3_3_1[cid][rw] : + cid == 0x140 ? 
"UMSCH" : NULL; + break; default: mmhub_cid = NULL; break; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c index f2ab5001b492..951998454b25 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c @@ -37,39 +37,31 @@ static const char *mmhub_client_ids_v4_1_0[][2] = { [0][0] = "VMC", [4][0] = "DCEDMC", - [5][0] = "DCEVGA", [6][0] = "MP0", [7][0] = "MP1", [8][0] = "MPIO", - [16][0] = "HDP", - [17][0] = "LSDMA", - [18][0] = "JPEG", - [19][0] = "VCNU0", - [21][0] = "VSCH", - [22][0] = "VCNU1", - [23][0] = "VCN1", - [32+20][0] = "VCN0", - [2][1] = "DBGUNBIO", + [16][0] = "LSDMA", + [17][0] = "JPEG", + [19][0] = "VCNU", + [22][0] = "VSCH", + [23][0] = "HDP", + [32+23][0] = "VCNRD", [3][1] = "DCEDWB", [4][1] = "DCEDMC", - [5][1] = "DCEVGA", [6][1] = "MP0", [7][1] = "MP1", [8][1] = "MPIO", [10][1] = "DBGU0", [11][1] = "DBGU1", - [12][1] = "DBGU2", - [13][1] = "DBGU3", + [12][1] = "DBGUNBIO", [14][1] = "XDP", [15][1] = "OSSSYS", - [16][1] = "HDP", - [17][1] = "LSDMA", - [18][1] = "JPEG", - [19][1] = "VCNU0", - [20][1] = "VCN0", - [21][1] = "VSCH", - [22][1] = "VCNU1", - [23][1] = "VCN1", + [16][1] = "LSDMA", + [17][1] = "JPEG", + [18][1] = "VCNWR", + [19][1] = "VCNU", + [22][1] = "VSCH", + [23][1] = "HDP", }; static uint32_t mmhub_v4_1_0_get_invalidate_req(unsigned int vmid, diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 48101a34e049..9a40107a0869 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -292,14 +292,32 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) } } -static void xgpu_ai_mailbox_bad_pages_work(struct work_struct *work) +static void xgpu_ai_mailbox_req_bad_pages_work(struct work_struct *work) { - struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, bad_pages_work); + struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, req_bad_pages_work); struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt); if (down_read_trylock(&adev->reset_domain->sem)) { amdgpu_virt_fini_data_exchange(adev); amdgpu_virt_request_bad_pages(adev); + up_read(&adev->reset_domain->sem); + } +} + +/** + * xgpu_ai_mailbox_handle_bad_pages_work - Reinitialize the data exchange region to get fresh bad page information + * @work: pointer to the work_struct + * + * This work handler is triggered when bad pages are ready, and it reinitializes + * the data exchange region to retrieve updated bad page information from the host. 
+ */ +static void xgpu_ai_mailbox_handle_bad_pages_work(struct work_struct *work) +{ + struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, handle_bad_pages_work); + struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt); + + if (down_read_trylock(&adev->reset_domain->sem)) { + amdgpu_virt_fini_data_exchange(adev); amdgpu_virt_init_data_exchange(adev); up_read(&adev->reset_domain->sem); } @@ -327,10 +345,15 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev, struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); switch (event) { + case IDH_RAS_BAD_PAGES_READY: + xgpu_ai_mailbox_send_ack(adev); + if (amdgpu_sriov_runtime(adev)) + schedule_work(&adev->virt.handle_bad_pages_work); + break; case IDH_RAS_BAD_PAGES_NOTIFICATION: xgpu_ai_mailbox_send_ack(adev); if (amdgpu_sriov_runtime(adev)) - schedule_work(&adev->virt.bad_pages_work); + schedule_work(&adev->virt.req_bad_pages_work); break; case IDH_UNRECOV_ERR_NOTIFICATION: xgpu_ai_mailbox_send_ack(adev); @@ -415,7 +438,8 @@ int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev) } INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work); - INIT_WORK(&adev->virt.bad_pages_work, xgpu_ai_mailbox_bad_pages_work); + INIT_WORK(&adev->virt.req_bad_pages_work, xgpu_ai_mailbox_req_bad_pages_work); + INIT_WORK(&adev->virt.handle_bad_pages_work, xgpu_ai_mailbox_handle_bad_pages_work); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index f6d8597452ed..e5282a5d05d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -202,8 +202,8 @@ send_request: case IDH_REQ_RAS_CPER_DUMP: event = IDH_RAS_CPER_DUMP_READY; break; - case IDH_REQ_RAS_BAD_PAGES: - event = IDH_RAS_BAD_PAGES_READY; + case IDH_REQ_RAS_CHK_CRITI: + event = IDH_REQ_RAS_CHK_CRITI_READY; break; default: break; @@ -359,14 +359,32 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work) } } -static void xgpu_nv_mailbox_bad_pages_work(struct work_struct *work) +static void xgpu_nv_mailbox_req_bad_pages_work(struct work_struct *work) { - struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, bad_pages_work); + struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, req_bad_pages_work); struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt); if (down_read_trylock(&adev->reset_domain->sem)) { amdgpu_virt_fini_data_exchange(adev); amdgpu_virt_request_bad_pages(adev); + up_read(&adev->reset_domain->sem); + } +} + +/** + * xgpu_nv_mailbox_handle_bad_pages_work - Reinitialize the data exchange region to get fresh bad page information + * @work: pointer to the work_struct + * + * This work handler is triggered when bad pages are ready, and it reinitializes + * the data exchange region to retrieve updated bad page information from the host. 
+ */ +static void xgpu_nv_mailbox_handle_bad_pages_work(struct work_struct *work) +{ + struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, handle_bad_pages_work); + struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt); + + if (down_read_trylock(&adev->reset_domain->sem)) { + amdgpu_virt_fini_data_exchange(adev); amdgpu_virt_init_data_exchange(adev); up_read(&adev->reset_domain->sem); } @@ -397,10 +415,15 @@ static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev, struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); switch (event) { + case IDH_RAS_BAD_PAGES_READY: + xgpu_nv_mailbox_send_ack(adev); + if (amdgpu_sriov_runtime(adev)) + schedule_work(&adev->virt.handle_bad_pages_work); + break; case IDH_RAS_BAD_PAGES_NOTIFICATION: xgpu_nv_mailbox_send_ack(adev); if (amdgpu_sriov_runtime(adev)) - schedule_work(&adev->virt.bad_pages_work); + schedule_work(&adev->virt.req_bad_pages_work); break; case IDH_UNRECOV_ERR_NOTIFICATION: xgpu_nv_mailbox_send_ack(adev); @@ -485,7 +508,8 @@ int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev) } INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work); - INIT_WORK(&adev->virt.bad_pages_work, xgpu_nv_mailbox_bad_pages_work); + INIT_WORK(&adev->virt.req_bad_pages_work, xgpu_nv_mailbox_req_bad_pages_work); + INIT_WORK(&adev->virt.handle_bad_pages_work, xgpu_nv_mailbox_handle_bad_pages_work); return 0; } @@ -535,6 +559,16 @@ static int xgpu_nv_req_ras_bad_pages(struct amdgpu_device *adev) return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_BAD_PAGES); } +static int xgpu_nv_check_vf_critical_region(struct amdgpu_device *adev, u64 addr) +{ + uint32_t addr_hi, addr_lo; + + addr_hi = (uint32_t)(addr >> 32); + addr_lo = (uint32_t)(addr & 0xFFFFFFFF); + return xgpu_nv_send_access_requests_with_param( + adev, IDH_REQ_RAS_CHK_CRITI, addr_hi, addr_lo, 0); +} + const struct amdgpu_virt_ops xgpu_nv_virt_ops = { .req_full_gpu = xgpu_nv_request_full_gpu_access, .rel_full_gpu = xgpu_nv_release_full_gpu_access, @@ -548,4 +582,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = { .req_ras_err_count = xgpu_nv_req_ras_err_count, .req_ras_cper_dump = xgpu_nv_req_ras_cper_dump, .req_bad_pages = xgpu_nv_req_ras_bad_pages, + .req_ras_chk_criti = xgpu_nv_check_vf_critical_region }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h index 5808689562cc..c1083e5e41e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h @@ -43,6 +43,7 @@ enum idh_request { IDH_REQ_RAS_ERROR_COUNT = 203, IDH_REQ_RAS_CPER_DUMP = 204, IDH_REQ_RAS_BAD_PAGES = 205, + IDH_REQ_RAS_CHK_CRITI = 206 }; enum idh_event { @@ -62,6 +63,7 @@ enum idh_event { IDH_RAS_BAD_PAGES_READY = 15, IDH_RAS_BAD_PAGES_NOTIFICATION = 16, IDH_UNRECOV_ERR_NOTIFICATION = 17, + IDH_REQ_RAS_CHK_CRITI_READY = 18, IDH_TEXT_MESSAGE = 255, }; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index d5002ff931d8..860bc5cb03c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -151,9 +151,9 @@ static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instan * BIF_SDMA0_DOORBELL_RANGE: 0x3bc0 * BIF_SDMA1_DOORBELL_RANGE: 0x3bc4 * BIF_SDMA2_DOORBELL_RANGE: 0x3bd8 -+ * BIF_SDMA4_DOORBELL_RANGE: -+ * ARCTURUS: 0x3be0 -+ * ALDEBARAN: 0x3be4 + * BIF_SDMA4_DOORBELL_RANGE: + * ARCTURUS: 0x3be0 + * ALDEBARAN: 0x3be4 */ if (adev->asic_type == CHIP_ALDEBARAN && instance == 4) reg = instance + 0x4 + 0x1 + diff --git 
a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index a376f072700d..1c22bc11c1f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -31,9 +31,6 @@ #define NPS_MODE_MASK 0x000000FFL -/* Core 0 Port 0 counter */ -#define smnPCIEP_NAK_COUNTER 0x1A340218 - static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev) { WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL, @@ -467,22 +464,6 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev) } } -static u64 nbio_v7_9_get_pcie_replay_count(struct amdgpu_device *adev) -{ - u32 val, nak_r, nak_g; - - if (adev->flags & AMD_IS_APU) - return 0; - - /* Get the number of NAKs received and generated */ - val = RREG32_PCIE(smnPCIEP_NAK_COUNTER); - nak_r = val & 0xFFFF; - nak_g = val >> 16; - - /* Add the total number of NAKs, i.e the number of replays */ - return (nak_r + nak_g); -} - #define MMIO_REG_HOLE_OFFSET 0x1A000 static void nbio_v7_9_set_reg_remap(struct amdgpu_device *adev) @@ -524,7 +505,6 @@ const struct amdgpu_nbio_funcs nbio_v7_9_funcs = { .get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode, .is_nps_switch_requested = nbio_v7_9_is_nps_switch_requested, .init_registers = nbio_v7_9_init_registers, - .get_pcie_replay_count = nbio_v7_9_get_pcie_replay_count, .set_reg_remap = nbio_v7_9_set_reg_remap, }; diff --git a/drivers/gpu/drm/amd/amdgpu/nv.h b/drivers/gpu/drm/amd/amdgpu/nv.h index 83e9782aef39..8f4817404f10 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.h +++ b/drivers/gpu/drm/amd/amdgpu/nv.h @@ -31,5 +31,6 @@ extern const struct amdgpu_ip_block_version nv_common_ip_block; void nv_grbm_select(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue, u32 vmid); void nv_set_virt_ops(struct amdgpu_device *adev); +int cyan_skillfish_reg_base_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h index f4a91b126c73..73f87131a7e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h +++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h @@ -106,7 +106,9 @@ enum psp_gfx_cmd_id /*IDs of performance monitoring/profiling*/ GFX_CMD_ID_CONFIG_SQ_PERFMON = 0x00000046, /* Config CGTT_SQ_CLK_CTRL */ /* Dynamic memory partitioninig (NPS mode change)*/ - GFX_CMD_ID_FB_NPS_MODE = 0x00000048, /* Configure memory partitioning mode */ + GFX_CMD_ID_FB_NPS_MODE = 0x00000048, /* Configure memory partitioning mode */ + GFX_CMD_ID_FB_FW_RESERV_ADDR = 0x00000050, /* Query FW reservation addr */ + GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR = 0x00000051, /* Query FW reservation extended addr */ }; /* PSP boot config sub-commands */ @@ -404,11 +406,19 @@ struct psp_gfx_uresp_bootcfg { uint32_t boot_cfg; /* boot config data */ }; +/* Command-specific response for fw reserve info */ +struct psp_gfx_uresp_fw_reserve_info { + uint32_t reserve_base_address_hi; + uint32_t reserve_base_address_lo; + uint32_t reserve_size; +}; + /* Union of command-specific responses for GPCOM ring. */ union psp_gfx_uresp { struct psp_gfx_uresp_reserved reserved; struct psp_gfx_uresp_bootcfg boot_cfg; struct psp_gfx_uresp_fwar_db_info fwar_db_info; + struct psp_gfx_uresp_fw_reserve_info fw_reserve_info; }; /* Structure of GFX Response buffer. 
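Editor's aside, not part of the patch above: the new GFX_CMD_ID_FB_FW_RESERV_ADDR response reports the firmware reservation as a split hi/lo address pair plus a size. A minimal sketch of how a caller might fold the two 32-bit halves back into a 64-bit base address follows; the helper name is hypothetical, and the assumption that reserve_base_address_hi carries bits 63:32 is inferred from the field names only.

/* Illustrative only; mirrors the struct added to psp_gfx_if.h in this patch. */
static inline uint64_t fw_reserve_base_example(const struct psp_gfx_uresp_fw_reserve_info *info)
{
	/* hi word is assumed to hold bits 63:32, lo word bits 31:0 */
	return ((uint64_t)info->reserve_base_address_hi << 32) |
	       info->reserve_base_address_lo;
}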
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index 145186a1e48f..3584b8c18fd9 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -94,7 +94,7 @@ static int psp_v10_0_ring_create(struct psp_context *psp, /* Wait for response flag (bit 31) in C2PMSG_64 */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); return ret; } @@ -115,7 +115,7 @@ static int psp_v10_0_ring_stop(struct psp_context *psp, /* Wait for response flag (bit 31) in C2PMSG_64 */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 215543575f47..64b240b51f1a 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -149,14 +149,12 @@ static int psp_v11_0_wait_for_bootloader(struct psp_context *psp) int ret; int retry_loop; - for (retry_loop = 0; retry_loop < 10; retry_loop++) { + for (retry_loop = 0; retry_loop < 20; retry_loop++) { /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ - ret = psp_wait_for(psp, - SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, - 0x80000000, - false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000, 0x8000FFFF, PSP_WAITREG_NOVERBOSE); if (ret == 0) return 0; @@ -252,8 +250,8 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp) /* there might be handshake issue with hardware which needs delay */ mdelay(20); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81), - RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), - 0, true); + RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0, + PSP_WAITREG_CHANGED); return ret; } @@ -277,11 +275,13 @@ static int psp_v11_0_ring_stop(struct psp_context *psp, /* Wait for response flag (bit 31) */ if (amdgpu_sriov_vf(adev)) - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); else - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); return ret; } @@ -317,13 +317,15 @@ static int psp_v11_0_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_101 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } else { /* Wait for sOS ready for ring creation */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0); if (ret) { DRM_ERROR("Failed to wait for sOS ready for ring creation\n"); return ret; @@ -347,8 +349,9 @@ static int psp_v11_0_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_64 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, 
mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } return ret; @@ -381,7 +384,8 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp) offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64); - ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for(psp, offset, MBOX_TOS_READY_FLAG, + MBOX_TOS_READY_MASK, 0); if (ret) { DRM_INFO("psp is not working correctly before mode1 reset!\n"); @@ -393,17 +397,6 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp) msleep(500); - offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33); - - ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false); - - if (ret) { - DRM_INFO("psp mode 1 reset failed!\n"); - return -EINVAL; - } - - DRM_INFO("psp mode1 reset succeed \n"); - return 0; } @@ -421,8 +414,9 @@ static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg) max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout; for (i = 0; i < max_wait; i++) { - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE); if (ret == 0) break; } @@ -601,7 +595,7 @@ static int psp_v11_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20)); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); if (ret) return ret; @@ -638,7 +632,7 @@ static int psp_v11_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver) WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); if (!ret) *fw_ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36); @@ -659,7 +653,8 @@ static const struct psp_funcs psp_v11_0_funcs = { .ring_get_wptr = psp_v11_0_ring_get_wptr, .ring_set_wptr = psp_v11_0_ring_set_wptr, .load_usbc_pd_fw = psp_v11_0_load_usbc_pd_fw, - .read_usbc_pd_fw = psp_v11_0_read_usbc_pd_fw + .read_usbc_pd_fw = psp_v11_0_read_usbc_pd_fw, + .wait_for_bootloader = psp_v11_0_wait_for_bootloader }; void psp_v11_0_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c index 5697760a819b..93787a90d598 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c @@ -41,8 +41,9 @@ static int psp_v11_0_8_ring_stop(struct psp_context *psp, /* there might be handshake issue with hardware which needs delay */ mdelay(20); /* Wait for response flag (bit 31) */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } else { /* Write the ring destroy command*/ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, @@ -50,8 +51,9 @@ static int psp_v11_0_8_ring_stop(struct psp_context *psp, /* there might be handshake issue with hardware which needs delay */ mdelay(20); /* Wait for response flag (bit 31) */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 
0, mmMP0_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } return ret; @@ -87,13 +89,15 @@ static int psp_v11_0_8_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_101 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } else { /* Wait for sOS ready for ring creation */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0); if (ret) { DRM_ERROR("Failed to wait for trust OS ready for ring creation\n"); return ret; @@ -117,8 +121,9 @@ static int psp_v11_0_8_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_64 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c index 80153f837470..4c6450d62299 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c @@ -82,7 +82,7 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp) /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); if (ret) return ret; @@ -97,7 +97,7 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp) psp_gfxdrv_command_reg); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); return ret; } @@ -118,7 +118,7 @@ static int psp_v12_0_bootloader_load_sos(struct psp_context *psp) /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); if (ret) return ret; @@ -133,8 +133,8 @@ static int psp_v12_0_bootloader_load_sos(struct psp_context *psp) psp_gfxdrv_command_reg); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81), - RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), - 0, true); + RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0, + PSP_WAITREG_CHANGED); return ret; } @@ -163,7 +163,7 @@ static int psp_v12_0_ring_create(struct psp_context *psp, /* Wait for response flag (bit 31) in C2PMSG_64 */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); return ret; } @@ -184,11 +184,13 @@ static int psp_v12_0_ring_stop(struct psp_context *psp, /* Wait for response flag (bit 31) */ if (amdgpu_sriov_vf(adev)) - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); else - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + 
MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); return ret; } @@ -219,7 +221,8 @@ static int psp_v12_0_mode1_reset(struct psp_context *psp) offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64); - ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for(psp, offset, MBOX_TOS_READY_FLAG, + MBOX_TOS_READY_MASK, 0); if (ret) { DRM_INFO("psp is not working correctly before mode1 reset!\n"); @@ -233,7 +236,8 @@ static int psp_v12_0_mode1_reset(struct psp_context *psp) offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33); - ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false); + ret = psp_wait_for(psp, offset, MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, + 0); if (ret) { DRM_INFO("psp mode 1 reset failed!\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c index ead616c11705..af4a7d7c4abd 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c @@ -182,7 +182,7 @@ static int psp_v13_0_wait_for_vmbx_ready(struct psp_context *psp) ready having bit 31 of C2PMSG_33 set to 1 */ ret = psp_wait_for( psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_33), - 0x80000000, 0xffffffff, false); + 0x80000000, 0xffffffff, PSP_WAITREG_NOVERBOSE); if (ret == 0) break; @@ -213,7 +213,7 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp) for (retry_loop = 0; retry_loop < retry_cnt; retry_loop++) { ret = psp_wait_for( psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), - 0x80000000, 0xffffffff, false); + 0x80000000, 0xffffffff, PSP_WAITREG_NOVERBOSE); if (ret == 0) return 0; @@ -362,8 +362,8 @@ static int psp_v13_0_bootloader_load_sos(struct psp_context *psp) /* there might be handshake issue with hardware which needs delay */ mdelay(20); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_81), - RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81), - 0, true); + RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81), 0, + PSP_WAITREG_CHANGED); if (!ret) psp_v13_0_init_sos_version(psp); @@ -384,8 +384,9 @@ static int psp_v13_0_ring_stop(struct psp_context *psp, /* there might be handshake issue with hardware which needs delay */ mdelay(20); /* Wait for response flag (bit 31) */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } else { /* Write the ring destroy command*/ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_64, @@ -393,8 +394,9 @@ static int psp_v13_0_ring_stop(struct psp_context *psp, /* there might be handshake issue with hardware which needs delay */ mdelay(20); /* Wait for response flag (bit 31) */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } return ret; @@ -430,13 +432,15 @@ static int psp_v13_0_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_101 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } else { /* Wait for sOS ready for ring creation */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, 
SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), + MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0); if (ret) { DRM_ERROR("Failed to wait for trust OS ready for ring creation\n"); return ret; @@ -460,8 +464,9 @@ static int psp_v13_0_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_64 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } return ret; @@ -524,8 +529,9 @@ static int psp_v13_0_memory_training_send_msg(struct psp_context *psp, int msg) max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout; for (i = 0; i < max_wait; i++) { - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE); if (ret == 0) break; } @@ -677,7 +683,7 @@ static int psp_v13_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20)); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); if (ret) return ret; @@ -714,7 +720,7 @@ static int psp_v13_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver) WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); if (!ret) *fw_ver = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36); @@ -739,8 +745,9 @@ static int psp_v13_0_exec_spi_cmd(struct psp_context *psp, int cmd) ret = psp_wait_for_spirom_update(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115), MBOX_READY_FLAG, MBOX_READY_MASK, PSP_SPIROM_UPDATE_TIMEOUT); else - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115), - MBOX_READY_FLAG, MBOX_READY_MASK, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115), + MBOX_READY_FLAG, MBOX_READY_MASK, 0); if (ret) { dev_err(adev->dev, "SPI cmd %x timed out, ret = %d", cmd, ret); return ret; @@ -764,7 +771,7 @@ static int psp_v13_0_update_spirom(struct psp_context *psp, /* Confirm PSP is ready to start */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115), - MBOX_READY_FLAG, MBOX_READY_MASK, false); + MBOX_READY_FLAG, MBOX_READY_MASK, 0); if (ret) { dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret); return ret; @@ -799,7 +806,7 @@ static int psp_v13_0_dump_spirom(struct psp_context *psp, /* Confirm PSP is ready to start */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115), - MBOX_READY_FLAG, MBOX_READY_MASK, false); + MBOX_READY_FLAG, MBOX_READY_MASK, 0); if (ret) { dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret); return ret; @@ -926,8 +933,9 @@ static int psp_v13_0_reg_program_no_ring(struct psp_context *psp, uint32_t val, WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_102, id); WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_103, val); - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), + 0x80000000, 0x80000000, 0); } return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c 
index eaa5512a21da..5f39a2edcc95 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c @@ -76,11 +76,9 @@ static int psp_v13_0_4_wait_for_bootloader(struct psp_context *psp) for (retry_loop = 0; retry_loop < 10; retry_loop++) { /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ - ret = psp_wait_for(psp, - SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), - 0x80000000, - 0x80000000, - false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE); if (ret == 0) return 0; @@ -185,8 +183,8 @@ static int psp_v13_0_4_bootloader_load_sos(struct psp_context *psp) /* there might be handshake issue with hardware which needs delay */ mdelay(20); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_81), - RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81), - 0, true); + RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81), 0, + PSP_WAITREG_CHANGED); return ret; } @@ -204,8 +202,9 @@ static int psp_v13_0_4_ring_stop(struct psp_context *psp, /* there might be handshake issue with hardware which needs delay */ mdelay(20); /* Wait for response flag (bit 31) */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } else { /* Write the ring destroy command*/ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_64, @@ -213,8 +212,9 @@ static int psp_v13_0_4_ring_stop(struct psp_context *psp, /* there might be handshake issue with hardware which needs delay */ mdelay(20); /* Wait for response flag (bit 31) */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } return ret; @@ -250,13 +250,15 @@ static int psp_v13_0_4_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_101 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } else { /* Wait for sOS ready for ring creation */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), + MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0); if (ret) { DRM_ERROR("Failed to wait for trust OS ready for ring creation\n"); return ret; @@ -280,8 +282,9 @@ static int psp_v13_0_4_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_64 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c index 256288c6cd78..38dfc5c19f2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c @@ -34,7 +34,9 @@ MODULE_FIRMWARE("amdgpu/psp_14_0_2_sos.bin"); MODULE_FIRMWARE("amdgpu/psp_14_0_2_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos.bin"); +MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos_kicker.bin"); 
MODULE_FIRMWARE("amdgpu/psp_14_0_3_ta.bin"); +MODULE_FIRMWARE("amdgpu/psp_14_0_3_ta_kicker.bin"); MODULE_FIRMWARE("amdgpu/psp_14_0_5_toc.bin"); MODULE_FIRMWARE("amdgpu/psp_14_0_5_ta.bin"); @@ -109,11 +111,9 @@ static int psp_v14_0_wait_for_bootloader(struct psp_context *psp) for (retry_loop = 0; retry_loop < 10; retry_loop++) { /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ - ret = psp_wait_for(psp, - SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), - 0x80000000, - 0x80000000, - false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), + 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE); if (ret == 0) return 0; @@ -228,9 +228,10 @@ static int psp_v14_0_bootloader_load_sos(struct psp_context *psp) /* there might be handshake issue with hardware which needs delay */ mdelay(20); - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_81), - RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81), - 0, true); + ret = psp_wait_for(psp, + SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_81), + RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81), 0, + PSP_WAITREG_CHANGED); return ret; } @@ -248,8 +249,9 @@ static int psp_v14_0_ring_stop(struct psp_context *psp, /* there might be handshake issue with hardware which needs delay */ mdelay(20); /* Wait for response flag (bit 31) */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } else { /* Write the ring destroy command*/ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_64, @@ -257,8 +259,9 @@ static int psp_v14_0_ring_stop(struct psp_context *psp, /* there might be handshake issue with hardware which needs delay */ mdelay(20); /* Wait for response flag (bit 31) */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } return ret; @@ -294,13 +297,15 @@ static int psp_v14_0_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_101 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } else { /* Wait for sOS ready for ring creation */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64), + MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0); if (ret) { DRM_ERROR("Failed to wait for trust OS ready for ring creation\n"); return ret; @@ -324,8 +329,9 @@ static int psp_v14_0_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_64 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } return ret; @@ -388,8 +394,9 @@ static int psp_v14_0_memory_training_send_msg(struct psp_context *psp, int msg) max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout; for (i = 0; i < max_wait; i++) { - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), - 
0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), + 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE); if (ret == 0) break; } @@ -540,8 +547,9 @@ static int psp_v14_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc */ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20)); - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + ret = psp_wait_for(psp, + SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), + 0x80000000, 0x80000000, 0); if (ret) return ret; @@ -577,8 +585,9 @@ static int psp_v14_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver) WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER); - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + ret = psp_wait_for(psp, + SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), + 0x80000000, 0x80000000, 0); if (!ret) *fw_ver = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36); @@ -602,11 +611,13 @@ static int psp_v14_0_exec_spi_cmd(struct psp_context *psp, int cmd) ret = psp_wait_for_spirom_update(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115), MBOX_READY_FLAG, MBOX_READY_MASK, PSP_SPIROM_UPDATE_TIMEOUT); else - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115), - MBOX_READY_FLAG, MBOX_READY_MASK, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115), + MBOX_READY_FLAG, MBOX_READY_MASK, 0); - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115), - MBOX_READY_FLAG, MBOX_READY_MASK, false); + ret = psp_wait_for(psp, + SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115), + MBOX_READY_FLAG, MBOX_READY_MASK, 0); if (ret) { dev_err(adev->dev, "SPI cmd %x timed out, ret = %d", cmd, ret); return ret; @@ -629,8 +640,9 @@ static int psp_v14_0_update_spirom(struct psp_context *psp, int ret; /* Confirm PSP is ready to start */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115), - MBOX_READY_FLAG, MBOX_READY_MASK, false); + ret = psp_wait_for(psp, + SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115), + MBOX_READY_FLAG, MBOX_READY_MASK, 0); if (ret) { dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret); return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index f6b75e3e47ff..833830bc3e2e 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -91,7 +91,7 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp) /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); if (ret) return ret; @@ -109,7 +109,7 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp) mdelay(20); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); return ret; } @@ -130,7 +130,7 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp) /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); if (ret) return ret; @@ -147,8 +147,8 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp) /* there 
might be handshake issue with hardware which needs delay */ mdelay(20); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81), - RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), - 0, true); + RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0, + PSP_WAITREG_CHANGED); return ret; } @@ -168,7 +168,7 @@ static void psp_v3_1_reroute_ih(struct psp_context *psp) mdelay(20); psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); + 0x80000000, 0x8000FFFF, 0); /* Change IH ring for UMC */ tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b); @@ -180,7 +180,7 @@ static void psp_v3_1_reroute_ih(struct psp_context *psp) mdelay(20); psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); + 0x80000000, 0x8000FFFF, 0); } static int psp_v3_1_ring_create(struct psp_context *psp, @@ -217,9 +217,9 @@ static int psp_v3_1_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_101 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, - mmMP0_SMN_C2PMSG_101), 0x80000000, - 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + 0x80000000, 0x8000FFFF, 0); } else { /* Write low address of the ring to C2PMSG_69 */ @@ -240,10 +240,9 @@ static int psp_v3_1_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_64 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, - mmMP0_SMN_C2PMSG_64), 0x80000000, - 0x8000FFFF, false); - + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + 0x80000000, 0x8000FFFF, 0); } return ret; } @@ -267,11 +266,13 @@ static int psp_v3_1_ring_stop(struct psp_context *psp, /* Wait for response flag (bit 31) */ if (amdgpu_sriov_vf(adev)) - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + 0x80000000, 0x80000000, 0); else - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + 0x80000000, 0x80000000, 0); return ret; } @@ -311,7 +312,7 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp) offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64); - ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, 0); if (ret) { DRM_INFO("psp is not working correctly before mode1 reset!\n"); @@ -325,7 +326,7 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp) offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33); - ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false); + ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, 0); if (ret) { DRM_INFO("psp mode 1 reset failed!\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 33ed2b158fcd..f38004e6064e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -2187,7 +2187,7 @@ static int sdma_v4_0_print_iv_entry(struct amdgpu_device *adev, dev_dbg_ratelimited(adev->dev, " for process %s pid %d thread %s pid %d\n", task_info->process_name, task_info->tgid, - task_info->task_name, task_info->pid); + task_info->task.comm, task_info->task.pid); amdgpu_vm_put_task_info(task_info); } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c 
b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index bb82c652e4c0..36b1ca73c2ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -110,6 +110,8 @@ static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev); static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev); static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring); static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring); +static int sdma_v4_4_2_soft_reset_engine(struct amdgpu_device *adev, + u32 instance_id); static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 offset) @@ -1342,6 +1344,7 @@ static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev) static const struct amdgpu_sdma_funcs sdma_v4_4_2_sdma_funcs = { .stop_kernel_queue = &sdma_v4_4_2_stop_queue, .start_kernel_queue = &sdma_v4_4_2_restore_queue, + .soft_reset_kernel_queue = &sdma_v4_4_2_soft_reset_engine, }; static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block) @@ -1653,38 +1656,17 @@ static bool sdma_v4_4_2_is_queue_selected(struct amdgpu_device *adev, uint32_t i return (context_status & SDMA_GFX_CONTEXT_STATUS__SELECTED_MASK) != 0; } -static bool sdma_v4_4_2_ring_is_guilty(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - uint32_t instance_id = ring->me; - - return sdma_v4_4_2_is_queue_selected(adev, instance_id, false); -} - -static bool sdma_v4_4_2_page_ring_is_guilty(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - uint32_t instance_id = ring->me; - - if (!adev->sdma.has_page_queue) - return false; - - return sdma_v4_4_2_is_queue_selected(adev, instance_id, true); -} - -static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) +static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; u32 id = ring->me; int r; - if (!(adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)) - return -EOPNOTSUPP; - - amdgpu_amdkfd_suspend(adev, false); - r = amdgpu_sdma_reset_engine(adev, id); - amdgpu_amdkfd_resume(adev, false); - + amdgpu_amdkfd_suspend(adev, true); + r = amdgpu_sdma_reset_engine(adev, id, false); + amdgpu_amdkfd_resume(adev, true); return r; } @@ -1730,7 +1712,7 @@ static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; u32 inst_mask; - int i; + int i, r; inst_mask = 1 << ring->me; udelay(50); @@ -1747,7 +1729,18 @@ static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring) return -ETIMEDOUT; } - return sdma_v4_4_2_inst_start(adev, inst_mask, true); + r = sdma_v4_4_2_inst_start(adev, inst_mask, true); + + return r; +} + +static int sdma_v4_4_2_soft_reset_engine(struct amdgpu_device *adev, + u32 instance_id) +{ + /* For SDMA 4.x, use the existing DPM interface for backward compatibility + * we need to convert the logical instance ID to physical instance ID before reset. 
+ */ + return amdgpu_dpm_reset_sdma(adev, 1 << GET_INST(SDMA0, instance_id)); } static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev, @@ -1889,7 +1882,7 @@ static int sdma_v4_4_2_print_iv_entry(struct amdgpu_device *adev, if (task_info) { dev_dbg_ratelimited(adev->dev, " for process %s pid %d thread %s pid %d\n", task_info->process_name, task_info->tgid, - task_info->task_name, task_info->pid); + task_info->task.comm, task_info->task.pid); amdgpu_vm_put_task_info(task_info); } @@ -2144,7 +2137,6 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = { .emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, .reset = sdma_v4_4_2_reset_queue, - .is_guilty = sdma_v4_4_2_ring_is_guilty, }; static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = { @@ -2177,7 +2169,6 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = { .emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, .reset = sdma_v4_4_2_reset_queue, - .is_guilty = sdma_v4_4_2_page_ring_is_guilty, }; static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 37f4b5b4a098..7dc67a22a7a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -1428,7 +1428,8 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(5, 0, 0): case IP_VERSION(5, 0, 2): case IP_VERSION(5, 0, 5): - if (adev->sdma.instance[0].fw_version >= 35) + if ((adev->sdma.instance[0].fw_version >= 35) && + !amdgpu_sriov_vf(adev)) adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; break; default: @@ -1539,17 +1540,27 @@ static int sdma_v5_0_soft_reset(struct amdgpu_ip_block *ip_block) return 0; } -static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) +static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; - u32 inst_id = ring->me; int r; + if (ring->me >= adev->sdma.num_instances) { + dev_err(adev->dev, "sdma instance not found\n"); + return -EINVAL; + } + + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + amdgpu_amdkfd_suspend(adev, true); - r = amdgpu_sdma_reset_engine(adev, inst_id); + r = amdgpu_sdma_reset_engine(adev, ring->me, true); amdgpu_amdkfd_resume(adev, true); + if (r) + return r; - return r; + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring) @@ -1616,6 +1627,7 @@ static int sdma_v5_0_restore_queue(struct amdgpu_ring *ring) r = sdma_v5_0_gfx_resume_instance(adev, inst_id, true); amdgpu_gfx_rlc_exit_safe_mode(adev, 0); + return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index 0b40411b92a0..3bd44c24f692 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -1347,11 +1347,13 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(5, 2, 2): case IP_VERSION(5, 2, 3): case IP_VERSION(5, 2, 4): - if (adev->sdma.instance[0].fw_version >= 76) + if ((adev->sdma.instance[0].fw_version >= 76) && + !amdgpu_sriov_vf(adev)) adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; break; case IP_VERSION(5, 2, 5): - if (adev->sdma.instance[0].fw_version >= 34) + if 
((adev->sdma.instance[0].fw_version >= 34) && + !amdgpu_sriov_vf(adev)) adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; break; default: @@ -1452,17 +1454,27 @@ static int sdma_v5_2_wait_for_idle(struct amdgpu_ip_block *ip_block) return -ETIMEDOUT; } -static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) +static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; - u32 inst_id = ring->me; int r; + if (ring->me >= adev->sdma.num_instances) { + dev_err(adev->dev, "sdma instance not found\n"); + return -EINVAL; + } + + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + amdgpu_amdkfd_suspend(adev, true); - r = amdgpu_sdma_reset_engine(adev, inst_id); + r = amdgpu_sdma_reset_engine(adev, ring->me, true); amdgpu_amdkfd_resume(adev, true); + if (r) + return r; - return r; + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static int sdma_v5_2_stop_queue(struct amdgpu_ring *ring) @@ -1532,6 +1544,7 @@ static int sdma_v5_2_restore_queue(struct amdgpu_ring *ring) r = sdma_v5_2_gfx_resume_instance(adev, inst_id, true); amdgpu_gfx_rlc_exit_safe_mode(adev, 0); + return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c index a9bdf8d61d6c..db6e41967f12 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c @@ -1355,7 +1355,8 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(6, 0, 0): case IP_VERSION(6, 0, 2): case IP_VERSION(6, 0, 3): - if (adev->sdma.instance[0].fw_version >= 21) + if ((adev->sdma.instance[0].fw_version >= 21) && + !amdgpu_sriov_vf(adev)) adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; break; default: @@ -1376,15 +1377,35 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block) switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { case IP_VERSION(6, 0, 0): - if ((adev->sdma.instance[0].fw_version >= 24) && !adev->sdma.disable_uq) + if ((adev->sdma.instance[0].fw_version >= 27) && !adev->sdma.disable_uq) + adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; + break; + case IP_VERSION(6, 0, 1): + if ((adev->sdma.instance[0].fw_version >= 18) && !adev->sdma.disable_uq) adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; break; case IP_VERSION(6, 0, 2): - if ((adev->sdma.instance[0].fw_version >= 21) && !adev->sdma.disable_uq) + if ((adev->sdma.instance[0].fw_version >= 23) && !adev->sdma.disable_uq) adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; break; case IP_VERSION(6, 0, 3): - if ((adev->sdma.instance[0].fw_version >= 25) && !adev->sdma.disable_uq) + if ((adev->sdma.instance[0].fw_version >= 27) && !adev->sdma.disable_uq) + adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; + break; + case IP_VERSION(6, 1, 0): + if ((adev->sdma.instance[0].fw_version >= 14) && !adev->sdma.disable_uq) + adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; + break; + case IP_VERSION(6, 1, 1): + if ((adev->sdma.instance[0].fw_version >= 17) && !adev->sdma.disable_uq) + adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; + break; + case IP_VERSION(6, 1, 2): + if ((adev->sdma.instance[0].fw_version >= 15) && !adev->sdma.disable_uq) + adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; + break; + case IP_VERSION(6, 1, 3): + if ((adev->sdma.instance[0].fw_version >= 10) && !adev->sdma.disable_uq) adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; break; default: @@ -1550,29 +1571,29 @@ 
static int sdma_v6_0_ring_preempt_ib(struct amdgpu_ring *ring) return r; } -static int sdma_v6_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) +static int sdma_v6_0_reset_queue(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; - int i, r; + int r; - if (amdgpu_sriov_vf(adev)) + if (ring->me >= adev->sdma.num_instances) { + dev_err(adev->dev, "sdma instance not found\n"); return -EINVAL; - - for (i = 0; i < adev->sdma.num_instances; i++) { - if (ring == &adev->sdma.instance[i].ring) - break; } - if (i == adev->sdma.num_instances) { - DRM_ERROR("sdma instance not found\n"); - return -EINVAL; - } + amdgpu_ring_reset_helper_begin(ring, timedout_fence); r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true); if (r) return r; - return sdma_v6_0_gfx_resume_instance(adev, i, true); + r = sdma_v6_0_gfx_resume_instance(adev, ring->me, true); + if (r) + return r; + + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static int sdma_v6_0_set_trap_irq_state(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c index 86903eccbd4e..326ecc8d37d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c @@ -802,29 +802,29 @@ static bool sdma_v7_0_check_soft_reset(struct amdgpu_ip_block *ip_block) return false; } -static int sdma_v7_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) +static int sdma_v7_0_reset_queue(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; - int i, r; + int r; - if (amdgpu_sriov_vf(adev)) + if (ring->me >= adev->sdma.num_instances) { + dev_err(adev->dev, "sdma instance not found\n"); return -EINVAL; - - for (i = 0; i < adev->sdma.num_instances; i++) { - if (ring == &adev->sdma.instance[i].ring) - break; } - if (i == adev->sdma.num_instances) { - DRM_ERROR("sdma instance not found\n"); - return -EINVAL; - } + amdgpu_ring_reset_helper_begin(ring, timedout_fence); r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true); if (r) return r; - return sdma_v7_0_gfx_resume_instance(adev, i, true); + r = sdma_v7_0_gfx_resume_instance(adev, ring->me, true); + if (r) + return r; + + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } /** @@ -1337,7 +1337,8 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block) adev->sdma.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring); - adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + if (!amdgpu_sriov_vf(adev)) + adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_sdma_sysfs_reset_mask_init(adev); if (r) @@ -1352,7 +1353,7 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block) switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { case IP_VERSION(7, 0, 0): case IP_VERSION(7, 0, 1): - if ((adev->sdma.instance[0].fw_version >= 7836028) && !adev->sdma.disable_uq) + if ((adev->sdma.instance[0].fw_version >= 7966358) && !adev->sdma.disable_uq) adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c index dd2d66090d23..68aef47254a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c @@ -743,7 +743,7 @@ int smu_v11_0_i2c_control_init(struct amdgpu_device *adev) adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; 
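The sdma_v6_0 and sdma_v7_0 reset_queue rewrites above, like the VCN ring_reset hunks later in this diff, converge on a single shape built around the new amdgpu_ring_reset_helper_begin()/amdgpu_ring_reset_helper_end() calls. The sketch below shows that shape only; stop_queue_hw() and restart_queue_hw() are placeholders for the engine-specific steps (MES legacy-queue reset, SDMA engine reset, VCN stop/start) and are not real amdgpu functions.

/* Illustrative sketch only: the common shape of the per-queue reset
 * callbacks introduced in this series.  stop_queue_hw() and
 * restart_queue_hw() are placeholders, not real amdgpu functions.
 */
static int example_ring_reset(struct amdgpu_ring *ring,
			      unsigned int vmid,
			      struct amdgpu_fence *timedout_fence)
{
	int r;

	/* common pre-reset bookkeeping added by this series */
	amdgpu_ring_reset_helper_begin(ring, timedout_fence);

	r = stop_queue_hw(ring);	/* engine-specific */
	if (r)
		return r;

	r = restart_queue_hw(ring);	/* engine-specific */
	if (r)
		return r;

	/* common post-reset bookkeeping; the previously explicit
	 * amdgpu_ring_test_helper() call is folded in here, as the
	 * vcn_v4_0_3_ring_reset() hunk below shows.
	 */
	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}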
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; - res = i2c_add_adapter(control); + res = devm_i2c_add_adapter(adev->dev, control); if (res) DRM_ERROR("Failed to register hw i2c, err: %d\n", res); @@ -752,9 +752,6 @@ int smu_v11_0_i2c_control_init(struct amdgpu_device *adev) void smu_v11_0_i2c_control_fini(struct amdgpu_device *adev) { - struct i2c_adapter *control = adev->pm.ras_eeprom_i2c_bus; - - i2c_del_adapter(control); adev->pm.ras_eeprom_i2c_bus = NULL; adev->pm.fru_eeprom_i2c_bus = NULL; } diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index c457be3a3c56..9785fada4fa7 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -741,7 +741,6 @@ static void soc15_reg_base_init(struct amdgpu_device *adev) void soc15_set_virt_ops(struct amdgpu_device *adev) { adev->virt.ops = &xgpu_ai_virt_ops; - /* init soc15 reg base early enough so we can * request request full access for sriov before * set_ip_blocks. */ @@ -1218,6 +1217,8 @@ static int soc15_common_early_init(struct amdgpu_ip_block *ip_block) AMD_PG_SUPPORT_JPEG; /*TODO: need a new external_rev_id for GC 9.4.4? */ adev->external_rev_id = adev->rev_id + 0x46; + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) + adev->external_rev_id = adev->rev_id + 0x50; break; default: /* FIXME: not supported yet */ diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index ef7c603b50ae..c8ac11a9cdef 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -118,7 +118,6 @@ int vega10_reg_base_init(struct amdgpu_device *adev); int vega20_reg_base_init(struct amdgpu_device *adev); int arct_reg_base_init(struct amdgpu_device *adev); int aldebaran_reg_base_init(struct amdgpu_device *adev); -void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev); u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id); int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev); ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c index e590cbdd8de9..8dc32787d625 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c @@ -536,8 +536,11 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev, hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID); mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType); - if ((hwid != MCA_UMC_HWID_V12_0) || (mcatype != MCA_UMC_MCATYPE_V12_0)) + /* The IP block decode of consumption is SMU */ + if (hwid != MCA_UMC_HWID_V12_0 || mcatype != MCA_UMC_MCATYPE_V12_0) { + con->umc_ecc_log.consumption_q_count++; return 0; + } if (!status) return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c index 5dbaebb592b3..2e79a3afc774 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c @@ -623,7 +623,22 @@ static void uvd_v3_1_enable_mgcg(struct amdgpu_device *adev, * * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * - * Initialize the hardware, boot up the VCPU and do some testing + * Initialize the hardware, boot up the VCPU and do some testing. + * + * On SI, the UVD is meant to be used in a specific power state, + * or alternatively the driver can manually enable its clock. + * In amdgpu we use the dedicated UVD power state when DPM is enabled. 
+ * Calling amdgpu_dpm_enable_uvd makes DPM select the UVD power state + * for the SMU and afterwards enables the UVD clock. + * This is automatically done by amdgpu_uvd_ring_begin_use when work + * is submitted to the UVD ring. Here, we have to call it manually + * in order to power up UVD before firmware validation. + * + * Note that we must not disable the UVD clock here, as that would + * cause the ring test to fail. However, UVD is powered off + * automatically after the ring test: amdgpu_uvd_ring_end_use calls + * the UVD idle work handler which will disable the UVD clock when + * all fences are signalled. */ static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block) { @@ -633,6 +648,15 @@ static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block) int r; uvd_v3_1_mc_resume(adev); + uvd_v3_1_enable_mgcg(adev, true); + + /* Make sure UVD is powered during FW validation. + * It's going to be automatically powered off after the ring test. + */ + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_uvd(adev, true); + else + amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); r = uvd_v3_1_fw_validate(adev); if (r) { @@ -640,9 +664,6 @@ static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block) return r; } - uvd_v3_1_enable_mgcg(adev, true); - amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); - uvd_v3_1_start(adev); r = amdgpu_ring_test_helper(ring); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 1c07b701d0e4..ceb94bbb03a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -217,7 +217,8 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + AMDGPU_IB_POOL_DIRECT, &job, + AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); if (r) return r; @@ -281,7 +282,8 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring, int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + AMDGPU_IB_POOL_DIRECT, &job, + AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 9d237b5937fb..1f8866f3f63c 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -225,7 +225,8 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, u32 handle, int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + AMDGPU_IB_POOL_DIRECT, &job, + AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); if (r) return r; @@ -288,7 +289,8 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, u32 handle, int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + AMDGPU_IB_POOL_DIRECT, &job, + AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index c74947705d77..a316797875a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -193,7 +193,7 @@ static int vcn_v1_0_sw_init(struct amdgpu_ip_block *ip_block) adev->vcn.inst[0].pause_dpg_mode = vcn_v1_0_pause_dpg_mode; if (amdgpu_vcnfw_log) { - volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; + struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; fw_shared->present_flag_0 = 0; 
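The uvd_v3_1_hw_init() comment and hunk above spell out an ordering requirement: power UVD up before firmware validation, keep the clock enabled through the ring test, and let the idle handler power UVD down afterwards. The sketch below condenses that ordering from the hunk for readability; it is not additional patch content, error handling and interrupt setup are trimmed, and the function name is invented for the example.

/* Condensed from the uvd_v3_1_hw_init() hunk above; all helpers named
 * here appear in that hunk.
 */
static int uvd_v3_1_hw_init_order_sketch(struct amdgpu_device *adev,
					 struct amdgpu_ring *ring)
{
	int r;

	uvd_v3_1_mc_resume(adev);
	uvd_v3_1_enable_mgcg(adev, true);

	/* Power UVD up before firmware validation: let DPM select the
	 * dedicated UVD power state, or force the clocks by hand.
	 */
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);
	else
		amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);

	r = uvd_v3_1_fw_validate(adev);
	if (r)
		return r;

	uvd_v3_1_start(adev);

	/* Do not disable the UVD clock here; the ring test still needs
	 * it.  amdgpu_uvd_ring_end_use() schedules the idle handler,
	 * which powers UVD off once all fences have signalled.
	 */
	return amdgpu_ring_test_helper(ring);
}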
amdgpu_vcn_fwlog_init(adev->vcn.inst); @@ -230,11 +230,11 @@ static int vcn_v1_0_sw_fini(struct amdgpu_ip_block *ip_block) jpeg_v1_0_sw_fini(ip_block); - r = amdgpu_vcn_sw_fini(adev, 0); + amdgpu_vcn_sw_fini(adev, 0); kfree(adev->vcn.ip_dump); - return r; + return 0; } /** @@ -1338,7 +1338,6 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst, WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); - ring = &adev->vcn.inst->ring_dec; WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF); SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, @@ -1399,7 +1398,6 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst, WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK); - ring = &adev->vcn.inst->ring_dec; WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF); SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 148b651be7ca..8897dcc9c1a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -98,6 +98,8 @@ static int vcn_v2_0_set_pg_state(struct amdgpu_vcn_inst *vinst, static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst, struct dpg_pause_state *new_state); static int vcn_v2_0_start_sriov(struct amdgpu_device *adev); +static int vcn_v2_0_reset(struct amdgpu_vcn_inst *vinst); + /** * vcn_v2_0_early_init - set function pointers and load microcode * @@ -134,10 +136,8 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int i, r; - uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0); - uint32_t *ptr; struct amdgpu_device *adev = ip_block->adev; - volatile struct amdgpu_fw_shared *fw_shared; + struct amdgpu_fw_shared *fw_shared; /* VCN DEC TRAP */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, @@ -213,6 +213,12 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block) } adev->vcn.inst[0].pause_dpg_mode = vcn_v2_0_pause_dpg_mode; + adev->vcn.inst[0].reset = vcn_v2_0_reset; + + adev->vcn.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); + if (!amdgpu_sriov_vf(adev)) + adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_virt_alloc_mm_table(adev); if (r) @@ -224,14 +230,13 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block) if (amdgpu_vcnfw_log) amdgpu_vcn_fwlog_init(adev->vcn.inst); - /* Allocate memory for VCN IP Dump buffer */ - ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL); - if (!ptr) { - DRM_ERROR("Failed to allocate memory for VCN IP Dump\n"); - adev->vcn.ip_dump = NULL; - } else { - adev->vcn.ip_dump = ptr; - } + r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_2_0, ARRAY_SIZE(vcn_reg_list_2_0)); + if (r) + return r; + + r = amdgpu_vcn_sysfs_reset_mask_init(adev); + if (r) + return r; return 0; } @@ -247,7 +252,7 @@ static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block) { int r, idx; struct amdgpu_device *adev = ip_block->adev; - volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; + struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; if (drm_dev_enter(adev_to_drm(adev), &idx)) { fw_shared->present_flag_0 = 0; @@ -260,11 +265,11 @@ static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block) if (r) return r; - r = amdgpu_vcn_sw_fini(adev, 0); + 
amdgpu_vcn_sysfs_reset_mask_fini(adev); - kfree(adev->vcn.ip_dump); + amdgpu_vcn_sw_fini(adev, 0); - return r; + return 0; } /** @@ -848,9 +853,10 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst) static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect) { struct amdgpu_device *adev = vinst->adev; - volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; + struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; uint32_t rb_bufsz, tmp; + int ret; vcn_v2_0_enable_static_power_gating(vinst); @@ -934,8 +940,13 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect) UVD, 0, mmUVD_MASTINT_EN), UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); - if (indirect) - amdgpu_vcn_psp_update_sram(adev, 0, 0); + if (indirect) { + ret = amdgpu_vcn_psp_update_sram(adev, 0, 0); + if (ret) { + dev_err(adev->dev, "vcn sram load failed %d\n", ret); + return ret; + } + } /* force RBC into idle state */ rb_bufsz = order_base_2(ring->ring_size); @@ -990,7 +1001,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect) static int vcn_v2_0_start(struct amdgpu_vcn_inst *vinst) { struct amdgpu_device *adev = vinst->adev; - volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; + struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; uint32_t rb_bufsz, tmp; uint32_t lmi_swap_cntl; @@ -1297,7 +1308,7 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); if (!ret_code) { - volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; + struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; /* pause DPG */ reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data); @@ -1355,6 +1366,16 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst, return 0; } +static int vcn_v2_0_reset(struct amdgpu_vcn_inst *vinst) +{ + int r; + + r = vcn_v2_0_stop(vinst); + if (r) + return r; + return vcn_v2_0_start(vinst); +} + static bool vcn_v2_0_is_idle(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; @@ -2071,66 +2092,6 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev) return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table); } -static void vcn_v2_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) -{ - struct amdgpu_device *adev = ip_block->adev; - int i, j; - uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0); - uint32_t inst_off, is_powered; - - if (!adev->vcn.ip_dump) - return; - - drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst); - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - if (adev->vcn.harvest_config & (1 << i)) { - drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i); - continue; - } - - inst_off = i * reg_count; - is_powered = (adev->vcn.ip_dump[inst_off] & - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; - - if (is_powered) { - drm_printf(p, "\nActive Instance:VCN%d\n", i); - for (j = 0; j < reg_count; j++) - drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_0[j].reg_name, - adev->vcn.ip_dump[inst_off + j]); - } else { - drm_printf(p, "\nInactive Instance:VCN%d\n", i); - } - } -} - -static void vcn_v2_0_dump_ip_state(struct amdgpu_ip_block *ip_block) -{ - struct amdgpu_device *adev = ip_block->adev; - 
int i, j; - bool is_powered; - uint32_t inst_off; - uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0); - - if (!adev->vcn.ip_dump) - return; - - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - if (adev->vcn.harvest_config & (1 << i)) - continue; - - inst_off = i * reg_count; - /* mmUVD_POWER_STATUS is always readable and is first element of the array */ - adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS); - is_powered = (adev->vcn.ip_dump[inst_off] & - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; - - if (is_powered) - for (j = 1; j < reg_count; j++) - adev->vcn.ip_dump[inst_off + j] = - RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_2_0[j], i)); - } -} - static const struct amd_ip_funcs vcn_v2_0_ip_funcs = { .name = "vcn_v2_0", .early_init = vcn_v2_0_early_init, @@ -2144,8 +2105,8 @@ static const struct amd_ip_funcs vcn_v2_0_ip_funcs = { .wait_for_idle = vcn_v2_0_wait_for_idle, .set_clockgating_state = vcn_v2_0_set_clockgating_state, .set_powergating_state = vcn_set_powergating_state, - .dump_ip_state = vcn_v2_0_dump_ip_state, - .print_ip_state = vcn_v2_0_print_ip_state, + .dump_ip_state = amdgpu_vcn_dump_ip_state, + .print_ip_state = amdgpu_vcn_print_ip_state, }; static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = { @@ -2176,6 +2137,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = { .emit_wreg = vcn_v2_0_dec_ring_emit_wreg, .emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, + .reset = amdgpu_vcn_ring_reset, }; static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = { @@ -2205,6 +2167,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = { .emit_wreg = vcn_v2_0_enc_ring_emit_wreg, .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, + .reset = amdgpu_vcn_ring_reset, }; static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 58b527a6b795..cebee453871c 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -102,6 +102,7 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst, struct dpg_pause_state *new_state); static int vcn_v2_5_sriov_start(struct amdgpu_device *adev); static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev); +static int vcn_v2_5_reset(struct amdgpu_vcn_inst *vinst); static int amdgpu_ih_clientid_vcns[] = { SOC15_IH_CLIENTID_VCN, @@ -115,7 +116,6 @@ static void vcn_v2_5_idle_work_handler(struct work_struct *work) struct amdgpu_device *adev = vcn_inst->adev; unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0}; unsigned int i, j; - int r = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { struct amdgpu_vcn_inst *v = &adev->vcn.inst[i]; @@ -148,15 +148,7 @@ static void vcn_v2_5_idle_work_handler(struct work_struct *work) if (!fences && !atomic_read(&adev->vcn.inst[0].total_submission_cnt)) { amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, AMD_PG_STATE_GATE); - mutex_lock(&adev->vcn.workload_profile_mutex); - if (adev->vcn.workload_profile_active) { - r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO, - false); - if (r) - dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r); - adev->vcn.workload_profile_active = false; - } - mutex_unlock(&adev->vcn.workload_profile_mutex); + amdgpu_vcn_put_profile(adev); } else { 
schedule_delayed_work(&adev->vcn.inst[0].idle_work, VCN_IDLE_TIMEOUT); } @@ -166,7 +158,6 @@ static void vcn_v2_5_ring_begin_use(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; struct amdgpu_vcn_inst *v = &adev->vcn.inst[ring->me]; - int r = 0; atomic_inc(&adev->vcn.inst[0].total_submission_cnt); @@ -176,20 +167,6 @@ static void vcn_v2_5_ring_begin_use(struct amdgpu_ring *ring) * the delayed work so there is no one else to set it to false * and we don't care if someone else sets it to true. */ - if (adev->vcn.workload_profile_active) - goto pg_lock; - - mutex_lock(&adev->vcn.workload_profile_mutex); - if (!adev->vcn.workload_profile_active) { - r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO, - true); - if (r) - dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r); - adev->vcn.workload_profile_active = true; - } - mutex_unlock(&adev->vcn.workload_profile_mutex); - -pg_lock: mutex_lock(&adev->vcn.inst[0].vcn_pg_lock); amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, AMD_PG_STATE_UNGATE); @@ -217,6 +194,7 @@ pg_lock: v->pause_dpg_mode(v, &new_state); } mutex_unlock(&adev->vcn.inst[0].vcn_pg_lock); + amdgpu_vcn_get_profile(adev); } static void vcn_v2_5_ring_end_use(struct amdgpu_ring *ring) @@ -296,12 +274,10 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int i, j, r; - uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5); - uint32_t *ptr; struct amdgpu_device *adev = ip_block->adev; for (j = 0; j < adev->vcn.num_vcn_inst; j++) { - volatile struct amdgpu_fw_shared *fw_shared; + struct amdgpu_fw_shared *fw_shared; if (adev->vcn.harvest_config & (1 << j)) continue; @@ -404,8 +380,14 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block) if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) adev->vcn.inst[j].pause_dpg_mode = vcn_v2_5_pause_dpg_mode; + adev->vcn.inst[j].reset = vcn_v2_5_reset; } + adev->vcn.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); + if (!amdgpu_sriov_vf(adev)) + adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + if (amdgpu_sriov_vf(adev)) { r = amdgpu_virt_alloc_mm_table(adev); if (r) @@ -416,14 +398,13 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - /* Allocate memory for VCN IP Dump buffer */ - ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL); - if (!ptr) { - DRM_ERROR("Failed to allocate memory for VCN IP Dump\n"); - adev->vcn.ip_dump = NULL; - } else { - adev->vcn.ip_dump = ptr; - } + r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_2_5, ARRAY_SIZE(vcn_reg_list_2_5)); + if (r) + return r; + + r = amdgpu_vcn_sysfs_reset_mask_init(adev); + if (r) + return r; return 0; } @@ -439,7 +420,7 @@ static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block) { int i, r, idx; struct amdgpu_device *adev = ip_block->adev; - volatile struct amdgpu_fw_shared *fw_shared; + struct amdgpu_fw_shared *fw_shared; if (drm_dev_enter(adev_to_drm(adev), &idx)) { for (i = 0; i < adev->vcn.num_vcn_inst; i++) { @@ -455,17 +436,15 @@ static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block) if (amdgpu_sriov_vf(adev)) amdgpu_virt_free_mm_table(adev); + amdgpu_vcn_sysfs_reset_mask_fini(adev); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { r = amdgpu_vcn_suspend(adev, i); if (r) return r; - r = amdgpu_vcn_sw_fini(adev, i); - if (r) - return r; + amdgpu_vcn_sw_fini(adev, i); } - kfree(adev->vcn.ip_dump); - return 0; } @@ -1019,9 +998,10 @@ static int 
vcn_v2_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect) { struct amdgpu_device *adev = vinst->adev; int inst_idx = vinst->inst; - volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; + struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; struct amdgpu_ring *ring; uint32_t rb_bufsz, tmp; + int ret; /* disable register anti-hang mechanism */ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1, @@ -1112,8 +1092,13 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect) VCN, 0, mmUVD_MASTINT_EN), UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); - if (indirect) - amdgpu_vcn_psp_update_sram(adev, inst_idx, 0); + if (indirect) { + ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0); + if (ret) { + dev_err(adev->dev, "vcn sram load failed %d\n", ret); + return ret; + } + } ring = &adev->vcn.inst[inst_idx].ring_dec; /* force RBC into idle state */ @@ -1170,7 +1155,7 @@ static int vcn_v2_5_start(struct amdgpu_vcn_inst *vinst) { struct amdgpu_device *adev = vinst->adev; int i = vinst->inst; - volatile struct amdgpu_fw_shared *fw_shared = + struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; struct amdgpu_ring *ring; uint32_t rb_bufsz, tmp; @@ -1682,7 +1667,7 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); if (!ret_code) { - volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; + struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; /* pause DPG */ reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; @@ -1816,6 +1801,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = { .emit_wreg = vcn_v2_0_dec_ring_emit_wreg, .emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, + .reset = amdgpu_vcn_ring_reset, }; /** @@ -1914,6 +1900,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = { .emit_wreg = vcn_v2_0_enc_ring_emit_wreg, .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, + .reset = amdgpu_vcn_ring_reset, }; static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev) @@ -1942,6 +1929,16 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev) } } +static int vcn_v2_5_reset(struct amdgpu_vcn_inst *vinst) +{ + int r; + + r = vcn_v2_5_stop(vinst); + if (r) + return r; + return vcn_v2_5_start(vinst); +} + static bool vcn_v2_5_is_idle(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; @@ -2102,66 +2099,6 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev) } } -static void vcn_v2_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) -{ - struct amdgpu_device *adev = ip_block->adev; - int i, j; - uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5); - uint32_t inst_off, is_powered; - - if (!adev->vcn.ip_dump) - return; - - drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst); - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - if (adev->vcn.harvest_config & (1 << i)) { - drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i); - continue; - } - - inst_off = i * reg_count; - is_powered = (adev->vcn.ip_dump[inst_off] & - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; - - if (is_powered) { - drm_printf(p, "\nActive Instance:VCN%d\n", i); - for (j = 0; j < reg_count; j++) - drm_printf(p, 
"%-50s \t 0x%08x\n", vcn_reg_list_2_5[j].reg_name, - adev->vcn.ip_dump[inst_off + j]); - } else { - drm_printf(p, "\nInactive Instance:VCN%d\n", i); - } - } -} - -static void vcn_v2_5_dump_ip_state(struct amdgpu_ip_block *ip_block) -{ - struct amdgpu_device *adev = ip_block->adev; - int i, j; - bool is_powered; - uint32_t inst_off; - uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5); - - if (!adev->vcn.ip_dump) - return; - - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - if (adev->vcn.harvest_config & (1 << i)) - continue; - - inst_off = i * reg_count; - /* mmUVD_POWER_STATUS is always readable and is first element of the array */ - adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS); - is_powered = (adev->vcn.ip_dump[inst_off] & - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; - - if (is_powered) - for (j = 1; j < reg_count; j++) - adev->vcn.ip_dump[inst_off + j] = - RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_2_5[j], i)); - } -} - static const struct amd_ip_funcs vcn_v2_5_ip_funcs = { .name = "vcn_v2_5", .early_init = vcn_v2_5_early_init, @@ -2175,8 +2112,8 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = { .wait_for_idle = vcn_v2_5_wait_for_idle, .set_clockgating_state = vcn_v2_5_set_clockgating_state, .set_powergating_state = vcn_set_powergating_state, - .dump_ip_state = vcn_v2_5_dump_ip_state, - .print_ip_state = vcn_v2_5_print_ip_state, + .dump_ip_state = amdgpu_vcn_dump_ip_state, + .print_ip_state = amdgpu_vcn_print_ip_state, }; static const struct amd_ip_funcs vcn_v2_6_ip_funcs = { @@ -2192,8 +2129,8 @@ static const struct amd_ip_funcs vcn_v2_6_ip_funcs = { .wait_for_idle = vcn_v2_5_wait_for_idle, .set_clockgating_state = vcn_v2_5_set_clockgating_state, .set_powergating_state = vcn_set_powergating_state, - .dump_ip_state = vcn_v2_5_dump_ip_state, - .print_ip_state = vcn_v2_5_print_ip_state, + .dump_ip_state = amdgpu_vcn_dump_ip_state, + .print_ip_state = amdgpu_vcn_print_ip_state, }; const struct amdgpu_ip_block_version vcn_v2_5_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 9fb0d5380589..d9cf8f0feeb3 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -110,6 +110,7 @@ static int vcn_v3_0_set_pg_state(struct amdgpu_vcn_inst *vinst, enum amd_powergating_state state); static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst, struct dpg_pause_state *new_state); +static int vcn_v3_0_reset(struct amdgpu_vcn_inst *vinst); static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring); static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring); @@ -174,8 +175,6 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block) struct amdgpu_ring *ring; int i, j, r; int vcn_doorbell_index = 0; - uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0); - uint32_t *ptr; struct amdgpu_device *adev = ip_block->adev; /* @@ -192,7 +191,7 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block) } for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - volatile struct amdgpu_fw_shared *fw_shared; + struct amdgpu_fw_shared *fw_shared; if (adev->vcn.harvest_config & (1 << i)) continue; @@ -289,22 +288,27 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block) if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) adev->vcn.inst[i].pause_dpg_mode = vcn_v3_0_pause_dpg_mode; + adev->vcn.inst[i].reset = vcn_v3_0_reset; } + adev->vcn.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); + if (!amdgpu_sriov_vf(adev)) + 
adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + if (amdgpu_sriov_vf(adev)) { r = amdgpu_virt_alloc_mm_table(adev); if (r) return r; } - /* Allocate memory for VCN IP Dump buffer */ - ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL); - if (ptr == NULL) { - DRM_ERROR("Failed to allocate memory for VCN IP Dump\n"); - adev->vcn.ip_dump = NULL; - } else { - adev->vcn.ip_dump = ptr; - } + r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_3_0, ARRAY_SIZE(vcn_reg_list_3_0)); + if (r) + return r; + + r = amdgpu_vcn_sysfs_reset_mask_init(adev); + if (r) + return r; return 0; } @@ -323,7 +327,7 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block) if (drm_dev_enter(adev_to_drm(adev), &idx)) { for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - volatile struct amdgpu_fw_shared *fw_shared; + struct amdgpu_fw_shared *fw_shared; if (adev->vcn.harvest_config & (1 << i)) continue; @@ -338,17 +342,16 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block) if (amdgpu_sriov_vf(adev)) amdgpu_virt_free_mm_table(adev); + amdgpu_vcn_sysfs_reset_mask_fini(adev); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { r = amdgpu_vcn_suspend(adev, i); if (r) return r; - r = amdgpu_vcn_sw_fini(adev, i); - if (r) - return r; + amdgpu_vcn_sw_fini(adev, i); } - kfree(adev->vcn.ip_dump); return 0; } @@ -1026,9 +1029,10 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect) { struct amdgpu_device *adev = vinst->adev; int inst_idx = vinst->inst; - volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; + struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; struct amdgpu_ring *ring; uint32_t rb_bufsz, tmp; + int ret; /* disable register anti-hang mechanism */ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1, @@ -1121,8 +1125,13 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect) WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect); - if (indirect) - amdgpu_vcn_psp_update_sram(adev, inst_idx, 0); + if (indirect) { + ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0); + if (ret) { + dev_err(adev->dev, "vcn sram load failed %d\n", ret); + return ret; + } + } ring = &adev->vcn.inst[inst_idx].ring_dec; /* force RBC into idle state */ @@ -1185,7 +1194,7 @@ static int vcn_v3_0_start(struct amdgpu_vcn_inst *vinst) { struct amdgpu_device *adev = vinst->adev; int i = vinst->inst; - volatile struct amdgpu_fw_shared *fw_shared; + struct amdgpu_fw_shared *fw_shared; struct amdgpu_ring *ring; uint32_t rb_bufsz, tmp; int j, k, r; @@ -1706,7 +1715,7 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst, { struct amdgpu_device *adev = vinst->adev; int inst_idx = vinst->inst; - volatile struct amdgpu_fw_shared *fw_shared; + struct amdgpu_fw_shared *fw_shared; struct amdgpu_ring *ring; uint32_t reg_data = 0; int ret_code; @@ -1825,7 +1834,7 @@ static uint64_t vcn_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring) static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - volatile struct amdgpu_fw_shared *fw_shared; + struct amdgpu_fw_shared *fw_shared; if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { /*whenever update RBC_RB_WPTR, we save the wptr in shared rb.wptr and scratch2 */ @@ -1875,15 +1884,19 @@ static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p, struct amdgpu_job *job) { struct drm_gpu_scheduler **scheds; - - /* The create 
msg must be in the first IB submitted */ - if (atomic_read(&job->base.entity->fence_seq)) - return -EINVAL; + struct dma_fence *fence; /* if VCN0 is harvested, we can't support AV1 */ if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) return -EINVAL; + /* wait for all jobs to finish before switching to instance 0 */ + fence = amdgpu_ctx_get_fence(p->ctx, job->base.entity, ~0ull); + if (fence) { + dma_fence_wait(fence, false); + dma_fence_put(fence); + } + scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC] [AMDGPU_RING_PRIO_DEFAULT].sched; drm_sched_entity_modify_sched(job->base.entity, scheds, 1); @@ -2033,6 +2046,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = { .emit_wreg = vcn_v2_0_dec_ring_emit_wreg, .emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, + .reset = amdgpu_vcn_ring_reset, }; /** @@ -2131,6 +2145,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_enc_ring_vm_funcs = { .emit_wreg = vcn_v2_0_enc_ring_emit_wreg, .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, + .reset = amdgpu_vcn_ring_reset, }; static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev) @@ -2164,6 +2179,18 @@ static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev) } } +static int vcn_v3_0_reset(struct amdgpu_vcn_inst *vinst) +{ + int r; + + r = vcn_v3_0_stop(vinst); + if (r) + return r; + vcn_v3_0_enable_clock_gating(vinst); + vcn_v3_0_enable_static_power_gating(vinst); + return vcn_v3_0_start(vinst); +} + static bool vcn_v3_0_is_idle(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; @@ -2315,67 +2342,6 @@ static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev) } } -static void vcn_v3_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) -{ - struct amdgpu_device *adev = ip_block->adev; - int i, j; - uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0); - uint32_t inst_off; - bool is_powered; - - if (!adev->vcn.ip_dump) - return; - - drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst); - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - if (adev->vcn.harvest_config & (1 << i)) { - drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i); - continue; - } - - inst_off = i * reg_count; - is_powered = (adev->vcn.ip_dump[inst_off] & - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; - - if (is_powered) { - drm_printf(p, "\nActive Instance:VCN%d\n", i); - for (j = 0; j < reg_count; j++) - drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_3_0[j].reg_name, - adev->vcn.ip_dump[inst_off + j]); - } else { - drm_printf(p, "\nInactive Instance:VCN%d\n", i); - } - } -} - -static void vcn_v3_0_dump_ip_state(struct amdgpu_ip_block *ip_block) -{ - struct amdgpu_device *adev = ip_block->adev; - int i, j; - bool is_powered; - uint32_t inst_off; - uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0); - - if (!adev->vcn.ip_dump) - return; - - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - if (adev->vcn.harvest_config & (1 << i)) - continue; - - inst_off = i * reg_count; - /* mmUVD_POWER_STATUS is always readable and is first element of the array */ - adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS); - is_powered = (adev->vcn.ip_dump[inst_off] & - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; - - if (is_powered) - for (j = 1; j < reg_count; j++) - adev->vcn.ip_dump[inst_off + j] = - RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_3_0[j], i)); - } -} - 
static const struct amd_ip_funcs vcn_v3_0_ip_funcs = { .name = "vcn_v3_0", .early_init = vcn_v3_0_early_init, @@ -2389,8 +2355,8 @@ static const struct amd_ip_funcs vcn_v3_0_ip_funcs = { .wait_for_idle = vcn_v3_0_wait_for_idle, .set_clockgating_state = vcn_v3_0_set_clockgating_state, .set_powergating_state = vcn_set_powergating_state, - .dump_ip_state = vcn_v3_0_dump_ip_state, - .print_ip_state = vcn_v3_0_print_ip_state, + .dump_ip_state = amdgpu_vcn_dump_ip_state, + .print_ip_state = amdgpu_vcn_print_ip_state, }; const struct amdgpu_ip_block_version vcn_v3_0_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index b5071f77f78d..3ae666522d57 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -148,7 +148,7 @@ static int vcn_v4_0_early_init(struct amdgpu_ip_block *ip_block) static int vcn_v4_0_fw_shared_init(struct amdgpu_device *adev, int inst_idx) { - volatile struct amdgpu_vcn4_fw_shared *fw_shared; + struct amdgpu_vcn4_fw_shared *fw_shared; fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE); @@ -183,8 +183,6 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block) struct amdgpu_ring *ring; struct amdgpu_device *adev = ip_block->adev; int i, r; - uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0); - uint32_t *ptr; for (i = 0; i < adev->vcn.num_vcn_inst; i++) { if (adev->vcn.harvest_config & (1 << i)) @@ -241,7 +239,8 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block) adev->vcn.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); - adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + if (!amdgpu_sriov_vf(adev)) + adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; if (amdgpu_sriov_vf(adev)) { r = amdgpu_virt_alloc_mm_table(adev); @@ -254,14 +253,9 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - /* Allocate memory for VCN IP Dump buffer */ - ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL); - if (!ptr) { - DRM_ERROR("Failed to allocate memory for VCN IP Dump\n"); - adev->vcn.ip_dump = NULL; - } else { - adev->vcn.ip_dump = ptr; - } + r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_4_0, ARRAY_SIZE(vcn_reg_list_4_0)); + if (r) + return r; r = amdgpu_vcn_sysfs_reset_mask_init(adev); if (r) @@ -284,7 +278,7 @@ static int vcn_v4_0_sw_fini(struct amdgpu_ip_block *ip_block) if (drm_dev_enter(adev_to_drm(adev), &idx)) { for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - volatile struct amdgpu_vcn4_fw_shared *fw_shared; + struct amdgpu_vcn4_fw_shared *fw_shared; if (adev->vcn.harvest_config & (1 << i)) continue; @@ -308,13 +302,8 @@ static int vcn_v4_0_sw_fini(struct amdgpu_ip_block *ip_block) amdgpu_vcn_sysfs_reset_mask_fini(adev); - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - r = amdgpu_vcn_sw_fini(adev, i); - if (r) - return r; - } - - kfree(adev->vcn.ip_dump); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) + amdgpu_vcn_sw_fini(adev, i); return 0; } @@ -1008,9 +997,10 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect) { struct amdgpu_device *adev = vinst->adev; int inst_idx = vinst->inst; - volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; + struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; struct amdgpu_ring *ring; uint32_t tmp; + int ret; /* disable register anti-hang 
mechanism */ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1, @@ -1093,8 +1083,13 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect) UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); - if (indirect) - amdgpu_vcn_psp_update_sram(adev, inst_idx, 0); + if (indirect) { + ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0); + if (ret) { + dev_err(adev->dev, "vcn sram load failed %d\n", ret); + return ret; + } + } ring = &adev->vcn.inst[inst_idx].ring_enc[0]; @@ -1142,7 +1137,7 @@ static int vcn_v4_0_start(struct amdgpu_vcn_inst *vinst) { struct amdgpu_device *adev = vinst->adev; int i = vinst->inst; - volatile struct amdgpu_vcn4_fw_shared *fw_shared; + struct amdgpu_vcn4_fw_shared *fw_shared; struct amdgpu_ring *ring; uint32_t tmp; int j, k, r; @@ -1359,8 +1354,8 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev) struct mmsch_v4_0_cmd_end end = { {0} }; struct mmsch_v4_0_init_header header; - volatile struct amdgpu_vcn4_fw_shared *fw_shared; - volatile struct amdgpu_fw_shared_rb_setup *rb_setup; + struct amdgpu_vcn4_fw_shared *fw_shared; + struct amdgpu_fw_shared_rb_setup *rb_setup; direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE; @@ -1611,7 +1606,7 @@ static int vcn_v4_0_stop(struct amdgpu_vcn_inst *vinst) { struct amdgpu_device *adev = vinst->adev; int i = vinst->inst; - volatile struct amdgpu_vcn4_fw_shared *fw_shared; + struct amdgpu_vcn4_fw_shared *fw_shared; uint32_t tmp; int r = 0; @@ -1623,7 +1618,6 @@ static int vcn_v4_0_stop(struct amdgpu_vcn_inst *vinst) if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { vcn_v4_0_stop_dpg_mode(vinst); - r = 0; goto done; } @@ -1807,15 +1801,19 @@ static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p, struct amdgpu_job *job) { struct drm_gpu_scheduler **scheds; - - /* The create msg must be in the first IB submitted */ - if (atomic_read(&job->base.entity->fence_seq)) - return -EINVAL; + struct dma_fence *fence; /* if VCN0 is harvested, we can't support AV1 */ if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) return -EINVAL; + /* wait for all jobs to finish before switching to instance 0 */ + fence = amdgpu_ctx_get_fence(p->ctx, job->base.entity, ~0ull); + if (fence) { + dma_fence_wait(fence, false); + dma_fence_put(fence); + } + scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC] [AMDGPU_RING_PRIO_0].sched; drm_sched_entity_modify_sched(job->base.entity, scheds, 1); @@ -1906,22 +1904,16 @@ out: #define RADEON_VCN_ENGINE_TYPE_ENCODE (0x00000002) #define RADEON_VCN_ENGINE_TYPE_DECODE (0x00000003) - #define RADEON_VCN_ENGINE_INFO (0x30000001) -#define RADEON_VCN_ENGINE_INFO_MAX_OFFSET 16 - #define RENCODE_ENCODE_STANDARD_AV1 2 #define RENCODE_IB_PARAM_SESSION_INIT 0x00000003 -#define RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET 64 -/* return the offset in ib if id is found, -1 otherwise - * to speed up the searching we only search upto max_offset - */ -static int vcn_v4_0_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int max_offset) +/* return the offset in ib if id is found, -1 otherwise */ +static int vcn_v4_0_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int start) { int i; - for (i = 0; i < ib->length_dw && i < max_offset && ib->ptr[i] >= 8; i += ib->ptr[i]/4) { + for (i = start; i < ib->length_dw && ib->ptr[i] >= 8; i += ib->ptr[i] / 4) { if (ib->ptr[i + 1] == id) return i; } @@ -1936,56 +1928,56 @@ static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, struct amdgpu_vcn_decode_buffer *decode_buffer; uint64_t addr; uint32_t val; - int idx; + 
int idx = 0, sidx; /* The first instance can decode anything */ if (!ring->me) return 0; - /* RADEON_VCN_ENGINE_INFO is at the top of ib block */ - idx = vcn_v4_0_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO, - RADEON_VCN_ENGINE_INFO_MAX_OFFSET); - if (idx < 0) /* engine info is missing */ - return 0; - - val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */ - if (val == RADEON_VCN_ENGINE_TYPE_DECODE) { - decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6]; - - if (!(decode_buffer->valid_buf_flag & 0x1)) - return 0; - - addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 | - decode_buffer->msg_buffer_address_lo; - return vcn_v4_0_dec_msg(p, job, addr); - } else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) { - idx = vcn_v4_0_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT, - RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET); - if (idx >= 0 && ib->ptr[idx + 2] == RENCODE_ENCODE_STANDARD_AV1) - return vcn_v4_0_limit_sched(p, job); + while ((idx = vcn_v4_0_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO, idx)) >= 0) { + val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */ + if (val == RADEON_VCN_ENGINE_TYPE_DECODE) { + decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6]; + + if (!(decode_buffer->valid_buf_flag & 0x1)) + return 0; + + addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 | + decode_buffer->msg_buffer_address_lo; + return vcn_v4_0_dec_msg(p, job, addr); + } else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) { + sidx = vcn_v4_0_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT, idx); + if (sidx >= 0 && ib->ptr[sidx + 2] == RENCODE_ENCODE_STANDARD_AV1) + return vcn_v4_0_limit_sched(p, job); + } + idx += ib->ptr[idx] / 4; } return 0; } -static int vcn_v4_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) +static int vcn_v4_0_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me]; + int r; - if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)) - return -EOPNOTSUPP; - - vcn_v4_0_stop(vinst); - vcn_v4_0_start(vinst); - - return amdgpu_ring_test_helper(ring); + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + r = vcn_v4_0_stop(vinst); + if (r) + return r; + r = vcn_v4_0_start(vinst); + if (r) + return r; + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_ENC, .align_mask = 0x3f, .nop = VCN_ENC_CMD_NO_OP, - .extra_dw = sizeof(struct amdgpu_vcn_rb_metadata), + .extra_bytes = sizeof(struct amdgpu_vcn_rb_metadata), .get_rptr = vcn_v4_0_unified_ring_get_rptr, .get_wptr = vcn_v4_0_unified_ring_get_wptr, .set_wptr = vcn_v4_0_unified_ring_set_wptr, @@ -2241,67 +2233,6 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev) } } -static void vcn_v4_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) -{ - struct amdgpu_device *adev = ip_block->adev; - int i, j; - uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0); - uint32_t inst_off, is_powered; - - if (!adev->vcn.ip_dump) - return; - - drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst); - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - if (adev->vcn.harvest_config & (1 << i)) { - drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i); - continue; - } - - inst_off = i * reg_count; - is_powered = (adev->vcn.ip_dump[inst_off] & - 
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; - - if (is_powered) { - drm_printf(p, "\nActive Instance:VCN%d\n", i); - for (j = 0; j < reg_count; j++) - drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0[j].reg_name, - adev->vcn.ip_dump[inst_off + j]); - } else { - drm_printf(p, "\nInactive Instance:VCN%d\n", i); - } - } -} - -static void vcn_v4_0_dump_ip_state(struct amdgpu_ip_block *ip_block) -{ - struct amdgpu_device *adev = ip_block->adev; - int i, j; - bool is_powered; - uint32_t inst_off; - uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0); - - if (!adev->vcn.ip_dump) - return; - - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - if (adev->vcn.harvest_config & (1 << i)) - continue; - - inst_off = i * reg_count; - /* mmUVD_POWER_STATUS is always readable and is first element of the array */ - adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS); - is_powered = (adev->vcn.ip_dump[inst_off] & - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; - - if (is_powered) - for (j = 1; j < reg_count; j++) - adev->vcn.ip_dump[inst_off + j] = - RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0[j], - i)); - } -} - static const struct amd_ip_funcs vcn_v4_0_ip_funcs = { .name = "vcn_v4_0", .early_init = vcn_v4_0_early_init, @@ -2315,8 +2246,8 @@ static const struct amd_ip_funcs vcn_v4_0_ip_funcs = { .wait_for_idle = vcn_v4_0_wait_for_idle, .set_clockgating_state = vcn_v4_0_set_clockgating_state, .set_powergating_state = vcn_set_powergating_state, - .dump_ip_state = vcn_v4_0_dump_ip_state, - .print_ip_state = vcn_v4_0_print_ip_state, + .dump_ip_state = amdgpu_vcn_dump_ip_state, + .print_ip_state = amdgpu_vcn_print_ip_state, }; const struct amdgpu_ip_block_version vcn_v4_0_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index 5a33140f5723..eacf4e93ba2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -134,6 +134,19 @@ static int vcn_v4_0_3_early_init(struct amdgpu_ip_block *ip_block) return 0; } +static int vcn_v4_0_3_late_init(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + + adev->vcn.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); + + if (amdgpu_dpm_reset_vcn_is_supported(adev)) + adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + + return 0; +} + static int vcn_v4_0_3_fw_shared_init(struct amdgpu_device *adev, int inst_idx) { struct amdgpu_vcn4_fw_shared *fw_shared; @@ -160,8 +173,6 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block) struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r, vcn_inst; - uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3); - uint32_t *ptr; /* VCN DEC TRAP */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, @@ -201,7 +212,11 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block) ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id); sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, + + /* There are no per-instance irq source IDs on 4.0.3, the IH + * packets use a separate field to differentiate instances. 
+ */ + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[0].irq, 0, AMDGPU_RING_PRIO_DEFAULT, &adev->vcn.inst[i].sched_score); if (r) @@ -213,10 +228,6 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block) adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode; } - /* TODO: Add queue reset mask when FW fully supports it */ - adev->vcn.supported_reset = - amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); - if (amdgpu_sriov_vf(adev)) { r = amdgpu_virt_alloc_mm_table(adev); if (r) @@ -231,20 +242,11 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block) } } - /* Allocate memory for VCN IP Dump buffer */ - ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL); - if (!ptr) { - DRM_ERROR("Failed to allocate memory for VCN IP Dump\n"); - adev->vcn.ip_dump = NULL; - } else { - adev->vcn.ip_dump = ptr; - } - - r = amdgpu_vcn_sysfs_reset_mask_init(adev); + r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_4_0_3, ARRAY_SIZE(vcn_reg_list_4_0_3)); if (r) return r; - return 0; + return amdgpu_vcn_sysfs_reset_mask_init(adev); } /** @@ -261,7 +263,7 @@ static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block) if (drm_dev_enter(&adev->ddev, &idx)) { for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - volatile struct amdgpu_vcn4_fw_shared *fw_shared; + struct amdgpu_vcn4_fw_shared *fw_shared; fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; fw_shared->present_flag_0 = 0; @@ -281,13 +283,8 @@ static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block) amdgpu_vcn_sysfs_reset_mask_fini(adev); - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - r = amdgpu_vcn_sw_fini(adev, i); - if (r) - return r; - } - - kfree(adev->vcn.ip_dump); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) + amdgpu_vcn_sw_fini(adev, i); return 0; } @@ -391,7 +388,7 @@ static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block) vinst->set_pg_state(vinst, AMD_PG_STATE_GATE); } - if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN) && !amdgpu_sriov_vf(adev)) amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0); return 0; @@ -848,10 +845,10 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_vcn_inst *vinst, { struct amdgpu_device *adev = vinst->adev; int inst_idx = vinst->inst; - volatile struct amdgpu_vcn4_fw_shared *fw_shared = + struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; struct amdgpu_ring *ring; - int vcn_inst; + int vcn_inst, ret; uint32_t tmp; vcn_inst = GET_INST(VCN, inst_idx); @@ -944,8 +941,13 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_vcn_inst *vinst, VCN, 0, regUVD_MASTINT_EN), UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); - if (indirect) - amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM); + if (indirect) { + ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM); + if (ret) { + dev_err(adev->dev, "vcn sram load failed %d\n", ret); + return ret; + } + } ring = &adev->vcn.inst[inst_idx].ring_enc[0]; @@ -1010,8 +1012,8 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev) struct mmsch_v4_0_cmd_end end = { {0} }; struct mmsch_v4_0_3_init_header header; - volatile struct amdgpu_vcn4_fw_shared *fw_shared; - volatile struct amdgpu_fw_shared_rb_setup *rb_setup; + struct amdgpu_vcn4_fw_shared *fw_shared; + struct amdgpu_fw_shared_rb_setup *rb_setup; direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE; @@ -1185,7 +1187,7 @@ static int vcn_v4_0_3_start(struct 
amdgpu_vcn_inst *vinst) { struct amdgpu_device *adev = vinst->adev; int i = vinst->inst; - volatile struct amdgpu_vcn4_fw_shared *fw_shared; + struct amdgpu_vcn4_fw_shared *fw_shared; struct amdgpu_ring *ring; int j, k, r, vcn_inst; uint32_t tmp; @@ -1395,7 +1397,7 @@ static int vcn_v4_0_3_stop(struct amdgpu_vcn_inst *vinst) { struct amdgpu_device *adev = vinst->adev; int i = vinst->inst; - volatile struct amdgpu_vcn4_fw_shared *fw_shared; + struct amdgpu_vcn4_fw_shared *fw_shared; int r = 0, vcn_inst; uint32_t tmp; @@ -1594,18 +1596,16 @@ static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring) } } -static int vcn_v4_0_3_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) +static int vcn_v4_0_3_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { int r = 0; int vcn_inst; struct amdgpu_device *adev = ring->adev; struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me]; - if (amdgpu_sriov_vf(ring->adev)) - return -EOPNOTSUPP; - - if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)) - return -EOPNOTSUPP; + amdgpu_ring_reset_helper_begin(ring, timedout_fence); vcn_inst = GET_INST(VCN, ring->me); r = amdgpu_dpm_reset_vcn(adev, 1 << vcn_inst); @@ -1620,9 +1620,8 @@ static int vcn_v4_0_3_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED); vcn_v4_0_3_hw_init_inst(vinst); vcn_v4_0_3_start_dpg_mode(vinst, adev->vcn.inst[ring->me].indirect_sram); - r = amdgpu_ring_test_helper(ring); - return r; + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = { @@ -1875,71 +1874,10 @@ static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev) adev->vcn.inst->ras_poison_irq.funcs = &vcn_v4_0_3_ras_irq_funcs; } -static void vcn_v4_0_3_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) -{ - struct amdgpu_device *adev = ip_block->adev; - int i, j; - uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3); - uint32_t inst_off, is_powered; - - if (!adev->vcn.ip_dump) - return; - - drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst); - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - if (adev->vcn.harvest_config & (1 << i)) { - drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i); - continue; - } - - inst_off = i * reg_count; - is_powered = (adev->vcn.ip_dump[inst_off] & - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; - - if (is_powered) { - drm_printf(p, "\nActive Instance:VCN%d\n", i); - for (j = 0; j < reg_count; j++) - drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_3[j].reg_name, - adev->vcn.ip_dump[inst_off + j]); - } else { - drm_printf(p, "\nInactive Instance:VCN%d\n", i); - } - } -} - -static void vcn_v4_0_3_dump_ip_state(struct amdgpu_ip_block *ip_block) -{ - struct amdgpu_device *adev = ip_block->adev; - int i, j; - bool is_powered; - uint32_t inst_off, inst_id; - uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3); - - if (!adev->vcn.ip_dump) - return; - - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - if (adev->vcn.harvest_config & (1 << i)) - continue; - - inst_id = GET_INST(VCN, i); - inst_off = i * reg_count; - /* mmUVD_POWER_STATUS is always readable and is first element of the array */ - adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, inst_id, regUVD_POWER_STATUS); - is_powered = (adev->vcn.ip_dump[inst_off] & - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; - - if (is_powered) - for (j = 1; j < reg_count; j++) - adev->vcn.ip_dump[inst_off + j] = 
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0_3[j], - inst_id)); - } -} - static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = { .name = "vcn_v4_0_3", .early_init = vcn_v4_0_3_early_init, + .late_init = vcn_v4_0_3_late_init, .sw_init = vcn_v4_0_3_sw_init, .sw_fini = vcn_v4_0_3_sw_fini, .hw_init = vcn_v4_0_3_hw_init, @@ -1950,8 +1888,8 @@ static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = { .wait_for_idle = vcn_v4_0_3_wait_for_idle, .set_clockgating_state = vcn_v4_0_3_set_clockgating_state, .set_powergating_state = vcn_set_powergating_state, - .dump_ip_state = vcn_v4_0_3_dump_ip_state, - .print_ip_state = vcn_v4_0_3_print_ip_state, + .dump_ip_state = amdgpu_vcn_dump_ip_state, + .print_ip_state = amdgpu_vcn_print_ip_state, }; const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c index 16ade84facc7..b107ee80e472 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c @@ -147,12 +147,9 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block) struct amdgpu_ring *ring; struct amdgpu_device *adev = ip_block->adev; int i, r; - uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5); - uint32_t *ptr; - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - volatile struct amdgpu_vcn4_fw_shared *fw_shared; + struct amdgpu_vcn4_fw_shared *fw_shared; if (adev->vcn.harvest_config & (1 << i)) continue; @@ -220,7 +217,8 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block) } adev->vcn.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); - adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + if (!amdgpu_sriov_vf(adev)) + adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_vcn_sysfs_reset_mask_init(adev); if (r) @@ -232,15 +230,9 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block) return r; } - /* Allocate memory for VCN IP Dump buffer */ - ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL); - if (!ptr) { - DRM_ERROR("Failed to allocate memory for VCN IP Dump\n"); - adev->vcn.ip_dump = NULL; - } else { - adev->vcn.ip_dump = ptr; - } - return 0; + r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_4_0_5, ARRAY_SIZE(vcn_reg_list_4_0_5)); + + return r; } /** @@ -257,7 +249,7 @@ static int vcn_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block) if (drm_dev_enter(adev_to_drm(adev), &idx)) { for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - volatile struct amdgpu_vcn4_fw_shared *fw_shared; + struct amdgpu_vcn4_fw_shared *fw_shared; if (adev->vcn.harvest_config & (1 << i)) continue; @@ -278,13 +270,9 @@ static int vcn_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block) if (r) return r; - r = amdgpu_vcn_sw_fini(adev, i); - if (r) - return r; + amdgpu_vcn_sw_fini(adev, i); } - kfree(adev->vcn.ip_dump); - return 0; } @@ -922,9 +910,10 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, { struct amdgpu_device *adev = vinst->adev; int inst_idx = vinst->inst; - volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; + struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; struct amdgpu_ring *ring; uint32_t tmp; + int ret; /* disable register anti-hang mechanism */ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1, @@ -1005,8 +994,13 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, VCN, inst_idx, regUVD_MASTINT_EN), 
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); - if (indirect) - amdgpu_vcn_psp_update_sram(adev, inst_idx, 0); + if (indirect) { + ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0); + if (ret) { + dev_err(adev->dev, "vcn sram load failed %d\n", ret); + return ret; + } + } ring = &adev->vcn.inst[inst_idx].ring_enc[0]; @@ -1053,7 +1047,7 @@ static int vcn_v4_0_5_start(struct amdgpu_vcn_inst *vinst) { struct amdgpu_device *adev = vinst->adev; int i = vinst->inst; - volatile struct amdgpu_vcn4_fw_shared *fw_shared; + struct amdgpu_vcn4_fw_shared *fw_shared; struct amdgpu_ring *ring; uint32_t tmp; int j, k, r; @@ -1272,7 +1266,7 @@ static int vcn_v4_0_5_stop(struct amdgpu_vcn_inst *vinst) { struct amdgpu_device *adev = vinst->adev; int i = vinst->inst; - volatile struct amdgpu_vcn4_fw_shared *fw_shared; + struct amdgpu_vcn4_fw_shared *fw_shared; uint32_t tmp; int r = 0; @@ -1465,18 +1459,22 @@ static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring) } } -static int vcn_v4_0_5_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) +static int vcn_v4_0_5_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me]; + int r; - if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)) - return -EOPNOTSUPP; - - vcn_v4_0_5_stop(vinst); - vcn_v4_0_5_start(vinst); - - return amdgpu_ring_test_helper(ring); + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + r = vcn_v4_0_5_stop(vinst); + if (r) + return r; + r = vcn_v4_0_5_start(vinst); + if (r) + return r; + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = { @@ -1591,7 +1589,7 @@ static int vcn_v4_0_5_set_clockgating_state(struct amdgpu_ip_block *ip_block, enum amd_clockgating_state state) { struct amdgpu_device *adev = ip_block->adev; - bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; + bool enable = state == AMD_CG_STATE_GATE; int i; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { @@ -1699,67 +1697,6 @@ static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev) } } -static void vcn_v4_0_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) -{ - struct amdgpu_device *adev = ip_block->adev; - int i, j; - uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5); - uint32_t inst_off, is_powered; - - if (!adev->vcn.ip_dump) - return; - - drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst); - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - if (adev->vcn.harvest_config & (1 << i)) { - drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i); - continue; - } - - inst_off = i * reg_count; - is_powered = (adev->vcn.ip_dump[inst_off] & - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; - - if (is_powered) { - drm_printf(p, "\nActive Instance:VCN%d\n", i); - for (j = 0; j < reg_count; j++) - drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_5[j].reg_name, - adev->vcn.ip_dump[inst_off + j]); - } else { - drm_printf(p, "\nInactive Instance:VCN%d\n", i); - } - } -} - -static void vcn_v4_0_5_dump_ip_state(struct amdgpu_ip_block *ip_block) -{ - struct amdgpu_device *adev = ip_block->adev; - int i, j; - bool is_powered; - uint32_t inst_off; - uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5); - - if (!adev->vcn.ip_dump) - return; - - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - if (adev->vcn.harvest_config & (1 << i)) - continue; - - inst_off = i * reg_count; - /* mmUVD_POWER_STATUS is always readable and is first element of the array */ - adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS); - is_powered = (adev->vcn.ip_dump[inst_off] & - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; - - if (is_powered) - for (j = 1; j < reg_count; j++) - adev->vcn.ip_dump[inst_off + j] = - RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0_5[j], - i)); - } -} - static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = { .name = "vcn_v4_0_5", .early_init = vcn_v4_0_5_early_init, @@ -1773,8 +1710,8 @@ static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = { .wait_for_idle = vcn_v4_0_5_wait_for_idle, .set_clockgating_state = vcn_v4_0_5_set_clockgating_state, .set_powergating_state = vcn_set_powergating_state, - .dump_ip_state = vcn_v4_0_5_dump_ip_state, - .print_ip_state = vcn_v4_0_5_print_ip_state, + .dump_ip_state = amdgpu_vcn_dump_ip_state, + .print_ip_state = amdgpu_vcn_print_ip_state, }; const struct amdgpu_ip_block_version vcn_v4_0_5_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c index f8e3f0b882da..0202df5db1e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c @@ -115,21 +115,6 @@ static int vcn_v5_0_0_early_init(struct amdgpu_ip_block *ip_block) return 0; } -void vcn_v5_0_0_alloc_ip_dump(struct amdgpu_device *adev) -{ - uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0); - uint32_t *ptr; - - /* Allocate memory for VCN IP Dump buffer */ - ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL); - if (!ptr) { - DRM_ERROR("Failed to allocate memory for VCN IP Dump\n"); - adev->vcn.ip_dump = NULL; - } else { - adev->vcn.ip_dump = ptr; - } -} - /** * vcn_v5_0_0_sw_init - sw init for VCN block * @@ -144,7 +129,7 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block) int i, r; for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - volatile struct amdgpu_vcn5_fw_shared *fw_shared; + struct 
amdgpu_vcn5_fw_shared *fw_shared; if (adev->vcn.harvest_config & (1 << i)) continue; @@ -198,9 +183,12 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block) adev->vcn.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); - adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + if (!amdgpu_sriov_vf(adev)) + adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; - vcn_v5_0_0_alloc_ip_dump(adev); + r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_5_0, ARRAY_SIZE(vcn_reg_list_5_0)); + if (r) + return r; r = amdgpu_vcn_sysfs_reset_mask_init(adev); if (r) @@ -223,7 +211,7 @@ static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block) if (drm_dev_enter(adev_to_drm(adev), &idx)) { for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - volatile struct amdgpu_vcn5_fw_shared *fw_shared; + struct amdgpu_vcn5_fw_shared *fw_shared; if (adev->vcn.harvest_config & (1 << i)) continue; @@ -244,13 +232,8 @@ static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block) amdgpu_vcn_sysfs_reset_mask_fini(adev); - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - r = amdgpu_vcn_sw_fini(adev, i); - if (r) - return r; - } - - kfree(adev->vcn.ip_dump); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) + amdgpu_vcn_sw_fini(adev, i); return 0; } @@ -709,9 +692,10 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, { struct amdgpu_device *adev = vinst->adev; int inst_idx = vinst->inst; - volatile struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; + struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; struct amdgpu_ring *ring; uint32_t tmp; + int ret; /* disable register anti-hang mechanism */ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1, @@ -765,8 +749,13 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, VCN, inst_idx, regUVD_MASTINT_EN), UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); - if (indirect) - amdgpu_vcn_psp_update_sram(adev, inst_idx, 0); + if (indirect) { + ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0); + if (ret) { + dev_err(adev->dev, "%s: vcn sram load failed %d\n", __func__, ret); + return ret; + } + } ring = &adev->vcn.inst[inst_idx].ring_enc[0]; @@ -813,7 +802,7 @@ static int vcn_v5_0_0_start(struct amdgpu_vcn_inst *vinst) { struct amdgpu_device *adev = vinst->adev; int i = vinst->inst; - volatile struct amdgpu_vcn5_fw_shared *fw_shared; + struct amdgpu_vcn5_fw_shared *fw_shared; struct amdgpu_ring *ring; uint32_t tmp; int j, k, r; @@ -1006,7 +995,7 @@ static int vcn_v5_0_0_stop(struct amdgpu_vcn_inst *vinst) { struct amdgpu_device *adev = vinst->adev; int i = vinst->inst; - volatile struct amdgpu_vcn5_fw_shared *fw_shared; + struct amdgpu_vcn5_fw_shared *fw_shared; uint32_t tmp; int r = 0; @@ -1192,18 +1181,22 @@ static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring) } } -static int vcn_v5_0_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) +static int vcn_v5_0_0_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me]; + int r; - if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)) - return -EOPNOTSUPP; - - vcn_v5_0_0_stop(vinst); - vcn_v5_0_0_start(vinst); - - return amdgpu_ring_test_helper(ring); + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + r = vcn_v5_0_0_stop(vinst); + if (r) + return r; + r = vcn_v5_0_0_start(vinst); + if (r) + return 
r; + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = { @@ -1315,7 +1308,7 @@ static int vcn_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block, enum amd_clockgating_state state) { struct amdgpu_device *adev = ip_block->adev; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = state == AMD_CG_STATE_GATE; int i; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { @@ -1423,67 +1416,6 @@ static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev) } } -void vcn_v5_0_0_print_ip_state(struct amdgpu_ip_block *ip_block, - struct drm_printer *p) -{ - struct amdgpu_device *adev = ip_block->adev; - int i, j; - uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0); - uint32_t inst_off, is_powered; - - if (!adev->vcn.ip_dump) - return; - - drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst); - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - if (adev->vcn.harvest_config & (1 << i)) { - drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i); - continue; - } - - inst_off = i * reg_count; - is_powered = (adev->vcn.ip_dump[inst_off] & - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; - - if (is_powered) { - drm_printf(p, "\nActive Instance:VCN%d\n", i); - for (j = 0; j < reg_count; j++) - drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_5_0[j].reg_name, - adev->vcn.ip_dump[inst_off + j]); - } else { - drm_printf(p, "\nInactive Instance:VCN%d\n", i); - } - } -} - -void vcn_v5_0_0_dump_ip_state(struct amdgpu_ip_block *ip_block) -{ - struct amdgpu_device *adev = ip_block->adev; - int i, j; - bool is_powered; - uint32_t inst_off; - uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0); - - if (!adev->vcn.ip_dump) - return; - - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - if (adev->vcn.harvest_config & (1 << i)) - continue; - - inst_off = i * reg_count; - /* mmUVD_POWER_STATUS is always readable and is first element of the array */ - adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS); - is_powered = (adev->vcn.ip_dump[inst_off] & - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; - - if (is_powered) - for (j = 1; j < reg_count; j++) - adev->vcn.ip_dump[inst_off + j] = - RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_5_0[j], i)); - } -} - static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = { .name = "vcn_v5_0_0", .early_init = vcn_v5_0_0_early_init, @@ -1497,8 +1429,8 @@ static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = { .wait_for_idle = vcn_v5_0_0_wait_for_idle, .set_clockgating_state = vcn_v5_0_0_set_clockgating_state, .set_powergating_state = vcn_set_powergating_state, - .dump_ip_state = vcn_v5_0_0_dump_ip_state, - .print_ip_state = vcn_v5_0_0_print_ip_state, + .dump_ip_state = amdgpu_vcn_dump_ip_state, + .print_ip_state = amdgpu_vcn_print_ip_state, }; const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h index b8927652bc50..51bbccd4360f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h @@ -32,11 +32,6 @@ #define VCN_VID_IP_ADDRESS 0x0 #define VCN_AON_IP_ADDRESS 0x30000 -void vcn_v5_0_0_alloc_ip_dump(struct amdgpu_device *adev); -void vcn_v5_0_0_print_ip_state(struct amdgpu_ip_block *ip_block, - struct drm_printer *p); -void vcn_v5_0_0_dump_ip_state(struct amdgpu_ip_block *ip_block); - extern const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block; #endif /* __VCN_V5_0_0_H__ */ diff --git 
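/*
 * Illustrative sketch, not from this patch: the VCN 4.0.3 / 4.0.5 / 5.0.0
 * ring .reset callbacks above now share one shape; the generic reset
 * helpers bracket an instance stop/start instead of open-coded
 * -EOPNOTSUPP checks and a trailing amdgpu_ring_test_helper() call.
 * Assumes the stop/start callbacks shown above.
 */
static int vcn_example_ring_reset(struct amdgpu_ring *ring,
				  unsigned int vmid,
				  struct amdgpu_fence *timedout_fence)
{
	struct amdgpu_vcn_inst *vinst = &ring->adev->vcn.inst[ring->me];
	int r;

	/* common pre-reset bookkeeping for the timed-out fence */
	amdgpu_ring_reset_helper_begin(ring, timedout_fence);

	r = vcn_v5_0_0_stop(vinst);	/* power the instance down */
	if (r)
		return r;

	r = vcn_v5_0_0_start(vinst);	/* and bring it back up */
	if (r)
		return r;

	/* common post-reset handling (ring re-test, fence cleanup) */
	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}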
a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c index cdefd7fcb0da..714350cabf2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c @@ -40,6 +40,40 @@ #include <drm/drm_drv.h> +static const struct amdgpu_hwip_reg_entry vcn_reg_list_5_0_1[] = { + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE) +}; + static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev); static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev); static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev); @@ -79,6 +113,25 @@ static int vcn_v5_0_1_early_init(struct amdgpu_ip_block *ip_block) return 0; } +static int vcn_v5_0_1_late_init(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + + adev->vcn.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); + + switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { + case IP_VERSION(13, 0, 12): + if ((adev->psp.sos.fw_version >= 0x00450025) && amdgpu_dpm_reset_vcn_is_supported(adev)) + adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + break; + default: + break; + } + + return 0; +} + static void vcn_v5_0_1_fw_shared_init(struct amdgpu_device *adev, int inst_idx) { struct amdgpu_vcn5_fw_shared *fw_shared; @@ -153,17 +206,23 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block) vcn_v5_0_1_fw_shared_init(adev, i); } - /* TODO: Add queue reset mask when FW fully supports it */ - adev->vcn.supported_reset = - amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); - if (amdgpu_sriov_vf(adev)) { r = amdgpu_virt_alloc_mm_table(adev); if (r) return r; } - vcn_v5_0_0_alloc_ip_dump(adev); + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) { + r = amdgpu_vcn_ras_sw_init(adev); + if (r) { + dev_err(adev->dev, "Failed to initialize vcn ras block!\n"); + return r; + } + } + + r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_5_0_1, ARRAY_SIZE(vcn_reg_list_5_0_1)); + if (r) + return r; return 
amdgpu_vcn_sysfs_reset_mask_init(adev); } @@ -182,7 +241,7 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block) if (drm_dev_enter(adev_to_drm(adev), &idx)) { for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - volatile struct amdgpu_vcn5_fw_shared *fw_shared; + struct amdgpu_vcn5_fw_shared *fw_shared; fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; fw_shared->present_flag_0 = 0; @@ -201,15 +260,27 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block) return r; } - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - r = amdgpu_vcn_sw_fini(adev, i); - if (r) - return r; - } - amdgpu_vcn_sysfs_reset_mask_fini(adev); - kfree(adev->vcn.ip_dump); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) + amdgpu_vcn_sw_fini(adev, i); + + return 0; +} + +static int vcn_v5_0_1_hw_init_inst(struct amdgpu_device *adev, int i) +{ + struct amdgpu_ring *ring; + int vcn_inst; + + vcn_inst = GET_INST(VCN, i); + ring = &adev->vcn.inst[i].ring_enc[0]; + + if (ring->use_doorbell) + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, + ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 11 * vcn_inst), + adev->vcn.inst[i].aid_id); return 0; } @@ -225,7 +296,7 @@ static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; - int i, r, vcn_inst; + int i, r; if (amdgpu_sriov_vf(adev)) { r = vcn_v5_0_1_start_sriov(adev); @@ -243,14 +314,8 @@ static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block) if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100) adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED); for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - vcn_inst = GET_INST(VCN, i); ring = &adev->vcn.inst[i].ring_enc[0]; - - if (ring->use_doorbell) - adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, - ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + - 11 * vcn_inst), - adev->vcn.inst[i].aid_id); + vcn_v5_0_1_hw_init_inst(adev, i); /* Re-init fw_shared, if required */ vcn_v5_0_1_fw_shared_init(adev, i); @@ -284,7 +349,7 @@ static int vcn_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block) vinst->set_pg_state(vinst, AMD_PG_STATE_GATE); } - if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN) && !amdgpu_sriov_vf(adev)) amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0); return 0; @@ -601,11 +666,11 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst, { struct amdgpu_device *adev = vinst->adev; int inst_idx = vinst->inst; - volatile struct amdgpu_vcn5_fw_shared *fw_shared = + struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; struct amdgpu_ring *ring; struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__PAUSE}; - int vcn_inst; + int vcn_inst, ret; uint32_t tmp; vcn_inst = GET_INST(VCN, inst_idx); @@ -666,8 +731,13 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst, VCN, 0, regUVD_MASTINT_EN), UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); - if (indirect) - amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM); + if (indirect) { + ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM); + if (ret) { + dev_err(adev->dev, "vcn sram load failed %d\n", ret); + return ret; + } + } /* resetting ring, fw should not check RB ring */ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET; @@ -732,8 +802,8 @@ static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev) struct mmsch_v5_0_cmd_end end = { {0} }; struct mmsch_v5_0_init_header 
header; - volatile struct amdgpu_vcn5_fw_shared *fw_shared; - volatile struct amdgpu_fw_shared_rb_setup *rb_setup; + struct amdgpu_vcn5_fw_shared *fw_shared; + struct amdgpu_fw_shared_rb_setup *rb_setup; direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE; @@ -907,7 +977,7 @@ static int vcn_v5_0_1_start(struct amdgpu_vcn_inst *vinst) { struct amdgpu_device *adev = vinst->adev; int i = vinst->inst; - volatile struct amdgpu_vcn5_fw_shared *fw_shared; + struct amdgpu_vcn5_fw_shared *fw_shared; struct amdgpu_ring *ring; uint32_t tmp; int j, k, r, vcn_inst; @@ -1099,7 +1169,7 @@ static int vcn_v5_0_1_stop(struct amdgpu_vcn_inst *vinst) { struct amdgpu_device *adev = vinst->adev; int i = vinst->inst; - volatile struct amdgpu_vcn5_fw_shared *fw_shared; + struct amdgpu_vcn5_fw_shared *fw_shared; uint32_t tmp; int r = 0, vcn_inst; @@ -1229,6 +1299,31 @@ static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring) } } +static int vcn_v5_0_1_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) +{ + int r = 0; + int vcn_inst; + struct amdgpu_device *adev = ring->adev; + struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me]; + + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + + vcn_inst = GET_INST(VCN, ring->me); + r = amdgpu_dpm_reset_vcn(adev, 1 << vcn_inst); + + if (r) { + DRM_DEV_ERROR(adev->dev, "VCN reset fail : %d\n", r); + return r; + } + + vcn_v5_0_1_hw_init_inst(adev, ring->me); + vcn_v5_0_1_start_dpg_mode(vinst, vinst->indirect_sram); + + return amdgpu_ring_reset_helper_end(ring, timedout_fence); +} + static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_ENC, .align_mask = 0x3f, @@ -1257,6 +1352,7 @@ static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = { .emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg, .emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, + .reset = vcn_v5_0_1_ring_reset, }; /** @@ -1460,7 +1556,7 @@ static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev) static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = { .name = "vcn_v5_0_1", .early_init = vcn_v5_0_1_early_init, - .late_init = NULL, + .late_init = vcn_v5_0_1_late_init, .sw_init = vcn_v5_0_1_sw_init, .sw_fini = vcn_v5_0_1_sw_fini, .hw_init = vcn_v5_0_1_hw_init, @@ -1475,8 +1571,8 @@ static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = { .post_soft_reset = NULL, .set_clockgating_state = vcn_v5_0_1_set_clockgating_state, .set_powergating_state = vcn_set_powergating_state, - .dump_ip_state = vcn_v5_0_0_dump_ip_state, - .print_ip_state = vcn_v5_0_0_print_ip_state, + .dump_ip_state = amdgpu_vcn_dump_ip_state, + .print_ip_state = amdgpu_vcn_print_ip_state, }; const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block = { @@ -1557,7 +1653,7 @@ static int vcn_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_bank /* reference to smu driver if header file */ static int vcn_v5_0_1_err_codes[] = { - 14, 15, /* VCN */ + 14, 15, 47, /* VCN [D|V|S] */ }; static bool vcn_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, @@ -1603,6 +1699,13 @@ static int vcn_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_commo if (r) goto late_fini; + if (amdgpu_ras_is_supported(adev, ras_block->block) && + adev->vcn.inst->ras_poison_irq.funcs) { + r = amdgpu_irq_get(adev, &adev->vcn.inst->ras_poison_irq, 0); + if (r) + goto late_fini; + } + return 0; late_fini: diff --git 
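/*
 * Illustrative sketch, not from this patch: per-queue reset support is now
 * decided up front via adev->vcn.supported_reset instead of being rejected
 * inside each .reset callback. Condensed from the hunks above (VCN 4.0.5 /
 * 5.0.0 gate it in sw_init, VCN 5.0.1 gates it in late_init).
 */
static void vcn_example_init_supported_reset(struct amdgpu_device *adev)
{
	adev->vcn.supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);

	/* VCN 4.0.5 / 5.0.0: per-queue reset only on bare metal */
	if (!amdgpu_sriov_vf(adev))
		adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;

	/*
	 * VCN 5.0.1 (MP0 13.0.12) additionally requires PSP SOS firmware
	 * >= 0x00450025 and amdgpu_dpm_reset_vcn_is_supported(adev) before
	 * advertising AMDGPU_RESET_TYPE_PER_QUEUE.
	 */
}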
a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 9b3510e53112..a611a7345125 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -67,7 +67,6 @@ #include "sdma_v2_4.h" #include "sdma_v3_0.h" #include "dce_v10_0.h" -#include "dce_v11_0.h" #include "iceland_ih.h" #include "tonga_ih.h" #include "cz_ih.h" @@ -2124,8 +2123,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) else if (amdgpu_device_has_dc_support(adev)) amdgpu_device_ip_block_add(adev, &dm_ip_block); #endif - else - amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block); amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block); amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block); break; @@ -2142,8 +2139,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) else if (amdgpu_device_has_dc_support(adev)) amdgpu_device_ip_block_add(adev, &dm_ip_block); #endif - else - amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block); amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block); amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block); #if defined(CONFIG_DRM_AMD_ACP) @@ -2163,8 +2158,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) else if (amdgpu_device_has_dc_support(adev)) amdgpu_device_ip_block_add(adev, &dm_ip_block); #endif - else - amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block); amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block); amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block); #if defined(CONFIG_DRM_AMD_ACP) diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig index 62e88e5362e9..16e12c9913f9 100644 --- a/drivers/gpu/drm/amd/amdkfd/Kconfig +++ b/drivers/gpu/drm/amd/amdkfd/Kconfig @@ -5,7 +5,7 @@ config HSA_AMD bool "HSA kernel driver for AMD GPU devices" - depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64 || (RISCV && 64BIT)) + depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64 || (RISCV && 64BIT) || (LOONGARCH && 64BIT)) select HMM_MIRROR select MMU_NOTIFIER select DRM_AMDGPU_USERPTR diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index a2149afa5803..0f0719528bcc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -22,7 +22,6 @@ */ #include <linux/device.h> -#include <linux/export.h> #include <linux/err.h> #include <linux/fs.h> #include <linux/file.h> @@ -522,15 +521,10 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p, cu_mask_size = sizeof(uint32_t) * (max_num_cus/32); } - minfo.cu_mask.ptr = kzalloc(cu_mask_size, GFP_KERNEL); - if (!minfo.cu_mask.ptr) - return -ENOMEM; - - retval = copy_from_user(minfo.cu_mask.ptr, cu_mask_ptr, cu_mask_size); - if (retval) { + minfo.cu_mask.ptr = memdup_user(cu_mask_ptr, cu_mask_size); + if (IS_ERR(minfo.cu_mask.ptr)) { pr_debug("Could not copy CU mask from userspace"); - retval = -EFAULT; - goto out; + return PTR_ERR(minfo.cu_mask.ptr); } mutex_lock(&p->mutex); @@ -539,7 +533,6 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p, mutex_unlock(&p->mutex); -out: kfree(minfo.cu_mask.ptr); return retval; } @@ -1071,7 +1064,12 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, svm_range_list_lock_and_flush_work(&p->svms, current->mm); mutex_lock(&p->svms.lock); mmap_write_unlock(current->mm); - if (interval_tree_iter_first(&p->svms.objects, + + /* Skip a special case that allocates VRAM without VA, + * VA will be invalid of 0. 
+ */ + if (!(!args->va_addr && (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)) && + interval_tree_iter_first(&p->svms.objects, args->va_addr >> PAGE_SHIFT, (args->va_addr + args->size - 1) >> PAGE_SHIFT)) { pr_err("Address: 0x%llx already allocated by SVM\n", @@ -2567,8 +2565,8 @@ static int criu_restore(struct file *filep, pr_debug("CRIU restore (num_devices:%u num_bos:%u num_objects:%u priv_data_size:%llu)\n", args->num_devices, args->num_bos, args->num_objects, args->priv_data_size); - if (!args->bos || !args->devices || !args->priv_data || !args->priv_data_size || - !args->num_devices || !args->num_bos) + if ((args->num_bos > 0 && !args->bos) || !args->devices || !args->priv_data || + !args->priv_data_size || !args->num_devices) return -EINVAL; mutex_lock(&p->mutex); @@ -3253,8 +3251,10 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) int retcode = -EINVAL; bool ptrace_attached = false; - if (nr >= AMDKFD_CORE_IOCTL_COUNT) + if (nr >= AMDKFD_CORE_IOCTL_COUNT) { + retcode = -ENOTTY; goto err_i1; + } if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) { u32 amdkfd_size; @@ -3267,8 +3267,10 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) asize = amdkfd_size; cmd = ioctl->cmd; - } else + } else { + retcode = -ENOTTY; goto err_i1; + } dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index bf0854bd5555..e9cfb80bd436 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -495,6 +495,7 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) mutex_init(&kfd->doorbell_mutex); ida_init(&kfd->doorbell_ida); + atomic_set(&kfd->kfd_processes_count, 0); return kfd; } @@ -971,7 +972,7 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd, kfd_smi_event_update_gpu_reset(node, false, reset_context); } - kgd2kfd_suspend(kfd, false); + kgd2kfd_suspend(kfd, true); for (i = 0; i < kfd->num_nodes; i++) kfd_signal_reset_event(kfd->nodes[i]); @@ -1013,13 +1014,33 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd) return 0; } -bool kfd_is_locked(void) +bool kfd_is_locked(struct kfd_dev *kfd) { + uint8_t id = 0; + struct kfd_node *dev; + lockdep_assert_held(&kfd_processes_mutex); - return (kfd_locked > 0); + + /* check reset/suspend lock */ + if (kfd_locked > 0) + return true; + + if (kfd) + return kfd->kfd_dev_lock > 0; + + /* check lock on all cgroup accessible devices */ + while (kfd_topology_enum_kfd_devices(id++, &dev) == 0) { + if (!dev || kfd_devcgroup_check_permission(dev)) + continue; + + if (dev->kfd->kfd_dev_lock > 0) + return true; + } + + return false; } -void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) +void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc) { struct kfd_node *node; int i; @@ -1027,14 +1048,8 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) if (!kfd->init_complete) return; - /* for runtime suspend, skip locking kfd */ - if (!run_pm) { - mutex_lock(&kfd_processes_mutex); - /* For first KFD device suspend all the KFD processes */ - if (++kfd_locked == 1) - kfd_suspend_all_processes(); - mutex_unlock(&kfd_processes_mutex); - } + if (suspend_proc) + kgd2kfd_suspend_process(kfd); for (i = 0; i < kfd->num_nodes; i++) { node = kfd->nodes[i]; @@ -1042,7 +1057,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) } } -int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) +int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc) { 
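/*
 * Not from this patch: the old run_pm flag becomes an explicit
 * suspend_proc/resume_proc flag, and the process freeze/thaw moves into
 * kgd2kfd_suspend_process()/kgd2kfd_resume_process() below. Illustrative
 * callers, based on the hunks in this file:
 *
 *	kgd2kfd_suspend(kfd, true);	// pre-reset path: also freeze KFD processes
 *	kgd2kfd_suspend(kfd, false);	// suspend the device only, leave processes running
 */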
int ret, i; @@ -1055,14 +1070,36 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) return ret; } - /* for runtime resume, skip unlocking kfd */ - if (!run_pm) { - mutex_lock(&kfd_processes_mutex); - if (--kfd_locked == 0) - ret = kfd_resume_all_processes(); - WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error"); - mutex_unlock(&kfd_processes_mutex); - } + if (resume_proc) + ret = kgd2kfd_resume_process(kfd); + + return ret; +} + +void kgd2kfd_suspend_process(struct kfd_dev *kfd) +{ + if (!kfd->init_complete) + return; + + mutex_lock(&kfd_processes_mutex); + /* For first KFD device suspend all the KFD processes */ + if (++kfd_locked == 1) + kfd_suspend_all_processes(); + mutex_unlock(&kfd_processes_mutex); +} + +int kgd2kfd_resume_process(struct kfd_dev *kfd) +{ + int ret = 0; + + if (!kfd->init_complete) + return 0; + + mutex_lock(&kfd_processes_mutex); + if (--kfd_locked == 0) + ret = kfd_resume_all_processes(); + WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error"); + mutex_unlock(&kfd_processes_mutex); return ret; } @@ -1097,7 +1134,15 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) } for (i = 0; i < kfd->num_nodes; i++) { - node = kfd->nodes[i]; + /* Race if another thread in b/w + * kfd_cleanup_nodes and kfree(kfd), + * when kfd->nodes[i] = NULL + */ + if (kfd->nodes[i]) + node = kfd->nodes[i]; + else + return; + spin_lock_irqsave(&node->interrupt_lock, flags); if (node->interrupts_active @@ -1442,24 +1487,62 @@ unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node) kfd_get_num_sdma_engines(node); } -int kgd2kfd_check_and_lock_kfd(void) +int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd) { + struct kfd_process *p; + int r = 0, temp, idx; + mutex_lock(&kfd_processes_mutex); - if (!hash_empty(kfd_processes_table) || kfd_is_locked()) { - mutex_unlock(&kfd_processes_mutex); - return -EBUSY; + + /* kfd_processes_count is per kfd_dev, return -EBUSY without + * further check + */ + if (!!atomic_read(&kfd->kfd_processes_count)) { + pr_debug("process_wq_release not finished\n"); + r = -EBUSY; + goto out; + } + + if (hash_empty(kfd_processes_table) && !kfd_is_locked(kfd)) + goto out; + + /* fail under system reset/resume or kfd device is partition switching. */ + if (kfd_is_locked(kfd)) { + r = -EBUSY; + goto out; } - ++kfd_locked; + /* + * ensure all running processes are cgroup excluded from device before mode switch. + * i.e. no pdd was created on the process socket. 
+ */ + idx = srcu_read_lock(&kfd_processes_srcu); + hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { + int i; + + for (i = 0; i < p->n_pdds; i++) { + if (p->pdds[i]->dev->kfd != kfd) + continue; + + r = -EBUSY; + goto proc_check_unlock; + } + } + +proc_check_unlock: + srcu_read_unlock(&kfd_processes_srcu, idx); +out: + if (!r) + ++kfd->kfd_dev_lock; mutex_unlock(&kfd_processes_mutex); - return 0; + return r; } -void kgd2kfd_unlock_kfd(void) +void kgd2kfd_unlock_kfd(struct kfd_dev *kfd) { mutex_lock(&kfd_processes_mutex); - --kfd_locked; + --kfd->kfd_dev_lock; mutex_unlock(&kfd_processes_mutex); } @@ -1485,6 +1568,25 @@ int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id) return ret; } +int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd) +{ + struct kfd_node *node; + int i, r; + + if (!kfd->init_complete) + return 0; + + for (i = 0; i < kfd->num_nodes; i++) { + node = kfd->nodes[i]; + r = node->dqm->ops.unhalt(node->dqm); + if (r) { + dev_err(kfd_device, "Error in starting scheduler\n"); + return r; + } + } + return 0; +} + int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id) { struct kfd_node *node; @@ -1502,6 +1604,23 @@ int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id) return node->dqm->ops.halt(node->dqm); } +int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd) +{ + struct kfd_node *node; + int i, r; + + if (!kfd->init_complete) + return 0; + + for (i = 0; i < kfd->num_nodes; i++) { + node = kfd->nodes[i]; + r = node->dqm->ops.halt(node->dqm); + if (r) + return r; + } + return 0; +} + bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id) { struct kfd_node *node; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 76359c6a3f3a..6c5c7c1bf5ed 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -2312,7 +2312,7 @@ static int reset_hung_queues_sdma(struct device_queue_manager *dqm) continue; /* Reset engine and check. */ - if (amdgpu_sdma_reset_engine(dqm->dev->adev, i) || + if (amdgpu_sdma_reset_engine(dqm->dev->adev, i, false) || dqm->dev->kfd2kgd->hqd_sdma_get_doorbell(dqm->dev->adev, i, j) || !set_sdma_queue_as_reset(dqm, doorbell_off)) { r = -ENOTRECOVERABLE; @@ -2339,9 +2339,18 @@ reset_fail: static int reset_queues_on_hws_hang(struct device_queue_manager *dqm, bool is_sdma) { + struct amdgpu_device *adev = dqm->dev->adev; + while (halt_if_hws_hang) schedule(); + if (adev->debug_disable_gpu_ring_reset) { + dev_info_once(adev->dev, + "%s queue hung, but ring reset disabled", + is_sdma ? 
"sdma" : "compute"); + + return -EPERM; + } if (!amdgpu_gpu_recovery) return -ENOTRECOVERABLE; @@ -2716,7 +2725,7 @@ static void get_queue_checkpoint_info(struct device_queue_manager *dqm, dqm_lock(dqm); mqd_mgr = dqm->mqd_mgrs[mqd_type]; - *mqd_size = mqd_mgr->mqd_size; + *mqd_size = mqd_mgr->mqd_size * NUM_XCC(mqd_mgr->dev->xcc_mask); *ctl_stack_size = 0; if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE && mqd_mgr->get_checkpoint_info) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 2b294ada3ec0..82905f3e54dd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -1302,7 +1302,7 @@ void kfd_signal_reset_event(struct kfd_node *dev) if (ti) { dev_err(dev->adev->dev, "Queues reset on process %s tid %d thread %s pid %d\n", - ti->process_name, ti->tgid, ti->task_name, ti->pid); + ti->process_name, ti->tgid, ti->task.comm, ti->task.pid); amdgpu_vm_put_task_info(ti); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index dbcb60eb54b2..1d170dc50df3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -23,7 +23,6 @@ */ #include <linux/device.h> -#include <linux/export.h> #include <linux/err.h> #include <linux/fs.h> #include <linux/sched.h> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index 2b0a830f5b29..fb3129883a4c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -46,11 +46,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev, int retval; union PM4_MES_TYPE_3_HEADER nop; - if (WARN_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ)) - return false; - - pr_debug("Initializing queue type %d size %d\n", KFD_QUEUE_TYPE_HIQ, - queue_size); + pr_debug("Initializing queue type %d size %d\n", type, queue_size); memset(&prop, 0, sizeof(prop)); memset(&nop, 0, sizeof(nop)); @@ -69,6 +65,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev, kq->mqd_mgr = dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]; break; default: + WARN(1, "Invalid queue type %d\n", type); dev_err(dev->adev->dev, "Invalid queue type %d\n", type); return false; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 79251f22b702..59a5a3fea65d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -39,22 +39,22 @@ #endif #define dev_fmt(fmt) "kfd_migrate: " fmt -static uint64_t -svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr) +static u64 +svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, u64 addr) { return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM); } static int -svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages, - dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags) +svm_migrate_gart_map(struct amdgpu_ring *ring, u64 npages, + dma_addr_t *addr, u64 *gart_addr, u64 flags) { struct amdgpu_device *adev = ring->adev; struct amdgpu_job *job; unsigned int num_dw, num_bytes; struct dma_fence *fence; - uint64_t src_addr, dst_addr; - uint64_t pte_flags; + u64 src_addr, dst_addr; + u64 pte_flags; void *cpu_addr; int r; @@ -68,7 +68,8 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages, AMDGPU_FENCE_OWNER_UNDEFINED, num_dw * 4 + num_bytes, AMDGPU_IB_POOL_DELAYED, - &job); + &job, + 
AMDGPU_KERNEL_JOB_ID_KFD_GART_MAP); if (r) return r; @@ -122,15 +123,15 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages, static int svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys, - uint64_t *vram, uint64_t npages, + u64 *vram, u64 npages, enum MIGRATION_COPY_DIR direction, struct dma_fence **mfence) { - const uint64_t GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE; + const u64 GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE; struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; - uint64_t gart_s, gart_d; + u64 gart_s, gart_d; struct dma_fence *next; - uint64_t size; + u64 size; int r; mutex_lock(&adev->mman.gtt_window_lock); @@ -260,39 +261,39 @@ static void svm_migrate_put_sys_page(unsigned long addr) put_page(page); } -static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate) +static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate) { - unsigned long upages = 0; + unsigned long mpages = 0; unsigned long i; for (i = 0; i < migrate->npages; i++) { - if (migrate->src[i] & MIGRATE_PFN_VALID && - !(migrate->src[i] & MIGRATE_PFN_MIGRATE)) - upages++; + if (migrate->dst[i] & MIGRATE_PFN_VALID && + migrate->src[i] & MIGRATE_PFN_MIGRATE) + mpages++; } - return upages; + return mpages; } static int svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange, struct migrate_vma *migrate, struct dma_fence **mfence, - dma_addr_t *scratch, uint64_t ttm_res_offset) + dma_addr_t *scratch, u64 ttm_res_offset) { - uint64_t npages = migrate->npages; + u64 npages = migrate->npages; struct amdgpu_device *adev = node->adev; struct device *dev = adev->dev; struct amdgpu_res_cursor cursor; - uint64_t mpages = 0; + u64 mpages = 0; dma_addr_t *src; - uint64_t *dst; - uint64_t i, j; + u64 *dst; + u64 i, j; int r; pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start, prange->last, ttm_res_offset); src = scratch; - dst = (uint64_t *)(scratch + npages); + dst = (u64 *)(scratch + npages); amdgpu_res_first(prange->ttm_res, ttm_res_offset, npages << PAGE_SHIFT, &cursor); @@ -385,11 +386,11 @@ out_free_vram_pages: static long svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange, - struct vm_area_struct *vma, uint64_t start, - uint64_t end, uint32_t trigger, uint64_t ttm_res_offset) + struct vm_area_struct *vma, u64 start, + u64 end, uint32_t trigger, u64 ttm_res_offset) { struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms); - uint64_t npages = (end - start) >> PAGE_SHIFT; + u64 npages = (end - start) >> PAGE_SHIFT; struct amdgpu_device *adev = node->adev; struct kfd_process_device *pdd; struct dma_fence *mfence = NULL; @@ -408,7 +409,7 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange, migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev); buf = kvcalloc(npages, - 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t), + 2 * sizeof(*migrate.src) + sizeof(u64) + sizeof(dma_addr_t), GFP_KERNEL); if (!buf) goto out; @@ -447,9 +448,9 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange, svm_migrate_copy_done(adev, mfence); migrate_vma_finalize(&migrate); - mpages = cpages - svm_migrate_unsuccessful_pages(&migrate); - pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n", - mpages, cpages, migrate.npages); + mpages = svm_migrate_successful_pages(&migrate); + pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n", + mpages, cpages, migrate.npages); svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages); @@ 
-490,7 +491,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, { unsigned long addr, start, end; struct vm_area_struct *vma; - uint64_t ttm_res_offset; + u64 ttm_res_offset; struct kfd_node *node; unsigned long mpages = 0; long r = 0; @@ -580,14 +581,14 @@ static void svm_migrate_page_free(struct page *page) static int svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange, struct migrate_vma *migrate, struct dma_fence **mfence, - dma_addr_t *scratch, uint64_t npages) + dma_addr_t *scratch, u64 npages) { struct device *dev = adev->dev; - uint64_t *src; + u64 *src; dma_addr_t *dst; struct page *dpage; - uint64_t i = 0, j; - uint64_t addr; + u64 i = 0, j; + u64 addr; int r = 0; pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start, @@ -595,7 +596,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange, addr = migrate->start; - src = (uint64_t *)(scratch + npages); + src = (u64 *)(scratch + npages); dst = scratch; for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) { @@ -683,12 +684,11 @@ out_oom: */ static long svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange, - struct vm_area_struct *vma, uint64_t start, uint64_t end, + struct vm_area_struct *vma, u64 start, u64 end, uint32_t trigger, struct page *fault_page) { struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms); - uint64_t npages = (end - start) >> PAGE_SHIFT; - unsigned long upages = npages; + u64 npages = (end - start) >> PAGE_SHIFT; unsigned long cpages = 0; unsigned long mpages = 0; struct amdgpu_device *adev = node->adev; @@ -710,7 +710,7 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange, migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE; buf = kvcalloc(npages, - 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t), + 2 * sizeof(*migrate.src) + sizeof(u64) + sizeof(dma_addr_t), GFP_KERNEL); if (!buf) goto out; @@ -736,7 +736,6 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange, if (!cpages) { pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n", prange->start, prange->last); - upages = svm_migrate_unsuccessful_pages(&migrate); goto out_free; } if (cpages != npages) @@ -749,9 +748,9 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange, scratch, npages); migrate_vma_pages(&migrate); - upages = svm_migrate_unsuccessful_pages(&migrate); - pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n", - upages, cpages, migrate.npages); + mpages = svm_migrate_successful_pages(&migrate); + pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n", + mpages, cpages, migrate.npages); svm_migrate_copy_done(adev, mfence); migrate_vma_finalize(&migrate); @@ -764,8 +763,7 @@ out_free: start >> PAGE_SHIFT, end >> PAGE_SHIFT, node->id, 0, trigger, r); out: - if (!r && cpages) { - mpages = cpages - upages; + if (!r && mpages) { pdd = svm_range_get_pdd_by_node(prange, node); if (pdd) WRITE_ONCE(pdd->page_out, pdd->page_out + mpages); @@ -848,6 +846,9 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, } if (r >= 0) { + WARN_ONCE(prange->vram_pages < mpages, + "Recorded vram pages(0x%llx) should not be less than migration pages(0x%lx).", + prange->vram_pages, mpages); prange->vram_pages -= mpages; /* prange does not have vram page set its actual_loc to system diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c index aee2212e52f6..33aa23450b3f 100644 --- 
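/*
 * Worked example, not from this patch: the kfd_migrate.c hunks above flip
 * the bookkeeping from counting failures (upages) to counting successes
 * (mpages), so the debug line reads migrated/collected/requested. Assuming
 * 4 KiB pages, migrating a 2 MiB range requests npages = 512; if the
 * collection step (migrate_vma_setup()) collects cpages = 500 and 3 of
 * those fail to move, mpages = 497. On the VRAM-to-RAM path pdd->page_out
 * then advances by 497 and prange->vram_pages drops by the same 497, with
 * the new WARN_ONCE flagging the case where mpages would exceed the
 * recorded vram_pages.
 */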
a/drivers/gpu/drm/amd/amdkfd/kfd_module.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c @@ -78,8 +78,8 @@ err_ioctl: static void kfd_exit(void) { kfd_cleanup_processes(); - kfd_debugfs_fini(); kfd_process_destroy_wq(); + kfd_debugfs_fini(); kfd_procfs_shutdown(); kfd_topology_shutdown(); kfd_chardev_exit(); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 97933d2a3803..f2dee320fada 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -373,7 +373,7 @@ static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stac { struct v9_mqd *m = get_mqd(mqd); - *ctl_stack_size = m->cp_hqd_cntl_stack_size; + *ctl_stack_size = m->cp_hqd_cntl_stack_size * NUM_XCC(mm->dev->xcc_mask); } static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst) @@ -388,6 +388,24 @@ static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, voi memcpy(ctl_stack_dst, ctl_stack, m->cp_hqd_cntl_stack_size); } +static void checkpoint_mqd_v9_4_3(struct mqd_manager *mm, + void *mqd, + void *mqd_dst, + void *ctl_stack_dst) +{ + struct v9_mqd *m; + int xcc; + uint64_t size = get_mqd(mqd)->cp_mqd_stride_size; + + for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) { + m = get_mqd(mqd + size * xcc); + + checkpoint_mqd(mm, m, + (uint8_t *)mqd_dst + sizeof(*m) * xcc, + (uint8_t *)ctl_stack_dst + m->cp_hqd_cntl_stack_size * xcc); + } +} + static void restore_mqd(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *qp, @@ -764,13 +782,35 @@ static void restore_mqd_v9_4_3(struct mqd_manager *mm, void **mqd, const void *mqd_src, const void *ctl_stack_src, u32 ctl_stack_size) { - restore_mqd(mm, mqd, mqd_mem_obj, gart_addr, qp, mqd_src, ctl_stack_src, ctl_stack_size); - if (amdgpu_sriov_multi_vf_mode(mm->dev->adev)) { - struct v9_mqd *m; + struct kfd_mem_obj xcc_mqd_mem_obj; + u32 mqd_ctl_stack_size; + struct v9_mqd *m; + u32 num_xcc; + int xcc; - m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr; - m->cp_hqd_pq_doorbell_control |= 1 << - CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT; + uint64_t offset = mm->mqd_stride(mm, qp); + + mm->dev->dqm->current_logical_xcc_start++; + + num_xcc = NUM_XCC(mm->dev->xcc_mask); + mqd_ctl_stack_size = ctl_stack_size / num_xcc; + + memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj)); + + /* Set the MQD pointer and gart address to XCC0 MQD */ + *mqd = mqd_mem_obj->cpu_ptr; + if (gart_addr) + *gart_addr = mqd_mem_obj->gpu_addr; + + for (xcc = 0; xcc < num_xcc; xcc++) { + get_xcc_mqd(mqd_mem_obj, &xcc_mqd_mem_obj, offset * xcc); + restore_mqd(mm, (void **)&m, + &xcc_mqd_mem_obj, + NULL, + qp, + (uint8_t *)mqd_src + xcc * sizeof(*m), + (uint8_t *)ctl_stack_src + xcc * mqd_ctl_stack_size, + mqd_ctl_stack_size); } } static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, @@ -906,7 +946,6 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, mqd->free_mqd = kfd_free_mqd_cp; mqd->is_occupied = kfd_is_occupied_cp; mqd->get_checkpoint_info = get_checkpoint_info; - mqd->checkpoint_mqd = checkpoint_mqd; mqd->mqd_size = sizeof(struct v9_mqd); mqd->mqd_stride = mqd_stride_v9; #if defined(CONFIG_DEBUG_FS) @@ -918,16 +957,18 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, mqd->init_mqd = init_mqd_v9_4_3; mqd->load_mqd = load_mqd_v9_4_3; mqd->update_mqd = update_mqd_v9_4_3; - mqd->restore_mqd = 
restore_mqd_v9_4_3; mqd->destroy_mqd = destroy_mqd_v9_4_3; mqd->get_wave_state = get_wave_state_v9_4_3; + mqd->checkpoint_mqd = checkpoint_mqd_v9_4_3; + mqd->restore_mqd = restore_mqd_v9_4_3; } else { mqd->init_mqd = init_mqd; mqd->load_mqd = load_mqd; mqd->update_mqd = update_mqd; - mqd->restore_mqd = restore_mqd; mqd->destroy_mqd = kfd_destroy_mqd_cp; mqd->get_wave_state = get_wave_state; + mqd->checkpoint_mqd = checkpoint_mqd; + mqd->restore_mqd = restore_mqd; } break; case KFD_MQD_TYPE_HIQ: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index d221c58dccc3..70ef051511bb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -111,7 +111,14 @@ #define KFD_KERNEL_QUEUE_SIZE 2048 -#define KFD_UNMAP_LATENCY_MS (4000) +/* KFD_UNMAP_LATENCY_MS is the timeout CP waiting for SDMA preemption. One XCC + * can be associated to 2 SDMA engines. queue_preemption_timeout_ms is the time + * driver waiting for CP returning the UNMAP_QUEUE fence. Thus the math is + * queue_preemption_timeout_ms = sdma_preemption_time * 2 + cp workload + * The format here makes CP workload 10% of total timeout + */ +#define KFD_UNMAP_LATENCY_MS \ + ((queue_preemption_timeout_ms - queue_preemption_timeout_ms / 10) >> 1) #define KFD_MAX_SDMA_QUEUES 128 @@ -372,6 +379,11 @@ struct kfd_dev { /* bitmap for dynamic doorbell allocation from doorbell object */ unsigned long *doorbell_bitmap; + + /* for dynamic partitioning */ + int kfd_dev_lock; + + atomic_t kfd_processes_count; }; enum kfd_mempool { @@ -1536,7 +1548,7 @@ static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev) int kfd_send_exception_to_runtime(struct kfd_process *p, unsigned int queue_id, uint64_t error_reason); -bool kfd_is_locked(void); +bool kfd_is_locked(struct kfd_dev *kfd); /* Compute profile */ void kfd_inc_compute_active(struct kfd_node *dev); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 722ac1662bdc..ddfe30c13e9d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -854,7 +854,7 @@ struct kfd_process *kfd_create_process(struct task_struct *thread) */ mutex_lock(&kfd_processes_mutex); - if (kfd_is_locked()) { + if (kfd_is_locked(NULL)) { pr_debug("KFD is locked! Cannot create process"); process = ERR_PTR(-EINVAL); goto out; @@ -1088,6 +1088,8 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) pdd->runtime_inuse = false; } + atomic_dec(&pdd->dev->kfd->kfd_processes_count); + kfree(pdd); p->pdds[i] = NULL; } @@ -1649,6 +1651,8 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev, /* Init idr used for memory handle translation */ idr_init(&pdd->alloc_idr); + atomic_inc(&dev->kfd->kfd_processes_count); + return pdd; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index c643e0ccec52..7fbb5c274ccc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -914,7 +914,10 @@ static int criu_checkpoint_queues_device(struct kfd_process_device *pdd, q_data = (struct kfd_criu_queue_priv_data *)q_private_data; - /* data stored in this order: priv_data, mqd, ctl_stack */ + /* + * data stored in this order: + * priv_data, mqd[xcc0], mqd[xcc1],..., ctl_stack[xcc0], ctl_stack[xcc1]... 
+ */ q_data->mqd_size = mqd_size; q_data->ctl_stack_size = ctl_stack_size; @@ -963,7 +966,7 @@ int kfd_criu_checkpoint_queues(struct kfd_process *p, } static void set_queue_properties_from_criu(struct queue_properties *qp, - struct kfd_criu_queue_priv_data *q_data) + struct kfd_criu_queue_priv_data *q_data, uint32_t num_xcc) { qp->is_interop = false; qp->queue_percent = q_data->q_percent; @@ -976,7 +979,11 @@ static void set_queue_properties_from_criu(struct queue_properties *qp, qp->eop_ring_buffer_size = q_data->eop_ring_buffer_size; qp->ctx_save_restore_area_address = q_data->ctx_save_restore_area_address; qp->ctx_save_restore_area_size = q_data->ctx_save_restore_area_size; - qp->ctl_stack_size = q_data->ctl_stack_size; + if (q_data->type == KFD_QUEUE_TYPE_COMPUTE) + qp->ctl_stack_size = q_data->ctl_stack_size / num_xcc; + else + qp->ctl_stack_size = q_data->ctl_stack_size; + qp->type = q_data->type; qp->format = q_data->format; } @@ -1036,12 +1043,15 @@ int kfd_criu_restore_queue(struct kfd_process *p, goto exit; } - /* data stored in this order: mqd, ctl_stack */ + /* + * data stored in this order: + * mqd[xcc0], mqd[xcc1],..., ctl_stack[xcc0], ctl_stack[xcc1]... + */ mqd = q_extra_data; ctl_stack = mqd + q_data->mqd_size; memset(&qp, 0, sizeof(qp)); - set_queue_properties_from_criu(&qp, q_data); + set_queue_properties_from_criu(&qp, q_data, NUM_XCC(pdd->dev->adev->gfx.xcc_mask)); print_queue_properties(&qp); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index 83d9384ac815..a499449fcb06 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -253,9 +253,9 @@ void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid) task_info = amdgpu_vm_get_task_info_pasid(dev->adev, pasid); if (task_info) { /* Report VM faults from user applications, not retry from kernel */ - if (task_info->pid) + if (task_info->task.pid) kfd_smi_event_add(0, dev, KFD_SMI_EVENT_VMFAULT, KFD_EVENT_FMT_VMFAULT( - task_info->pid, task_info->task_name)); + task_info->task.pid, task_info->task.comm)); amdgpu_vm_put_task_info(task_info); } } @@ -359,8 +359,8 @@ void kfd_smi_event_process(struct kfd_process_device *pdd, bool start) kfd_smi_event_add(0, pdd->dev, start ? 
KFD_SMI_EVENT_PROCESS_START : KFD_SMI_EVENT_PROCESS_END, - KFD_EVENT_FMT_PROCESS(task_info->pid, - task_info->task_name)); + KFD_EVENT_FMT_PROCESS(task_info->task.pid, + task_info->task.comm)); amdgpu_vm_put_task_info(task_info); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index a0f22ea6d15a..273f42e3afdd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1189,7 +1189,7 @@ svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b) } static uint64_t -svm_range_get_pte_flags(struct kfd_node *node, +svm_range_get_pte_flags(struct kfd_node *node, struct amdgpu_vm *vm, struct svm_range *prange, int domain) { struct kfd_node *bo_node; @@ -1292,10 +1292,6 @@ svm_range_get_pte_flags(struct kfd_node *node, AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; } - mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE; - - if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO) - mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE; if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC) mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE; @@ -1305,7 +1301,10 @@ svm_range_get_pte_flags(struct kfd_node *node, if (gc_ip_version >= IP_VERSION(12, 0, 0)) pte_flags |= AMDGPU_PTE_IS_PTE; - pte_flags |= amdgpu_gem_va_map_flags(node->adev, mapping_flags); + amdgpu_gmc_get_vm_pte(node->adev, vm, NULL, mapping_flags, &pte_flags); + pte_flags |= AMDGPU_PTE_READABLE; + if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO)) + pte_flags |= AMDGPU_PTE_WRITEABLE; return pte_flags; } @@ -1412,7 +1411,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange, pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n", last_start, prange->start + i, last_domain ? "GPU" : "CPU"); - pte_flags = svm_range_get_pte_flags(pdd->dev, prange, last_domain); + pte_flags = svm_range_get_pte_flags(pdd->dev, vm, prange, last_domain); if (readonly) pte_flags &= ~AMDGPU_PTE_WRITEABLE; @@ -1714,9 +1713,32 @@ static int svm_range_validate_and_map(struct mm_struct *mm, next = min(vma->vm_end, end); npages = (next - addr) >> PAGE_SHIFT; + /* HMM requires at least READ permissions. If provided with PROT_NONE, + * unmap the memory. 
If it's not already mapped, this is a no-op + * If PROT_WRITE is provided without READ, warn first then unmap + */ + if (!(vma->vm_flags & VM_READ)) { + unsigned long e, s; + + svm_range_lock(prange); + if (vma->vm_flags & VM_WRITE) + pr_debug("VM_WRITE without VM_READ is not supported"); + s = max(start, prange->start); + e = min(end, prange->last); + if (e >= s) + r = svm_range_unmap_from_gpus(prange, s, e, + KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU); + svm_range_unlock(prange); + /* If unmap returns non-zero, we'll bail on the next for loop + * iteration, so just leave r and continue + */ + addr = next; + continue; + } + WRITE_ONCE(p->svms.faulting_task, current); r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages, - readonly, owner, NULL, + readonly, owner, &hmm_range); WRITE_ONCE(p->svms.faulting_task, NULL); if (r) @@ -4239,7 +4261,7 @@ svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start, r = svm_range_get_attr(p, mm, start, size, nattrs, attrs); break; default: - r = EINVAL; + r = -EINVAL; break; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 4ec73f33535e..5c98746eb72d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -530,6 +530,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version", dev->gpu->kfd->sdma_fw_version); sysfs_show_64bit_prop(buffer, offs, "unique_id", + dev->gpu->xcp ? + dev->gpu->xcp->unique_id : dev->gpu->adev->unique_id); sysfs_show_32bit_prop(buffer, offs, "num_xcc", NUM_XCC(dev->gpu->xcc_mask)); @@ -1587,7 +1589,8 @@ static int kfd_dev_create_p2p_links(void) break; if (!dev->gpu || !dev->gpu->adev || (dev->gpu->kfd->hive_id && - dev->gpu->kfd->hive_id == new_dev->gpu->kfd->hive_id)) + dev->gpu->kfd->hive_id == new_dev->gpu->kfd->hive_id && + amdgpu_xgmi_get_is_sharing_enabled(dev->gpu->adev, new_dev->gpu->adev))) goto next; /* check if node(s) is/are peer accessible in one direction or bi-direction */ diff --git a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c index faed84172dd4..44009aa8216e 100644 --- a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c +++ b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c @@ -21,6 +21,7 @@ * */ +#include <linux/export.h> #include <linux/init.h> #include <linux/module.h> #include <linux/platform_device.h> @@ -45,18 +46,29 @@ static const struct drm_driver amdgpu_xcp_driver = { static int8_t pdev_num; static struct xcp_device *xcp_dev[MAX_XCP_PLATFORM_DEVICE]; +static DEFINE_MUTEX(xcp_mutex); int amdgpu_xcp_drm_dev_alloc(struct drm_device **ddev) { struct platform_device *pdev; struct xcp_device *pxcp_dev; char dev_name[20]; - int ret; + int ret, i; + + guard(mutex)(&xcp_mutex); if (pdev_num >= MAX_XCP_PLATFORM_DEVICE) return -ENODEV; - snprintf(dev_name, sizeof(dev_name), "amdgpu_xcp_%d", pdev_num); + for (i = 0; i < MAX_XCP_PLATFORM_DEVICE; i++) { + if (!xcp_dev[i]) + break; + } + + if (i >= MAX_XCP_PLATFORM_DEVICE) + return -ENODEV; + + snprintf(dev_name, sizeof(dev_name), "amdgpu_xcp_%d", i); pdev = platform_device_register_simple(dev_name, -1, NULL, 0); if (IS_ERR(pdev)) return PTR_ERR(pdev); @@ -72,8 +84,8 @@ int amdgpu_xcp_drm_dev_alloc(struct drm_device **ddev) goto out_devres; } - xcp_dev[pdev_num] = pxcp_dev; - xcp_dev[pdev_num]->pdev = pdev; + xcp_dev[i] = pxcp_dev; + xcp_dev[i]->pdev = pdev; *ddev = &pxcp_dev->drm; pdev_num++; @@ -88,16 +100,43 @@ out_unregister: } 
EXPORT_SYMBOL(amdgpu_xcp_drm_dev_alloc); -void amdgpu_xcp_drv_release(void) +static void free_xcp_dev(int8_t index) { - for (--pdev_num; pdev_num >= 0; --pdev_num) { - struct platform_device *pdev = xcp_dev[pdev_num]->pdev; + if ((index < MAX_XCP_PLATFORM_DEVICE) && (xcp_dev[index])) { + struct platform_device *pdev = xcp_dev[index]->pdev; devres_release_group(&pdev->dev, NULL); platform_device_unregister(pdev); - xcp_dev[pdev_num] = NULL; + + xcp_dev[index] = NULL; + pdev_num--; + } +} + +void amdgpu_xcp_drm_dev_free(struct drm_device *ddev) +{ + int8_t i; + + guard(mutex)(&xcp_mutex); + + for (i = 0; i < MAX_XCP_PLATFORM_DEVICE; i++) { + if ((xcp_dev[i]) && (&xcp_dev[i]->drm == ddev)) { + free_xcp_dev(i); + break; + } + } +} +EXPORT_SYMBOL(amdgpu_xcp_drm_dev_free); + +void amdgpu_xcp_drv_release(void) +{ + int8_t i; + + guard(mutex)(&xcp_mutex); + + for (i = 0; pdev_num && i < MAX_XCP_PLATFORM_DEVICE; i++) { + free_xcp_dev(i); } - pdev_num = 0; } EXPORT_SYMBOL(amdgpu_xcp_drv_release); diff --git a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h index c1c4b679bf95..580a1602c8e3 100644 --- a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h +++ b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h @@ -25,5 +25,6 @@ #define _AMDGPU_XCP_DRV_H_ int amdgpu_xcp_drm_dev_alloc(struct drm_device **ddev); +void amdgpu_xcp_drm_dev_free(struct drm_device *ddev); void amdgpu_xcp_drv_release(void); #endif /* _AMDGPU_XCP_DRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile index 89d605de0595..0084a8d55254 100644 --- a/drivers/gpu/drm/amd/display/Makefile +++ b/drivers/gpu/drm/amd/display/Makefile @@ -44,6 +44,7 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/mmhubbub subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/mpc subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/opp subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/pg +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/soc_and_ip_translator subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index f58fa5da7fe5..8e1622bf7a42 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: MIT /* * Copyright 2015 Advanced Micro Devices, Inc. 
* @@ -39,13 +40,11 @@ #include "dc/dc_stat.h" #include "dc/dc_state.h" #include "amdgpu_dm_trace.h" -#include "dpcd_defs.h" #include "link/protocols/link_dpcd.h" #include "link_service_types.h" #include "link/protocols/link_dp_capability.h" #include "link/protocols/link_ddc.h" -#include "vid.h" #include "amdgpu.h" #include "amdgpu_display.h" #include "amdgpu_ucode.h" @@ -56,7 +55,6 @@ #include "amdgpu_dm_hdcp.h" #include <drm/display/drm_hdcp_helper.h> #include "amdgpu_dm_wb.h" -#include "amdgpu_pm.h" #include "amdgpu_atombios.h" #include "amd_shared.h" @@ -82,6 +80,7 @@ #include <linux/component.h> #include <linux/sort.h> +#include <drm/drm_privacy_screen_consumer.h> #include <drm/display/drm_dp_mst_helper.h> #include <drm/display/drm_hdmi_helper.h> #include <drm/drm_atomic.h> @@ -102,15 +101,6 @@ #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" -#include "dcn/dcn_1_0_offset.h" -#include "dcn/dcn_1_0_sh_mask.h" -#include "soc15_hw_ip.h" -#include "soc15_common.h" -#include "vega10_ip_offset.h" - -#include "gc/gc_11_0_0_offset.h" -#include "gc/gc_11_0_0_sh_mask.h" - #include "modules/inc/mod_freesync.h" #include "modules/power/power_helpers.h" @@ -243,6 +233,7 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev, static int amdgpu_dm_connector_get_modes(struct drm_connector *connector); +static int amdgpu_dm_atomic_setup_commit(struct drm_atomic_state *state); static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state); static int amdgpu_dm_atomic_check(struct drm_device *dev, @@ -427,8 +418,7 @@ static inline bool update_planes_and_stream_adapter(struct dc *dc, /* * Previous frame finished and HW is ready for optimization. */ - if (update_type == UPDATE_TYPE_FAST) - dc_post_update_surfaces_to_stream(dc); + dc_post_update_surfaces_to_stream(dc); return dc_update_planes_and_stream(dc, array_of_surface_update, @@ -541,6 +531,50 @@ static void dm_pflip_high_irq(void *interrupt_params) amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e); } +static void dm_handle_vmin_vmax_update(struct work_struct *offload_work) +{ + struct vupdate_offload_work *work = container_of(offload_work, struct vupdate_offload_work, work); + struct amdgpu_device *adev = work->adev; + struct dc_stream_state *stream = work->stream; + struct dc_crtc_timing_adjust *adjust = work->adjust; + + mutex_lock(&adev->dm.dc_lock); + dc_stream_adjust_vmin_vmax(adev->dm.dc, stream, adjust); + mutex_unlock(&adev->dm.dc_lock); + + dc_stream_release(stream); + kfree(work->adjust); + kfree(work); +} + +static void schedule_dc_vmin_vmax(struct amdgpu_device *adev, + struct dc_stream_state *stream, + struct dc_crtc_timing_adjust *adjust) +{ + struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_KERNEL); + if (!offload_work) { + drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n"); + return; + } + + struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_KERNEL); + if (!adjust_copy) { + drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n"); + kfree(offload_work); + return; + } + + dc_stream_retain(stream); + memcpy(adjust_copy, adjust, sizeof(*adjust_copy)); + + INIT_WORK(&offload_work->work, dm_handle_vmin_vmax_update); + offload_work->adev = adev; + offload_work->stream = stream; + offload_work->adjust = adjust_copy; + + queue_work(system_wq, &offload_work->work); +} + static void dm_vupdate_high_irq(void *interrupt_params) { struct common_irq_params *irq_params = interrupt_params; @@ -578,22 +612,27 @@ static void 
dm_vupdate_high_irq(void *interrupt_params) * page-flip completion events that have been queued to us * if a pageflip happened inside front-porch. */ - if (vrr_active) { + if (vrr_active && acrtc->dm_irq_params.stream) { + bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled; + bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled; + bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state + == VRR_STATE_ACTIVE_VARIABLE; + amdgpu_dm_crtc_handle_vblank(acrtc); /* BTR processing for pre-DCE12 ASICs */ - if (acrtc->dm_irq_params.stream && - adev->family < AMDGPU_FAMILY_AI) { + if (adev->family < AMDGPU_FAMILY_AI) { spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); mod_freesync_handle_v_update( adev->dm.freesync_module, acrtc->dm_irq_params.stream, &acrtc->dm_irq_params.vrr_params); - dc_stream_adjust_vmin_vmax( - adev->dm.dc, - acrtc->dm_irq_params.stream, - &acrtc->dm_irq_params.vrr_params.adjust); + if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) { + schedule_dc_vmin_vmax(adev, + acrtc->dm_irq_params.stream, + &acrtc->dm_irq_params.vrr_params.adjust); + } spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); } } @@ -676,15 +715,20 @@ static void dm_crtc_high_irq(void *interrupt_params) spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); if (acrtc->dm_irq_params.stream && - acrtc->dm_irq_params.vrr_params.supported && - acrtc->dm_irq_params.freesync_config.state == - VRR_STATE_ACTIVE_VARIABLE) { + acrtc->dm_irq_params.vrr_params.supported) { + bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled; + bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled; + bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_VARIABLE; + mod_freesync_handle_v_update(adev->dm.freesync_module, acrtc->dm_irq_params.stream, &acrtc->dm_irq_params.vrr_params); - dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream, - &acrtc->dm_irq_params.vrr_params.adjust); + /* update vmin_vmax only if freesync is enabled, or only if PSR and REPLAY are disabled */ + if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) { + schedule_dc_vmin_vmax(adev, acrtc->dm_irq_params.stream, + &acrtc->dm_irq_params.vrr_params.adjust); + } } /* @@ -1758,10 +1802,11 @@ dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev, return DMUB_STATUS_TIMEOUT; } -static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev) +static void *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev) { - struct dml2_soc_bb *bb; + void *bb; long long addr; + unsigned int bb_size; int i = 0; uint16_t chunk; enum dmub_gpint_command send_addrs[] = { @@ -1774,6 +1819,7 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device * switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(4, 0, 1): + bb_size = sizeof(struct dml2_soc_bb); break; default: return NULL; @@ -1781,7 +1827,7 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device * bb = dm_allocate_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART, - sizeof(struct dml2_soc_bb), + bb_size, &addr); if (!bb) return NULL; @@ -1847,7 +1893,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) mutex_init(&adev->dm.audio_lock); if (amdgpu_dm_irq_init(adev)) { - drm_err(adev_to_drm(adev), "amdgpu: failed to initialize DM IRQ support.\n"); + drm_err(adev_to_drm(adev), "failed to initialize DM 
IRQ support.\n"); goto error; } @@ -2035,9 +2081,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) dc_hardware_init(adev->dm.dc); + adev->dm.restore_backlight = true; + adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev); if (!adev->dm.hpd_rx_offload_wq) { - drm_err(adev_to_drm(adev), "amdgpu: failed to create hpd rx offload workqueue.\n"); + drm_err(adev_to_drm(adev), "failed to create hpd rx offload workqueue.\n"); goto error; } @@ -2053,7 +2101,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); if (!adev->dm.freesync_module) { drm_err(adev_to_drm(adev), - "amdgpu: failed to initialize freesync_module.\n"); + "failed to initialize freesync_module.\n"); } else drm_dbg_driver(adev_to_drm(adev), "amdgpu: freesync_module init done %p.\n", adev->dm.freesync_module); @@ -2064,7 +2112,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) adev->dm.vblank_control_workqueue = create_singlethread_workqueue("dm_vblank_control_workqueue"); if (!adev->dm.vblank_control_workqueue) - drm_err(adev_to_drm(adev), "amdgpu: failed to initialize vblank_workqueue.\n"); + drm_err(adev_to_drm(adev), "failed to initialize vblank_workqueue.\n"); } if (adev->dm.dc->caps.ips_support && @@ -2075,7 +2123,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc); if (!adev->dm.hdcp_workqueue) - drm_err(adev_to_drm(adev), "amdgpu: failed to initialize hdcp_workqueue.\n"); + drm_err(adev_to_drm(adev), "failed to initialize hdcp_workqueue.\n"); else drm_dbg_driver(adev_to_drm(adev), "amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue); @@ -2085,20 +2133,20 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_completion(&adev->dm.dmub_aux_transfer_done); adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL); if (!adev->dm.dmub_notify) { - drm_info(adev_to_drm(adev), "amdgpu: fail to allocate adev->dm.dmub_notify"); + drm_info(adev_to_drm(adev), "fail to allocate adev->dm.dmub_notify"); goto error; } adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq"); if (!adev->dm.delayed_hpd_wq) { - drm_err(adev_to_drm(adev), "amdgpu: failed to create hpd offload workqueue.\n"); + drm_err(adev_to_drm(adev), "failed to create hpd offload workqueue.\n"); goto error; } amdgpu_dm_outbox_init(adev); if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY, dmub_aux_setconfig_callback, false)) { - drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub aux callback"); + drm_err(adev_to_drm(adev), "fail to register dmub aux callback"); goto error; } @@ -2107,7 +2155,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_FUSED_IO, dmub_aux_fused_io_callback, false)) { - drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub fused io callback"); + drm_err(adev_to_drm(adev), "fail to register dmub fused io callback"); goto error; } /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive. 
@@ -2125,7 +2173,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dm_initialize_drm_device(adev)) { drm_err(adev_to_drm(adev), - "amdgpu: failed to initialize sw for display support.\n"); + "failed to initialize sw for display support.\n"); goto error; } @@ -2140,14 +2188,14 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) { drm_err(adev_to_drm(adev), - "amdgpu: failed to initialize sw for display support.\n"); + "failed to initialize vblank for display support.\n"); goto error; } #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) amdgpu_dm_crtc_secure_display_create_contexts(adev); if (!adev->dm.secure_display_ctx.crtc_ctx) - drm_err(adev_to_drm(adev), "amdgpu: failed to initialize secure display contexts.\n"); + drm_err(adev_to_drm(adev), "failed to initialize secure display contexts.\n"); if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(4, 0, 1)) adev->dm.secure_display_ctx.support_mul_roi = true; @@ -2404,6 +2452,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_5_TRACEBUFF DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_6_FW_STATE DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_IB_MEM DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE }; int r; @@ -2570,7 +2619,7 @@ static int dm_sw_init(struct amdgpu_ip_block *ip_block) adev->dm.cgs_device = amdgpu_cgs_create_device(adev); if (!adev->dm.cgs_device) { - drm_err(adev_to_drm(adev), "amdgpu: failed to create cgs device.\n"); + drm_err(adev_to_drm(adev), "failed to create cgs device.\n"); return -EINVAL; } @@ -2898,7 +2947,7 @@ static int dm_oem_i2c_hw_init(struct amdgpu_device *adev) return -ENOMEM; } - r = i2c_add_adapter(&oem_i2c->base); + r = devm_i2c_add_adapter(adev->dev, &oem_i2c->base); if (r) { drm_info(adev_to_drm(adev), "Failed to register oem i2c\n"); kfree(oem_i2c); @@ -2960,8 +3009,6 @@ static int dm_hw_fini(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; - kfree(adev->dm.oem_i2c); - amdgpu_dm_hpd_fini(adev); amdgpu_dm_irq_fini(adev); @@ -2989,14 +3036,20 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev, drm_warn(adev_to_drm(adev), "Failed to %s pflip interrupts\n", enable ? "enable" : "disable"); - if (enable) { - if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state))) - rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true); - } else - rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false); - - if (rc) - drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n", enable ? "en" : "dis"); + if (dc_supports_vrr(adev->dm.dc->ctx->dce_version)) { + if (enable) { + if (amdgpu_dm_crtc_vrr_active( + to_dm_crtc_state(acrtc->base.state))) + rc = amdgpu_dm_crtc_set_vupdate_irq( + &acrtc->base, true); + } else + rc = amdgpu_dm_crtc_set_vupdate_irq( + &acrtc->base, false); + + if (rc) + drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n", + enable ? 
"en" : "dis"); + } irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; /* During gpu-reset we disable and then enable vblank irq, so @@ -3060,19 +3113,68 @@ static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm) } } -static int dm_prepare_suspend(struct amdgpu_ip_block *ip_block) +static int dm_cache_state(struct amdgpu_device *adev) { - struct amdgpu_device *adev = ip_block->adev; - - if (amdgpu_in_reset(adev)) - return 0; + int r; - WARN_ON(adev->dm.cached_state); adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); - if (IS_ERR(adev->dm.cached_state)) - return PTR_ERR(adev->dm.cached_state); + if (IS_ERR(adev->dm.cached_state)) { + r = PTR_ERR(adev->dm.cached_state); + adev->dm.cached_state = NULL; + } - return 0; + return adev->dm.cached_state ? 0 : r; +} + +static void dm_destroy_cached_state(struct amdgpu_device *adev) +{ + struct amdgpu_display_manager *dm = &adev->dm; + struct drm_device *ddev = adev_to_drm(adev); + struct dm_plane_state *dm_new_plane_state; + struct drm_plane_state *new_plane_state; + struct dm_crtc_state *dm_new_crtc_state; + struct drm_crtc_state *new_crtc_state; + struct drm_plane *plane; + struct drm_crtc *crtc; + int i; + + if (!dm->cached_state) + return; + + /* Force mode set in atomic commit */ + for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { + new_crtc_state->active_changed = true; + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + reset_freesync_config_for_crtc(dm_new_crtc_state); + } + + /* + * atomic_check is expected to create the dc states. We need to release + * them here, since they were duplicated as part of the suspend + * procedure. + */ + for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + if (dm_new_crtc_state->stream) { + WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1); + dc_stream_release(dm_new_crtc_state->stream); + dm_new_crtc_state->stream = NULL; + } + dm_new_crtc_state->base.color_mgmt_changed = true; + } + + for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) { + dm_new_plane_state = to_dm_plane_state(new_plane_state); + if (dm_new_plane_state->dc_state) { + WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1); + dc_plane_state_release(dm_new_plane_state->dc_state); + dm_new_plane_state->dc_state = NULL; + } + } + + drm_atomic_helper_resume(ddev, dm->cached_state); + + dm->cached_state = NULL; } static int dm_suspend(struct amdgpu_ip_block *ip_block) @@ -3106,9 +3208,10 @@ static int dm_suspend(struct amdgpu_ip_block *ip_block) } if (!adev->dm.cached_state) { - adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); - if (IS_ERR(adev->dm.cached_state)) - return PTR_ERR(adev->dm.cached_state); + int r = dm_cache_state(adev); + + if (r) + return r; } s3_handle_hdmi_cec(adev_to_drm(adev), true); @@ -3295,12 +3398,6 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; struct drm_connector_list_iter iter; - struct drm_crtc *crtc; - struct drm_crtc_state *new_crtc_state; - struct dm_crtc_state *dm_new_crtc_state; - struct drm_plane *plane; - struct drm_plane_state *new_plane_state; - struct dm_plane_state *dm_new_plane_state; struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); enum dc_connection_type new_connection_type = dc_connection_none; struct dc_state *dc_state; @@ -3332,13 +3429,16 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) 
link_enc_cfg_copy(adev->dm.dc->current_state, dc_state); r = dm_dmub_hw_init(adev); - if (r) + if (r) { drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r); + return r; + } dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0); dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); dc_resume(dm->dc); + adev->dm.restore_backlight = true; amdgpu_dm_irq_resume_early(adev); @@ -3457,40 +3557,7 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) } drm_connector_list_iter_end(&iter); - /* Force mode set in atomic commit */ - for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { - new_crtc_state->active_changed = true; - dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); - reset_freesync_config_for_crtc(dm_new_crtc_state); - } - - /* - * atomic_check is expected to create the dc states. We need to release - * them here, since they were duplicated as part of the suspend - * procedure. - */ - for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { - dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); - if (dm_new_crtc_state->stream) { - WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1); - dc_stream_release(dm_new_crtc_state->stream); - dm_new_crtc_state->stream = NULL; - } - dm_new_crtc_state->base.color_mgmt_changed = true; - } - - for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) { - dm_new_plane_state = to_dm_plane_state(new_plane_state); - if (dm_new_plane_state->dc_state) { - WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1); - dc_plane_state_release(dm_new_plane_state->dc_state); - dm_new_plane_state->dc_state = NULL; - } - } - - drm_atomic_helper_resume(ddev, dm->cached_state); - - dm->cached_state = NULL; + dm_destroy_cached_state(adev); /* Do mst topology probing after resuming cached state*/ drm_connector_list_iter_begin(ddev, &iter); @@ -3536,7 +3603,6 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = { .early_fini = amdgpu_dm_early_fini, .hw_init = dm_hw_init, .hw_fini = dm_hw_fini, - .prepare_suspend = dm_prepare_suspend, .suspend = dm_suspend, .resume = dm_resume, .is_idle = dm_is_idle, @@ -3571,23 +3637,25 @@ static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { .atomic_commit_tail = amdgpu_dm_atomic_commit_tail, - .atomic_commit_setup = drm_dp_mst_atomic_setup_commit, + .atomic_commit_setup = amdgpu_dm_atomic_setup_commit, }; static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) { + const struct drm_panel_backlight_quirk *panel_backlight_quirk; struct amdgpu_dm_backlight_caps *caps; struct drm_connector *conn_base; struct amdgpu_device *adev; struct drm_luminance_range_info *luminance_range; - int min_input_signal_override; + struct drm_device *drm; if (aconnector->bl_idx == -1 || aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP) return; conn_base = &aconnector->base; - adev = drm_to_adev(conn_base->dev); + drm = conn_base->dev; + adev = drm_to_adev(drm); caps = &adev->dm.backlight_caps[aconnector->bl_idx]; caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; @@ -3620,9 +3688,24 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) else caps->aux_min_input_signal = 1; - min_input_signal_override = drm_get_panel_min_brightness_quirk(aconnector->drm_edid); - if (min_input_signal_override >= 0) - caps->min_input_signal = min_input_signal_override; + panel_backlight_quirk = + 
drm_get_panel_backlight_quirk(aconnector->drm_edid); + if (!IS_ERR_OR_NULL(panel_backlight_quirk)) { + if (panel_backlight_quirk->min_brightness) { + caps->min_input_signal = + panel_backlight_quirk->min_brightness - 1; + drm_info(drm, + "Applying panel backlight quirk, min_brightness: %d\n", + caps->min_input_signal); + } + if (panel_backlight_quirk->brightness_mask) { + drm_info(drm, + "Applying panel backlight quirk, brightness_mask: 0x%X\n", + panel_backlight_quirk->brightness_mask); + caps->brightness_mask = + panel_backlight_quirk->brightness_mask; + } + } } DEFINE_FREE(sink_release, struct dc_sink *, if (_T) dc_sink_release(_T)) @@ -4003,19 +4086,19 @@ static int register_hpd_handlers(struct amdgpu_device *adev) if (dc_is_dmub_outbox_supported(adev->dm.dc)) { if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) { - drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub hpd callback"); + drm_err(adev_to_drm(adev), "fail to register dmub hpd callback"); return -EINVAL; } if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) { - drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub hpd callback"); + drm_err(adev_to_drm(adev), "fail to register dmub hpd callback"); return -EINVAL; } if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_SENSE_NOTIFY, dmub_hpd_sense_callback, true)) { - drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub hpd sense callback"); + drm_err(adev_to_drm(adev), "fail to register dmub hpd sense callback"); return -EINVAL; } } @@ -4720,16 +4803,16 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps, return 1; } -/* Rescale from [min..max] to [0..MAX_BACKLIGHT_LEVEL] */ +/* Rescale from [min..max] to [0..AMDGPU_MAX_BL_LEVEL] */ static inline u32 scale_input_to_fw(int min, int max, u64 input) { - return DIV_ROUND_CLOSEST_ULL(input * MAX_BACKLIGHT_LEVEL, max - min); + return DIV_ROUND_CLOSEST_ULL(input * AMDGPU_MAX_BL_LEVEL, max - min); } -/* Rescale from [0..MAX_BACKLIGHT_LEVEL] to [min..max] */ +/* Rescale from [0..AMDGPU_MAX_BL_LEVEL] to [min..max] */ static inline u32 scale_fw_to_input(int min, int max, u64 input) { - return min + DIV_ROUND_CLOSEST_ULL(input * (max - min), MAX_BACKLIGHT_LEVEL); + return min + DIV_ROUND_CLOSEST_ULL(input * (max - min), AMDGPU_MAX_BL_LEVEL); } static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *caps, @@ -4737,8 +4820,8 @@ static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *cap uint32_t *user_brightness) { u32 brightness = scale_input_to_fw(min, max, *user_brightness); - u8 prev_signal = 0, prev_lum = 0; - int i = 0; + u8 lower_signal, upper_signal, upper_lum, lower_lum, lum; + int left, right; if (amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE) return; @@ -4746,32 +4829,54 @@ static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *cap if (!caps->data_points) return; - /* choose start to run less interpolation steps */ - if (caps->luminance_data[caps->data_points/2].input_signal > brightness) - i = caps->data_points/2; - do { - u8 signal = caps->luminance_data[i].input_signal; - u8 lum = caps->luminance_data[i].luminance; + /* + * Handle the case where brightness is below the first data point + * Interpolate between (0,0) and (first_signal, first_lum) + */ + if (brightness < caps->luminance_data[0].input_signal) { + lum = DIV_ROUND_CLOSEST(caps->luminance_data[0].luminance * brightness, + caps->luminance_data[0].input_signal); + 
goto scale; + } - /* - * brightness == signal: luminance is percent numerator - * brightness < signal: interpolate between previous and current luminance numerator - * brightness > signal: find next data point - */ - if (brightness > signal) { - prev_signal = signal; - prev_lum = lum; - i++; - continue; + left = 0; + right = caps->data_points - 1; + while (left <= right) { + int mid = left + (right - left) / 2; + u8 signal = caps->luminance_data[mid].input_signal; + + /* Exact match found */ + if (signal == brightness) { + lum = caps->luminance_data[mid].luminance; + goto scale; } - if (brightness < signal) - lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) * - (brightness - prev_signal), - signal - prev_signal); - *user_brightness = scale_fw_to_input(min, max, - DIV_ROUND_CLOSEST(lum * brightness, 101)); - return; - } while (i < caps->data_points); + + if (signal < brightness) + left = mid + 1; + else + right = mid - 1; + } + + /* verify bound */ + if (left >= caps->data_points) + left = caps->data_points - 1; + + /* At this point, left > right */ + lower_signal = caps->luminance_data[right].input_signal; + upper_signal = caps->luminance_data[left].input_signal; + lower_lum = caps->luminance_data[right].luminance; + upper_lum = caps->luminance_data[left].luminance; + + /* interpolate */ + if (right == left || !lower_lum) + lum = upper_lum; + else + lum = lower_lum + DIV_ROUND_CLOSEST((upper_lum - lower_lum) * + (brightness - lower_signal), + upper_signal - lower_signal); +scale: + *user_brightness = scale_fw_to_input(min, max, + DIV_ROUND_CLOSEST(lum * brightness, 101)); } static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps, @@ -4822,6 +4927,10 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, brightness = convert_brightness_from_user(caps, dm->brightness[bl_idx]); link = (struct dc_link *)dm->backlight_link[bl_idx]; + /* Apply brightness quirk */ + if (caps->brightness_mask) + brightness |= caps->brightness_mask; + /* Change brightness based on AUX property */ mutex_lock(&dm->dc_lock); if (dm->dc->caps.ips_support && dm->dc->ctx->dmub_srv->idle_allowed) { @@ -4829,6 +4938,14 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, reallow_idle = true; } + if (trace_amdgpu_dm_brightness_enabled()) { + trace_amdgpu_dm_brightness(__builtin_return_address(0), + user_brightness, + brightness, + caps->aux_support, + power_supply_is_system_supplied() > 0); + } + if (caps->aux_support) { rc = dc_link_set_backlight_level_nits(link, true, brightness, AUX_BL_DEFAULT_TRANSITION_TIME_MS); @@ -4882,10 +4999,8 @@ static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm, if (caps.aux_support) { u32 avg, peak; - bool rc; - rc = dc_link_get_backlight_level_nits(link, &avg, &peak); - if (!rc) + if (!dc_link_get_backlight_level_nits(link, &avg, &peak)) return dm->brightness[bl_idx]; return convert_brightness_to_user(&caps, avg); } @@ -4941,9 +5056,9 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector) caps = &dm->backlight_caps[aconnector->bl_idx]; if (get_brightness_range(caps, &min, &max)) { if (power_supply_is_system_supplied() > 0) - props.brightness = (max - min) * DIV_ROUND_CLOSEST(caps->ac_level, 100); + props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->ac_level, 100); else - props.brightness = (max - min) * DIV_ROUND_CLOSEST(caps->dc_level, 100); + props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->dc_level, 100); /* min is zero, so max needs to be adjusted 
*/ props.max_brightness = max - min; drm_dbg(drm, "Backlight caps: min: %d, max: %d, ac %d, dc %d\n", min, max, @@ -4951,8 +5066,11 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector) } else props.brightness = props.max_brightness = MAX_BACKLIGHT_LEVEL; - if (caps->data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)) + if (caps->data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)) { drm_info(drm, "Using custom brightness curve\n"); + props.scale = BACKLIGHT_SCALE_NON_LINEAR; + } else + props.scale = BACKLIGHT_SCALE_LINEAR; props.type = BACKLIGHT_RAW; snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d", @@ -5368,7 +5486,8 @@ fail: static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) { - drm_atomic_private_obj_fini(&dm->atomic_obj); + if (dm->atomic_obj.state) + drm_atomic_private_obj_fini(&dm->atomic_obj); } /****************************************************************************** @@ -6314,6 +6433,10 @@ static void fill_stream_properties_from_drm_display_mode( && aconnector && aconnector->force_yuv420_output) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; + else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR422) + && aconnector + && aconnector->force_yuv422_output) + timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR422; else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444) && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; @@ -6345,13 +6468,15 @@ static void fill_stream_properties_from_drm_display_mode( (struct drm_connector *)connector, mode_in); if (err < 0) - drm_warn_once(connector->dev, "Failed to setup avi infoframe on connector %s: %zd \n", connector->name, err); + drm_warn_once(connector->dev, "Failed to setup avi infoframe on connector %s: %zd\n", + connector->name, err); timing_out->vic = avi_frame.video_code; err = drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in); if (err < 0) - drm_warn_once(connector->dev, "Failed to setup vendor infoframe on connector %s: %zd \n", connector->name, err); + drm_warn_once(connector->dev, "Failed to setup vendor infoframe on connector %s: %zd\n", + connector->name, err); timing_out->hdmi_vic = hv_frame.vic; } @@ -7269,10 +7394,6 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector) drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux); drm_connector_unregister(connector); drm_connector_cleanup(connector); - if (aconnector->i2c) { - i2c_del_adapter(&aconnector->i2c->base); - kfree(aconnector->i2c); - } kfree(aconnector->dm_dp_aux.aux.name); kfree(connector); @@ -7534,7 +7655,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc, dc_result = DC_FAIL_ATTACH_SURFACES; if (dc_result == DC_OK) - dc_result = dc_validate_global_state(dc, dc_state, true); + dc_result = dc_validate_global_state(dc, dc_state, DC_VALIDATE_MODE_ONLY); cleanup: if (dc_state) @@ -7572,6 +7693,7 @@ create_validate_stream_for_sink(struct drm_connector *connector, bpc_limit = 8; do { + drm_dbg_kms(connector->dev, "Trying with %d bpc\n", requested_bpc); stream = create_stream_for_sink(connector, drm_mode, dm_state, old_stream, requested_bpc); @@ -7592,7 +7714,7 @@ create_validate_stream_for_sink(struct drm_connector *connector, dc_result = dm_validate_stream_and_context(adev->dm.dc, stream); if (dc_result != DC_OK) { - DRM_DEBUG_KMS("Mode %dx%d (clk %d) pixel_encoding:%s color_depth:%s 
failed validation -- %s\n", + DRM_DEBUG_KMS("Pruned mode %d x %d (clk %d) %s %s -- %s\n", drm_mode->hdisplay, drm_mode->vdisplay, drm_mode->clock, @@ -7607,16 +7729,41 @@ create_validate_stream_for_sink(struct drm_connector *connector, } while (stream == NULL && requested_bpc >= bpc_limit); - if ((dc_result == DC_FAIL_ENC_VALIDATE || - dc_result == DC_EXCEED_DONGLE_CAP) && - !aconnector->force_yuv420_output) { - DRM_DEBUG_KMS("%s:%d Retry forcing yuv420 encoding\n", - __func__, __LINE__); - - aconnector->force_yuv420_output = true; + switch (dc_result) { + /* + * If we failed to validate DP bandwidth stream with the requested RGB color depth, + * we try to fallback and configure in order: + * YUV422 (8bpc, 6bpc) + * YUV420 (8bpc, 6bpc) + */ + case DC_FAIL_ENC_VALIDATE: + case DC_EXCEED_DONGLE_CAP: + case DC_NO_DP_LINK_BANDWIDTH: + /* recursively entered twice and already tried both YUV422 and YUV420 */ + if (aconnector->force_yuv422_output && aconnector->force_yuv420_output) + break; + /* first failure; try YUV422 */ + if (!aconnector->force_yuv422_output) { + drm_dbg_kms(connector->dev, "%s:%d Validation failed with %d, retrying w/ YUV422\n", + __func__, __LINE__, dc_result); + aconnector->force_yuv422_output = true; + /* recursively entered and YUV422 failed, try YUV420 */ + } else if (!aconnector->force_yuv420_output) { + drm_dbg_kms(connector->dev, "%s:%d Validation failed with %d, retrying w/ YUV420\n", + __func__, __LINE__, dc_result); + aconnector->force_yuv420_output = true; + } stream = create_validate_stream_for_sink(connector, drm_mode, - dm_state, old_stream); + dm_state, old_stream); + aconnector->force_yuv422_output = false; aconnector->force_yuv420_output = false; + break; + case DC_OK: + break; + default: + drm_dbg_kms(connector->dev, "%s:%d Unhandled validation failure %d\n", + __func__, __LINE__, dc_result); + break; } return stream; @@ -7747,6 +7894,9 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn, struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn); int ret; + if (WARN_ON(unlikely(!old_con_state || !new_con_state))) + return -EINVAL; + trace_amdgpu_dm_connector_atomic_check(new_con_state); if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { @@ -7758,6 +7908,14 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn, if (!crtc) return 0; + if (new_con_state->privacy_screen_sw_state != old_con_state->privacy_screen_sw_state) { + new_crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(new_crtc_state)) + return PTR_ERR(new_crtc_state); + + new_crtc_state->mode_changed = true; + } + if (new_con_state->colorspace != old_con_state->colorspace) { new_crtc_state = drm_atomic_get_crtc_state(state, crtc); if (IS_ERR(new_crtc_state)) @@ -7859,6 +8017,23 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, int clock, bpp = 0; bool is_y420 = false; + if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) || + (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) { + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; + enum drm_mode_status result; + + result = drm_crtc_helper_mode_valid_fixed(encoder->crtc, adjusted_mode, native_mode); + if (result != MODE_OK && dm_new_connector_state->scaling == RMX_OFF) { + drm_dbg_driver(encoder->dev, + "mode %dx%d@%dHz is not native, enabling scaling\n", + adjusted_mode->hdisplay, adjusted_mode->vdisplay, + drm_mode_vrefresh(adjusted_mode)); + dm_new_connector_state->scaling = 
RMX_FULL; + } + return 0; + } + if (!aconnector->mst_output_port) return 0; @@ -7872,7 +8047,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, if (IS_ERR(mst_state)) return PTR_ERR(mst_state); - mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link)); + mst_state->pbn_div.full = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link); if (!state->duplicated) { int max_bpc = conn_state->max_requested_bpc; @@ -8096,6 +8271,10 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, {"1920x1200", 1920, 1200} }; + if ((connector->connector_type != DRM_MODE_CONNECTOR_eDP) && + (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)) + return; + n = ARRAY_SIZE(common_modes); for (i = 0; i < n; i++) { @@ -8435,6 +8614,18 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, if (adev->dm.hdcp_workqueue) drm_connector_attach_content_protection_property(&aconnector->base, true); } + + if (connector_type == DRM_MODE_CONNECTOR_eDP) { + struct drm_privacy_screen *privacy_screen; + + privacy_screen = drm_privacy_screen_get(adev_to_drm(adev)->dev, NULL); + if (!IS_ERR(privacy_screen)) { + drm_connector_attach_privacy_screen_provider(&aconnector->base, + privacy_screen); + } else if (PTR_ERR(privacy_screen) != -ENODEV) { + drm_warn(adev_to_drm(adev), "Error getting privacy-screen\n"); + } + } } static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, @@ -8564,7 +8755,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, } aconnector->i2c = i2c; - res = i2c_add_adapter(&i2c->base); + res = devm_i2c_add_adapter(dm->adev->dev, &i2c->base); if (res) { drm_err(adev_to_drm(dm->adev), "Failed to register hw i2c %d\n", link->link_index); @@ -8662,7 +8853,16 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev, static void manage_dm_interrupts(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc, struct dm_crtc_state *acrtc_state) -{ +{ /* + * We cannot be sure that the frontend index maps to the same + * backend index - some even map to more than one. + * So we have to go through the CRTC to find the right IRQ. 
+ */ + int irq_type = amdgpu_display_crtc_idx_to_irq_type( + adev, + acrtc->crtc_id); + struct drm_device *dev = adev_to_drm(adev); + struct drm_vblank_crtc_config config = {0}; struct dc_crtc_timing *timing; int offdelay; @@ -8715,7 +8915,35 @@ static void manage_dm_interrupts(struct amdgpu_device *adev, drm_crtc_vblank_on_config(&acrtc->base, &config); + /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_get.*/ + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { + case IP_VERSION(3, 0, 0): + case IP_VERSION(3, 0, 2): + case IP_VERSION(3, 0, 3): + case IP_VERSION(3, 2, 0): + if (amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type)) + drm_err(dev, "DM_IRQ: Cannot get pageflip irq!\n"); +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + if (amdgpu_irq_get(adev, &adev->vline0_irq, irq_type)) + drm_err(dev, "DM_IRQ: Cannot get vline0 irq!\n"); +#endif + } + } else { + /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_put.*/ + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { + case IP_VERSION(3, 0, 0): + case IP_VERSION(3, 0, 2): + case IP_VERSION(3, 0, 3): + case IP_VERSION(3, 2, 0): +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + if (amdgpu_irq_put(adev, &adev->vline0_irq, irq_type)) + drm_err(dev, "DM_IRQ: Cannot put vline0 irq!\n"); +#endif + if (amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type)) + drm_err(dev, "DM_IRQ: Cannot put pageflip irq!\n"); + } + drm_crtc_vblank_off(&acrtc->base); } } @@ -9737,7 +9965,6 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, bool mode_set_reset_required = false; u32 i; struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count}; - bool set_backlight_level = false; /* Disable writeback */ for_each_old_connector_in_state(state, connector, old_con_state, i) { @@ -9857,7 +10084,6 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, acrtc->hw_mode = new_crtc_state->mode; crtc->hwmode = new_crtc_state->mode; mode_set_reset_required = true; - set_backlight_level = true; } else if (modereset_required(new_crtc_state)) { drm_dbg_atomic(dev, "Atomic commit: RESET. crtc id %d:[%p]\n", @@ -9914,13 +10140,16 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, * to fix a flicker issue. * It will cause the dm->actual_brightness is not the current panel brightness * level. (the dm->brightness is the correct panel level) - * So we set the backlight level with dm->brightness value after set mode + * So we set the backlight level with dm->brightness value after initial + * set mode. Use restore_backlight flag to avoid setting backlight level + * for every subsequent mode set. */ - if (set_backlight_level) { + if (dm->restore_backlight) { for (i = 0; i < dm->num_of_edps; i++) { if (dm->backlight_dev[i]) amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); } + dm->restore_backlight = false; } } @@ -10023,69 +10252,40 @@ static void dm_set_writeback(struct amdgpu_display_manager *dm, drm_writeback_queue_job(wb_conn, new_con_state); } -/** - * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation. - * @state: The atomic state to commit - * - * This will tell DC to commit the constructed DC state from atomic_check, - * programming the hardware. Any failures here implies a hardware failure, since - * atomic check should have filtered anything non-kosher. 
- */ -static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) +static void amdgpu_dm_update_hdcp(struct drm_atomic_state *state) { + struct drm_connector_state *old_con_state, *new_con_state; struct drm_device *dev = state->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - struct amdgpu_display_manager *dm = &adev->dm; - struct dm_atomic_state *dm_state; - struct dc_state *dc_state = NULL; - u32 i, j; - struct drm_crtc *crtc; - struct drm_crtc_state *old_crtc_state, *new_crtc_state; - unsigned long flags; - bool wait_for_vblank = true; struct drm_connector *connector; - struct drm_connector_state *old_con_state, *new_con_state; - struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; - int crtc_disable_count = 0; - - trace_amdgpu_dm_atomic_commit_tail_begin(state); - - drm_atomic_helper_update_legacy_modeset_state(dev, state); - drm_dp_mst_atomic_wait_for_dependencies(state); + struct amdgpu_device *adev = drm_to_adev(dev); + int i; - dm_state = dm_atomic_get_new_state(state); - if (dm_state && dm_state->context) { - dc_state = dm_state->context; - amdgpu_dm_commit_streams(state, dc_state); - } + if (!adev->dm.hdcp_workqueue) + return; for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct dm_crtc_state *dm_new_crtc_state; struct amdgpu_dm_connector *aconnector; - if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + if (!connector || connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) continue; aconnector = to_amdgpu_dm_connector(connector); - if (!adev->dm.hdcp_workqueue) - continue; - - pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i); - - if (!connector) - continue; + drm_dbg(dev, "[HDCP_DM] -------------- i : %x ----------\n", i); - pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", + drm_dbg(dev, "[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", connector->index, connector->status, connector->dpms); - pr_debug("[HDCP_DM] state protection old: %x new: %x\n", + drm_dbg(dev, "[HDCP_DM] state protection old: %x new: %x\n", old_con_state->content_protection, new_con_state->content_protection); if (aconnector->dc_sink) { if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL && aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) { - pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n", + drm_dbg(dev, "[HDCP_DM] pipe_ctx dispname=%s\n", aconnector->dc_sink->edid_caps.display_name); } } @@ -10099,7 +10299,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) } if (old_crtc_state) - pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", + drm_dbg(dev, "old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", old_crtc_state->enable, old_crtc_state->active, old_crtc_state->mode_changed, @@ -10107,29 +10307,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) old_crtc_state->connectors_changed); if (new_crtc_state) - pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", + drm_dbg(dev, "NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", new_crtc_state->enable, new_crtc_state->active, new_crtc_state->mode_changed, new_crtc_state->active_changed, new_crtc_state->connectors_changed); - } - for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { - struct 
dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); - struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - - if (!adev->dm.hdcp_workqueue) - continue; - - new_crtc_state = NULL; - old_crtc_state = NULL; - - if (acrtc) { - new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); - old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); - } dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); @@ -10173,7 +10357,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) enable_encryption = true; - drm_info(adev_to_drm(adev), "[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption); + drm_info(dev, "[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption); if (aconnector->dc_link) hdcp_update_display( @@ -10181,6 +10365,78 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) new_con_state->hdcp_content_type, enable_encryption); } } +} + +static int amdgpu_dm_atomic_setup_commit(struct drm_atomic_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; + int i, ret; + + ret = drm_dp_mst_atomic_setup_commit(state); + if (ret) + return ret; + + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + /* + * Color management settings. We also update color properties + * when a modeset is needed, to ensure it gets reprogrammed. + */ + if (dm_new_crtc_state->base.active && dm_new_crtc_state->stream && + (dm_new_crtc_state->base.color_mgmt_changed || + dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf || + drm_atomic_crtc_needs_modeset(new_crtc_state))) { + ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state); + if (ret) { + drm_dbg_atomic(state->dev, "Failed to update color state\n"); + return ret; + } + } + } + + return 0; +} + +/** + * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation. + * @state: The atomic state to commit + * + * This will tell DC to commit the constructed DC state from atomic_check, + * programming the hardware. Any failures here implies a hardware failure, since + * atomic check should have filtered anything non-kosher. 
+ */ +static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_display_manager *dm = &adev->dm; + struct dm_atomic_state *dm_state; + struct dc_state *dc_state = NULL; + u32 i, j; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + unsigned long flags; + bool wait_for_vblank = true; + struct drm_connector *connector; + struct drm_connector_state *old_con_state = NULL, *new_con_state = NULL; + struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; + int crtc_disable_count = 0; + + trace_amdgpu_dm_atomic_commit_tail_begin(state); + + drm_atomic_helper_update_legacy_modeset_state(dev, state); + drm_dp_mst_atomic_wait_for_dependencies(state); + + dm_state = dm_atomic_get_new_state(state); + if (dm_state && dm_state->context) { + dc_state = dm_state->context; + amdgpu_dm_commit_streams(state, dc_state); + } + + amdgpu_dm_update_hdcp(state); /* Handle connector state changes */ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { @@ -10283,6 +10539,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) &stream_update); mutex_unlock(&dm->dc_lock); kfree(dummy_updates); + + drm_connector_update_privacy_screen(new_con_state); } /** @@ -10334,6 +10592,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) if (amdgpu_dm_crc_window_is_activated(crtc)) { uint8_t cnt; + spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); for (cnt = 0; cnt < MAX_CRC_WINDOW_NUM; cnt++) { if (acrtc->dm_irq_params.window_param[cnt].enable) { @@ -10636,6 +10895,8 @@ static void get_freesync_config_for_crtc( } else { config.state = VRR_STATE_INACTIVE; } + } else { + config.state = VRR_STATE_UNSUPPORTED; } out: new_crtc_state->freesync_config = config; @@ -10953,7 +11214,7 @@ skip_modeset: if (dm_new_crtc_state->base.color_mgmt_changed || dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf || drm_atomic_crtc_needs_modeset(new_crtc_state)) { - ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state); + ret = amdgpu_dm_check_crtc_color_mgmt(dm_new_crtc_state, true); if (ret) goto fail; } @@ -12156,7 +12417,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, drm_dbg_atomic(dev, "MST drm_dp_mst_atomic_check() failed\n"); goto fail; } - status = dc_validate_global_state(dc, dm_state->context, true); + status = dc_validate_global_state(dc, dm_state->context, DC_VALIDATE_MODE_ONLY); if (status != DC_OK) { drm_dbg_atomic(dev, "DC global validation failure: %s (%d)", dc_status_to_str(status), status); @@ -12537,7 +12798,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, dm_con_state = to_dm_connector_state(connector->state); - if (!adev->dm.freesync_module) + if (!adev->dm.freesync_module || !dc_supports_vrr(sink->ctx->dce_version)) goto update; edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index d7d92f9911e4..009f206226f0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: MIT */ /* * Copyright (C) 2015-2020 Advanced Micro Devices, Inc. All rights reserved. 
* @@ -152,6 +153,20 @@ struct idle_workqueue { bool running; }; +/** + * struct vupdate_offload_work - Work data for offloading task from vupdate handler + * @work: Kernel work data for the work event + * @adev: amdgpu_device back pointer + * @stream: DC stream associated with the crtc + * @adjust: DC CRTC timing adjust to be applied to the crtc + */ +struct vupdate_offload_work { + struct work_struct work; + struct amdgpu_device *adev; + struct dc_stream_state *stream; + struct dc_crtc_timing_adjust *adjust; +}; + #define MAX_LUMINANCE_DATA_POINTS 99 /** @@ -201,6 +216,11 @@ struct amdgpu_dm_backlight_caps { */ bool aux_support; /** + * @brightness_mask: After deriving brightness, OR it with this mask. + * Workaround for panels with issues with certain brightness values. + */ + u32 brightness_mask; + /** * @ac_level: the default brightness if booted on AC */ u8 ac_level; @@ -611,6 +631,13 @@ struct amdgpu_display_manager { u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP]; /** + * @restore_backlight: + * + * Flag to indicate whether to restore backlight after modeset. + */ + bool restore_backlight; + + /** * @aux_hpd_discon_quirk: * * quirk for hpd discon while aux is on-going. @@ -636,8 +663,9 @@ struct amdgpu_display_manager { * @bb_from_dmub: * * Bounding box data read from dmub during early initialization for DCN4+ + * Data is stored as a byte array that should be casted to the appropriate bb struct */ - struct dml2_soc_bb *bb_from_dmub; + void *bb_from_dmub; /** * @oem_i2c: @@ -752,6 +780,9 @@ struct amdgpu_dm_connector { uint16_t vc_full_pbn; struct mutex handle_mst_msg_ready; + /* branch device specific data */ + uint32_t branch_ieee_oui; + /* TODO see if we can merge with ddc_bus or make a dm_connector */ struct amdgpu_i2c_adapter *i2c; @@ -775,6 +806,7 @@ struct amdgpu_dm_connector { bool fake_enable; bool force_yuv420_output; + bool force_yuv422_output; struct dsc_preferred_settings dsc_settings; union dp_downstream_port_present mst_downstream_port_present; /* Cached display modes */ @@ -1022,6 +1054,8 @@ void amdgpu_dm_init_color_mod(void); int amdgpu_dm_create_color_properties(struct amdgpu_device *adev); int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state); int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc); +int amdgpu_dm_check_crtc_color_mgmt(struct dm_crtc_state *crtc, + bool check_only); int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, struct drm_plane_state *plane_state, struct dc_plane_state *dc_plane_state); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index ebabfe3a512f..a4ac6d442278 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: MIT /* * Copyright 2018 Advanced Micro Devices, Inc. * @@ -27,7 +28,6 @@ #include "amdgpu_dm.h" #include "dc.h" #include "modules/color/color_gamma.h" -#include "basics/conversion.h" /** * DOC: overview @@ -566,12 +566,11 @@ static int __set_output_tf(struct dc_transfer_func *func, return res ? 
0 : -ENOMEM; } -static int amdgpu_dm_set_atomic_regamma(struct dc_stream_state *stream, +static int amdgpu_dm_set_atomic_regamma(struct dc_transfer_func *out_tf, const struct drm_color_lut *regamma_lut, uint32_t regamma_size, bool has_rom, enum dc_transfer_func_predefined tf) { - struct dc_transfer_func *out_tf = &stream->out_transfer_func; int ret = 0; if (regamma_size || tf != TRANSFER_FUNCTION_LINEAR) { @@ -821,7 +820,7 @@ int amdgpu_dm_verify_lut3d_size(struct amdgpu_device *adev, struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state); const struct drm_color_lut *shaper = NULL, *lut3d = NULL; uint32_t exp_size, size, dim_size = MAX_COLOR_3DLUT_SIZE; - bool has_3dlut = adev->dm.dc->caps.color.dpp.hw_3d_lut; + bool has_3dlut = adev->dm.dc->caps.color.dpp.hw_3d_lut || adev->dm.dc->caps.color.mpc.preblend; /* shaper LUT is only available if 3D LUT color caps */ exp_size = has_3dlut ? MAX_COLOR_LUT_ENTRIES : 0; @@ -885,33 +884,33 @@ int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state) } /** - * amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream. + * amdgpu_dm_check_crtc_color_mgmt: Check if DRM color props are programmable by DC. * @crtc: amdgpu_dm crtc state + * @check_only: only check color state without update dc stream * - * With no plane level color management properties we're free to use any - * of the HW blocks as long as the CRTC CTM always comes before the - * CRTC RGM and after the CRTC DGM. - * - * - The CRTC RGM block will be placed in the RGM LUT block if it is non-linear. - * - The CRTC DGM block will be placed in the DGM LUT block if it is non-linear. - * - The CRTC CTM will be placed in the gamut remap block if it is non-linear. + * This function just verifies CRTC LUT sizes, if there is enough space for + * output transfer function and if its parameters can be calculated by AMD + * color module. It also adjusts some settings for programming CRTC degamma at + * plane stage, using plane DGM block. * * The RGM block is typically more fully featured and accurate across * all ASICs - DCE can't support a custom non-linear CRTC DGM. * * For supporting both plane level color management and CRTC level color - * management at once we have to either restrict the usage of CRTC properties - * or blend adjustments together. + * management at once we have to either restrict the usage of some CRTC + * properties or blend adjustments together. * * Returns: - * 0 on success. Error code if setup fails. + * 0 on success. Error code if validation fails. */ -int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc) + +int amdgpu_dm_check_crtc_color_mgmt(struct dm_crtc_state *crtc, + bool check_only) { struct dc_stream_state *stream = crtc->stream; struct amdgpu_device *adev = drm_to_adev(crtc->base.state->dev); bool has_rom = adev->asic_type <= CHIP_RAVEN; - struct drm_color_ctm *ctm = NULL; + struct dc_transfer_func *out_tf; const struct drm_color_lut *degamma_lut, *regamma_lut; uint32_t degamma_size, regamma_size; bool has_regamma, has_degamma; @@ -940,6 +939,14 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc) crtc->cm_has_degamma = false; crtc->cm_is_degamma_srgb = false; + if (check_only) { + out_tf = kvzalloc(sizeof(*out_tf), GFP_KERNEL); + if (!out_tf) + return -ENOMEM; + } else { + out_tf = &stream->out_transfer_func; + } + /* Setup regamma and degamma. */ if (is_legacy) { /* @@ -954,8 +961,8 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc) * inverse color ramp in legacy userspace. 
*/ crtc->cm_is_degamma_srgb = true; - stream->out_transfer_func.type = TF_TYPE_DISTRIBUTED_POINTS; - stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB; + out_tf->type = TF_TYPE_DISTRIBUTED_POINTS; + out_tf->tf = TRANSFER_FUNCTION_SRGB; /* * Note: although we pass has_rom as parameter here, we never * actually use ROM because the color module only takes the ROM @@ -963,16 +970,12 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc) * * See more in mod_color_calculate_regamma_params() */ - r = __set_legacy_tf(&stream->out_transfer_func, regamma_lut, + r = __set_legacy_tf(out_tf, regamma_lut, regamma_size, has_rom); - if (r) - return r; } else { regamma_size = has_regamma ? regamma_size : 0; - r = amdgpu_dm_set_atomic_regamma(stream, regamma_lut, + r = amdgpu_dm_set_atomic_regamma(out_tf, regamma_lut, regamma_size, has_rom, tf); - if (r) - return r; } /* @@ -981,6 +984,43 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc) * have to place the CTM in the OCSC in that case. */ crtc->cm_has_degamma = has_degamma; + if (check_only) + kvfree(out_tf); + + return r; +} + +/** + * amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream. + * @crtc: amdgpu_dm crtc state + * + * With no plane level color management properties we're free to use any + * of the HW blocks as long as the CRTC CTM always comes before the + * CRTC RGM and after the CRTC DGM. + * + * - The CRTC RGM block will be placed in the RGM LUT block if it is non-linear. + * - The CRTC DGM block will be placed in the DGM LUT block if it is non-linear. + * - The CRTC CTM will be placed in the gamut remap block if it is non-linear. + * + * The RGM block is typically more fully featured and accurate across + * all ASICs - DCE can't support a custom non-linear CRTC DGM. + * + * For supporting both plane level color management and CRTC level color + * management at once we have to either restrict the usage of CRTC properties + * or blend adjustments together. + * + * Returns: + * 0 on success. Error code if setup fails. + */ +int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc) +{ + struct dc_stream_state *stream = crtc->stream; + struct drm_color_ctm *ctm = NULL; + int ret; + + ret = amdgpu_dm_check_crtc_color_mgmt(crtc, false); + if (ret) + return ret; /* Setup CRTC CTM. */ if (crtc->base.ctm) { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index 033bd817d871..e20aa7438066 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: MIT /* * Copyright 2015 Advanced Micro Devices, Inc. * diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h index 3da056c8d20b..95bdb8699d7f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: MIT */ /* * Copyright 2019 Advanced Micro Devices, Inc. 
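amdgpu_dm_check_crtc_color_mgmt() above uses a validate-versus-apply split: with check_only set, the regamma parameters are computed into a throwaway transfer function, so atomic_check can reject an unprogrammable LUT without touching the live stream state. A distilled sketch of the pattern, assuming a hypothetical compute helper in place of the real color-module calls:

static int check_or_apply_out_tf(struct dc_stream_state *stream, bool check_only)
{
	struct dc_transfer_func *out_tf;
	int r;

	if (check_only) {
		/* scratch buffer: results are discarded after validation */
		out_tf = kvzalloc(sizeof(*out_tf), GFP_KERNEL);
		if (!out_tf)
			return -ENOMEM;
	} else {
		/* commit path: write straight into the stream state */
		out_tf = &stream->out_transfer_func;
	}

	r = compute_regamma_params(out_tf);	/* hypothetical stand-in */

	if (check_only)
		kvfree(out_tf);

	return r;
}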
* diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 2551823382f8..1ec9d03ad747 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -218,8 +218,10 @@ static void amdgpu_dm_idle_worker(struct work_struct *work) break; } - if (idle_work->enable) + if (idle_work->enable) { + dc_post_update_surfaces_to_stream(idle_work->dm->dc); dc_allow_idle_optimizations(idle_work->dm->dc, true); + } mutex_unlock(&idle_work->dm->dc_lock); } idle_work->dm->idle_workqueue->running = false; @@ -273,8 +275,10 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work) vblank_work->acrtc->dm_irq_params.allow_sr_entry); } - if (dm->active_vblank_irq_count == 0) + if (dm->active_vblank_irq_count == 0) { + dc_post_update_surfaces_to_stream(dm->dc); dc_allow_idle_optimizations(dm->dc, true); + } mutex_unlock(&dm->dc_lock); @@ -299,12 +303,35 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable) irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id); if (enable) { - /* vblank irq on -> Only need vupdate irq in vrr mode */ - if (amdgpu_dm_crtc_vrr_active(acrtc_state)) - rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true); - } else { - /* vblank irq off -> vupdate irq off */ - rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false); + struct dc *dc = adev->dm.dc; + struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); + struct psr_settings *psr = &acrtc_state->stream->link->psr_settings; + struct replay_settings *pr = &acrtc_state->stream->link->replay_settings; + bool sr_supported = (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED) || + pr->config.replay_supported; + + /* + * IPS & self-refresh feature can cause vblank counter resets between + * vblank disable and enable. + * It may cause system stuck due to waiting for the vblank counter. + * Call this function to estimate missed vblanks by using timestamps and + * update the vblank counter in DRM. + */ + if (dc->caps.ips_support && + dc->config.disable_ips != DMUB_IPS_DISABLE_ALL && + sr_supported && vblank->config.disable_immediate) + drm_crtc_vblank_restore(crtc); + } + + if (dc_supports_vrr(dm->dc->ctx->dce_version)) { + if (enable) { + /* vblank irq on -> Only need vupdate irq in vrr mode */ + if (amdgpu_dm_crtc_vrr_active(acrtc_state)) + rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true); + } else { + /* vblank irq off -> vupdate irq off */ + rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false); + } } if (rc) @@ -661,6 +688,15 @@ static int amdgpu_dm_crtc_helper_atomic_check(struct drm_crtc *crtc, return -EINVAL; } + if (!state->legacy_cursor_update && amdgpu_dm_crtc_vrr_active(dm_crtc_state)) { + struct drm_plane_state *primary_state; + + /* Pull in primary plane for correct VRR handling */ + primary_state = drm_atomic_get_plane_state(state, crtc->primary); + if (IS_ERR(primary_state)) + return PTR_ERR(primary_state); + } + /* In some use cases, like reset, no stream is attached */ if (!dm_crtc_state->stream) return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index c7d13e743e6c..f263e1a4537e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: MIT /* * Copyright 2018 Advanced Micro Devices, Inc. 
* @@ -3106,6 +3107,35 @@ static int replay_get_state(void *data, u64 *val) } /* + * Start / Stop capture Replay residency + */ +static int replay_set_residency(void *data, u64 val) +{ + struct amdgpu_dm_connector *connector = data; + struct dc_link *link = connector->dc_link; + bool is_start = (val != 0); + u32 residency = 0; + + link->dc->link_srv->edp_replay_residency(link, &residency, is_start, PR_RESIDENCY_MODE_PHY); + return 0; +} + +/* + * Read Replay residency + */ +static int replay_get_residency(void *data, u64 *val) +{ + struct amdgpu_dm_connector *connector = data; + struct dc_link *link = connector->dc_link; + u32 residency = 0; + + link->dc->link_srv->edp_replay_residency(link, &residency, false, PR_RESIDENCY_MODE_PHY); + *val = (u64)residency; + + return 0; +} + +/* * Read PSR state */ static int psr_get(void *data, u64 *val) @@ -3324,7 +3354,8 @@ DEFINE_DEBUGFS_ATTRIBUTE(dmcub_trace_event_state_fops, dmcub_trace_event_state_g dmcub_trace_event_state_set, "%llu\n"); DEFINE_DEBUGFS_ATTRIBUTE(replay_state_fops, replay_get_state, NULL, "%llu\n"); - +DEFINE_DEBUGFS_ATTRIBUTE(replay_residency_fops, replay_get_residency, replay_set_residency, + "%llu\n"); DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n"); DEFINE_DEBUGFS_ATTRIBUTE(psr_residency_fops, psr_read_residency, NULL, "%llu\n"); @@ -3502,6 +3533,8 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector) debugfs_create_file("replay_capability", 0444, dir, connector, &replay_capability_fops); debugfs_create_file("replay_state", 0444, dir, connector, &replay_state_fops); + debugfs_create_file_unsafe("replay_residency", 0444, dir, + connector, &replay_residency_fops); debugfs_create_file_unsafe("psr_capability", 0444, dir, connector, &psr_capability_fops); debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops); debugfs_create_file_unsafe("psr_residency", 0444, dir, @@ -3988,7 +4021,7 @@ static int capabilities_show(struct seq_file *m, void *unused) struct hubbub *hubbub = dc->res_pool->hubbub; - if (hubbub->funcs->get_mall_en) + if (hubbub && hubbub->funcs->get_mall_en) hubbub->funcs->get_mall_en(hubbub, &mall_in_use); if (dc->cap_funcs.get_subvp_en) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h index 071200473c27..122cdc124b3b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: MIT */ /* * Copyright 2018 Advanced Micro Devices, Inc. * diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index c16962256514..19038f336155 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: MIT /* * Copyright 2019 Advanced Micro Devices, Inc. 
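The replay_residency file added above follows the standard debugfs u64-attribute recipe: a get/set pair wrapped in DEFINE_DEBUGFS_ATTRIBUTE() and registered with debugfs_create_file_unsafe() so the attribute's built-in locking is used. A minimal sketch with placeholder handlers:

static int example_residency_get(void *data, u64 *val)
{
	*val = 0;	/* report the captured residency value here */
	return 0;
}

static int example_residency_set(void *data, u64 val)
{
	return 0;	/* val != 0 starts capture, 0 stops it */
}

DEFINE_DEBUGFS_ATTRIBUTE(example_residency_fops, example_residency_get,
			 example_residency_set, "%llu\n");

/* registration mirrors the hunk above:
 *	debugfs_create_file_unsafe("replay_residency", 0444, dir, connector,
 *				   &example_residency_fops);
 */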
* @@ -222,6 +223,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work, display_adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE; link_adjust.auth_delay = 2; + link_adjust.retry_limit = MAX_NUM_OF_ATTEMPTS; if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) { link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; @@ -571,6 +573,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) link->dp.usb4_enabled = config->usb4_enabled; display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION; link->adjust.auth_delay = 2; + link->adjust.retry_limit = MAX_NUM_OF_ATTEMPTS; link->adjust.hdcp1.disable = 0; hdcp_w->encryption_status[display->index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; @@ -723,8 +726,8 @@ ret: static const struct bin_attribute data_attr = { .attr = {.name = "hdcp_srm", .mode = 0664}, .size = PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, /* Limit SRM size */ - .write_new = srm_data_write, - .read_new = srm_data_read, + .write = srm_data_write, + .read = srm_data_read, }; struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, @@ -765,14 +768,18 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct mod_hdcp_ddc_funcs *ddc_funcs = &config->ddc.funcs; config->psp.handle = &adev->psp; - if (dc->ctx->dce_version == DCN_VERSION_3_1 || + if (dc->ctx->dce_version == DCN_VERSION_3_1 || dc->ctx->dce_version == DCN_VERSION_3_14 || dc->ctx->dce_version == DCN_VERSION_3_15 || - dc->ctx->dce_version == DCN_VERSION_3_5 || + dc->ctx->dce_version == DCN_VERSION_3_16 || + dc->ctx->dce_version == DCN_VERSION_3_2 || + dc->ctx->dce_version == DCN_VERSION_3_21 || + dc->ctx->dce_version == DCN_VERSION_3_5 || dc->ctx->dce_version == DCN_VERSION_3_51 || - dc->ctx->dce_version == DCN_VERSION_3_6 || - dc->ctx->dce_version == DCN_VERSION_3_16) + dc->ctx->dce_version == DCN_VERSION_3_6 || + dc->ctx->dce_version == DCN_VERSION_4_01) config->psp.caps.dtm_v3_supported = 1; + config->ddc.handle = dc_get_link_at_index(dc, i); ddc_funcs->write_i2c = lp_write_i2c; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h index 69b445b011c8..4faa344f196e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: MIT */ /* * Copyright 2019 Advanced Micro Devices, Inc. * diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 9e3e51a2dc49..fe100e4c9801 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: MIT /* * Copyright 2015 Advanced Micro Devices, Inc. * diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index b61e210f6246..a1c722112c22 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: MIT /* * Copyright 2015 Advanced Micro Devices, Inc. 
* diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h index ba17c23b2706..4f6b58f4f90d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: MIT */ /* * Copyright 2015 Advanced Micro Devices, Inc. * diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h index 6c9de834455b..3c9995275cbd 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: MIT */ /* * Copyright 2020 Advanced Micro Devices, Inc. * diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 25e8befbcc47..5e92eaa67aa3 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: MIT /* * Copyright 2012-15 Advanced Micro Devices, Inc. * @@ -107,7 +108,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, if (payload.write && result >= 0) { if (result) { /*one byte indicating partially written bytes*/ - drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX partially written\n"); + drm_dbg_dp(adev_to_drm(adev), "AUX partially written\n"); result = payload.data[0]; } else if (!payload.reply[0]) /*I2C_ACK|AUX_ACK*/ @@ -133,11 +134,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, break; } - drm_dbg_dp(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result); + drm_dbg_dp(adev_to_drm(adev), "DP AUX transfer fail:%d\n", operation_result); } if (payload.reply[0]) - drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.", + drm_dbg_dp(adev_to_drm(adev), "AUX reply command not ACK: 0x%02x.", payload.reply[0]); return result; @@ -329,6 +330,34 @@ static bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnect return true; } +static bool retrieve_branch_specific_data(struct amdgpu_dm_connector *aconnector) +{ + struct drm_connector *connector = &aconnector->base; + struct drm_dp_mst_port *port = aconnector->mst_output_port; + struct drm_dp_mst_port *port_parent; + struct drm_dp_aux *immediate_upstream_aux; + struct drm_dp_desc branch_desc; + + if (!port->parent) + return false; + + port_parent = port->parent->port_parent; + + immediate_upstream_aux = port_parent ? 
&port_parent->aux : port->mgr->aux; + + if (drm_dp_read_desc(immediate_upstream_aux, &branch_desc, true)) + return false; + + aconnector->branch_ieee_oui = (branch_desc.ident.oui[0] << 16) + + (branch_desc.ident.oui[1] << 8) + + (branch_desc.ident.oui[2]); + + drm_dbg_dp(port->aux.drm_dev, "MST branch oui 0x%x detected at %s\n", + aconnector->branch_ieee_oui, connector->name); + + return true; +} + static int dm_dp_mst_get_modes(struct drm_connector *connector) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); @@ -668,6 +697,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, drm_connector_set_path_property(connector, pathprop); + if (!retrieve_branch_specific_data(aconnector)) + aconnector->branch_ieee_oui = 0; + /* * Initialize connector state before adding the connectror to drm and * framebuffer lists @@ -809,6 +841,7 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, drm_dp_aux_init(&aconnector->dm_dp_aux.aux); drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux, &aconnector->base); + drm_dp_dpcd_set_probe(&aconnector->dm_dp_aux.aux, false); if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP) return; @@ -821,13 +854,20 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, drm_connector_attach_dp_subconnector_property(&aconnector->base); } -int dm_mst_get_pbn_divider(struct dc_link *link) +uint32_t dm_mst_get_pbn_divider(struct dc_link *link) { + uint32_t pbn_div_x100; + uint64_t dividend, divisor; + if (!link) return 0; - return dc_link_bandwidth_kbps(link, - dc_link_get_link_cap(link)) / (8 * 1000 * 54); + dividend = (uint64_t)dc_link_bandwidth_kbps(link, dc_link_get_link_cap(link)) * 100; + divisor = 8 * 1000 * 54; + + pbn_div_x100 = div64_u64(dividend, divisor); + + return dfixed_const(pbn_div_x100) / 100; } struct dsc_mst_fairness_params { @@ -1762,14 +1802,20 @@ static bool dp_get_link_current_set_bw(struct drm_dp_aux *aux, uint32_t *cur_lin union lane_count_set lane_count; u8 dp_link_encoding; u8 link_bw_set = 0; + u8 data[16] = {0}; *cur_link_bw = 0; - if (drm_dp_dpcd_read(aux, DP_MAIN_LINK_CHANNEL_CODING_SET, &dp_link_encoding, 1) != 1 || - drm_dp_dpcd_read(aux, DP_LANE_COUNT_SET, &lane_count.raw, 1) != 1 || - drm_dp_dpcd_read(aux, DP_LINK_BW_SET, &link_bw_set, 1) != 1) + if (drm_dp_dpcd_read(aux, DP_LINK_BW_SET, data, 16) != 16) return false; + dp_link_encoding = data[DP_MAIN_LINK_CHANNEL_CODING_SET - DP_LINK_BW_SET]; + link_bw_set = data[DP_LINK_BW_SET - DP_LINK_BW_SET]; + lane_count.raw = data[DP_LANE_COUNT_SET - DP_LINK_BW_SET]; + + drm_dbg_dp(aux->drm_dev, "MST_DSC downlink setting: %d, 0x%x x %d\n", + dp_link_encoding, link_bw_set, lane_count.bits.LANE_COUNT_SET); + switch (dp_link_encoding) { case DP_8b_10b_ENCODING: link_rate = link_bw_set; @@ -1866,8 +1912,10 @@ enum dc_status dm_dp_mst_is_port_support_mode( end_link_bw = aconnector->mst_local_bw; } - if (end_link_bw > 0 && stream_kbps > end_link_bw) { - DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link." + if (end_link_bw > 0 && + stream_kbps > end_link_bw && + aconnector->branch_ieee_oui != DP_BRANCH_DEVICE_ID_90CC24) { + DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link. 
" "Mode required bw can't fit into last link\n"); return DC_FAIL_BANDWIDTH_VALIDATE; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h index 600d6e221011..6f7ea684b555 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: MIT */ /* * Copyright 2012-15 Advanced Micro Devices, Inc. * @@ -59,7 +60,7 @@ enum mst_msg_ready_type { struct amdgpu_display_manager; struct amdgpu_dm_connector; -int dm_mst_get_pbn_divider(struct dc_link *link); +uint32_t dm_mst_get_pbn_divider(struct dc_link *link); void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *aconnector, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index b7c6e8d13435..e027798ece03 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -92,9 +92,9 @@ enum dm_micro_swizzle { MICRO_SWIZZLE_R = 3 }; -const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd) +const struct drm_format_info *amdgpu_dm_plane_get_format_info(u32 pixel_format, u64 modifier) { - return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]); + return amdgpu_lookup_format_info(pixel_format, modifier); } void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state, @@ -146,7 +146,7 @@ static void amdgpu_dm_plane_add_modifier(uint64_t **mods, uint64_t *size, uint64 if (*cap - *size < 1) { uint64_t new_cap = *cap * 2; - uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL); + uint64_t *new_mods = kmalloc_array(new_cap, sizeof(uint64_t), GFP_KERNEL); if (!new_mods) { kfree(*mods); @@ -732,7 +732,7 @@ static int amdgpu_dm_plane_get_plane_modifiers(struct amdgpu_device *adev, unsig if (adev->family < AMDGPU_FAMILY_AI) return 0; - *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL); + *mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL); if (plane_type == DRM_PLANE_TYPE_CURSOR) { amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); @@ -1633,7 +1633,7 @@ dm_atomic_plane_attach_color_mgmt_properties(struct amdgpu_display_manager *dm, drm_object_attach_property(&plane->base, dm->adev->mode_info.plane_ctm_property, 0); - if (dpp_color_caps.hw_3d_lut) { + if (dpp_color_caps.hw_3d_lut || dm->dc->caps.color.mpc.preblend) { drm_object_attach_property(&plane->base, mode_info.plane_shaper_lut_property, 0); drm_object_attach_property(&plane->base, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h index 615d2ab2b803..ea2619b507db 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h @@ -58,7 +58,7 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, unsigned long possible_crtcs, const struct dc_plane_cap *plane_cap); -const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd); +const struct drm_format_info *amdgpu_dm_plane_get_format_info(u32 pixel_format, u64 modifier); void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state, bool *per_pixel_alpha, bool *pre_multiplied_alpha, diff --git 
a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index 848c5b4bb301..11b2ea6edf95 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: MIT /* * Copyright 2018 Advanced Micro Devices, Inc. * @@ -97,6 +98,7 @@ bool dm_pp_apply_display_requirements( const struct dm_pp_single_disp_config *dc_cfg = &pp_display_cfg->disp_configs[i]; adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1; + adev->pm.pm_display_cfg.displays[i].pixel_clock = dc_cfg->pixel_clock; } amdgpu_dpm_display_configuration_change(adev, &adev->pm.pm_display_cfg); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index f984cb0cb889..fd491b7a3cd7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: MIT /* * Copyright 2021 Advanced Micro Devices, Inc. * @@ -26,7 +27,6 @@ #include "amdgpu_dm_psr.h" #include "dc_dmub_srv.h" #include "dc.h" -#include "dm_helpers.h" #include "amdgpu_dm.h" #include "modules/power/power_helpers.h" @@ -119,8 +119,10 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) psr_config.allow_multi_disp_optimizations = (amdgpu_dc_feature_mask & DC_PSR_ALLOW_MULTI_DISP_OPT); - if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config)) - return false; + if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) { + if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config)) + return false; + } ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h index e2366321a3c1..4fb8626913cf 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: MIT */ /* * Copyright 2021 Advanced Micro Devices, Inc. * diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c index 41f07f13a7b5..80704d709e44 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: MIT /* * Copyright 2023 Advanced Micro Devices, Inc. * @@ -30,7 +31,7 @@ #include "amdgpu_dm.h" #include "modules/power/power_helpers.h" #include "dmub/inc/dmub_cmd.h" -#include "dc/inc/link.h" +#include "dc/inc/link_service.h" /* * amdgpu_dm_link_supports_replay() - check if the link supports replay diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h index 8126bdb1eb6b..73b6c67ae5e7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: MIT */ /* * Copyright 2021 Advanced Micro Devices, Inc. 
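For reference, the dm_mst_get_pbn_divider() rework in the MST changes above keeps two decimal digits of precision and returns the divider in DRM's 20.12 fixed-point format instead of a truncated integer. A worked example, assuming a 4-lane HBR2 link and ignoring FEC overhead:

	link_bw_kbps = 4 lanes * 5400000 kbps * 8/10 (8b/10b)  = 17280000
	pbn_div_x100 = 17280000 * 100 / (8 * 1000 * 54)        = 4000
	return value = dfixed_const(4000) / 100 = (4000 << 12) / 100 = 163840,
	               i.e. 40.00 in 20.12 fixed point (the familiar HBR2 x4 divider).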
* diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c index 0005f5f8f34f..8550d5e8b753 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c @@ -1,3 +1,4 @@ +//SPDX-License-Identifier: MIT /* * Copyright 2015 Advanced Micro Devices, Inc. * @@ -52,11 +53,11 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc func_name, line); } -void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx) +void dm_trace_smu_enter(uint32_t msg_id, uint32_t param_in, unsigned int delay, struct dc_context *ctx) { } -void dm_trace_smu_delay(uint32_t delay, struct dc_context *ctx) +void dm_trace_smu_exit(bool success, uint32_t response, struct dc_context *ctx) { } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h index 4686d4b0cbad..aa56fd6d56c3 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h @@ -1,3 +1,4 @@ +//SPDX-License-Identifier: MIT /* * Copyright 2018 Advanced Micro Devices, Inc. * @@ -726,6 +727,32 @@ TRACE_EVENT(dcn_optc_lock_unlock_state, ) ); +TRACE_EVENT(amdgpu_dm_brightness, + TP_PROTO(void *function, u32 user_brightness, u32 converted_brightness, bool aux, bool ac), + TP_ARGS(function, user_brightness, converted_brightness, aux, ac), + TP_STRUCT__entry( + __field(void *, function) + __field(u32, user_brightness) + __field(u32, converted_brightness) + __field(bool, aux) + __field(bool, ac) + ), + TP_fast_assign( + __entry->function = function; + __entry->user_brightness = user_brightness; + __entry->converted_brightness = converted_brightness; + __entry->aux = aux; + __entry->ac = ac; + ), + TP_printk("%ps: brightness requested=%u converted=%u aux=%s power=%s", + (void *)__entry->function, + (u32)__entry->user_brightness, + (u32)__entry->converted_brightness, + (__entry->aux) ? "true" : "false", + (__entry->ac) ? 
"AC" : "DC" + ) +); + #endif /* _AMDGPU_DM_TRACE_H_ */ #undef TRACE_INCLUDE_PATH diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index 3c9ecea7eebc..dc943abd6dba 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -37,6 +37,7 @@ DC_LIBS += dcn301 DC_LIBS += dcn31 DC_LIBS += dml DC_LIBS += dml2 +DC_LIBS += soc_and_ip_translator endif DC_LIBS += dce120 diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c index d897f8a30ede..4da5adab799c 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c @@ -1136,7 +1136,7 @@ static void calculate_bandwidth( } } } - data->total_dmifmc_urgent_trips = bw_ceil2(bw_div(data->total_requests_for_adjusted_dmif_size, (bw_add(dceip->dmif_request_buffer_size, bw_int_to_fixed(vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel * data->number_of_dram_channels)))), bw_int_to_fixed(1)); + data->total_dmifmc_urgent_trips = bw_ceil2(bw_div(data->total_requests_for_adjusted_dmif_size, (bw_add(dceip->dmif_request_buffer_size, bw_int_to_fixed((uint64_t)vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel * data->number_of_dram_channels)))), bw_int_to_fixed(1)); data->total_dmifmc_urgent_latency = bw_mul(vbios->dmifmc_urgent_latency, data->total_dmifmc_urgent_trips); data->total_display_reads_required_data = bw_int_to_fixed(0); data->total_display_reads_required_dram_access_data = bw_int_to_fixed(0); diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c index 452206b5095e..6073cadde76c 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c +++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c @@ -284,7 +284,7 @@ struct fixed31_32 dc_fixpt_cos(struct fixed31_32 arg) dc_fixpt_mul( square, res), - n * (n - 1))); + (long long)n * (n - 1))); n -= 2; } while (n != 0); diff --git a/drivers/gpu/drm/amd/display/dc/basics/vector.c b/drivers/gpu/drm/amd/display/dc/basics/vector.c index 6d2924114a3e..b413a672c2c0 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/vector.c +++ b/drivers/gpu/drm/amd/display/dc/basics/vector.c @@ -170,7 +170,7 @@ bool dal_vector_remove_at_index( memmove( vector->container + (index * vector->struct_size), vector->container + ((index + 1) * vector->struct_size), - (vector->count - index - 1) * vector->struct_size); + (size_t)(vector->count - index - 1) * vector->struct_size); vector->count -= 1; return true; @@ -219,7 +219,7 @@ bool dal_vector_insert_at( memmove( insert_address + vector->struct_size, insert_address, - vector->struct_size * (vector->count - position)); + (size_t)vector->struct_size * (vector->count - position)); memmove( insert_address, @@ -271,7 +271,7 @@ struct vector *dal_vector_clone( /* copy vector's data */ memmove(vec_cloned->container, vector->container, - vec_cloned->struct_size * vec_cloned->capacity); + (size_t)vec_cloned->struct_size * vec_cloned->capacity); return vec_cloned; } diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 67f08495b7e6..154fd2c18e88 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -174,11 +174,8 @@ static struct graphics_object_id bios_parser_get_connector_id( return object_id; } - if (tbl->ucNumberOfObjects <= i) { - dm_error("Can't find connector id 
%d in connector table of size %d.\n", - i, tbl->ucNumberOfObjects); + if (tbl->ucNumberOfObjects <= i) return object_id; - } id = le16_to_cpu(tbl->asObjects[i].usObjectID); object_id = object_id_from_bios_object_id(id); diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c index 2bcae0643e61..58e88778da7f 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c @@ -993,7 +993,7 @@ static enum bp_result set_pixel_clock_v3( allocation.sPCLKInput.usFbDiv = cpu_to_le16((uint16_t)bp_params->feedback_divider); allocation.sPCLKInput.ucFracFbDiv = - (uint8_t)bp_params->fractional_feedback_divider; + (uint8_t)(bp_params->fractional_feedback_divider / 100000); allocation.sPCLKInput.ucPostDiv = (uint8_t)bp_params->pixel_clock_post_divider; diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index 2c645dffec18..f2b1720a6a66 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -396,6 +396,7 @@ static enum bp_result transmitter_control_v1_7( process_phy_transition_init_params.display_port_link_rate = link->cur_link_settings.link_rate; process_phy_transition_init_params.transition_bitmask = link->phy_transition_bitmask; } + dig_v1_7.skip_phy_ssc_reduction = link->wa_flags.skip_phy_ssc_reduction; } // Handle PRE_OFF_TO_ON: Process ACPI PHY Transition Interlock diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile index d9955c5d2e5e..60021671b386 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile @@ -112,7 +112,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21) ############################################################################### # DCN30 ############################################################################### -CLK_MGR_DCN30 = dcn30_clk_mgr.o dcn30_clk_mgr_smu_msg.o +CLK_MGR_DCN30 = dcn30_clk_mgr.o dcn30_clk_mgr_smu_msg.o dcn30m_clk_mgr.o dcn30m_clk_mgr_smu_msg.o AMD_DAL_CLK_MGR_DCN30 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn30/,$(CLK_MGR_DCN30)) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 4c3e58c730b1..15cf13ec5302 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -28,7 +28,7 @@ #include "dccg.h" #include "clk_mgr_internal.h" #include "dc_state_priv.h" -#include "link.h" +#include "link_service.h" #include "dce100/dce_clk_mgr.h" #include "dce110/dce110_clk_mgr.h" @@ -67,7 +67,7 @@ int clk_mgr_helper_get_active_display_cnt( if (dc_state_get_stream_subvp_type(context, stream) == SUBVP_PHANTOM) continue; - if (!stream->dpms_off || (stream_status && stream_status->plane_count)) + if (!stream->dpms_off || dc->is_switch_in_progress_dest || (stream_status && stream_status->plane_count)) display_count++; } @@ -158,7 +158,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p return NULL; } dce60_clk_mgr_construct(ctx, clk_mgr); - dce_clk_mgr_construct(ctx, clk_mgr); return &clk_mgr->base; } #endif diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c index 26feefbb8990..6131ede2db7a 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c 
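The casts added to dce_calcs.c, fixpt31_32.c and vector.c above all fix the same class of problem: both multiplication operands are 32-bit, so the product can wrap before it is widened by the assignment. Widening one operand first avoids that, e.g. on a 64-bit build:

	uint32_t count = 70000, struct_size = 70000;

	size_t bad  = count * struct_size;          /* 32-bit multiply wraps to 605032704 */
	size_t good = (size_t)count * struct_size;  /* widened first: 4900000000 */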
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c @@ -72,9 +72,9 @@ static const struct state_dependent_clocks dce80_max_clks_by_state[] = { /* ClocksStateLow */ { .display_clk_khz = 352000, .pixel_clk_khz = 330000}, /* ClocksStateNominal */ -{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 }, +{ .display_clk_khz = 625000, .pixel_clk_khz = 400000 }, /* ClocksStatePerformance */ -{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } }; +{ .display_clk_khz = 625000, .pixel_clk_khz = 400000 } }; int dentist_get_divider_from_did(int did) { @@ -245,6 +245,11 @@ int dce_set_clock( pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10; pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; + /* DCE 6.0, DCE 6.4: engine clock is the same as PLL0 */ + if (clk_mgr_base->ctx->dce_version == DCE_VERSION_6_0 || + clk_mgr_base->ctx->dce_version == DCE_VERSION_6_4) + pxl_clk_params.pll_id = CLOCK_SOURCE_ID_PLL0; + if (clk_mgr_dce->dfs_bypass_active) pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true; @@ -386,8 +391,6 @@ static void dce_pplib_apply_display_requirements( { struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; - pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context); - dce110_fill_display_configs(context, pp_display_cfg); if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) @@ -400,11 +403,9 @@ static void dce_update_clocks(struct clk_mgr *clk_mgr_base, { struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dm_pp_power_level_change_request level_change_req; - int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz; - - /*TODO: W/A for dal3 linux, investigate why this works */ - if (!clk_mgr_dce->dfs_bypass_active) - patched_disp_clk = patched_disp_clk * 115 / 100; + const int max_disp_clk = + clk_mgr_dce->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz; + int patched_disp_clk = MIN(max_disp_clk, context->bw_ctx.bw.dce.dispclk_khz); level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context); /* get max clock state from PPLIB */ @@ -462,6 +463,9 @@ void dce_clk_mgr_construct( clk_mgr->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; clk_mgr->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID; + base->clks.max_supported_dispclk_khz = + clk_mgr->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz; + dce_clock_read_integrated_info(clk_mgr); dce_clock_read_ss_info(clk_mgr); } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c index f8409453434c..d50b9440210e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c @@ -120,9 +120,15 @@ void dce110_fill_display_configs( const struct dc_state *context, struct dm_pp_display_configuration *pp_display_cfg) { + struct dc *dc = context->clk_mgr->ctx->dc; int j; int num_cfgs = 0; + pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context); + pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz; + pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0; + pp_display_cfg->crtc_index = dc->res_pool->res_cap->num_timing_generator; + for (j = 0; j < context->stream_count; j++) { int k; @@ -158,12 +164,29 @@ void dce110_fill_display_configs( stream->link->cur_link_settings.link_rate; cfg->link_settings.link_spread = 
stream->link->cur_link_settings.link_spread; - cfg->sym_clock = stream->phy_pix_clk; + cfg->pixel_clock = stream->phy_pix_clk; /* Round v_refresh*/ cfg->v_refresh = stream->timing.pix_clk_100hz * 100; cfg->v_refresh /= stream->timing.h_total; cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2) / stream->timing.v_total; + + /* Find first CRTC index and calculate its line time. + * This is necessary for DPM on SI GPUs. + */ + if (cfg->pipe_idx < pp_display_cfg->crtc_index) { + const struct dc_crtc_timing *timing = + &context->streams[0]->timing; + + pp_display_cfg->crtc_index = cfg->pipe_idx; + pp_display_cfg->line_time_in_us = + timing->h_total * 10000 / timing->pix_clk_100hz; + } + } + + if (!num_cfgs) { + pp_display_cfg->crtc_index = 0; + pp_display_cfg->line_time_in_us = 0; } pp_display_cfg->display_count = num_cfgs; @@ -223,25 +246,8 @@ void dce11_pplib_apply_display_requirements( pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw_ctx.bw.dce.sclk_deep_sleep_khz; - pp_display_cfg->avail_mclk_switch_time_us = - dce110_get_min_vblank_time_us(context); - /* TODO: dce11.2*/ - pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0; - - pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz; - dce110_fill_display_configs(context, pp_display_cfg); - /* TODO: is this still applicable?*/ - if (pp_display_cfg->display_count == 1) { - const struct dc_crtc_timing *timing = - &context->streams[0]->timing; - - pp_display_cfg->crtc_index = - pp_display_cfg->disp_configs[0].pipe_idx; - pp_display_cfg->line_time_in_us = timing->h_total * 10000 / timing->pix_clk_100hz; - } - if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg); } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c index 0267644717b2..69dd80d9f738 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c @@ -83,22 +83,13 @@ static const struct state_dependent_clocks dce60_max_clks_by_state[] = { static int dce60_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - int dprefclk_wdivider; - int dp_ref_clk_khz; - int target_div; + struct dc_context *ctx = clk_mgr_base->ctx; + int dp_ref_clk_khz = 0; - /* DCE6 has no DPREFCLK_CNTL to read DP Reference Clock source */ - - /* Read the mmDENTIST_DISPCLK_CNTL to get the currently - * programmed DID DENTIST_DPREFCLK_WDIVIDER*/ - REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider); - - /* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/ - target_div = dentist_get_divider_from_did(dprefclk_wdivider); - - /* Calculate the current DFS clock, in kHz.*/ - dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->base.dentist_vco_freq_khz) / target_div; + if (ASIC_REV_IS_TAHITI_P(ctx->asic_id.hw_internal_rev)) + dp_ref_clk_khz = ctx->dc_bios->fw_info.default_display_engine_pll_frequency; + else + dp_ref_clk_khz = clk_mgr_base->clks.dispclk_khz; return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz); } @@ -109,8 +100,6 @@ static void dce60_pplib_apply_display_requirements( { struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; - pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context); - dce110_fill_display_configs(context, pp_display_cfg); if 
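The line_time_in_us value filled in above is simply the horizontal total divided by the pixel clock, with the 100 Hz unit folded into the constant: line_time_in_us = h_total * 10000 / pix_clk_100hz. For example, a 1080p60 timing with h_total = 2200 and a 148.5 MHz pixel clock (pix_clk_100hz = 1485000) gives 2200 * 10000 / 1485000 = 14 us (integer truncation of the exact 14.8 us).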
(memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) @@ -123,11 +112,9 @@ static void dce60_update_clocks(struct clk_mgr *clk_mgr_base, { struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dm_pp_power_level_change_request level_change_req; - int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz; - - /*TODO: W/A for dal3 linux, investigate why this works */ - if (!clk_mgr_dce->dfs_bypass_active) - patched_disp_clk = patched_disp_clk * 115 / 100; + const int max_disp_clk = + clk_mgr_dce->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz; + int patched_disp_clk = MIN(max_disp_clk, context->bw_ctx.bw.dce.dispclk_khz); level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context); /* get max clock state from PPLIB */ @@ -160,6 +147,8 @@ void dce60_clk_mgr_construct( struct dc_context *ctx, struct clk_mgr_internal *clk_mgr) { + struct clk_mgr *base = &clk_mgr->base; + dce_clk_mgr_construct(ctx, clk_mgr); memcpy(clk_mgr->max_clks_by_state, @@ -170,5 +159,8 @@ void dce60_clk_mgr_construct( clk_mgr->clk_mgr_shift = &disp_clk_shift; clk_mgr->clk_mgr_mask = &disp_clk_mask; clk_mgr->base.funcs = &dce60_funcs; + + base->clks.max_supported_dispclk_khz = + clk_mgr->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h index fa09c594fd36..06da34676965 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h @@ -56,6 +56,7 @@ #define DALSMC_MSG_SetDisplayRefreshFromMall 0xF #define DALSMC_MSG_SetExternalClientDfCstateAllow 0x10 #define DALSMC_MSG_BacoAudioD3PME 0x11 -#define DALSMC_Message_Count 0x12 +#define DALSMC_MSG_SmartAccess 0x12 +#define DALSMC_Message_Count 0x13 #endif diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index 8083a553c60e..ef77fcd164ed 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -30,6 +30,7 @@ #include "dce100/dce_clk_mgr.h" #include "dcn30/dcn30_clk_mgr.h" #include "dml/dcn30/dcn30_fpu.h" +#include "dcn30/dcn30m_clk_mgr.h" #include "reg_helper.h" #include "core_types.h" #include "dm_helpers.h" @@ -498,7 +499,8 @@ static struct clk_mgr_funcs dcn3_funcs = { .are_clock_states_equal = dcn3_are_clock_states_equal, .enable_pme_wa = dcn3_enable_pme_wa, .notify_link_rate_change = dcn30_notify_link_rate_change, - .is_smu_present = dcn3_is_smu_present + .is_smu_present = dcn3_is_smu_present, + .set_smartmux_switch = dcn30m_set_smartmux_switch }; static void dcn3_init_clocks_fpga(struct clk_mgr *clk_mgr) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c index 3253115a153d..827bc2431d5d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c @@ -69,7 +69,7 @@ static uint32_t dcn30_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un /* handle DALSMC_Result_CmdRejectedBusy? 
*/ - TRACE_SMU_DELAY(delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx); + TRACE_SMU_MSG_DELAY(0, 0, delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx); return reg; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c new file mode 100644 index 000000000000..8e8a11c7437e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c @@ -0,0 +1,36 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "clk_mgr_internal.h" +#include "dcn30/dcn30m_clk_mgr.h" +#include "dcn30m_clk_mgr_smu_msg.h" + + +uint32_t dcn30m_set_smartmux_switch(struct clk_mgr *clk_mgr_base, uint32_t pins_to_set) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + return dcn30m_smu_set_smart_mux_switch(clk_mgr, pins_to_set); +} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h new file mode 100644 index 000000000000..757985b2eadc --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h @@ -0,0 +1,31 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
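dcn30m_set_smartmux_switch() above is reached through the clk_mgr function table (the .set_smartmux_switch hook added to dcn3_funcs earlier), so callers go through the vtable rather than the SMU helper directly. A sketch of the caller side, assuming the matching set_smartmux_switch member exists in struct clk_mgr_funcs and with hypothetical local names:

	uint32_t ack = 0;

	if (dc->clk_mgr->funcs->set_smartmux_switch)
		ack = dc->clk_mgr->funcs->set_smartmux_switch(dc->clk_mgr, pins_to_set);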
+ * + * Authors: AMD + * + */ + +#ifndef __DCN30M_CLK_MGR_H__ +#define __DCN30M_CLK_MGR_H__ + +uint32_t dcn30m_set_smartmux_switch(struct clk_mgr *clk_mgr_base, uint32_t pins_to_set); + +#endif //__DCN30M_CLK_MGR_H__ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c new file mode 100644 index 000000000000..0dd0583ff21e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c @@ -0,0 +1,118 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dcn30m_clk_mgr_smu_msg.h" + +#include "clk_mgr_internal.h" +#include "reg_helper.h" +#include "dm_helpers.h" + +#include "dalsmc.h" + +#define mmDAL_MSG_REG 0x1628A +#define mmDAL_ARG_REG 0x16273 +#define mmDAL_RESP_REG 0x16274 + +#define REG(reg_name) \ + mm ## reg_name + +#include "logger_types.h" +#undef DC_LOGGER +#define DC_LOGGER \ + CTX->logger +#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); } + + +/* + * Function to be used instead of REG_WAIT macro because the wait ends when + * the register is NOT EQUAL to zero, and because the translation in msg_if.h + * won't work with REG_WAIT. + */ +static uint32_t dcn30m_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, + unsigned int delay_us, unsigned int max_retries) +{ + uint32_t reg = 0; + + do { + reg = REG_READ(DAL_RESP_REG); + if (reg) + break; + + if (delay_us >= 1000) + msleep(delay_us/1000); + else if (delay_us > 0) + udelay(delay_us); + } while (max_retries--); + + /* handle DALSMC_Result_CmdRejectedBusy? */ + + /* Log? 
*/ + + return reg; +} + +static bool dcn30m_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, + uint32_t msg_id, uint32_t param_in, uint32_t *param_out) +{ + uint32_t result; + /* Wait for response register to be ready */ + dcn30m_smu_wait_for_response(clk_mgr, 10, 200000); + + /* Clear response register */ + REG_WRITE(DAL_RESP_REG, 0); + + /* Set the parameter register for the SMU message */ + REG_WRITE(DAL_ARG_REG, param_in); + + /* Trigger the message transaction by writing the message ID */ + REG_WRITE(DAL_MSG_REG, msg_id); + + result = dcn30m_smu_wait_for_response(clk_mgr, 10, 200000); + + if (IS_SMU_TIMEOUT(result)) + dm_helpers_smu_timeout(CTX, msg_id, param_in, 10 * 200000); + + /* Wait for response */ + if (result == DALSMC_Result_OK) { + if (param_out) + *param_out = REG_READ(DAL_ARG_REG); + + return true; + } + + return false; +} + +uint32_t dcn30m_smu_set_smart_mux_switch(struct clk_mgr_internal *clk_mgr, uint32_t pins_to_set) +{ + uint32_t response = 0; + + smu_print("SMU Set SmartMux Switch: switch_dgpu = %d\n", pins_to_set); + + dcn30m_smu_send_msg_with_param(clk_mgr, + DALSMC_MSG_SmartAccess, pins_to_set, &response); + + return response; +} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h new file mode 100644 index 000000000000..8a59a473fc5e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h @@ -0,0 +1,34 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
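The two new dcn30m files above give DCN3.0 a dedicated SMU mailbox path for SmartMux: dcn30m_set_smartmux_switch() resolves the internal clock manager and forwards the pin selection to dcn30m_smu_set_smart_mux_switch(), which drives the response/argument/message register handshake implemented in dcn30m_clk_mgr_smu_msg.c. The stand-alone sketch below only models that handshake so the ordering is easy to follow; the register offsets and the 10 us / 200000-retry polling come from the hunk above, while reg_read()/reg_write()/delay_us(), the fake register array, and the DALSMC_Result_OK value are illustrative stand-ins for REG_READ/REG_WRITE/udelay and dalsmc.h, not driver code.

/* Illustrative model of the dcn30m SMU mailbox handshake; stand-ins noted above. */
#include <stdbool.h>
#include <stdint.h>

#define DAL_MSG_REG		0x1628A	/* message ID mailbox */
#define DAL_ARG_REG		0x16273	/* argument in / result out */
#define DAL_RESP_REG		0x16274	/* response / status */
#define DALSMC_Result_OK	0x1	/* assumed OK code; dalsmc.h holds the real value */

static uint32_t fake_regs[0x20000];	/* stand-in MMIO space for the sketch */
static uint32_t reg_read(uint32_t reg) { return fake_regs[reg]; }
static void reg_write(uint32_t reg, uint32_t val) { fake_regs[reg] = val; }
static void delay_us(unsigned int us) { (void)us; }

/* Poll until the SMU posts a non-zero response, as in dcn30m_smu_wait_for_response(). */
static uint32_t wait_for_response(unsigned int us, unsigned int retries)
{
	uint32_t resp = 0;

	do {
		resp = reg_read(DAL_RESP_REG);
		if (resp)
			break;
		delay_us(us);
	} while (retries--);

	return resp;	/* 0 means the SMU never answered */
}

/* One round trip: drain any stale response, clear the response register,
 * publish the argument, trigger the message, then wait and read back. */
static bool send_msg_with_param(uint32_t msg_id, uint32_t param_in, uint32_t *param_out)
{
	wait_for_response(10, 200000);
	reg_write(DAL_RESP_REG, 0);
	reg_write(DAL_ARG_REG, param_in);
	reg_write(DAL_MSG_REG, msg_id);

	if (wait_for_response(10, 200000) != DALSMC_Result_OK)
		return false;

	if (param_out)
		*param_out = reg_read(DAL_ARG_REG);	/* the SMU returns data in the arg register */

	return true;
}

dcn30m_smu_set_smart_mux_switch() is then just this round trip with DALSMC_MSG_SmartAccess and pins_to_set, with the acknowledged value handed back to the caller through dcn30m_set_smartmux_switch().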
+ * + * Authors: AMD + * + */ + +#ifndef DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_ +#define DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_ + +#include "core_types.h" + +struct clk_mgr_internal; + +uint32_t dcn30m_smu_set_smart_mux_switch(struct clk_mgr_internal *clk_mgr, uint32_t pins_to_set); +#endif /* DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c index 9e2ef0e724fc..7aee02d56292 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c @@ -563,6 +563,7 @@ static void vg_clk_mgr_helper_populate_bw_params( { int i, j; struct clk_bw_params *bw_params = clk_mgr->base.bw_params; + uint32_t max_dispclk = 0, max_dppclk = 0; j = -1; @@ -584,6 +585,15 @@ static void vg_clk_mgr_helper_populate_bw_params( return; } + /* dispclk and dppclk can be max at any voltage, same number of levels for both */ + if (clock_table->NumDispClkLevelsEnabled <= VG_NUM_DISPCLK_DPM_LEVELS && + clock_table->NumDispClkLevelsEnabled <= VG_NUM_DPPCLK_DPM_LEVELS) { + max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled); + max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled); + } else { + ASSERT(0); + } + bw_params->clk_table.num_entries = j + 1; for (i = 0; i < bw_params->clk_table.num_entries - 1; i++, j--) { @@ -591,11 +601,17 @@ static void vg_clk_mgr_helper_populate_bw_params( bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk; bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage; bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->DfPstateTable[j].voltage); + + /* Now update clocks we do read */ + bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk; + bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk; } bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].fclk; bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk; bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage; bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, VG_NUM_DCFCLK_DPM_LEVELS); + bw_params->clk_table.entries[i].dispclk_mhz = find_max_clk_value(clock_table->DispClocks, VG_NUM_DISPCLK_DPM_LEVELS); + bw_params->clk_table.entries[i].dppclk_mhz = find_max_clk_value(clock_table->DppClocks, VG_NUM_DPPCLK_DPM_LEVELS); bw_params->vram_type = bios_info->memory_type; bw_params->num_channels = bios_info->ma_channel_number; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index bc123f1884da..051052bd10c9 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -47,7 +47,7 @@ #include "dcn30/dcn30_clk_mgr.h" #include "dc_dmub_srv.h" -#include "link.h" +#include "link_service.h" #include "logger_types.h" diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c index 91d872d6d392..9e63fa72101c 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c @@ -48,7 +48,7 @@ #include "dcn31/dcn31_clk_mgr.h" #include "dc_dmub_srv.h" -#include "link.h" 
+#include "link_service.h" #include "dcn314_smu.h" @@ -77,6 +77,7 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, #undef DC_LOGGER #define DC_LOGGER \ clk_mgr->base.base.ctx->logger + #define regCLK1_CLK_PLL_REQ 0x0237 #define regCLK1_CLK_PLL_REQ_BASE_IDX 0 @@ -87,8 +88,70 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, #define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L #define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L +#define regCLK1_CLK0_DFS_CNTL 0x0269 +#define regCLK1_CLK0_DFS_CNTL_BASE_IDX 0 +#define regCLK1_CLK1_DFS_CNTL 0x026c +#define regCLK1_CLK1_DFS_CNTL_BASE_IDX 0 +#define regCLK1_CLK2_DFS_CNTL 0x026f +#define regCLK1_CLK2_DFS_CNTL_BASE_IDX 0 +#define regCLK1_CLK3_DFS_CNTL 0x0272 +#define regCLK1_CLK3_DFS_CNTL_BASE_IDX 0 +#define regCLK1_CLK4_DFS_CNTL 0x0275 +#define regCLK1_CLK4_DFS_CNTL_BASE_IDX 0 +#define regCLK1_CLK5_DFS_CNTL 0x0278 +#define regCLK1_CLK5_DFS_CNTL_BASE_IDX 0 + +#define regCLK1_CLK0_CURRENT_CNT 0x02fb +#define regCLK1_CLK0_CURRENT_CNT_BASE_IDX 0 +#define regCLK1_CLK1_CURRENT_CNT 0x02fc +#define regCLK1_CLK1_CURRENT_CNT_BASE_IDX 0 +#define regCLK1_CLK2_CURRENT_CNT 0x02fd +#define regCLK1_CLK2_CURRENT_CNT_BASE_IDX 0 +#define regCLK1_CLK3_CURRENT_CNT 0x02fe +#define regCLK1_CLK3_CURRENT_CNT_BASE_IDX 0 +#define regCLK1_CLK4_CURRENT_CNT 0x02ff +#define regCLK1_CLK4_CURRENT_CNT_BASE_IDX 0 +#define regCLK1_CLK5_CURRENT_CNT 0x0300 +#define regCLK1_CLK5_CURRENT_CNT_BASE_IDX 0 + +#define regCLK1_CLK0_BYPASS_CNTL 0x028a +#define regCLK1_CLK0_BYPASS_CNTL_BASE_IDX 0 +#define regCLK1_CLK1_BYPASS_CNTL 0x0293 +#define regCLK1_CLK1_BYPASS_CNTL_BASE_IDX 0 #define regCLK1_CLK2_BYPASS_CNTL 0x029c #define regCLK1_CLK2_BYPASS_CNTL_BASE_IDX 0 +#define regCLK1_CLK3_BYPASS_CNTL 0x02a5 +#define regCLK1_CLK3_BYPASS_CNTL_BASE_IDX 0 +#define regCLK1_CLK4_BYPASS_CNTL 0x02ae +#define regCLK1_CLK4_BYPASS_CNTL_BASE_IDX 0 +#define regCLK1_CLK5_BYPASS_CNTL 0x02b7 +#define regCLK1_CLK5_BYPASS_CNTL_BASE_IDX 0 + +#define regCLK1_CLK0_DS_CNTL 0x0283 +#define regCLK1_CLK0_DS_CNTL_BASE_IDX 0 +#define regCLK1_CLK1_DS_CNTL 0x028c +#define regCLK1_CLK1_DS_CNTL_BASE_IDX 0 +#define regCLK1_CLK2_DS_CNTL 0x0295 +#define regCLK1_CLK2_DS_CNTL_BASE_IDX 0 +#define regCLK1_CLK3_DS_CNTL 0x029e +#define regCLK1_CLK3_DS_CNTL_BASE_IDX 0 +#define regCLK1_CLK4_DS_CNTL 0x02a7 +#define regCLK1_CLK4_DS_CNTL_BASE_IDX 0 +#define regCLK1_CLK5_DS_CNTL 0x02b0 +#define regCLK1_CLK5_DS_CNTL_BASE_IDX 0 + +#define regCLK1_CLK0_ALLOW_DS 0x0284 +#define regCLK1_CLK0_ALLOW_DS_BASE_IDX 0 +#define regCLK1_CLK1_ALLOW_DS 0x028d +#define regCLK1_CLK1_ALLOW_DS_BASE_IDX 0 +#define regCLK1_CLK2_ALLOW_DS 0x0296 +#define regCLK1_CLK2_ALLOW_DS_BASE_IDX 0 +#define regCLK1_CLK3_ALLOW_DS 0x029f +#define regCLK1_CLK3_ALLOW_DS_BASE_IDX 0 +#define regCLK1_CLK4_ALLOW_DS 0x02a8 +#define regCLK1_CLK4_ALLOW_DS_BASE_IDX 0 +#define regCLK1_CLK5_ALLOW_DS 0x02b1 +#define regCLK1_CLK5_ALLOW_DS_BASE_IDX 0 #define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT 0x0 #define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV__SHIFT 0x10 @@ -185,6 +248,8 @@ void dcn314_init_clocks(struct clk_mgr *clk_mgr) { struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr); uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz; + struct clk_mgr_dcn314 *clk_mgr_dcn314 = TO_CLK_MGR_DCN314(clk_mgr_int); + struct clk_log_info log_info = {0}; memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); // Assumption is that boot state always supports pstate @@ -200,6 +265,9 @@ void dcn314_init_clocks(struct 
clk_mgr *clk_mgr) dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz); else clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz; + + dcn314_dump_clk_registers(&clk_mgr->boot_snapshot, &clk_mgr_dcn314->base.base, &log_info); + clk_mgr->clks.dispclk_khz = clk_mgr->boot_snapshot.dispclk * 1000; } void dcn314_update_clocks(struct clk_mgr *clk_mgr_base, @@ -218,6 +286,8 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base, if (dc->work_arounds.skip_clock_update) return; + display_count = dcn314_get_active_display_cnt_wa(dc, context); + /* * if it is safe to lower, but we are already in the lower state, we don't have to do anything * also if safe to lower is false, we just go in the higher state @@ -236,7 +306,6 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base, } /* check that we're not already in lower */ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) { - display_count = dcn314_get_active_display_cnt_wa(dc, context); /* if we can go lower, go lower */ if (display_count == 0) { union display_idle_optimization_u idle_info = { 0 }; @@ -293,11 +362,19 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base, update_dppclk = true; } - if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { + if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) && + (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) { + int requested_dispclk_khz = new_clocks->dispclk_khz; + dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true); + /* Clamp the requested clock to PMFW based on their limit. */ + if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz) + requested_dispclk_khz = dc->debug.min_disp_clk_khz; + + dcn314_smu_set_dispclk(clk_mgr, requested_dispclk_khz); clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; - dcn314_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz); + dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false); update_dispclk = true; @@ -385,10 +462,65 @@ bool dcn314_are_clock_states_equal(struct dc_clocks *a, return true; } -static void dcn314_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, + +static void dcn314_dump_clk_registers_internal(struct dcn35_clk_internal *internal, struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + // read dtbclk + internal->CLK1_CLK4_CURRENT_CNT = REG_READ(CLK1_CLK4_CURRENT_CNT); + internal->CLK1_CLK4_BYPASS_CNTL = REG_READ(CLK1_CLK4_BYPASS_CNTL); + + // read dcfclk + internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_CLK3_CURRENT_CNT); + internal->CLK1_CLK3_BYPASS_CNTL = REG_READ(CLK1_CLK3_BYPASS_CNTL); + + // read dcf deep sleep divider + internal->CLK1_CLK3_DS_CNTL = REG_READ(CLK1_CLK3_DS_CNTL); + internal->CLK1_CLK3_ALLOW_DS = REG_READ(CLK1_CLK3_ALLOW_DS); + + // read dppclk + internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_CLK1_CURRENT_CNT); + internal->CLK1_CLK1_BYPASS_CNTL = REG_READ(CLK1_CLK1_BYPASS_CNTL); + + // read dprefclk + internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_CLK2_CURRENT_CNT); + internal->CLK1_CLK2_BYPASS_CNTL = REG_READ(CLK1_CLK2_BYPASS_CNTL); + + // read dispclk + internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_CLK0_CURRENT_CNT); + internal->CLK1_CLK0_BYPASS_CNTL = REG_READ(CLK1_CLK0_BYPASS_CNTL); +} + +void dcn314_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, struct clk_mgr *clk_mgr_base, struct clk_log_info 
*log_info) { - return; + + struct dcn35_clk_internal internal = {0}; + + dcn314_dump_clk_registers_internal(&internal, clk_mgr_base); + + regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10; + regs_and_bypass->dcf_deep_sleep_divider = internal.CLK1_CLK3_DS_CNTL / 10; + regs_and_bypass->dcf_deep_sleep_allow = internal.CLK1_CLK3_ALLOW_DS; + regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10; + regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10; + regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10; + regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10; + + regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007; + if (regs_and_bypass->dppclk_bypass < 0 || regs_and_bypass->dppclk_bypass > 4) + regs_and_bypass->dppclk_bypass = 0; + regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007; + if (regs_and_bypass->dcfclk_bypass < 0 || regs_and_bypass->dcfclk_bypass > 4) + regs_and_bypass->dcfclk_bypass = 0; + regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007; + if (regs_and_bypass->dispclk_bypass < 0 || regs_and_bypass->dispclk_bypass > 4) + regs_and_bypass->dispclk_bypass = 0; + regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007; + if (regs_and_bypass->dprefclk_bypass < 0 || regs_and_bypass->dprefclk_bypass > 4) + regs_and_bypass->dprefclk_bypass = 0; + } static struct clk_bw_params dcn314_bw_params = { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h index 002c28e80720..0577eb527bc3 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h @@ -65,4 +65,9 @@ void dcn314_clk_mgr_construct(struct dc_context *ctx, void dcn314_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int); + +void dcn314_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, + struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info); + + #endif //__DCN314_CLK_MGR_H__ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c index e4d22f74f986..b315ed91e010 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c @@ -46,7 +46,7 @@ #define DC_LOGGER \ clk_mgr->base.base.ctx->logger -#include "link.h" +#include "link_service.h" #define TO_CLK_MGR_DCN315(clk_mgr)\ container_of(clk_mgr, struct clk_mgr_dcn315, base) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c index 49efea0c8fcf..1769b1f26e75 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -39,7 +39,7 @@ #include "dcn316_smu.h" #include "dm_helpers.h" #include "dc_dmub_srv.h" -#include "link.h" +#include "link_service.h" // DCN316 this is CLK1 instance #define MAX_INSTANCE 7 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index 084994c650c4..7da7b41bd092 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -33,7 +33,7 @@ #include "reg_helper.h" #include "core_types.h" #include "dm_helpers.h" -#include "link.h" 
+#include "link_service.h" #include "dc_state_priv.h" #include "atomfirmware.h" #include "dcn32_smu13_driver_if.h" @@ -1047,11 +1047,8 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base) &num_entries_per_clk->num_fclk_levels); clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_FCLK); - if (num_entries_per_clk->num_memclk_levels >= num_entries_per_clk->num_fclk_levels) { - num_levels = num_entries_per_clk->num_memclk_levels; - } else { - num_levels = num_entries_per_clk->num_fclk_levels; - } + num_levels = max(num_entries_per_clk->num_memclk_levels, num_entries_per_clk->num_fclk_levels); + clk_mgr_base->bw_params->max_memclk_mhz = clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_memclk_levels - 1].memclk_mhz; clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c index cf2d35363e8b..5d80fdf63ffc 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c @@ -63,7 +63,8 @@ static uint32_t dcn32_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un udelay(delay_us); } while (max_retries--); - TRACE_SMU_DELAY(delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx); + TRACE_SMU_MSG_DELAY(0, 0, delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx); + return reg; } @@ -120,7 +121,7 @@ static uint32_t dcn32_smu_wait_for_response_delay(struct clk_mgr_internal *clk_m *total_delay_us += delay_us; } while (max_retries--); - TRACE_SMU_DELAY(*total_delay_us, clk_mgr->base.ctx); + TRACE_SMU_MSG_DELAY(0, 0, *total_delay_us, clk_mgr->base.ctx); return reg; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index bb1ac12a2b09..b11383fba35f 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -44,7 +44,7 @@ #include "dcn31/dcn31_clk_mgr.h" #include "dc_dmub_srv.h" -#include "link.h" +#include "link_service.h" #include "logger_types.h" #undef DC_LOGGER @@ -587,9 +587,118 @@ bool dcn35_are_clock_states_equal(struct dc_clocks *a, return true; } -static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, +static void dcn35_save_clk_registers_internal(struct dcn35_clk_internal *internal, struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + // read dtbclk + internal->CLK1_CLK4_CURRENT_CNT = REG_READ(CLK1_CLK4_CURRENT_CNT); + internal->CLK1_CLK4_BYPASS_CNTL = REG_READ(CLK1_CLK4_BYPASS_CNTL); + + // read dcfclk + internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_CLK3_CURRENT_CNT); + internal->CLK1_CLK3_BYPASS_CNTL = REG_READ(CLK1_CLK3_BYPASS_CNTL); + + // read dcf deep sleep divider + internal->CLK1_CLK3_DS_CNTL = REG_READ(CLK1_CLK3_DS_CNTL); + internal->CLK1_CLK3_ALLOW_DS = REG_READ(CLK1_CLK3_ALLOW_DS); + + // read dppclk + internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_CLK1_CURRENT_CNT); + internal->CLK1_CLK1_BYPASS_CNTL = REG_READ(CLK1_CLK1_BYPASS_CNTL); + + // read dprefclk + internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_CLK2_CURRENT_CNT); + internal->CLK1_CLK2_BYPASS_CNTL = REG_READ(CLK1_CLK2_BYPASS_CNTL); + + // read dispclk + 
internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_CLK0_CURRENT_CNT); + internal->CLK1_CLK0_BYPASS_CNTL = REG_READ(CLK1_CLK0_BYPASS_CNTL); +} + +static void dcn35_save_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, struct clk_mgr_dcn35 *clk_mgr) { + struct dcn35_clk_internal internal = {0}; + char *bypass_clks[5] = {"0x0 DFS", "0x1 REFCLK", "0x2 ERROR", "0x3 400 FCH", "0x4 600 FCH"}; + + dcn35_save_clk_registers_internal(&internal, &clk_mgr->base.base); + + regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10; + regs_and_bypass->dcf_deep_sleep_divider = internal.CLK1_CLK3_DS_CNTL / 10; + regs_and_bypass->dcf_deep_sleep_allow = internal.CLK1_CLK3_ALLOW_DS; + regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10; + regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10; + regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10; + regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10; + + regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007; + if (regs_and_bypass->dppclk_bypass < 0 || regs_and_bypass->dppclk_bypass > 4) + regs_and_bypass->dppclk_bypass = 0; + regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007; + if (regs_and_bypass->dcfclk_bypass < 0 || regs_and_bypass->dcfclk_bypass > 4) + regs_and_bypass->dcfclk_bypass = 0; + regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007; + if (regs_and_bypass->dispclk_bypass < 0 || regs_and_bypass->dispclk_bypass > 4) + regs_and_bypass->dispclk_bypass = 0; + regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007; + if (regs_and_bypass->dprefclk_bypass < 0 || regs_and_bypass->dprefclk_bypass > 4) + regs_and_bypass->dprefclk_bypass = 0; + + if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) { + DC_LOG_SMU("clk_type,clk_value,deepsleep_cntl,deepsleep_allow,bypass\n"); + + DC_LOG_SMU("dcfclk,%d,%d,%d,%s\n", + regs_and_bypass->dcfclk, + regs_and_bypass->dcf_deep_sleep_divider, + regs_and_bypass->dcf_deep_sleep_allow, + bypass_clks[(int) regs_and_bypass->dcfclk_bypass]); + + DC_LOG_SMU("dprefclk,%d,N/A,N/A,%s\n", + regs_and_bypass->dprefclk, + bypass_clks[(int) regs_and_bypass->dprefclk_bypass]); + + DC_LOG_SMU("dispclk,%d,N/A,N/A,%s\n", + regs_and_bypass->dispclk, + bypass_clks[(int) regs_and_bypass->dispclk_bypass]); + + // REGISTER VALUES + DC_LOG_SMU("reg_name,value,clk_type"); + + DC_LOG_SMU("CLK1_CLK3_CURRENT_CNT,%d,dcfclk", + internal.CLK1_CLK3_CURRENT_CNT); + + DC_LOG_SMU("CLK1_CLK4_CURRENT_CNT,%d,dtbclk", + internal.CLK1_CLK4_CURRENT_CNT); + + DC_LOG_SMU("CLK1_CLK3_DS_CNTL,%d,dcf_deep_sleep_divider", + internal.CLK1_CLK3_DS_CNTL); + + DC_LOG_SMU("CLK1_CLK3_ALLOW_DS,%d,dcf_deep_sleep_allow", + internal.CLK1_CLK3_ALLOW_DS); + + DC_LOG_SMU("CLK1_CLK2_CURRENT_CNT,%d,dprefclk", + internal.CLK1_CLK2_CURRENT_CNT); + + DC_LOG_SMU("CLK1_CLK0_CURRENT_CNT,%d,dispclk", + internal.CLK1_CLK0_CURRENT_CNT); + + DC_LOG_SMU("CLK1_CLK1_CURRENT_CNT,%d,dppclk", + internal.CLK1_CLK1_CURRENT_CNT); + + DC_LOG_SMU("CLK1_CLK3_BYPASS_CNTL,%d,dcfclk_bypass", + internal.CLK1_CLK3_BYPASS_CNTL); + + DC_LOG_SMU("CLK1_CLK2_BYPASS_CNTL,%d,dprefclk_bypass", + internal.CLK1_CLK2_BYPASS_CNTL); + + DC_LOG_SMU("CLK1_CLK0_BYPASS_CNTL,%d,dispclk_bypass", + internal.CLK1_CLK0_BYPASS_CNTL); + + DC_LOG_SMU("CLK1_CLK1_BYPASS_CNTL,%d,dppclk_bypass", + internal.CLK1_CLK1_BYPASS_CNTL); + + } } static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base) @@ -623,6 +732,7 @@ static void init_clk_states(struct clk_mgr *clk_mgr) void 
dcn35_init_clocks(struct clk_mgr *clk_mgr) { struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr); + struct clk_mgr_dcn35 *clk_mgr_dcn35 = TO_CLK_MGR_DCN35(clk_mgr_int); init_clk_states(clk_mgr); @@ -633,6 +743,13 @@ void dcn35_init_clocks(struct clk_mgr *clk_mgr) else clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz; + dcn35_save_clk_registers(&clk_mgr->boot_snapshot, clk_mgr_dcn35); + + clk_mgr->clks.ref_dtbclk_khz = clk_mgr->boot_snapshot.dtbclk * 10; + if (clk_mgr->boot_snapshot.dtbclk > 59000) { + /*dtbclk enabled based on */ + clk_mgr->clks.dtbclk_en = true; + } } static struct clk_bw_params dcn35_bw_params = { .vram_type = Ddr4MemType, @@ -1323,7 +1440,7 @@ void dcn35_clk_mgr_construct( dcn35_bw_params.wm_table = ddr5_wm_table; } /* Saved clocks configured at boot for debug purposes */ - dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr); + dcn35_save_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr); clk_mgr->base.base.dprefclk_khz = dcn35_smu_get_dprefclk(&clk_mgr->base); clk_mgr->base.base.clks.ref_dtbclk_khz = 600000; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c index 4b17d2fcd565..306016c1f109 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c @@ -13,7 +13,7 @@ #include "reg_helper.h" #include "core_types.h" #include "dm_helpers.h" -#include "link.h" +#include "link_service.h" #include "dc_state_priv.h" #include "atomfirmware.h" @@ -22,8 +22,6 @@ #include "dcn/dcn_4_1_0_offset.h" #include "dcn/dcn_4_1_0_sh_mask.h" -#include "dml/dcn401/dcn401_fpu.h" - #define DCN_BASE__INST0_SEG1 0x000000C0 #define mmCLK01_CLK0_CLK_PLL_REQ 0x16E37 @@ -164,7 +162,7 @@ static void dcn401_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e c unsigned int i; char *entry_i = (char *)entry_0; - uint32_t ret = dcn30_smu_get_dpm_freq_by_index(clk_mgr, clk, 0xFF); + uint32_t ret = dcn401_smu_get_dpm_freq_by_index(clk_mgr, clk, 0xFF); if (ret & (1 << 31)) /* fine-grained, only min and max */ @@ -176,50 +174,43 @@ static void dcn401_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e c /* if the initial message failed, num_levels will be 0 */ for (i = 0; i < *num_levels && i < ARRAY_SIZE(clk_mgr->base.bw_params->clk_table.entries); i++) { - *((unsigned int *)entry_i) = (dcn30_smu_get_dpm_freq_by_index(clk_mgr, clk, i) & 0xFFFF); + *((unsigned int *)entry_i) = (dcn401_smu_get_dpm_freq_by_index(clk_mgr, clk, i) & 0xFFFF); entry_i += sizeof(clk_mgr->base.bw_params->clk_table.entries[0]); } } static void dcn401_build_wm_range_table(struct clk_mgr *clk_mgr) { - /* legacy */ - DC_FP_START(); - dcn401_build_wm_range_table_fpu(clk_mgr); - DC_FP_END(); - - if (clk_mgr->ctx->dc->debug.using_dml21) { - /* For min clocks use as reported by PM FW and report those as min */ - uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz; - uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz; - - /* Set A - Normal - default values */ - clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF; - 
clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF; - - /* Set B - Unused on dcn4 */ - clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = false; - - /* Set 1A - Dummy P-State - P-State latency set to "dummy p-state" value */ - /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */ - if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) { - clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = true; - clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE; - clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_dcfclk = 0xFFFF; - clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_uclk = min_uclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_uclk = 0xFFFF; - } else { - clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = false; - } - - /* Set 1B - Unused on dcn4 */ - clk_mgr->bw_params->wm_table.nv_entries[WM_1B].valid = false; + /* For min clocks use as reported by PM FW and report those as min */ + uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz; + uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz; + + /* Set A - Normal - default values */ + clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true; + clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE; + clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; + clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF; + clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz; + clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF; + + /* Set B - Unused on dcn4 */ + clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = false; + + /* Set 1A - Dummy P-State - P-State latency set to "dummy p-state" value */ + /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */ + if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) { + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = true; + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE; + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_dcfclk = 0xFFFF; + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_uclk = min_uclk_mhz; + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_uclk = 0xFFFF; + } else { + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = false; } + + /* Set 1B - Unused on dcn4 */ + clk_mgr->bw_params->wm_table.nv_entries[WM_1B].valid = false; } void dcn401_init_clocks(struct clk_mgr *clk_mgr_base) @@ -240,20 +231,20 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base) clk_mgr->smu_present = false; clk_mgr->dpm_present = false; - if (!clk_mgr_base->force_smu_not_present && dcn30_smu_get_smu_version(clk_mgr, &clk_mgr->smu_ver)) + if (!clk_mgr_base->force_smu_not_present && dcn401_smu_get_smu_version(clk_mgr, &clk_mgr->smu_ver)) clk_mgr->smu_present = true; if (!clk_mgr->smu_present) return; - 
dcn30_smu_check_driver_if_version(clk_mgr); - dcn30_smu_check_msg_header_version(clk_mgr); + dcn401_smu_check_driver_if_version(clk_mgr); + dcn401_smu_check_msg_header_version(clk_mgr); /* DCFCLK */ dcn401_init_single_clock(clk_mgr, PPCLK_DCFCLK, &clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz, &num_entries_per_clk->num_dcfclk_levels); - clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DCFCLK); + clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DCFCLK); if (num_entries_per_clk->num_dcfclk_levels && clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz == clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_dcfclk_levels - 1].dcfclk_mhz) clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz = 0; @@ -262,7 +253,7 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base) dcn401_init_single_clock(clk_mgr, PPCLK_SOCCLK, &clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz, &num_entries_per_clk->num_socclk_levels); - clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_SOCCLK); + clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_SOCCLK); if (num_entries_per_clk->num_socclk_levels && clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz == clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_socclk_levels - 1].socclk_mhz) clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz = 0; @@ -272,7 +263,7 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base) dcn401_init_single_clock(clk_mgr, PPCLK_DTBCLK, &clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz, &num_entries_per_clk->num_dtbclk_levels); - clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DTBCLK); + clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DTBCLK); if (num_entries_per_clk->num_dtbclk_levels && clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz == clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_dtbclk_levels - 1].dtbclk_mhz) clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz = 0; @@ -282,7 +273,7 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base) dcn401_init_single_clock(clk_mgr, PPCLK_DISPCLK, &clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz, &num_entries_per_clk->num_dispclk_levels); - clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DISPCLK); + clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DISPCLK); if (num_entries_per_clk->num_dispclk_levels && clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz == clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_dispclk_levels - 1].dispclk_mhz) clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = 0; @@ -320,6 +311,25 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base) dcn401_build_wm_range_table(clk_mgr_base); } +bool dcn401_is_dc_mode_present(struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + return clk_mgr->smu_present && clk_mgr->dpm_present && + ((clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels && + clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz) || + (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels && + clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz) || + 
(clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels && + clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz) || + (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_fclk_levels && + clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz) || + (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels && + clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz) || + (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_socclk_levels && + clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz)); +} + static void dcn401_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info) { @@ -1308,8 +1318,8 @@ static void dcn401_notify_wm_ranges(struct clk_mgr *clk_mgr_base) table->Watermarks.WatermarkRow[i].WmSetting = i; table->Watermarks.WatermarkRow[i].Flags = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.wm_type; } - dcn30_smu_set_dram_addr_high(clk_mgr, clk_mgr->wm_range_table_addr >> 32); - dcn30_smu_set_dram_addr_low(clk_mgr, clk_mgr->wm_range_table_addr & 0xFFFFFFFF); + dcn401_smu_set_dram_addr_high(clk_mgr, clk_mgr->wm_range_table_addr >> 32); + dcn401_smu_set_dram_addr_low(clk_mgr, clk_mgr->wm_range_table_addr & 0xFFFFFFFF); dcn401_smu_transfer_wm_table_dram_2_smu(clk_mgr); } @@ -1380,7 +1390,7 @@ static void dcn401_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base) clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_memclk_levels - 1].memclk_mhz; } - clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK); + clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK); if (num_entries_per_clk->num_memclk_levels && clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz == clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_memclk_levels - 1].memclk_mhz) clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = 0; @@ -1389,16 +1399,12 @@ static void dcn401_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base) dcn401_init_single_clock(clk_mgr, PPCLK_FCLK, &clk_mgr_base->bw_params->clk_table.entries[0].fclk_mhz, &num_entries_per_clk->num_fclk_levels); - clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_FCLK); + clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_FCLK); if (num_entries_per_clk->num_fclk_levels && clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz == clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_fclk_levels - 1].fclk_mhz) clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = 0; - if (num_entries_per_clk->num_memclk_levels >= num_entries_per_clk->num_fclk_levels) { - num_levels = num_entries_per_clk->num_memclk_levels; - } else { - num_levels = num_entries_per_clk->num_fclk_levels; - } + num_levels = max(num_entries_per_clk->num_memclk_levels, num_entries_per_clk->num_fclk_levels); clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? 
num_levels : 1; @@ -1490,6 +1496,35 @@ static int dcn401_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base) return 0; } +unsigned int dcn401_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + unsigned int num_clk_levels; + + switch (clk_type) { + case CLK_TYPE_DISPCLK: + num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; + return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DISPCLK) ? + clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 : + clk_mgr->base.boot_snapshot.dispclk; + case CLK_TYPE_DPPCLK: + num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dppclk_levels; + return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DPPCLK) ? + clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dppclk_mhz * 1000 : + clk_mgr->base.boot_snapshot.dppclk; + case CLK_TYPE_DSCCLK: + num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; + return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DISPCLK) ? + clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 / 3 : + clk_mgr->base.boot_snapshot.dispclk / 3; + default: + break; + } + + return 0; +} + static struct clk_mgr_funcs dcn401_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, .get_dtb_ref_clk_frequency = dcn401_get_dtb_ref_freq_khz, @@ -1505,6 +1540,8 @@ static struct clk_mgr_funcs dcn401_funcs = { .get_dispclk_from_dentist = dcn401_get_dispclk_from_dentist, .get_hard_min_memclk = dcn401_get_hard_min_memclk, .get_hard_min_fclk = dcn401_get_hard_min_fclk, + .is_dc_mode_present = dcn401_is_dc_mode_present, + .get_max_clock_khz = dcn401_get_max_clock_khz, }; struct clk_mgr_internal *dcn401_clk_mgr_construct( diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h index 6c9ae5ca2c7e..97a1ce1e8a9e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h @@ -105,10 +105,13 @@ struct dcn401_clk_mgr { }; void dcn401_init_clocks(struct clk_mgr *clk_mgr_base); +bool dcn401_is_dc_mode_present(struct clk_mgr *clk_mgr_base); struct clk_mgr_internal *dcn401_clk_mgr_construct(struct dc_context *ctx, struct dccg *dccg); void dcn401_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr); +unsigned int dcn401_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type); + #endif /* __DCN401_CLK_MGR_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c index 21c35528f61f..3a263840893e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c @@ -57,6 +57,8 @@ static bool dcn401_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uin /* Wait for response register to be ready */ dcn401_smu_wait_for_response(clk_mgr, 10, 200000); + TRACE_SMU_MSG_ENTER(msg_id, param_in, clk_mgr->base.ctx); + /* Clear response register */ REG_WRITE(DAL_RESP_REG, 0); @@ -71,9 +73,11 @@ static bool dcn401_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uin if (param_out) *param_out = REG_READ(DAL_ARG_REG); + TRACE_SMU_MSG_EXIT(true, param_out ? 
*param_out : 0, clk_mgr->base.ctx); return true; } + TRACE_SMU_MSG_EXIT(false, 0, clk_mgr->base.ctx); return false; } @@ -102,8 +106,6 @@ static uint32_t dcn401_smu_wait_for_response_delay(struct clk_mgr_internal *clk_ *total_delay_us += delay_us; } while (max_retries--); - TRACE_SMU_DELAY(*total_delay_us, clk_mgr->base.ctx); - return reg; } @@ -115,6 +117,8 @@ static bool dcn401_smu_send_msg_with_param_delay(struct clk_mgr_internal *clk_mg /* Wait for response register to be ready */ dcn401_smu_wait_for_response_delay(clk_mgr, 10, 200000, &delay1_us); + TRACE_SMU_MSG_ENTER(msg_id, param_in, clk_mgr->base.ctx); + /* Clear response register */ REG_WRITE(DAL_RESP_REG, 0); @@ -124,18 +128,71 @@ static bool dcn401_smu_send_msg_with_param_delay(struct clk_mgr_internal *clk_mg /* Trigger the message transaction by writing the message ID */ REG_WRITE(DAL_MSG_REG, msg_id); - TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx); - /* Wait for response */ if (dcn401_smu_wait_for_response_delay(clk_mgr, 10, 200000, &delay2_us) == DALSMC_Result_OK) { if (param_out) *param_out = REG_READ(DAL_ARG_REG); *total_delay_us = delay1_us + delay2_us; + TRACE_SMU_MSG_EXIT(true, param_out ? *param_out : 0, clk_mgr->base.ctx); return true; } *total_delay_us = delay1_us + 2000000; + TRACE_SMU_MSG_EXIT(false, 0, clk_mgr->base.ctx); + return false; +} + +bool dcn401_smu_get_smu_version(struct clk_mgr_internal *clk_mgr, unsigned int *version) +{ + smu_print("SMU Get SMU version\n"); + + if (dcn401_smu_send_msg_with_param(clk_mgr, + DALSMC_MSG_GetSmuVersion, 0, version)) { + + smu_print("SMU version: %d\n", *version); + + return true; + } + + return false; +} + +/* Message output should match SMU11_DRIVER_IF_VERSION in smu11_driver_if.h */ +bool dcn401_smu_check_driver_if_version(struct clk_mgr_internal *clk_mgr) +{ + uint32_t response = 0; + + smu_print("SMU Check driver if version\n"); + + if (dcn401_smu_send_msg_with_param(clk_mgr, + DALSMC_MSG_GetDriverIfVersion, 0, &response)) { + + smu_print("SMU driver if version: %d\n", response); + + if (response == SMU14_DRIVER_IF_VERSION) + return true; + } + + return false; +} + +/* Message output should match DALSMC_VERSION in dalsmc.h */ +bool dcn401_smu_check_msg_header_version(struct clk_mgr_internal *clk_mgr) +{ + uint32_t response = 0; + + smu_print("SMU Check msg header version\n"); + + if (dcn401_smu_send_msg_with_param(clk_mgr, + DALSMC_MSG_GetMsgHeaderVersion, 0, &response)) { + + smu_print("SMU msg header version: %d\n", response); + + if (response == DALSMC_VERSION) + return true; + } + return false; } @@ -163,6 +220,22 @@ void dcn401_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsi smu_print("Numways for SubVP : %d\n", num_ways); } +void dcn401_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high) +{ + smu_print("SMU Set DRAM addr high: %d\n", addr_high); + + dcn401_smu_send_msg_with_param(clk_mgr, + DALSMC_MSG_SetDalDramAddrHigh, addr_high, NULL); +} + +void dcn401_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low) +{ + smu_print("SMU Set DRAM addr low: %d\n", addr_low); + + dcn401_smu_send_msg_with_param(clk_mgr, + DALSMC_MSG_SetDalDramAddrLow, addr_low, NULL); +} + void dcn401_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr) { smu_print("SMU Transfer WM table DRAM 2 SMU\n"); @@ -348,3 +421,52 @@ unsigned int dcn401_smu_get_num_of_umc_channels(struct clk_mgr_internal *clk_mgr return response; } + +/* + * Frequency in MHz returned in lower 16 bits for valid DPM level + * + * 
Call with dpm_level = 0xFF to query features, return value will be: + * Bits 7:0 - number of DPM levels + * Bit 28 - 1 = auto DPM on + * Bit 29 - 1 = sweep DPM on + * Bit 30 - 1 = forced DPM on + * Bit 31 - 0 = discrete, 1 = fine-grained + * + * With fine-grained DPM, only min and max frequencies will be reported + * + * Returns 0 on failure + */ +unsigned int dcn401_smu_get_dpm_freq_by_index(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint8_t dpm_level) +{ + uint32_t response = 0; + + /* bits 23:16 for clock type, lower 8 bits for DPM level */ + uint32_t param = (clk << 16) | dpm_level; + + smu_print("SMU Get dpm freq by index: clk = %d, dpm_level = %d\n", clk, dpm_level); + + dcn401_smu_send_msg_with_param(clk_mgr, + DALSMC_MSG_GetDpmFreqByIndex, param, &response); + + smu_print("SMU dpm freq: %d MHz\n", response); + + return response; +} + +/* Returns the max DPM frequency in DC mode in MHz, 0 on failure */ +unsigned int dcn401_smu_get_dc_mode_max_dpm_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk) +{ + uint32_t response = 0; + + /* bits 23:16 for clock type */ + uint32_t param = clk << 16; + + smu_print("SMU Get DC mode max DPM freq: clk = %d\n", clk); + + dcn401_smu_send_msg_with_param(clk_mgr, + DALSMC_MSG_GetDcModeMaxDpmFreq, param, &response); + + smu_print("SMU DC mode max DMP freq: %d MHz\n", response); + + return response; +} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h index e02eb1294b37..4f5ac603e822 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h @@ -7,11 +7,17 @@ #include "os_types.h" #include "core_types.h" -#include "dcn32/dcn32_clk_mgr_smu_msg.h" +struct clk_mgr_internal; + +bool dcn401_smu_get_smu_version(struct clk_mgr_internal *clk_mgr, unsigned int *version); +bool dcn401_smu_check_driver_if_version(struct clk_mgr_internal *clk_mgr); +bool dcn401_smu_check_msg_header_version(struct clk_mgr_internal *clk_mgr); void dcn401_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool support); void dcn401_smu_send_uclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool support); void dcn401_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways); +void dcn401_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high); +void dcn401_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low); void dcn401_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr); void dcn401_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr); unsigned int dcn401_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz); @@ -29,5 +35,7 @@ bool dcn401_smu_set_subvp_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr, void dcn401_smu_set_min_deep_sleep_dcef_clk(struct clk_mgr_internal *clk_mgr, uint32_t freq_mhz); void dcn401_smu_set_num_of_displays(struct clk_mgr_internal *clk_mgr, uint32_t num_displays); unsigned int dcn401_smu_get_num_of_umc_channels(struct clk_mgr_internal *clk_mgr); +unsigned int dcn401_smu_get_dc_mode_max_dpm_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk); +unsigned int dcn401_smu_get_dpm_freq_by_index(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint8_t dpm_level); #endif /* __DCN401_CLK_MGR_SMU_MSG_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 
b34b5b52236d..5f2d5638c819 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -60,7 +60,7 @@ #include "link_encoder.h" #include "link_enc_cfg.h" -#include "link.h" +#include "link_service.h" #include "dm_helpers.h" #include "mem_input.h" @@ -84,6 +84,7 @@ #if defined(CONFIG_DRM_AMD_DC_FP) #include "dml2/dml2_internal_types.h" +#include "soc_and_ip_translator.h" #endif #include "dce/dmub_outbox.h" @@ -217,11 +218,24 @@ static bool create_links( connectors_num, num_virtual_links); - // condition loop on link_count to allow skipping invalid indices + /* When getting the number of connectors, the VBIOS reports the number of valid indices, + * but it doesn't say which indices are valid, and not every index has an actual connector. + * So, if we don't find a connector on an index, that is not an error. + * + * - There is no guarantee that the first N indices will be valid + * - VBIOS may report a higher amount of valid indices than there are actual connectors + * - Some VBIOS have valid configurations for more connectors than there actually are + * on the card. This may be because the manufacturer used the same VBIOS for different + * variants of the same card. + */ for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) { + struct graphics_object_id connector_id = bios->funcs->get_connector_id(bios, i); struct link_init_data link_init_params = {0}; struct dc_link *link; + if (connector_id.id == CONNECTOR_ID_UNKNOWN) + continue; + DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count); link_init_params.ctx = dc->ctx; @@ -446,7 +460,9 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc, * avoid conflicting with firmware updates. */ if (dc->ctx->dce_version > DCE_VERSION_MAX) { - if (dc->optimized_required || dc->wm_optimized_required) { + if (dc->optimized_required && + (stream->adjust.v_total_max != adjust->v_total_max || + stream->adjust.v_total_min != adjust->v_total_min)) { stream->adjust.timing_adjust_pending = true; return false; } @@ -934,21 +950,24 @@ static void dc_destruct(struct dc *dc) } dc_destroy_resource_pool(dc); - +#ifdef CONFIG_DRM_AMD_DC_FP + dc_destroy_soc_and_ip_translator(&dc->soc_and_ip_translator); +#endif if (dc->link_srv) link_destroy_link_service(&dc->link_srv); - if (dc->ctx->gpio_service) - dal_gpio_service_destroy(&dc->ctx->gpio_service); + if (dc->ctx) { + if (dc->ctx->gpio_service) + dal_gpio_service_destroy(&dc->ctx->gpio_service); - if (dc->ctx->created_bios) - dal_bios_parser_destroy(&dc->ctx->dc_bios); + if (dc->ctx->created_bios) + dal_bios_parser_destroy(&dc->ctx->dc_bios); + kfree(dc->ctx->logger); + dc_perf_trace_destroy(&dc->ctx->perf_trace); - kfree(dc->ctx->logger); - dc_perf_trace_destroy(&dc->ctx->perf_trace); - - kfree(dc->ctx); - dc->ctx = NULL; + kfree(dc->ctx); + dc->ctx = NULL; + } kfree(dc->bw_vbios); dc->bw_vbios = NULL; @@ -976,6 +995,8 @@ static bool dc_construct_ctx(struct dc *dc, if (!dc_ctx) return false; + dc_stream_init_rmcm_3dlut(dc); + dc_ctx->cgs_device = init_params->cgs_device; dc_ctx->driver_context = init_params->driver; dc_ctx->dc = dc; @@ -1135,6 +1156,9 @@ static bool dc_construct(struct dc *dc, dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params); DC_FP_END(); } + dc->soc_and_ip_translator = dc_create_soc_and_ip_translator(dc_ctx->dce_version); + if (!dc->soc_and_ip_translator) + goto fail; #endif if (!create_links(dc, init_params->num_virtual_links)) @@ -2381,7 +2405,7 @@ enum 
dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params context->power_source = params->power_source; - res = dc_validate_with_context(dc, set, params->stream_count, context, false); + res = dc_validate_with_context(dc, set, params->stream_count, context, DC_VALIDATE_MODE_AND_PROGRAMMING); /* * Only update link encoder to stream assignment after bandwidth validation passed. @@ -2395,6 +2419,18 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params goto fail; } + /* + * If not already seamless, make transition seamless by inserting intermediate minimal transition + */ + if (dc->hwss.is_pipe_topology_transition_seamless && + !dc->hwss.is_pipe_topology_transition_seamless(dc, dc->current_state, context)) { + res = commit_minimal_transition_state(dc, context); + if (res != DC_OK) { + BREAK_TO_DEBUGGER(); + goto fail; + } + } + res = dc_commit_state_no_check(dc, context); for (i = 0; i < params->stream_count; i++) { @@ -2541,7 +2577,6 @@ void dc_post_update_surfaces_to_stream(struct dc *dc) } dc->optimized_required = false; - dc->wm_optimized_required = false; } bool dc_set_generic_gpio_for_stereo(bool enable, @@ -3020,8 +3055,6 @@ enum surface_update_type dc_check_update_surfaces_for_stream( } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) { dc->optimized_required = true; } - - dc->optimized_required |= dc->wm_optimized_required; } return type; @@ -3277,6 +3310,9 @@ static void copy_stream_update_to_stream(struct dc *dc, if (update->adaptive_sync_infopacket) stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket; + if (update->avi_infopacket) + stream->avi_infopacket = *update->avi_infopacket; + if (update->dither_option) stream->dither_option = *update->dither_option; @@ -3304,7 +3340,8 @@ static void copy_stream_update_to_stream(struct dc *dc, if (dsc_validate_context) { stream->timing.dsc_cfg = *update->dsc_config; stream->timing.flags.DSC = enable_dsc; - if (dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true) != DC_OK) { + if (dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, + DC_VALIDATE_MODE_ONLY) != DC_OK) { stream->timing.dsc_cfg = old_dsc_cfg; stream->timing.flags.DSC = old_dsc_enabled; update->dsc_config = NULL; @@ -3373,7 +3410,7 @@ static void update_seamless_boot_flags(struct dc *dc, int surface_count, struct dc_stream_state *stream) { - if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) { + if (get_seamless_boot_stream_count(context) > 0 && (surface_count > 0 || stream->dpms_off)) { /* Optimize seamless boot flag keeps clocks and watermarks high until * first flip. After first flip, optimization is required to lower * bandwidth. 
Important to note that it is expected UEFI will @@ -3526,7 +3563,7 @@ static bool update_planes_and_stream_state(struct dc *dc, } if (update_type == UPDATE_TYPE_FULL) { - if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) { + if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) { BREAK_TO_DEBUGGER(); goto fail; } @@ -3570,7 +3607,8 @@ static void commit_planes_do_stream_update(struct dc *dc, stream_update->vsp_infopacket || stream_update->hfvsif_infopacket || stream_update->adaptive_sync_infopacket || - stream_update->vtem_infopacket) { + stream_update->vtem_infopacket || + stream_update->avi_infopacket) { resource_build_info_frame(pipe_ctx); dc->hwss.update_info_frame(pipe_ctx); @@ -4132,7 +4170,7 @@ static void commit_planes_for_stream(struct dc *dc, } if (dc->hwseq->funcs.wait_for_pipe_update_if_needed) - dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, top_pipe_to_program, update_type == UPDATE_TYPE_FAST); + dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, top_pipe_to_program, update_type < UPDATE_TYPE_FULL); if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { if (dc->hwss.subvp_pipe_control_lock) @@ -4632,7 +4670,8 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc, backup_and_set_minimal_pipe_split_policy(dc, base_context, policy); /* commit minimal state */ - if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false) == DC_OK) { + if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, + DC_VALIDATE_MODE_AND_PROGRAMMING) == DC_OK) { /* prevent underflow and corruption when reconfiguring pipes */ force_vsync_flip_in_minimal_transition_context(minimal_transition_context); } else { @@ -5041,6 +5080,7 @@ static bool full_update_required(struct dc *dc, stream_update->hfvsif_infopacket || stream_update->vtem_infopacket || stream_update->adaptive_sync_infopacket || + stream_update->avi_infopacket || stream_update->dpms_off || stream_update->allow_freesync || stream_update->vrr_active_variable || @@ -5084,129 +5124,6 @@ static bool fast_update_only(struct dc *dc, && !full_update_required(dc, srf_updates, surface_count, stream_update, stream); } -static bool update_planes_and_stream_v1(struct dc *dc, - struct dc_surface_update *srf_updates, int surface_count, - struct dc_stream_state *stream, - struct dc_stream_update *stream_update, - struct dc_state *state) -{ - const struct dc_stream_status *stream_status; - enum surface_update_type update_type; - struct dc_state *context; - struct dc_context *dc_ctx = dc->ctx; - int i, j; - struct dc_fast_update fast_update[MAX_SURFACES] = {0}; - - dc_exit_ips_for_hw_access(dc); - - populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); - stream_status = dc_stream_get_status(stream); - context = dc->current_state; - - update_type = dc_check_update_surfaces_for_stream( - dc, srf_updates, surface_count, stream_update, stream_status); - /* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream. - * E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip - * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come. 
- */ - force_immediate_gsl_plane_flip(dc, srf_updates, surface_count); - - if (update_type >= UPDATE_TYPE_FULL) { - - /* initialize scratch memory for building context */ - context = dc_state_create_copy(state); - if (context == NULL) { - DC_ERROR("Failed to allocate new validate context!\n"); - return false; - } - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; - struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; - - if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state) - new_pipe->plane_state->force_full_update = true; - } - } else if (update_type == UPDATE_TYPE_FAST) { - /* - * Previous frame finished and HW is ready for optimization. - */ - dc_post_update_surfaces_to_stream(dc); - } - - for (i = 0; i < surface_count; i++) { - struct dc_plane_state *surface = srf_updates[i].surface; - - copy_surface_update_to_plane(surface, &srf_updates[i]); - - if (update_type >= UPDATE_TYPE_MED) { - for (j = 0; j < dc->res_pool->pipe_count; j++) { - struct pipe_ctx *pipe_ctx = - &context->res_ctx.pipe_ctx[j]; - - if (pipe_ctx->plane_state != surface) - continue; - - resource_build_scaling_params(pipe_ctx); - } - } - } - - copy_stream_update_to_stream(dc, context, stream, stream_update); - - if (update_type >= UPDATE_TYPE_FULL) { - if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) { - DC_ERROR("Mode validation failed for stream update!\n"); - dc_state_release(context); - return false; - } - } - - TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES); - - if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) && - !dc->debug.enable_legacy_fast_update) { - commit_planes_for_stream_fast(dc, - srf_updates, - surface_count, - stream, - stream_update, - update_type, - context); - } else { - commit_planes_for_stream( - dc, - srf_updates, - surface_count, - stream, - stream_update, - update_type, - context); - } - /*update current_State*/ - if (dc->current_state != context) { - - struct dc_state *old = dc->current_state; - - dc->current_state = context; - dc_state_release(old); - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - - if (pipe_ctx->plane_state && pipe_ctx->stream == stream) - pipe_ctx->plane_state->force_full_update = false; - } - } - - /* Legacy optimization path for DCE. 
*/ - if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) { - dc_post_update_surfaces_to_stream(dc); - TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); - } - return true; -} - static bool update_planes_and_stream_v2(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, @@ -5439,8 +5356,8 @@ bool dc_update_planes_and_stream(struct dc *dc, else ret = update_planes_and_stream_v2(dc, srf_updates, surface_count, stream, stream_update); - - if (ret) + if (ret && (dc->ctx->dce_version >= DCN_VERSION_3_2 || + dc->ctx->dce_version == DCN_VERSION_3_01)) clear_update_flags(srf_updates, surface_count, stream); return ret; @@ -5464,14 +5381,12 @@ void dc_commit_updates_for_stream(struct dc *dc, if (dc->ctx->dce_version >= DCN_VERSION_4_01) { ret = update_planes_and_stream_v3(dc, srf_updates, surface_count, stream, stream_update); - } else if (dc->ctx->dce_version >= DCN_VERSION_3_2) { + } else { ret = update_planes_and_stream_v2(dc, srf_updates, surface_count, stream, stream_update); - } else - ret = update_planes_and_stream_v1(dc, srf_updates, surface_count, stream, - stream_update, state); + } - if (ret) + if (ret && dc->ctx->dce_version >= DCN_VERSION_3_2) clear_update_flags(srf_updates, surface_count, stream); } @@ -5544,6 +5459,15 @@ void dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state) dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config); } break; + case DC_ACPI_CM_POWER_STATE_D3: + if (dc->caps.ips_support) + dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3); + + if (dc->caps.ips_v2_support) { + if (dc->clk_mgr->funcs->set_low_power_state) + dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr); + } + break; default: ASSERT(dc->current_state->stream_count == 0); dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state); @@ -5700,8 +5624,8 @@ void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const subvp_pipe_type[i] = dc_state_get_pipe_subvp_type(context, pipe); } } - - DC_LOG_DC("%s: allow_idle=%d\n HardMinUClk_Khz=%d HardMinDramclk_Khz=%d\n Pipe_0=%d Pipe_1=%d Pipe_2=%d Pipe_3=%d Pipe_4=%d Pipe_5=%d (caller=%s)\n", + if (!dc->caps.is_apu) + DC_LOG_DC("%s: allow_idle=%d\n HardMinUClk_Khz=%d HardMinDramclk_Khz=%d\n Pipe_0=%d Pipe_1=%d Pipe_2=%d Pipe_3=%d Pipe_4=%d Pipe_5=%d (caller=%s)\n", __func__, allow, idle_fclk_khz, idle_dramclk_khz, subvp_pipe_type[0], subvp_pipe_type[1], subvp_pipe_type[2], subvp_pipe_type[3], subvp_pipe_type[4], subvp_pipe_type[5], caller_name); @@ -6341,13 +6265,14 @@ void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link, edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn); } -/* - ***************************************************************************** +/** * dc_get_power_profile_for_dc_state() - extracts power profile from dc state * * Called when DM wants to make power policy decisions based on dc_state * - ***************************************************************************** + * @context: Pointer to the dc_state from which the power profile is extracted. + * + * Return: The power profile structure containing the power level information. 
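As a usage illustration for the kernel-doc above (not part of this change): a DM-side caller could consume the returned profile as below. dm_get_requested_power_level() is made up for the example, and the power_level field name is an assumption.

/* Hypothetical caller of dc_get_power_profile_for_dc_state(). */
static int dm_get_requested_power_level(const struct dc_state *context)
{
	struct dc_power_profile profile =
		dc_get_power_profile_for_dc_state(context);

	/* The power_level field name is assumed for this sketch; the DM
	 * would feed this into its power policy decisions.
	 */
	return profile.power_level;
}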
*/ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context) { @@ -6363,13 +6288,14 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state return profile; } -/* - ********************************************************************************** +/** * dc_get_det_buffer_size_from_state() - extracts detile buffer size from dc state * - * Called when DM wants to log detile buffer size from dc_state + * This function is called to log the detile buffer size from the dc_state. + * + * @context: a pointer to the dc_state from which the detile buffer size is extracted. * - ********************************************************************************** + * Return: the size of the detile buffer, or 0 if not available. */ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context) { @@ -6380,26 +6306,27 @@ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context) else return 0; } + /** - *********************************************************************************************** * dc_get_host_router_index: Get index of host router from a dpia link * * This function return a host router index of the target link. If the target link is dpia link. * - * @param [in] link: target link - * @param [out] host_router_index: host router index of the target link + * @link: Pointer to the target link (input) + * @host_router_index: Pointer to store the host router index of the target link (output). * - * @return: true if the host router index is found and valid. + * Return: true if the host router index is found and valid. * - *********************************************************************************************** */ bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index) { - struct dc *dc = link->ctx->dc; + struct dc *dc; - if (link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) + if (!link || !host_router_index || link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) return false; + dc = link->ctx->dc; + if (link->link_index < dc->lowest_dpia_link_index) return false; @@ -6433,3 +6360,21 @@ bool dc_can_clear_cursor_limit(struct dc *dc) return false; } + +void dc_get_underflow_debug_data_for_otg(struct dc *dc, int primary_otg_inst, + struct dc_underflow_debug_data *out_data) +{ + struct timing_generator *tg = NULL; + + for (int i = 0; i < MAX_PIPES; i++) { + if (dc->res_pool->timing_generators[i] && + dc->res_pool->timing_generators[i]->inst == primary_otg_inst) { + tg = dc->res_pool->timing_generators[i]; + break; + } + } + + dc_exit_ips_for_hw_access(dc); + if (dc->hwss.get_underflow_debug_data) + dc->hwss.get_underflow_debug_data(dc, tg, out_data); +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c index 7551d0a3fe82..bbce751b485f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c @@ -268,6 +268,8 @@ char *dc_status_to_str(enum dc_status status) return "Insufficient DP link bandwidth"; case DC_FAIL_HW_CURSOR_SUPPORT: return "HW Cursor not supported"; + case DC_FAIL_DP_TUNNEL_BW_VALIDATE: + return "Fail DP Tunnel BW validation"; case DC_ERROR_UNEXPECTED: return "Unexpected error"; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index 7014b8d000bb..d82b1cb467f4 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ 
b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -427,6 +427,32 @@ void get_hdr_visual_confirm_color( } } +/* Visual Confirm color definition for Smart Mux */ +void get_smartmux_visual_confirm_color( + struct dc *dc, + struct tg_color *color) +{ + uint32_t color_value = MAX_TG_COLOR_VALUE; + + const struct tg_color sm_ver_colors[5] = { + {0, 0, 0}, /* SMUX_MUXCONTROL_UNSUPPORTED - Black */ + {0, MAX_TG_COLOR_VALUE, 0}, /* SMUX_MUXCONTROL_v10 - Green */ + {0, MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE}, /* SMUX_MUXCONTROL_v15 - Cyan */ + {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE, 0}, /* SMUX_MUXCONTROL_MDM - Yellow */ + {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, /* SMUX_MUXCONTROL_vUNKNOWN - Magenta*/ + }; + + if (dc->caps.is_apu) { + /* APU driving the eDP */ + *color = sm_ver_colors[dc->config.smart_mux_version]; + } else { + /* dGPU driving the eDP - red */ + color->color_r_cr = color_value; + color->color_g_y = 0; + color->color_b_cb = 0; + } +} + /* Visual Confirm color definition for VABC */ void get_vabc_visual_confirm_color( struct pipe_ctx *pipe_ctx, @@ -1151,6 +1177,8 @@ void hwss_wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *c tg = otg_master->stream_res.tg; if (tg->funcs->wait_odm_doublebuffer_pending_clear) tg->funcs->wait_odm_doublebuffer_pending_clear(tg); + if (tg->funcs->wait_otg_disable) + tg->funcs->wait_otg_disable(tg); } /* ODM update may require to reprogram blank pattern for each OPP */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c index 814f68d76257..a180f68f711c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c @@ -24,7 +24,7 @@ #include "link_enc_cfg.h" #include "resource.h" -#include "link.h" +#include "link_service.h" #define DC_LOGGER dc->ctx->logger diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c index 71e15da4bb69..9acd30019717 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c @@ -33,8 +33,9 @@ * dc.h with detail interface documentation, then add function implementation * in this file which calls link functions. 
*/ -#include "link.h" +#include "link_service.h" #include "dce/dce_i2c.h" + struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index) { if (link_index >= MAX_LINKS) @@ -515,7 +516,15 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable) link->dc->link_srv->enable_hpd_filter(link, enable); } -bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count) +enum dc_status dc_link_validate_dp_tunneling_bandwidth(const struct dc *dc, const struct dc_state *new_ctx) { - return dc->link_srv->validate_dpia_bandwidth(streams, count); + return dc->link_srv->validate_dp_tunnel_bandwidth(dc, new_ctx); } + +void dc_link_get_alpm_support(struct dc_link *link, + bool *auxless_support, + bool *auxwake_support) +{ + link->dc->link_srv->edp_get_alpm_support(link, auxless_support, auxwake_support); +} + diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 3da25bd8b578..bc5dedf5f60c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -40,7 +40,7 @@ #include "virtual/virtual_stream_encoder.h" #include "dpcd_defs.h" #include "link_enc_cfg.h" -#include "link.h" +#include "link_service.h" #include "clk_mgr.h" #include "dc_state_priv.h" #include "dc_stream_priv.h" @@ -95,7 +95,6 @@ #define DC_LOGGER \ dc->ctx->logger #define DC_LOGGER_INIT(logger) - #include "dml2/dml2_wrapper.h" #define UNABLE_TO_SPLIT -1 @@ -165,7 +164,13 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) case FAMILY_NV: dc_version = DCN_VERSION_2_0; - if (asic_id.chip_id == DEVICE_ID_NV_13FE || asic_id.chip_id == DEVICE_ID_NV_143F) { + if (asic_id.chip_id == DEVICE_ID_NV_13FE || + asic_id.chip_id == DEVICE_ID_NV_143F || + asic_id.chip_id == DEVICE_ID_NV_13F9 || + asic_id.chip_id == DEVICE_ID_NV_13FA || + asic_id.chip_id == DEVICE_ID_NV_13FB || + asic_id.chip_id == DEVICE_ID_NV_13FC || + asic_id.chip_id == DEVICE_ID_NV_13DB) { dc_version = DCN_VERSION_2_01; break; } @@ -2143,7 +2148,7 @@ int resource_get_odm_slice_dst_width(struct pipe_ctx *otg_master, h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right + - otg_master->hblank_borrow; + otg_master->dsc_padding_params.dsc_hactive_padding; width = h_active / count; if (otg_master->stream_res.tg) @@ -3940,7 +3945,9 @@ enum dc_status resource_map_pool_resources( /* TODO: Add check if ASIC support and EDID audio */ if (!stream->converter_disable_audio && dc_is_audio_capable_signal(pipe_ctx->stream->signal) && - stream->audio_info.mode_count && stream->audio_info.flags.all) { + stream->audio_info.mode_count && + (stream->audio_info.flags.all || + (stream->sink && stream->sink->edid_caps.panel_patch.skip_audio_sab_check))) { pipe_ctx->stream_res.audio = find_first_free_audio( &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id, dc_ctx->dce_version); @@ -4053,7 +4060,7 @@ static bool add_all_planes_for_stream( * @set: An array of dc_validation_set with all the current streams reference * @set_count: Total of streams * @context: New context - * @fast_validate: Enable or disable fast validation + * @validate_mode: identify the validation mode * * This function updates the potential new stream in the context object. It * creates multiple lists for the add, remove, and unchanged streams. 
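To illustrate the enum-returning tunneling check exported above (a sketch under assumptions, not part of this patch): callers that previously got a bare bool from dc_link_dp_dpia_validate() can now act on a specific dc_status; dm_check_dp_tunnel_bw() is a hypothetical wrapper.

/* Hypothetical wrapper around the new DP tunneling bandwidth validation. */
static enum dc_status dm_check_dp_tunnel_bw(const struct dc *dc,
					    const struct dc_state *new_ctx)
{
	enum dc_status status =
		dc_link_validate_dp_tunneling_bandwidth(dc, new_ctx);

	/* DC_FAIL_DP_TUNNEL_BW_VALIDATE (added to dc_status_to_str() above)
	 * indicates the aggregated tunnel bandwidth is more than the DP
	 * tunneling link can carry; other failures keep their usual codes.
	 */
	return status;
}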
In @@ -4068,7 +4075,7 @@ enum dc_status dc_validate_with_context(struct dc *dc, const struct dc_validation_set set[], int set_count, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { struct dc_stream_state *unchanged_streams[MAX_PIPES] = { 0 }; struct dc_stream_state *del_streams[MAX_PIPES] = { 0 }; @@ -4242,7 +4249,7 @@ enum dc_status dc_validate_with_context(struct dc *dc, dc_state_set_stream_subvp_cursor_limit(context->streams[i], context, false); } - res = dc_validate_global_state(dc, context, fast_validate); + res = dc_validate_global_state(dc, context, validate_mode); /* calculate pixel rate divider after deciding pxiel clock & odm combine */ if ((dc->hwss.calculate_pix_rate_divider) && (res == DC_OK)) { @@ -4259,39 +4266,33 @@ fail: return res; } +#if defined(CONFIG_DRM_AMD_DC_FP) +#endif /* CONFIG_DRM_AMD_DC_FP */ + /** - * decide_hblank_borrow - Decides the horizontal blanking borrow value for a given pipe context. + * calculate_timing_params_for_dsc_with_padding - Calculates timing parameters for DSC with padding. * @pipe_ctx: Pointer to the pipe context structure. * - * This function calculates the horizontal blanking borrow value for a given pipe context based on the + * This function calculates the timing parameters for a given pipe context based on the * display stream compression (DSC) configuration. If the horizontal active pixels (hactive) are less - * than the total width of the DSC slices, it sets the hblank_borrow value to the difference. If the - * total horizontal timing minus the hblank_borrow value is less than 32, it resets the hblank_borrow + * than the total width of the DSC slices, it sets the dsc_hactive_padding value to the difference. If the + * total horizontal timing minus the dsc_hactive_padding value is less than 32, it resets the dsc_hactive_padding * value to 0. */ -static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx) +static void calculate_timing_params_for_dsc_with_padding(struct pipe_ctx *pipe_ctx) { - uint32_t hactive; - uint32_t ceil_slice_width; struct dc_stream_state *stream = NULL; if (!pipe_ctx) return; stream = pipe_ctx->stream; + pipe_ctx->dsc_padding_params.dsc_hactive_padding = 0; + pipe_ctx->dsc_padding_params.dsc_htotal_padding = 0; - if (stream->timing.flags.DSC) { - hactive = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; - - /* Assume if determined slices does not divide Hactive evenly, Hborrow is needed for padding*/ - if (hactive % stream->timing.dsc_cfg.num_slices_h != 0) { - ceil_slice_width = (hactive / stream->timing.dsc_cfg.num_slices_h) + 1; - pipe_ctx->hblank_borrow = ceil_slice_width * stream->timing.dsc_cfg.num_slices_h - hactive; + if (stream) + pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz = stream->timing.pix_clk_100hz; - if (stream->timing.h_total - hactive - pipe_ctx->hblank_borrow < 32) - pipe_ctx->hblank_borrow = 0; - } - } } /** @@ -4299,7 +4300,7 @@ static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx) * * @dc: dc struct for this driver * @new_ctx: state to be validated - * @fast_validate: set to true if only yes/no to support matters + * @validate_mode: identify the validation mode * * Checks hardware resource availability and bandwidth requirement. 
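Since the boolean fast_validate parameter is replaced throughout by enum dc_validate_mode, a small mapping sketch may help out-of-tree callers; dm_validate_state() is hypothetical, and the mapping simply reflects how this patch converts existing call sites (true becomes DC_VALIDATE_MODE_ONLY, false becomes DC_VALIDATE_MODE_AND_PROGRAMMING).

/* Hypothetical shim mapping the old bool to the new validation mode. */
static enum dc_status dm_validate_state(struct dc *dc, struct dc_state *new_ctx,
					bool fast_validate)
{
	enum dc_validate_mode mode = fast_validate ?
		DC_VALIDATE_MODE_ONLY :			/* yes/no support check only */
		DC_VALIDATE_MODE_AND_PROGRAMMING;	/* also populate programming info */

	return dc_validate_global_state(dc, new_ctx, mode);
}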
* @@ -4309,7 +4310,7 @@ static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx) enum dc_status dc_validate_global_state( struct dc *dc, struct dc_state *new_ctx, - bool fast_validate) + enum dc_validate_mode validate_mode) { enum dc_status result = DC_ERROR_UNEXPECTED; int i, j; @@ -4334,7 +4335,7 @@ enum dc_status dc_validate_global_state( /* Decide whether hblank borrow is needed and save it in pipe_ctx */ if (dc->debug.enable_hblank_borrow) - decide_hblank_borrow(pipe_ctx); + calculate_timing_params_for_dsc_with_padding(pipe_ctx); if (dc->res_pool->funcs->patch_unknown_plane_state && pipe_ctx->plane_state && @@ -4368,7 +4369,7 @@ enum dc_status dc_validate_global_state( result = resource_build_scaling_params_for_context(dc, new_ctx); if (result == DC_OK) - result = dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate); + result = dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, validate_mode); return result; } @@ -4409,8 +4410,14 @@ static void set_avi_info_frame( unsigned int fr_ind = pipe_ctx->stream->timing.fr_index; enum dc_timing_3d_format format; + if (stream->avi_infopacket.valid) { + *info_packet = stream->avi_infopacket; + return; + } + memset(&hdmi_info, 0, sizeof(union hdmi_info_packet)); + color_space = pipe_ctx->stream->output_color_space; if (color_space == COLOR_SPACE_UNKNOWN) color_space = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ? diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c index fe9f99f1bdf9..f976ffd6d466 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c @@ -65,7 +65,7 @@ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION || notify->type == DMUB_NOTIFICATION_SET_CONFIG_REPLY) { notify->link_index = - get_link_index_from_dpia_port_index(dc, notify->link_index); + get_link_index_from_dpia_port_index(dc, notify->instance); } } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c index 4db7383720fd..c61300a7cb1c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -194,11 +194,6 @@ static void init_state(struct dc *dc, struct dc_state *state) struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params) { struct dc_state *state; -#ifdef CONFIG_DRM_AMD_DC_FP - struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp; - - memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options)); -#endif state = kvzalloc(sizeof(struct dc_state), GFP_KERNEL); @@ -211,14 +206,12 @@ struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *p #ifdef CONFIG_DRM_AMD_DC_FP if (dc->debug.using_dml2) { - dml2_opt->use_clock_dc_limits = false; - if (!dml2_create(dc, dml2_opt, &state->bw_ctx.dml2)) { + if (!dml2_create(dc, &dc->dml2_options, &state->bw_ctx.dml2)) { dc_state_release(state); return NULL; } - dml2_opt->use_clock_dc_limits = true; - if (!dml2_create(dc, dml2_opt, &state->bw_ctx.dml2_dc_power_source)) { + if (dc->caps.dcmode_power_limits_present && !dml2_create(dc, &dc->dml2_dc_power_options, &state->bw_ctx.dml2_dc_power_source)) { dc_state_release(state); return NULL; } @@ -434,6 +427,8 @@ enum dc_status dc_state_remove_stream( return DC_ERROR_UNEXPECTED; } + dc_stream_release_3dlut_for_stream(dc, stream); + dc_stream_release(state->streams[i]); state->stream_count--; diff --git 
a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index b883fb24fa12..9ac2d41f8fca 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -316,6 +316,9 @@ bool dc_stream_set_cursor_attributes( { bool result = false; + if (!stream) + return false; + if (dc_stream_check_cursor_attributes(stream, stream->ctx->dc->current_state, attributes)) { stream->cursor_attributes = *attributes; result = true; @@ -331,7 +334,10 @@ bool dc_stream_program_cursor_attributes( struct dc *dc; bool reset_idle_optimizations = false; - dc = stream ? stream->ctx->dc : NULL; + if (!stream) + return false; + + dc = stream->ctx->dc; if (dc_stream_set_cursor_attributes(stream, attributes)) { dc_z10_restore(dc); @@ -857,6 +863,73 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream) } /* +* dc_stream_get_3dlut() +* Requirements: +* 1. Is stream already owns an RMCM instance, return it. +* 2. If it doesn't and we don't need to allocate, return NULL. +* 3. If there's a free RMCM instance, assign to stream and return it. +* 4. If no free RMCM instances, return NULL. +*/ + +struct dc_rmcm_3dlut *dc_stream_get_3dlut_for_stream( + const struct dc *dc, + const struct dc_stream_state *stream, + bool allocate_one) +{ + unsigned int num_rmcm = dc->caps.color.mpc.num_rmcm_3dluts; + + // see if one is allocated for this stream + for (int i = 0; i < num_rmcm; i++) { + if (dc->res_pool->rmcm_3dlut[i].isInUse && + dc->res_pool->rmcm_3dlut[i].stream == stream) + return &dc->res_pool->rmcm_3dlut[i]; + } + + //case: not found one, and dont need to allocate + if (!allocate_one) + return NULL; + + //see if there is an unused 3dlut, allocate + for (int i = 0; i < num_rmcm; i++) { + if (!dc->res_pool->rmcm_3dlut[i].isInUse) { + dc->res_pool->rmcm_3dlut[i].isInUse = true; + dc->res_pool->rmcm_3dlut[i].stream = stream; + return &dc->res_pool->rmcm_3dlut[i]; + } + } + + //dont have a 3dlut + return NULL; +} + + +void dc_stream_release_3dlut_for_stream( + const struct dc *dc, + const struct dc_stream_state *stream) +{ + struct dc_rmcm_3dlut *rmcm_3dlut = + dc_stream_get_3dlut_for_stream(dc, stream, false); + + if (rmcm_3dlut) { + rmcm_3dlut->isInUse = false; + rmcm_3dlut->stream = NULL; + rmcm_3dlut->protection_bits = 0; + } +} + + +void dc_stream_init_rmcm_3dlut(struct dc *dc) +{ + unsigned int num_rmcm = dc->caps.color.mpc.num_rmcm_3dluts; + + for (int i = 0; i < num_rmcm; i++) { + dc->res_pool->rmcm_3dlut[i].isInUse = false; + dc->res_pool->rmcm_3dlut[i].stream = NULL; + dc->res_pool->rmcm_3dlut[i].protection_bits = 0; + } +} + +/* * Finds the greatest index in refresh_rate_hz that contains a value <= refresh */ static int dc_stream_get_nearest_smallest_index(struct dc_stream_state *stream, int refresh) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index f41073c0147e..98f0b6b3c213 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -46,6 +46,8 @@ #include "dmub/inc/dmub_cmd.h" +#include "sspl/dc_spl_types.h" + struct abm_save_restore; /* forward declaration */ @@ -53,7 +55,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.334" +#define DC_VER "3.2.351" /** * MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC @@ -66,8 +68,11 @@ struct dmub_notification; #define MAX_STREAMS 6 #define MIN_VIEWPORT_SIZE 12 #define MAX_NUM_EDP 2 
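For context on the RMCM 3D LUT helpers added in dc_stream.c above, a minimal acquisition sketch follows (not part of this patch); dm_program_stream_lut() is hypothetical, and release happens either explicitly via dc_stream_release_3dlut_for_stream() or implicitly when dc_state_remove_stream() drops the stream, as shown in the dc_state.c hunk above.

/* Hypothetical caller of the new RMCM 3D LUT allocator. */
static bool dm_program_stream_lut(struct dc *dc, struct dc_stream_state *stream)
{
	/* allocate_one = true: return the stream's existing instance if it
	 * already owns one, otherwise claim a free instance.
	 */
	struct dc_rmcm_3dlut *lut =
		dc_stream_get_3dlut_for_stream(dc, stream, true);

	if (!lut)
		return false;	/* all caps.color.mpc.num_rmcm_3dluts in use */

	/* ... program the 3D LUT contents for this stream here ... */

	return true;
}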
+#define MAX_SUPPORTED_FORMATS 7 + #define MAX_HOST_ROUTERS_NUM 3 -#define MAX_DPIA_PER_HOST_ROUTER 2 +#define MAX_DPIA_PER_HOST_ROUTER 3 +#define MAX_DPIA_NUM (MAX_HOST_ROUTERS_NUM * MAX_DPIA_PER_HOST_ROUTER) /* Display Core Interfaces */ struct dc_versions { @@ -193,6 +198,34 @@ struct dpp_color_caps { struct rom_curve_caps ogam_rom_caps; }; +/* Below structure is to describe the HW support for mem layout, extend support + range to match what OS could handle in the roadmap */ +struct lut3d_caps { + uint32_t dma_3d_lut : 1; /*< DMA mode support for 3D LUT */ + struct { + uint32_t swizzle_3d_rgb : 1; + uint32_t swizzle_3d_bgr : 1; + uint32_t linear_1d : 1; + } mem_layout_support; + struct { + uint32_t unorm_12msb : 1; + uint32_t unorm_12lsb : 1; + uint32_t float_fp1_5_10 : 1; + } mem_format_support; + struct { + uint32_t order_rgba : 1; + uint32_t order_bgra : 1; + } mem_pixel_order_support; + /*< size options are 9, 17, 33, 45, 65 */ + struct { + uint32_t dim_9 : 1; /* 3D LUT support for 9x9x9 */ + uint32_t dim_17 : 1; /* 3D LUT support for 17x17x17 */ + uint32_t dim_33 : 1; /* 3D LUT support for 33x33x33 */ + uint32_t dim_45 : 1; /* 3D LUT support for 45x45x45 */ + uint32_t dim_65 : 1; /* 3D LUT support for 65x65x65 */ + } lut_dim_caps; +}; + /** * struct mpc_color_caps - color pipeline capabilities for multiple pipe and * plane combined blocks @@ -201,17 +234,25 @@ struct dpp_color_caps { * @ogam_ram: programmable out gamma LUT * @ocsc: output color space conversion matrix * @num_3dluts: MPC 3D LUT; always assumes a preceding shaper LUT + * @num_rmcm_3dluts: number of RMCM 3D LUTS; always assumes a preceding shaper LUT * @shared_3d_lut: shared 3D LUT flag. Can be either DPP or MPC, but single * instance * @ogam_rom_caps: pre-definied curve caps for regamma 1D LUT + * @mcm_3d_lut_caps: HW support cap for MCM LUT memory + * @rmcm_3d_lut_caps: HW support cap for RMCM LUT memory + * @preblend: whether color manager supports preblend with MPC */ struct mpc_color_caps { uint16_t gamut_remap : 1; uint16_t ogam_ram : 1; uint16_t ocsc : 1; uint16_t num_3dluts : 3; + uint16_t num_rmcm_3dluts : 3; uint16_t shared_3d_lut:1; struct rom_curve_caps ogam_rom_caps; + struct lut3d_caps mcm_3d_lut_caps; + struct lut3d_caps rmcm_3d_lut_caps; + bool preblend; }; /** @@ -271,6 +312,7 @@ struct dc_caps { bool dmcub_support; bool zstate_support; bool ips_support; + bool ips_v2_support; uint32_t num_of_internal_disp; enum dp_protocol_version max_dp_protocol_version; unsigned int mall_size_per_mem_channel; @@ -308,6 +350,8 @@ struct dc_caps { struct dc_scl_caps scl_caps; uint8_t num_of_host_routers; uint8_t num_of_dpias_per_host_router; + /* limit of the ODM only, could be limited by other factors (like pipe count)*/ + uint8_t max_odm_combine_factor; }; struct dc_bug_wa { @@ -462,6 +506,7 @@ struct dc_config { bool use_spl; bool prefer_easf; bool use_pipe_ctx_sync_logic; + int smart_mux_version; bool ignore_dpref_ss; bool enable_mipi_converter_optimization; bool use_default_clock_table; @@ -472,6 +517,7 @@ struct dc_config { bool EnableMinDispClkODM; bool enable_auto_dpm_test_logs; unsigned int disable_ips; + unsigned int disable_ips_rcg; unsigned int disable_ips_in_vpb; bool disable_ips_in_dpms_off; bool usb4_bw_alloc_support; @@ -484,6 +530,8 @@ struct dc_config { bool set_pipe_unlock_order; bool enable_dpia_pre_training; bool unify_link_enc_assignment; + struct spl_sharpness_range dcn_sharpness_range; + struct spl_sharpness_range dcn_override_sharpness_range; }; enum visual_confirm { @@ -495,6 +543,7 @@ enum 
visual_confirm { VISUAL_CONFIRM_SWAPCHAIN = 6, VISUAL_CONFIRM_FAMS = 7, VISUAL_CONFIRM_SWIZZLE = 9, + VISUAL_CONFIRM_SMARTMUX_DGPU = 10, VISUAL_CONFIRM_REPLAY = 12, VISUAL_CONFIRM_SUBVP = 14, VISUAL_CONFIRM_MCLK_SWITCH = 16, @@ -646,6 +695,15 @@ struct dc_clocks { int idle_fclk_khz; int subvp_prefetch_dramclk_khz; int subvp_prefetch_fclk_khz; + + /* Stutter efficiency is technically not clock values + * but stored here so the values are part of the update_clocks call similar to num_ways + * Efficiencies are stored as percentage (0-100) + */ + struct { + uint8_t base_efficiency; //LP1 + uint8_t low_power_efficiency; //LP2 + } stutter_efficiency; }; struct dc_bw_validation_profile { @@ -773,6 +831,7 @@ enum pg_hw_resources { PG_DCHVM, PG_DWB, PG_HPO, + PG_DCOH, PG_HW_RESOURCES_NUM_ELEMENT }; @@ -789,9 +848,8 @@ union dpia_debug_options { uint32_t disable_mst_dsc_work_around:1; /* bit 3 */ uint32_t enable_force_tbt3_work_around:1; /* bit 4 */ uint32_t disable_usb4_pm_support:1; /* bit 5 */ - uint32_t enable_consolidated_dpia_dp_lt:1; /* bit 6 */ - uint32_t enable_dpia_pre_training:1; /* bit 7 */ - uint32_t unify_link_enc_assignment:1; /* bit 8 */ + uint32_t enable_usb4_bw_zero_alloc_patch:1; /* bit 6 */ + uint32_t enable_bw_allocation_mode:1; /* bit 7 */ uint32_t reserved:24; } bits; uint32_t raw; @@ -918,6 +976,9 @@ struct dc_debug_options { bool disable_dsc_power_gate; bool disable_optc_power_gate; bool disable_hpo_power_gate; + bool disable_io_clk_power_gate; + bool disable_mem_power_gate; + bool disable_dio_power_gate; int dsc_min_slice_height_override; int dsc_bpp_increment_div; bool disable_pplib_wm_range; @@ -1022,6 +1083,7 @@ struct dc_debug_options { unsigned int force_mall_ss_num_ways; bool alloc_extra_way_for_cursor; uint32_t subvp_extra_lines; + bool disable_force_pstate_allow_on_hw_release; bool force_usr_allow; /* uses value at boot and disables switch */ bool disable_dtb_ref_clk_switch; @@ -1095,6 +1157,13 @@ struct dc_debug_options { bool enable_hblank_borrow; bool force_subvp_df_throttle; uint32_t acpi_transition_bitmasks[MAX_PIPES]; + bool enable_pg_cntl_debug_logs; + unsigned int auxless_alpm_lfps_setup_ns; + unsigned int auxless_alpm_lfps_period_ns; + unsigned int auxless_alpm_lfps_silence_ns; + unsigned int auxless_alpm_lfps_t1t2_us; + short auxless_alpm_lfps_t1t2_offset_us; + bool disable_stutter_for_wm_program; }; @@ -1154,7 +1223,7 @@ struct dc_init_data { uint32_t *dcn_reg_offsets; uint32_t *nbio_reg_offsets; uint32_t *clk_reg_offsets; - struct dml2_soc_bb *bb_from_dmub; + void *bb_from_dmub; }; struct dc_callback_init { @@ -1255,6 +1324,38 @@ union dc_3dlut_state { }; +#define MATRIX_9C__DIM_128_ALIGNED_LEN 16 // 9+8 : 9 * 8 + 7 * 8 = 72 + 56 = 128 % 128 = 0 +#define MATRIX_17C__DIM_128_ALIGNED_LEN 32 //17+15: 17 * 8 + 15 * 8 = 136 + 120 = 256 % 128 = 0 +#define MATRIX_33C__DIM_128_ALIGNED_LEN 64 //17+47: 17 * 8 + 47 * 8 = 136 + 376 = 512 % 128 = 0 + +struct lut_rgb { + uint16_t b; + uint16_t g; + uint16_t r; + uint16_t padding; +}; + +//this structure maps directly to how the lut will read it from memory +struct lut_mem_mapping { + union { + //NATIVE MODE 1, 2 + //RGB layout [b][g][r] //red is 128 byte aligned + //BGR layout [r][g][b] //blue is 128 byte aligned + struct lut_rgb rgb_17c[17][17][MATRIX_17C__DIM_128_ALIGNED_LEN]; + struct lut_rgb rgb_33c[33][33][MATRIX_33C__DIM_128_ALIGNED_LEN]; + + //TRANSFORMED + uint16_t linear_rgb[(33*33*33*4/128+1)*128]; + }; + uint16_t size; +}; + +struct dc_rmcm_3dlut { + bool isInUse; + const struct dc_stream_state *stream; + 
uint8_t protection_bits; +}; + struct dc_3dlut { struct kref refcount; struct tetrahedral_params lut_3d; @@ -1291,7 +1392,6 @@ union surface_update_flags { uint32_t in_transfer_func_change:1; uint32_t input_csc_change:1; uint32_t coeff_reduction_change:1; - uint32_t output_tf_change:1; uint32_t pixel_format_change:1; uint32_t plane_size_change:1; uint32_t gamut_remap_change:1; @@ -1392,6 +1492,8 @@ struct dc_plane_state { int sharpness_level; enum linear_light_scaling linear_light_scaling; unsigned int sdr_white_level_nits; + struct spl_sharpness_range sharpness_range; + enum sharpness_range_source sharpness_source; }; struct dc_plane_info { @@ -1573,6 +1675,7 @@ struct dc_scratch_space { bool blank_stream_on_ocs_change; bool read_dpcd204h_on_irq_hpd; bool force_dp_ffe_preset; + bool skip_phy_ssc_reduction; } wa_flags; union dc_dp_ffe_preset forced_dp_ffe_preset; struct link_mst_stream_allocation_table mst_stream_alloc_table; @@ -1582,6 +1685,8 @@ struct dc_scratch_space { struct gpio *hpd_gpio; enum dc_link_fec_state fec_state; + bool is_dds; + bool is_display_mux_present; bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly struct dc_panel_config panel_config; @@ -1630,12 +1735,15 @@ struct dc { /* Require to optimize clocks and bandwidth for added/removed planes */ bool optimized_required; - bool wm_optimized_required; bool idle_optimizations_allowed; bool enable_c20_dtm_b0; /* Require to maintain clocks and bandwidth for UEFI enabled HW */ + /* For eDP to know the switching state of SmartMux */ + bool is_switch_in_progress_orig; + bool is_switch_in_progress_dest; + /* FBC compressor */ struct compressor *fbc_compressor; @@ -1666,9 +1774,9 @@ struct dc { } scratch; struct dml2_configuration_options dml2_options; - struct dml2_configuration_options dml2_tmp; + struct dml2_configuration_options dml2_dc_power_options; enum dc_acpi_cm_power_state power_state; - + struct soc_and_ip_translator *soc_and_ip_translator; }; struct dc_scaling_info { @@ -1721,6 +1829,23 @@ struct dc_surface_update { struct dc_bias_and_scale bias_and_scale; }; +struct dc_underflow_debug_data { + uint32_t otg_inst; + uint32_t otg_underflow; + uint32_t h_position; + uint32_t v_position; + uint32_t otg_frame_count; + struct dc_underflow_per_hubp_debug_data { + uint32_t hubp_underflow; + uint32_t hubp_in_blank; + uint32_t hubp_readline; + uint32_t det_config_error; + } hubps[MAX_PIPES]; + uint32_t curr_det_sizes[MAX_PIPES]; + uint32_t target_det_sizes[MAX_PIPES]; + uint32_t compbuf_config_error; +}; + /* * Create a new surface with default parameters; */ @@ -1739,8 +1864,6 @@ void dc_3dlut_func_retain(struct dc_3dlut *lut); void dc_post_update_surfaces_to_stream( struct dc *dc); -#include "dc_stream.h" - /** * struct dc_validation_set - Struct to store surface/stream associations for validation */ @@ -1771,19 +1894,15 @@ enum dc_status dc_validate_with_context(struct dc *dc, const struct dc_validation_set set[], int set_count, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); bool dc_set_generic_gpio_for_stereo(bool enable, struct gpio_service *gpio_service); -/* - * fast_validate: we return after determining if we can support the new state, - * but before we populate the programming info - */ enum dc_status dc_validate_global_state( struct dc *dc, struct dc_state *new_ctx, - bool fast_validate); + enum dc_validate_mode validate_mode); bool dc_acquire_release_mpc_3dlut( struct dc *dc, bool acquire, @@ -2379,17 +2498,18 @@ 
void dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link( struct dc_link *link, int peak_bw); /* - * Validate the BW of all the valid DPIA links to make sure it doesn't exceed - * available BW for each host router + * Calculates the DP tunneling bandwidth required for the stream timing + * and aggregates the stream bandwidth for the respective DP tunneling link * - * @dc: pointer to dc struct - * @stream: pointer to all possible streams - * @count: number of valid DPIA streams - * - * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE + * return: dc_status */ -bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams, - const unsigned int count); +enum dc_status dc_link_validate_dp_tunneling_bandwidth(const struct dc *dc, const struct dc_state *new_ctx); + +/* + * Get if ALPM is supported by the link + */ +void dc_link_get_alpm_support(struct dc_link *link, bool *auxless_support, + bool *auxwake_support); /* Sink Interfaces - A sink corresponds to a display output device */ @@ -2618,4 +2738,17 @@ bool dc_is_timing_changed(struct dc_stream_state *cur_stream, bool dc_is_cursor_limit_pending(struct dc *dc); bool dc_can_clear_cursor_limit(struct dc *dc); +/** + * dc_get_underflow_debug_data_for_otg() - Retrieve underflow debug data. + * + * @dc: Pointer to the display core context. + * @primary_otg_inst: Instance index of the primary OTG that underflowed. + * @out_data: Pointer to a dc_underflow_debug_data struct to be filled with debug information. + * + * This function collects and logs underflow-related HW states when underflow happens, + * including OTG underflow status, current read positions, frame count, and per-HUBP debug data. + * The results are stored in the provided out_data structure for further analysis or logging. 
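To show how the interface documented above might be consumed (illustrative only, not part of this patch): dm_report_underflow() is a hypothetical caller, and the DC_LOG_DEBUG() calls assume the file's usual DC_LOGGER setup.

/* Hypothetical underflow handler dumping the new debug snapshot. */
static void dm_report_underflow(struct dc *dc, int otg_inst)
{
	struct dc_underflow_debug_data data = {0};
	int i;

	dc_get_underflow_debug_data_for_otg(dc, otg_inst, &data);

	DC_LOG_DEBUG("OTG%u underflow=%u frame=%u pos=%u,%u\n",
		     data.otg_inst, data.otg_underflow, data.otg_frame_count,
		     data.h_position, data.v_position);

	for (i = 0; i < MAX_PIPES; i++) {
		if (!data.hubps[i].hubp_underflow)
			continue;
		DC_LOG_DEBUG("HUBP%d underflow, DET %u -> %u\n", i,
			     data.curr_det_sizes[i], data.target_det_sizes[i]);
	}
}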
+ */ +void dc_get_underflow_debug_data_for_otg(struct dc *dc, int primary_otg_inst, struct dc_underflow_debug_data *out_data); + #endif /* DC_INTERFACE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index afbcf866520e..53a088ebddef 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -1269,12 +1269,16 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) new_signals.bits.allow_ips1 = 1; new_signals.bits.allow_ips2 = 1; new_signals.bits.allow_z10 = 1; + // New in IPSv2.0 + new_signals.bits.allow_ips1z8 = 1; } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) { new_signals.bits.allow_ips1 = 1; } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) { + // IPSv1.0 only new_signals.bits.allow_pg = 1; new_signals.bits.allow_ips1 = 1; } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) { + // IPSv1.0 only new_signals.bits.allow_pg = 1; new_signals.bits.allow_ips1 = 1; new_signals.bits.allow_ips2 = 1; @@ -1286,6 +1290,8 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) new_signals.bits.allow_ips1 = 1; new_signals.bits.allow_ips2 = 1; new_signals.bits.allow_z10 = 1; + // New in IPSv2.0 + new_signals.bits.allow_ips1z8 = 1; } else { /* RCG only */ new_signals.bits.allow_pg = 0; @@ -1293,8 +1299,28 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) new_signals.bits.allow_ips2 = 0; new_signals.bits.allow_z10 = 0; } + } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_Z8_RETENTION) { + new_signals.bits.allow_pg = 1; + new_signals.bits.allow_ips1 = 1; + new_signals.bits.allow_ips2 = 1; + new_signals.bits.allow_z10 = 1; + } + // Setting RCG allow bits (IPSv2.0) + if (dc->config.disable_ips_rcg == DMUB_IPS_RCG_ENABLE) { + new_signals.bits.allow_ips0_rcg = 1; + new_signals.bits.allow_ips1_rcg = 1; + } else if (dc->config.disable_ips_rcg == DMUB_IPS0_RCG_DISABLE) { + new_signals.bits.allow_ips1_rcg = 1; + } else if (dc->config.disable_ips_rcg == DMUB_IPS1_RCG_DISABLE) { + new_signals.bits.allow_ips0_rcg = 1; + } + // IPS dynamic allow bits (IPSv2 change, vpb use case) + if (dc->config.disable_ips_in_vpb == DMUB_IPS_VPB_ENABLE_IPS1_AND_RCG) { + new_signals.bits.allow_dynamic_ips1 = 1; + } else if (dc->config.disable_ips_in_vpb == DMUB_IPS_VPB_ENABLE_ALL) { + new_signals.bits.allow_dynamic_ips1 = 1; + new_signals.bits.allow_dynamic_ips1_z8 = 1; } - ips_driver->signals = new_signals; dc_dmub_srv->driver_signals = ips_driver->signals; } @@ -1318,7 +1344,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) { struct dc_dmub_srv *dc_dmub_srv; - uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0; + uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0, ips1z8_exit_count = 0; if (dc->debug.dmcub_emulation) return; @@ -1338,31 +1364,34 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) rcg_exit_count = ips_fw->rcg_exit_count; ips1_exit_count = ips_fw->ips1_exit_count; ips2_exit_count = ips_fw->ips2_exit_count; + ips1z8_exit_count = ips_fw->ips1_z8ret_exit_count; ips_driver->signals.all = 0; dc_dmub_srv->driver_signals = ips_driver->signals; DC_LOG_IPS( - "%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u) (count rcg=%u ips1=%u ips2=%u)", + "%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u ips1z8=%u) (count rcg=%u ips1=%u ips2=%u ips1_z8=%u)", 
__func__, ips_driver->signals.bits.allow_ips1, ips_driver->signals.bits.allow_ips2, ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit, + ips_fw->signals.bits.ips1z8_commit, ips_fw->rcg_entry_count, ips_fw->ips1_entry_count, - ips_fw->ips2_entry_count); + ips_fw->ips2_entry_count, + ips_fw->ips1_z8ret_entry_count); /* Note: register access has technically not resumed for DCN here, but we * need to be message PMFW through our standard register interface. */ dc_dmub_srv->needs_idle_wake = false; - if ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) && + if (!dc->caps.ips_v2_support && ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) && (!dc->debug.optimize_ips_handshake || - ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) { + ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle))) { DC_LOG_IPS( - "wait IPS2 eval (ips1_commit=%u ips2_commit=%u)", + "wait IPS2 eval (ips1_commit=%u ips2_commit=%u )", ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); @@ -1422,28 +1451,31 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) dc_dmub_srv_notify_idle(dc, false); if (prev_driver_signals.bits.allow_ips1 || prev_driver_signals.all == 0) { DC_LOG_IPS( - "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u)", + "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u ips1z8=%u)", ips_fw->signals.bits.ips1_commit, - ips_fw->signals.bits.ips2_commit); + ips_fw->signals.bits.ips2_commit, + ips_fw->signals.bits.ips1z8_commit); while (ips_fw->signals.bits.ips1_commit) udelay(1); DC_LOG_IPS( - "wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u)", + "wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u ips1z8=%u)", ips_fw->signals.bits.ips1_commit, - ips_fw->signals.bits.ips2_commit); + ips_fw->signals.bits.ips2_commit, + ips_fw->signals.bits.ips1z8_commit); } } if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) ASSERT(0); - DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u)", + DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u ips1z8=%u)", __func__, rcg_exit_count, ips1_exit_count, - ips2_exit_count); + ips2_exit_count, + ips1z8_exit_count); } void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state) @@ -1656,7 +1688,7 @@ bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_com return result; } -void dc_dmub_srv_fams2_update_config(struct dc *dc, +static void dc_dmub_srv_rb_based_fams2_update_config(struct dc *dc, struct dc_state *context, bool enable) { @@ -1722,6 +1754,63 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc, dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmd, DM_DMUB_WAIT_TYPE_WAIT); } +static void dc_dmub_srv_ib_based_fams2_update_config(struct dc *dc, + struct dc_state *context, + bool enable) +{ + struct dmub_fams2_config_v2 *config = (struct dmub_fams2_config_v2 *)dc->ctx->dmub_srv->dmub->ib_mem_gart.cpu_addr; + union dmub_rb_cmd cmd; + uint32_t i; + + memset(config, 0, sizeof(*config)); + memset(&cmd, 0, sizeof(cmd)); + + cmd.ib_fams2_config.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; + cmd.ib_fams2_config.header.sub_type = DMUB_CMD__FAMS2_IB_CONFIG; + + cmd.ib_fams2_config.ib_data.src.quad_part = dc->ctx->dmub_srv->dmub->ib_mem_gart.gpu_addr; + cmd.ib_fams2_config.ib_data.size = sizeof(*config); + + if (enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) { + /* copy static feature configuration overrides */ + 
config->global.features.bits.enable_stall_recovery = dc->debug.fams2_config.bits.enable_stall_recovery; + config->global.features.bits.enable_offload_flip = dc->debug.fams2_config.bits.enable_offload_flip; + config->global.features.bits.enable_debug = dc->debug.fams2_config.bits.enable_debug; + + /* send global configuration parameters */ + memcpy(&config->global, &context->bw_ctx.bw.dcn.fams2_global_config, + sizeof(struct dmub_cmd_fams2_global_config)); + + /* construct per-stream configs */ + for (i = 0; i < context->bw_ctx.bw.dcn.fams2_global_config.num_streams; i++) { + /* copy stream static base state */ + memcpy(&config->stream_v1[i].base, + &context->bw_ctx.bw.dcn.fams2_stream_base_params[i], + sizeof(config->stream_v1[i].base)); + + /* copy stream static sub-state */ + memcpy(&config->stream_v1[i].sub_state, + &context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2[i], + sizeof(config->stream_v1[i].sub_state)); + } + } + + config->global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2; + config->global.features.bits.enable = enable; + + dm_execute_dmub_cmd_list(dc->ctx, 1, &cmd, DM_DMUB_WAIT_TYPE_WAIT); +} + +void dc_dmub_srv_fams2_update_config(struct dc *dc, + struct dc_state *context, + bool enable) +{ + if (dc->debug.fams_version.major == 2) + dc_dmub_srv_rb_based_fams2_update_config(dc, context, enable); + if (dc->debug.fams_version.major == 3) + dc_dmub_srv_ib_based_fams2_update_config(dc, context, enable); +} + void dc_dmub_srv_fams2_drr_update(struct dc *dc, uint32_t tg_inst, uint32_t vtotal_min, @@ -1847,83 +1936,315 @@ void dc_dmub_srv_fams2_passthrough_flip( } } -bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement) + +bool dc_dmub_srv_ips_residency_cntl(const struct dc_context *ctx, uint8_t panel_inst, bool start_measurement) +{ + union dmub_rb_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.ips_residency_cntl.header.type = DMUB_CMD__IPS; + cmd.ips_residency_cntl.header.sub_type = DMUB_CMD__IPS_RESIDENCY_CNTL; + cmd.ips_residency_cntl.header.payload_bytes = sizeof(struct dmub_cmd_ips_residency_cntl_data); + + // only panel_inst=0 is supported at the moment + cmd.ips_residency_cntl.cntl_data.panel_inst = panel_inst; + cmd.ips_residency_cntl.cntl_data.start_measurement = start_measurement; + + if (!dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + return false; + + return true; +} + +bool dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t panel_inst, struct dmub_ips_residency_info *driver_info, + enum ips_residency_mode ips_mode) { - bool result; + union dmub_rb_cmd cmd; + uint32_t bytes = sizeof(struct dmub_ips_residency_info); - if (!dc_dmub_srv || !dc_dmub_srv->dmub) + dmub_flush_buffer_mem(&ctx->dmub_srv->dmub->scratch_mem_fb); + memset(&cmd, 0, sizeof(cmd)); + + cmd.ips_query_residency_info.header.type = DMUB_CMD__IPS; + cmd.ips_query_residency_info.header.sub_type = DMUB_CMD__IPS_QUERY_RESIDENCY_INFO; + cmd.ips_query_residency_info.header.payload_bytes = sizeof(struct dmub_cmd_ips_query_residency_info_data); + + cmd.ips_query_residency_info.info_data.dest.quad_part = ctx->dmub_srv->dmub->scratch_mem_fb.gpu_addr; + cmd.ips_query_residency_info.info_data.size = bytes; + cmd.ips_query_residency_info.info_data.panel_inst = panel_inst; + cmd.ips_query_residency_info.info_data.ips_mode = (uint32_t)ips_mode; + + if (!dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) || + cmd.ips_query_residency_info.header.ret_status == 
0) return false; - result = dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IPS_RESIDENCY, - start_measurement, NULL, DM_DMUB_WAIT_TYPE_WAIT); + // copy the result to the output since ret_status != 0 means the command returned data + memcpy(driver_info, ctx->dmub_srv->dmub->scratch_mem_fb.cpu_addr, bytes); + + return true; +} + +bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv) +{ + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + union dmub_rb_cmd cmd; + enum dm_dmub_wait_type wait_type; + struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; + bool result; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.cmd_common.header.type = DMUB_CMD__LSDMA; + cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_INIT_CONFIG; + wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; + + lsdma_data->u.init_data.gpu_addr_base.quad_part = dc_ctx->dmub_srv->dmub->lsdma_rb_fb.gpu_addr; + lsdma_data->u.init_data.ring_size = dc_ctx->dmub_srv->dmub->lsdma_rb_fb.size; + + result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type); + + if (!result) + DC_ERROR("LSDMA Init failed in DMUB"); return result; } -void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output) +bool dmub_lsdma_send_linear_copy_command( + struct dc_dmub_srv *dc_dmub_srv, + uint64_t src_addr, + uint64_t dst_addr, + uint32_t count +) { - uint32_t i; - enum dmub_gpint_command command_code; + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + union dmub_rb_cmd cmd; + enum dm_dmub_wait_type wait_type; + struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; + bool result; - if (!dc_dmub_srv || !dc_dmub_srv->dmub) - return; + memset(&cmd, 0, sizeof(cmd)); - switch (output->ips_mode) { - case DMUB_IPS_MODE_IPS1_MAX: - command_code = DMUB_GPINT__GET_IPS1_HISTOGRAM_COUNTER; - break; - case DMUB_IPS_MODE_IPS2: - command_code = DMUB_GPINT__GET_IPS2_HISTOGRAM_COUNTER; - break; - case DMUB_IPS_MODE_IPS1_RCG: - command_code = DMUB_GPINT__GET_IPS1_RCG_HISTOGRAM_COUNTER; - break; - case DMUB_IPS_MODE_IPS1_ONO2_ON: - command_code = DMUB_GPINT__GET_IPS1_ONO2_ON_HISTOGRAM_COUNTER; - break; - default: - command_code = DMUB_GPINT__INVALID_COMMAND; - break; - } + cmd.cmd_common.header.type = DMUB_CMD__LSDMA; + cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_LINEAR_COPY; + wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; - if (command_code == DMUB_GPINT__INVALID_COMMAND) + lsdma_data->u.linear_copy_data.count = count - 1; // LSDMA controller expects bytes to copy -1 + lsdma_data->u.linear_copy_data.src_lo = src_addr & 0xFFFFFFFF; + lsdma_data->u.linear_copy_data.src_hi = (src_addr >> 32) & 0xFFFFFFFF; + lsdma_data->u.linear_copy_data.dst_lo = dst_addr & 0xFFFFFFFF; + lsdma_data->u.linear_copy_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF; + + result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type); + + if (!result) + DC_ERROR("LSDMA Linear Copy failed in DMUB"); + + return result; +} + +bool dmub_lsdma_send_linear_sub_window_copy_command( + struct dc_dmub_srv *dc_dmub_srv, + struct lsdma_linear_sub_window_copy_params copy_data +) +{ + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + union dmub_rb_cmd cmd; + enum dm_dmub_wait_type wait_type; + struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; + bool result; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.cmd_common.header.type = DMUB_CMD__LSDMA; + cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_LINEAR_SUB_WINDOW_COPY; + wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; + + lsdma_data->u.linear_sub_window_copy_data.tmz = copy_data.tmz; + 
lsdma_data->u.linear_sub_window_copy_data.element_size = copy_data.element_size; + lsdma_data->u.linear_sub_window_copy_data.src_lo = copy_data.src_lo; + lsdma_data->u.linear_sub_window_copy_data.src_hi = copy_data.src_hi; + lsdma_data->u.linear_sub_window_copy_data.src_x = copy_data.src_x; + lsdma_data->u.linear_sub_window_copy_data.src_y = copy_data.src_y; + lsdma_data->u.linear_sub_window_copy_data.src_pitch = copy_data.src_pitch; + lsdma_data->u.linear_sub_window_copy_data.src_slice_pitch = copy_data.src_slice_pitch; + lsdma_data->u.linear_sub_window_copy_data.dst_lo = copy_data.dst_lo; + lsdma_data->u.linear_sub_window_copy_data.dst_hi = copy_data.dst_hi; + lsdma_data->u.linear_sub_window_copy_data.dst_x = copy_data.dst_x; + lsdma_data->u.linear_sub_window_copy_data.dst_y = copy_data.dst_y; + lsdma_data->u.linear_sub_window_copy_data.dst_pitch = copy_data.dst_pitch; + lsdma_data->u.linear_sub_window_copy_data.dst_slice_pitch = copy_data.dst_slice_pitch; + lsdma_data->u.linear_sub_window_copy_data.rect_x = copy_data.rect_x; + lsdma_data->u.linear_sub_window_copy_data.rect_y = copy_data.rect_y; + lsdma_data->u.linear_sub_window_copy_data.src_cache_policy = copy_data.src_cache_policy; + lsdma_data->u.linear_sub_window_copy_data.dst_cache_policy = copy_data.dst_cache_policy; + + result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type); + + if (!result) + DC_ERROR("LSDMA Linear Sub Window Copy failed in DMUB"); + + return result; +} + +bool dmub_lsdma_send_tiled_to_tiled_copy_command( + struct dc_dmub_srv *dc_dmub_srv, + struct lsdma_send_tiled_to_tiled_copy_command_params params +) +{ + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + union dmub_rb_cmd cmd; + enum dm_dmub_wait_type wait_type; + struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; + bool result; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.cmd_common.header.type = DMUB_CMD__LSDMA; + cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_TILED_TO_TILED_COPY; + wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; + + lsdma_data->u.tiled_copy_data.src_addr_lo = params.src_addr & 0xFFFFFFFF; + lsdma_data->u.tiled_copy_data.src_addr_hi = (params.src_addr >> 32) & 0xFFFFFFFF; + lsdma_data->u.tiled_copy_data.dst_addr_lo = params.dst_addr & 0xFFFFFFFF; + lsdma_data->u.tiled_copy_data.dst_addr_hi = (params.dst_addr >> 32) & 0xFFFFFFFF; + lsdma_data->u.tiled_copy_data.src_x = params.src_x; + lsdma_data->u.tiled_copy_data.src_y = params.src_y; + lsdma_data->u.tiled_copy_data.dst_x = params.dst_x; + lsdma_data->u.tiled_copy_data.dst_y = params.dst_y; + lsdma_data->u.tiled_copy_data.src_width = params.src_width; + lsdma_data->u.tiled_copy_data.dst_width = params.dst_width; + lsdma_data->u.tiled_copy_data.src_swizzle_mode = params.swizzle_mode; + lsdma_data->u.tiled_copy_data.dst_swizzle_mode = params.swizzle_mode; + lsdma_data->u.tiled_copy_data.src_element_size = params.element_size; + lsdma_data->u.tiled_copy_data.dst_element_size = params.element_size; + lsdma_data->u.tiled_copy_data.rect_x = params.rect_x; + lsdma_data->u.tiled_copy_data.rect_y = params.rect_y; + lsdma_data->u.tiled_copy_data.dcc = params.dcc; + lsdma_data->u.tiled_copy_data.tmz = params.tmz; + lsdma_data->u.tiled_copy_data.read_compress = params.read_compress; + lsdma_data->u.tiled_copy_data.write_compress = params.write_compress; + lsdma_data->u.tiled_copy_data.src_height = params.src_height; + lsdma_data->u.tiled_copy_data.dst_height = params.dst_height; + lsdma_data->u.tiled_copy_data.data_format = params.data_format; + lsdma_data->u.tiled_copy_data.max_com = 
params.max_com; + lsdma_data->u.tiled_copy_data.max_uncom = params.max_uncom; + + result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type); + + if (!result) + DC_ERROR("LSDMA Tiled to Tiled Copy failed in DMUB"); + + return result; +} + +bool dmub_lsdma_send_pio_copy_command( + struct dc_dmub_srv *dc_dmub_srv, + uint64_t src_addr, + uint64_t dst_addr, + uint32_t byte_count, + uint32_t overlap_disable +) +{ + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + union dmub_rb_cmd cmd; + enum dm_dmub_wait_type wait_type; + struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; + bool result; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.cmd_common.header.type = DMUB_CMD__LSDMA; + cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_PIO_COPY; + wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; + + lsdma_data->u.pio_copy_data.packet.fields.byte_count = byte_count; + lsdma_data->u.pio_copy_data.packet.fields.overlap_disable = overlap_disable; + lsdma_data->u.pio_copy_data.src_lo = src_addr & 0xFFFFFFFF; + lsdma_data->u.pio_copy_data.src_hi = (src_addr >> 32) & 0xFFFFFFFF; + lsdma_data->u.pio_copy_data.dst_lo = dst_addr & 0xFFFFFFFF; + lsdma_data->u.pio_copy_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF; + + result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type); + + if (!result) + DC_ERROR("LSDMA PIO Copy failed in DMUB"); + + return result; +} + +bool dmub_lsdma_send_pio_constfill_command( + struct dc_dmub_srv *dc_dmub_srv, + uint64_t dst_addr, + uint32_t byte_count, + uint32_t data +) +{ + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + union dmub_rb_cmd cmd; + enum dm_dmub_wait_type wait_type; + struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; + bool result; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.cmd_common.header.type = DMUB_CMD__LSDMA; + cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_PIO_CONSTFILL; + wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; + + lsdma_data->u.pio_constfill_data.packet.fields.constant_fill = 1; + lsdma_data->u.pio_constfill_data.packet.fields.byte_count = byte_count; + lsdma_data->u.pio_constfill_data.dst_lo = dst_addr & 0xFFFFFFFF; + lsdma_data->u.pio_constfill_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF; + lsdma_data->u.pio_constfill_data.data = data; + + result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type); + + if (!result) + DC_ERROR("LSDMA PIO Constfill failed in DMUB"); + + return result; +} + +bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, uint32_t reg_addr, uint32_t reg_data) +{ + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + union dmub_rb_cmd cmd; + enum dm_dmub_wait_type wait_type; + struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; + bool result; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.cmd_common.header.type = DMUB_CMD__LSDMA; + cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_POLL_REG_WRITE; + wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; + + lsdma_data->u.reg_write_data.reg_addr = reg_addr; + lsdma_data->u.reg_write_data.reg_data = reg_data; + + result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type); + + if (!result) + DC_ERROR("LSDMA Poll Reg failed in DMUB"); + + return result; +} + +void dc_dmub_srv_release_hw(const struct dc *dc) +{ + struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv; + union dmub_rb_cmd cmd = {0}; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) return; - for (i = 0; i < GPINT_RETRY_NUM; i++) { - // false could mean GPINT timeout, in which case we should retry - if (dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT, - 
(uint16_t)(output->ips_mode), &output->residency_percent, - DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - break; - udelay(100); - } + memset(&cmd, 0, sizeof(cmd)); + cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT; + cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_RELEASE_HW; + cmd.idle_opt_notify_idle.header.payload_bytes = + sizeof(cmd.idle_opt_notify_idle) - + sizeof(cmd.idle_opt_notify_idle.header); - if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER, - (uint16_t)(output->ips_mode), - &output->entry_counter, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - output->entry_counter = 0; - - if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_LO, - (uint16_t)(output->ips_mode), - &output->total_active_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - output->total_active_time_us[0] = 0; - if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_HI, - (uint16_t)(output->ips_mode), - &output->total_active_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - output->total_active_time_us[1] = 0; - - if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_LO, - (uint16_t)(output->ips_mode), - &output->total_inactive_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - output->total_inactive_time_us[0] = 0; - if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_HI, - (uint16_t)(output->ips_mode), - &output->total_inactive_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - output->total_inactive_time_us[1] = 0; - - // NUM_IPS_HISTOGRAM_BUCKETS = 16 - for (i = 0; i < 16; i++) - if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, command_code, i, &output->histogram[i], - DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - output->histogram[i] = 0; + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index ada5c2fb2db3..7ef93444ef3c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -210,6 +210,94 @@ void dc_dmub_srv_fams2_passthrough_flip( struct dc_surface_update *srf_updates, int surface_count); +bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv); +bool dmub_lsdma_send_linear_copy_command( + struct dc_dmub_srv *dc_dmub_srv, + uint64_t src_addr, + uint64_t dst_addr, + uint32_t count); + +struct lsdma_linear_sub_window_copy_params { + uint32_t src_lo; + uint32_t src_hi; + + uint32_t dst_lo; + uint32_t dst_hi; + + uint32_t src_x : 16; + uint32_t src_y : 16; + + uint32_t dst_x : 16; + uint32_t dst_y : 16; + + uint32_t rect_x : 16; + uint32_t rect_y : 16; + + uint32_t src_pitch : 16; + uint32_t dst_pitch : 16; + + uint32_t src_slice_pitch; + uint32_t dst_slice_pitch; + + uint32_t tmz : 1; + uint32_t element_size : 3; + uint32_t src_cache_policy : 3; + uint32_t dst_cache_policy : 3; + uint32_t padding : 22; +}; + +bool dmub_lsdma_send_linear_sub_window_copy_command( + struct dc_dmub_srv *dc_dmub_srv, + struct lsdma_linear_sub_window_copy_params copy_data +); +bool dmub_lsdma_send_pio_copy_command( + struct dc_dmub_srv *dc_dmub_srv, + uint64_t src_addr, + uint64_t dst_addr, + uint32_t byte_count, + uint32_t overlap_disable); +bool dmub_lsdma_send_pio_constfill_command( + struct dc_dmub_srv *dc_dmub_srv, + uint64_t dst_addr, + uint32_t byte_count, + uint32_t data); + +struct lsdma_send_tiled_to_tiled_copy_command_params { + uint64_t src_addr; + 
uint64_t dst_addr; + + uint32_t src_x : 16; + uint32_t src_y : 16; + + uint32_t dst_x : 16; + uint32_t dst_y : 16; + + uint32_t src_width : 16; + uint32_t dst_width : 16; + + uint32_t rect_x : 16; + uint32_t rect_y : 16; + + uint32_t src_height : 16; + uint32_t dst_height : 16; + + uint32_t data_format : 6; + uint32_t swizzle_mode : 5; + uint32_t element_size : 3; + uint32_t dcc : 1; + uint32_t tmz : 1; + uint32_t read_compress : 2; + uint32_t write_compress : 2; + uint32_t max_com : 2; + uint32_t max_uncom : 1; + uint32_t padding : 9; +}; + +bool dmub_lsdma_send_tiled_to_tiled_copy_command( + struct dc_dmub_srv *dc_dmub_srv, + struct lsdma_send_tiled_to_tiled_copy_command_params params); +bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, uint32_t reg_addr, uint32_t reg_data); + /** * struct ips_residency_info - struct containing info from dmub_ips_residency_stats * @@ -223,7 +311,7 @@ void dc_dmub_srv_fams2_passthrough_flip( * @histogram: Histogram of given IPS state durations - bucket definitions in dmub_ips.c */ struct ips_residency_info { - enum dmub_ips_mode ips_mode; + enum ips_residency_mode ips_mode; unsigned int residency_percent; unsigned int entry_counter; unsigned int total_active_time_us[2]; @@ -231,21 +319,16 @@ struct ips_residency_info { unsigned int histogram[16]; }; -/** - * bool dc_dmub_srv_ips_residency_cntl() - Controls IPS residency measurement status - * - * @dc_dmub_srv: The DC DMUB service pointer - * @start_measurement: Describes whether to start or stop measurement - * - * Return: true if GPINT was sent successfully, false otherwise - */ -bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement); +bool dc_dmub_srv_ips_residency_cntl(const struct dc_context *ctx, uint8_t panel_inst, bool start_measurement); + +bool dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t panel_inst, + struct dmub_ips_residency_info *driver_info, + enum ips_residency_mode ips_mode); /** - * bool dc_dmub_srv_ips_query_residency_info() - Queries DMCUB for residency info + * dc_dmub_srv_release_hw() - Notifies DMUB service that HW access is no longer required. * - * @dc_dmub_srv: The DC DMUB service pointer - * @output: Output struct to copy the the residency info to + * @dc - pointer to DC object */ -void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output); +void dc_dmub_srv_release_hw(const struct dc *dc); #endif /* _DMUB_DC_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index d346f8ae1634..db669ccb1d58 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -162,6 +162,11 @@ struct dc_link_settings { struct dc_tunnel_settings { bool should_enable_dp_tunneling; bool should_use_dp_bw_allocation; + uint8_t cm_id; + uint8_t group_id; + uint32_t bw_granularity; + uint32_t estimated_bw; + uint32_t allocated_bw; }; union dc_dp_ffe_preset { @@ -957,11 +962,21 @@ union usb4_driver_bw_cap { uint8_t raw; }; +/* DPCD[0xE0021] DP_IN_ADAPTER_TUNNEL_INFORMATION register. 
*/ +union dpia_tunnel_info { + struct { + uint8_t group_id :3; + uint8_t rsvd :5; + } bits; + uint8_t raw; +}; + /* DP Tunneling over USB4 */ struct dpcd_usb4_dp_tunneling_info { union dp_tun_cap_support dp_tun_cap; union dpia_info dpia_info; union usb4_driver_bw_cap driver_bw_cap; + union dpia_tunnel_info dpia_tunnel_info; uint8_t usb4_driver_id; uint8_t usb4_topology_id[DPCD_USB4_TOPOLOGY_ID_LEN]; }; @@ -1006,7 +1021,8 @@ union dp_128b_132b_supported_lttpr_link_rates { union dp_alpm_lttpr_cap { struct { uint8_t AUX_LESS_ALPM_SUPPORTED :1; - uint8_t RESERVED :7; + uint8_t ASSR_SUPPORTED :1; + uint8_t RESERVED :6; } bits; uint8_t raw; }; @@ -1104,10 +1120,11 @@ union dp_128b_132b_training_aux_rd_interval { union edp_alpm_caps { struct { - uint8_t AUX_WAKE_ALPM_CAP :1; - uint8_t PM_STATE_2A_SUPPORT :1; - uint8_t AUX_LESS_ALPM_CAP :1; - uint8_t RESERVED :5; + uint8_t AUX_WAKE_ALPM_CAP :1; + uint8_t PM_STATE_2A_SUPPORT :1; + uint8_t AUX_LESS_ALPM_CAP :1; + uint8_t AUX_LESS_ALPM_ML_PHY_SLEEP_STATUS_SUPPORTED :1; + uint8_t RESERVED :4; } bits; uint8_t raw; }; @@ -1267,6 +1284,7 @@ struct dpcd_caps { union dp_receive_port0_cap receive_port0_cap; /* Indicates the number of SST links supported by MSO (Multi-Stream Output) */ uint8_t mso_cap_sst_links_supported; + uint8_t dp_edp_general_cap_2; }; union dpcd_sink_ext_caps { @@ -1332,7 +1350,9 @@ union dpcd_alpm_configuration { struct { unsigned char ENABLE : 1; unsigned char IRQ_HPD_ENABLE : 1; - unsigned char RESERVED : 6; + unsigned char ALPM_MODE_SEL : 1; + unsigned char ACDS_PERIOD_DURATION : 1; + unsigned char RESERVED : 4; } bits; unsigned char raw; }; diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c index 7217de258851..5a365bd19933 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c @@ -732,7 +732,7 @@ char *dce_version_to_string(const int version) case DCN_VERSION_3_03: return "DCN 3.0.3"; case DCN_VERSION_3_1: - return "DCN 3.1"; + return "DCN 3.1.2"; case DCN_VERSION_3_14: return "DCN 3.1.4"; case DCN_VERSION_3_15: @@ -755,3 +755,8 @@ char *dce_version_to_string(const int version) return "Unknown"; } } + +bool dc_supports_vrr(const enum dce_version v) +{ + return v >= DCE_VERSION_8_0; +} diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index c9f6c6275ca1..667852517246 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -68,7 +68,7 @@ enum dc_plane_addr_type { struct dc_plane_address { enum dc_plane_addr_type type; - bool tmz_surface; + uint8_t tmz_surface; union { struct{ PHYSICAL_ADDRESS_LOC addr; @@ -1104,7 +1104,8 @@ enum mpcc_gamut_remap_mode_select { enum mpcc_gamut_remap_id { MPCC_OGAM_GAMUT_REMAP, MPCC_MCM_FIRST_GAMUT_REMAP, - MPCC_MCM_SECOND_GAMUT_REMAP + MPCC_MCM_SECOND_GAMUT_REMAP, + MPCC_RMCM_GAMUT_REMAP, }; enum cursor_matrix_mode { diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c index e3a8283b4098..55704d4457ef 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c +++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c @@ -128,7 +128,7 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl spl_in->odm_slice_index = resource_get_odm_slice_index(pipe_ctx); // Make spl input basic out info output_size width point to stream h active spl_in->basic_out.output_size.width = - stream->timing.h_addressable + 
stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->hblank_borrow; + stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->dsc_padding_params.dsc_hactive_padding; // Make spl input basic out info output_size height point to v active spl_in->basic_out.output_size.height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top; @@ -156,15 +156,16 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl spl_in->adaptive_sharpness.enable = true; spl_in->adaptive_sharpness.sharpness_level = 0; } else if (sharpness_setting == SHARPNESS_CUSTOM) { - spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_min = 0; - spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_max = 1750; - spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_mid = 750; - spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_min = 0; - spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_max = 3500; - spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_mid = 1500; - spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_min = 0; - spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_max = 2750; - spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_mid = 1500; + /* SAT: read harpness_range from dc_plane_state */ + spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_min = plane_state->sharpness_range.sdr_rgb_min; + spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_max = plane_state->sharpness_range.sdr_rgb_max; + spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_mid = plane_state->sharpness_range.sdr_rgb_mid; + spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_min = plane_state->sharpness_range.sdr_yuv_min; + spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_max = plane_state->sharpness_range.sdr_yuv_max; + spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_mid = plane_state->sharpness_range.sdr_yuv_mid; + spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_min = plane_state->sharpness_range.hdr_rgb_min; + spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_max = plane_state->sharpness_range.hdr_rgb_max; + spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_mid = plane_state->sharpness_range.hdr_rgb_mid; if (force_sharpness_level > 0) { if (force_sharpness_level > 10) diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 341d2ffb64b1..76cf9fdedab0 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -203,6 +203,7 @@ struct dc_stream_state { struct dc_info_packet hfvsif_infopacket; struct dc_info_packet vtem_infopacket; struct dc_info_packet adaptive_sync_infopacket; + struct dc_info_packet avi_infopacket; uint8_t dsc_packed_pps[128]; struct rect src; /* composition area */ struct rect dst; /* stream addressable area */ @@ -335,6 +336,8 @@ struct dc_stream_update { struct dc_info_packet *hfvsif_infopacket; struct dc_info_packet *vtem_infopacket; struct dc_info_packet *adaptive_sync_infopacket; + struct dc_info_packet *avi_infopacket; + bool *dpms_off; bool integer_scaling_update; bool *allow_freesync; @@ -579,6 +582,17 @@ bool dc_stream_set_gamut_remap(struct dc *dc, bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream); +struct dc_rmcm_3dlut *dc_stream_get_3dlut_for_stream( + const struct dc *dc, + const struct dc_stream_state *stream, + bool allocate_one); + +void dc_stream_release_3dlut_for_stream( + const struct dc *dc, + const struct dc_stream_state *stream); + +void 
dc_stream_init_rmcm_3dlut(struct dc *dc); + struct pipe_ctx *dc_stream_get_pipe_ctx(struct dc_stream_state *stream); void dc_dmub_update_dirty_rect(struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index a4cd0eb39a3a..b5aa03a3e39c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -175,6 +175,7 @@ struct dc_panel_patch { unsigned int embedded_tiled_slave; unsigned int disable_fams; unsigned int skip_avmute; + unsigned int skip_audio_sab_check; unsigned int mst_start_top_delay; unsigned int remove_sink_ext_caps; unsigned int disable_colorimetry; @@ -263,6 +264,7 @@ enum dc_timing_source { TIMING_SOURCE_EDID_4BYTE, TIMING_SOURCE_EDID_CEA_DISPLAYID_VTDB, TIMING_SOURCE_EDID_CEA_RID, + TIMING_SOURCE_EDID_DISPLAYID_TYPE5, TIMING_SOURCE_VBIOS, TIMING_SOURCE_CV, TIMING_SOURCE_TV, @@ -561,6 +563,12 @@ struct dc_info_packet_128 { uint8_t sb[128]; }; +struct dc_edid_read_policy { + uint32_t max_retry_count; + uint32_t delay_time_ms; + uint32_t ignore_checksum; +}; + #define DC_PLANE_UPDATE_TIMES_MAX 10 struct dc_plane_flip_time { @@ -569,6 +577,12 @@ struct dc_plane_flip_time { unsigned int prev_update_time_in_us; }; +enum dc_alpm_mode { + DC_ALPM_AUXWAKE = 0, + DC_ALPM_AUXLESS = 1, + DC_ALPM_UNSUPPORTED = 0xF, +}; + enum dc_psr_state { PSR_STATE0 = 0x0, PSR_STATE1, @@ -614,6 +628,7 @@ struct psr_config { unsigned int line_time_in_us; uint8_t rate_control_caps; uint16_t dsc_slice_height; + bool os_request_force_ffu; }; union dmcu_psr_level { @@ -726,6 +741,7 @@ struct psr_context { unsigned int line_time_in_us; uint8_t rate_control_caps; uint16_t dsc_slice_height; + bool os_request_force_ffu; }; struct colorspace_transform { @@ -1135,6 +1151,10 @@ struct replay_config { bool low_rr_supported; /* Replay Video Conferencing Optimization Enabled */ bool replay_video_conferencing_optimization_enabled; + /* Replay alpm mode */ + enum dc_alpm_mode alpm_mode; + /* Replay full screen only */ + bool os_request_force_ffu; }; /* Replay feature flags*/ @@ -1197,6 +1217,7 @@ struct dc_panel_config { bool rc_disable; bool rc_allow_static_screen; bool rc_allow_fullscreen_VPB; + bool read_psrcap_again; unsigned int replay_enable_option; } psr; /* ABM */ @@ -1255,7 +1276,6 @@ enum dc_cm2_gpu_mem_layout { enum dc_cm2_gpu_mem_pixel_component_order { DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA, - DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_BGRA }; enum dc_cm2_gpu_mem_format { @@ -1277,7 +1297,6 @@ struct dc_cm2_gpu_mem_format_parameters { enum dc_cm2_gpu_mem_size { DC_CM2_GPU_MEM_SIZE_171717, - DC_CM2_GPU_MEM_SIZE_333333, DC_CM2_GPU_MEM_SIZE_TRANSFORMED, }; @@ -1315,6 +1334,7 @@ struct dc_cm2_func_luts { bool mpc_3dlut_enable; bool rmcm_3dlut_enable; bool mpc_mcm_post_blend; + uint8_t rmcm_tmz; } lut3d_data; const struct dc_transfer_func *lut1d_func; }; @@ -1372,4 +1392,19 @@ struct set_backlight_level_params { uint8_t aux_inst; }; +enum dc_validate_mode { + /* validate the mode and program HW */ + DC_VALIDATE_MODE_AND_PROGRAMMING = 0, + /* only validate the mode */ + DC_VALIDATE_MODE_ONLY = 1, + /* validate the mode and get the max state (voltage level) */ + DC_VALIDATE_MODE_AND_STATE_INDEX = 2, +}; + +struct dc_validation_dpia_set { + const struct dc_link *link; + const struct dc_tunnel_settings *tunnel_settings; + uint32_t required_bw; +}; + #endif /* DC_TYPES_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c index 
58c84f555c0f..de6d62401362 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c @@ -39,6 +39,7 @@ #define CTX \ dccg_dcn->base.ctx +#include "logger_types.h" #define DC_LOGGER \ dccg->ctx->logger @@ -133,30 +134,34 @@ enum dsc_clk_source { }; -static void dccg35_set_dsc_clk_rcg(struct dccg *dccg, int inst, bool enable) +static void dccg35_set_dsc_clk_rcg(struct dccg *dccg, int inst, bool allow_rcg) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); - if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc && enable) + if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc && allow_rcg) return; switch (inst) { case 0: - REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, enable ? 0 : 1); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1); break; case 1: - REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, enable ? 0 : 1); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1); break; case 2: - REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, enable ? 0 : 1); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1); break; case 3: - REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, enable ? 0 : 1); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1); break; default: BREAK_TO_DEBUGGER(); return; } + + /* Wait for clock to ramp */ + if (!allow_rcg) + udelay(10); } static void dccg35_set_symclk32_se_rcg( @@ -385,35 +390,34 @@ static void dccg35_set_dtbclk_p_rcg(struct dccg *dccg, int inst, bool enable) } } -static void dccg35_set_dppclk_rcg(struct dccg *dccg, - int inst, bool enable) +static void dccg35_set_dppclk_rcg(struct dccg *dccg, int inst, bool allow_rcg) { - struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); - - if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && enable) + if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && allow_rcg) return; switch (inst) { case 0: - REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable ? 0 : 1); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1); break; case 1: - REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable ? 0 : 1); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1); break; case 2: - REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable ? 0 : 1); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1); break; case 3: - REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable ? 0 : 1); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1); break; default: BREAK_TO_DEBUGGER(); break; } - //DC_LOG_DEBUG("%s: inst(%d) DPPCLK rcg_disable: %d\n", __func__, inst, enable ? 
0 : 1); + /* Wait for clock to ramp */ + if (!allow_rcg) + udelay(10); } static void dccg35_set_dpstreamclk_rcg( @@ -1133,7 +1137,7 @@ static void dcn35_set_dppclk_enable(struct dccg *dccg, default: break; } - //DC_LOG_DEBUG("%s: dpp_inst(%d) DPPCLK_EN = %d\n", __func__, dpp_inst, enable); + DC_LOG_DEBUG("%s: dpp_inst(%d) DPPCLK_EN = %d\n", __func__, dpp_inst, enable); } @@ -1177,32 +1181,34 @@ static void dccg35_update_dpp_dto(struct dccg *dccg, int dpp_inst, } static void dccg35_set_dppclk_root_clock_gating(struct dccg *dccg, - uint32_t dpp_inst, uint32_t enable) + uint32_t dpp_inst, uint32_t disallow_rcg) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); - if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) + if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && !disallow_rcg) return; switch (dpp_inst) { case 0: - REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, disallow_rcg); break; case 1: - REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, disallow_rcg); break; case 2: - REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, disallow_rcg); break; case 3: - REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, disallow_rcg); break; default: break; } - //DC_LOG_DEBUG("%s: dpp_inst(%d) rcg: %d\n", __func__, dpp_inst, enable); + /* Wait for clock to ramp */ + if (disallow_rcg) + udelay(10); } static void dccg35_get_pixel_rate_div( @@ -1401,6 +1407,10 @@ static void dccg35_set_dtbclk_dto( * PIPEx_DTO_SRC_SEL should not be programmed during DTBCLK update since OTG may still be on, and the * programming is handled in program_pix_clk() regardless, so it can be removed from here. */ + DC_LOG_DEBUG("%s: OTG%d DTBCLK DTO enabled: pixclk_khz=%d, ref_dtbclk_khz=%d, req_dtbclk_khz=%d, phase=%d, modulo=%d\n", + __func__, params->otg_inst, params->pixclk_khz, + params->ref_dtbclk_khz, req_dtbclk_khz, phase, modulo); + } else { switch (params->otg_inst) { case 0: @@ -1426,6 +1436,8 @@ static void dccg35_set_dtbclk_dto( REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0); REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0); + + DC_LOG_DEBUG("%s: OTG%d DTBCLK DTO disabled\n", __func__, params->otg_inst); } } @@ -1470,6 +1482,8 @@ static void dccg35_set_dpstreamclk( BREAK_TO_DEBUGGER(); return; } + DC_LOG_DEBUG("%s: dp_hpo_inst(%d) DPSTREAMCLK_EN = %d, DPSTREAMCLK_SRC_SEL = %d\n", + __func__, dp_hpo_inst, (src == REFCLK) ? 0 : 1, otg_inst); } @@ -1509,6 +1523,8 @@ static void dccg35_set_dpstreamclk_root_clock_gating( BREAK_TO_DEBUGGER(); return; } + DC_LOG_DEBUG("%s: dp_hpo_inst(%d) DPSTREAMCLK_ROOT_GATE_DISABLE = %d\n", + __func__, dp_hpo_inst, enable ? 1 : 0); } @@ -1548,7 +1564,7 @@ static void dccg35_set_physymclk_root_clock_gating( BREAK_TO_DEBUGGER(); return; } - //DC_LOG_DEBUG("%s: dpp_inst(%d) PHYESYMCLK_ROOT_GATE_DISABLE:\n", __func__, phy_inst, enable ? 0 : 1); + DC_LOG_DEBUG("%s: dpp_inst(%d) PHYESYMCLK_ROOT_GATE_DISABLE: %d\n", __func__, phy_inst, enable ? 0 : 1); } @@ -1621,6 +1637,8 @@ static void dccg35_set_physymclk( BREAK_TO_DEBUGGER(); return; } + DC_LOG_DEBUG("%s: phy_inst(%d) PHYxSYMCLK_EN = %d, PHYxSYMCLK_SRC_SEL = %d\n", + __func__, phy_inst, force_enable ? 
1 : 0, clk_src); } static void dccg35_set_valid_pixel_rate( @@ -1668,6 +1686,7 @@ static void dccg35_dpp_root_clock_control( } dccg->dpp_clock_gated[dpp_inst] = !clock_on; + DC_LOG_DEBUG("%s: dpp_inst(%d) clock_on = %d\n", __func__, dpp_inst, clock_on); } static void dccg35_disable_symclk32_se( @@ -1726,6 +1745,7 @@ static void dccg35_disable_symclk32_se( BREAK_TO_DEBUGGER(); return; } + } static void dccg35_init_cb(struct dccg *dccg) @@ -1733,7 +1753,6 @@ static void dccg35_init_cb(struct dccg *dccg) (void)dccg; /* Any RCG should be done when driver enter low power mode*/ } - void dccg35_init(struct dccg *dccg) { int otg_inst; @@ -1748,6 +1767,8 @@ void dccg35_init(struct dccg *dccg) for (otg_inst = 0; otg_inst < 2; otg_inst++) { dccg31_disable_symclk32_le(dccg, otg_inst); dccg31_set_symclk32_le_root_clock_gating(dccg, otg_inst, false); + DC_LOG_DEBUG("%s: OTG%d SYMCLK32_LE disabled and root clock gating disabled\n", + __func__, otg_inst); } // if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) @@ -1760,6 +1781,8 @@ void dccg35_init(struct dccg *dccg) dccg35_set_dpstreamclk(dccg, REFCLK, otg_inst, otg_inst); dccg35_set_dpstreamclk_root_clock_gating(dccg, otg_inst, false); + DC_LOG_DEBUG("%s: OTG%d DPSTREAMCLK disabled and root clock gating disabled\n", + __func__, otg_inst); } /* @@ -1782,8 +1805,7 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst) //Disable DTO switch (inst) { case 0: - if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 1); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 1); REG_UPDATE_2(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_PHASE, 0, @@ -1791,8 +1813,7 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst) REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 1); break; case 1: - if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 1); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 1); REG_UPDATE_2(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_PHASE, 0, @@ -1800,8 +1821,7 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst) REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 1); break; case 2: - if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 1); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 1); REG_UPDATE_2(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_PHASE, 0, @@ -1809,8 +1829,7 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst) REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 1); break; case 3: - if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 1); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 1); REG_UPDATE_2(DSCCLK3_DTO_PARAM, DSCCLK3_DTO_PHASE, 0, @@ -1821,6 +1840,9 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst) BREAK_TO_DEBUGGER(); return; } + + /* Wait for clock to ramp */ + udelay(10); } static void dccg35_disable_dscclk(struct dccg *dccg, @@ -1864,6 +1886,9 @@ static void dccg35_disable_dscclk(struct dccg *dccg, default: return; } + + /* Wait for clock ramp */ + udelay(10); } static void dccg35_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst) @@ -2349,10 +2374,7 @@ static void dccg35_disable_symclk_se_cb( void dccg35_root_gate_disable_control(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating) { - - if 
(dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) { - dccg35_set_dppclk_root_clock_gating(dccg, pipe_idx, disable_clock_gating); - } + dccg35_set_dppclk_root_clock_gating(dccg, pipe_idx, disable_clock_gating); } static const struct dccg_funcs dccg35_funcs_new = { diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c index ffd172231fdf..0b8ed9b94d3c 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c @@ -619,7 +619,7 @@ void dccg401_set_dp_dto( dto_integer = div_u64(params->pixclk_hz, dto_modulo_hz); dto_phase_hz = params->pixclk_hz - dto_integer * dto_modulo_hz; - if (dto_phase_hz <= 0) { + if (dto_phase_hz <= 0 && dto_integer <= 0) { /* negative pixel rate should never happen */ BREAK_TO_DEBUGGER(); return; @@ -727,7 +727,7 @@ void dccg401_init(struct dccg *dccg) } } -void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst) +void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst, uint32_t num_slices_h) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h index 55e8718aad22..5947a35363aa 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h @@ -209,7 +209,7 @@ void dccg401_disable_symclk32_le( struct dccg *dccg, int hpo_le_inst); void dccg401_disable_dpstreamclk(struct dccg *dccg, int dp_hpo_inst); -void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst); +void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst, uint32_t num_slices_h); void dccg401_set_ref_dscclk(struct dccg *dccg, uint32_t dsc_inst); void dccg401_set_src_sel( @@ -230,7 +230,6 @@ void dccg401_set_dp_dto( const struct dp_dto_params *params); void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst); void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst); -void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst); void dccg401_set_dtbclk_p_src( struct dccg *dccg, enum streamclk_source src, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index bb4ac5042c80..673bb87d2c17 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -725,14 +725,18 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, for (i = 0; i < AUX_MAX_RETRIES; i++) { DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, LOG_FLAG_I2cAux_DceAux, - "dce_aux_transfer_with_retries: link_index=%u: START: retry %d of %d: address=0x%04x length=%u write=%d mot=%d", + "dce_aux_transfer_with_retries: link_index=%u: START: retry %d of %d: " + "address=0x%04x length=%u write=%d mot=%d is_i2c=%d is_dpia=%d ddc_hw_inst=%d", ddc && ddc->link ? ddc->link->link_index : UINT_MAX, i + 1, (int)AUX_MAX_RETRIES, payload->address, payload->length, (unsigned int) payload->write, - (unsigned int) payload->mot); + (unsigned int) payload->mot, + payload->i2c_over_aux, + (ddc->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? 
true : false, + ddc->link->ddc_hw_inst); if (payload->write) dce_aux_log_payload(" write", payload->data, payload->length, 16); @@ -746,7 +750,9 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, LOG_FLAG_I2cAux_DceAux, - "dce_aux_transfer_with_retries: link_index=%u: END: retry %d of %d: address=0x%04x length=%u write=%d mot=%d: ret=%d operation_result=%d payload->reply=%u", + "dce_aux_transfer_with_retries: link_index=%u: END: retry %d of %d: " + "address=0x%04x length=%u write=%d mot=%d: ret=%d operation_result=%d " + "payload->reply=%u is_i2c=%d is_dpia=%d ddc_hw_inst=%d", ddc && ddc->link ? ddc->link->link_index : UINT_MAX, i + 1, (int)AUX_MAX_RETRIES, @@ -756,7 +762,10 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, (unsigned int) payload->mot, ret, (int)operation_result, - (unsigned int) *payload->reply); + (unsigned int) *payload->reply, + payload->i2c_over_aux, + (ddc->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? true : false, + ddc->link->ddc_hw_inst); if (!payload->write) dce_aux_log_payload(" read", payload->data, ret > 0 ? ret : 0, 16); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c index d28826c3ae5f..365dd2e37aea 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -292,9 +292,35 @@ static void set_speed( FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2); } +static bool acquire_engine(struct dce_i2c_hw *dce_i2c_hw) +{ + uint32_t arbitrate = 0; + + REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate); + switch (arbitrate) { + case DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW: + return true; + case DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW: + return false; + case DC_I2C_STATUS__DC_I2C_STATUS_IDLE: + default: + break; + } + + REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, true); + REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate); + if (arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW) + return false; + + return true; +} + static bool setup_engine( struct dce_i2c_hw *dce_i2c_hw) { + // Deassert soft reset to unblock I2C engine registers + REG_UPDATE(DC_I2C_CONTROL, DC_I2C_SOFT_RESET, false); + uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE; uint32_t reset_length = 0; @@ -309,8 +335,8 @@ static bool setup_engine( REG_UPDATE_N(SETUP, 1, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_EN), 1); - /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/ - REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1); + if (!acquire_engine(dce_i2c_hw)) + return false; /*set SW requested I2c speed to default, if API calls in it will be override later*/ set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz); @@ -319,9 +345,8 @@ static bool setup_engine( i2c_setup_limit = dce_i2c_hw->setup_limit; /* Program pin select */ - REG_UPDATE_6(DC_I2C_CONTROL, + REG_UPDATE_5(DC_I2C_CONTROL, DC_I2C_GO, 0, - DC_I2C_SOFT_RESET, 0, DC_I2C_SEND_RESET, 0, DC_I2C_SW_STATUS_RESET, 1, DC_I2C_TRANSACTION_COUNT, 0, @@ -351,6 +376,32 @@ static bool setup_engine( return true; } +/** + * cntl_stuck_hw_workaround - Workaround for I2C engine stuck state + * @dce_i2c_hw: Pointer to dce_i2c_hw structure + * + * If we boot without an HDMI display, the I2C engine does not get initialized + * correctly. One of its symptoms is that SW_USE_I2C does not get cleared after + * acquire. 
After setting SW_DONE_USING_I2C on release, the engine gets + * immediately reacquired by SW, preventing DMUB from using it. + * + * This function checks the I2C arbitration status and applies a release + * workaround if necessary. + */ +static void cntl_stuck_hw_workaround(struct dce_i2c_hw *dce_i2c_hw) +{ + uint32_t arbitrate = 0; + + REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate); + if (arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW) + return; + + // Still acquired after release, release again as a workaround + REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, true); + REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate); + ASSERT(arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW); +} + static void release_engine( struct dce_i2c_hw *dce_i2c_hw) { @@ -378,9 +429,9 @@ static void release_engine( /*for HW HDCP Ri polling failure w/a test*/ set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz_hdcp); - /* Release I2C after reset, so HW or DMCU could use it */ - REG_UPDATE_2(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, 1, - DC_I2C_SW_USE_I2C_REG_REQ, 0); + // Release I2C engine so it can be used by HW or DMCU, automatically clears SW_USE_I2C + REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, true); + cntl_stuck_hw_workaround(dce_i2c_hw); if (dce_i2c_hw->ctx->dc->debug.enable_mem_low_power.bits.i2c) { if (dce_i2c_hw->regs->DIO_MEM_PWR_CTRL) @@ -540,7 +591,7 @@ static bool dce_i2c_hw_engine_submit_payload(struct dce_i2c_hw *dce_i2c_hw, DCE_I2C_TRANSACTION_ACTION_I2C_WRITE; - request.address = (uint8_t) ((payload->address << 1) | !payload->write); + request.address = (uint8_t) ((payload->address << 1) | (payload->write ? 0 : 1)); request.length = payload->length; request.data = payload->data; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c index e188447c8156..2d73b94c515c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c @@ -451,7 +451,7 @@ static bool dce_i2c_sw_engine_submit_payload(struct dce_i2c_sw *engine, DCE_I2C_TRANSACTION_ACTION_I2C_WRITE_MOT : DCE_I2C_TRANSACTION_ACTION_I2C_WRITE; - request.address = (uint8_t) ((payload->address << 1) | !payload->write); + request.address = (uint8_t) ((payload->address << 1) | (payload->write ? 
0 : 1)); request.length = payload->length; request.data = payload->data; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index 4a9d07c31bc5..0c50fe266c8a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -896,13 +896,13 @@ void dce110_link_encoder_construct( enc110->base.id, &bp_cap_info); /* Override features with DCE-specific values */ - if (BP_RESULT_OK == result) { + if (result == BP_RESULT_OK) { enc110->base.features.flags.bits.IS_HBR2_CAPABLE = bp_cap_info.DP_HBR2_EN; enc110->base.features.flags.bits.IS_HBR3_CAPABLE = bp_cap_info.DP_HBR3_EN; enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; - } else { + } else if (result != BP_RESULT_NORECORD) { DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n", __func__, result); @@ -1798,13 +1798,13 @@ void dce60_link_encoder_construct( enc110->base.id, &bp_cap_info); /* Override features with DCE-specific values */ - if (BP_RESULT_OK == result) { + if (result == BP_RESULT_OK) { enc110->base.features.flags.bits.IS_HBR2_CAPABLE = bp_cap_info.DP_HBR2_EN; enc110->base.features.flags.bits.IS_HBR3_CAPABLE = bp_cap_info.DP_HBR3_EN; enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; - } else { + } else if (result != BP_RESULT_NORECORD) { DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n", __func__, result); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c index ff3b8244ba3d..87af4fdc04a6 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c @@ -391,7 +391,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub, sizeof(DP_SINK_DEVICE_STR_ID_1))) link->psr_settings.force_ffu_mode = 1; - copy_settings_data->force_ffu_mode = link->psr_settings.force_ffu_mode; + copy_settings_data->force_ffu_mode = link->psr_settings.force_ffu_mode || psr_context->os_request_force_ffu; if (((link->dpcd_caps.fec_cap.bits.FEC_CAPABLE && !link->dc->debug.disable_fec) && diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c index fcd3d86ad517..f9542edff14b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c @@ -3,6 +3,7 @@ // Copyright 2024 Advanced Micro Devices, Inc. 
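The dce_i2c_hw.c and dce_i2c_sw.c hunks above rewrite the I2C address-byte computation from "| !payload->write" to an explicit "(payload->write ? 0 : 1)"; the result is unchanged. For reference, the convention both sites encode is the standard I2C one: the 7-bit target address occupies bits [7:1] and bit 0 carries the direction flag, 0 for a write and 1 for a read. A minimal illustrative helper (the name is hypothetical, not part of this patch) would look like:

static inline uint8_t i2c_address_byte(uint8_t target_7bit, bool is_write)
{
        /* 7-bit address in bits [7:1], R/W flag in bit 0 (0 = write, 1 = read) */
        return (uint8_t)((target_7bit << 1) | (is_write ? 0 : 1));
}

The explicit ternary reads more clearly than relying on the integer promotion of a negated bool, which is presumably why both call sites were changed the same way.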
#include "dc.h" +#include "link_service.h" #include "dc_dmub_srv.h" #include "dmub/dmub_srv.h" #include "core_types.h" @@ -168,6 +169,7 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub, copy_settings_data->max_deviation_line = link->dpcd_caps.pr_info.max_deviation_line; copy_settings_data->smu_optimizations_en = link->replay_settings.replay_smu_opt_enable; copy_settings_data->replay_timing_sync_supported = link->replay_settings.config.replay_timing_sync_supported; + copy_settings_data->replay_support_fast_resync_in_ultra_sleep_mode = link->replay_settings.config.replay_support_fast_resync_in_ultra_sleep_mode; copy_settings_data->debug.bitfields.enable_ips_visual_confirm = dc->dc->debug.enable_ips_visual_confirm; @@ -189,6 +191,18 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub, else copy_settings_data->flags.bitfields.force_wakeup_by_tps3 = 0; + copy_settings_data->flags.bitfields.alpm_mode = (enum dmub_alpm_mode)link->replay_settings.config.alpm_mode; + if (link->replay_settings.config.alpm_mode == DC_ALPM_AUXLESS) { + copy_settings_data->auxless_alpm_data.lfps_setup_ns = dc->dc->debug.auxless_alpm_lfps_setup_ns; + copy_settings_data->auxless_alpm_data.lfps_period_ns = dc->dc->debug.auxless_alpm_lfps_period_ns; + copy_settings_data->auxless_alpm_data.lfps_silence_ns = dc->dc->debug.auxless_alpm_lfps_silence_ns; + copy_settings_data->auxless_alpm_data.lfps_t1_t2_override_us = + dc->dc->debug.auxless_alpm_lfps_t1t2_us; + copy_settings_data->auxless_alpm_data.lfps_t1_t2_offset_us = + dc->dc->debug.auxless_alpm_lfps_t1t2_offset_us; + copy_settings_data->auxless_alpm_data.lttpr_count = link->dc->link_srv->dp_get_lttpr_count(link); + } + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c index e0558a78b11c..1c1228116487 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c @@ -812,7 +812,7 @@ bool dcn10_link_encoder_validate_output_with_stream( enc10, &stream->timing); break; case SIGNAL_TYPE_EDP: - is_valid = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ? 
true : false; + is_valid = stream->timing.pixel_encoding == PIXEL_ENCODING_RGB; break; case SIGNAL_TYPE_VIRTUAL: is_valid = true; diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c index 22e66b375a7f..d928b4dcf6b8 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c @@ -28,7 +28,7 @@ #include "dcn10_stream_encoder.h" #include "reg_helper.h" #include "hw_shared.h" -#include "link.h" +#include "link_service.h" #include "dpcd_defs.h" #include "dcn30/dcn30_afmt.h" diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c index 0b47aeb60e79..bec0b4aaeb2b 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c @@ -29,7 +29,7 @@ #include "dcn20_stream_encoder.h" #include "reg_helper.h" #include "hw_shared.h" -#include "link.h" +#include "link_service.h" #include "dpcd_defs.h" #define DC_LOGGER \ diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c index 9a92f73d5b7f..84cc2ddc52fe 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c @@ -37,7 +37,7 @@ #include "link_enc_cfg.h" #include "dc_dmub_srv.h" #include "dal_asic_id.h" -#include "link.h" +#include "link_service.h" #define CTX \ enc10->base.ctx diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c index ae81451a3a72..3e85e9c3d2cb 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c @@ -30,7 +30,7 @@ #include "dcn314_dio_stream_encoder.h" #include "reg_helper.h" #include "hw_shared.h" -#include "link.h" +#include "link_service.h" #include "dpcd_defs.h" #define DC_LOGGER \ diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c index 1a9bb614c41e..3523d1cdc1a3 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c @@ -29,7 +29,7 @@ #include "dcn32_dio_stream_encoder.h" #include "reg_helper.h" #include "hw_shared.h" -#include "link.h" +#include "link_service.h" #include "dpcd_defs.h" #define DC_LOGGER \ diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c index 6ab2a218b769..fd5d1dbf9dc6 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c @@ -29,7 +29,7 @@ #include "dcn35_dio_stream_encoder.h" #include "reg_helper.h" #include "hw_shared.h" -#include "link.h" +#include "link_service.h" #include "dpcd_defs.h" #define DC_LOGGER \ @@ -397,7 +397,7 @@ static bool enc35_is_fifo_enabled(struct stream_encoder *enc) uint32_t reset_val; REG_GET(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, &reset_val); - return (reset_val == 0) ? 
false : true; + return reset_val != 0; } void enc35_disable_fifo(struct stream_encoder *enc) { diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c index d5fa551dd3c9..99aab70ef3e1 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c @@ -32,7 +32,7 @@ #include "dcn401_dio_stream_encoder.h" #include "reg_helper.h" #include "hw_shared.h" -#include "link.h" +#include "link_service.h" #include "dpcd_defs.h" #define DC_LOGGER \ diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h index 7b9c22c45453..fbbf9c757b3c 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services.h @@ -277,12 +277,13 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc /* * SMU message tracing */ -void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx); -void dm_trace_smu_delay(uint32_t delay, struct dc_context *ctx); - -#define TRACE_SMU_MSG(msg_id, param_in, ctx) dm_trace_smu_msg(msg_id, param_in, ctx) -#define TRACE_SMU_DELAY(response_delay, ctx) dm_trace_smu_delay(response_delay, ctx) +void dm_trace_smu_enter(uint32_t msg_id, uint32_t param_in, unsigned int delay, struct dc_context *ctx); +void dm_trace_smu_exit(bool success, uint32_t response, struct dc_context *ctx); +#define TRACE_SMU_MSG_DELAY(msg_id, param_in, delay, ctx) dm_trace_smu_enter(msg_id, param_in, delay, ctx) +#define TRACE_SMU_MSG(msg_id, param_in, ctx) dm_trace_smu_enter(msg_id, param_in, 0, ctx) +#define TRACE_SMU_MSG_ENTER(msg_id, param_in, ctx) dm_trace_smu_enter(msg_id, param_in, 0, ctx) +#define TRACE_SMU_MSG_EXIT(success, response, ctx) dm_trace_smu_exit(success, response, ctx) /* * DMUB Interfaces @@ -311,4 +312,6 @@ void dm_dtn_log_end(struct dc_context *ctx, char *dce_version_to_string(const int version); +bool dc_supports_vrr(const enum dce_version v); + #endif /* __DM_SERVICES_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h index bf63da266a18..3b093b8699ab 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h @@ -127,7 +127,7 @@ struct dm_pp_single_disp_config { uint32_t src_height; uint32_t src_width; uint32_t v_refresh; - uint32_t sym_clock; /* HDMI only */ + uint32_t pixel_clock; /* Pixel clock in KHz (for HDMI only: normalized) */ struct dc_link_settings link_settings; /* DP only */ }; diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index e1d500633dfa..b357683b4255 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -114,9 +114,6 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calcs.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_auto.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_rcflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dcn401/dcn401_fpu.o := $(dml_ccflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn401/dcn401_fpu.o := $(dml_rcflags) - ifdef CONFIG_DRM_AMD_DC_FP DML += display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o DML += dcn10/dcn10_fpu.o @@ -137,7 +134,6 @@ DML += dcn303/dcn303_fpu.o DML += dcn314/dcn314_fpu.o DML += dcn35/dcn35_fpu.o DML += 
dcn351/dcn351_fpu.o -DML += dcn401/dcn401_fpu.o DML += dsc/rc_calc_fpu.o DML += calcs/dcn_calcs.o calcs/dcn_calc_math.o calcs/dcn_calc_auto.o endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c index f1235bf9a596..74962791302f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c @@ -748,7 +748,7 @@ static unsigned int get_highest_allowed_voltage_level(bool is_vmin_only_asic) bool dcn_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { /* * we want a breakdown of the various stages of validation, which the @@ -1119,7 +1119,7 @@ bool dcn_validate_bandwidth( BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (v->voltage_level != number_of_states_plus_one && !fast_validate) { + if (v->voltage_level != number_of_states_plus_one && validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) { float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second; if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65) @@ -1286,7 +1286,7 @@ bool dcn_validate_bandwidth( } } else if (v->voltage_level == number_of_states_plus_one) { BW_VAL_TRACE_SKIP(fail); - } else if (fast_validate) { + } else if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index e9fea9c2162e..7aaf13bbd4e4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -30,8 +30,7 @@ #include "dcn20/dcn20_resource.h" #include "dcn21/dcn21_resource.h" #include "clk_mgr/dcn21/rn_clk_mgr.h" - -#include "link.h" +#include "link_service.h" #include "dcn20_fpu.h" #include "dc_state_priv.h" @@ -1315,7 +1314,7 @@ static void swizzle_to_dml_params( int dcn20_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int pipe_cnt, i; bool synchronized_vblank = true; @@ -1733,7 +1732,7 @@ void dcn20_calculate_wm(struct dc *dc, struct dc_state *context, int *out_pipe_cnt, int *pipe_split_from, int vlevel, - bool fast_validate) + enum dc_validate_mode validate_mode) { int pipe_cnt, i, pipe_idx; @@ -1780,10 +1779,10 @@ void dcn20_calculate_wm(struct dc *dc, struct dc_state *context, if (pipe_cnt != pipe_idx) { if (dc->res_pool->funcs->populate_dml_pipes) pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, - context, pipes, fast_validate); + context, pipes, validate_mode); else pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, - context, pipes, fast_validate); + context, pipes, validate_mode); } *out_pipe_cnt = pipe_cnt; @@ -2027,7 +2026,7 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st } static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *context, - bool fast_validate, display_e2e_pipe_params_st *pipes) + enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes) { bool out = false; @@ -2040,7 +2039,7 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co BW_VAL_TRACE_COUNT(); - out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, fast_validate); + out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, validate_mode); if (pipe_cnt == 0) goto validate_out; @@ 
-2050,12 +2049,12 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (fast_validate) { + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } - dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate); + dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, validate_mode); dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); BW_VAL_TRACE_END_WATERMARKS(); @@ -2077,7 +2076,7 @@ validate_out: } bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, - bool fast_validate, display_e2e_pipe_params_st *pipes) + enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes) { bool voltage_supported = false; bool full_pstate_supported = false; @@ -2095,12 +2094,11 @@ bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, /*Unsafe due to current pipe merge and split logic*/ ASSERT(context != dc->current_state); - if (fast_validate) { - return dcn20_validate_bandwidth_internal(dc, context, true, pipes); - } + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) + return dcn20_validate_bandwidth_internal(dc, context, validate_mode, pipes); // Best case, we support full UCLK switch latency - voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes); + voltage_supported = dcn20_validate_bandwidth_internal(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING, pipes); full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support; if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 || @@ -2113,7 +2111,7 @@ bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us; memset(pipes, 0, dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st)); - voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes); + voltage_supported = dcn20_validate_bandwidth_internal(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING, pipes); dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support; if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) { @@ -2156,14 +2154,14 @@ void dcn20_fpu_adjust_dppclk(struct vba_vars_st *v, int dcn21_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { uint32_t pipe_cnt; int i; dc_assert_fp_enabled(); - pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); for (i = 0; i < pipe_cnt; i++) { @@ -2239,7 +2237,7 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context, int *out_pipe_cnt, int *pipe_split_from, int vlevel_req, - bool fast_validate) + enum dc_validate_mode validate_mode) { int pipe_cnt, i, pipe_idx; int vlevel, vlevel_max; @@ -2281,10 +2279,10 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context, if (pipe_cnt != pipe_idx) { if (dc->res_pool->funcs->populate_dml_pipes) pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, - context, pipes, fast_validate); + context, pipes, validate_mode); else pipe_cnt = dcn21_populate_dml_pipes_from_context(dc, - context, pipes, fast_validate); + context, pipes, validate_mode); } *out_pipe_cnt = pipe_cnt; @@ -2319,7 +2317,7 @@ static void 
dcn21_calculate_wm(struct dc *dc, struct dc_state *context, } bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, - bool fast_validate, display_e2e_pipe_params_st *pipes) + enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes) { bool out = false; @@ -2337,7 +2335,7 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, /*Unsafe due to current pipe merge and split logic*/ ASSERT(context != dc->current_state); - out = dcn21_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, fast_validate); + out = dcn21_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, validate_mode); if (pipe_cnt == 0) goto validate_out; @@ -2347,12 +2345,12 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (fast_validate) { + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } - dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate); + dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, validate_mode); dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); BW_VAL_TRACE_END_WATERMARKS(); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h index b6c34198ddc8..aed00039ca62 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h @@ -44,14 +44,14 @@ void dcn20_calculate_dlg_params(struct dc *dc, int dcn20_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn20_calculate_wm(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int *out_pipe_cnt, int *pipe_split_from, int vlevel, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn20_cap_soc_clocks(struct _vcs_dpi_soc_bounding_box_st *bb, struct pp_smu_nv_clock_table max_clocks); void dcn20_update_bounding_box(struct dc *dc, @@ -62,7 +62,7 @@ void dcn20_update_bounding_box(struct dc *dc, void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb); bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, - bool fast_validate, display_e2e_pipe_params_st *pipes); + enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes); void dcn20_fpu_set_wm_ranges(int i, struct pp_smu_wm_range_sets *ranges, struct _vcs_dpi_soc_bounding_box_st *loaded_bb); @@ -75,9 +75,9 @@ void dcn20_fpu_adjust_dppclk(struct vba_vars_st *v, int dcn21_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); -bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, bool - fast_validate, display_e2e_pipe_params_st *pipes); + enum dc_validate_mode validate_mode); +bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, enum + dc_validate_mode, display_e2e_pipe_params_st *pipes); void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); void dcn21_clk_mgr_set_bw_params_wm_table(struct clk_bw_params *bw_params); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c index 390c1a77fda6..9c58ff1069d6 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c @@ -646,7 +646,7 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib, // the dpte_group_bytes is reduced for the specific case of vertical // access of a tile surface that has dpte request of 8x1 ptes. - if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group + if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group rq_sizing_param->dpte_group_bytes = 512; else //full size diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c index 843d6004258c..570e6e39eb45 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c @@ -646,7 +646,7 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib, // the dpte_group_bytes is reduced for the specific case of vertical // access of a tile surface that has dpte request of 8x1 ptes. - if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group + if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group rq_sizing_param->dpte_group_bytes = 512; else //full size diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c index 5718000627b0..f549da082c01 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c @@ -652,7 +652,7 @@ static void get_meta_and_pte_attr( if (hostvm_enable) rq_sizing_param->dpte_group_bytes = 512; else { - if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group + if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group rq_sizing_param->dpte_group_bytes = 512; else //full size diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c index 88789987bdbc..e5f5c0663750 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c @@ -339,7 +339,8 @@ void dcn30_fpu_calculate_wm_and_dlg( * newly found dummy_latency_index */ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; - dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true); + dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, + DC_VALIDATE_MODE_AND_PROGRAMMING, true); maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported; @@ -630,7 +631,8 @@ int dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc, while (dummy_latency_index < max_latency_table_entries) { context->bw_ctx.dml.soc.dram_clock_change_latency_us = 
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; - dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true); + dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, + DC_VALIDATE_MODE_AND_PROGRAMMING, true); if (context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank == dm_allow_self_refresh_and_mclk_switch) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c index 8d4873f80df0..4fb37df54d59 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c @@ -620,7 +620,7 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib, if (hostvm_enable) rq_sizing_param->dpte_group_bytes = 512; else { - if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group + if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group rq_sizing_param->dpte_group_bytes = 512; else rq_sizing_param->dpte_group_bytes = 2048; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c index 0c0b2d67c9cd..1aaa77265eed 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c @@ -326,7 +326,7 @@ void dcn301_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p struct dcn301_resource_pool *pool = TO_DCN301_RES_POOL(dc->res_pool); struct clk_limit_table *clk_table = &bw_params->clk_table; unsigned int i, closest_clk_lvl; - int j; + int j = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0; dc_assert_fp_enabled(); @@ -338,6 +338,15 @@ void dcn301_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p dcn3_01_soc.num_chans = bw_params->num_channels; ASSERT(clk_table->num_entries); + + /* Prepass to find max clocks independent of voltage level. */ + for (i = 0; i < clk_table->num_entries; ++i) { + if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz) + max_dispclk_mhz = clk_table->entries[i].dispclk_mhz; + if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz) + max_dppclk_mhz = clk_table->entries[i].dppclk_mhz; + } + for (i = 0; i < clk_table->num_entries; i++) { /* loop backwards*/ for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) { @@ -353,8 +362,13 @@ void dcn301_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p s[i].socclk_mhz = clk_table->entries[i].socclk_mhz; s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2; - s[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz; - s[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz; + /* Clocks independent of voltage level. */ + s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz : + dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz; + + s[i].dppclk_mhz = max_dppclk_mhz ? 
max_dppclk_mhz : + dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz; + s[i].dram_bw_per_chan_gbps = dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; s[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz; @@ -435,12 +449,12 @@ void dcn301_fpu_calculate_wm_and_dlg(struct dc *dc, &context->bw_ctx.dml, pipes, pipe_cnt); /* WM Set C */ table_entry = &bw_params->wm_table.entries[WM_C]; - vlevel = min(max(vlevel_req, 2), vlevel_max); + vlevel = clamp(vlevel_req, 2, vlevel_max); calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c, &context->bw_ctx.dml, pipes, pipe_cnt); /* WM Set B */ table_entry = &bw_params->wm_table.entries[WM_B]; - vlevel = min(max(vlevel_req, 1), vlevel_max); + vlevel = clamp(vlevel_req, 1, vlevel_max); calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b, &context->bw_ctx.dml, pipes, pipe_cnt); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c index 8da97a96b1ce..8d7c59ec701d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c @@ -280,7 +280,7 @@ void dcn302_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p j = 0; /* create the final dcfclk and uclk table */ while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) { - if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) { + if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) { dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; } else { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c index e968870a4b81..b5d3fd4c3694 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c @@ -285,7 +285,7 @@ void dcn303_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p j = 0; /* create the final dcfclk and uclk table */ while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) { - if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) { + if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) { dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; } else { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h index d2ae43a82ba5..dfcc5d50071e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h @@ -55,5 +55,5 @@ int dcn_get_approx_det_segs_required_for_pstate( int dcn31x_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); #endif /* __DCN31_FPU_H__*/ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c index c46bda2141ac..bfeb01477f0c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c @@ -615,7 +615,7 @@ static void get_meta_and_pte_attr( if (hostvm_enable) rq_sizing_param->dpte_group_bytes = 
512; else { - if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group + if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group rq_sizing_param->dpte_group_bytes = 512; else rq_sizing_param->dpte_group_bytes = 2048; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c index 5ed117e11aa2..df9d50b9b57c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c @@ -306,7 +306,7 @@ static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing) int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; @@ -316,7 +316,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c dc_assert_fp_enabled(); - dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { struct dc_crtc_timing *timing; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h index d32c5bb99f4c..362ac79184ea 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h @@ -35,6 +35,6 @@ void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params); int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c index b7d2a0caec11..04df263ff65e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c @@ -703,7 +703,7 @@ static void get_meta_and_pte_attr( if (hostvm_enable) rq_sizing_param->dpte_group_bytes = 512; else { - if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group + if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group rq_sizing_param->dpte_group_bytes = 512; else rq_sizing_param->dpte_group_bytes = 2048; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index b0fc1fd20208..8a0f128722b0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -31,7 +31,7 @@ // We need this includes for WATERMARKS_* defines #include "clk_mgr/dcn32/dcn32_smu13_driver_if.h" #include "dcn30/dcn30_resource.h" -#include "link.h" +#include "link_service.h" #include "dc_state_priv.h" #define DC_LOGGER_INIT(logger) @@ -290,7 +290,7 @@ int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc, vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_support; context->bw_ctx.dml.soc.dram_clock_change_latency_us = 
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; - dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false); + dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, DC_VALIDATE_MODE_AND_PROGRAMMING); /* for subvp + DRR case, if subvp pipes are still present we support pstate */ if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported && @@ -1479,7 +1479,7 @@ static bool dcn32_full_validate_bw_helper(struct dc *dc, /* Conditions for setting up phantom pipes for SubVP: * 1. Not force disable SubVP - * 2. Full update (i.e. !fast_validate) + * 2. Full update (i.e. DC_VALIDATE_MODE_AND_PROGRAMMING) * 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?) * 4. Display configuration passes validation * 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch) @@ -1517,7 +1517,8 @@ static bool dcn32_full_validate_bw_helper(struct dc *dc, dc->res_pool->funcs->add_phantom_pipes(dc, context, pipes, *pipe_cnt, dc_pipe_idx); - *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false); + *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, + DC_VALIDATE_MODE_AND_PROGRAMMING); // Populate dppclk to trigger a recalculate in dml_get_voltage_level // so the phantom pipe DLG params can be assigned correctly. pipes[0].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, *pipe_cnt, 0); @@ -1560,7 +1561,8 @@ static bool dcn32_full_validate_bw_helper(struct dc *dc, dc_state_remove_phantom_streams_and_planes(dc, context); dc_state_release_phantom_streams_and_planes(dc, context); vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] = dm_dram_clock_change_unsupported; - *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false); + *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, + DC_VALIDATE_MODE_AND_PROGRAMMING); *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt); /* This may adjust vlevel and maxMpcComb */ @@ -2138,7 +2140,7 @@ bool dcn32_internal_validate_bw(struct dc *dc, display_e2e_pipe_params_st *pipes, int *pipe_cnt_out, int *vlevel_out, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; bool repopulate_pipes = false; @@ -2162,7 +2164,7 @@ bool dcn32_internal_validate_bw(struct dc *dc, for (i = 0; i < context->stream_count; i++) resource_update_pipes_for_stream_with_slice_count(context, dc->current_state, dc->res_pool, context->streams[i], 1); - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode); if (!pipe_cnt) { out = true; @@ -2172,13 +2174,13 @@ bool dcn32_internal_validate_bw(struct dc *dc, dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt); context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context); - if (!fast_validate) { + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) { if (!dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt, &repopulate_pipes)) goto validate_fail; } - if (fast_validate || + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING || (dc->debug.dml_disallow_alternate_prefetch_modes && (vlevel == context->bw_ctx.dml.soc.num_states || vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) { @@ -2195,7 +2197,7 @@ bool 
dcn32_internal_validate_bw(struct dc *dc, context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = dm_prefetch_support_none; - context->bw_ctx.dml.validate_max_state = fast_validate; + context->bw_ctx.dml.validate_max_state = (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING); vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); context->bw_ctx.dml.validate_max_state = false; @@ -2247,7 +2249,7 @@ bool dcn32_internal_validate_bw(struct dc *dc, int flag_vlevel = vlevel; int i; - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode); if (!dc->config.enable_windowed_mpo_odm) dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes); @@ -2343,7 +2345,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, } context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; - dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false); + dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, DC_VALIDATE_MODE_AND_PROGRAMMING); maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; if (is_subvp_p_drr) { context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp; @@ -2389,7 +2391,8 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; } - dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel_temp, false); + dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel_temp, + DC_VALIDATE_MODE_AND_PROGRAMMING); if (vlevel_temp < vlevel) { vlevel = vlevel_temp; maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; @@ -2410,7 +2413,8 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, stream_status->fpo_in_use = false; } context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us; - dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false); + dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, + DC_VALIDATE_MODE_AND_PROGRAMMING); } } } @@ -3225,7 +3229,7 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa j = 0; // create the final dcfclk and uclk table while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) { - if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) { + if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) { dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; } else { @@ -3397,7 +3401,7 @@ bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe) uint32_t height = subvp_active_margin_list.res[i].height; refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + - pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); + (uint64_t)pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h 
b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h index 276e90e4e0ce..273d2bd79d85 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h @@ -49,7 +49,7 @@ bool dcn32_internal_validate_bw(struct dc *dc, display_e2e_pipe_params_st *pipes, int *pipe_cnt_out, int *vlevel_out, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c index 9ba6cb67655f..6c75aa82327a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c @@ -139,7 +139,6 @@ void dml32_rq_dlg_get_rq_reg(display_rq_regs_st *rq_regs, if (dual_plane) { unsigned int p1_pte_row_height_linear = get_dpte_row_height_linear_c(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); - ; if (src->sw_mode == dm_sw_linear) ASSERT(p1_pte_row_height_linear >= 8); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c index 8839faf42207..e0a1dc89ce43 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c @@ -779,7 +779,7 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p j = 0; // create the final dcfclk and uclk table while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) { - if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) { + if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) { dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; } else { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c index 92f0a099d089..c9dd920744c9 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c @@ -31,7 +31,7 @@ #include "dml/dcn31/dcn31_fpu.h" #include "dml/dml_inline_defs.h" -#include "link.h" +#include "link_service.h" #define DC_LOGGER_INIT(logger) @@ -437,7 +437,7 @@ static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing) int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; @@ -446,7 +446,7 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc, const unsigned int max_allowed_vblank_nom = 1023; dcn31_populate_dml_pipes_from_context(dc, context, pipes, - fast_validate); + validate_mode); for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { struct dc_crtc_timing *timing; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h index 067480fc3691..d121c5afce71 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h @@ -37,7 +37,7 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc, int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool 
fast_validate); + enum dc_validate_mode validate_mode); void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c index 17d0b4923b0c..8cda18ce1a76 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c @@ -10,7 +10,7 @@ #include "dml/dcn35/dcn35_fpu.h" #include "dml/dml_inline_defs.h" -#include "link.h" +#include "link_service.h" #define DC_LOGGER_INIT(logger) @@ -470,7 +470,7 @@ static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing) int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; @@ -479,7 +479,7 @@ int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc, const unsigned int max_allowed_vblank_nom = 1023; dcn31_populate_dml_pipes_from_context(dc, context, pipes, - fast_validate); + validate_mode); for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { struct dc_crtc_timing *timing; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h index f93efab9a668..f71d9d8d0759 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h @@ -12,7 +12,7 @@ void dcn351_update_bw_bounding_box_fpu(struct dc *dc, int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c deleted file mode 100644 index 4fbecb5ff349..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c +++ /dev/null @@ -1,239 +0,0 @@ -// SPDX-License-Identifier: MIT -// -// Copyright 2024 Advanced Micro Devices, Inc. 
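
[Editor aside, not part of the patch] The recurring conversion above swaps the bool fast_validate parameter for enum dc_validate_mode, so callers can express more than two validation intents while the "fast" paths keep skipping watermark work. A minimal compilable sketch of the pattern follows; only DC_VALIDATE_MODE_AND_PROGRAMMING appears in these hunks, the other enumerator names and the helper bodies are assumptions for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative sketch of the bool -> enum migration seen in the hunks above. */
enum dc_validate_mode {
        DC_VALIDATE_MODE_ONLY,                  /* assumed name: fast check only */
        DC_VALIDATE_MODE_AND_STATE_INDEX,       /* assumed name: intermediate mode */
        DC_VALIDATE_MODE_AND_PROGRAMMING,       /* full validation + programming */
};

static bool check_voltage_level(void) { return true; }          /* stand-in */
static void calculate_watermarks(void) { puts("wm programmed"); } /* stand-in */

static bool validate_bandwidth(enum dc_validate_mode validate_mode)
{
        bool ok = check_voltage_level();

        /* was: if (fast_validate) return ok; */
        if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
                return ok;              /* fast modes skip watermark programming */

        calculate_watermarks();
        return ok;
}

int main(void)
{
        printf("fast: %d\n", validate_bandwidth(DC_VALIDATE_MODE_ONLY));
        printf("full: %d\n", validate_bandwidth(DC_VALIDATE_MODE_AND_PROGRAMMING));
        return 0;
}

Comparing against the single "full" enumerator keeps every other mode on the old fast path, which is why the hunks rewrite "if (fast_validate)" as "if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)".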
- -#include "dcn401_fpu.h" -#include "dcn401/dcn401_resource.h" -// We need this includes for WATERMARKS_* defines -#include "clk_mgr/dcn401/dcn401_smu14_driver_if.h" -#include "link.h" - -#define DC_LOGGER_INIT(logger) - -void dcn401_build_wm_range_table_fpu(struct clk_mgr *clk_mgr) -{ - /* defaults */ - double pstate_latency_us = clk_mgr->ctx->dc->dml.soc.dram_clock_change_latency_us; - double fclk_change_latency_us = clk_mgr->ctx->dc->dml.soc.fclk_change_latency_us; - double sr_exit_time_us = clk_mgr->ctx->dc->dml.soc.sr_exit_time_us; - double sr_enter_plus_exit_time_us = clk_mgr->ctx->dc->dml.soc.sr_enter_plus_exit_time_us; - /* For min clocks use as reported by PM FW and report those as min */ - uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz; - uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz; - uint16_t setb_min_uclk_mhz = min_uclk_mhz; - uint16_t dcfclk_mhz_for_the_second_state = clk_mgr->ctx->dc->dml.soc.clock_limits[2].dcfclk_mhz; - - dc_assert_fp_enabled(); - - /* For Set B ranges use min clocks state 2 when available, and report those to PM FW */ - if (dcfclk_mhz_for_the_second_state) - clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = dcfclk_mhz_for_the_second_state; - else - clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz; - - if (clk_mgr->bw_params->clk_table.entries[2].memclk_mhz) - setb_min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[2].memclk_mhz; - - /* Set A - Normal - default values */ - clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us = pstate_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us = fclk_change_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us = sr_exit_time_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF; - - /* Set B - Performance - higher clocks, using DPM[2] DCFCLK and UCLK */ - clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = true; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us = pstate_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.fclk_change_latency_us = fclk_change_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us = sr_exit_time_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_dcfclk = 0xFFFF; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_uclk = setb_min_uclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_uclk = 0xFFFF; - - /* Set C - Dummy P-State - P-State latency set to "dummy p-state" value */ - /* 'DalDummyClockChangeLatencyNs' registry key option 
set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */ - if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) { - clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid = true; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 50; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.fclk_change_latency_us = fclk_change_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us = sr_exit_time_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_dcfclk = 0xFFFF; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_uclk = min_uclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF; - clk_mgr->bw_params->dummy_pstate_table[0].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 16; - clk_mgr->bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 50; - clk_mgr->bw_params->dummy_pstate_table[1].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[1].memclk_mhz * 16; - clk_mgr->bw_params->dummy_pstate_table[1].dummy_pstate_latency_us = 9; - clk_mgr->bw_params->dummy_pstate_table[2].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[2].memclk_mhz * 16; - clk_mgr->bw_params->dummy_pstate_table[2].dummy_pstate_latency_us = 8; - clk_mgr->bw_params->dummy_pstate_table[3].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[3].memclk_mhz * 16; - clk_mgr->bw_params->dummy_pstate_table[3].dummy_pstate_latency_us = 5; - } - /* Set D - MALL - SR enter and exit time specific to MALL, TBD after bringup or later phase for now use DRAM values / 2 */ - /* For MALL DRAM clock change latency is N/A, for watermak calculations use lowest value dummy P state latency */ - clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid = true; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us = clk_mgr->bw_params->dummy_pstate_table[3].dummy_pstate_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.fclk_change_latency_us = fclk_change_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us = sr_exit_time_us / 2; // TBD - clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us / 2; // TBD - clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.wm_type = WATERMARKS_MALL; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_dcfclk = 0xFFFF; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_uclk = min_uclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF; -} - -/* - * dcn401_update_bw_bounding_box - * - * This would override some dcn4_01 ip_or_soc initial parameters hardcoded from - * spreadsheet with actual values as per dGPU SKU: - * - with passed few options from dc->config - * - with dentist_vco_frequency from Clk Mgr (currently hardcoded, but might - * need to get it from PM FW) - * - with passed latency values (passed in ns units) in dc-> bb override for - * debugging purposes - * - with passed latencies from VBIOS (in 100_ns units) if available 
for - * certain dGPU SKU - * - with number of DRAM channels from VBIOS (which differ for certain dGPU SKU - * of the same ASIC) - * - clocks levels with passed clk_table entries from Clk Mgr as reported by PM - * FW for different clocks (which might differ for certain dGPU SKU of the - * same ASIC) - */ -void dcn401_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params) -{ - dc_assert_fp_enabled(); - - /* Override from passed dc->bb_overrides if available*/ - if (dc->bb_overrides.sr_exit_time_ns) - dc->dml2_options.bbox_overrides.sr_exit_latency_us = - dc->bb_overrides.sr_exit_time_ns / 1000.0; - - if (dc->bb_overrides.sr_enter_plus_exit_time_ns) - dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us = - dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; - - if (dc->bb_overrides.urgent_latency_ns) - dc->dml2_options.bbox_overrides.urgent_latency_us = - dc->bb_overrides.urgent_latency_ns / 1000.0; - - if (dc->bb_overrides.dram_clock_change_latency_ns) - dc->dml2_options.bbox_overrides.dram_clock_change_latency_us = - dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; - - if (dc->bb_overrides.fclk_clock_change_latency_ns) - dc->dml2_options.bbox_overrides.fclk_change_latency_us = - dc->bb_overrides.fclk_clock_change_latency_ns / 1000; - - /* Override from VBIOS if VBIOS bb_info available */ - if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { - struct bp_soc_bb_info bb_info = {0}; - if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { - if (bb_info.dram_clock_change_latency_100ns > 0) - dc->dml2_options.bbox_overrides.dram_clock_change_latency_us = - bb_info.dram_clock_change_latency_100ns * 10; - - if (bb_info.dram_sr_enter_exit_latency_100ns > 0) - dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us = - bb_info.dram_sr_enter_exit_latency_100ns * 10; - - if (bb_info.dram_sr_exit_latency_100ns > 0) - dc->dml2_options.bbox_overrides.sr_exit_latency_us = - bb_info.dram_sr_exit_latency_100ns * 10; - } - } - - /* Override from VBIOS for num_chan */ - if (dc->ctx->dc_bios->vram_info.num_chans) { - dc->dml2_options.bbox_overrides.dram_num_chan = - dc->ctx->dc_bios->vram_info.num_chans; - - } - - if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) - dc->dml2_options.bbox_overrides.dram_chanel_width_bytes = - dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; - - dc->dml2_options.bbox_overrides.disp_pll_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; - dc->dml2_options.bbox_overrides.xtalclk_mhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000.0; - dc->dml2_options.bbox_overrides.dchub_refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; - dc->dml2_options.bbox_overrides.dprefclk_mhz = dc->clk_mgr->dprefclk_khz / 1000.0; - - if (dc->clk_mgr->bw_params->clk_table.num_entries > 1) { - unsigned int i = 0; - - dc->dml2_options.bbox_overrides.clks_table.num_states = dc->clk_mgr->bw_params->clk_table.num_entries; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels = - dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels = - dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels = - dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_socclk_levels = - 
dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels = - dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels = - dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dppclk_levels = - dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dppclk_levels; - - for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels; i++) { - if (dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz) - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz = - dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz; - } - - for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels; i++) { - if (dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz) - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].fclk_mhz = - dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz; - } - - for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels; i++) { - if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz) - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz = - dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz; - } - - for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels; i++) { - if (dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz) - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].socclk_mhz = - dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz; - } - - for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels; i++) { - if (dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz) - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz = - dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz; - } - - for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; i++) { - if (dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz) { - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dispclk_mhz = - dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz; - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dppclk_mhz = - dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz; - } - } - } -} - diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h deleted file mode 100644 index 329f1788843c..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h +++ /dev/null @@ -1,14 +0,0 @@ -// SPDX-License-Identifier: MIT -// -// Copyright 2024 Advanced Micro Devices, Inc. 
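
[Editor aside, not part of the patch] The dcn401_fpu.c file deleted above ends with a series of loops that copy PM-FW-reported clock levels into the DML2 overrides, skipping levels reported as zero so the spreadsheet defaults survive. A small self-contained sketch of that guarded-copy pattern, with made-up structure names and values:

#include <stdio.h>

#define MAX_LEVELS 8

struct clk_entry { unsigned int dcfclk_mhz; };
struct clk_table { unsigned int num_levels; struct clk_entry entries[MAX_LEVELS]; };

/* Override defaults only with levels the firmware actually reported (nonzero). */
static void override_dcfclk_levels(unsigned int *dst, const struct clk_table *src)
{
        for (unsigned int i = 0; i < src->num_levels && i < MAX_LEVELS; i++) {
                if (src->entries[i].dcfclk_mhz)
                        dst[i] = src->entries[i].dcfclk_mhz;
        }
}

int main(void)
{
        unsigned int dml_dcfclk[MAX_LEVELS] = { 400, 600, 800, 1000 }; /* defaults */
        struct clk_table smu = { .num_levels = 3,
                                 .entries = { { 300 }, { 0 }, { 900 } } };

        override_dcfclk_levels(dml_dcfclk, &smu);
        for (unsigned int i = 0; i < 4; i++)
                printf("%u ", dml_dcfclk[i]);   /* 300 600 900 1000 */
        printf("\n");
        return 0;
}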
- -#ifndef __DCN401_FPU_H__ -#define __DCN401_FPU_H__ - -#include "clk_mgr.h" - -void dcn401_build_wm_range_table_fpu(struct clk_mgr *clk_mgr); - -void dcn401_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params); - -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile index 157ecf008d6c..4c21ce42054c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile @@ -81,10 +81,11 @@ AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2/,$(DML2)) AMD_DISPLAY_FILES += $(AMD_DAL_DML2) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag) +CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_ccflags) $(frame_warn_flag) +CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags) @@ -94,17 +95,16 @@ CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflag CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags) - - CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_ccflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags) @@ -120,6 +120,7 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_rcflags) DML21 := src/dml2_top/dml2_top_interfaces.o DML21 += src/dml2_top/dml2_top_soc15.o DML21 += src/dml2_core/dml2_core_dcn4.o +DML21 += src/dml2_core/dml2_core_utils.o DML21 += src/dml2_core/dml2_core_factory.o DML21 += src/dml2_core/dml2_core_dcn4_calcs.o DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o diff 
--git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c index 7ae9c0ba0c9e..4b9b2e84d381 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c @@ -6529,7 +6529,7 @@ static noinline_for_stack void dml_prefetch_check(struct display_mode_lib_st *mo mode_lib->ms.TotImmediateFlipBytes = 0; for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) { if (!(mode_lib->ms.policy.ImmediateFlipRequirement[k] == dml_immediate_flip_not_required)) { - mode_lib->ms.TotImmediateFlipBytes = mode_lib->ms.TotImmediateFlipBytes + mode_lib->ms.NoOfDPP[j][k] * mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k] + mode_lib->ms.MetaRowBytes[j][k]; + mode_lib->ms.TotImmediateFlipBytes = mode_lib->ms.TotImmediateFlipBytes + mode_lib->ms.NoOfDPP[j][k] * (mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k] + mode_lib->ms.MetaRowBytes[j][k]); if (mode_lib->ms.use_one_row_for_frame_flip[j][k]) { mode_lib->ms.TotImmediateFlipBytes = mode_lib->ms.TotImmediateFlipBytes + mode_lib->ms.NoOfDPP[j][k] * (2 * mode_lib->ms.DPTEBytesPerRow[j][k]); } else { @@ -10189,7 +10189,7 @@ dml_uint_t dml_mode_support_ex(struct dml_mode_support_ex_params_st *in_out_para result = mode_support_pwr_states(&in_out_params->out_lowest_state_idx, in_out_params->mode_lib, in_out_params->in_display_cfg, - 0, + in_out_params->in_start_state_idx, in_out_params->mode_lib->states.num_states - 1); if (result) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h index 0670e4dc4fd9..dbeb08466092 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h @@ -1917,6 +1917,7 @@ struct display_mode_lib_st { struct dml_mode_support_ex_params_st { struct display_mode_lib_st *mode_lib; const struct dml_display_cfg_st *in_display_cfg; + dml_uint_t in_start_state_idx; dml_uint_t out_lowest_state_idx; struct dml_mode_support_info_st *out_evaluation_info; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c index 2aa6d44bb359..bf5e7f4e0416 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c @@ -2,338 +2,73 @@ // // Copyright 2024 Advanced Micro Devices, Inc. 
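
[Editor aside, not part of the patch] The display_mode_core.c hunk above is an operator-precedence fix: the per-plane meta row bytes must be scaled by the DPP count together with the PDE/meta-PTE bytes, so the sum moves inside the parentheses before the multiply. A tiny standalone demo of the difference, using made-up byte counts:

#include <stdio.h>

int main(void)
{
        unsigned int no_of_dpp = 2;
        unsigned int pde_meta_pte_bytes = 4096;
        unsigned int meta_row_bytes = 256;

        /* before the fix: meta row bytes added once, not per DPP */
        unsigned int before = no_of_dpp * pde_meta_pte_bytes + meta_row_bytes;
        /* after the fix: both contributions scaled by the DPP count */
        unsigned int after  = no_of_dpp * (pde_meta_pte_bytes + meta_row_bytes);

        printf("before: %u bytes, after: %u bytes\n", before, after); /* 8448 vs 8704 */
        return 0;
}

The old expression under-counted TotImmediateFlipBytes whenever more than one DPP drives the plane, which is exactly the case the parenthesized form corrects.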
- #include "dml21_wrapper.h" #include "dml2_core_dcn4_calcs.h" #include "dml2_internal_shared_types.h" #include "dml2_internal_types.h" #include "dml21_utils.h" #include "dml21_translation_helper.h" -#include "bounding_boxes/dcn4_soc_bb.h" - -static void dml21_init_socbb_params(struct dml2_initialize_instance_in_out *dml_init, - const struct dml2_configuration_options *config, - const struct dc *in_dc) -{ - const struct dml2_soc_bb *soc_bb; - const struct dml2_soc_qos_parameters *qos_params; - - switch (in_dc->ctx->dce_version) { - case DCN_VERSION_4_01: - default: - if (config->bb_from_dmub) - soc_bb = config->bb_from_dmub; - else - soc_bb = &dml2_socbb_dcn401; - - qos_params = &dml_dcn4_variant_a_soc_qos_params; - } +#include "soc_and_ip_translator.h" - /* patch soc bb */ - memcpy(&dml_init->soc_bb, soc_bb, sizeof(struct dml2_soc_bb)); - - /* patch qos params */ - memcpy(&dml_init->soc_bb.qos_parameters, qos_params, sizeof(struct dml2_soc_qos_parameters)); -} - -static void dml21_external_socbb_params(struct dml2_initialize_instance_in_out *dml_init, - const struct dml2_configuration_options *config) -{ - memcpy(&dml_init->soc_bb, &config->external_socbb_ip_params->soc_bb, sizeof(struct dml2_soc_bb)); -} - -static void dml21_external_ip_params(struct dml2_initialize_instance_in_out *dml_init, +static void dml21_populate_pmo_options(struct dml2_pmo_options *pmo_options, + const struct dc *in_dc, const struct dml2_configuration_options *config) { - memcpy(&dml_init->ip_caps, &config->external_socbb_ip_params->ip_params, sizeof(struct dml2_ip_capabilities)); + bool disable_fams2 = !in_dc->debug.fams2_config.bits.enable; + + /* ODM options */ + pmo_options->disable_dyn_odm = !config->minimize_dispclk_using_odm; + pmo_options->disable_dyn_odm_for_multi_stream = true; + pmo_options->disable_dyn_odm_for_stream_with_svp = true; + + pmo_options->disable_vblank = ((in_dc->debug.dml21_disable_pstate_method_mask >> 1) & 1); + + /* NOTE: DRR and SubVP Require FAMS2 */ + pmo_options->disable_svp = ((in_dc->debug.dml21_disable_pstate_method_mask >> 2) & 1) || + in_dc->debug.force_disable_subvp || + disable_fams2; + pmo_options->disable_drr_clamped = ((in_dc->debug.dml21_disable_pstate_method_mask >> 3) & 1) || + disable_fams2; + pmo_options->disable_drr_var = ((in_dc->debug.dml21_disable_pstate_method_mask >> 4) & 1) || + disable_fams2; + pmo_options->disable_fams2 = disable_fams2; + + pmo_options->disable_drr_var_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE || + in_dc->debug.disable_fams_gaming == INGAME_FAMS_MULTI_DISP_CLAMPED_ONLY; + pmo_options->disable_drr_clamped_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE; } -static void dml21_init_ip_params(struct dml2_initialize_instance_in_out *dml_init, - const struct dml2_configuration_options *config, - const struct dc *in_dc) +static enum dml2_project_id dml21_dcn_revision_to_dml2_project_id(enum dce_version dcn_version) { - const struct dml2_ip_capabilities *ip_caps; - - switch (in_dc->ctx->dce_version) { + enum dml2_project_id project_id; + switch (dcn_version) { case DCN_VERSION_4_01: + project_id = dml2_project_dcn4x_stage2_auto_drr_svp; + break; default: - ip_caps = &dml2_dcn401_max_ip_caps; + project_id = dml2_project_invalid; + DC_ERR("unsupported dcn version for DML21!"); + break; } - memcpy(&dml_init->ip_caps, ip_caps, sizeof(struct dml2_ip_capabilities)); + return project_id; } -void dml21_initialize_soc_bb_params(struct dml2_initialize_instance_in_out *dml_init, +void 
dml21_populate_dml_init_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc) { - if (config->use_native_soc_bb_construction) - dml21_init_socbb_params(dml_init, config, in_dc); - else - dml21_external_socbb_params(dml_init, config); -} - -void dml21_initialize_ip_params(struct dml2_initialize_instance_in_out *dml_init, - const struct dml2_configuration_options *config, - const struct dc *in_dc) -{ - if (config->use_native_soc_bb_construction) - dml21_init_ip_params(dml_init, config, in_dc); - else - dml21_external_ip_params(dml_init, config); -} - -void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_init, - const struct dml2_configuration_options *config, const struct dc *in_dc) -{ - int i; - - const struct clk_bw_params *dc_bw_params = in_dc->clk_mgr->bw_params; - const struct clk_limit_table *dc_clk_table = &dc_bw_params->clk_table; - struct dml2_soc_bb *dml_soc_bb = &dml_init->soc_bb; - struct dml2_soc_state_table *dml_clk_table = &dml_soc_bb->clk_table; - - /* override clocks if smu is present */ - if (in_dc->clk_mgr->funcs->is_smu_present && in_dc->clk_mgr->funcs->is_smu_present(in_dc->clk_mgr)) { - /* dcfclk */ - if (dc_clk_table->num_entries_per_clk.num_dcfclk_levels) { - dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->dcfclk.num_clk_values) { - if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dcfclk_mhz && - dc_clk_table->entries[i].dcfclk_mhz > dc_bw_params->dc_mode_limit.dcfclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].dcfclk_mhz < dc_bw_params->dc_mode_limit.dcfclk_mhz) { - dml_clk_table->dcfclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dcfclk_mhz * 1000; - dml_clk_table->dcfclk.num_clk_values = i + 1; - } else { - dml_clk_table->dcfclk.clk_values_khz[i] = 0; - dml_clk_table->dcfclk.num_clk_values = i; - } - } else { - dml_clk_table->dcfclk.clk_values_khz[i] = dc_clk_table->entries[i].dcfclk_mhz * 1000; - } - } else { - dml_clk_table->dcfclk.clk_values_khz[i] = 0; - } - } - } - - /* fclk */ - if (dc_clk_table->num_entries_per_clk.num_fclk_levels) { - dml_clk_table->fclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_fclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->fclk.num_clk_values) { - if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.fclk_mhz && - dc_clk_table->entries[i].fclk_mhz > dc_bw_params->dc_mode_limit.fclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].fclk_mhz < dc_bw_params->dc_mode_limit.fclk_mhz) { - dml_clk_table->fclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.fclk_mhz * 1000; - dml_clk_table->fclk.num_clk_values = i + 1; - } else { - dml_clk_table->fclk.clk_values_khz[i] = 0; - dml_clk_table->fclk.num_clk_values = i; - } - } else { - dml_clk_table->fclk.clk_values_khz[i] = dc_clk_table->entries[i].fclk_mhz * 1000; - } - } else { - dml_clk_table->fclk.clk_values_khz[i] = 0; - } - } - } + dml_init->options.project_id = dml21_dcn_revision_to_dml2_project_id(in_dc->ctx->dce_version); - /* uclk */ - if (dc_clk_table->num_entries_per_clk.num_memclk_levels) { - dml_clk_table->uclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_memclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->uclk.num_clk_values) { - if (config->use_clock_dc_limits && 
dc_bw_params->dc_mode_limit.memclk_mhz && - dc_clk_table->entries[i].memclk_mhz > dc_bw_params->dc_mode_limit.memclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].memclk_mhz < dc_bw_params->dc_mode_limit.memclk_mhz) { - dml_clk_table->uclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.memclk_mhz * 1000; - dml_clk_table->uclk.num_clk_values = i + 1; - } else { - dml_clk_table->uclk.clk_values_khz[i] = 0; - dml_clk_table->uclk.num_clk_values = i; - } - } else { - dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000; - } - } else { - dml_clk_table->uclk.clk_values_khz[i] = 0; - } - } - } - - /* dispclk */ - if (dc_clk_table->num_entries_per_clk.num_dispclk_levels) { - dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->dispclk.num_clk_values) { - if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dispclk_mhz && - dc_clk_table->entries[i].dispclk_mhz > dc_bw_params->dc_mode_limit.dispclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].dispclk_mhz < dc_bw_params->dc_mode_limit.dispclk_mhz) { - dml_clk_table->dispclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dispclk_mhz * 1000; - dml_clk_table->dispclk.num_clk_values = i + 1; - } else { - dml_clk_table->dispclk.clk_values_khz[i] = 0; - dml_clk_table->dispclk.num_clk_values = i; - } - } else { - dml_clk_table->dispclk.clk_values_khz[i] = dc_clk_table->entries[i].dispclk_mhz * 1000; - } - } else { - dml_clk_table->dispclk.clk_values_khz[i] = 0; - } - } - } - - /* dppclk */ - if (dc_clk_table->num_entries_per_clk.num_dppclk_levels) { - dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->dppclk.num_clk_values) { - if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dppclk_mhz && - dc_clk_table->entries[i].dppclk_mhz > dc_bw_params->dc_mode_limit.dppclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].dppclk_mhz < dc_bw_params->dc_mode_limit.dppclk_mhz) { - dml_clk_table->dppclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dppclk_mhz * 1000; - dml_clk_table->dppclk.num_clk_values = i + 1; - } else { - dml_clk_table->dppclk.clk_values_khz[i] = 0; - dml_clk_table->dppclk.num_clk_values = i; - } - } else { - dml_clk_table->dppclk.clk_values_khz[i] = dc_clk_table->entries[i].dppclk_mhz * 1000; - } - } else { - dml_clk_table->dppclk.clk_values_khz[i] = 0; - } - } - } - - /* dtbclk */ - if (dc_clk_table->num_entries_per_clk.num_dtbclk_levels) { - dml_clk_table->dtbclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dtbclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->dtbclk.num_clk_values) { - if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dtbclk_mhz && - dc_clk_table->entries[i].dtbclk_mhz > dc_bw_params->dc_mode_limit.dtbclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].dtbclk_mhz < dc_bw_params->dc_mode_limit.dtbclk_mhz) { - dml_clk_table->dtbclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dtbclk_mhz * 1000; - dml_clk_table->dtbclk.num_clk_values = i + 1; - } else { - dml_clk_table->dtbclk.clk_values_khz[i] = 0; - dml_clk_table->dtbclk.num_clk_values = i; - } - } else { - dml_clk_table->dtbclk.clk_values_khz[i] = dc_clk_table->entries[i].dtbclk_mhz * 1000; - } - } else { - dml_clk_table->dtbclk.clk_values_khz[i] = 0; - } - } - 
} - - /* socclk */ - if (dc_clk_table->num_entries_per_clk.num_socclk_levels) { - dml_clk_table->socclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_socclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->socclk.num_clk_values) { - if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.socclk_mhz && - dc_clk_table->entries[i].socclk_mhz > dc_bw_params->dc_mode_limit.socclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].socclk_mhz < dc_bw_params->dc_mode_limit.socclk_mhz) { - dml_clk_table->socclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.socclk_mhz * 1000; - dml_clk_table->socclk.num_clk_values = i + 1; - } else { - dml_clk_table->socclk.clk_values_khz[i] = 0; - dml_clk_table->socclk.num_clk_values = i; - } - } else { - dml_clk_table->socclk.clk_values_khz[i] = dc_clk_table->entries[i].socclk_mhz * 1000; - } - } else { - dml_clk_table->socclk.clk_values_khz[i] = 0; - } - } - } - - /* do not override phyclks for now */ - /* phyclk */ - // dml_clk_table->phyclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_phyclk_levels; - // for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) { - // dml_clk_table->phyclk.clk_values_khz[i] = dc_clk_table->entries[i].phyclk_mhz * 1000; - // } - - /* phyclk_d18 */ - // dml_clk_table->phyclk_d18.num_clk_values = dc_clk_table->num_entries_per_clk.num_phyclk_d18_levels; - // for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) { - // dml_clk_table->phyclk_d18.clk_values_khz[i] = dc_clk_table->entries[i].phyclk_d18_mhz * 1000; - // } - - /* phyclk_d32 */ - // dml_clk_table->phyclk_d32.num_clk_values = dc_clk_table->num_entries_per_clk.num_phyclk_d32_levels; - // for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) { - // dml_clk_table->phyclk_d32.clk_values_khz[i] = dc_clk_table->entries[i].phyclk_d32_mhz * 1000; - // } - } - - dml_soc_bb->dchub_refclk_mhz = in_dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000; - dml_soc_bb->dprefclk_mhz = in_dc->clk_mgr->dprefclk_khz / 1000; - dml_soc_bb->xtalclk_mhz = in_dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000; - dml_soc_bb->dispclk_dppclk_vco_speed_mhz = in_dc->clk_mgr->dentist_vco_freq_khz / 1000.0; - - /* override bounding box paramters from VBIOS */ - if (in_dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns > 0) - dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us = - (in_dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns + 9) / 10; - - if (in_dc->ctx->dc_bios->bb_info.dram_sr_enter_exit_latency_100ns > 0) - dml_soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us = - (in_dc->ctx->dc_bios->bb_info.dram_sr_enter_exit_latency_100ns + 9) / 10; - - if (in_dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns > 0) - dml_soc_bb->power_management_parameters.stutter_exit_latency_us = - (in_dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns + 9) / 10; - - if (dc_bw_params->num_channels) { - dml_clk_table->dram_config.channel_count = dc_bw_params->num_channels; - dml_soc_bb->mall_allocated_for_dcn_mbytes = in_dc->caps.mall_size_total / 1048576; - } else if (in_dc->ctx->dc_bios->vram_info.num_chans) { - dml_clk_table->dram_config.channel_count = in_dc->ctx->dc_bios->vram_info.num_chans; - dml_soc_bb->mall_allocated_for_dcn_mbytes = in_dc->caps.mall_size_total / 1048576; - } - - if (dc_bw_params->dram_channel_width_bytes) { - dml_clk_table->dram_config.channel_width_bytes = dc_bw_params->dram_channel_width_bytes; - } else if (in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) { - 
dml_clk_table->dram_config.channel_width_bytes = in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; - } - - /* override bounding box paramters from DC config */ - if (in_dc->bb_overrides.sr_exit_time_ns) { - dml_soc_bb->power_management_parameters.stutter_exit_latency_us = - in_dc->bb_overrides.sr_exit_time_ns / 1000.0; - } - - if (in_dc->bb_overrides.sr_enter_plus_exit_time_ns) { - dml_soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us = - in_dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; - } - - if (in_dc->bb_overrides.dram_clock_change_latency_ns) { - dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us = - in_dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; - } - - if (in_dc->bb_overrides.fclk_clock_change_latency_ns) { - dml_soc_bb->power_management_parameters.fclk_change_blackout_us = - in_dc->bb_overrides.fclk_clock_change_latency_ns / 1000.0; + if (config->use_native_soc_bb_construction) { + in_dc->soc_and_ip_translator->translator_funcs->get_soc_bb(&dml_init->soc_bb, in_dc, config); + in_dc->soc_and_ip_translator->translator_funcs->get_ip_caps(&dml_init->ip_caps); + } else { + dml_init->soc_bb = config->external_socbb_ip_params->soc_bb; + dml_init->ip_caps = config->external_socbb_ip_params->ip_params; } - //TODO - // if (in_dc->bb_overrides.dummy_clock_change_latency_ns) { - // dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us = - // in_dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; - // } + dml21_populate_pmo_options(&dml_init->options.pmo_options, in_dc, config); } static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream) @@ -349,25 +84,29 @@ static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stre static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cfg *timing, struct dc_stream_state *stream, + struct pipe_ctx *pipe_ctx, struct dml2_context *dml_ctx) { unsigned int hblank_start, vblank_start, min_hardware_refresh_in_uhz; + uint32_t pix_clk_100hz; - timing->h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; + timing->h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->dsc_padding_params.dsc_hactive_padding; timing->v_active = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top; timing->h_front_porch = stream->timing.h_front_porch; timing->v_front_porch = stream->timing.v_front_porch; timing->pixel_clock_khz = stream->timing.pix_clk_100hz / 10; + if (pipe_ctx->dsc_padding_params.dsc_hactive_padding != 0) + timing->pixel_clock_khz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz / 10; if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) timing->pixel_clock_khz *= 2; - timing->h_total = stream->timing.h_total; + timing->h_total = stream->timing.h_total + pipe_ctx->dsc_padding_params.dsc_htotal_padding; timing->v_total = stream->timing.v_total; timing->h_sync_width = stream->timing.h_sync_width; timing->interlaced = stream->timing.flags.INTERLACE; hblank_start = stream->timing.h_total - stream->timing.h_front_porch; - timing->h_blank_end = hblank_start - stream->timing.h_addressable + timing->h_blank_end = hblank_start - stream->timing.h_addressable - pipe_ctx->dsc_padding_params.dsc_hactive_padding - stream->timing.h_border_left - stream->timing.h_border_right; if (hblank_start < stream->timing.h_addressable) @@ -386,15 +125,16 @@ static void 
populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf /* limit min refresh rate to DC cap */ min_hardware_refresh_in_uhz = stream->timing.min_refresh_in_uhz; if (stream->ctx->dc->caps.max_v_total != 0) { - min_hardware_refresh_in_uhz = div64_u64((stream->timing.pix_clk_100hz * 100000000ULL), - (stream->timing.h_total * (long long)calc_max_hardware_v_total(stream))); + if (pipe_ctx->dsc_padding_params.dsc_hactive_padding != 0) { + pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz; + } else { + pix_clk_100hz = stream->timing.pix_clk_100hz; + } + min_hardware_refresh_in_uhz = div64_u64((pix_clk_100hz * 100000000ULL), + (timing->h_total * (long long)calc_max_hardware_v_total(stream))); } - if (stream->timing.min_refresh_in_uhz > min_hardware_refresh_in_uhz) { - timing->drr_config.min_refresh_uhz = stream->timing.min_refresh_in_uhz; - } else { - timing->drr_config.min_refresh_uhz = min_hardware_refresh_in_uhz; - } + timing->drr_config.min_refresh_uhz = max(stream->timing.min_refresh_in_uhz, min_hardware_refresh_in_uhz); if (dml_ctx->config.callbacks.get_max_flickerless_instant_vtotal_increase && stream->ctx->dc->config.enable_fpo_flicker_detection == 1) @@ -442,21 +182,6 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf timing->vblank_nom = timing->v_total - timing->v_active; } -/** - * adjust_dml21_hblank_timing_config_from_pipe_ctx - Adjusts the horizontal blanking timing configuration - * based on the pipe context. - * @timing: Pointer to the dml2_timing_cfg structure to be adjusted. - * @pipe: Pointer to the pipe_ctx structure containing the horizontal blanking borrow value. - * - * This function modifies the horizontal active and blank end timings by adding and subtracting - * the horizontal blanking borrow value from the pipe context, respectively. 
- */ -static void adjust_dml21_hblank_timing_config_from_pipe_ctx(struct dml2_timing_cfg *timing, struct pipe_ctx *pipe) -{ - timing->h_active += pipe->hblank_borrow; - timing->h_blank_end -= pipe->hblank_borrow; -} - static void populate_dml21_output_config_from_stream_state(struct dml2_link_output_cfg *output, struct dc_stream_state *stream, const struct pipe_ctx *pipe) { @@ -726,7 +451,6 @@ static void populate_dml21_surface_config_from_plane_state( switch (plane_state->tiling_info.gfxversion) { case DcGfxVersion7: case DcGfxVersion8: - // Placeholder for programming the array_mode break; case DcGfxVersion9: case DcGfxVersion10: @@ -757,7 +481,9 @@ static const struct scaler_data *get_scaler_data_for_plane( temp_pipe->plane_state = pipe->plane_state; temp_pipe->plane_res.scl_data.taps = pipe->plane_res.scl_data.taps; temp_pipe->stream_res = pipe->stream_res; - temp_pipe->hblank_borrow = pipe->hblank_borrow; + temp_pipe->dsc_padding_params.dsc_hactive_padding = pipe->dsc_padding_params.dsc_hactive_padding; + temp_pipe->dsc_padding_params.dsc_htotal_padding = pipe->dsc_padding_params.dsc_htotal_padding; + temp_pipe->dsc_padding_params.dsc_pix_clk_100hz = pipe->dsc_padding_params.dsc_pix_clk_100hz; dml_ctx->config.callbacks.build_scaling_params(temp_pipe); break; } @@ -889,10 +615,8 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm case DC_CM2_GPU_MEM_SIZE_171717: plane->tdlut.tdlut_width_mode = dml2_tdlut_width_17_cube; break; - case DC_CM2_GPU_MEM_SIZE_333333: - plane->tdlut.tdlut_width_mode = dml2_tdlut_width_33_cube; - break; case DC_CM2_GPU_MEM_SIZE_TRANSFORMED: + default: //plane->tdlut.tdlut_width_mode = dml2_tdlut_width_flatten; // dml2_tdlut_width_flatten undefined break; } @@ -1027,8 +751,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s disp_cfg_stream_location = dml_dispcfg->num_streams++; ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__); - populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], dml_ctx); - adjust_dml21_hblank_timing_config_from_pipe_ctx(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, &context->res_ctx.pipe_ctx[stream_index]); + populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index], dml_ctx); populate_dml21_output_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].output, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index]); populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index], &context->stream_status[stream_index]); @@ -1095,6 +818,8 @@ void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state context->bw_ctx.bw.dcn.clk.socclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.socclk_khz; context->bw_ctx.bw.dcn.clk.subvp_prefetch_dramclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.svp_prefetch_no_throttle.uclk_khz; context->bw_ctx.bw.dcn.clk.subvp_prefetch_fclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.svp_prefetch_no_throttle.fclk_khz; + context->bw_ctx.bw.dcn.clk.stutter_efficiency.base_efficiency = in_ctx->v21.mode_programming.programming->stutter.base_percent_efficiency; + 
context->bw_ctx.bw.dcn.clk.stutter_efficiency.low_power_efficiency = in_ctx->v21.mode_programming.programming->stutter.low_power_percent_efficiency; } static struct dml2_dchub_watermark_regs *wm_set_index_to_dc_wm_set(union dcn_watermark_set *watermarks, const enum dml2_dchub_watermark_reg_set_index wm_index) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h index 73a013be1e48..9880d3e0398e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h @@ -17,9 +17,7 @@ struct dml2_context; struct dml2_configuration_options; struct dml2_initialize_instance_in_out; -void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc); -void dml21_initialize_soc_bb_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc); -void dml21_initialize_ip_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc); +void dml21_populate_dml_init_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc); bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx); void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state *context); void dml21_extract_watermark_sets(const struct dc *in_dc, union dcn_watermark_set *watermarks, struct dml2_context *in_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c index 930e86cdb88a..ee721606b883 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c @@ -384,6 +384,7 @@ void dml21_build_fams2_programming(const struct dc *dc, /* reset fams2 data */ memset(&context->bw_ctx.bw.dcn.fams2_stream_base_params, 0, sizeof(union dmub_cmd_fams2_config) * DML2_MAX_PLANES); memset(&context->bw_ctx.bw.dcn.fams2_stream_sub_params, 0, sizeof(union dmub_cmd_fams2_config) * DML2_MAX_PLANES); + memset(&context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2, 0, sizeof(union dmub_fams2_stream_static_sub_state_v2) * DML2_MAX_PLANES); memset(&context->bw_ctx.bw.dcn.fams2_global_config, 0, sizeof(struct dmub_cmd_fams2_global_config)); if (dml_ctx->v21.mode_programming.programming->fams2_required) { @@ -414,9 +415,16 @@ void dml21_build_fams2_programming(const struct dc *dc, memcpy(static_base_state, &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_base_params, sizeof(union dmub_cmd_fams2_config)); - memcpy(static_sub_state, - &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params, - sizeof(union dmub_cmd_fams2_config)); + + if (dc->debug.fams_version.major == 3) { + memcpy(&context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2[num_fams2_streams], + &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params_v2, + sizeof(union dmub_fams2_stream_static_sub_state_v2)); + } else { + memcpy(static_sub_state, + &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params, + sizeof(union dmub_cmd_fams2_config)); + } switch 
(dc->debug.fams_version.minor) { case 1: diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c index 208d3651b6ba..08f7f03b1023 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c @@ -2,8 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. -#include <linux/vmalloc.h> - #include "dml2_internal_types.h" #include "dml_top.h" #include "dml2_core_dcn4_calcs.h" @@ -37,15 +35,11 @@ static bool dml21_allocate_memory(struct dml2_context **dml_ctx) return true; } -static void dml21_apply_debug_options(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config) +static void dml21_populate_configuration_options(const struct dc *in_dc, + struct dml2_context *dml_ctx, + const struct dml2_configuration_options *config) { - bool disable_fams2; - struct dml2_pmo_options *pmo_options = &dml_ctx->v21.dml_init.options.pmo_options; - - /* ODM options */ - pmo_options->disable_dyn_odm = !config->minimize_dispclk_using_odm; - pmo_options->disable_dyn_odm_for_multi_stream = true; - pmo_options->disable_dyn_odm_for_stream_with_svp = true; + dml_ctx->config = *config; /* UCLK P-State options */ if (in_dc->debug.dml21_force_pstate_method) { @@ -55,52 +49,20 @@ static void dml21_apply_debug_options(const struct dc *in_dc, struct dml2_contex } else { dml_ctx->config.pmo.force_pstate_method_enable = false; } - - pmo_options->disable_vblank = ((in_dc->debug.dml21_disable_pstate_method_mask >> 1) & 1); - - /* NOTE: DRR and SubVP Require FAMS2 */ - disable_fams2 = !in_dc->debug.fams2_config.bits.enable; - pmo_options->disable_svp = ((in_dc->debug.dml21_disable_pstate_method_mask >> 2) & 1) || - in_dc->debug.force_disable_subvp || - disable_fams2; - pmo_options->disable_drr_clamped = ((in_dc->debug.dml21_disable_pstate_method_mask >> 3) & 1) || - disable_fams2; - pmo_options->disable_drr_var = ((in_dc->debug.dml21_disable_pstate_method_mask >> 4) & 1) || - disable_fams2; - pmo_options->disable_fams2 = disable_fams2; - - pmo_options->disable_drr_var_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE || - in_dc->debug.disable_fams_gaming == INGAME_FAMS_MULTI_DISP_CLAMPED_ONLY; - pmo_options->disable_drr_clamped_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE; } -static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config) +static void dml21_init(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config) { - switch (in_dc->ctx->dce_version) { - case DCN_VERSION_4_01: - (*dml_ctx)->v21.dml_init.options.project_id = dml2_project_dcn4x_stage2_auto_drr_svp; - break; - default: - (*dml_ctx)->v21.dml_init.options.project_id = dml2_project_invalid; - } - (*dml_ctx)->architecture = dml2_architecture_21; + dml_ctx->architecture = dml2_architecture_21; - /* Store configuration options */ - (*dml_ctx)->config = *config; + dml21_populate_configuration_options(in_dc, dml_ctx, config); DC_FP_START(); - /*Initialize SOCBB and DCNIP params */ - dml21_initialize_soc_bb_params(&(*dml_ctx)->v21.dml_init, config, in_dc); - dml21_initialize_ip_params(&(*dml_ctx)->v21.dml_init, config, in_dc); - dml21_apply_soc_bb_overrides(&(*dml_ctx)->v21.dml_init, config, in_dc); - - /* apply debug overrides */ - dml21_apply_debug_options(in_dc, *dml_ctx, config); + 
dml21_populate_dml_init_params(&dml_ctx->v21.dml_init, &dml_ctx->config, in_dc); - /*Initialize DML21 instance */ - dml2_initialize_instance(&(*dml_ctx)->v21.dml_init); + dml2_initialize_instance(&dml_ctx->v21.dml_init); DC_FP_END(); } @@ -111,7 +73,7 @@ bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const s if (!dml21_allocate_memory(dml_ctx)) return false; - dml21_init(in_dc, dml_ctx, config); + dml21_init(in_dc, *dml_ctx, config); return true; } @@ -262,7 +224,9 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context); /* Populate stream, plane mappings and other fields in display config. */ + DC_FP_START(); result = dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx); + DC_FP_END(); if (!result) return false; @@ -317,7 +281,9 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context); mode_support->dml2_instance = dml_init->dml2_instance; + DC_FP_START(); dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx); + DC_FP_END(); dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming; DC_FP_START(); is_supported = dml2_check_mode_supported(mode_support); @@ -328,12 +294,13 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co return true; } -bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, bool fast_validate) +bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, + enum dc_validate_mode validate_mode) { bool out = false; - /* Use dml_validate_only for fast_validate path */ - if (fast_validate) + /* Use dml21_check_mode_support for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX path */ + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) out = dml21_check_mode_support(in_dc, context, dml_ctx); else out = dml21_mode_check_and_programming(in_dc, context, dml_ctx); @@ -496,7 +463,7 @@ bool dml21_create_copy(struct dml2_context **dst_dml_ctx, return true; } -void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config) +void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config) { dml21_init(in_dc, dml_ctx, config); } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h index 42e715024bc9..15f92029d2e5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h @@ -14,6 +14,7 @@ struct dc; struct dc_state; struct dml2_configuration_options; struct dml2_context; +enum dc_validate_mode; /** * dml2_create - Creates dml21_context. 
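The dml21_wrapper.c hunks above replace the boolean fast_validate parameter with the tri-state dc_validate_mode enum: only DC_VALIDATE_MODE_AND_PROGRAMMING takes the dml21_mode_check_and_programming() path, while DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX fall through to dml21_check_mode_support(). A minimal sketch, not part of this patch, of how a caller still holding the old boolean could map it onto the new enum (the helper name is hypothetical):

static inline enum dc_validate_mode legacy_flag_to_validate_mode(bool fast_validate)
{
	/* Both "mode only" values skip populating context.res_ctx, which is
	 * what fast_validate == true used to mean, so the old boolean
	 * collapses onto DC_VALIDATE_MODE_ONLY here.
	 */
	return fast_validate ? DC_VALIDATE_MODE_ONLY
			     : DC_VALIDATE_MODE_AND_PROGRAMMING;
}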
@@ -33,22 +34,23 @@ void dml21_copy(struct dml2_context *dst_dml_ctx, struct dml2_context *src_dml_ctx); bool dml21_create_copy(struct dml2_context **dst_dml_ctx, struct dml2_context *src_dml_ctx); -void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config); +void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config); /** * dml21_validate - Determines if a display configuration is supported or not. * @in_dc: dc. * @context: dc_state to be validated. - * @fast_validate: Fast validate will not populate context.res_ctx. + * @validate_mode: DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX + * will not populate context.res_ctx. * * Based on fast_validate option internally would call: * - * -dml21_mode_check_and_programming - for non fast_validate option + * -dml21_mode_check_and_programming - for DC_VALIDATE_MODE_AND_PROGRAMMING option * Calculates if dc_state can be supported on the input display * configuration. If supported, generates the necessary HW * programming for the new dc_state. * - * -dml21_check_mode_support - for fast_validate option + * -dml21_check_mode_support - for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX option * Calculates if dc_state can be supported for the input display * config. @@ -56,7 +58,8 @@ void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const s * separate dc_states for validation. * Return: True if mode is supported, false otherwise. */ -bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, bool fast_validate); +bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, + enum dc_validate_mode validate_mode); /* Prepare hubp mcache_regs for hubp mcache ID and split coordinate programming */ void dml21_prepare_mcache_programming(struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h index c047d56527c4..a64ec4dcf11a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h @@ -43,5 +43,4 @@ bool dml2_build_mode_programming(struct dml2_build_mode_programming_in_out *in_o */ bool dml2_build_mcache_programming(struct dml2_build_mcache_programming_in_out *in_out); - #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h index 84c90050668c..91955bbe24b8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h @@ -46,6 +46,7 @@ struct dml2_display_dlg_regs { uint32_t dst_y_delta_drq_limit; uint32_t refcyc_per_vm_dmdata; uint32_t dmdata_dl_delta; + uint32_t dst_y_svp_drq_limit; // MRQ uint32_t refcyc_per_meta_chunk_vblank_l; @@ -158,6 +159,8 @@ struct dml2_dchub_watermark_regs { uint32_t sr_exit; uint32_t sr_enter_z8; uint32_t sr_exit_z8; + uint32_t sr_enter_low_power; + uint32_t sr_exit_low_power; uint32_t uclk_pstate; uint32_t fclk_pstate; uint32_t temp_read_or_ppt; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h index 255f05de362c..e8dc6471c0be 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h @@ -222,6 +222,7 @@ struct dml2_composition_cfg { struct { bool enabled; + bool upsp_enabled; struct { double h_ratio; double v_ratio; @@ -426,6 +427,7 @@ struct dml2_stream_parameters { struct dml2_display_cfg { bool gpuvm_enable; + bool ffbm_enable; bool hostvm_enable; // Allocate DET proportionally between streams based on pixel rate diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h index 5f0bc42d1d2f..176f55947664 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h @@ -93,12 +93,17 @@ struct dml2_soc_power_management_parameters { double dram_clk_change_write_only_us; double fclk_change_blackout_us; double g7_ppt_blackout_us; + double g7_temperature_read_blackout_us; double stutter_enter_plus_exit_latency_us; double stutter_exit_latency_us; + double low_power_stutter_enter_plus_exit_latency_us; + double low_power_stutter_exit_latency_us; double z8_stutter_enter_plus_exit_latency_us; double z8_stutter_exit_latency_us; double z8_min_idle_time; double g6_temp_read_blackout_us[DML_MAX_CLK_TABLE_SIZE]; + double type_b_dram_clk_change_blackout_us; + double type_b_ppt_blackout_us; }; struct dml2_clk_table { @@ -130,6 +135,7 @@ struct dml2_soc_state_table { struct dml2_soc_vmin_clock_limits { unsigned long dispclk_khz; + unsigned long dcfclk_khz; }; struct dml2_soc_bb { @@ -138,6 +144,7 @@ struct dml2_soc_bb { struct dml2_soc_power_management_parameters power_management_parameters; struct dml2_soc_vmin_clock_limits vmin_limit; + double lower_bound_bandwidth_dchub; unsigned int dprefclk_mhz; unsigned int xtalclk_mhz; unsigned int pcie_refclk_mhz; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h index 0dbf886d8926..41adb1104d0f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h @@ -16,9 +16,9 @@ struct dml2_instance; enum dml2_project_id { dml2_project_invalid = 0, - dml2_project_dcn4x_stage1 = 1, - dml2_project_dcn4x_stage2 = 2, - dml2_project_dcn4x_stage2_auto_drr_svp = 3, + dml2_project_dcn4x_stage1, + dml2_project_dcn4x_stage2, + dml2_project_dcn4x_stage2_auto_drr_svp, }; enum dml2_pstate_change_support { @@ -53,7 +53,9 @@ enum dml2_output_type_and_rate__rate { dml2_output_rate_hdmi_rate_6x4 = 9, dml2_output_rate_hdmi_rate_8x4 = 10, dml2_output_rate_hdmi_rate_10x4 = 11, - dml2_output_rate_hdmi_rate_12x4 = 12 + dml2_output_rate_hdmi_rate_12x4 = 12, + dml2_output_rate_hdmi_rate_16x4 = 13, + dml2_output_rate_hdmi_rate_20x4 = 14 }; struct dml2_pmo_options { @@ -279,7 +281,10 @@ struct dml2_per_stream_programming { } phantom_stream; union dmub_cmd_fams2_config fams2_base_params; - union dmub_cmd_fams2_config fams2_sub_params; + union { + union dmub_cmd_fams2_config fams2_sub_params; + union dmub_fams2_stream_static_sub_state_v2 fams2_sub_params_v2; + }; }; //----------------- @@ -412,6 +417,8 @@ struct dml2_display_cfg_programming { struct { bool supported_in_blank; // Changing to configurations where this is false requires stutter to be disabled during the transition + uint8_t base_percent_efficiency; //LP1 + uint8_t 
low_power_percent_efficiency; //LP2 } stutter; struct { @@ -674,9 +681,14 @@ struct dml2_display_cfg_programming { // unlimited # of mcache struct dml2_mcache_surface_allocation non_optimized_mcache_allocation[DML2_MAX_PLANES]; + bool failed_prefetch; + bool failed_uclk_pstate; bool failed_mcache_validation; bool failed_dpmm; bool failed_mode_programming; + bool failed_mode_programming_dcfclk; + bool failed_mode_programming_prefetch; + bool failed_mode_programming_flip; bool failed_map_watermarks; } informative; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c index 5b62cd19d979..bf62d42b3f78 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c @@ -1238,18 +1238,27 @@ static void CalculateDETBufferSize( static double CalculateRequiredDispclk( enum dml2_odm_mode ODMMode, - double PixelClock) + double PixelClock, + bool isTMDS420) { + double DispClk; if (ODMMode == dml2_odm_mode_combine_4to1) { - return PixelClock / 4.0; + DispClk = PixelClock / 4.0; } else if (ODMMode == dml2_odm_mode_combine_3to1) { - return PixelClock / 3.0; + DispClk = PixelClock / 3.0; } else if (ODMMode == dml2_odm_mode_combine_2to1) { - return PixelClock / 2.0; + DispClk = PixelClock / 2.0; } else { - return PixelClock; + DispClk = PixelClock; + } + + if (isTMDS420) { + double TMDS420MinPixClock = PixelClock / 2.0; + DispClk = math_max2(DispClk, TMDS420MinPixClock); } + + return DispClk; } static double TruncToValidBPP( @@ -4122,11 +4131,12 @@ static noinline_for_stack void CalculateODMMode( bool success; bool UseDSC = DSCEnable && (NumberOfDSCSlices > 0); enum dml2_odm_mode DecidedODMMode; + bool isTMDS420 = (OutFormat == dml2_420 && Output == dml2_hdmi); - SurfaceRequiredDISPCLKWithoutODMCombine = CalculateRequiredDispclk(dml2_odm_mode_bypass, PixelClock); - SurfaceRequiredDISPCLKWithODMCombineTwoToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_2to1, PixelClock); - SurfaceRequiredDISPCLKWithODMCombineThreeToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_3to1, PixelClock); - SurfaceRequiredDISPCLKWithODMCombineFourToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_4to1, PixelClock); + SurfaceRequiredDISPCLKWithoutODMCombine = CalculateRequiredDispclk(dml2_odm_mode_bypass, PixelClock, isTMDS420); + SurfaceRequiredDISPCLKWithODMCombineTwoToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_2to1, PixelClock, isTMDS420); + SurfaceRequiredDISPCLKWithODMCombineThreeToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_3to1, PixelClock, isTMDS420); + SurfaceRequiredDISPCLKWithODMCombineFourToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_4to1, PixelClock, isTMDS420); #ifdef __DML_VBA_DEBUG__ DML_LOG_VERBOSE("DML::%s: ODMUse = %d\n", __func__, ODMUse); DML_LOG_VERBOSE("DML::%s: Output = %d\n", __func__, Output); @@ -4861,7 +4871,7 @@ static double get_urgent_bandwidth_required( double ReadBandwidthChroma[], double PrefetchBandwidthLuma[], double PrefetchBandwidthChroma[], - double PrefetchBandwidthOto[], + double PrefetchBandwidthMax[], double excess_vactive_fill_bw_l[], double excess_vactive_fill_bw_c[], double cursor_bw[], @@ -4925,9 +4935,9 @@ static double get_urgent_bandwidth_required( l->vm_row_bw = NumberOfDPP[k] * prefetch_vmrow_bw[k]; l->flip_and_active_bw = l->per_plane_flip_bw[k] + ReadBandwidthLuma[k] * l->adj_factor_p0 + 
ReadBandwidthChroma[k] * l->adj_factor_p1 + cursor_bw[k] * l->adj_factor_cur; l->flip_and_prefetch_bw = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthLuma[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre; - l->flip_and_prefetch_bw_oto = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthOto[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre; + l->flip_and_prefetch_bw_max = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthMax[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre; l->active_and_excess_bw = (ReadBandwidthLuma[k] + excess_vactive_fill_bw_l[k]) * l->tmp_nom_adj_factor_p0 + (ReadBandwidthChroma[k] + excess_vactive_fill_bw_c[k]) * l->tmp_nom_adj_factor_p1 + dpte_row_bw[k] + meta_row_bw[k]; - surface_required_bw[k] = math_max5(l->vm_row_bw, l->flip_and_active_bw, l->flip_and_prefetch_bw, l->active_and_excess_bw, l->flip_and_prefetch_bw_oto); + surface_required_bw[k] = math_max5(l->vm_row_bw, l->flip_and_active_bw, l->flip_and_prefetch_bw, l->active_and_excess_bw, l->flip_and_prefetch_bw_max); /* export peak required bandwidth for the surface */ surface_peak_required_bw[k] = math_max2(surface_required_bw[k], surface_peak_required_bw[k]); @@ -5125,7 +5135,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch s->Tsw_est3 = 0.0; s->cursor_prefetch_bytes = 0; *p->prefetch_cursor_bw = 0; - *p->RequiredPrefetchBWOTO = 0.0; + *p->RequiredPrefetchBWMax = 0.0; dcc_mrq_enable = (p->dcc_enable && p->mrq_present); @@ -5356,7 +5366,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch * mp will fail if ms decides to use equ schedule and mp decides to use oto schedule * and the required bandwidth increases when going from ms to mp */ - *p->RequiredPrefetchBWOTO = s->prefetch_bw_oto; + *p->RequiredPrefetchBWMax = s->prefetch_bw_oto; #ifdef __DML_VBA_DEBUG__ DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_l = %f\n", __func__, p->vactive_sw_bw_l); @@ -5718,8 +5728,14 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch s->TimeForFetchingVM = s->Tvm_equ; s->TimeForFetchingRowInVBlank = s->Tr0_equ; - *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0; - *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0; + *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0; + *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0; + + /* equ bw should be propagated so a ceiling of the equ bw is accounted for prior to mode programming. + * Overall bandwidth may be lower when going from mode support to mode programming but final pixel data + * bandwidth may end up higher than what was calculated in mode support. 
+ */ + *p->RequiredPrefetchBWMax = math_max2(s->prefetch_bw_equ, *p->RequiredPrefetchBWMax); #ifdef __DML_VBA_DEBUG__ DML_LOG_VERBOSE("DML::%s: Using equ bw scheduling for prefetch\n", __func__); @@ -6115,7 +6131,7 @@ static void calculate_peak_bandwidth_required( p->surface_read_bandwidth_c, l->zero_array, //PrefetchBandwidthLuma, l->zero_array, //PrefetchBandwidthChroma, - l->zero_array, //PrefetchBWOTO + l->zero_array, //PrefetchBWMax l->zero_array, l->zero_array, l->zero_array, @@ -6152,7 +6168,7 @@ static void calculate_peak_bandwidth_required( p->surface_read_bandwidth_c, l->zero_array, //PrefetchBandwidthLuma, l->zero_array, //PrefetchBandwidthChroma, - l->zero_array, //PrefetchBWOTO + l->zero_array, //PrefetchBWMax p->excess_vactive_fill_bw_l, p->excess_vactive_fill_bw_c, p->cursor_bw, @@ -6189,7 +6205,7 @@ static void calculate_peak_bandwidth_required( p->surface_read_bandwidth_c, p->prefetch_bandwidth_l, p->prefetch_bandwidth_c, - p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw + p->prefetch_bandwidth_max, // to prevent ms/mp mismatches where mp prefetch bw > ms prefetch bw p->excess_vactive_fill_bw_l, p->excess_vactive_fill_bw_c, p->cursor_bw, @@ -6226,7 +6242,7 @@ static void calculate_peak_bandwidth_required( p->surface_read_bandwidth_c, p->prefetch_bandwidth_l, p->prefetch_bandwidth_c, - p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw + p->prefetch_bandwidth_max, // to prevent ms/mp mismatch where mp prefetch bw > ms prefetch bw p->excess_vactive_fill_bw_l, p->excess_vactive_fill_bw_c, p->cursor_bw, @@ -6263,7 +6279,7 @@ static void calculate_peak_bandwidth_required( p->surface_read_bandwidth_c, p->prefetch_bandwidth_l, p->prefetch_bandwidth_c, - p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw + p->prefetch_bandwidth_max, // to prevent ms/mp mismatches where mp prefetch bw > ms prefetch bw p->excess_vactive_fill_bw_l, p->excess_vactive_fill_bw_c, p->cursor_bw, @@ -7490,7 +7506,7 @@ static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_inter CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->ms.VRatioPreC[k]; CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->ms.RequiredPrefetchPixelDataBWLuma[k]; // prefetch_sw_bw_l CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->ms.RequiredPrefetchPixelDataBWChroma[k]; // prefetch_sw_bw_c - CalculatePrefetchSchedule_params->RequiredPrefetchBWOTO = &mode_lib->ms.RequiredPrefetchBWOTO[k]; + CalculatePrefetchSchedule_params->RequiredPrefetchBWMax = &mode_lib->ms.RequiredPrefetchBWMax[k]; CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->ms.NoTimeForDynamicMetadata[k]; CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->ms.Tno_bw[k]; CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->ms.Tno_bw_flip[k]; @@ -7635,7 +7651,7 @@ static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_inter calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c; calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma; calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma; - calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO; + calculate_peak_bandwidth_params->prefetch_bandwidth_max = mode_lib->ms.RequiredPrefetchBWMax; 
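The CalculatePrefetchSchedule() changes above rename RequiredPrefetchBWOTO to RequiredPrefetchBWMax and, when the equ schedule is selected, fold the equ bandwidth in with math_max2(), so mode support validates the ceiling of both candidate schedules and mode programming cannot end up demanding more urgent bandwidth than was checked. A stand-alone sketch of that ceiling, with illustrative names only:

static double prefetch_bw_ceiling(double prefetch_bw_oto, double prefetch_bw_equ)
{
	/* Whichever schedule mode programming later picks, its bandwidth
	 * demand is bounded by the value mode support already validated.
	 */
	return prefetch_bw_oto > prefetch_bw_equ ? prefetch_bw_oto
						 : prefetch_bw_equ;
}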
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l; calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c; calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw; @@ -7802,7 +7818,7 @@ static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_inter calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c; calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma; calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma; - calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO; + calculate_peak_bandwidth_params->prefetch_bandwidth_max = mode_lib->ms.RequiredPrefetchBWMax; calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l; calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c; calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw; @@ -7908,6 +7924,7 @@ static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_inter } + static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out_params) { struct dml2_core_internal_display_mode_lib *mode_lib = in_out_params->mode_lib; @@ -11256,7 +11273,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->mp.VRatioPrefetchC[k]; CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->mp.RequiredPrefetchPixelDataBWLuma[k]; CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->mp.RequiredPrefetchPixelDataBWChroma[k]; - CalculatePrefetchSchedule_params->RequiredPrefetchBWOTO = &s->dummy_single_array[0][k]; + CalculatePrefetchSchedule_params->RequiredPrefetchBWMax = &s->dummy_single_array[0][k]; CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->mp.NotEnoughTimeForDynamicMetadata[k]; CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->mp.Tno_bw[k]; CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->mp.Tno_bw_flip[k]; @@ -11399,7 +11416,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->mp.vactive_sw_bw_c; calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->mp.RequiredPrefetchPixelDataBWLuma; calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->mp.RequiredPrefetchPixelDataBWChroma; - calculate_peak_bandwidth_params->prefetch_bandwidth_oto = s->dummy_single_array[0]; + calculate_peak_bandwidth_params->prefetch_bandwidth_max = s->dummy_single_array[0]; calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->mp.excess_vactive_fill_bw_l; calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->mp.excess_vactive_fill_bw_c; calculate_peak_bandwidth_params->cursor_bw = mode_lib->mp.cursor_bw; @@ -11539,7 +11556,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex calculate_peak_bandwidth_params->meta_row_bw = mode_lib->mp.meta_row_bw; calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->mp.prefetch_cursor_bw; calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->mp.prefetch_vmrow_bw; - calculate_peak_bandwidth_params->prefetch_bandwidth_oto = s->dummy_single_array[0]; 
+ calculate_peak_bandwidth_params->prefetch_bandwidth_max = s->dummy_single_array[0]; calculate_peak_bandwidth_params->flip_bw = mode_lib->mp.final_flip_bw; calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->mp.UrgentBurstFactorLuma; calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->mp.UrgentBurstFactorChroma; @@ -11883,7 +11900,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex } //Maximum Bandwidth Used - s->TotalWRBandwidth = 0; + mode_lib->mp.TotalWRBandwidth = 0; for (k = 0; k < display_cfg->num_streams; ++k) { s->WRBandwidth = 0; if (display_cfg->stream_descriptors[k].writeback.active_writebacks_per_stream > 0) { @@ -11892,7 +11909,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex (display_cfg->stream_descriptors[k].timing.h_total * display_cfg->stream_descriptors[k].writeback.writeback_stream[0].input_height / ((double)display_cfg->stream_descriptors[k].timing.pixel_clock_khz / 1000)) * (display_cfg->stream_descriptors[k].writeback.writeback_stream[0].pixel_format == dml2_444_32 ? 4.0 : 8.0); - s->TotalWRBandwidth = s->TotalWRBandwidth + s->WRBandwidth; + mode_lib->mp.TotalWRBandwidth = mode_lib->mp.TotalWRBandwidth + s->WRBandwidth; } } @@ -13062,6 +13079,10 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_10x4; else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_12x4) out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_12x4; + else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_16x4) + out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_16x4; + else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_20x4) + out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_20x4; out->informative.mode_support_info.AlignedYPitch[k] = mode_lib->ms.support.AlignedYPitch[k]; out->informative.mode_support_info.AlignedCPitch[k] = mode_lib->ms.support.AlignedCPitch[k]; @@ -13246,7 +13267,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod out->informative.misc.DisplayPipeLineDeliveryTimeLumaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeLumaPrefetch[k]; out->informative.misc.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeChromaPrefetch[k]; - out->informative.misc.WritebackRequiredBandwidth = mode_lib->scratch.dml_core_mode_programming_locals.TotalWRBandwidth / 1000.0; + out->informative.misc.WritebackRequiredBandwidth = mode_lib->mp.TotalWRBandwidth / 1000.0; out->informative.misc.WritebackAllowDRAMClockChangeEndPosition[k] = mode_lib->mp.WritebackAllowDRAMClockChangeEndPosition[k]; out->informative.misc.WritebackAllowFCLKChangeEndPosition[k] = mode_lib->mp.WritebackAllowFCLKChangeEndPosition[k]; out->informative.misc.DSCCLK_calculated[k] = mode_lib->mp.DSCCLK[k]; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c index 28394de02885..640087e862f8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c @@ -10,7 +10,7 @@ bool dml2_core_create(enum dml2_project_id project_id, struct dml2_core_instance { bool 
result = false; - if (out == 0) + if (!out) return false; memset(out, 0, sizeof(struct dml2_core_instance)); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h index bdee6ad7bc59..ffb8c09f37a5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h @@ -102,6 +102,7 @@ struct dml2_core_internal_DmlPipe { double DCFClkDeepSleep; unsigned int DPPPerSurface; bool ScalerEnabled; + bool UPSPEnabled; enum dml2_rotation_angle RotationAngle; bool mirrored; unsigned int ViewportHeight; @@ -186,7 +187,9 @@ enum dml2_core_internal_output_type_rate { dml2_core_internal_output_rate_hdmi_rate_6x4 = 9, dml2_core_internal_output_rate_hdmi_rate_8x4 = 10, dml2_core_internal_output_rate_hdmi_rate_10x4 = 11, - dml2_core_internal_output_rate_hdmi_rate_12x4 = 12 + dml2_core_internal_output_rate_hdmi_rate_12x4 = 12, + dml2_core_internal_output_rate_hdmi_rate_16x4 = 13, + dml2_core_internal_output_rate_hdmi_rate_20x4 = 14 }; struct dml2_core_internal_watermarks { @@ -198,6 +201,8 @@ struct dml2_core_internal_watermarks { double WritebackFCLKChangeWatermark; double StutterExitWatermark; double StutterEnterPlusExitWatermark; + double LowPowerStutterExitWatermark; + double LowPowerStutterEnterPlusExitWatermark; double Z8StutterExitWatermark; double Z8StutterEnterPlusExitWatermark; double USRRetrainingWatermark; @@ -260,12 +265,14 @@ struct dml2_core_internal_mode_support_info { bool AvgBandwidthSupport; bool UrgVactiveBandwidthSupport; bool EnoughUrgentLatencyHidingSupport; + bool PrefetchScheduleSupported; bool PrefetchSupported; bool PrefetchBandwidthSupported; bool DynamicMetadataSupported; bool VRatioInPrefetchSupported; bool DISPCLK_DPPCLK_Support; bool TotalAvailablePipesSupport; + bool ODMSupport; bool ModeSupport; bool ViewportSizeSupport; @@ -314,9 +321,7 @@ struct dml2_core_internal_mode_support_info { double non_urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]; // same as urg_bandwidth, except not scaled by urg burst factor double non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]; - bool avg_bandwidth_support_ok[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]; - double max_urgent_latency_us; double max_non_urgent_latency_us; double avg_non_urgent_latency_us; @@ -329,6 +334,8 @@ struct dml2_core_internal_mode_support_info { bool temp_read_or_ppt_support; struct dml2_core_internal_watermarks watermarks; + bool dcfclk_support; + bool qos_bandwidth_support; }; struct dml2_core_internal_mode_support { @@ -350,9 +357,11 @@ struct dml2_core_internal_mode_support { double SOCCLK; /// <brief Basically just the clock freq at the min (or given) state double DCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting double GlobalDPPCLK; /// <brief the Max DPPCLK freq out of all pipes + double GlobalDTBCLK; /// <brief the Max DTBCLK freq out of all pipes double uclk_freq_mhz; double dram_bw_mbps; double max_dram_bw_mbps; + double min_available_urgent_bandwidth_MBps; /// <brief Minimum guaranteed available urgent return bandwidth in MBps double MaxFabricClock; /// <brief Basically just the clock freq at the min (or given) state double MaxDCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting @@ 
-394,9 +403,13 @@ struct dml2_core_internal_mode_support { double TWait[DML2_MAX_PLANES]; bool UnboundedRequestEnabled; + unsigned int compbuf_reserved_space_64b; + bool hw_debug5; unsigned int CompressedBufferSizeInkByte; double VRatioPreY[DML2_MAX_PLANES]; double VRatioPreC[DML2_MAX_PLANES]; + unsigned int req_per_swath_ub_l[DML2_MAX_PLANES]; + unsigned int req_per_swath_ub_c[DML2_MAX_PLANES]; unsigned int swath_width_luma_ub[DML2_MAX_PLANES]; unsigned int swath_width_chroma_ub[DML2_MAX_PLANES]; unsigned int RequiredSlots[DML2_MAX_PLANES]; @@ -417,8 +430,8 @@ struct dml2_core_internal_mode_support { double dst_y_prefetch[DML2_MAX_PLANES]; double LinesForVM[DML2_MAX_PLANES]; double LinesForDPTERow[DML2_MAX_PLANES]; - double SwathWidthYSingleDPP[DML2_MAX_PLANES]; - double SwathWidthCSingleDPP[DML2_MAX_PLANES]; + unsigned int SwathWidthYSingleDPP[DML2_MAX_PLANES]; + unsigned int SwathWidthCSingleDPP[DML2_MAX_PLANES]; unsigned int BytePerPixelY[DML2_MAX_PLANES]; unsigned int BytePerPixelC[DML2_MAX_PLANES]; double BytePerPixelInDETY[DML2_MAX_PLANES]; @@ -469,13 +482,58 @@ struct dml2_core_internal_mode_support { double mall_prefetch_sdp_overhead_factor[DML2_MAX_PLANES]; // overhead to the imall or phantom pipe double mall_prefetch_dram_overhead_factor[DML2_MAX_PLANES]; + bool is_using_mall_for_ss[DML2_MAX_PLANES]; + unsigned int meta_row_width_chroma[DML2_MAX_PLANES]; + unsigned int PixelPTEReqHeightC[DML2_MAX_PLANES]; + bool PTE_BUFFER_MODE[DML2_MAX_PLANES]; + unsigned int meta_req_height_chroma[DML2_MAX_PLANES]; + unsigned int meta_pte_bytes_per_frame_ub_c[DML2_MAX_PLANES]; + unsigned int dpde0_bytes_per_frame_ub_c[DML2_MAX_PLANES]; + unsigned int dpte_row_width_luma_ub[DML2_MAX_PLANES]; + unsigned int meta_req_width[DML2_MAX_PLANES]; + unsigned int meta_row_width[DML2_MAX_PLANES]; + unsigned int PixelPTEReqWidthY[DML2_MAX_PLANES]; + unsigned int dpte_row_height_linear[DML2_MAX_PLANES]; + unsigned int PTERequestSizeY[DML2_MAX_PLANES]; + unsigned int dpte_row_width_chroma_ub[DML2_MAX_PLANES]; + unsigned int PixelPTEReqWidthC[DML2_MAX_PLANES]; + unsigned int meta_pte_bytes_per_frame_ub_l[DML2_MAX_PLANES]; + unsigned int dpte_row_height_linear_chroma[DML2_MAX_PLANES]; + unsigned int PTERequestSizeC[DML2_MAX_PLANES]; + unsigned int meta_req_height[DML2_MAX_PLANES]; + unsigned int dpde0_bytes_per_frame_ub_l[DML2_MAX_PLANES]; + unsigned int meta_req_width_chroma[DML2_MAX_PLANES]; + unsigned int PixelPTEReqHeightY[DML2_MAX_PLANES]; + unsigned int BIGK_FRAGMENT_SIZE[DML2_MAX_PLANES]; + unsigned int vm_group_bytes[DML2_MAX_PLANES]; + unsigned int VReadyOffsetPix[DML2_MAX_PLANES]; + unsigned int VUpdateOffsetPix[DML2_MAX_PLANES]; + unsigned int VUpdateWidthPix[DML2_MAX_PLANES]; + double TSetup[DML2_MAX_PLANES]; + double Tdmdl_vm_raw[DML2_MAX_PLANES]; + double Tdmdl_raw[DML2_MAX_PLANES]; + unsigned int VStartupMin[DML2_MAX_PLANES]; /// <brief Minimum vstartup to meet the prefetch schedule (i.e. the prefetch solution can be found at this vstartup time); not the actual global sync vstartup pos. 
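The new fields above follow the existing DML convention of per-plane bookkeeping arrays sized DML2_MAX_PLANES and indexed by the plane's position in the display configuration. As an illustration only (not the actual DML check), a consumer comparing the newly exported VStartupMin against MaximumVStartup, added a few lines further down, could look like this:

bool all_prefetch_schedules_fit(const struct dml2_core_internal_mode_support *ms,
				unsigned int num_active_planes)
{
	unsigned int k;

	/* A prefetch schedule fits when the minimum vstartup it needs does
	 * not exceed the vstartup lines the timing can provide.
	 */
	for (k = 0; k < num_active_planes; k++) {
		if (ms->VStartupMin[k] > ms->MaximumVStartup[k])
			return false;
	}
	return true;
}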
+ double MaxActiveDRAMClockChangeLatencySupported[DML2_MAX_PLANES]; + double MaxActiveFCLKChangeLatencySupported; + // Backend bool RequiresDSC[DML2_MAX_PLANES]; bool RequiresFEC[DML2_MAX_PLANES]; double OutputBpp[DML2_MAX_PLANES]; + double DesiredOutputBpp[DML2_MAX_PLANES]; + double PixelClockBackEnd[DML2_MAX_PLANES]; unsigned int DSCDelay[DML2_MAX_PLANES]; enum dml2_core_internal_output_type OutputType[DML2_MAX_PLANES]; enum dml2_core_internal_output_type_rate OutputRate[DML2_MAX_PLANES]; + bool TotalAvailablePipesSupportNoDSC; + bool TotalAvailablePipesSupportDSC; + unsigned int NumberOfDPPNoDSC; + unsigned int NumberOfDPPDSC; + enum dml2_odm_mode ODMModeNoDSC; + enum dml2_odm_mode ODMModeDSC; + double RequiredDISPCLKPerSurfaceNoDSC; + double RequiredDISPCLKPerSurfaceDSC; + unsigned int EstimatedNumberOfDSCSlices[DML2_MAX_PLANES]; // Bandwidth Related Info double BandwidthAvailableForImmediateFlip; @@ -484,8 +542,14 @@ struct dml2_core_internal_mode_support { double WriteBandwidth[DML2_MAX_PLANES][DML2_MAX_WRITEBACK]; double RequiredPrefetchPixelDataBWLuma[DML2_MAX_PLANES]; double RequiredPrefetchPixelDataBWChroma[DML2_MAX_PLANES]; - /* oto bw should also be considered when calculating peak urgent bw to avoid situations oto/equ mismatches between ms and mp */ - double RequiredPrefetchBWOTO[DML2_MAX_PLANES]; + /* Max bandwidth calculated from prefetch schedule should be considered in addition to the pixel data bw to avoid ms/mp mismatches. + * 1. oto bw should also be considered when calculating peak urgent bw to avoid situations oto/equ mismatches between ms and mp + * + * 2. equ bandwidth needs to be considered for calculating peak urgent bw when equ schedule is used in mode support. + * Some slight difference in variables may cause the pixel data bandwidth to be higher + * even though overall equ prefetch bandwidths can be lower going from ms to mp + */ + double RequiredPrefetchBWMax[DML2_MAX_PLANES]; double cursor_bw[DML2_MAX_PLANES]; double prefetch_cursor_bw[DML2_MAX_PLANES]; double prefetch_vmrow_bw[DML2_MAX_PLANES]; @@ -538,7 +602,44 @@ struct dml2_core_internal_mode_support { bool mall_comb_mcache_c[DML2_MAX_PLANES]; bool lc_comb_mcache[DML2_MAX_PLANES]; + unsigned int vmpg_width_y[DML2_MAX_PLANES]; + unsigned int vmpg_height_y[DML2_MAX_PLANES]; + unsigned int vmpg_width_c[DML2_MAX_PLANES]; + unsigned int vmpg_height_c[DML2_MAX_PLANES]; + + unsigned int meta_row_height_luma[DML2_MAX_PLANES]; + unsigned int meta_row_height_chroma[DML2_MAX_PLANES]; + unsigned int meta_row_bytes_per_row_ub_l[DML2_MAX_PLANES]; + unsigned int meta_row_bytes_per_row_ub_c[DML2_MAX_PLANES]; + unsigned int dpte_row_bytes_per_row_l[DML2_MAX_PLANES]; + unsigned int dpte_row_bytes_per_row_c[DML2_MAX_PLANES]; + + unsigned int pstate_bytes_required_l[DML2_MAX_PLANES]; + unsigned int pstate_bytes_required_c[DML2_MAX_PLANES]; + unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES]; + unsigned int cursor_bytes_per_line[DML2_MAX_PLANES]; + + unsigned int MaximumVStartup[DML2_MAX_PLANES]; + + double HostVMInefficiencyFactor; + double HostVMInefficiencyFactorPrefetch; + + unsigned int tdlut_pte_bytes_per_frame[DML2_MAX_PLANES]; + unsigned int tdlut_bytes_per_frame[DML2_MAX_PLANES]; + unsigned int tdlut_groups_per_2row_ub[DML2_MAX_PLANES]; + double tdlut_opt_time[DML2_MAX_PLANES]; + double tdlut_drain_time[DML2_MAX_PLANES]; + unsigned int tdlut_bytes_per_group[DML2_MAX_PLANES]; + + double Tvm_trips_flip[DML2_MAX_PLANES]; + double Tr0_trips_flip[DML2_MAX_PLANES]; + double Tvm_trips_flip_rounded[DML2_MAX_PLANES]; + 
double Tr0_trips_flip_rounded[DML2_MAX_PLANES]; + unsigned int DSTYAfterScaler[DML2_MAX_PLANES]; + unsigned int DSTXAfterScaler[DML2_MAX_PLANES]; + + enum dml2_pstate_method pstate_switch_modes[DML2_MAX_PLANES]; }; /// @brief A mega structure that houses various info for model programming step. @@ -548,6 +649,7 @@ struct dml2_core_internal_mode_program { double FabricClock; /// <brief Basically just the clock freq at the min (or given) state //double DCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting double dram_bw_mbps; + double min_available_urgent_bandwidth_MBps; /// <brief Minimum guaranteed available urgent return bandwidth in MBps double uclk_freq_mhz; unsigned int NoOfDPP[DML2_MAX_PLANES]; enum dml2_odm_mode ODMMode[DML2_MAX_PLANES]; @@ -599,6 +701,8 @@ struct dml2_core_internal_mode_program { unsigned int MacroTileHeightC[DML2_MAX_PLANES]; unsigned int MacroTileWidthY[DML2_MAX_PLANES]; unsigned int MacroTileWidthC[DML2_MAX_PLANES]; + double MaximumSwathWidthLuma[DML2_MAX_PLANES]; + double MaximumSwathWidthChroma[DML2_MAX_PLANES]; bool surf_linear128_l[DML2_MAX_PLANES]; bool surf_linear128_c[DML2_MAX_PLANES]; @@ -631,6 +735,14 @@ struct dml2_core_internal_mode_program { double UrgentBurstFactorChroma[DML2_MAX_PLANES]; double UrgentBurstFactorChromaPre[DML2_MAX_PLANES]; + double MaximumSwathWidthInLineBufferLuma; + double MaximumSwathWidthInLineBufferChroma; + + unsigned int vmpg_width_y[DML2_MAX_PLANES]; + unsigned int vmpg_height_y[DML2_MAX_PLANES]; + unsigned int vmpg_width_c[DML2_MAX_PLANES]; + unsigned int vmpg_height_c[DML2_MAX_PLANES]; + double meta_row_bw[DML2_MAX_PLANES]; unsigned int meta_row_bytes[DML2_MAX_PLANES]; unsigned int meta_req_width[DML2_MAX_PLANES]; @@ -652,7 +764,9 @@ struct dml2_core_internal_mode_program { unsigned int PTERequestSizeC[DML2_MAX_PLANES]; double TWait[DML2_MAX_PLANES]; + double Tdmdl_vm_raw[DML2_MAX_PLANES]; double Tdmdl_vm[DML2_MAX_PLANES]; + double Tdmdl_raw[DML2_MAX_PLANES]; double Tdmdl[DML2_MAX_PLANES]; double TSetup[DML2_MAX_PLANES]; unsigned int dpde0_bytes_per_frame_ub_l[DML2_MAX_PLANES]; @@ -684,6 +798,38 @@ struct dml2_core_internal_mode_program { double TCalc; unsigned int TotImmediateFlipBytes; + unsigned int MaxTotalDETInKByte; + unsigned int NomDETInKByte; + unsigned int MinCompressedBufferSizeInKByte; + double PixelClockBackEnd[DML2_MAX_PLANES]; + double OutputBpp[DML2_MAX_PLANES]; + bool dsc_enable[DML2_MAX_PLANES]; + unsigned int num_dsc_slices[DML2_MAX_PLANES]; + unsigned int meta_row_bytes_per_row_ub_l[DML2_MAX_PLANES]; + unsigned int meta_row_bytes_per_row_ub_c[DML2_MAX_PLANES]; + unsigned int dpte_row_bytes_per_row_l[DML2_MAX_PLANES]; + unsigned int dpte_row_bytes_per_row_c[DML2_MAX_PLANES]; + unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES]; + unsigned int cursor_bytes_per_line[DML2_MAX_PLANES]; + unsigned int MaxVStartupLines[DML2_MAX_PLANES]; /// <brief more like vblank for the plane's OTG + double HostVMInefficiencyFactor; + double HostVMInefficiencyFactorPrefetch; + unsigned int tdlut_pte_bytes_per_frame[DML2_MAX_PLANES]; + unsigned int tdlut_bytes_per_frame[DML2_MAX_PLANES]; + unsigned int tdlut_groups_per_2row_ub[DML2_MAX_PLANES]; + double tdlut_opt_time[DML2_MAX_PLANES]; + double tdlut_drain_time[DML2_MAX_PLANES]; + unsigned int tdlut_bytes_per_group[DML2_MAX_PLANES]; + double Tvm_trips_flip[DML2_MAX_PLANES]; + double Tr0_trips_flip[DML2_MAX_PLANES]; + double Tvm_trips_flip_rounded[DML2_MAX_PLANES]; + double Tr0_trips_flip_rounded[DML2_MAX_PLANES]; + bool 
immediate_flip_required; // any pipes need immediate flip + double SOCCLK; /// <brief Basically just the clock freq at the min (or given) state + double TotalWRBandwidth; + double max_urgent_latency_us; + double df_response_time_us; + // ------------------- // Output // ------------------- @@ -694,9 +840,12 @@ struct dml2_core_internal_mode_program { // Support bool UrgVactiveBandwidthSupport; + bool PrefetchScheduleSupported; + bool UrgentBandwidthSupport; bool PrefetchModeSupported; // <brief Is the prefetch mode (bandwidth and latency) supported bool ImmediateFlipSupported; bool ImmediateFlipSupportedForPipe[DML2_MAX_PLANES]; + bool dcfclk_support; // Clock double Dcfclk; @@ -730,6 +879,9 @@ struct dml2_core_internal_mode_program { double Z8StutterEfficiency; unsigned int Z8NumberOfStutterBurstsPerFrame; double Z8StutterEfficiencyNotIncludingVBlank; + double LowPowerStutterEfficiency; + double LowPowerStutterEfficiencyNotIncludingVBlank; + unsigned int LowPowerNumberOfStutterBurstsPerFrame; double StutterPeriod; double Z8StutterEfficiencyBestCase; unsigned int Z8NumberOfStutterBurstsPerFrameBestCase; @@ -788,7 +940,7 @@ struct dml2_core_internal_mode_program { // RQ registers bool PTE_BUFFER_MODE[DML2_MAX_PLANES]; unsigned int BIGK_FRAGMENT_SIZE[DML2_MAX_PLANES]; - + double VActiveLatencyHidingUs[DML2_MAX_PLANES]; unsigned int SubViewportLinesNeededInMALL[DML2_MAX_PLANES]; bool is_using_mall_for_ss[DML2_MAX_PLANES]; @@ -869,6 +1021,8 @@ struct dml2_core_internal_SOCParametersList { double FCLKChangeLatency; double SRExitTime; double SREnterPlusExitTime; + double SRExitTimeLowPower; + double SREnterPlusExitTimeLowPower; double SRExitZ8Time; double SREnterPlusExitZ8Time; double USRRetrainingLatency; @@ -1001,10 +1155,10 @@ struct dml2_core_calcs_mode_programming_locals { double dummy_bw[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]; double surface_dummy_bw[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max][DML2_MAX_PLANES]; double surface_dummy_bw0[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max][DML2_MAX_PLANES]; - unsigned int dummy_integer_array[2][DML2_MAX_PLANES]; + unsigned int dummy_integer_array[4][DML2_MAX_PLANES]; enum dml2_output_encoder_class dummy_output_encoder_array[DML2_MAX_PLANES]; double dummy_single_array[2][DML2_MAX_PLANES]; - unsigned int dummy_long_array[4][DML2_MAX_PLANES]; + unsigned int dummy_long_array[8][DML2_MAX_PLANES]; bool dummy_boolean_array[2][DML2_MAX_PLANES]; bool dummy_boolean[2]; double dummy_single[2]; @@ -1028,7 +1182,6 @@ struct dml2_core_calcs_mode_programming_locals { double dlg_vblank_start; double LSetup; double blank_lines_remaining; - double TotalWRBandwidth; double WRBandwidth; struct dml2_core_internal_DmlPipe myPipe; double PixelClockBackEndFactor; @@ -1153,6 +1306,7 @@ struct dml2_core_calcs_CalculateVMRowAndSwath_params { unsigned int HostVMMinPageSize; unsigned int DCCMetaBufferSizeBytes; bool mrq_present; + enum dml2_pstate_method pstate_switch_modes[DML2_MAX_PLANES]; // Output bool *PTEBufferSizeNotExceeded; @@ -1389,7 +1543,7 @@ struct dml2_core_shared_get_urgent_bandwidth_required_locals { double vm_row_bw; double flip_and_active_bw; double flip_and_prefetch_bw; - double flip_and_prefetch_bw_oto; + double flip_and_prefetch_bw_max; double active_and_excess_bw; }; @@ -1418,6 +1572,7 @@ struct dml2_core_shared_CalculateFlipSchedule_locals { struct dml2_core_shared_rq_dlg_get_dlg_reg_locals { unsigned int plane_idx; + unsigned int stream_idx; enum dml2_source_format_class source_format; const struct 
dml2_timing_cfg *timing; bool dual_plane; @@ -1625,6 +1780,9 @@ struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params { double *BytePerPixDETC; unsigned int *DPPPerSurface; bool mrq_present; + unsigned int dummy[2][DML2_MAX_PLANES]; + unsigned int swath_width_luma_ub_single_dpp[DML2_MAX_PLANES]; + unsigned int swath_width_chroma_ub_single_dpp[DML2_MAX_PLANES]; // output unsigned int *req_per_swath_ub_l; @@ -1642,6 +1800,8 @@ struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params { unsigned int *DETBufferSizeC; unsigned int *full_swath_bytes_l; unsigned int *full_swath_bytes_c; + unsigned int *full_swath_bytes_single_dpp_l; + unsigned int *full_swath_bytes_single_dpp_c; bool *UnboundedRequestEnabled; unsigned int *compbuf_reserved_space_64b; unsigned int *CompressedBufferSizeInkByte; @@ -1698,9 +1858,11 @@ struct dml2_core_calcs_CalculateStutterEfficiency_params { unsigned int CompbufReservedSpaceZs; bool hw_debug5; double SRExitTime; + double SRExitTimeLowPower; double SRExitZ8Time; bool SynchronizeTimings; double StutterEnterPlusExitWatermark; + double LowPowerStutterEnterPlusExitWatermark; double Z8StutterEnterPlusExitWatermark; bool ProgressiveToInterlaceUnitInOPP; double *MinTTUVBlank; @@ -1726,7 +1888,10 @@ struct dml2_core_calcs_CalculateStutterEfficiency_params { // output double *StutterEfficiencyNotIncludingVBlank; double *StutterEfficiency; + double *LowPowerStutterEfficiencyNotIncludingVBlank; + double *LowPowerStutterEfficiency; unsigned int *NumberOfStutterBurstsPerFrame; + unsigned int *LowPowerNumberOfStutterBurstsPerFrame; double *Z8StutterEfficiencyNotIncludingVBlank; double *Z8StutterEfficiency; unsigned int *Z8NumberOfStutterBurstsPerFrame; @@ -1801,7 +1966,7 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_params { double *VRatioPrefetchC; double *RequiredPrefetchPixelDataBWLuma; double *RequiredPrefetchPixelDataBWChroma; - double *RequiredPrefetchBWOTO; + double *RequiredPrefetchBWMax; bool *NotEnoughTimeForDynamicMetadata; double *Tno_bw; double *Tno_bw_flip; @@ -2038,7 +2203,7 @@ struct dml2_core_calcs_calculate_peak_bandwidth_required_params { double *surface_read_bandwidth_c; double *prefetch_bandwidth_l; double *prefetch_bandwidth_c; - double *prefetch_bandwidth_oto; + double *prefetch_bandwidth_max; double *excess_vactive_fill_bw_l; double *excess_vactive_fill_bw_c; double *cursor_bw; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c index 7a220c0141c2..5f301befed16 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c @@ -464,7 +464,7 @@ bool dml2_core_utils_get_segment_horizontal_contiguous(enum dml2_swizzle_mode sw bool dml2_core_utils_is_linear(enum dml2_swizzle_mode sw_mode) { - return (sw_mode == dml2_sw_linear || sw_mode == dml2_sw_linear_256b || sw_mode == dml2_linear_64elements); + return sw_mode == dml2_sw_linear; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c index f486b090bbfc..22969a533a7b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c @@ -389,9 +389,6 @@ static bool map_min_clocks_to_dpm(const struct dml2_core_mode_support_result *mo if (result) result = 
round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.dispclk_khz, &state_table->dispclk); - if (result) - result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.deepsleep_dcfclk_khz, &state_table->dcfclk); - for (i = 0; i < DML2_MAX_DCN_PIPES; i++) { if (result) result = round_up_to_next_dpm(&display_cfg->plane_programming[i].min_clocks.dcn4x.dppclk_khz, &state_table->dppclk); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c index 3861bc6c9621..dfd01440737d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c @@ -20,7 +20,7 @@ bool dml2_dpmm_create(enum dml2_project_id project_id, struct dml2_dpmm_instance { bool result = false; - if (out == 0) + if (!out) return false; memset(out, 0, sizeof(struct dml2_dpmm_instance)); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c index cd3fbc0591d8..c60b8fe90819 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c @@ -15,7 +15,7 @@ bool dml2_mcg_create(enum dml2_project_id project_id, struct dml2_mcg_instance * { bool result = false; - if (out == 0) + if (!out) return false; memset(out, 0, sizeof(struct dml2_mcg_instance)); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c index e763c8e45da8..1b9579a32ff2 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c @@ -48,18 +48,19 @@ static void set_reserved_time_on_all_planes_with_stream_index(struct display_con static void remove_duplicates(double *list_a, int *list_a_size) { - int cur_element = 0; - // For all elements b[i] in list_b[] - while (cur_element < *list_a_size - 1) { - if (list_a[cur_element] == list_a[cur_element + 1]) { - for (int j = cur_element + 1; j < *list_a_size - 1; j++) { - list_a[j] = list_a[j + 1]; - } - *list_a_size = *list_a_size - 1; - } else { - cur_element++; + int j = 0; + + if (*list_a_size == 0) + return; + + for (int i = 1; i < *list_a_size; i++) { + if (list_a[j] != list_a[i]) { + j++; + list_a[j] = list_a[i]; } } + + *list_a_size = j + 1; } static bool increase_mpc_combine_factor(unsigned int *mpc_combine_factor, unsigned int limit) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c index 7ed0242a4b33..55d2464365d0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c @@ -26,7 +26,7 @@ bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance * { bool result = false; - if (out == 0) + if (!out) return false; memset(out, 0, sizeof(struct dml2_pmo_instance)); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h index b226225103c3..611c80f4f1bf 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h +++ 
b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h @@ -10,15 +10,74 @@ #define DML_LOG_LEVEL_DEFAULT DML_LOG_LEVEL_WARN #define DML_LOG_INTERNAL(fmt, ...) dm_output_to_console(fmt, ## __VA_ARGS__) -/* ASSERT with message output */ -#define DML_ASSERT_MSG(condition, fmt, ...) \ - do { \ - if (!(condition)) { \ - DML_LOG_ERROR("DML ASSERT hit in %s line %d\n", __func__, __LINE__); \ - DML_LOG_ERROR(fmt, ## __VA_ARGS__); \ - DML_ASSERT(condition); \ - } \ - } while (0) +/* private helper macros */ +#define _BOOL_FORMAT(field) "%s", field ? "true" : "false" +#define _UINT_FORMAT(field) "%u", field +#define _INT_FORMAT(field) "%d", field +#define _DOUBLE_FORMAT(field) "%lf", field +#define _ELEMENT_FUNC "function" +#define _ELEMENT_COMP_IF "component_interface" +#define _ELEMENT_TOP_IF "top_interface" +#define _LOG_ENTRY(element) do { \ + DML_LOG_INTERNAL("<"element" name=\""); \ + DML_LOG_INTERNAL(__func__); \ + DML_LOG_INTERNAL("\">\n"); \ +} while (0) +#define _LOG_EXIT(element) DML_LOG_INTERNAL("</"element">\n") +#define _LOG_SCALAR(field, format) do { \ + DML_LOG_INTERNAL(#field" = "format(field)); \ + DML_LOG_INTERNAL("\n"); \ +} while (0) +#define _LOG_ARRAY(field, size, format) do { \ + DML_LOG_INTERNAL(#field " = ["); \ + for (int _i = 0; _i < (int) size; _i++) { \ + DML_LOG_INTERNAL(format(field[_i])); \ + if (_i + 1 == (int) size) \ + DML_LOG_INTERNAL("]\n"); \ + else \ + DML_LOG_INTERNAL(", "); \ +}} while (0) +#define _LOG_2D_ARRAY(field, size0, size1, format) do { \ + DML_LOG_INTERNAL(#field" = ["); \ + for (int _i = 0; _i < (int) size0; _i++) { \ + DML_LOG_INTERNAL("\n\t["); \ + for (int _j = 0; _j < (int) size1; _j++) { \ + DML_LOG_INTERNAL(format(field[_i][_j])); \ + if (_j + 1 == (int) size1) \ + DML_LOG_INTERNAL("]"); \ + else \ + DML_LOG_INTERNAL(", "); \ + } \ + if (_i + 1 == (int) size0) \ + DML_LOG_INTERNAL("]\n"); \ + else \ + DML_LOG_INTERNAL(", "); \ + } \ +} while (0) +#define _LOG_3D_ARRAY(field, size0, size1, size2, format) do { \ + DML_LOG_INTERNAL(#field" = ["); \ + for (int _i = 0; _i < (int) size0; _i++) { \ + DML_LOG_INTERNAL("\n\t["); \ + for (int _j = 0; _j < (int) size1; _j++) { \ + DML_LOG_INTERNAL("["); \ + for (int _k = 0; _k < (int) size2; _k++) { \ + DML_LOG_INTERNAL(format(field[_i][_j][_k])); \ + if (_k + 1 == (int) size2) \ + DML_LOG_INTERNAL("]"); \ + else \ + DML_LOG_INTERNAL(", "); \ + } \ + if (_j + 1 == (int) size1) \ + DML_LOG_INTERNAL("]"); \ + else \ + DML_LOG_INTERNAL(", "); \ + } \ + if (_i + 1 == (int) size0) \ + DML_LOG_INTERNAL("]\n"); \ + else \ + DML_LOG_INTERNAL(", "); \ + } \ +} while (0) /* fatal errors for unrecoverable DML states until a full reset */ #define DML_LOG_LEVEL_FATAL 0 @@ -28,7 +87,7 @@ #define DML_LOG_LEVEL_WARN 2 /* high level tracing of DML interfaces */ #define DML_LOG_LEVEL_INFO 3 -/* detailed tracing of DML internal components */ +/* tracing of DML internal executions */ #define DML_LOG_LEVEL_DEBUG 4 /* detailed tracing of DML calculation procedure */ #define DML_LOG_LEVEL_VERBOSE 5 @@ -37,30 +96,94 @@ #define DML_LOG_LEVEL DML_LOG_LEVEL_DEFAULT #endif /* #ifndef DML_LOG_LEVEL */ +/* public macros for DML_LOG_LEVEL_FATAL and up */ #define DML_LOG_FATAL(fmt, ...) DML_LOG_INTERNAL("[DML FATAL] " fmt, ## __VA_ARGS__) + +/* public macros for DML_LOG_LEVEL_ERROR and up */ #if DML_LOG_LEVEL >= DML_LOG_LEVEL_ERROR #define DML_LOG_ERROR(fmt, ...) DML_LOG_INTERNAL("[DML ERROR] "fmt, ## __VA_ARGS__) +#define DML_ASSERT_MSG(condition, fmt, ...) 
\ + do { \ + if (!(condition)) { \ + DML_LOG_ERROR("ASSERT hit in %s line %d\n", __func__, __LINE__); \ + DML_LOG_ERROR(fmt, ## __VA_ARGS__); \ + DML_ASSERT(condition); \ + } \ + } while (0) #else #define DML_LOG_ERROR(fmt, ...) ((void)0) +#define DML_ASSERT_MSG(condition, fmt, ...) ((void)0) #endif + +/* public macros for DML_LOG_LEVEL_WARN and up */ #if DML_LOG_LEVEL >= DML_LOG_LEVEL_WARN #define DML_LOG_WARN(fmt, ...) DML_LOG_INTERNAL("[DML WARN] "fmt, ## __VA_ARGS__) #else #define DML_LOG_WARN(fmt, ...) ((void)0) #endif + +/* public macros for DML_LOG_LEVEL_INFO and up */ #if DML_LOG_LEVEL >= DML_LOG_LEVEL_INFO #define DML_LOG_INFO(fmt, ...) DML_LOG_INTERNAL("[DML INFO] "fmt, ## __VA_ARGS__) +#define DML_LOG_TOP_IF_ENTER() _LOG_ENTRY(_ELEMENT_TOP_IF) +#define DML_LOG_TOP_IF_EXIT() _LOG_EXIT(_ELEMENT_TOP_IF) #else #define DML_LOG_INFO(fmt, ...) ((void)0) +#define DML_LOG_TOP_IF_ENTER() ((void)0) +#define DML_LOG_TOP_IF_EXIT() ((void)0) #endif + +/* public macros for DML_LOG_LEVEL_DEBUG and up */ #if DML_LOG_LEVEL >= DML_LOG_LEVEL_DEBUG -#define DML_LOG_DEBUG(fmt, ...) DML_LOG_INTERNAL("[DML DEBUG] "fmt, ## __VA_ARGS__) +#define DML_LOG_DEBUG(fmt, ...) DML_LOG_INTERNAL(fmt, ## __VA_ARGS__) +#define DML_LOG_COMP_IF_ENTER() _LOG_ENTRY(_ELEMENT_COMP_IF) +#define DML_LOG_COMP_IF_EXIT() _LOG_EXIT(_ELEMENT_COMP_IF) +#define DML_LOG_FUNC_ENTER() _LOG_ENTRY(_ELEMENT_FUNC) +#define DML_LOG_FUNC_EXIT() _LOG_EXIT(_ELEMENT_FUNC) +#define DML_LOG_DEBUG_BOOL(field) _LOG_SCALAR(field, _BOOL_FORMAT) +#define DML_LOG_DEBUG_UINT(field) _LOG_SCALAR(field, _UINT_FORMAT) +#define DML_LOG_DEBUG_INT(field) _LOG_SCALAR(field, _INT_FORMAT) +#define DML_LOG_DEBUG_DOUBLE(field) _LOG_SCALAR(field, _DOUBLE_FORMAT) +#define DML_LOG_DEBUG_ARRAY_BOOL(field, size) _LOG_ARRAY(field, size, _BOOL_FORMAT) +#define DML_LOG_DEBUG_ARRAY_UINT(field, size) _LOG_ARRAY(field, size, _UINT_FORMAT) +#define DML_LOG_DEBUG_ARRAY_INT(field, size) _LOG_ARRAY(field, size, _INT_FORMAT) +#define DML_LOG_DEBUG_ARRAY_DOUBLE(field, size) _LOG_ARRAY(field, size, _DOUBLE_FORMAT) +#define DML_LOG_DEBUG_2D_ARRAY_BOOL(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _BOOL_FORMAT) +#define DML_LOG_DEBUG_2D_ARRAY_UINT(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _UINT_FORMAT) +#define DML_LOG_DEBUG_2D_ARRAY_INT(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _INT_FORMAT) +#define DML_LOG_DEBUG_2D_ARRAY_DOUBLE(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _DOUBLE_FORMAT) +#define DML_LOG_DEBUG_3D_ARRAY_BOOL(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _BOOL_FORMAT) +#define DML_LOG_DEBUG_3D_ARRAY_UINT(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _UINT_FORMAT) +#define DML_LOG_DEBUG_3D_ARRAY_INT(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _INT_FORMAT) +#define DML_LOG_DEBUG_3D_ARRAY_DOUBLE(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _DOUBLE_FORMAT) #else #define DML_LOG_DEBUG(fmt, ...) 
((void)0) +#define DML_LOG_COMP_IF_ENTER() ((void)0) +#define DML_LOG_COMP_IF_EXIT() ((void)0) +#define DML_LOG_FUNC_ENTER() ((void)0) +#define DML_LOG_FUNC_EXIT() ((void)0) +#define DML_LOG_DEBUG_BOOL(field) ((void)0) +#define DML_LOG_DEBUG_UINT(field) ((void)0) +#define DML_LOG_DEBUG_INT(field) ((void)0) +#define DML_LOG_DEBUG_DOUBLE(field) ((void)0) +#define DML_LOG_DEBUG_ARRAY_BOOL(field, size) ((void)0) +#define DML_LOG_DEBUG_ARRAY_UINT(field, size) ((void)0) +#define DML_LOG_DEBUG_ARRAY_INT(field, size) ((void)0) +#define DML_LOG_DEBUG_ARRAY_DOUBLE(field, size) ((void)0) +#define DML_LOG_DEBUG_2D_ARRAY_BOOL(field, size0, size1) ((void)0) +#define DML_LOG_DEBUG_2D_ARRAY_UINT(field, size0, size1) ((void)0) +#define DML_LOG_DEBUG_2D_ARRAY_INT(field, size0, size1) ((void)0) +#define DML_LOG_DEBUG_2D_ARRAY_DOUBLE(field, size0, size1) ((void)0) +#define DML_LOG_DEBUG_3D_ARRAY_BOOL(field, size0, size1, size2) ((void)0) +#define DML_LOG_DEBUG_3D_ARRAY_UINT(field, size0, size1, size2) ((void)0) +#define DML_LOG_DEBUG_3D_ARRAY_INT(field, size0, size1, size2) ((void)0) +#define DML_LOG_DEBUG_3D_ARRAY_DOUBLE(field, size0, size1, size2) ((void)0) #endif + +/* public macros for DML_LOG_LEVEL_VERBOSE */ #if DML_LOG_LEVEL >= DML_LOG_LEVEL_VERBOSE -#define DML_LOG_VERBOSE(fmt, ...) DML_LOG_INTERNAL("[DML VERBOSE] "fmt, ## __VA_ARGS__) +#define DML_LOG_VERBOSE(fmt, ...) DML_LOG_INTERNAL(fmt, ## __VA_ARGS__) #else #define DML_LOG_VERBOSE(fmt, ...) ((void)0) -#endif +#endif /* #if DML_LOG_LEVEL >= DML_LOG_LEVEL_VERBOSE */ #endif /* __DML2_DEBUG_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h index 00688b9f1df4..d52aa82283b3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h @@ -202,6 +202,8 @@ struct dml2_core_mode_support_result { } active; unsigned int dispclk_khz; + unsigned int dpprefclk_khz; + unsigned int dtbrefclk_khz; unsigned int dcfclk_deepsleep_khz; unsigned int socclk_khz; @@ -446,13 +448,17 @@ struct dml2_core_internal_state_intermediates { }; struct dml2_core_mode_support_locals { - struct dml2_core_calcs_mode_support_ex mode_support_ex_params; + union { + struct dml2_core_calcs_mode_support_ex mode_support_ex_params; + }; struct dml2_display_cfg svp_expanded_display_cfg; struct dml2_calculate_mcache_allocation_in_out calc_mcache_allocation_params; }; struct dml2_core_mode_programming_locals { - struct dml2_core_calcs_mode_programming_ex mode_programming_ex_params; + union { + struct dml2_core_calcs_mode_programming_ex mode_programming_ex_params; + }; struct dml2_display_cfg svp_expanded_display_cfg; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c index 5f1b49a50049..4cfe64aa8492 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c @@ -473,7 +473,6 @@ static void sort_pipes_for_splitting(struct dc_plane_pipe_pool *pipes) { bool sorted, swapped; unsigned int cur_index; - unsigned int temp; int odm_slice_index; for (odm_slice_index = 0; odm_slice_index < pipes->num_pipes_assigned_to_plane_for_odm_combine; odm_slice_index++) { @@ -489,9 +488,8 @@ static void sort_pipes_for_splitting(struct dc_plane_pipe_pool *pipes) swapped = false; while (!sorted) { if 
(pipes->pipes_assigned_to_plane[odm_slice_index][cur_index] > pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1]) { - temp = pipes->pipes_assigned_to_plane[odm_slice_index][cur_index]; - pipes->pipes_assigned_to_plane[odm_slice_index][cur_index] = pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1]; - pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1] = temp; + swap(pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1], + pipes->pipes_assigned_to_plane[odm_slice_index][cur_index]); swapped = true; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c index 6b3b8803e0ae..c59f825cfae9 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c @@ -654,14 +654,14 @@ static void set_phantom_stream_timing(struct dml2_context *ctx, struct dc_state unsigned int svp_height, unsigned int svp_vstartup) { - unsigned int i, pipe_idx; + unsigned int i; double line_time, fp_and_sync_width_time; struct pipe_ctx *pipe; uint32_t phantom_vactive, phantom_bp, pstate_width_fw_delay_lines; static const double cvt_rb_vblank_max = ((double) 460 / (1000 * 1000)); // Find DML pipe index (pipe_idx) using dc_pipe_idx - for (i = 0, pipe_idx = 0; i < ctx->config.dcn_pipe_count; i++) { + for (i = 0; i < ctx->config.dcn_pipe_count; i++) { pipe = &state->res_ctx.pipe_ctx[i]; if (!pipe->stream) @@ -669,8 +669,6 @@ static void set_phantom_stream_timing(struct dml2_context *ctx, struct dc_state if (i == dc_pipe_idx) break; - - pipe_idx++; } // Calculate lines required for pstate allow width and FW processing delays @@ -868,7 +866,7 @@ bool dml2_svp_remove_all_phantom_pipes(struct dml2_context *ctx, struct dc_state /* Conditions for setting up phantom pipes for SubVP: * 1. Not force disable SubVP - * 2. Full update (i.e. !fast_validate) + * 2. Full update (i.e. DC_VALIDATE_MODE_AND_PROGRAMMING) * 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?) * 4. Display configuration passes validation * 5. 
(Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index 208630754c8a..3b866e876bf4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ -1189,22 +1189,6 @@ static unsigned int map_plane_to_dml_display_cfg(const struct dml2_context *dml2 return location; } -static void apply_legacy_svp_drr_settings(struct dml2_context *dml2, const struct dc_state *state, struct dml_display_cfg_st *dml_dispcfg) -{ - int i; - - if (state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { - ASSERT(state->stream_count == 1); - dml_dispcfg->timing.DRRDisplay[0] = true; - } else if (state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid) { - - for (i = 0; i < dml_dispcfg->num_timings; i++) { - if (dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[i] == state->streams[state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index]->stream_id) - dml_dispcfg->timing.DRRDisplay[i] = true; - } - } -} - static void dml2_populate_pipe_to_plane_index_mapping(struct dml2_context *dml2, struct dc_state *state) { unsigned int i; @@ -1437,9 +1421,6 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat } } } - - if (!dml2->config.use_native_pstate_optimization) - apply_legacy_svp_drr_settings(dml2, context, dml_dispcfg); } void dml2_update_pipe_ctx_dchub_regs(struct _vcs_dpi_dml_display_rq_regs_st *rq_regs, diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c index 525b7d04bf84..9deb03a18ccc 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c @@ -24,8 +24,6 @@ * */ -#include <linux/vmalloc.h> - #include "display_mode_core.h" #include "dml2_internal_types.h" #include "dml2_utils.h" @@ -95,12 +93,17 @@ static void map_hw_resources(struct dml2_context *dml2, static unsigned int pack_and_call_dml_mode_support_ex(struct dml2_context *dml2, const struct dml_display_cfg_st *display_cfg, - struct dml_mode_support_info_st *evaluation_info) + struct dml_mode_support_info_st *evaluation_info, + enum dc_validate_mode validate_mode) { struct dml2_wrapper_scratch *s = &dml2->v20.scratch; s->mode_support_params.mode_lib = &dml2->v20.dml_core_ctx; s->mode_support_params.in_display_cfg = display_cfg; + if (validate_mode == DC_VALIDATE_MODE_ONLY) + s->mode_support_params.in_start_state_idx = dml2->v20.dml_core_ctx.states.num_states - 1; + else + s->mode_support_params.in_start_state_idx = 0; s->mode_support_params.out_evaluation_info = evaluation_info; memset(evaluation_info, 0, sizeof(struct dml_mode_support_info_st)); @@ -112,10 +115,8 @@ static unsigned int pack_and_call_dml_mode_support_ex(struct dml2_context *dml2, static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrapper_optimize_configuration_params *p) { int unused_dpps = p->ip_params->max_num_dpp; - int i, j; - int odms_needed, refresh_rate_hz, dpps_needed, subvp_height, pstate_width_fw_delay_lines, surface_count; - int subvp_timing_to_add, new_timing_index, subvp_surface_to_add, new_surface_index; - float frame_time_sec, max_frame_time_sec; + int i; + int odms_needed; int largest_blend_and_timing = 0; bool optimization_done = false; @@ -130,79 +131,6 @@ static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrappe if 
(p->new_display_config != p->cur_display_config) *p->new_display_config = *p->cur_display_config; - // Optimize P-State Support - if (dml2->config.use_native_pstate_optimization) { - if (p->cur_mode_support_info->DRAMClockChangeSupport[0] == dml_dram_clock_change_unsupported) { - // Find a display with < 120Hz refresh rate with maximal refresh rate that's not already subvp - subvp_timing_to_add = -1; - subvp_surface_to_add = -1; - max_frame_time_sec = 0; - surface_count = 0; - for (i = 0; i < (int) p->cur_display_config->num_timings; i++) { - refresh_rate_hz = (int)div_u64((unsigned long long) p->cur_display_config->timing.PixelClock[i] * 1000 * 1000, - (p->cur_display_config->timing.HTotal[i] * p->cur_display_config->timing.VTotal[i])); - if (refresh_rate_hz < 120) { - // Check its upstream surfaces to see if this one could be converted to subvp. - dpps_needed = 0; - for (j = 0; j < (int) p->cur_display_config->num_surfaces; j++) { - if (p->cur_display_config->plane.BlendingAndTiming[j] == i && - p->cur_display_config->plane.UseMALLForPStateChange[j] == dml_use_mall_pstate_change_disable) { - dpps_needed += p->cur_mode_support_info->DPPPerSurface[j]; - subvp_surface_to_add = j; - surface_count++; - } - } - - if (surface_count == 1 && dpps_needed > 0 && dpps_needed <= unused_dpps) { - frame_time_sec = (float)1 / refresh_rate_hz; - if (frame_time_sec > max_frame_time_sec) { - max_frame_time_sec = frame_time_sec; - subvp_timing_to_add = i; - } - } - } - } - if (subvp_timing_to_add >= 0) { - new_timing_index = p->new_display_config->num_timings++; - new_surface_index = p->new_display_config->num_surfaces++; - // Add a phantom pipe reflecting the main pipe's timing - dml2_util_copy_dml_timing(&p->new_display_config->timing, new_timing_index, subvp_timing_to_add); - - pstate_width_fw_delay_lines = (int)(((double)(p->config->svp_pstate.subvp_fw_processing_delay_us + - p->config->svp_pstate.subvp_pstate_allow_width_us) / 1000000) * - (p->new_display_config->timing.PixelClock[subvp_timing_to_add] * 1000 * 1000) / - (double)p->new_display_config->timing.HTotal[subvp_timing_to_add]); - - subvp_height = p->cur_mode_support_info->SubViewportLinesNeededInMALL[subvp_timing_to_add] + pstate_width_fw_delay_lines; - - p->new_display_config->timing.VActive[new_timing_index] = subvp_height; - p->new_display_config->timing.VTotal[new_timing_index] = subvp_height + - p->new_display_config->timing.VTotal[subvp_timing_to_add] - p->new_display_config->timing.VActive[subvp_timing_to_add]; - - p->new_display_config->output.OutputDisabled[new_timing_index] = true; - - p->new_display_config->plane.UseMALLForPStateChange[subvp_surface_to_add] = dml_use_mall_pstate_change_sub_viewport; - - dml2_util_copy_dml_plane(&p->new_display_config->plane, new_surface_index, subvp_surface_to_add); - dml2_util_copy_dml_surface(&p->new_display_config->surface, new_surface_index, subvp_surface_to_add); - - p->new_display_config->plane.ViewportHeight[new_surface_index] = subvp_height; - p->new_display_config->plane.ViewportHeightChroma[new_surface_index] = subvp_height; - p->new_display_config->plane.ViewportStationary[new_surface_index] = false; - - p->new_display_config->plane.UseMALLForStaticScreen[new_surface_index] = dml_use_mall_static_screen_disable; - p->new_display_config->plane.UseMALLForPStateChange[new_surface_index] = dml_use_mall_pstate_change_phantom_pipe; - - p->new_display_config->plane.NumberOfCursors[new_surface_index] = 0; - - p->new_policy->ImmediateFlipRequirement[new_surface_index] = 
dml_immediate_flip_not_required; - - p->new_display_config->plane.BlendingAndTiming[new_surface_index] = new_timing_index; - - optimization_done = true; - } - } - } // Optimize Clocks if (!optimization_done) { @@ -226,7 +154,8 @@ static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrappe return optimization_done; } -static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *dml2, struct dc_state *display_state) +static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *dml2, struct dc_state *display_state, + enum dc_validate_mode validate_mode) { struct dml2_calculate_lowest_supported_state_for_temp_read_scratch *s = &dml2->v20.scratch.dml2_calculate_lowest_supported_state_for_temp_read_scratch; struct dml2_wrapper_scratch *s_global = &dml2->v20.scratch; @@ -268,7 +197,8 @@ static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *d dml2->v20.dml_core_ctx.states.state_array[j].dram_clock_change_latency_us = s_global->dummy_pstate_table[i].dummy_pstate_latency_us; } - dml_result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, &s->evaluation_info); + dml_result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, &s->evaluation_info, + validate_mode); if (dml_result && s->evaluation_info.DRAMClockChangeSupport[0] == dml_dram_clock_change_vactive) { map_hw_resources(dml2, &s->cur_display_config, &s->evaluation_info); @@ -333,7 +263,8 @@ static bool does_configuration_meet_sw_policies(struct dml2_context *ctx, const } static bool dml_mode_support_wrapper(struct dml2_context *dml2, - struct dc_state *display_state) + struct dc_state *display_state, + enum dc_validate_mode validate_mode) { struct dml2_wrapper_scratch *s = &dml2->v20.scratch; unsigned int result = 0, i; @@ -369,7 +300,8 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2, result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, - &s->mode_support_info); + &s->mode_support_info, + validate_mode); if (result) result = does_configuration_meet_sw_policies(dml2, &s->cur_display_config, &s->mode_support_info); @@ -390,7 +322,8 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2, dml2->v20.dml_core_ctx.policy = s->new_policy; optimized_result = pack_and_call_dml_mode_support_ex(dml2, &s->new_display_config, - &s->mode_support_info); + &s->mode_support_info, + validate_mode); if (optimized_result) optimized_result = does_configuration_meet_sw_policies(dml2, &s->new_display_config, &s->mode_support_info); @@ -409,7 +342,8 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2, if (!optimized_result) { result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, - &s->mode_support_info); + &s->mode_support_info, + validate_mode); } } @@ -419,118 +353,7 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2, return result; } -static int find_drr_eligible_stream(struct dc_state *display_state) -{ - int i; - - for (i = 0; i < display_state->stream_count; i++) { - if (dc_state_get_stream_subvp_type(display_state, display_state->streams[i]) == SUBVP_NONE - && display_state->streams[i]->ignore_msa_timing_param) { - // Use ignore_msa_timing_param flag to identify as DRR - return i; - } - } - - return -1; -} - -static bool optimize_pstate_with_svp_and_drr(struct dml2_context *dml2, struct dc_state *display_state) -{ - struct dml2_wrapper_scratch *s = &dml2->v20.scratch; - bool pstate_optimization_done = false; - bool pstate_optimization_success = false; - 
bool result = false; - int drr_display_index = 0, non_svp_streams = 0; - bool force_svp = dml2->config.svp_pstate.force_enable_subvp; - - display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false; - display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = false; - - result = dml_mode_support_wrapper(dml2, display_state); - - if (!result) { - pstate_optimization_done = true; - } else if (s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported && !force_svp) { - pstate_optimization_success = true; - pstate_optimization_done = true; - } - - if (display_state->stream_count == 1 && dml2->config.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch(dml2->config.callbacks.dc, display_state)) { - display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = true; - - result = dml_mode_support_wrapper(dml2, display_state); - } else { - non_svp_streams = display_state->stream_count; - - while (!pstate_optimization_done) { - result = dml_mode_programming(&dml2->v20.dml_core_ctx, s->mode_support_params.out_lowest_state_idx, &s->cur_display_config, true); - - // Always try adding SVP first - if (result) - result = dml2_svp_add_phantom_pipe_to_dc_state(dml2, display_state, &s->mode_support_info); - else - pstate_optimization_done = true; - - - if (result) { - result = dml_mode_support_wrapper(dml2, display_state); - } else { - pstate_optimization_done = true; - } - - if (result) { - non_svp_streams--; - - if (s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported) { - if (dml2_svp_validate_static_schedulability(dml2, display_state, s->mode_support_info.DRAMClockChangeSupport[0])) { - pstate_optimization_success = true; - pstate_optimization_done = true; - } else { - pstate_optimization_success = false; - pstate_optimization_done = false; - } - } else { - drr_display_index = find_drr_eligible_stream(display_state); - - // If there is only 1 remaining non SubVP pipe that is DRR, check static - // schedulability for SubVP + DRR. 
- if (non_svp_streams == 1 && drr_display_index >= 0) { - if (dml2_svp_drr_schedulable(dml2, display_state, &display_state->streams[drr_display_index]->timing)) { - display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = true; - display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index = drr_display_index; - result = dml_mode_support_wrapper(dml2, display_state); - } - - if (result && s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported) { - pstate_optimization_success = true; - pstate_optimization_done = true; - } else { - pstate_optimization_success = false; - pstate_optimization_done = false; - } - } - - if (pstate_optimization_success) { - pstate_optimization_done = true; - } else { - pstate_optimization_done = false; - } - } - } - } - } - - if (!pstate_optimization_success) { - dml2_svp_remove_all_phantom_pipes(dml2, display_state); - display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false; - display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = false; - result = dml_mode_support_wrapper(dml2, display_state); - } - - return result; -} - -static bool call_dml_mode_support_and_programming(struct dc_state *context) +static bool call_dml_mode_support_and_programming(struct dc_state *context, enum dc_validate_mode validate_mode) { unsigned int result = 0; unsigned int min_state = 0; @@ -544,16 +367,13 @@ static bool call_dml_mode_support_and_programming(struct dc_state *context) struct dml2_wrapper_scratch *s = &dml2->v20.scratch; if (!context->streams[0]->sink->link->dc->caps.is_apu) { - min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context); + min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context, + validate_mode); ASSERT(min_state_for_g6_temp_read >= 0); } - if (!dml2->config.use_native_pstate_optimization) { - result = optimize_pstate_with_svp_and_drr(dml2, context); - } else { - result = dml_mode_support_wrapper(dml2, context); - } + result = dml_mode_support_wrapper(dml2, context, validate_mode); /* Upon trying to sett certain frequencies in FRL, min_state_for_g6_temp_read is reported as -1. This leads to an invalid value of min_state causing crashes later on. * Use the default logic for min_state only when min_state_for_g6_temp_read is a valid value. In other cases, use the value calculated by the DML directly. @@ -575,7 +395,8 @@ static bool call_dml_mode_support_and_programming(struct dc_state *context) return result; } -static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_state *context) +static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_state *context, + enum dc_validate_mode validate_mode) { struct dml2_context *dml2 = context->bw_ctx.dml2; struct dml2_wrapper_scratch *s = &dml2->v20.scratch; @@ -611,7 +432,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s copy_dummy_pstate_table(s->dummy_pstate_table, in_dc->clk_mgr->bw_params->dummy_pstate_table, 4); - result = call_dml_mode_support_and_programming(context); + result = call_dml_mode_support_and_programming(context, validate_mode); /* Call map dc pipes to map the pipes based on the DML output. For correctly determining if recalculation * is required or not, the resource context needs to correctly reflect the number of active pipes. We would * only know the correct number if active pipes after dml2_map_dc_pipes is called. 
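The validation mode is also what steers where the DML power-state sweep starts in pack_and_call_dml_mode_support_ex(). A minimal sketch of that selection, assuming the dc_validate_mode enum from the dc headers and with the wrapper's scratch fields reduced to two parameters (names here are illustrative, not the driver's):

static unsigned int start_state_for_mode(enum dc_validate_mode mode,
                                         unsigned int num_states)
{
        /*
         * DC_VALIDATE_MODE_ONLY only has to report support/no-support, so
         * the sweep can begin at the highest (last) state; the other modes
         * sweep from state 0 so the lowest supported state can be found.
         */
        return mode == DC_VALIDATE_MODE_ONLY ? num_states - 1 : 0;
}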
@@ -628,7 +449,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s need_recalculation = dml2_verify_det_buffer_configuration(dml2, context, &dml2->det_helper_scratch); if (need_recalculation) { /* Engage the DML again if recalculation is required. */ - call_dml_mode_support_and_programming(context); + call_dml_mode_support_and_programming(context, validate_mode); if (!dml2->config.skip_hw_state_mapping) { dml2_map_dc_pipes(dml2, context, &s->cur_display_config, &s->dml_to_dc_pipe_mapping, in_dc->current_state); } @@ -684,7 +505,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s return result; } -static bool dml2_validate_only(struct dc_state *context) +static bool dml2_validate_only(struct dc_state *context, enum dc_validate_mode validate_mode) { struct dml2_context *dml2; unsigned int result = 0; @@ -708,12 +529,13 @@ static bool dml2_validate_only(struct dc_state *context) result = pack_and_call_dml_mode_support_ex(dml2, &dml2->v20.scratch.cur_display_config, - &dml2->v20.scratch.mode_support_info); + &dml2->v20.scratch.mode_support_info, + validate_mode); if (result) result = does_configuration_meet_sw_policies(dml2, &dml2->v20.scratch.cur_display_config, &dml2->v20.scratch.mode_support_info); - return (result == 1) ? true : false; + return result == 1; } static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *dml2) @@ -723,7 +545,8 @@ static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *d } } -bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2, bool fast_validate) +bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2, + enum dc_validate_mode validate_mode) { bool out = false; @@ -733,17 +556,17 @@ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2 /* DML2.1 validation path */ if (dml2->architecture == dml2_architecture_21) { - out = dml21_validate(in_dc, context, dml2, fast_validate); + out = dml21_validate(in_dc, context, dml2, validate_mode); return out; } DC_FP_START(); - /* Use dml_validate_only for fast_validate path */ - if (fast_validate) - out = dml2_validate_only(context); + /* Use dml_validate_only for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX path */ + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) + out = dml2_validate_only(context, validate_mode); else - out = dml2_validate_and_build_resource(in_dc, context); + out = dml2_validate_and_build_resource(in_dc, context, validate_mode); DC_FP_END(); @@ -757,8 +580,8 @@ static inline struct dml2_context *dml2_allocate_memory(void) static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2) { - if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01)) { - dml21_reinit(in_dc, dml2, config); + if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01)) { + dml21_reinit(in_dc, *dml2, config); return; } @@ -803,9 +626,7 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2) { // TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete. 
- if ((in_dc->debug.using_dml21) - && (in_dc->ctx->dce_version == DCN_VERSION_4_01 - )) + if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01)) return dml21_create(in_dc, dml2, config); // Allocate Mode Lib Ctx @@ -874,8 +695,8 @@ void dml2_reinit(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2) { - if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01)) { - dml21_reinit(in_dc, dml2, config); + if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01)) { + dml21_reinit(in_dc, *dml2, config); return; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h index 5100f269368e..c384e141cebc 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h @@ -240,7 +240,7 @@ struct dml2_configuration_options { bool use_clock_dc_limits; bool gpuvm_enable; bool force_tdlut_enable; - struct dml2_soc_bb *bb_from_dmub; + void *bb_from_dmub; }; /* @@ -272,7 +272,7 @@ void dml2_reinit(const struct dc *in_dc, * dml2_validate - Determines if a display configuration is supported or not. * @in_dc: dc. * @context: dc_state to be validated. - * @fast_validate: Fast validate will not populate context.res_ctx. + * @validate_mode: DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX will not populate context.res_ctx. * * DML1.0 compatible interface for validation. * @@ -295,7 +295,7 @@ void dml2_reinit(const struct dc *in_dc, bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2, - bool fast_validate); + enum dc_validate_mode validate_mode); /* * dml2_extract_dram_and_fclk_change_support - Extracts the FCLK and UCLK change support info. 
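With the fast_validate boolean gone from dml2_validate(), existing callers map onto the three-valued enum roughly as in this hypothetical helper (the dc core call sites themselves are outside the hunks shown here):

static enum dc_validate_mode to_validate_mode(bool fast_validate)
{
        /* fast path: mode support only, context->res_ctx is left unpopulated */
        return fast_validate ? DC_VALIDATE_MODE_ONLY :
                               DC_VALIDATE_MODE_AND_PROGRAMMING;
}

DC_VALIDATE_MODE_AND_STATE_INDEX takes the same validate-only path inside dml2_validate() but, unlike DC_VALIDATE_MODE_ONLY, sweeps the state table from index 0, so the mode-support run can still report a lowest supported state index.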
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c index 75fb77bca83b..01480a04f85e 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c @@ -520,6 +520,15 @@ void dpp1_dppclk_control( REG_UPDATE(DPP_CONTROL, DPP_CLOCK_ENABLE, 0); } +void dpp_force_disable_cursor(struct dpp *dpp_base) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + /* Force disable cursor */ + REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, 0); + dpp_base->pos.cur0_ctl.bits.cur0_enable = 0; +} + static const struct dpp_funcs dcn10_dpp_funcs = { .dpp_read_state = dpp_read_state, .dpp_reset = dpp_reset, diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h index c48139bed11f..f466182963f7 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h @@ -1525,4 +1525,6 @@ void dpp1_construct(struct dcn10_dpp *dpp1, void dpp1_cm_get_gamut_remap(struct dpp *dpp_base, struct dpp_grph_csc_adjustment *adjust); +void dpp_force_disable_cursor(struct dpp *dpp_base); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c index 2d70586cef40..09be2a90cc79 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c @@ -1494,6 +1494,7 @@ static struct dpp_funcs dcn30_dpp_funcs = { .dpp_dppclk_control = dpp1_dppclk_control, .dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier, .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap, + .dpp_force_disable_cursor = dpp_force_disable_cursor, }; diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c index 97bf26fa3573..36187f890d5d 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c @@ -231,7 +231,7 @@ static struct dpp_funcs dcn401_dpp_funcs = { .dpp_program_regamma_pwl = NULL, .dpp_set_pre_degam = dpp3_set_pre_degam, .dpp_program_input_lut = NULL, - .dpp_full_bypass = dpp401_full_bypass, + .dpp_full_bypass = NULL, .dpp_setup = dpp401_dpp_setup, .dpp_program_degamma_pwl = NULL, .dpp_program_cm_dealpha = dpp3_program_cm_dealpha, diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h index ecaa976e1f52..5f6b431ec398 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h @@ -641,6 +641,7 @@ uint32_t ISHARP_DELTA_DATA; \ uint32_t ISHARP_DELTA_INDEX; \ uint32_t ISHARP_NLDELTA_SOFT_CLIP + struct dcn401_dpp_registers { DPP_REG_VARIABLE_LIST_DCN401; }; @@ -672,6 +673,16 @@ struct dcn401_dpp { struct pwl_params pwl_data; }; +enum dcn401_dscl_mode_sel { + DCN401_DSCL_MODE_SCALING_444_BYPASS = 0, + DCN401_DSCL_MODE_SCALING_444_RGB_ENABLE = 1, + DCN401_DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2, + DCN401_DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3, + DCN401_DSCL_MODE_SCALING_420_LUMA_BYPASS = 4, + DCN401_DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5, + DCN401_DSCL_MODE_DSCL_BYPASS = 6 +}; + bool dpp401_construct(struct dcn401_dpp *dpp401, struct dc_context *ctx, uint32_t inst, @@ -683,8 +694,6 @@ void dpp401_dscl_set_scaler_manual_scale( struct dpp *dpp_base, const struct scaler_data *scl_data); -void dpp401_full_bypass(struct dpp 
*dpp_base); - void dpp401_dpp_setup( struct dpp *dpp_base, enum surface_pixel_format format, diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c index 712aff7e17f7..7aab77b58869 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c @@ -88,30 +88,6 @@ enum dscl_mode_sel { DSCL_MODE_DSCL_BYPASS = 6 }; -void dpp401_full_bypass(struct dpp *dpp_base) -{ - struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base); - - /* Input pixel format: ARGB8888 */ - REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0, - CNVC_SURFACE_PIXEL_FORMAT, 0x8); - - /* Zero expansion */ - REG_SET_3(FORMAT_CONTROL, 0, - CNVC_BYPASS, 0, - FORMAT_CONTROL__ALPHA_EN, 0, - FORMAT_EXPANSION_MODE, 0); - - /* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */ - if (dpp->tf_mask->CM_BYPASS_EN) - REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1); - else - REG_SET(CM_CONTROL, 0, CM_BYPASS, 1); - - /* Setting degamma bypass for now */ - REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0); -} - void dpp401_set_cursor_attributes( struct dpp *dpp_base, struct dc_cursor_attributes *cursor_attributes) diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c index 2f92e7d4981b..6df3419f825f 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c @@ -78,16 +78,6 @@ enum dscl_autocal_mode { AUTOCAL_MODE_AUTOREPLICATE = 3 }; -enum dscl_mode_sel { - DSCL_MODE_SCALING_444_BYPASS = 0, - DSCL_MODE_SCALING_444_RGB_ENABLE = 1, - DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2, - DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3, - DSCL_MODE_SCALING_420_LUMA_BYPASS = 4, - DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5, - DSCL_MODE_DSCL_BYPASS = 6 -}; - static int dpp401_dscl_get_pixel_depth_val(enum lb_pixel_depth depth) { if (depth == LB_PIXEL_DEPTH_30BPP) @@ -122,7 +112,7 @@ static bool dpp401_dscl_is_420_format(enum pixel_format format) return false; } -static enum dscl_mode_sel dpp401_dscl_get_dscl_mode( +static enum dcn401_dscl_mode_sel dpp401_dscl_get_dscl_mode( struct dpp *dpp_base, const struct scaler_data *data, bool dbg_always_scale) @@ -132,7 +122,7 @@ static enum dscl_mode_sel dpp401_dscl_get_dscl_mode( if (dpp_base->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) { /* DSCL is processing data in fixed format */ if (data->format == PIXEL_FORMAT_FP16) - return DSCL_MODE_DSCL_BYPASS; + return DCN401_DSCL_MODE_DSCL_BYPASS; } if (data->ratios.horz.value == one @@ -140,20 +130,20 @@ static enum dscl_mode_sel dpp401_dscl_get_dscl_mode( && data->ratios.horz_c.value == one && data->ratios.vert_c.value == one && !dbg_always_scale) - return DSCL_MODE_SCALING_444_BYPASS; + return DCN401_DSCL_MODE_SCALING_444_BYPASS; if (!dpp401_dscl_is_420_format(data->format)) { if (dpp401_dscl_is_video_format(data->format)) - return DSCL_MODE_SCALING_444_YCBCR_ENABLE; + return DCN401_DSCL_MODE_SCALING_444_YCBCR_ENABLE; else - return DSCL_MODE_SCALING_444_RGB_ENABLE; + return DCN401_DSCL_MODE_SCALING_444_RGB_ENABLE; } if (data->ratios.horz.value == one && data->ratios.vert.value == one) - return DSCL_MODE_SCALING_420_LUMA_BYPASS; + return DCN401_DSCL_MODE_SCALING_420_LUMA_BYPASS; if (data->ratios.horz_c.value == one && data->ratios.vert_c.value == one) - return DSCL_MODE_SCALING_420_CHROMA_BYPASS; + return DCN401_DSCL_MODE_SCALING_420_CHROMA_BYPASS; - return 
DSCL_MODE_SCALING_420_YCBCR_ENABLE; + return DCN401_DSCL_MODE_SCALING_420_YCBCR_ENABLE; } static void dpp401_power_on_dscl( @@ -1071,7 +1061,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base, uint32_t v_num_taps_c = scl_data->taps.v_taps_c - 1; uint32_t h_num_taps = scl_data->taps.h_taps - 1; uint32_t h_num_taps_c = scl_data->taps.h_taps_c - 1; - enum dscl_mode_sel dscl_mode = dpp401_dscl_get_dscl_mode( + enum dcn401_dscl_mode_sel dscl_mode = dpp401_dscl_get_dscl_mode( dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale); bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN && scl_data->format <= PIXEL_FORMAT_VIDEO_END; @@ -1102,7 +1092,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base, dpp->scl_data = *scl_data; if ((dpp->base.ctx->dc->config.use_spl) && (!dpp->base.ctx->dc->debug.disable_spl)) { - dscl_mode = (enum dscl_mode_sel) scl_data->dscl_prog_data.dscl_mode; + dscl_mode = (enum dcn401_dscl_mode_sel) scl_data->dscl_prog_data.dscl_mode; rect = (struct rect *)&scl_data->dscl_prog_data.recout; mpc_width = scl_data->dscl_prog_data.mpc_size.width; mpc_height = scl_data->dscl_prog_data.mpc_size.height; @@ -1112,7 +1102,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base, h_num_taps_c = scl_data->dscl_prog_data.taps.h_taps_c; } if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.dscl) { - if (dscl_mode != DSCL_MODE_DSCL_BYPASS) + if (dscl_mode != DCN401_DSCL_MODE_DSCL_BYPASS) dpp401_power_on_dscl(dpp_base, true); } @@ -1139,7 +1129,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base, /* SCL mode */ REG_UPDATE(SCL_MODE, DSCL_MODE, dscl_mode); - if (dscl_mode == DSCL_MODE_DSCL_BYPASS) { + if (dscl_mode == DCN401_DSCL_MODE_DSCL_BYPASS) { if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.dscl) dpp401_power_on_dscl(dpp_base, false); return; @@ -1149,7 +1139,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base, lb_config = dpp401_dscl_find_lb_memory_config(dpp, scl_data); dpp401_dscl_set_lb(dpp, &scl_data->lb_params, lb_config); - if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS) { + if (dscl_mode == DCN401_DSCL_MODE_SCALING_444_BYPASS) { if (dpp->base.ctx->dc->config.prefer_easf) dpp401_dscl_disable_easf(dpp_base, scl_data); dpp401_dscl_program_isharp(dpp_base, scl_data, program_isharp_1dlut, &bs_coeffs_updated); diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 11535922b5ff..e4144b244332 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -30,6 +30,9 @@ #include "rc_calc.h" #include "fixed31_32.h" +#include "clk_mgr.h" +#include "resource.h" + #define DC_LOGGER \ dsc->ctx->logger @@ -149,6 +152,11 @@ uint32_t dc_bandwidth_in_kbps_from_timing( } /* Forward Declerations */ +static unsigned int get_min_dsc_slice_count_for_odm( + const struct display_stream_compressor *dsc, + const struct dsc_enc_caps *dsc_enc_caps, + const struct dc_crtc_timing *timing); + static bool decide_dsc_bandwidth_range( const uint32_t min_bpp_x16, const uint32_t max_bpp_x16, @@ -183,6 +191,7 @@ static bool setup_dsc_config( const struct dc_crtc_timing *timing, const struct dc_dsc_config_options *options, const enum dc_link_encoding_format link_encoding, + int min_slice_count, struct dc_dsc_config *dsc_cfg); static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size) @@ -442,7 +451,6 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, return true; } - /* If DSC is possbile, get DSC 
bandwidth range based on [min_bpp, max_bpp] target bitrate range and * timing's pixel clock and uncompressed bandwidth. * If DSC is not possible, leave '*range' untouched. @@ -458,6 +466,7 @@ bool dc_dsc_compute_bandwidth_range( struct dc_dsc_bw_range *range) { bool is_dsc_possible = false; + unsigned int min_dsc_slice_count; struct dsc_enc_caps dsc_enc_caps; struct dsc_enc_caps dsc_common_caps; struct dc_dsc_config config = {0}; @@ -469,12 +478,14 @@ bool dc_dsc_compute_bandwidth_range( get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz); + min_dsc_slice_count = get_min_dsc_slice_count_for_odm(dsc, &dsc_enc_caps, timing); + is_dsc_possible = intersect_dsc_caps(dsc_sink_caps, &dsc_enc_caps, timing->pixel_encoding, &dsc_common_caps); if (is_dsc_possible) is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing, - &options, link_encoding, &config); + &options, link_encoding, min_dsc_slice_count, &config); if (is_dsc_possible) is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16, @@ -525,20 +536,153 @@ void dc_dsc_dump_decoder_caps(const struct display_stream_compressor *dsc, DC_LOG_DSC("\tis_dp %d", dsc_sink_caps->is_dp); } + +static void build_dsc_enc_combined_slice_caps( + const struct dsc_enc_caps *single_dsc_enc_caps, + struct dsc_enc_caps *dsc_enc_caps, + unsigned int max_odm_combine_factor) +{ + /* 1-16 slice configurations, single DSC */ + dsc_enc_caps->slice_caps.raw |= single_dsc_enc_caps->slice_caps.raw; + + /* 2x DSC's */ + if (max_odm_combine_factor >= 2) { + /* 1 + 1 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_1; + + /* 2 + 2 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_2; + + /* 4 + 4 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4; + + /* 8 + 8 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_8; + } + + /* 3x DSC's */ + if (max_odm_combine_factor >= 3) { + /* 4 + 4 + 4 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4; + } + + /* 4x DSC's */ + if (max_odm_combine_factor >= 4) { + /* 1 + 1 + 1 + 1 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_1; + + /* 2 + 2 + 2 + 2 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_2; + + /* 3 + 3 + 3 + 3 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_3; + + /* 4 + 4 + 4 + 4 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4; + } +} + +static void build_dsc_enc_caps( + const struct display_stream_compressor *dsc, + struct dsc_enc_caps *dsc_enc_caps) +{ + unsigned int max_dscclk_khz; + unsigned int num_dsc; + unsigned int max_odm_combine_factor; + struct dsc_enc_caps single_dsc_enc_caps; + + struct dc *dc; + + if (!dsc || !dsc->ctx || !dsc->ctx->dc || !dsc->funcs->dsc_get_single_enc_caps) + return; + + dc = dsc->ctx->dc; + + if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_max_clock_khz || !dc->res_pool || dc->debug.disable_dsc) + return; + + /* get max DSCCLK from clk_mgr */ + max_dscclk_khz = dc->clk_mgr->funcs->get_max_clock_khz(dc->clk_mgr, CLK_TYPE_DSCCLK); + + dsc->funcs->dsc_get_single_enc_caps(&single_dsc_enc_caps, max_dscclk_khz); + + /* global capabilities */ + dsc_enc_caps->dsc_version = single_dsc_enc_caps.dsc_version; + 
dsc_enc_caps->lb_bit_depth = single_dsc_enc_caps.lb_bit_depth; + dsc_enc_caps->is_block_pred_supported = single_dsc_enc_caps.is_block_pred_supported; + dsc_enc_caps->max_slice_width = single_dsc_enc_caps.max_slice_width; + dsc_enc_caps->bpp_increment_div = single_dsc_enc_caps.bpp_increment_div; + dsc_enc_caps->color_formats.raw = single_dsc_enc_caps.color_formats.raw; + dsc_enc_caps->color_depth.raw = single_dsc_enc_caps.color_depth.raw; + + /* expand per DSC capabilities to global */ + max_odm_combine_factor = dc->caps.max_odm_combine_factor; + num_dsc = dc->res_pool->res_cap->num_dsc; + max_odm_combine_factor = min(max_odm_combine_factor, num_dsc); + dsc_enc_caps->max_total_throughput_mps = + single_dsc_enc_caps.max_total_throughput_mps * + max_odm_combine_factor; + + /* check slice counts possible with ODM combine */ + build_dsc_enc_combined_slice_caps(&single_dsc_enc_caps, dsc_enc_caps, max_odm_combine_factor); +} + +static inline uint32_t dsc_div_by_10_round_up(uint32_t value) +{ + return (value + 9) / 10; +} + +static unsigned int get_min_dsc_slice_count_for_odm( + const struct display_stream_compressor *dsc, + const struct dsc_enc_caps *dsc_enc_caps, + const struct dc_crtc_timing *timing) +{ + unsigned int max_dispclk_khz; + + /* get max pixel rate and combine caps */ + max_dispclk_khz = dsc_enc_caps->max_total_throughput_mps * 1000; + if (dsc && dsc->ctx->dc) { + if (dsc->ctx->dc->clk_mgr && + dsc->ctx->dc->clk_mgr->funcs->get_max_clock_khz) { + /* dispclk is available */ + max_dispclk_khz = dsc->ctx->dc->clk_mgr->funcs->get_max_clock_khz(dsc->ctx->dc->clk_mgr, CLK_TYPE_DISPCLK); + } + } + + /* validate parameters */ + if (max_dispclk_khz == 0 || dsc_enc_caps->max_slice_width == 0) + return 1; + + /* consider minimum odm slices required due to + * 1) display pipe throughput (dispclk) + * 2) max image width per slice + */ + return dc_fixpt_ceil(dc_fixpt_max( + dc_fixpt_div_int(dc_fixpt_from_int(dsc_div_by_10_round_up(timing->pix_clk_100hz)), + max_dispclk_khz), // throughput + dc_fixpt_div_int(dc_fixpt_from_int(timing->h_addressable + timing->h_border_left + timing->h_border_right), + dsc_enc_caps->max_slice_width))); // slice width +} + static void get_dsc_enc_caps( const struct display_stream_compressor *dsc, struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz) { - // This is a static HW query, so we can use any DSC - memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps)); - if (dsc) { - if (!dsc->ctx->dc->debug.disable_dsc) - dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz); - if (dsc->ctx->dc->debug.native422_support) - dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1; + + if (!dsc || !dsc->ctx || !dsc->ctx->dc || dsc->ctx->dc->debug.disable_dsc) + return; + + /* check if the reported cap is global or only for a single DCN DSC enc */ + if (dsc->funcs->dsc_get_enc_caps) { + dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz); + } else { + build_dsc_enc_caps(dsc, dsc_enc_caps); } + + if (dsc->ctx->dc->debug.native422_support) + dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1; } /* Returns 'false' if no intersection was found for at least one capability. 
@@ -621,11 +765,6 @@ static bool intersect_dsc_caps( return true; } -static inline uint32_t dsc_div_by_10_round_up(uint32_t value) -{ - return (value + 9) / 10; -} - static uint32_t compute_bpp_x16_from_target_bandwidth( const uint32_t bandwidth_in_kbps, const struct dc_crtc_timing *timing, @@ -910,11 +1049,11 @@ static bool setup_dsc_config( const struct dc_crtc_timing *timing, const struct dc_dsc_config_options *options, const enum dc_link_encoding_format link_encoding, + int min_slices_h, struct dc_dsc_config *dsc_cfg) { struct dsc_enc_caps dsc_common_caps; int max_slices_h = 0; - int min_slices_h = 0; int num_slices_h = 0; int pic_width; int slice_width; @@ -1018,12 +1157,14 @@ static bool setup_dsc_config( if (!is_dsc_possible) goto done; - min_slices_h = pic_width / dsc_common_caps.max_slice_width; - if (pic_width % dsc_common_caps.max_slice_width) - min_slices_h++; + /* increase minimum slice count to meet sink slice width limitations */ + min_slices_h = dc_fixpt_ceil(dc_fixpt_max( + dc_fixpt_div_int(dc_fixpt_from_int(pic_width), dsc_common_caps.max_slice_width), // sink min + dc_fixpt_from_int(min_slices_h))); // source min min_slices_h = fit_num_slices_up(dsc_common_caps.slice_caps, min_slices_h); + /* increase minimum slice count to meet sink throughput limitations */ while (min_slices_h <= max_slices_h) { int pix_clk_per_slice_khz = dsc_div_by_10_round_up(timing->pix_clk_100hz) / min_slices_h; if (pix_clk_per_slice_khz <= sink_per_slice_throughput_mps * 1000) @@ -1032,14 +1173,12 @@ static bool setup_dsc_config( min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h); } - is_dsc_possible = (min_slices_h <= max_slices_h); - - if (pic_width % min_slices_h != 0) - min_slices_h = 0; // DSC TODO: Maybe try increasing the number of slices first? - - if (min_slices_h == 0 && max_slices_h == 0) - is_dsc_possible = false; + /* increase minimum slice count to meet divisibility requirements */ + while (pic_width % min_slices_h != 0 && min_slices_h <= max_slices_h) { + min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h); + } + is_dsc_possible = (min_slices_h <= max_slices_h) && max_slices_h != 0; if (!is_dsc_possible) goto done; @@ -1162,12 +1301,19 @@ bool dc_dsc_compute_config( { bool is_dsc_possible = false; struct dsc_enc_caps dsc_enc_caps; - + unsigned int min_dsc_slice_count; get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz); + + min_dsc_slice_count = get_min_dsc_slice_count_for_odm(dsc, &dsc_enc_caps, timing); + is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, target_bandwidth_kbps, - timing, options, link_encoding, dsc_cfg); + timing, + options, + link_encoding, + min_dsc_slice_count, + dsc_cfg); return is_dsc_possible; } diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c index bd1b9aef6d5c..89f0d999bf35 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c @@ -406,9 +406,10 @@ bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values dsc_reg_vals->alternate_ich_encoding_en = dsc_reg_vals->pps.dsc_version_minor == 1 ? 0 : 1; dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 
0xF : 0; + // Need to find the ceiling value for the slice width + dsc_reg_vals->pps.slice_width = (dsc_cfg->pic_width + dsc_cfg->dc_dsc_cfg.num_slices_h - 1) / dsc_cfg->dc_dsc_cfg.num_slices_h; // TODO: in addition to validating slice height (pic height must be divisible by slice height), // see what happens when the same condition doesn't apply for slice_width/pic_width. - dsc_reg_vals->pps.slice_width = dsc_cfg->pic_width / dsc_cfg->dc_dsc_cfg.num_slices_h; dsc_reg_vals->pps.slice_height = dsc_cfg->pic_height / dsc_cfg->dc_dsc_cfg.num_slices_v; ASSERT(dsc_reg_vals->pps.slice_height * dsc_cfg->dc_dsc_cfg.num_slices_v == dsc_cfg->pic_height); diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c index 4222679fd4c9..7bd92ae8b13e 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c @@ -9,19 +9,14 @@ #include "dsc/dscc_types.h" #include "dsc/rc_calc.h" -#define MAX_THROUGHPUT_PER_DSC_100HZ 20000000 -#define MAX_DSC_UNIT_COMBINE 4 - static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals); /* Object I/F functions */ //static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz); //static bool dsc401_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps); -static void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc); -static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz); +static void dsc401_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz); static const struct dsc_funcs dcn401_dsc_funcs = { - .dsc_get_enc_caps = dsc401_get_enc_caps, .dsc_read_state = dsc401_read_state, .dsc_validate_stream = dsc401_validate_stream, .dsc_set_config = dsc401_set_config, @@ -30,6 +25,7 @@ static const struct dsc_funcs dcn401_dsc_funcs = { .dsc_disable = dsc401_disable, .dsc_disconnect = dsc401_disconnect, .dsc_wait_disconnect_pending_clear = dsc401_wait_disconnect_pending_clear, + .dsc_get_single_enc_caps = dsc401_get_single_enc_caps, }; /* Macro definitions for REG_SET macros */ @@ -66,22 +62,14 @@ void dsc401_construct(struct dcn401_dsc *dsc, dsc->max_image_width = 5184; } -static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz) +static void dsc401_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz) { - int min_dsc_unit_required = (pixel_clock_100Hz + MAX_THROUGHPUT_PER_DSC_100HZ - 1) / MAX_THROUGHPUT_PER_DSC_100HZ; - dsc_enc_caps->dsc_version = 0x21; /* v1.2 - DP spec defined it in reverse order and we kept it */ - /* 1 slice is only supported with 1 DSC unit */ - dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = min_dsc_unit_required == 1 ? 1 : 0; - /* 2 slice is only supported with 1 or 2 DSC units */ - dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = (min_dsc_unit_required == 1 || min_dsc_unit_required == 2) ? 1 : 0; - /* 3 slice is only supported with 1 DSC unit */ - dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = min_dsc_unit_required == 1 ? 
1 : 0; + dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = 1; + dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = 1; + dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = 1; dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 = 1; - dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 = 1; - dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 = 1; - dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 = 1; dsc_enc_caps->lb_bit_depth = 13; dsc_enc_caps->is_block_pred_supported = true; @@ -95,7 +83,7 @@ static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clo dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1; dsc_enc_caps->color_depth.bits.COLOR_DEPTH_10_BPC = 1; dsc_enc_caps->color_depth.bits.COLOR_DEPTH_12_BPC = 1; - dsc_enc_caps->max_total_throughput_mps = MAX_THROUGHPUT_PER_DSC_100HZ * MAX_DSC_UNIT_COMBINE; + dsc_enc_caps->max_total_throughput_mps = max_dscclk_khz * 3 / 1000; dsc_enc_caps->max_slice_width = 5184; /* (including 64 overlap pixels for eDP MSO mode) */ dsc_enc_caps->bpp_increment_div = 16; /* 1/16th of a bit */ @@ -191,7 +179,7 @@ void dsc401_disable(struct display_stream_compressor *dsc) DSC_CLOCK_EN, 0); } -static void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc) +void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc) { struct dcn401_dsc *dsc401 = TO_DCN401_DSC(dsc); diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h index e3ca70058e64..7acd57eb4f42 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h @@ -341,5 +341,6 @@ void dsc401_set_config(struct display_stream_compressor *dsc, const struct dsc_c void dsc401_enable(struct display_stream_compressor *dsc, int opp_pipe); void dsc401_disable(struct display_stream_compressor *dsc); void dsc401_disconnect(struct display_stream_compressor *dsc); +void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h index 1ebce5426a58..b0bd1f9425b5 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h @@ -108,6 +108,7 @@ struct dsc_funcs { void (*dsc_disable)(struct display_stream_compressor *dsc); void (*dsc_disconnect)(struct display_stream_compressor *dsc); void (*dsc_wait_disconnect_pending_clear)(struct display_stream_compressor *dsc); + void (*dsc_get_single_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c index 1313a7c5d87b..73a1e6a03719 100644 --- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c +++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c @@ -28,7 +28,7 @@ #include "include/hdcp_msg_types.h" #include "include/signal_types.h" #include "core_types.h" -#include "link.h" +#include "link_service.h" #include "link_hwss.h" #include "link/protocols/link_dpcd.h" diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c index d347bb06577a..e7e5f6d4778e 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c @@ -440,6 +440,35 @@ void hubbub3_init_watermarks(struct hubbub *hubbub) REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, reg); } +void hubbub3_get_det_sizes(struct hubbub 
*hubbub, uint32_t *curr_det_sizes, uint32_t *target_det_sizes) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + + REG_GET_2(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, &curr_det_sizes[0], + DET0_SIZE, &target_det_sizes[0]); + + REG_GET_2(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, &curr_det_sizes[1], + DET1_SIZE, &target_det_sizes[1]); + + REG_GET_2(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, &curr_det_sizes[2], + DET2_SIZE, &target_det_sizes[2]); + + REG_GET_2(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, &curr_det_sizes[3], + DET3_SIZE, &target_det_sizes[3]); + +} + +uint32_t hubbub3_compbuf_config_error(struct hubbub *hubbub) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + uint32_t compbuf_config_error = 0; + + REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, + &compbuf_config_error); + + return compbuf_config_error; +} + static const struct hubbub_funcs hubbub30_funcs = { .update_dchub = hubbub2_update_dchub, .init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx, @@ -457,6 +486,8 @@ static const struct hubbub_funcs hubbub30_funcs = { .force_pstate_change_control = hubbub3_force_pstate_change_control, .init_watermarks = hubbub3_init_watermarks, .hubbub_read_state = hubbub2_read_state, + .get_det_sizes = hubbub3_get_det_sizes, + .compbuf_config_error = hubbub3_compbuf_config_error, }; void hubbub3_construct(struct dcn20_hubbub *hubbub3, diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h index ca6233e8f1f4..49a469969d36 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h @@ -133,4 +133,10 @@ void hubbub3_force_pstate_change_control(struct hubbub *hubbub, void hubbub3_init_watermarks(struct hubbub *hubbub); +void hubbub3_get_det_sizes(struct hubbub *hubbub, + uint32_t *curr_det_sizes, + uint32_t *target_det_sizes); + +uint32_t hubbub3_compbuf_config_error(struct hubbub *hubbub); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c index b98505b240a7..cdb20251a154 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c @@ -1071,6 +1071,8 @@ static const struct hubbub_funcs hubbub31_funcs = { .program_compbuf_size = dcn31_program_compbuf_size, .init_crb = dcn31_init_crb, .hubbub_read_state = hubbub2_read_state, + .get_det_sizes = hubbub3_get_det_sizes, + .compbuf_config_error = hubbub3_compbuf_config_error, }; void hubbub31_construct(struct dcn20_hubbub *hubbub31, diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c index 32a6be543105..4d4ca6d77bbd 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c @@ -28,6 +28,7 @@ #include "dcn32_hubbub.h" #include "dm_services.h" #include "reg_helper.h" +#include "dal_asic_id.h" #define CTX \ @@ -72,6 +73,14 @@ static void dcn32_init_crb(struct hubbub *hubbub) REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x47F); } +static void hubbub32_set_sdp_control(struct hubbub *hubbub, bool dc_control) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + + REG_UPDATE(DCHUBBUB_SDPIF_CFG0, + SDPIF_PORT_CONTROL, dc_control); +} + void hubbub32_set_request_limit(struct hubbub *hubbub, int memory_channel_count, int words_per_channel) { struct dcn20_hubbub *hubbub2 = 
TO_DCN20_HUBBUB(hubbub); @@ -754,8 +763,18 @@ static bool hubbub32_program_watermarks( unsigned int refclk_mhz, bool safe_to_lower) { + struct dc *dc = hubbub->ctx->dc; bool wm_pending = false; + if (!safe_to_lower && dc->debug.disable_stutter_for_wm_program && + (ASICREV_IS_GC_11_0_0(dc->ctx->asic_id.hw_internal_rev) || + ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev))) { + /* before raising watermarks, SDP control is given to DF and stutter must be disabled */ + wm_pending = true; + hubbub32_set_sdp_control(hubbub, false); + hubbub1_allow_self_refresh_control(hubbub, false); + } + if (hubbub32_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) wm_pending = true; @@ -786,10 +805,20 @@ static bool hubbub32_program_watermarks( REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/ - if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter) - hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); + if (safe_to_lower) { + /* after lowering watermarks, stutter setting is restored, SDP control given to DC */ + hubbub1_allow_self_refresh_control(hubbub, !dc->debug.disable_stutter); + + if (dc->debug.disable_stutter_for_wm_program && + (ASICREV_IS_GC_11_0_0(dc->ctx->asic_id.hw_internal_rev) || + ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev))) { + hubbub32_set_sdp_control(hubbub, true); + } + } else if (dc->debug.disable_stutter) { + hubbub1_allow_self_refresh_control(hubbub, !dc->debug.disable_stutter); + } - hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow); + hubbub32_force_usr_retraining_allow(hubbub, dc->debug.force_usr_allow); return wm_pending; } @@ -974,8 +1003,7 @@ void hubbub32_init(struct hubbub *hubbub) ignore the "df_pre_cstate_req" from the SDP port control. 
only the DCN will determine when to connect the SDP port */ - REG_UPDATE(DCHUBBUB_SDPIF_CFG0, - SDPIF_PORT_CONTROL, 1); + hubbub32_set_sdp_control(hubbub, true); /*Set SDP's max outstanding request to 512 must set the register back to 0 (max outstanding = 256) in zero frame buffer mode*/ REG_UPDATE(DCHUBBUB_SDPIF_CFG1, @@ -1009,6 +1037,8 @@ static const struct hubbub_funcs hubbub32_funcs = { .force_usr_retraining_allow = hubbub32_force_usr_retraining_allow, .set_request_limit = hubbub32_set_request_limit, .get_mall_en = hubbub32_get_mall_en, + .get_det_sizes = hubbub3_get_det_sizes, + .compbuf_config_error = hubbub3_compbuf_config_error, }; void hubbub32_construct(struct dcn20_hubbub *hubbub2, diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c index 6d41953011f5..a443722a8632 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c @@ -589,6 +589,8 @@ static const struct hubbub_funcs hubbub35_funcs = { .hubbub_read_state = hubbub2_read_state, .force_usr_retraining_allow = hubbub32_force_usr_retraining_allow, .dchubbub_init = hubbub35_init, + .get_det_sizes = hubbub3_get_det_sizes, + .compbuf_config_error = hubbub3_compbuf_config_error, }; void hubbub35_construct(struct dcn20_hubbub *hubbub2, diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c index 92fab471b183..a36273a52880 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c @@ -1247,6 +1247,8 @@ static const struct hubbub_funcs hubbub4_01_funcs = { .program_compbuf_segments = dcn401_program_compbuf_segments, .wait_for_det_update = dcn401_wait_for_det_update, .program_arbiter = dcn401_program_arbiter, + .get_det_sizes = hubbub3_get_det_sizes, + .compbuf_config_error = hubbub3_compbuf_config_error, }; void hubbub401_construct(struct dcn20_hubbub *hubbub2, diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h index c7765e6f09e6..cf2eb9793008 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h @@ -104,7 +104,8 @@ SRI(DCN_SURF1_TTU_CNTL1, HUBPREQ, id),\ SRI(DCN_CUR0_TTU_CNTL0, HUBPREQ, id),\ SRI(DCN_CUR0_TTU_CNTL1, HUBPREQ, id),\ - SRI(HUBP_CLK_CNTL, HUBP, id) + SRI(HUBP_CLK_CNTL, HUBP, id),\ + SRI(HUBPRET_READ_LINE_VALUE, HUBPRET, id) /* Register address initialization macro for ASICs with VM */ #define HUBP_REG_LIST_DCN_VM(id)\ @@ -249,7 +250,8 @@ uint32_t CURSOR_POSITION; \ uint32_t CURSOR_HOT_SPOT; \ uint32_t CURSOR_DST_OFFSET; \ - uint32_t HUBP_CLK_CNTL + uint32_t HUBP_CLK_CNTL; \ + uint32_t HUBPRET_READ_LINE_VALUE #define HUBP_SF(reg_name, field_name, post_fix)\ .field_name = reg_name ## __ ## field_name ## post_fix @@ -622,6 +624,8 @@ type DCN_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM;\ type DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB;\ type DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;\ + type PIPE_READ_LINE;\ + type HUBP_SEG_ALLOC_ERR_STATUS;\ /* todo: get these from GVM instead of reading registers ourselves */\ type PAGE_DIRECTORY_ENTRY_HI32;\ type PAGE_DIRECTORY_ENTRY_LO32;\ @@ -666,10 +670,30 @@ struct dcn_mi_mask { DCN_HUBP_REG_FIELD_LIST(uint32_t); }; +struct dcn_fl_regs_st { + uint32_t lut_enable; + uint32_t lut_done; + uint32_t lut_addr_mode; + uint32_t lut_width; + 
uint32_t lut_mpc_width; + uint32_t lut_tmz; + uint32_t lut_crossbar_sel_r; + uint32_t lut_crossbar_sel_g; + uint32_t lut_crossbar_sel_b; + uint32_t lut_addr_hi; + uint32_t lut_addr_lo; + uint32_t refcyc_3dlut_group; + uint32_t lut_fl_bias; + uint32_t lut_fl_scale; + uint32_t lut_fl_mode; + uint32_t lut_fl_format; +}; + struct dcn_hubp_state { struct _vcs_dpi_display_dlg_regs_st dlg_attr; struct _vcs_dpi_display_ttu_regs_st ttu_attr; struct _vcs_dpi_display_rq_regs_st rq_regs; + struct dcn_fl_regs_st fl_regs; uint32_t pixel_format; uint32_t inuse_addr_hi; uint32_t inuse_addr_lo; diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h index 62369be070ea..f325db555102 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h @@ -264,6 +264,7 @@ type HUBP_3DLUT_DONE;\ type HUBP_3DLUT_ADDRESSING_MODE;\ type HUBP_3DLUT_WIDTH;\ + type HUBP_3DLUT_MPC_WIDTH;\ type HUBP_3DLUT_TMZ;\ type HUBP_3DLUT_CROSSBAR_SELECT_Y_G;\ type HUBP_3DLUT_CROSSBAR_SELECT_CB_B;\ diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c index 0da70b50e86d..556214b2227d 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c @@ -505,6 +505,30 @@ void hubp3_init(struct hubp *hubp) hubp_reset(hubp); } +uint32_t hubp3_get_current_read_line(struct hubp *hubp) +{ + uint32_t read_line = 0; + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + + REG_GET(HUBPRET_READ_LINE_VALUE, + PIPE_READ_LINE, + &read_line); + + return read_line; +} + +unsigned int hubp3_get_underflow_status(struct hubp *hubp) +{ + uint32_t hubp_underflow = 0; + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + + REG_GET(DCHUBP_CNTL, + HUBP_UNDERFLOW_STATUS, + &hubp_underflow); + + return hubp_underflow; +} + static struct hubp_funcs dcn30_hubp_funcs = { .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer, .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, @@ -534,6 +558,8 @@ static struct hubp_funcs dcn30_hubp_funcs = { .hubp_soft_reset = hubp1_soft_reset, .hubp_set_flip_int = hubp1_set_flip_int, .hubp_clear_tiling = hubp3_clear_tiling, + .hubp_get_underflow_status = hubp3_get_underflow_status, + .hubp_get_current_read_line = hubp3_get_current_read_line, }; bool hubp3_construct( diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h index b7d7adf0b58c..842f4eb72cc8 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h @@ -243,7 +243,8 @@ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_6, REFCYC_PER_META_CHUNK_FLIP_C, mask_sh),\ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, mask_sh),\ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_6, REFCYC_PER_VM_REQ_VBLANK, mask_sh),\ - HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh) + HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh),\ + HUBP_SF(HUBPRET0_HUBPRET_READ_LINE_VALUE, PIPE_READ_LINE, mask_sh) bool hubp3_construct( struct dcn20_hubp *hubp2, @@ -299,6 +300,11 @@ void hubp3_init(struct hubp *hubp); void hubp3_clear_tiling(struct hubp *hubp); +uint32_t hubp3_get_current_read_line(struct hubp *hubp); + +uint32_t hubp3_get_underflow_status(struct hubp *hubp); + + #endif /* __DC_HUBP_DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c 
b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c index 7fd582a8a4ba..47101847c2b7 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c @@ -68,6 +68,18 @@ void hubp31_program_extended_blank_value( hubp31_program_extended_blank(hubp, min_dst_y_next_start_optimized); } +uint32_t hubp31_get_det_config_error(struct hubp *hubp) +{ + uint32_t config_error = 0; + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + + REG_GET(DCHUBP_CNTL, + HUBP_SEG_ALLOC_ERR_STATUS, + &config_error); + + return config_error; +} + static struct hubp_funcs dcn31_hubp_funcs = { .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer, .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, @@ -98,6 +110,9 @@ static struct hubp_funcs dcn31_hubp_funcs = { .hubp_in_blank = hubp1_in_blank, .program_extended_blank = hubp31_program_extended_blank, .hubp_clear_tiling = hubp3_clear_tiling, + .hubp_get_underflow_status = hubp3_get_underflow_status, + .hubp_get_current_read_line = hubp3_get_current_read_line, + .hubp_get_det_config_error = hubp31_get_det_config_error, }; bool hubp31_construct( diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h index d688db79b750..5952c4671507 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h @@ -228,7 +228,9 @@ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_6, REFCYC_PER_META_CHUNK_FLIP_C, mask_sh),\ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, mask_sh),\ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_6, REFCYC_PER_VM_REQ_VBLANK, mask_sh),\ - HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh) + HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh),\ + HUBP_SF(HUBPRET0_HUBPRET_READ_LINE_VALUE, PIPE_READ_LINE, mask_sh),\ + HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_SEG_ALLOC_ERR_STATUS, mask_sh) bool hubp31_construct( @@ -246,4 +248,6 @@ void hubp31_set_unbounded_requesting(struct hubp *hubp, bool enable); void hubp31_program_extended_blank_value( struct hubp *hubp, unsigned int min_dst_y_next_start_optimized); +uint32_t hubp31_get_det_config_error(struct hubp *hubp); + #endif /* __DC_HUBP_DCN31_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c index f3a21c623f44..a5f23bb2a76a 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c @@ -206,6 +206,9 @@ static struct hubp_funcs dcn32_hubp_funcs = { .hubp_update_mall_sel = hubp32_update_mall_sel, .hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering, .hubp_clear_tiling = hubp3_clear_tiling, + .hubp_get_underflow_status = hubp3_get_underflow_status, + .hubp_get_current_read_line = hubp3_get_current_read_line, + .hubp_get_det_config_error = hubp31_get_det_config_error, }; bool hubp32_construct( diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c index 6d060ba12da8..b140808f21af 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c @@ -218,6 +218,9 @@ static struct hubp_funcs dcn35_hubp_funcs = { .hubp_in_blank = hubp1_in_blank, .program_extended_blank = hubp31_program_extended_blank_value, .hubp_clear_tiling = hubp3_clear_tiling, + .hubp_get_underflow_status = hubp3_get_underflow_status, + 
.hubp_get_current_read_line = hubp3_get_current_read_line, + .hubp_get_det_config_error = hubp31_get_det_config_error, }; bool hubp35_construct( diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c index baed31611477..0fcbc6a35be6 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c @@ -86,11 +86,11 @@ void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_WIDTH, width); } -void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled) +void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, uint8_t protection_bits) { struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); - REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_TMZ, protection_enabled ? 1 : 0); + REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_TMZ, protection_bits); } void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp, @@ -127,6 +127,43 @@ void hubp401_program_3dlut_fl_format(struct hubp *hubp, enum hubp_3dlut_fl_forma REG_UPDATE(_3DLUT_FL_CONFIG, HUBP0_3DLUT_FL_FORMAT, format); } +void hubp401_program_3dlut_fl_config( + struct hubp *hubp, + struct hubp_fl_3dlut_config *cfg) +{ + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + + uint32_t mpc_width = {(cfg->width == 17) ? 0 : 1}; + uint32_t width = {cfg->width}; + + if (cfg->layout == DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR) + width = (cfg->width == 17) ? 4916 : 35940; + + REG_UPDATE_2(_3DLUT_FL_CONFIG, + HUBP0_3DLUT_FL_MODE, cfg->mode, + HUBP0_3DLUT_FL_FORMAT, cfg->format); + + REG_UPDATE_2(_3DLUT_FL_BIAS_SCALE, + HUBP0_3DLUT_FL_BIAS, cfg->bias, + HUBP0_3DLUT_FL_SCALE, cfg->scale); + + REG_UPDATE(HUBP_3DLUT_ADDRESS_HIGH, + HUBP_3DLUT_ADDRESS_HIGH, cfg->address.lut3d.addr.high_part); + REG_UPDATE(HUBP_3DLUT_ADDRESS_LOW, + HUBP_3DLUT_ADDRESS_LOW, cfg->address.lut3d.addr.low_part); + + //cross bar + REG_UPDATE_8(HUBP_3DLUT_CONTROL, + HUBP_3DLUT_MPC_WIDTH, mpc_width, + HUBP_3DLUT_WIDTH, width, + HUBP_3DLUT_CROSSBAR_SELECT_CR_R, cfg->crossbar_bit_slice_cr_r, + HUBP_3DLUT_CROSSBAR_SELECT_Y_G, cfg->crossbar_bit_slice_y_g, + HUBP_3DLUT_CROSSBAR_SELECT_CB_B, cfg->crossbar_bit_slice_cb_b, + HUBP_3DLUT_ADDRESSING_MODE, cfg->addr_mode, + HUBP_3DLUT_TMZ, cfg->protection_bits, + HUBP_3DLUT_ENABLE, cfg->enabled ? 
1 : 0); +} + void hubp401_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor) { struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); @@ -1033,6 +1070,10 @@ static struct hubp_funcs dcn401_hubp_funcs = { .hubp_program_3dlut_fl_crossbar = hubp401_program_3dlut_fl_crossbar, .hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done, .hubp_clear_tiling = hubp401_clear_tiling, + .hubp_program_3dlut_fl_config = hubp401_program_3dlut_fl_config, + .hubp_get_underflow_status = hubp3_get_underflow_status, + .hubp_get_current_read_line = hubp3_get_current_read_line, + .hubp_get_det_config_error = hubp31_get_det_config_error, }; bool hubp401_construct( diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h index 6e1d4c90ddd4..fdabbeec8ffa 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h @@ -252,7 +252,9 @@ HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_1H_P0, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_2H_P0, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_1H_P1, mask_sh),\ - HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_2H_P1, mask_sh) + HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_2H_P1, mask_sh),\ + HUBP_SF(HUBPRET0_HUBPRET_READ_LINE_VALUE, PIPE_READ_LINE, mask_sh),\ + HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_SEG_ALLOC_ERR_STATUS, mask_sh) void hubp401_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor); @@ -333,7 +335,7 @@ void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp, enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b, enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r); -void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled); +void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, uint8_t protection_bits); void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width width); @@ -349,6 +351,10 @@ void hubp401_program_3dlut_fl_format(struct hubp *hubp, enum hubp_3dlut_fl_forma void hubp401_program_3dlut_fl_mode(struct hubp *hubp, enum hubp_3dlut_fl_mode mode); +void hubp401_program_3dlut_fl_config( + struct hubp *hubp, + struct hubp_fl_3dlut_config *cfg); + void hubp401_clear_tiling(struct hubp *hubp); void hubp401_vready_at_or_After_vsync(struct hubp *hubp, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 38e17b1796e1..24184b4eb352 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -48,7 +48,7 @@ #include "link_encoder.h" #include "link_enc_cfg.h" #include "link_hwss.h" -#include "link.h" +#include "link_service.h" #include "dccg.h" #include "clock_source.h" #include "clk_mgr.h" @@ -671,6 +671,7 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx) uint32_t early_control = 0; struct timing_generator *tg = pipe_ctx->stream_res.tg; + link_hwss->setup_stream_attribute(pipe_ctx); link_hwss->setup_stream_encoder(pipe_ctx); dc->hwss.update_info_frame(pipe_ctx); @@ -1186,8 +1187,10 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) if (dccg) { dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst); dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst); - if (dccg && dccg->funcs->set_dtbclk_dto) - dccg->funcs->set_dtbclk_dto(dccg, &dto_params); + if (!(dc->ctx->dce_version >= DCN_VERSION_3_5)) { + if 
(dccg && dccg->funcs->set_dtbclk_dto) + dccg->funcs->set_dtbclk_dto(dccg, &dto_params); + } } } else if (dccg && dccg->funcs->disable_symclk_se) { dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst, @@ -1267,7 +1270,7 @@ void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable) pipe_ctx->stream_res.stream_enc->funcs->set_avmute(pipe_ctx->stream_res.stream_enc, enable); } -static enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id) +enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id) { switch (crtc_id) { case CONTROLLER_ID_D0: @@ -1287,7 +1290,7 @@ static enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id) } } -static void populate_audio_dp_link_info( +void populate_audio_dp_link_info( const struct pipe_ctx *pipe_ctx, struct audio_dp_link_info *dp_link_info) { @@ -1379,7 +1382,7 @@ static void populate_audio_dp_link_info( } } -static void build_audio_output( +void build_audio_output( struct dc_state *state, const struct pipe_ctx *pipe_ctx, struct audio_output *audio_output) @@ -1684,6 +1687,19 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw( if (dc_is_dp_signal(pipe_ctx->stream->signal)) dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG); + /* Temporary workaround to perform DSC programming ahead of stream enablement + * for smartmux/SPRS + * TODO: Remove SmartMux/SPRS checks once movement of DSC programming is generalized + */ + if (pipe_ctx->stream->timing.flags.DSC) { + if ((pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && + ((link->dc->config.smart_mux_version && link->dc->is_switch_in_progress_dest) + || link->is_dds || link->skip_implict_edp_power_control)) && + (dc_is_dp_signal(pipe_ctx->stream->signal) || + dc_is_virtual_signal(pipe_ctx->stream->signal))) + dc->link_srv->set_dsc_enable(pipe_ctx, true); + } + if (!stream->dpms_off) dc->link_srv->set_dpms_on(context, pipe_ctx); @@ -1909,10 +1925,8 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) get_edp_streams(context, edp_streams, &edp_stream_num); - // Check fastboot support, disable on DCE8 because of blank screens - if (edp_num && edp_stream_num && dc->ctx->dce_version != DCE_VERSION_8_0 && - dc->ctx->dce_version != DCE_VERSION_8_1 && - dc->ctx->dce_version != DCE_VERSION_8_3) { + /* Check fastboot support, disable on DCE 6-8 because of blank screens */ + if (edp_num && edp_stream_num && dc->ctx->dce_version < DCE_VERSION_10_0) { for (i = 0; i < edp_num; i++) { edp_link = edp_links[i]; if (edp_link != edp_streams[0]->link) @@ -1925,6 +1939,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) can_apply_edp_fast_boot = dc_validate_boot_timing(dc, edp_stream->sink, &edp_stream->timing); + + // For Mux-platform, the default value is false. + // Disable fast boot during mux switching. + // The flag will be cleared after switching is done. 
+ if (dc->is_switch_in_progress_dest && edp_link->is_dds) + can_apply_edp_fast_boot = false; + edp_stream->apply_edp_fast_boot_optimization = can_apply_edp_fast_boot; if (can_apply_edp_fast_boot) { DC_LOG_EVENT_LINK_TRAINING("eDP fast boot Enable\n"); @@ -1968,6 +1989,10 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) if (edp_with_sink_num) edp_link_with_sink = edp_links_with_sink[0]; + // During a mux switch, powering down the HW blocks and then enabling + // the link via a DPCD SET_POWER write causes a brief flash + keep_edp_vdd_on |= dc->is_switch_in_progress_dest; + if (!can_apply_edp_fast_boot && !can_apply_seamless_boot) { if (edp_link_with_sink && !keep_edp_vdd_on) { /*turn off backlight before DP_blank and encoder powered down*/ @@ -2228,7 +2253,7 @@ static bool should_enable_fbc(struct dc *dc, /* * Enable FBC */ -static void enable_fbc( +void enable_fbc( struct dc *dc, struct dc_state *context) { diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h index 06789ac3a224..9c032e449481 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h @@ -110,5 +110,16 @@ void dce110_enable_dp_link_output( enum signal_type signal, enum clock_source_id clock_source, const struct dc_link_settings *link_settings); +void build_audio_output( + struct dc_state *state, + const struct pipe_ctx *pipe_ctx, + struct audio_output *audio_output); +enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id); +void populate_audio_dp_link_info( + const struct pipe_ctx *pipe_ctx, + struct audio_dp_link_info *dp_link_info); +void enable_fbc( + struct dc *dc, + struct dc_state *context); #endif /* __DC_HWSS_DCE110_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c index f9ee55998b6b..e9fe97f0c4ea 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c @@ -55,7 +55,7 @@ #include "dce/dmub_hw_lock_mgr.h" #include "dc_trace.h" #include "dce/dmub_outbox.h" -#include "link.h" +#include "link_service.h" #include "dc_state_priv.h" #define DC_LOGGER \ @@ -327,6 +327,46 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx) } } + DTN_INFO("\n=======HUBP FL======\n"); + static const char * const pLabels[] = { + "inst", "Enabled ", "Done ", "adr_mode ", "width ", "mpc_width ", + "tmz", "xbar_sel_R", "xbar_sel_G", "xbar_sel_B", "adr_hi ", + "adr_low", "REFCYC", "Bias", "Scale", "Mode", + "Format", "prefetch"}; + + for (i = 0; i < pool->pipe_count; i++) { + struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state); + struct dcn_fl_regs_st *fl_regs = &s->fl_regs; + struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr; + + if (!s->blank_en) { + uint32_t values[] = { + pool->hubps[i]->inst, + fl_regs->lut_enable, + fl_regs->lut_done, + fl_regs->lut_addr_mode, + fl_regs->lut_width, + fl_regs->lut_mpc_width, + fl_regs->lut_tmz, + fl_regs->lut_crossbar_sel_r, + fl_regs->lut_crossbar_sel_g, + fl_regs->lut_crossbar_sel_b, + fl_regs->lut_addr_hi, + fl_regs->lut_addr_lo, + fl_regs->refcyc_3dlut_group, + fl_regs->lut_fl_bias, + fl_regs->lut_fl_scale, + fl_regs->lut_fl_mode, + fl_regs->lut_fl_format, + dlg_regs->dst_y_prefetch}; + + int num_elements = 18; + + for (int j = 0; j < num_elements; j++) + DTN_INFO("%s \t %8xh\n", pLabels[j], values[j]); + } + } + 
DTN_INFO("\n=========RQ========\n"); DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s" " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s" @@ -511,6 +551,60 @@ static void dcn10_log_color_state(struct dc *dc, dc->caps.color.mpc.num_3dluts, dc->caps.color.mpc.ogam_ram, dc->caps.color.mpc.ocsc); + DTN_INFO("===== MPC RMCM 3DLUT =====\n"); + static const char * const pLabels[] = { + "MPCC", "SIZE", "MODE", "MODE_CUR", "RD_SEL", + "30BIT_EN", "WR_EN_MASK", "RAM_SEL", "OUT_NORM_FACTOR", "FL_SEL", + "OUT_OFFSET", "OUT_SCALE", "FL_DONE", "SOFT_UNDERFLOW", "HARD_UNDERFLOW", + "MEM_PWR_ST", "FORCE", "DIS", "MODE"}; + + for (i = 0; i < pool->mpcc_count; i++) { + struct mpcc_state s = {0}; + + pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s); + if (s.opp_id != 0xf) { + uint32_t values[] = { + i, + s.rmcm_regs.rmcm_3dlut_size, + s.rmcm_regs.rmcm_3dlut_mode, + s.rmcm_regs.rmcm_3dlut_mode_cur, + s.rmcm_regs.rmcm_3dlut_read_sel, + s.rmcm_regs.rmcm_3dlut_30bit_en, + s.rmcm_regs.rmcm_3dlut_wr_en_mask, + s.rmcm_regs.rmcm_3dlut_ram_sel, + s.rmcm_regs.rmcm_3dlut_out_norm_factor, + s.rmcm_regs.rmcm_3dlut_fl_sel, + s.rmcm_regs.rmcm_3dlut_out_offset_r, + s.rmcm_regs.rmcm_3dlut_out_scale_r, + s.rmcm_regs.rmcm_3dlut_fl_done, + s.rmcm_regs.rmcm_3dlut_fl_soft_underflow, + s.rmcm_regs.rmcm_3dlut_fl_hard_underflow, + s.rmcm_regs.rmcm_3dlut_mem_pwr_state, + s.rmcm_regs.rmcm_3dlut_mem_pwr_force, + s.rmcm_regs.rmcm_3dlut_mem_pwr_dis, + s.rmcm_regs.rmcm_3dlut_mem_pwr_mode}; + + int num_elements = 19; + + for (int j = 0; j < num_elements; j++) + DTN_INFO("%s \t %8xh\n", pLabels[j], values[j]); + } + } + DTN_INFO("\n"); + DTN_INFO("===== MPC RMCM Shaper =====\n"); + DTN_INFO("MPCC: CNTL LUT_MODE MODE_CUR WR_EN_MASK WR_SEL OFFSET SCALE START_B START_SEG_B END_B END_BASE_B MEM_PWR_ST FORCE DIS MODE\n"); + for (i = 0; i < pool->mpcc_count; i++) { + struct mpcc_state s = {0}; + + pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s); + if (s.opp_id != 0xf) + DTN_INFO("[%2d]: %4xh %4xh %6xh %4x %4x %4x %4x %4x %4xh %4xh %6xh %4x %4x %4x %4x\n", + i, s.rmcm_regs.rmcm_cntl, s.rmcm_regs.rmcm_shaper_lut_mode, s.rmcm_regs.rmcm_shaper_mode_cur, + s.rmcm_regs.rmcm_shaper_lut_write_en_mask, s.rmcm_regs.rmcm_shaper_lut_write_sel, s.rmcm_regs.rmcm_shaper_offset_b, + s.rmcm_regs.rmcm_shaper_scale_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_start_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_start_seg_b, + s.rmcm_regs.rmcm_shaper_rama_exp_region_end_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_end_base_b, s.rmcm_regs.rmcm_shaper_mem_pwr_state, + s.rmcm_regs.rmcm_shaper_mem_pwr_force, s.rmcm_regs.rmcm_shaper_mem_pwr_dis, s.rmcm_regs.rmcm_shaper_mem_pwr_mode); + } } void dcn10_log_hw_state(struct dc *dc, @@ -3253,7 +3347,7 @@ void dcn10_prepare_bandwidth( context, false); - dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub, + dc->optimized_required = hubbub->funcs->program_watermarks(hubbub, &context->bw_ctx.bw.dcn.watermarks, dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, true); @@ -3569,6 +3663,8 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) int y_plane = pipe_ctx->plane_state->dst_rect.y; int x_pos = pos_cpy.x; int y_pos = pos_cpy.y; + int clip_x = pipe_ctx->plane_state->clip_rect.x; + int clip_width = pipe_ctx->plane_state->clip_rect.width; if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) { if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) || @@ -3587,7 +3683,7 @@ 
void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) */ /** - * Translate cursor from stream space to plane space. + * Translate cursor and clip offset from stream space to plane space. * * If the cursor is scaled then we need to scale the position * to be in the approximately correct place. We can't do anything @@ -3604,6 +3700,10 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_state->dst_rect.width; y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height / pipe_ctx->plane_state->dst_rect.height; + clip_x = (clip_x - x_plane) * pipe_ctx->plane_state->src_rect.width / + pipe_ctx->plane_state->dst_rect.width; + clip_width = clip_width * pipe_ctx->plane_state->src_rect.width / + pipe_ctx->plane_state->dst_rect.width; } /** @@ -3650,30 +3750,18 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) if (param.rotation == ROTATION_ANGLE_0) { - int viewport_width = - pipe_ctx->plane_res.scl_data.viewport.width; - int viewport_x = - pipe_ctx->plane_res.scl_data.viewport.x; if (param.mirror) { - if (pipe_split_on || odm_combine_on) { - if (pos_cpy.x >= viewport_width + viewport_x) { - pos_cpy.x = 2 * viewport_width - - pos_cpy.x + 2 * viewport_x; - } else { - uint32_t temp_x = pos_cpy.x; - - pos_cpy.x = 2 * viewport_x - pos_cpy.x; - if (temp_x >= viewport_x + - (int)hubp->curs_attr.width || pos_cpy.x - <= (int)hubp->curs_attr.width + - pipe_ctx->plane_state->src_rect.x) { - pos_cpy.x = 2 * viewport_width - temp_x; - } - } - } else { - pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x; - } + /* + * The plane is split into multiple viewports. + * The combination of all viewports spans the + * entirety of the clip rect. + * + * For no pipe_split, viewport_width represents + * the full width of the clip_rect, so we can just + * mirror it. + */ + pos_cpy.x = clip_width - pos_cpy.x + 2 * clip_x; } } // Swap axis and mirror horizontally @@ -3743,30 +3831,17 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) } // Mirror horizontally and vertically else if (param.rotation == ROTATION_ANGLE_180) { - int viewport_width = - pipe_ctx->plane_res.scl_data.viewport.width; - int viewport_x = - pipe_ctx->plane_res.scl_data.viewport.x; - if (!param.mirror) { - if (pipe_split_on || odm_combine_on) { - if (pos_cpy.x >= viewport_width + viewport_x) { - pos_cpy.x = 2 * viewport_width - - pos_cpy.x + 2 * viewport_x; - } else { - uint32_t temp_x = pos_cpy.x; - - pos_cpy.x = 2 * viewport_x - pos_cpy.x; - if (temp_x >= viewport_x + - (int)hubp->curs_attr.width || pos_cpy.x - <= (int)hubp->curs_attr.width + - pipe_ctx->plane_state->src_rect.x) { - pos_cpy.x = temp_x + viewport_width; - } - } - } else { - pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x; - } + /* + * The plane is split into multiple viewports. + * The combination of all viewports spans the + * entirety of the clip rect. + * + * For no pipe_split, viewport_width represents + * the full width of the clip_rect, so we can just + * mirror it. 
+ */ + pos_cpy.x = clip_width - pos_cpy.x + 2 * clip_x; } /** diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index c277df12c817..9477c9f9e196 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -54,7 +54,7 @@ #include "dpcd_defs.h" #include "inc/link_enc_cfg.h" #include "link_hwss.h" -#include "link.h" +#include "link_service.h" #include "dc_state_priv.h" #define DC_LOGGER \ @@ -283,14 +283,13 @@ void dcn20_setup_gsl_group_as_lock( } /* at this point we want to program whether it's to enable or disable */ - if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL && - pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) { + if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL) { pipe_ctx->stream_res.tg->funcs->set_gsl( pipe_ctx->stream_res.tg, &gsl); - - pipe_ctx->stream_res.tg->funcs->set_gsl_source_select( - pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0); + if (pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) + pipe_ctx->stream_res.tg->funcs->set_gsl_source_select( + pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0); } else BREAK_TO_DEBUGGER(); } @@ -956,7 +955,7 @@ enum dc_status dcn20_enable_stream_timing( return DC_ERROR_UNEXPECTED; } - hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp); + udelay(stream->timing.v_total * (stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz)); params.vertical_total_min = stream->adjust.v_total_min; params.vertical_total_max = stream->adjust.v_total_max; @@ -1971,14 +1970,6 @@ static void dcn20_program_pipe( pipe_ctx->plane_state->update_flags.bits.hdr_mult)) hws->funcs.set_hdr_multiplier(pipe_ctx); - if (hws->funcs.populate_mcm_luts) { - if (pipe_ctx->plane_state) { - hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts, - pipe_ctx->plane_state->lut_bank_a); - pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a; - } - } - if (pipe_ctx->plane_state && (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || pipe_ctx->plane_state->update_flags.bits.gamma_change || @@ -1991,10 +1982,8 @@ static void dcn20_program_pipe( * updating on slave planes */ if (pipe_ctx->update_flags.bits.enable || - pipe_ctx->update_flags.bits.plane_changed || - pipe_ctx->stream->update_flags.bits.out_tf || - (pipe_ctx->plane_state && - pipe_ctx->plane_state->update_flags.bits.output_tf_change)) + pipe_ctx->update_flags.bits.plane_changed || + pipe_ctx->stream->update_flags.bits.out_tf) hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream); /* If the pipe has been enabled or has a different opp, we @@ -2399,10 +2388,10 @@ void dcn20_prepare_bandwidth( } /* program dchubbub watermarks: - * For assigning wm_optimized_required, use |= operator since we don't want + * For assigning optimized_required, use |= operator since we don't want * to clear the value if the optimize has not happened yet */ - dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub, + dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub, &context->bw_ctx.bw.dcn.watermarks, dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, false); @@ -2415,10 +2404,10 @@ void dcn20_prepare_bandwidth( if (hubbub->funcs->program_compbuf_size) { if (context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes) { compbuf_size_kb = context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes; - dc->wm_optimized_required |= (compbuf_size_kb != 
dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes); + dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes); } else { compbuf_size_kb = context->bw_ctx.bw.dcn.compbuf_size_kb; - dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb); + dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb); } hubbub->funcs->program_compbuf_size(hubbub, compbuf_size_kb, false); @@ -2492,7 +2481,7 @@ bool dcn20_update_bandwidth( struct dce_hwseq *hws = dc->hwseq; /* recalculate DML parameters */ - if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) + if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) return false; /* apply updated bandwidth parameters */ @@ -2816,6 +2805,8 @@ void dcn20_reset_back_end_for_pipe( { struct dc_link *link = pipe_ctx->stream->link; const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + struct dccg *dccg = dc->res_pool->dccg; + struct dtbclk_dto_params dto_params = {0}; DC_LOGGER_INIT(dc->ctx->logger); if (pipe_ctx->stream_res.stream_enc == NULL) { @@ -2876,6 +2867,13 @@ void dcn20_reset_back_end_for_pipe( &pipe_ctx->link_res, pipe_ctx->stream->signal); link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF; } + if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) && dccg + && dc->ctx->dce_version >= DCN_VERSION_3_5) { + dto_params.otg_inst = pipe_ctx->stream_res.tg->inst; + dto_params.timing = &pipe_ctx->stream->timing; + if (dccg && dccg->funcs->set_dtbclk_dto) + dccg->funcs->set_dtbclk_dto(dccg, &dto_params); + } } /* @@ -3054,6 +3052,8 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) link_enc->transmitter - TRANSMITTER_UNIPHY_A); } + link_hwss->setup_stream_attribute(pipe_ctx); + if (dc->res_pool->dccg->funcs->set_pixel_rate_div) dc->res_pool->dccg->funcs->set_pixel_rate_div( dc->res_pool->dccg, @@ -3129,7 +3129,8 @@ void dcn20_fpga_init_hw(struct dc *dc) res_pool->dccg->funcs->dccg_init(res_pool->dccg); //Enable ability to power gate / don't force power on permanently - hws->funcs.enable_power_gating_plane(hws, true); + if (hws->funcs.enable_power_gating_plane) + hws->funcs.enable_power_gating_plane(hws, true); // Specific to FPGA dccg and registers REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c index 61efb15572ff..e2269211553c 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c @@ -35,7 +35,7 @@ #include "hw/clk_mgr.h" #include "dc_dmub_srv.h" #include "abm.h" -#include "link.h" +#include "link_service.h" #define DC_LOGGER_INIT(logger) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index 37a239219dfe..e47ed5571dfd 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -50,7 +50,7 @@ #include "dpcd_defs.h" #include "dcn20/dcn20_hwseq.h" #include "dcn30/dcn30_resource.h" -#include "link.h" +#include "link_service.h" #include "dc_state_priv.h" @@ -1228,3 +1228,51 @@ void dcn30_wait_for_all_pending_updates(const struct pipe_ctx *pipe_ctx) } } } + +void dcn30_get_underflow_debug_data(const struct dc *dc, + struct timing_generator *tg, + struct dc_underflow_debug_data *out_data) +{ + 
struct hubbub *hubbub = dc->res_pool->hubbub; + + if (tg) { + uint32_t v_blank_start = 0, v_blank_end = 0; + + out_data->otg_inst = tg->inst; + + tg->funcs->get_scanoutpos(tg, + &v_blank_start, + &v_blank_end, + &out_data->h_position, + &out_data->v_position); + + out_data->otg_frame_count = tg->funcs->get_frame_count(tg); + + out_data->otg_underflow = tg->funcs->is_optc_underflow_occurred(tg); + } + + for (int i = 0; i < MAX_PIPES; i++) { + struct hubp *hubp = dc->res_pool->hubps[i]; + + if (hubp) { + if (hubp->funcs->hubp_get_underflow_status) + out_data->hubps[i].hubp_underflow = hubp->funcs->hubp_get_underflow_status(hubp); + + if (hubp->funcs->hubp_in_blank) + out_data->hubps[i].hubp_in_blank = hubp->funcs->hubp_in_blank(hubp); + + if (hubp->funcs->hubp_get_current_read_line) + out_data->hubps[i].hubp_readline = hubp->funcs->hubp_get_current_read_line(hubp); + + if (hubp->funcs->hubp_get_det_config_error) + out_data->hubps[i].det_config_error = hubp->funcs->hubp_get_det_config_error(hubp); + } + } + + if (hubbub->funcs->get_det_sizes) + hubbub->funcs->get_det_sizes(hubbub, out_data->curr_det_sizes, out_data->target_det_sizes); + + if (hubbub->funcs->compbuf_config_error) + out_data->compbuf_config_error = hubbub->funcs->compbuf_config_error(hubbub); + +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h index 4b90b781c4f2..40afbbfb5b9c 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h @@ -29,6 +29,7 @@ #include "hw_sequencer_private.h" struct dc; +struct dc_underflow_debug_data; void dcn30_init_hw(struct dc *dc); void dcn30_program_all_writeback_pipes_in_tree( @@ -98,4 +99,8 @@ void dcn30_prepare_bandwidth(struct dc *dc, void dcn30_wait_for_all_pending_updates(const struct pipe_ctx *pipe_ctx); +void dcn30_get_underflow_debug_data(const struct dc *dc, + struct timing_generator *tg, + struct dc_underflow_debug_data *out_data); + #endif /* __DC_HWSS_DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c index 2ac5d54d1626..d7ff55669bac 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c @@ -110,6 +110,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = { .update_visual_confirm_color = dcn10_update_visual_confirm_color, .is_abm_supported = dcn21_is_abm_supported, .wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates, + .get_underflow_debug_data = dcn30_get_underflow_debug_data, }; static const struct hwseq_private_funcs dcn30_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c index 5ba3999991b0..b822f2dffff0 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c @@ -45,7 +45,7 @@ #include "link_hwss.h" #include "dpcd_defs.h" #include "dce/dmub_outbox.h" -#include "link.h" +#include "link_service.h" #include "dcn10/dcn10_hwseq.h" #include "dcn21/dcn21_hwseq.h" #include "inc/link_enc_cfg.h" @@ -562,6 +562,19 @@ static void dcn31_reset_back_end_for_pipe( else if (pipe_ctx->stream_res.audio) dc->hwss.disable_audio_stream(pipe_ctx); + /* Temporary workaround to perform DSC programming ahead of pipe reset + * for smartmux/SPRS + * TODO: Remove SmartMux/SPRS checks once movement of DSC programming 
is generalized + */ + if (pipe_ctx->stream->timing.flags.DSC) { + if ((pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && + ((link->dc->config.smart_mux_version && link->dc->is_switch_in_progress_dest) + || link->is_dds || link->skip_implict_edp_power_control)) && + (dc_is_dp_signal(pipe_ctx->stream->signal) || + dc_is_virtual_signal(pipe_ctx->stream->signal))) + dc->link_srv->set_dsc_enable(pipe_ctx, false); + } + /* free acquired resources */ if (pipe_ctx->stream_res.audio) { /*disable az_endpoint*/ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c index 556f4fe57eda..5a6a459da224 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c @@ -112,6 +112,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = { .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, .update_visual_confirm_color = dcn10_update_visual_confirm_color, .setup_hpo_hw_control = dcn31_setup_hpo_hw_control, + .get_underflow_debug_data = dcn30_get_underflow_debug_data, }; static const struct hwseq_private_funcs dcn31_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c index e68f21fd5f0f..f925f669f2a4 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c @@ -46,7 +46,7 @@ #include "link_hwss.h" #include "dpcd_defs.h" #include "dce/dmub_outbox.h" -#include "link.h" +#include "link_service.h" #include "dcn10/dcn10_hwseq.h" #include "inc/link_enc_cfg.h" #include "dcn30/dcn30_vpg.h" @@ -528,3 +528,75 @@ void dcn314_disable_link_output(struct dc_link *link, apply_symclk_on_tx_off_wa(link); } + +/** + * dcn314_dpp_pg_control - DPP power gate control. + * + * @hws: dce_hwseq reference. + * @dpp_inst: DPP instance reference. + * @power_on: true if we want to enable power gate, false otherwise. + * + * Enable or disable power gate in the specific DPP instance. + * If power gating is disabled, will force disable cursor in the DPP instance. + */ +void dcn314_dpp_pg_control( + struct dce_hwseq *hws, + unsigned int dpp_inst, + bool power_on) +{ + uint32_t power_gate = power_on ? 0 : 1; + uint32_t pwr_status = power_on ? 
0 : 2; + + + if (hws->ctx->dc->debug.disable_dpp_power_gate) { + /* Workaround for DCN314 with disabled power gating */ + if (!power_on) { + + /* Force disable cursor if power gating is disabled */ + struct dpp *dpp = hws->ctx->dc->res_pool->dpps[dpp_inst]; + if (dpp && dpp->funcs->dpp_force_disable_cursor) + dpp->funcs->dpp_force_disable_cursor(dpp); + } + return; + } + if (REG(DOMAIN1_PG_CONFIG) == 0) + return; + + switch (dpp_inst) { + case 0: /* DPP0 */ + REG_UPDATE(DOMAIN1_PG_CONFIG, + DOMAIN1_POWER_GATE, power_gate); + + REG_WAIT(DOMAIN1_PG_STATUS, + DOMAIN1_PGFSM_PWR_STATUS, pwr_status, + 1, 1000); + break; + case 1: /* DPP1 */ + REG_UPDATE(DOMAIN3_PG_CONFIG, + DOMAIN3_POWER_GATE, power_gate); + + REG_WAIT(DOMAIN3_PG_STATUS, + DOMAIN3_PGFSM_PWR_STATUS, pwr_status, + 1, 1000); + break; + case 2: /* DPP2 */ + REG_UPDATE(DOMAIN5_PG_CONFIG, + DOMAIN5_POWER_GATE, power_gate); + + REG_WAIT(DOMAIN5_PG_STATUS, + DOMAIN5_PGFSM_PWR_STATUS, pwr_status, + 1, 1000); + break; + case 3: /* DPP3 */ + REG_UPDATE(DOMAIN7_PG_CONFIG, + DOMAIN7_POWER_GATE, power_gate); + + REG_WAIT(DOMAIN7_PG_STATUS, + DOMAIN7_PGFSM_PWR_STATUS, pwr_status, + 1, 1000); + break; + default: + BREAK_TO_DEBUGGER(); + break; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h index 2305ad282f21..6c072d0274ea 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h @@ -47,4 +47,6 @@ void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, void dcn314_disable_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal); +void dcn314_dpp_pg_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on); + #endif /* __DC_HWSS_DCN314_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c index f5112742edf9..79faab1125d4 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c @@ -115,6 +115,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = { .update_visual_confirm_color = dcn10_update_visual_confirm_color, .calculate_pix_rate_divider = dcn314_calculate_pix_rate_divider, .setup_hpo_hw_control = dcn31_setup_hpo_hw_control, + .get_underflow_debug_data = dcn30_get_underflow_debug_data, }; static const struct hwseq_private_funcs dcn314_private_funcs = { @@ -141,6 +142,7 @@ static const struct hwseq_private_funcs dcn314_private_funcs = { .enable_power_gating_plane = dcn314_enable_power_gating_plane, .dpp_root_clock_control = dcn314_dpp_root_clock_control, .hubp_pg_control = dcn31_hubp_pg_control, + .dpp_pg_control = dcn314_dpp_pg_control, .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree, .update_odm = dcn314_update_odm, .dsc_pg_control = dcn314_dsc_pg_control, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index a0b05b9ef660..f39292952702 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -49,7 +49,7 @@ #include "dcn20/dcn20_optc.h" #include "dce/dmub_hw_lock_mgr.h" #include "dcn32/dcn32_resource.h" -#include "link.h" +#include "link_service.h" #include "../dcn20/dcn20_hwseq.h" #include "dc_state_priv.h" @@ -1052,7 +1052,7 @@ void 
dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) } /* Enable DSC hw block */ - dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->hblank_borrow + + dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; @@ -1063,15 +1063,17 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; if (should_use_dto_dscclk) - dccg->funcs->set_dto_dscclk(dccg, dsc->inst); + dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h); dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg); dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc; ASSERT(odm_dsc); + if (!odm_dsc) + continue; if (should_use_dto_dscclk) - dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst); + dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h); odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg); odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst); } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c index b971356d30b1..c19ef075c882 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c @@ -121,6 +121,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider, .program_outstanding_updates = dcn32_program_outstanding_updates, .wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates, + .get_underflow_debug_data = dcn30_get_underflow_debug_data, }; static const struct hwseq_private_funcs dcn32_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index a267f574b619..05011061822c 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -46,7 +46,7 @@ #include "link_hwss.h" #include "dpcd_defs.h" #include "dce/dmub_outbox.h" -#include "link.h" +#include "link_service.h" #include "dcn10/dcn10_hwseq.h" #include "inc/link_enc_cfg.h" #include "dcn30/dcn30_vpg.h" @@ -113,6 +113,14 @@ static void enable_memory_low_power(struct dc *dc) } #endif +static void print_pg_status(struct dc *dc, const char *debug_func, const char *debug_log) +{ + if (dc->debug.enable_pg_cntl_debug_logs && dc->res_pool->pg_cntl) { + if (dc->res_pool->pg_cntl->funcs->print_pg_status) + dc->res_pool->pg_cntl->funcs->print_pg_status(dc->res_pool->pg_cntl, debug_func, debug_log); + } +} + void dcn35_set_dmu_fgcg(struct dce_hwseq *hws, bool enable) { REG_UPDATE_3(DMU_CLK_CNTL, @@ -137,6 +145,8 @@ void dcn35_init_hw(struct dc *dc) uint32_t user_level = MAX_BACKLIGHT_LEVEL; int i; + print_pg_status(dc, __func__, ": start"); + if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); @@ -200,10 +210,7 @@ void dcn35_init_hw(struct dc *dc) /* we want to turn off all dp displays before doing detection */ dc->link_srv->blank_all_dp_displays(dc); 
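The print_pg_status() helper added above only forwards to an optional pg_cntl callback, so the actual logging lives in the ASIC-specific pg_cntl implementation (the callback is added to struct pg_cntl_funcs later in this series). A minimal sketch of how such a callback could be registered is shown below; the function and table names are illustrative, not taken from this patch.

/* Illustrative pg_cntl implementation of the new print_pg_status callback. */
static void example_pg_cntl_print_pg_status(struct pg_cntl *pg_cntl,
					    const char *debug_func,
					    const char *debug_log)
{
	/*
	 * A real implementation would read the DOMAIN*_PG_STATUS registers for
	 * HUBP/DPP/DSC/etc. and log them, prefixed with debug_func and
	 * debug_log so the call site (init_hw, prepare/optimize bandwidth)
	 * stays identifiable in the trace.
	 */
}

static const struct pg_cntl_funcs example_pg_cntl_funcs = {
	.print_pg_status = example_pg_cntl_print_pg_status,
	/* other callbacks omitted */
};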
-/* - if (hws->funcs.enable_power_gating_plane) - hws->funcs.enable_power_gating_plane(dc->hwseq, true); -*/ + if (res_pool->hubbub && res_pool->hubbub->funcs->dchubbub_init) res_pool->hubbub->funcs->dchubbub_init(dc->res_pool->hubbub); /* If taking control over from VBIOS, we may want to optimize our first @@ -236,6 +243,8 @@ void dcn35_init_hw(struct dc *dc) } hws->funcs.init_pipes(dc, dc->current_state); + print_pg_status(dc, __func__, ": after init_pipes"); + if (dc->res_pool->hubbub->funcs->allow_self_refresh_control && !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter) dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, @@ -312,6 +321,7 @@ void dcn35_init_hw(struct dc *dc) if (dc->res_pool->pg_cntl->funcs->init_pg_status) dc->res_pool->pg_cntl->funcs->init_pg_status(dc->res_pool->pg_cntl); } + print_pg_status(dc, __func__, ": after init_pg_status"); } static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) @@ -500,97 +510,6 @@ void dcn35_physymclk_root_clock_control(struct dce_hwseq *hws, unsigned int phy_ } } -void dcn35_dsc_pg_control( - struct dce_hwseq *hws, - unsigned int dsc_inst, - bool power_on) -{ - uint32_t power_gate = power_on ? 0 : 1; - uint32_t pwr_status = power_on ? 0 : 2; - uint32_t org_ip_request_cntl = 0; - - if (hws->ctx->dc->debug.disable_dsc_power_gate) - return; - if (hws->ctx->dc->debug.ignore_pg) - return; - REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); - if (org_ip_request_cntl == 0) - REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); - - switch (dsc_inst) { - case 0: /* DSC0 */ - REG_UPDATE(DOMAIN16_PG_CONFIG, - DOMAIN_POWER_GATE, power_gate); - - REG_WAIT(DOMAIN16_PG_STATUS, - DOMAIN_PGFSM_PWR_STATUS, pwr_status, - 1, 1000); - break; - case 1: /* DSC1 */ - REG_UPDATE(DOMAIN17_PG_CONFIG, - DOMAIN_POWER_GATE, power_gate); - - REG_WAIT(DOMAIN17_PG_STATUS, - DOMAIN_PGFSM_PWR_STATUS, pwr_status, - 1, 1000); - break; - case 2: /* DSC2 */ - REG_UPDATE(DOMAIN18_PG_CONFIG, - DOMAIN_POWER_GATE, power_gate); - - REG_WAIT(DOMAIN18_PG_STATUS, - DOMAIN_PGFSM_PWR_STATUS, pwr_status, - 1, 1000); - break; - case 3: /* DSC3 */ - REG_UPDATE(DOMAIN19_PG_CONFIG, - DOMAIN_POWER_GATE, power_gate); - - REG_WAIT(DOMAIN19_PG_STATUS, - DOMAIN_PGFSM_PWR_STATUS, pwr_status, - 1, 1000); - break; - default: - BREAK_TO_DEBUGGER(); - break; - } - - if (org_ip_request_cntl == 0) - REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); -} - -void dcn35_enable_power_gating_plane(struct dce_hwseq *hws, bool enable) -{ - bool force_on = true; /* disable power gating */ - uint32_t org_ip_request_cntl = 0; - - if (hws->ctx->dc->debug.disable_hubp_power_gate) - return; - if (hws->ctx->dc->debug.ignore_pg) - return; - REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); - if (org_ip_request_cntl == 0) - REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); - /* DCHUBP0/1/2/3/4/5 */ - REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); - REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); - /* DPP0/1/2/3/4/5 */ - REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); - REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); - - force_on = true; /* disable power gating */ - if (enable && !hws->ctx->dc->debug.disable_dsc_power_gate) - force_on = false; - - /* DCS0/1/2/3/4 */ - REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); - REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); - REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); - 
REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); - - -} - /* In headless boot cases, DIG may be turned * on which causes HW/SW discrepancies. * To avoid this, power down hardware on boot @@ -1453,6 +1372,8 @@ void dcn35_prepare_bandwidth( } dcn20_prepare_bandwidth(dc, context); + + print_pg_status(dc, __func__, ": after rcg and power up"); } void dcn35_optimize_bandwidth( @@ -1461,6 +1382,8 @@ void dcn35_optimize_bandwidth( { struct pg_block_update pg_update_state; + print_pg_status(dc, __func__, ": before rcg and power up"); + dcn20_optimize_bandwidth(dc, context); if (dc->hwss.calc_blocks_to_gate) { @@ -1472,6 +1395,8 @@ void dcn35_optimize_bandwidth( if (dc->hwss.root_clock_control) dc->hwss.root_clock_control(dc, &pg_update_state, false); } + + print_pg_status(dc, __func__, ": after rcg and power up"); } void dcn35_set_drr(struct pipe_ctx **pipe_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c index a3ccf805bd16..f2f16a0bdb4f 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c @@ -115,7 +115,6 @@ static const struct hw_sequencer_funcs dcn35_funcs = { .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, .update_visual_confirm_color = dcn10_update_visual_confirm_color, .apply_idle_power_optimizations = dcn35_apply_idle_power_optimizations, - .update_dsc_pg = dcn32_update_dsc_pg, .calc_blocks_to_gate = dcn35_calc_blocks_to_gate, .calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate, .hw_block_power_up = dcn35_hw_block_power_up, @@ -128,6 +127,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = { .enable_plane = dcn20_enable_plane, .update_dchubp_dpp = dcn20_update_dchubp_dpp, .post_unlock_reset_opp = dcn20_post_unlock_reset_opp, + .get_underflow_debug_data = dcn30_get_underflow_debug_data, }; static const struct hwseq_private_funcs dcn35_private_funcs = { @@ -150,7 +150,6 @@ static const struct hwseq_private_funcs dcn35_private_funcs = { .plane_atomic_disable = dcn35_plane_atomic_disable, //.plane_atomic_disable = dcn20_plane_atomic_disable,/*todo*/ //.hubp_pg_control = dcn35_hubp_pg_control, - .enable_power_gating_plane = dcn35_enable_power_gating_plane, .dpp_root_clock_control = dcn35_dpp_root_clock_control, .dpstream_root_clock_control = dcn35_dpstream_root_clock_control, .physymclk_root_clock_control = dcn35_physymclk_root_clock_control, @@ -165,7 +164,6 @@ static const struct hwseq_private_funcs dcn35_private_funcs = { .calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values, .resync_fifo_dccg_dio = dcn314_resync_fifo_dccg_dio, .is_dp_dig_pixel_rate_div_policy = dcn35_is_dp_dig_pixel_rate_div_policy, - .dsc_pg_control = dcn35_dsc_pg_control, .dsc_pg_status = dcn32_dsc_pg_status, .enable_plane = dcn35_enable_plane, .wait_for_pipe_update_if_needed = dcn10_wait_for_pipe_update_if_needed, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c index 58f2be2a326b..09e60158f0b5 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c @@ -114,7 +114,6 @@ static const struct hw_sequencer_funcs dcn351_funcs = { .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, .update_visual_confirm_color = dcn10_update_visual_confirm_color, .apply_idle_power_optimizations = dcn35_apply_idle_power_optimizations, - .update_dsc_pg = dcn32_update_dsc_pg, 
.calc_blocks_to_gate = dcn351_calc_blocks_to_gate, .calc_blocks_to_ungate = dcn351_calc_blocks_to_ungate, .hw_block_power_up = dcn351_hw_block_power_up, @@ -123,6 +122,7 @@ static const struct hw_sequencer_funcs dcn351_funcs = { .set_long_vtotal = dcn35_set_long_vblank, .calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider, .setup_hpo_hw_control = dcn35_setup_hpo_hw_control, + .get_underflow_debug_data = dcn30_get_underflow_debug_data, }; static const struct hwseq_private_funcs dcn351_private_funcs = { @@ -145,7 +145,6 @@ static const struct hwseq_private_funcs dcn351_private_funcs = { .plane_atomic_disable = dcn35_plane_atomic_disable, //.plane_atomic_disable = dcn20_plane_atomic_disable,/*todo*/ //.hubp_pg_control = dcn35_hubp_pg_control, - .enable_power_gating_plane = dcn35_enable_power_gating_plane, .dpp_root_clock_control = dcn35_dpp_root_clock_control, .dpstream_root_clock_control = dcn35_dpstream_root_clock_control, .physymclk_root_clock_control = dcn35_physymclk_root_clock_control, @@ -159,7 +158,6 @@ static const struct hwseq_private_funcs dcn351_private_funcs = { .setup_hpo_hw_control = dcn35_setup_hpo_hw_control, .calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values, .is_dp_dig_pixel_rate_div_policy = dcn35_is_dp_dig_pixel_rate_div_policy, - .dsc_pg_control = dcn35_dsc_pg_control, .dsc_pg_status = dcn32_dsc_pg_status, .enable_plane = dcn35_enable_plane, .wait_for_pipe_update_if_needed = dcn10_wait_for_pipe_update_if_needed, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c index c4177a9a662f..7c276c319086 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c @@ -2,6 +2,8 @@ // // Copyright 2024 Advanced Micro Devices, Inc. 
+ +#include "os_types.h" #include "dm_services.h" #include "basics/dc_common.h" #include "dm_helpers.h" @@ -23,7 +25,7 @@ #include "dpcd_defs.h" #include "clk_mgr.h" #include "dsc.h" -#include "link.h" +#include "link_service.h" #include "dce/dmub_hw_lock_mgr.h" #include "dcn10/dcn10_cm_common.h" @@ -49,7 +51,7 @@ #define FN(reg_name, field_name) \ hws->shifts->field_name, hws->masks->field_name -static void dcn401_initialize_min_clocks(struct dc *dc) +void dcn401_initialize_min_clocks(struct dc *dc) { struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk; @@ -143,13 +145,8 @@ void dcn401_init_hw(struct dc *dc) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); // mark dcmode limits present if any clock has distinct AC and DC values from SMU - dc->caps.dcmode_power_limits_present = - (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dcfclk_mhz) || - (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dispclk_mhz) || - (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dtbclk_mhz) || - (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.fclk_mhz) || - (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.memclk_mhz) || - (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.socclk_mhz); + dc->caps.dcmode_power_limits_present = dc->clk_mgr->funcs->is_dc_mode_present && + dc->clk_mgr->funcs->is_dc_mode_present(dc->clk_mgr); } // Initialize the dccg @@ -396,249 +393,6 @@ static void dcn401_get_mcm_lut_xable_from_pipe_ctx(struct dc *dc, struct pipe_ct } } -static void dcn401_set_mcm_location_post_blend(struct dc *dc, struct pipe_ctx *pipe_ctx, bool bPostBlend) -{ - struct mpc *mpc = dc->res_pool->mpc; - int mpcc_id = pipe_ctx->plane_res.hubp->inst; - - if (!pipe_ctx->plane_state) - return; - - mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id); - pipe_ctx->plane_state->mcm_location = (bPostBlend) ? 
- MPCC_MOVABLE_CM_LOCATION_AFTER : - MPCC_MOVABLE_CM_LOCATION_BEFORE; -} - -static void dc_get_lut_mode( - enum dc_cm2_gpu_mem_layout layout, - enum hubp_3dlut_fl_mode *mode, - enum hubp_3dlut_fl_addressing_mode *addr_mode) -{ - switch (layout) { - case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB: - *mode = hubp_3dlut_fl_mode_native_1; - *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear; - break; - case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR: - *mode = hubp_3dlut_fl_mode_native_2; - *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear; - break; - case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR: - *mode = hubp_3dlut_fl_mode_transform; - *addr_mode = hubp_3dlut_fl_addressing_mode_simple_linear; - break; - default: - *mode = hubp_3dlut_fl_mode_disable; - *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear; - break; - } -} - -static void dc_get_lut_format( - enum dc_cm2_gpu_mem_format dc_format, - enum hubp_3dlut_fl_format *format) -{ - switch (dc_format) { - case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB: - *format = hubp_3dlut_fl_format_unorm_12msb_bitslice; - break; - case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB: - *format = hubp_3dlut_fl_format_unorm_12lsb_bitslice; - break; - case DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10: - *format = hubp_3dlut_fl_format_float_fp1_5_10; - break; - } -} - -static void dc_get_lut_xbar( - enum dc_cm2_gpu_mem_pixel_component_order order, - enum hubp_3dlut_fl_crossbar_bit_slice *cr_r, - enum hubp_3dlut_fl_crossbar_bit_slice *y_g, - enum hubp_3dlut_fl_crossbar_bit_slice *cb_b) -{ - switch (order) { - case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA: - *cr_r = hubp_3dlut_fl_crossbar_bit_slice_32_47; - *y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31; - *cb_b = hubp_3dlut_fl_crossbar_bit_slice_0_15; - break; - case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_BGRA: - *cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15; - *y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31; - *cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47; - break; - } -} - -static void dc_get_lut_width( - enum dc_cm2_gpu_mem_size size, - enum hubp_3dlut_fl_width *width) -{ - switch (size) { - case DC_CM2_GPU_MEM_SIZE_333333: - *width = hubp_3dlut_fl_width_33; - break; - case DC_CM2_GPU_MEM_SIZE_171717: - *width = hubp_3dlut_fl_width_17; - break; - case DC_CM2_GPU_MEM_SIZE_TRANSFORMED: - *width = hubp_3dlut_fl_width_transformed; - break; - } -} -static bool dc_is_rmcm_3dlut_supported(struct hubp *hubp, struct mpc *mpc) -{ - if (mpc->funcs->rmcm.update_3dlut_fast_load_select && - mpc->funcs->rmcm.program_lut_read_write_control && - hubp->funcs->hubp_program_3dlut_fl_addr && - mpc->funcs->rmcm.program_bit_depth && - hubp->funcs->hubp_program_3dlut_fl_mode && - hubp->funcs->hubp_program_3dlut_fl_addressing_mode && - hubp->funcs->hubp_program_3dlut_fl_format && - hubp->funcs->hubp_update_3dlut_fl_bias_scale && - mpc->funcs->rmcm.program_bias_scale && - hubp->funcs->hubp_program_3dlut_fl_crossbar && - hubp->funcs->hubp_program_3dlut_fl_width && - mpc->funcs->rmcm.update_3dlut_fast_load_select && - mpc->funcs->rmcm.populate_lut && - mpc->funcs->rmcm.program_lut_mode && - hubp->funcs->hubp_enable_3dlut_fl && - mpc->funcs->rmcm.enable_3dlut_fl) - return true; - - return false; -} - -bool dcn401_program_rmcm_luts( - struct hubp *hubp, - struct pipe_ctx *pipe_ctx, - enum dc_cm2_transfer_func_source lut3d_src, - struct dc_cm2_func_luts *mcm_luts, - struct mpc *mpc, - bool lut_bank_a, - int mpcc_id) -{ - struct dpp *dpp_base = pipe_ctx->plane_res.dpp; - union mcm_lut_params m_lut_params; - enum MCM_LUT_XABLE 
shaper_xable, lut3d_xable = MCM_LUT_DISABLE, lut1d_xable; - enum hubp_3dlut_fl_mode mode; - enum hubp_3dlut_fl_addressing_mode addr_mode; - enum hubp_3dlut_fl_format format = 0; - enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0; - enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0; - enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0; - enum hubp_3dlut_fl_width width = 0; - struct dc *dc = hubp->ctx->dc; - - bool bypass_rmcm_3dlut = false; - bool bypass_rmcm_shaper = false; - - dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable); - - /* 3DLUT */ - switch (lut3d_src) { - case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM: - memset(&m_lut_params, 0, sizeof(m_lut_params)); - // Don't know what to do in this case. - //case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM: - break; - case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM: - dc_get_lut_width(mcm_luts->lut3d_data.gpu_mem_params.size, &width); - if (!dc_is_rmcm_3dlut_supported(hubp, mpc) || - !mpc->funcs->rmcm.is_config_supported(width)) - return false; - - //0. disable fl on mpc - mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, 0xF); - - //1. power down the block - mpc->funcs->rmcm.power_on_shaper_3dlut(mpc, mpcc_id, false); - - //2. program RMCM - //2a. 3dlut reg programming - mpc->funcs->rmcm.program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, - (!bypass_rmcm_3dlut) && lut3d_xable != MCM_LUT_DISABLE, mpcc_id); - - hubp->funcs->hubp_program_3dlut_fl_addr(hubp, - mcm_luts->lut3d_data.gpu_mem_params.addr); - - mpc->funcs->rmcm.program_bit_depth(mpc, - mcm_luts->lut3d_data.gpu_mem_params.bit_depth, mpcc_id); - - // setting native or transformed mode, - dc_get_lut_mode(mcm_luts->lut3d_data.gpu_mem_params.layout, &mode, &addr_mode); - - //these program the mcm 3dlut - hubp->funcs->hubp_program_3dlut_fl_mode(hubp, mode); - - hubp->funcs->hubp_program_3dlut_fl_addressing_mode(hubp, addr_mode); - - //seems to be only for the MCM - dc_get_lut_format(mcm_luts->lut3d_data.gpu_mem_params.format_params.format, &format); - hubp->funcs->hubp_program_3dlut_fl_format(hubp, format); - - mpc->funcs->rmcm.program_bias_scale(mpc, - mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.bias, - mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.scale, - mpcc_id); - hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp, - mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.bias, - mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.scale); - - dc_get_lut_xbar( - mcm_luts->lut3d_data.gpu_mem_params.component_order, - &crossbar_bit_slice_cr_r, - &crossbar_bit_slice_y_g, - &crossbar_bit_slice_cb_b); - - hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp, - crossbar_bit_slice_cr_r, - crossbar_bit_slice_y_g, - crossbar_bit_slice_cb_b); - - mpc->funcs->rmcm.program_3dlut_size(mpc, width, mpcc_id); - - mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst); - - //2b. 
shaper reg programming - memset(&m_lut_params, 0, sizeof(m_lut_params)); - - if (mcm_luts->shaper->type == TF_TYPE_HWPWL) { - m_lut_params.pwl = &mcm_luts->shaper->pwl; - } else if (mcm_luts->shaper->type == TF_TYPE_DISTRIBUTED_POINTS) { - ASSERT(false); - cm_helper_translate_curve_to_hw_format( - dc->ctx, - mcm_luts->shaper, - &dpp_base->regamma_params, true); - m_lut_params.pwl = &dpp_base->regamma_params; - } - if (m_lut_params.pwl) { - mpc->funcs->rmcm.populate_lut(mpc, m_lut_params, lut_bank_a, mpcc_id); - mpc->funcs->rmcm.program_lut_mode(mpc, !bypass_rmcm_shaper, lut_bank_a, mpcc_id); - } else { - //RMCM 3dlut won't work without its shaper - return false; - } - - //3. Select the hubp connected to this RMCM - hubp->funcs->hubp_enable_3dlut_fl(hubp, true); - mpc->funcs->rmcm.enable_3dlut_fl(mpc, true, mpcc_id); - - //4. power on the block - if (m_lut_params.pwl) - mpc->funcs->rmcm.power_on_shaper_3dlut(mpc, mpcc_id, true); - - break; - default: - return false; - } - - return true; -} - void dcn401_populate_mcm_luts(struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_cm2_func_luts mcm_luts, @@ -664,25 +418,6 @@ void dcn401_populate_mcm_luts(struct dc *dc, dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable); - //MCM - setting its location (Before/After) blender - //set to post blend (true) - dcn401_set_mcm_location_post_blend( - dc, - pipe_ctx, - mcm_luts.lut3d_data.mpc_mcm_post_blend); - - //RMCM - 3dLUT+Shaper - if (mcm_luts.lut3d_data.rmcm_3dlut_enable) { - dcn401_program_rmcm_luts( - hubp, - pipe_ctx, - lut3d_src, - &mcm_luts, - mpc, - lut_bank_a, - mpcc_id); - } - /* 1D LUT */ if (mcm_luts.lut1d_func) { memset(&m_lut_params, 0, sizeof(m_lut_params)); @@ -740,15 +475,15 @@ void dcn401_populate_mcm_luts(struct dc *dc, break; case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM: switch (mcm_luts.lut3d_data.gpu_mem_params.size) { - case DC_CM2_GPU_MEM_SIZE_333333: - width = hubp_3dlut_fl_width_33; - break; case DC_CM2_GPU_MEM_SIZE_171717: width = hubp_3dlut_fl_width_17; break; case DC_CM2_GPU_MEM_SIZE_TRANSFORMED: width = hubp_3dlut_fl_width_transformed; break; + default: + //TODO: handle default case + break; } //check for support @@ -817,11 +552,14 @@ void dcn401_populate_mcm_luts(struct dc *dc, //navi 4x has a bug and r and blue are swapped and need to be worked around here in //TODO: need to make a method for get_xbar per asic OR do the workaround in program_crossbar for 4x - dc_get_lut_xbar( - mcm_luts.lut3d_data.gpu_mem_params.component_order, - &crossbar_bit_slice_cr_r, - &crossbar_bit_slice_y_g, - &crossbar_bit_slice_cb_b); + switch (mcm_luts.lut3d_data.gpu_mem_params.component_order) { + case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA: + default: + crossbar_bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15; + crossbar_bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31; + crossbar_bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47; + break; + } if (hubp->funcs->hubp_program_3dlut_fl_crossbar) hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp, @@ -1072,9 +810,12 @@ enum dc_status dcn401_enable_stream_timing( if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal))) dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx); - /* if we are borrowing from hblank, h_addressable needs to be adjusted */ - if (dc->debug.enable_hblank_borrow) - patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->hblank_borrow; + /* if we are padding, h_addressable needs to be adjusted */ + if 
(dc->debug.enable_hblank_borrow) { + patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding; + patched_crtc_timing.h_total = patched_crtc_timing.h_total + pipe_ctx->dsc_padding_params.dsc_htotal_padding; + patched_crtc_timing.pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz; + } pipe_ctx->stream_res.tg->funcs->program_timing( pipe_ctx->stream_res.tg, @@ -1227,6 +968,8 @@ void dcn401_enable_stream(struct pipe_ctx *pipe_ctx) } } + link_hwss->setup_stream_attribute(pipe_ctx); + if (dc->res_pool->dccg->funcs->set_pixel_rate_div) { dc->res_pool->dccg->funcs->set_pixel_rate_div( dc->res_pool->dccg, @@ -1640,22 +1383,22 @@ void dcn401_prepare_bandwidth(struct dc *dc, false); /* program dchubbub watermarks: - * For assigning wm_optimized_required, use |= operator since we don't want + * For assigning optimized_required, use |= operator since we don't want * to clear the value if the optimize has not happened yet */ - dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub, + dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub, &context->bw_ctx.bw.dcn.watermarks, dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, false); /* update timeout thresholds */ if (hubbub->funcs->program_arbiter) { - dc->wm_optimized_required |= hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, false); + dc->optimized_required |= hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, false); } /* decrease compbuf size */ if (hubbub->funcs->program_compbuf_segments) { compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size; - dc->wm_optimized_required |= (compbuf_size != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size); + dc->optimized_required |= (compbuf_size != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size); hubbub->funcs->program_compbuf_segments(hubbub, compbuf_size, false); } @@ -1881,20 +1624,28 @@ void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx, void dcn401_hardware_release(struct dc *dc) { - dc_dmub_srv_fams2_update_config(dc, dc->current_state, false); - - /* If pstate unsupported, or still supported - * by firmware, force it supported by dcn - */ - if (dc->current_state) { - if ((!dc->clk_mgr->clks.p_state_change_support || - dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) && - dc->res_pool->hubbub->funcs->force_pstate_change_control) - dc->res_pool->hubbub->funcs->force_pstate_change_control( - dc->res_pool->hubbub, true, true); - - dc->current_state->bw_ctx.bw.dcn.clk.p_state_change_support = true; - dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true); + if (!dc->debug.disable_force_pstate_allow_on_hw_release) { + dc_dmub_srv_fams2_update_config(dc, dc->current_state, false); + + /* If pstate unsupported, or still supported + * by firmware, force it supported by dcn + */ + if (dc->current_state) { + if ((!dc->clk_mgr->clks.p_state_change_support || + dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) && + dc->res_pool->hubbub->funcs->force_pstate_change_control) + dc->res_pool->hubbub->funcs->force_pstate_change_control( + dc->res_pool->hubbub, true, true); + + dc->current_state->bw_ctx.bw.dcn.clk.p_state_change_support = true; + dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true); + } + } else { + if (dc->current_state) { + dc->clk_mgr->clks.p_state_change_support = false; + dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true); 
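dcn401_init_hw() above now derives dc->caps.dcmode_power_limits_present from a single clk_mgr callback instead of open-coding the per-clock checks. Below is a hedged sketch of how a clk_mgr could implement the new is_dc_mode_present() hook, reusing the same fields the removed expression tested; the function name is illustrative and not part of this patch.

/* Illustrative only: mirrors the per-clock dcmode-limit checks removed above. */
static bool example_is_dc_mode_present(struct clk_mgr *clk_mgr)
{
	struct clk_bw_params *bw_params = clk_mgr->bw_params;

	return (bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels && bw_params->dc_mode_limit.dcfclk_mhz) ||
	       (bw_params->clk_table.num_entries_per_clk.num_dispclk_levels && bw_params->dc_mode_limit.dispclk_mhz) ||
	       (bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels && bw_params->dc_mode_limit.dtbclk_mhz) ||
	       (bw_params->clk_table.num_entries_per_clk.num_fclk_levels && bw_params->dc_mode_limit.fclk_mhz) ||
	       (bw_params->clk_table.num_entries_per_clk.num_memclk_levels && bw_params->dc_mode_limit.memclk_mhz) ||
	       (bw_params->clk_table.num_entries_per_clk.num_socclk_levels && bw_params->dc_mode_limit.socclk_mhz);
}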
+ } + dc_dmub_srv_fams2_update_config(dc, dc->current_state, false); } } @@ -2269,14 +2020,6 @@ void dcn401_program_pipe( pipe_ctx->plane_state->update_flags.bits.hdr_mult)) hws->funcs.set_hdr_multiplier(pipe_ctx); - if (hws->funcs.populate_mcm_luts) { - if (pipe_ctx->plane_state) { - hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts, - pipe_ctx->plane_state->lut_bank_a); - pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a; - } - } - if (pipe_ctx->plane_state && (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || pipe_ctx->plane_state->update_flags.bits.gamma_change || @@ -2289,10 +2032,8 @@ void dcn401_program_pipe( * updating on slave planes */ if (pipe_ctx->update_flags.bits.enable || - pipe_ctx->update_flags.bits.plane_changed || - pipe_ctx->stream->update_flags.bits.out_tf || - (pipe_ctx->plane_state && - pipe_ctx->plane_state->update_flags.bits.output_tf_change)) + pipe_ctx->update_flags.bits.plane_changed || + pipe_ctx->stream->update_flags.bits.out_tf) hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream); /* If the pipe has been enabled or has a different opp, we @@ -2651,7 +2392,7 @@ bool dcn401_update_bandwidth( struct dce_hwseq *hws = dc->hwseq; /* recalculate DML parameters */ - if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) + if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) return false; /* apply updated bandwidth parameters */ @@ -2902,10 +2643,12 @@ void dcn401_plane_atomic_power_down(struct dc *dc, DC_LOGGER_INIT(dc->ctx->logger); - REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); - if (org_ip_request_cntl == 0) - REG_SET(DC_IP_REQUEST_CNTL, 0, - IP_REQUEST_EN, 1); + if (REG(DC_IP_REQUEST_CNTL)) { + REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); + if (org_ip_request_cntl == 0) + REG_SET(DC_IP_REQUEST_CNTL, 0, + IP_REQUEST_EN, 1); + } if (hws->funcs.dpp_pg_control) hws->funcs.dpp_pg_control(hws, dpp->inst, false); @@ -2916,7 +2659,7 @@ void dcn401_plane_atomic_power_down(struct dc *dc, hubp->funcs->hubp_reset(hubp); dpp->funcs->dpp_reset(dpp); - if (org_ip_request_cntl == 0) + if (org_ip_request_cntl == 0 && REG(DC_IP_REQUEST_CNTL)) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h index ce65b4f6c672..2621b7725267 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h @@ -109,12 +109,5 @@ void dcn401_detect_pipe_changes( void dcn401_plane_atomic_power_down(struct dc *dc, struct dpp *dpp, struct hubp *hubp); -bool dcn401_program_rmcm_luts( - struct hubp *hubp, - struct pipe_ctx *pipe_ctx, - enum dc_cm2_transfer_func_source lut3d_src, - struct dc_cm2_func_luts *mcm_luts, - struct mpc *mpc, - bool lut_bank_a, - int mpcc_id); +void dcn401_initialize_min_clocks(struct dc *dc); #endif /* __DC_HWSS_DCN401_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c index fe7aceb2f510..d6e11b7e4fce 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c @@ -104,6 +104,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = { .enable_plane = dcn20_enable_plane, .update_dchubp_dpp = dcn20_update_dchubp_dpp, .post_unlock_reset_opp = 
dcn20_post_unlock_reset_opp, + .get_underflow_debug_data = dcn30_get_underflow_debug_data, }; static const struct hwseq_private_funcs dcn401_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index 3a0795045bc6..1723bbcf2c46 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -47,6 +47,7 @@ struct link_resource; struct dc_dmub_cmd; struct pg_block_update; struct drr_params; +struct dc_underflow_debug_data; struct subvp_pipe_control_lock_fast_params { struct dc *dc; @@ -475,6 +476,9 @@ struct hw_sequencer_funcs { struct dc_state *context); void (*post_unlock_reset_opp)(struct dc *dc, struct pipe_ctx *opp_head); + void (*get_underflow_debug_data)(const struct dc *dc, + struct timing_generator *tg, + struct dc_underflow_debug_data *out_data); }; void color_space_to_black_color( @@ -502,6 +506,9 @@ void get_hdr_visual_confirm_color( void get_mpctree_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color); +void get_smartmux_visual_confirm_color( + struct dc *dc, + struct tg_color *color); void get_vabc_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color); diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h index f3696143590c..82085d9c3f40 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h @@ -59,6 +59,7 @@ enum dc_status { DC_FAIL_DP_PAYLOAD_ALLOCATION = 27, DC_FAIL_DP_LINK_BANDWIDTH = 28, DC_FAIL_HW_CURSOR_SUPPORT = 29, + DC_FAIL_DP_TUNNEL_BW_VALIDATE = 30, DC_ERROR_UNEXPECTED = -1 }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 0cf349cafb3e..d11893f8c916 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -67,6 +67,8 @@ struct resource_context; struct clk_bw_params; struct dc_mcache_params; +#define MAX_RMCM_INST 2 + struct resource_funcs { enum engine_id (*get_preferred_eng_id_dpia)(unsigned int dpia_index); void (*destroy)(struct resource_pool **pool); @@ -82,7 +84,7 @@ struct resource_funcs { enum dc_status (*validate_bandwidth)( struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); void (*calculate_wm_and_dlg)( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, @@ -107,7 +109,7 @@ struct resource_funcs { struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); /* * Algorithm for assigning available link encoders to links. 
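The get_underflow_debug_data entry added to hw_sequencer_funcs above is optional per ASIC (this series registers dcn30_get_underflow_debug_data only in the DCN3x/DCN4x init tables), so callers are expected to NULL-check it like the other hooks. A hedged caller sketch follows; the wrapper name is illustrative and not part of this patch.

/* Illustrative wrapper; the hook and its signature come from hw_sequencer.h above. */
static void example_collect_underflow_debug_data(const struct dc *dc,
						 struct timing_generator *tg,
						 struct dc_underflow_debug_data *out_data)
{
	if (dc->hwss.get_underflow_debug_data)
		dc->hwss.get_underflow_debug_data(dc, tg, out_data);
}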
@@ -223,6 +225,11 @@ struct resource_funcs { const struct dc_stream_state *stream); bool (*program_mcache_pipe_config)(struct dc_state *context, const struct dc_mcache_params *mcache_params); + enum dc_status (*update_dc_state_for_encoder_switch)(struct dc_link *link, + struct dc_link_settings *link_setting, + uint8_t pipe_count, + struct pipe_ctx *pipes, + struct audio_output *audio_output); }; struct audio_support{ @@ -281,6 +288,7 @@ struct resource_pool { struct hpo_dp_link_encoder *hpo_dp_link_enc[MAX_HPO_DP2_LINK_ENCODERS]; struct dc_3dlut *mpc_lut[MAX_PIPES]; struct dc_transfer_func *mpc_shaper[MAX_PIPES]; + struct dc_rmcm_3dlut rmcm_3dlut[MAX_RMCM_INST]; struct { unsigned int xtalin_clock_inKhz; @@ -425,7 +433,14 @@ enum p_state_switch_method { P_STATE_V_ACTIVE, P_STATE_SUB_VP, P_STATE_DRR_SUB_VP, - P_STATE_V_BLANK_SUB_VP + P_STATE_V_BLANK_SUB_VP, +}; + +struct dsc_padding_params { + /* pixels borrowed from hblank to hactive */ + uint8_t dsc_hactive_padding; + uint32_t dsc_htotal_padding; + uint32_t dsc_pix_clk_100hz; }; struct pipe_ctx { @@ -485,8 +500,7 @@ struct pipe_ctx { /* subvp_index: only valid if the pipe is a SUBVP_MAIN*/ uint8_t subvp_index; struct pixel_rate_divider pixel_rate_divider; - /* pixels borrowed from hblank to hactive */ - uint8_t hblank_borrow; + struct dsc_padding_params dsc_padding_params; /* next vupdate */ uint32_t next_vupdate; uint32_t wait_frame_count; @@ -556,7 +570,10 @@ struct dcn_bw_output { struct dml2_mcache_surface_allocation mcache_allocations[DML2_MAX_PLANES]; struct dmub_cmd_fams2_global_config fams2_global_config; union dmub_cmd_fams2_config fams2_stream_base_params[DML2_MAX_PLANES]; - union dmub_cmd_fams2_config fams2_stream_sub_params[DML2_MAX_PLANES]; + union { + union dmub_cmd_fams2_config fams2_stream_sub_params[DML2_MAX_PLANES]; + union dmub_fams2_stream_static_sub_state_v2 fams2_stream_sub_params_v2[DML2_MAX_PLANES]; + }; struct dml2_display_arb_regs arb_regs; }; @@ -672,6 +689,7 @@ struct replay_context { /* Controller Id used for Dig Fe source select */ enum controller_id controllerId; unsigned int line_time_in_ns; + bool os_request_force_ffu; }; enum dc_replay_enable { diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h index d19a595c2be4..134091d5842d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h @@ -622,7 +622,7 @@ extern const struct dcn_ip_params dcn10_ip_defaults; bool dcn_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn_get_soc_clks( struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index c14d64687a3d..2c9a4a12bd8a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -100,6 +100,17 @@ struct dcn301_clk_internal { #define MAX_NUM_DPM_LVL 8 #define WM_SET_COUNT 4 +enum clk_type { + CLK_TYPE_DCFCLK, + CLK_TYPE_FCLK, + CLK_TYPE_MCLK, + CLK_TYPE_SOCCLK, + CLK_TYPE_DTBCLK, + CLK_TYPE_DISPCLK, + CLK_TYPE_DPPCLK, + CLK_TYPE_DSCCLK, + CLK_TYPE_COUNT +}; struct clk_limit_table_entry { unsigned int voltage; /* milivolts withh 2 fractional bits */ @@ -324,6 +335,11 @@ struct clk_mgr_funcs { int (*get_dispclk_from_dentist)(struct clk_mgr *clk_mgr_base); + bool (*is_dc_mode_present)(struct clk_mgr *clk_mgr); + + uint32_t (*set_smartmux_switch)(struct clk_mgr *clk_mgr, uint32_t pins_to_set); + + 
unsigned int (*get_max_clock_khz)(struct clk_mgr *clk_mgr_base, enum clk_type clk_type); }; struct clk_mgr { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index e94e9ba60f55..61c4d2a7db1c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -211,7 +211,7 @@ struct dccg_funcs { struct dccg *dccg, enum streamclk_source src, uint32_t otg_inst); - void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst); + void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst, uint32_t num_slices_h); void (*set_ref_dscclk)(struct dccg *dccg, uint32_t dsc_inst); void (*dccg_root_gate_disable_control)(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating); }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 52b745667ef7..843a18287c83 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -137,6 +137,19 @@ struct dcn_hubbub_state { uint32_t dram_state_cntl; }; +struct hubbub_system_latencies { + uint32_t max_latency_ns; + uint32_t avg_latency_ns; + uint32_t min_latency_ns; +}; + +struct hubbub_urgent_latency_params { + uint32_t refclk_mhz; + uint32_t t_win_ns; + uint32_t bandwidth_mbps; + uint32_t bw_factor_x1000; +}; + struct hubbub_funcs { void (*update_dchub)( struct hubbub *hubbub, @@ -229,6 +242,17 @@ struct hubbub_funcs { void (*program_compbuf_segments)(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase); void (*wait_for_det_update)(struct hubbub *hubbub, int hubp_inst); bool (*program_arbiter)(struct hubbub *hubbub, struct dml2_display_arb_regs *arb_regs, bool safe_to_lower); + void (*get_det_sizes)(struct hubbub *hubbub, uint32_t *curr_det_sizes, uint32_t *target_det_sizes); + uint32_t (*compbuf_config_error)(struct hubbub *hubbub); + struct hubbub_perfmon_funcs{ + void (*start_system_latency_measurement)(struct hubbub *hubbub); + void (*get_system_latency_result)(struct hubbub *hubbub, uint32_t refclk_mhz, struct hubbub_system_latencies *latencies); + void (*start_in_order_bandwidth_measurement)(struct hubbub *hubbub); + void (*get_in_order_bandwidth_result)(struct hubbub *hubbub, uint32_t refclk_mhz, uint32_t *bandwidth_mbps); + void (*start_urgent_ramp_latency_measurement)(struct hubbub *hubbub, const struct hubbub_urgent_latency_params *params); + void (*get_urgent_ramp_latency_result)(struct hubbub *hubbub, uint32_t refclk_mhz, uint32_t *latency_ns); + void (*reset)(struct hubbub *hubbub); + } perfmon; }; struct hubbub { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h index 0c5675d1c593..1b7c085dc2cc 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h @@ -349,6 +349,9 @@ struct dpp_funcs { struct dpp *dpp_base, enum dc_color_space color_space, struct dc_csc_transform cursor_csc_color_matrix); + + void (*dpp_force_disable_cursor)(struct dpp *dpp_base); + }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index b610beb075d5..2b874d2cc61c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -89,7 +89,7 @@ enum hubp_3dlut_fl_addressing_mode { enum hubp_3dlut_fl_width { hubp_3dlut_fl_width_17 = 17, hubp_3dlut_fl_width_33 = 33, - hubp_3dlut_fl_width_transformed = 4916 + 
hubp_3dlut_fl_width_transformed = 4916, //mpc default }; enum hubp_3dlut_fl_crossbar_bit_slice { @@ -99,6 +99,22 @@ enum hubp_3dlut_fl_crossbar_bit_slice { hubp_3dlut_fl_crossbar_bit_slice_48_63 = 3 }; +struct hubp_fl_3dlut_config { + bool enabled; + enum hubp_3dlut_fl_width width; + enum hubp_3dlut_fl_mode mode; + enum hubp_3dlut_fl_format format; + uint16_t bias; + uint16_t scale; + struct dc_plane_address address; + enum hubp_3dlut_fl_addressing_mode addr_mode; + enum dc_cm2_gpu_mem_layout layout; + uint8_t protection_bits; + enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g; + enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b; + enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r; +}; + struct hubp { const struct hubp_funcs *funcs; struct dc_context *ctx; @@ -282,13 +298,16 @@ struct hubp_funcs { void (*hubp_enable_3dlut_fl)(struct hubp *hubp, bool enable); void (*hubp_program_3dlut_fl_addressing_mode)(struct hubp *hubp, enum hubp_3dlut_fl_addressing_mode addr_mode); void (*hubp_program_3dlut_fl_width)(struct hubp *hubp, enum hubp_3dlut_fl_width width); - void (*hubp_program_3dlut_fl_tmz_protected)(struct hubp *hubp, bool protection_enabled); + void (*hubp_program_3dlut_fl_tmz_protected)(struct hubp *hubp, uint8_t protection_bits); void (*hubp_program_3dlut_fl_crossbar)(struct hubp *hubp, enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_y_g, enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b, enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r); int (*hubp_get_3dlut_fl_done)(struct hubp *hubp); + void (*hubp_program_3dlut_fl_config)(struct hubp *hubp, struct hubp_fl_3dlut_config *cfg); void (*hubp_clear_tiling)(struct hubp *hubp); + uint32_t (*hubp_get_current_read_line)(struct hubp *hubp); + uint32_t (*hubp_get_det_config_error)(struct hubp *hubp); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h index 6e303b81bfb0..22960ee03dee 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h @@ -115,6 +115,16 @@ enum MCM_LUT_ID { MCM_LUT_SHAPER }; +struct mpc_fl_3dlut_config { + bool enabled; + uint16_t width; + bool select_lut_bank_a; + uint16_t bit_depth; + int hubp_index; + uint16_t bias; + uint16_t scale; +}; + union mcm_lut_params { const struct pwl_params *pwl; const struct tetrahedral_params *lut3d; @@ -190,6 +200,42 @@ struct mpc_grph_gamut_adjustment { enum mpcc_gamut_remap_id mpcc_gamut_remap_block_id; }; +struct mpc_rmcm_regs { + uint32_t rmcm_3dlut_mem_pwr_state; + uint32_t rmcm_3dlut_mem_pwr_force; + uint32_t rmcm_3dlut_mem_pwr_dis; + uint32_t rmcm_3dlut_mem_pwr_mode; + uint32_t rmcm_3dlut_size; + uint32_t rmcm_3dlut_mode; + uint32_t rmcm_3dlut_mode_cur; + uint32_t rmcm_3dlut_read_sel; + uint32_t rmcm_3dlut_30bit_en; + uint32_t rmcm_3dlut_wr_en_mask; + uint32_t rmcm_3dlut_ram_sel; + uint32_t rmcm_3dlut_out_norm_factor; + uint32_t rmcm_3dlut_fl_sel; + uint32_t rmcm_3dlut_out_offset_r; + uint32_t rmcm_3dlut_out_scale_r; + uint32_t rmcm_3dlut_fl_done; + uint32_t rmcm_3dlut_fl_soft_underflow; + uint32_t rmcm_3dlut_fl_hard_underflow; + uint32_t rmcm_cntl; + uint32_t rmcm_shaper_mem_pwr_state; + uint32_t rmcm_shaper_mem_pwr_force; + uint32_t rmcm_shaper_mem_pwr_dis; + uint32_t rmcm_shaper_mem_pwr_mode; + uint32_t rmcm_shaper_lut_mode; + uint32_t rmcm_shaper_mode_cur; + uint32_t rmcm_shaper_lut_write_en_mask; + uint32_t rmcm_shaper_lut_write_sel; + uint32_t rmcm_shaper_offset_b; + uint32_t rmcm_shaper_scale_b; + uint32_t 
rmcm_shaper_rama_exp_region_start_b; + uint32_t rmcm_shaper_rama_exp_region_start_seg_b; + uint32_t rmcm_shaper_rama_exp_region_end_b; + uint32_t rmcm_shaper_rama_exp_region_end_base_b; +}; + struct mpcc_sm_cfg { bool enable; /* 0-single plane,2-row subsampling,4-column subsampling,6-checkboard subsampling */ @@ -301,6 +347,7 @@ struct mpcc_state { uint32_t rgam_mode; uint32_t rgam_lut; struct mpc_grph_gamut_adjustment gamut_remap; + struct mpc_rmcm_regs rmcm_regs; }; /** @@ -1022,22 +1069,12 @@ struct mpc_funcs { */ void (*program_lut_mode)(struct mpc *mpc, const enum MCM_LUT_ID id, const enum MCM_LUT_XABLE xable, bool lut_bank_a, int mpcc_id); - /** - * @program_3dlut_size: - * - * Program 3D LUT size. - * - * Parameters: - * - [in/out] mpc - MPC context. - * - [in] is_17x17x17 - is 3dlut 17x17x17 - * - [in] mpcc_id - * - * Return: - * - * void - */ - void (*program_3dlut_size)(struct mpc *mpc, bool is_17x17x17, int mpcc_id); + /** + * @mcm: + * + * MPC MCM new HW sequential programming functions + */ struct { void (*program_3dlut_size)(struct mpc *mpc, uint32_t width, int mpcc_id); void (*program_bias_scale)(struct mpc *mpc, uint16_t bias, uint16_t scale, int mpcc_id); @@ -1050,7 +1087,13 @@ struct mpc_funcs { bool lut_bank_a, int mpcc_id); } mcm; + /** + * @rmcm: + * + * MPC RMCM new HW sequential programming functions + */ struct { + void (*fl_3dlut_configure)(struct mpc *mpc, struct mpc_fl_3dlut_config *cfg, int mpcc_id); void (*enable_3dlut_fl)(struct mpc *mpc, bool enable, int mpcc_id); void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx); void (*program_lut_read_write_control)(struct mpc *mpc, const enum MCM_LUT_ID id, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h index 00ea3864dd4d..227e3f8d7e5f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h @@ -46,7 +46,10 @@ struct pg_cntl_funcs { void (*opp_pg_control)(struct pg_cntl *pg_cntl, unsigned int opp_inst, bool power_on); void (*optc_pg_control)(struct pg_cntl *pg_cntl, unsigned int optc_inst, bool power_on); void (*dwb_pg_control)(struct pg_cntl *pg_cntl, bool power_on); + void (*mem_pg_control)(struct pg_cntl *pg_cntl, bool power_on); + void (*dio_pg_control)(struct pg_cntl *pg_cntl, bool power_on); void (*init_pg_status)(struct pg_cntl *pg_cntl); + void (*print_pg_status)(struct pg_cntl *pg_cntl, const char *debug_func, const char *debug_log); }; #endif //__DC_PG_CNTL_H__ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index fe7f3137f228..27f950ae45ee 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -117,6 +117,7 @@ struct stream_encoder { uint32_t stream_enc_inst; struct vpg *vpg; struct afmt *afmt; + struct apg *apg; }; struct enc_state { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index 267ace4eef8a..f2de2cf23859 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -374,6 +374,7 @@ struct timing_generator_funcs { void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg); void (*set_long_vtotal)(struct timing_generator *optc, const struct long_vtotal_params *params); void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator 
*tg); + void (*wait_otg_disable)(struct timing_generator *optc); bool (*get_optc_double_buffer_pending)(struct timing_generator *tg); bool (*get_otg_double_buffer_pending)(struct timing_generator *tg); bool (*get_pipe_update_pending)(struct timing_generator *tg); diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link_service.h index 7d16351bba99..1e34e84160aa 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_service.h @@ -42,8 +42,8 @@ * dc_link_exports.c or other dc files implement dc.h * * DC to Link: - * dc_link_exports.c or other dc files include link.h - * link_factory.c implements link.h + * dc_link_exports.c or other dc files include link_service.h + * link_factory.c implements link_service.h * * Link sub-component to Link sub-component: * link_factory.c includes --> link_xxx.h @@ -73,7 +73,7 @@ * 2. Implement your function in the suitable link_xxx.c file. * 3. Assign the function to link_service in link_factory.c * 4. NEVER include link_xxx.h headers outside link component. - * 5. NEVER include link.h on DM side. + * 5. NEVER include link_service.h on DM side. */ #include "core_types.h" @@ -144,9 +144,9 @@ struct link_service { uint32_t (*dp_link_bandwidth_kbps)( const struct dc_link *link, const struct dc_link_settings *link_settings); - bool (*validate_dpia_bandwidth)( - const struct dc_stream_state *stream, - const unsigned int num_streams); + enum dc_status (*validate_dp_tunnel_bandwidth)( + const struct dc *dc, + const struct dc_state *new_ctx); uint32_t (*dp_required_hblank_size_bytes)( const struct dc_link *link, @@ -218,7 +218,10 @@ struct link_service { bool (*dp_overwrite_extended_receiver_cap)(struct dc_link *link); enum lttpr_mode (*dp_decide_lttpr_mode)(struct dc_link *link, struct dc_link_settings *link_setting); - + uint8_t (*dp_get_lttpr_count)(struct dc_link *link); + void (*edp_get_alpm_support)(struct dc_link *link, + bool *auxless_support, + bool *auxwake_support); /*************************** DP DPIA/PHY ******************************/ void (*dpia_handle_usb4_bandwidth_allocation_for_link)( diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index a890f581f4e8..4e26a16a8743 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -45,6 +45,7 @@ enum dce_version resource_parse_asic_id( struct resource_caps { int num_timing_generator; int num_opp; + int num_dpp; int num_video_plane; int num_audio; int num_stream_encoder; diff --git a/drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h b/drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h new file mode 100644 index 000000000000..23daf98b8aa8 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: MIT +// +// Copyright 2025 Advanced Micro Devices, Inc. 
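The remainder of this new soc_and_ip_translator.h header, below, only declares the translator interface. A hedged sketch of how DML-facing code might consume it, assuming dc->ctx->dce_version as the version source; the wrapper name and local parameters are illustrative, not from this patch.

/* Illustrative usage of the soc_and_ip_translator API declared below. */
static void example_fill_dml2_soc_and_ip_params(const struct dc *dc,
						const struct dml2_configuration_options *config,
						struct dml2_soc_bb *soc_bb,
						struct dml2_ip_capabilities *ip_caps)
{
	struct soc_and_ip_translator *translator =
		dc_create_soc_and_ip_translator(dc->ctx->dce_version);

	if (!translator)
		return;

	translator->translator_funcs->get_soc_bb(soc_bb, dc, config);
	translator->translator_funcs->get_ip_caps(ip_caps);

	dc_destroy_soc_and_ip_translator(&translator);
}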
+ +#ifndef __SOC_AND_IP_TRANSLATOR_H__ +#define __SOC_AND_IP_TRANSLATOR_H__ + +#include "dc.h" +#include "dml_top_soc_parameter_types.h" + +struct soc_and_ip_translator_funcs { + void (*get_soc_bb)(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config); + void (*get_ip_caps)(struct dml2_ip_capabilities *dml_ip_caps); +}; + +struct soc_and_ip_translator { + const struct soc_and_ip_translator_funcs *translator_funcs; +}; + +struct soc_and_ip_translator *dc_create_soc_and_ip_translator(enum dce_version dc_version); +void dc_destroy_soc_and_ip_translator(struct soc_and_ip_translator **soc_and_ip_translator); + + +#endif // __SOC_AND_IP_TRANSLATOR_H__ diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c index 96febabf464a..9e33bf937a69 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c @@ -34,6 +34,7 @@ #include "dm_helpers.h" #include "dc_dmub_srv.h" #include "dce/dmub_hw_lock_mgr.h" +#include "clk_mgr.h" #define DC_LOGGER \ link->ctx->logger @@ -67,10 +68,19 @@ static void dp_retrain_link_dp_test(struct dc_link *link, { struct pipe_ctx *pipes[MAX_PIPES]; struct dc_state *state = link->dc->current_state; + struct dc_stream_update stream_update = { 0 }; + bool dpms_off = false; + bool needs_divider_update = false; bool was_hpo_acquired = resource_is_hpo_acquired(link->dc->current_state); bool is_hpo_acquired; uint8_t count; int i; + struct audio_output audio_output[MAX_PIPES]; + struct dc_stream_state *streams_on_link[MAX_PIPES]; + int num_streams_on_link = 0; + + needs_divider_update = (link->dc->link_srv->dp_get_encoding_format(link_setting) != + link->dc->link_srv->dp_get_encoding_format((const struct dc_link_settings *) &link->cur_link_settings)); udelay(100); @@ -83,16 +93,66 @@ static void dp_retrain_link_dp_test(struct dc_link *link, link->dc, state, pipes[i]); + + // Disable OTG and re-enable after updating clocks + pipes[i]->stream_res.tg->funcs->disable_crtc(pipes[i]->stream_res.tg); } - if (link->dc->hwss.setup_hpo_hw_control) { - is_hpo_acquired = resource_is_hpo_acquired(state); - if (was_hpo_acquired != is_hpo_acquired) - link->dc->hwss.setup_hpo_hw_control(link->dc->hwseq, is_hpo_acquired); + if (needs_divider_update && link->dc->res_pool->funcs->update_dc_state_for_encoder_switch) { + link->dc->res_pool->funcs->update_dc_state_for_encoder_switch(link, + link_setting, count, + *pipes, &audio_output[0]); + for (i = 0; i < count; i++) { + pipes[i]->clock_source->funcs->program_pix_clk( + pipes[i]->clock_source, + &pipes[i]->stream_res.pix_clk_params, + link->dc->link_srv->dp_get_encoding_format(&pipes[i]->link_config.dp_link_settings), + &pipes[i]->pll_settings); + + if (pipes[i]->stream_res.audio != NULL) { + const struct link_hwss *link_hwss = get_link_hwss( + link, &pipes[i]->link_res); + + link_hwss->setup_audio_output(pipes[i], &audio_output[i], + pipes[i]->stream_res.audio->inst); + + pipes[i]->stream_res.audio->funcs->az_configure( + pipes[i]->stream_res.audio, + pipes[i]->stream->signal, + &audio_output[i].crtc_info, + &pipes[i]->stream->audio_info, + &audio_output[i].dp_link_info); + + if (link->dc->config.disable_hbr_audio_dp2 && + pipes[i]->stream_res.audio->funcs->az_disable_hbr_audio && + link->dc->link_srv->dp_is_128b_132b_signal(pipes[i])) + pipes[i]->stream_res.audio->funcs->az_disable_hbr_audio(pipes[i]->stream_res.audio); + } + } } - for (i = 
count-1; i >= 0; i--) - link_set_dpms_on(state, pipes[i]); + // Toggle on HPO I/O if necessary + is_hpo_acquired = resource_is_hpo_acquired(state); + if (was_hpo_acquired != is_hpo_acquired && link->dc->hwss.setup_hpo_hw_control) + link->dc->hwss.setup_hpo_hw_control(link->dc->hwseq, is_hpo_acquired); + + for (i = 0; i < count; i++) + pipes[i]->stream_res.tg->funcs->enable_crtc(pipes[i]->stream_res.tg); + + // Set DPMS on with stream update + // Cache all streams on current link since dc_update_planes_and_stream might kill current_state + for (i = 0; i < MAX_PIPES; i++) { + if (state->streams[i] && state->streams[i]->link && state->streams[i]->link == link) + streams_on_link[num_streams_on_link++] = state->streams[i]; + } + + for (i = 0; i < num_streams_on_link; i++) { + if (streams_on_link[i] && streams_on_link[i]->link && streams_on_link[i]->link == link) { + stream_update.stream = streams_on_link[i]; + stream_update.dpms_off = &dpms_off; + dc_update_planes_and_stream(state->clk_mgr->ctx->dc, NULL, 0, streams_on_link[i], &stream_update); + } + } } static void dp_test_send_link_training(struct dc_link *link) diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h index eae23ea7f6ec..033650cdb811 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h @@ -24,7 +24,7 @@ */ #ifndef __LINK_DP_CTS_H__ #define __LINK_DP_CTS_H__ -#include "link.h" +#include "link_service.h" void dp_handle_automated_test(struct dc_link *link); bool dp_set_test_pattern( struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h index ab437a0c9101..9ff4a6c46a2b 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h @@ -24,7 +24,7 @@ */ #ifndef __LINK_DP_TRACE_H__ #define __LINK_DP_TRACE_H__ -#include "link.h" +#include "link_service.h" void dp_trace_init(struct dc_link *link); void dp_trace_reset(struct dc_link *link); diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c index b68bcc9fca0a..892907991f91 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c @@ -138,8 +138,7 @@ void setup_dio_stream_attribute(struct pipe_ctx *pipe_ctx) stream_encoder->funcs->dvi_set_stream_attribute( stream_encoder, &stream->timing, - (stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) ? 
- true : false); + stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK); else if (dc_is_lvds_signal(stream->signal)) stream_encoder->funcs->lvds_set_stream_attribute( stream_encoder, diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h index 45f0e091fcb0..4a25210a344f 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h @@ -27,7 +27,7 @@ #define __LINK_HWSS_DIO_H__ #include "link_hwss.h" -#include "link.h" +#include "link_service.h" const struct link_hwss *get_dio_link_hwss(void); bool can_use_dio_link_hwss(const struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h index 9ac08a332540..cf578a8662a4 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h @@ -25,7 +25,7 @@ #ifndef __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__ #define __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__ -#include "link.h" +#include "link_service.h" uint32_t dp_dio_fixed_vs_pe_retimer_get_lttpr_write_address(struct dc_link *link); uint8_t dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(struct dc_link *link); diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h index 1d3ed8ca83b5..7c9005bc2587 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h @@ -26,7 +26,7 @@ #define __LINK_HWSS_HPO_DP_H__ #include "link_hwss.h" -#include "link.h" +#include "link_service.h" void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx, struct fixed31_32 throttled_vcp_size); diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c index 116ff37126e7..55c5148de800 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c @@ -74,7 +74,7 @@ static void dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(struct dc_link *link, static void dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(struct dc_link *link, struct encoder_set_dp_phy_pattern_param *tp_params) { - uint8_t clk_src = 0x4C; + uint8_t clk_src = 0xC4; uint8_t pattern = 0x4F; /* SQ128 */ const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0}; diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h index 82301187bc7c..8bf36827ecfb 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h @@ -25,7 +25,7 @@ #ifndef __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__ #define __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__ -#include "link.h" +#include "link_service.h" bool requires_fixed_vs_pe_retimer_hpo_link_hwss(const struct dc_link *link); const struct link_hwss *get_hpo_fixed_vs_pe_retimer_dp_link_hwss(void); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c index 9655e6fa53a4..85303167a553 100644 
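Several headers in this hunk switch from link.h to link_service.h, and struct link_service itself gains dp_get_lttpr_count and edp_get_alpm_support entries. Per the comment block above, DC-side callers are expected to reach these only through the function table (the "NEVER include link_service.h on DM side" rule), not by including link sub-component headers. Below is a minimal stand-alone sketch of that indirection; it uses toy types, not the real dc_link/link_service structures.

#include <stdint.h>
#include <stdio.h>

/* Stand-in model of the function-table indirection: callers go through
 * something like link->dc->link_srv->dp_get_lttpr_count(link) instead of
 * including link component headers directly. */
struct toy_link {
	uint8_t lttpr_count;	/* already parsed from PHY_REPEATER_CNT */
};

struct toy_link_service {
	uint8_t (*dp_get_lttpr_count)(const struct toy_link *link);
	/* ...the real struct link_service carries many more entries... */
};

static uint8_t toy_dp_get_lttpr_count(const struct toy_link *link)
{
	return link->lttpr_count;
}

int main(void)
{
	const struct toy_link_service srv = {
		.dp_get_lttpr_count = toy_dp_get_lttpr_count,
	};
	const struct toy_link link = { .lttpr_count = 2 };

	/* Mirrors a DC-side call site using the table, not a direct include. */
	printf("lttpr count: %u\n", srv.dp_get_lttpr_count(&link));
	return 0;
}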
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -593,8 +593,9 @@ static bool detect_dp(struct dc_link *link, if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT; - if (!detect_dp_sink_caps(link)) + if (!detect_dp_sink_caps(link)) { return false; + } if (is_dp_branch_device(link)) /* DP SST branch */ @@ -655,7 +656,7 @@ static bool wait_for_entering_dp_alt_mode(struct dc_link *link) return true; is_in_alt_mode = link->link_enc->funcs->is_in_alt_mode(link->link_enc); - DC_LOG_DC("DP Alt mode state on HPD: %d\n", is_in_alt_mode); + DC_LOG_DC("DP Alt mode state on HPD: %d Link=%d\n", is_in_alt_mode, link->link_index); if (is_in_alt_mode) return true; @@ -1139,6 +1140,10 @@ static bool detect_link_and_local_sink(struct dc_link *link, if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A && !sink->edid_caps.edid_hdmi) sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK; + else if (dc_is_dvi_signal(sink->sink_signal) && + aud_support->hdmi_audio_native && + sink->edid_caps.edid_hdmi) + sink->sink_signal = SIGNAL_TYPE_HDMI_TYPE_A; if (link->local_sink && dc_is_dp_signal(sink_caps.signal)) dp_trace_init(link); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.h b/drivers/gpu/drm/amd/display/dc/link/link_detection.h index 7da05078721e..1ab29476060b 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.h +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.h @@ -25,7 +25,7 @@ #ifndef __DC_LINK_DETECTION_H__ #define __DC_LINK_DETECTION_H__ -#include "link.h" +#include "link_service.h" bool link_detect(struct dc_link *link, enum dc_detect_reason reason); bool link_detect_connection_type(struct dc_link *link, enum dc_connection_type *type); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 273a3be6d593..83419e1a9036 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -140,7 +140,8 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init) } } - if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init) + if (((!dc->is_switch_in_progress_dest) && ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)) && + (link->type != dc_connection_none)) dpcd_write_rx_power_ctrl(link, false); } } @@ -831,7 +832,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) enum optc_dsc_mode optc_dsc_mode; /* Enable DSC hw block */ - dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->hblank_borrow + + dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; @@ -842,14 +843,14 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; if (should_use_dto_dscclk) - dccg->funcs->set_dto_dscclk(dccg, dsc->inst); + dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h); dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg); dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc; if 
(should_use_dto_dscclk) - dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst); + dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h); odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg); odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst); } @@ -2296,8 +2297,7 @@ static bool allocate_usb4_bandwidth_for_stream(struct dc_stream_state *stream, i link->dpia_bw_alloc_config.remote_sink_req_bw[sink_index] = bw; } - /* get dp overhead for dp tunneling */ - link->dpia_bw_alloc_config.dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(link); + link->dpia_bw_alloc_config.dp_overhead = link_dpia_get_dp_overhead(link); req_bw += link->dpia_bw_alloc_config.dp_overhead; link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, req_bw); @@ -2358,9 +2358,9 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx) if (pipe_ctx->stream->sink) { if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL && pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) { - DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__, + DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x link=%d\n", __func__, pipe_ctx->stream->sink->edid_caps.display_name, - pipe_ctx->stream->signal); + pipe_ctx->stream->signal, link->link_index); } } @@ -2458,7 +2458,6 @@ void link_set_dpms_on( struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc; enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO; struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; - const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); bool apply_edp_fast_boot_optimization = pipe_ctx->stream->apply_edp_fast_boot_optimization; @@ -2474,9 +2473,10 @@ void link_set_dpms_on( if (pipe_ctx->stream->sink) { if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL && pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) { - DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__, + DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x link=%d\n", __func__, pipe_ctx->stream->sink->edid_caps.display_name, - pipe_ctx->stream->signal); + pipe_ctx->stream->signal, + link->link_index); } } @@ -2502,8 +2502,6 @@ void link_set_dpms_on( pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest); } - link_hwss->setup_stream_attribute(pipe_ctx); - pipe_ctx->stream->apply_edp_fast_boot_optimization = false; // Enable VPG before building infoframe @@ -2537,6 +2535,14 @@ void link_set_dpms_on( !pipe_ctx->next_odm_pipe) { pipe_ctx->stream->dpms_off = false; update_psp_stream_config(pipe_ctx, false); + + if (link->is_dds) { + uint32_t post_oui_delay = 30; // 30ms + + dpcd_set_source_specific_data(link); + msleep(post_oui_delay); + } + return; } @@ -2629,6 +2635,15 @@ void link_set_dpms_on( dp_is_128b_132b_signal(pipe_ctx)) update_sst_payload(pipe_ctx, true); + /* Corruption was observed on systems with display mux when stream gets + * enabled after the mux switch. Having a small delay between link + * training and stream unblank resolves the corruption issue. + * This is workaround. 
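In link_set_dsc_on_stream above, the per-OPP DSC configuration divides both the picture width (now including the dsc_hactive_padding term) and the horizontal slice count by the number of OPPs, and the per-OPP slice count is also passed to set_dto_dscclk. The stand-alone model below only restates that arithmetic; all numbers are illustrative assumptions, not taken from a real mode.

#include <stdint.h>
#include <stdio.h>

/* Toy model of the per-OPP DSC sizing done in link_set_dsc_on_stream. */
int main(void)
{
	uint32_t h_addressable = 3840;
	uint32_t dsc_hactive_padding = 0;	/* new padding term in this diff */
	uint32_t h_border_left = 0, h_border_right = 0;
	uint32_t num_slices_h = 8;		/* slices across the whole frame */
	uint32_t opp_cnt = 2;			/* e.g. ODM combine across 2 OPPs */

	uint32_t pic_width = (h_addressable + dsc_hactive_padding +
			      h_border_left + h_border_right) / opp_cnt;
	uint32_t slices_per_opp = num_slices_h / opp_cnt;

	/* Each DSC instance sees half the frame: 1920 px wide, 4 slices. */
	printf("pic_width per OPP: %u, slices per OPP: %u\n",
	       pic_width, slices_per_opp);
	return 0;
}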
+ */ + if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && + link->is_display_mux_present) + msleep(20); + dc->hwss.unblank_stream(pipe_ctx, &pipe_ctx->stream->link->cur_link_settings); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.h b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h index 9398f9c1666a..bd6fc63064a3 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.h +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h @@ -26,7 +26,7 @@ #ifndef __DC_LINK_DPMS_H__ #define __DC_LINK_DPMS_H__ -#include "link.h" +#include "link_service.h" void link_set_dpms_on( struct dc_state *state, struct pipe_ctx *pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c index 1a04f4b74585..31a73867cd4c 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c @@ -100,7 +100,7 @@ static void construct_link_service_validation(struct link_service *link_srv) { link_srv->validate_mode_timing = link_validate_mode_timing; link_srv->dp_link_bandwidth_kbps = dp_link_bandwidth_kbps; - link_srv->validate_dpia_bandwidth = link_validate_dpia_bandwidth; + link_srv->validate_dp_tunnel_bandwidth = link_validate_dp_tunnel_bandwidth; link_srv->dp_required_hblank_size_bytes = dp_required_hblank_size_bytes; } @@ -165,6 +165,8 @@ static void construct_link_service_dp_capability(struct link_service *link_srv) link_srv->dp_overwrite_extended_receiver_cap = dp_overwrite_extended_receiver_cap; link_srv->dp_decide_lttpr_mode = dp_decide_lttpr_mode; + link_srv->dp_get_lttpr_count = dp_get_lttpr_count; + link_srv->edp_get_alpm_support = edp_get_alpm_support; } /* link dp phy/dpia implements basic dp phy/dpia functionality such as @@ -539,10 +541,16 @@ static bool construct_phy(struct dc_link *link, break; case CONNECTOR_ID_EDP: + // If smartmux is supported, only create the link on the primary eDP. + // Dual eDP is not supported with smartmux. 
+ if (!(!link->dc->config.smart_mux_version || dc_ctx->dc_edp_id_count == 0)) + goto create_fail; + link->connector_signal = SIGNAL_TYPE_EDP; if (link->hpd_gpio) { - if (!link->dc->config.allow_edp_hotplug_detection) + if (!link->dc->config.allow_edp_hotplug_detection + && !is_smartmux_suported(link)) link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; switch (link->dc->config.allow_edp_hotplug_detection) { diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.h b/drivers/gpu/drm/amd/display/dc/link/link_factory.h index e96220d48d03..aad36ca1a31c 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_factory.h +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.h @@ -24,7 +24,7 @@ */ #ifndef __LINK_FACTORY_H__ #define __LINK_FACTORY_H__ -#include "link.h" +#include "link_service.h" struct dc_link *link_create(const struct link_init_data *init_params); void link_destroy(struct dc_link **link); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_resource.h b/drivers/gpu/drm/amd/display/dc/link/link_resource.h index 1907bda3cb6e..f7aa3bc3a93a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_resource.h +++ b/drivers/gpu/drm/amd/display/dc/link/link_resource.h @@ -24,7 +24,7 @@ */ #ifndef __LINK_RESOURCE_H__ #define __LINK_RESOURCE_H__ -#include "link.h" +#include "link_service.h" void link_get_cur_res_map(const struct dc *dc, uint32_t *map); void link_restore_res_map(const struct dc *dc, uint32_t *map); void link_get_cur_link_res(const struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c index 29606fda029d..acdc162de535 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c @@ -86,6 +86,10 @@ static bool dp_active_dongle_validate_timing( if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through) return false; break; + case PIXEL_ENCODING_UNDEFINED: + /* These color depths are currently not supported */ + ASSERT(false); + break; default: /* Invalid Pixel Encoding*/ return false; @@ -104,6 +108,10 @@ static bool dp_active_dongle_validate_timing( if (dongle_caps->dp_hdmi_max_bpc < 12) return false; break; + case COLOR_DEPTH_UNDEFINED: + /* These color depths are currently not supported */ + ASSERT(false); + break; case COLOR_DEPTH_141414: case COLOR_DEPTH_161616: default: @@ -255,6 +263,14 @@ uint32_t dp_link_bandwidth_kbps( return link_rate_per_lane_kbps * link_settings->lane_count / 10000 * total_data_bw_efficiency_x10000; } +static uint32_t dp_get_timing_bandwidth_kbps( + const struct dc_crtc_timing *timing, + const struct dc_link *link) +{ + return dc_bandwidth_in_kbps_from_timing(timing, + dc_link_get_highest_encoding_format(link)); +} + static bool dp_validate_mode_timing( struct dc_link *link, const struct dc_crtc_timing *timing) @@ -351,63 +367,83 @@ enum dc_status link_validate_mode_timing( return DC_OK; } +static const struct dc_tunnel_settings *get_dp_tunnel_settings(const struct dc_state *context, + const struct dc_stream_state *stream) +{ + int i; + const struct dc_tunnel_settings *dp_tunnel_settings = NULL; + + for (i = 0; i < MAX_PIPES; i++) { + if (context->res_ctx.pipe_ctx[i].stream && (context->res_ctx.pipe_ctx[i].stream == stream)) { + dp_tunnel_settings = &context->res_ctx.pipe_ctx[i].link_config.dp_tunnel_settings; + break; + } + } + + return dp_tunnel_settings; +} + /* - * This function calculates the bandwidth required for the stream timing - * and aggregates the stream bandwidth for the respective dpia link 
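The CONNECTOR_ID_EDP guard added in construct_phy above is written as a double negation. By De Morgan's law it is equivalent to "bail out when a smart mux is configured and an eDP link has already been created", which matches the comment about only creating the primary eDP. A hedged restatement of just that condition, with stand-in parameter names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Equivalent form of the guard above:
 *   !(!smart_mux_version || edp_id_count == 0)
 * is the same as
 *   smart_mux_version && edp_id_count != 0
 * i.e. with smart mux in use, refuse to create a second eDP link. */
static bool should_skip_secondary_edp(uint8_t smart_mux_version,
				      uint32_t edp_id_count)
{
	return smart_mux_version != 0 && edp_id_count != 0;
}

int main(void)
{
	printf("%d\n", should_skip_secondary_edp(1, 0)); /* 0: primary eDP is created */
	printf("%d\n", should_skip_secondary_edp(1, 1)); /* 1: secondary eDP is skipped */
	return 0;
}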
+ * Calculates the DP tunneling bandwidth required for the stream timing + * and aggregates the stream bandwidth for the respective DP tunneling link * - * @stream: pointer to the dc_stream_state struct instance - * @num_streams: number of streams to be validated - * - * return: true if validation is succeeded + * return: dc_status */ -bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const unsigned int num_streams) +enum dc_status link_validate_dp_tunnel_bandwidth(const struct dc *dc, const struct dc_state *new_ctx) { - int bw_needed[MAX_DPIA_NUM] = {0}; - struct dc_link *dpia_link[MAX_DPIA_NUM] = {0}; - int num_dpias = 0; - - for (unsigned int i = 0; i < num_streams; ++i) { - if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT) { - /* new dpia sst stream, check whether it exceeds max dpia */ - if (num_dpias >= MAX_DPIA_NUM) - return false; + struct dc_validation_dpia_set dpia_link_sets[MAX_DPIA_NUM] = { 0 }; + uint8_t link_count = 0; + enum dc_status result = DC_OK; - dpia_link[num_dpias] = stream[i].link; - bw_needed[num_dpias] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing, - dc_link_get_highest_encoding_format(dpia_link[num_dpias])); - num_dpias++; - } else if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - uint8_t j = 0; - /* check whether its a known dpia link */ - for (; j < num_dpias; ++j) { - if (dpia_link[j] == stream[i].link) - break; - } + // Iterate through streams in the new context + for (uint8_t i = 0; (i < MAX_PIPES && i < new_ctx->stream_count); i++) { + const struct dc_stream_state *stream = new_ctx->streams[i]; + const struct dc_link *link; + const struct dc_tunnel_settings *dp_tunnel_settings; + uint32_t timing_bw; + + if (stream == NULL) + continue; + + link = stream->link; + + if (!(link && (stream->signal == SIGNAL_TYPE_DISPLAY_PORT + || stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST))) + continue; + + if ((link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) && (link->hpd_status == false)) + continue; - if (j == num_dpias) { - /* new dpia mst stream, check whether it exceeds max dpia */ - if (num_dpias >= MAX_DPIA_NUM) - return false; - else { - dpia_link[j] = stream[i].link; - num_dpias++; - } + dp_tunnel_settings = get_dp_tunnel_settings(new_ctx, stream); + + if ((dp_tunnel_settings == NULL) || (dp_tunnel_settings->should_use_dp_bw_allocation == false)) + continue; + + timing_bw = dp_get_timing_bandwidth_kbps(&stream->timing, link); + + // Find an existing entry for this 'link' in 'dpia_link_sets' + for (uint8_t j = 0; j < MAX_DPIA_NUM; j++) { + bool is_new_slot = false; + + if (dpia_link_sets[j].link == NULL) { + is_new_slot = true; + link_count++; + dpia_link_sets[j].required_bw = 0; + dpia_link_sets[j].link = link; } - bw_needed[j] += dc_bandwidth_in_kbps_from_timing(&stream[i].timing, - dc_link_get_highest_encoding_format(dpia_link[j])); + if (is_new_slot || (dpia_link_sets[j].link == link)) { + dpia_link_sets[j].tunnel_settings = dp_tunnel_settings; + dpia_link_sets[j].required_bw += timing_bw; + break; + } } } - /* Include dp overheads */ - for (uint8_t i = 0; i < num_dpias; ++i) { - int dp_overhead = 0; - - dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(dpia_link[i]); - bw_needed[i] += dp_overhead; - } + if (link_count && link_dpia_validate_dp_tunnel_bandwidth(dpia_link_sets, link_count) == false) + result = DC_FAIL_DP_TUNNEL_BW_VALIDATE; - return dpia_validate_usb4_bw(dpia_link, bw_needed, num_dpias); + return result; } struct dp_audio_layout_config { diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h 
b/drivers/gpu/drm/amd/display/dc/link/link_validation.h index bf398c49c3e8..595774e76453 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.h +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h @@ -24,15 +24,15 @@ */ #ifndef __LINK_VALIDATION_H__ #define __LINK_VALIDATION_H__ -#include "link.h" +#include "link_service.h" enum dc_status link_validate_mode_timing( const struct dc_stream_state *stream, struct dc_link *link, const struct dc_crtc_timing *timing); -bool link_validate_dpia_bandwidth( - const struct dc_stream_state *stream, - const unsigned int num_streams); +enum dc_status link_validate_dp_tunnel_bandwidth( + const struct dc *dc, + const struct dc_state *new_ctx); uint32_t dp_link_bandwidth_kbps( const struct dc_link *link, const struct dc_link_settings *link_settings); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h index a3e25e55bed6..d3e6f01a6a90 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h @@ -26,7 +26,7 @@ #ifndef __DAL_DDC_SERVICE_H__ #define __DAL_DDC_SERVICE_H__ -#include "link.h" +#include "link_service.h" #define AUX_POWER_UP_WA_DELAY 500 #define I2C_OVER_AUX_DEFER_WA_DELAY 70 diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index 0f965380a9b4..b12c11bd6a14 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -1388,6 +1388,21 @@ void dpcd_set_source_specific_data(struct dc_link *link) struct dpcd_amd_signature amd_signature = {0}; struct dpcd_amd_device_id amd_device_id = {0}; + if (link->is_dds) { + uint8_t dpcd_dp_edp_backlight_mode = 0; + + /* + * Write 0 to bits 0:1 for dp_edp_backlight_mode_set register + * if platform is DDS + */ + core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, + &dpcd_dp_edp_backlight_mode, sizeof(uint8_t)); + dpcd_dp_edp_backlight_mode &= ~0x3; + + core_link_write_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, + &dpcd_dp_edp_backlight_mode, sizeof(uint8_t)); + } + amd_device_id.device_id_byte1 = (uint8_t)(link->ctx->asic_id.chip_id); amd_device_id.device_id_byte2 = @@ -1510,8 +1525,8 @@ bool read_is_mst_supported(struct dc_link *link) return false; } - rev.raw = 0; - cap.raw = 0; + rev.raw = 0; + cap.raw = 0; st = core_link_read_dpcd(link, DP_DPCD_REV, &rev.raw, sizeof(rev)); @@ -1543,6 +1558,10 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link) return false; link->dpcd_sink_ext_caps.raw = dpcd_data; + if (link->is_dds && !link->dpcd_sink_ext_caps.bits.oled) { + link->dpcd_sink_ext_caps.raw = 0; + return false; + } if (core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_2, &edp_general_cap2, 1) != DC_OK) return false; @@ -2106,13 +2125,13 @@ void detect_edp_sink_caps(struct dc_link *link) &backlight_adj_cap, sizeof(backlight_adj_cap)); link->dpcd_caps.dynamic_backlight_capable_edp = - (backlight_adj_cap & DP_EDP_DYNAMIC_BACKLIGHT_CAP) ? true:false; + (backlight_adj_cap & DP_EDP_DYNAMIC_BACKLIGHT_CAP) ? true : false; core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_1, &general_edp_cap, sizeof(general_edp_cap)); link->dpcd_caps.set_power_state_capable_edp = - (general_edp_cap & DP_EDP_SET_POWER_CAP) ? true:false; + (general_edp_cap & DP_EDP_SET_POWER_CAP) ? 
true : false; set_default_brightness_aux(link); @@ -2176,6 +2195,12 @@ void detect_edp_sink_caps(struct dc_link *link) DP_EDP_MSO_LINK_CAPABILITIES, (uint8_t *)&link->dpcd_caps.mso_cap_sst_links_supported, sizeof(link->dpcd_caps.mso_cap_sst_links_supported)); + /* + * Read eDP general capability 2 + */ + core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_2, + (uint8_t *)&link->dpcd_caps.dp_edp_general_cap_2, + sizeof(link->dpcd_caps.dp_edp_general_cap_2)); } bool dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap) @@ -2487,3 +2512,40 @@ bool dp_is_sink_present(struct dc_link *link) return present; } + +uint8_t dp_get_lttpr_count(struct dc_link *link) +{ + if (dp_is_lttpr_present(link)) + return dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + + return 0; +} + +void edp_get_alpm_support(struct dc_link *link, + bool *auxless_support, + bool *auxwake_support) +{ + bool lttpr_present = dp_is_lttpr_present(link); + + if (auxless_support == NULL || auxwake_support == NULL) + return; + + *auxless_support = false; + *auxwake_support = false; + + if (!dc_is_embedded_signal(link->connector_signal)) + return; + + if (link->dpcd_caps.alpm_caps.bits.AUX_LESS_ALPM_CAP) { + if (lttpr_present) { + if (link->dpcd_caps.lttpr_caps.alpm.bits.AUX_LESS_ALPM_SUPPORTED) + *auxless_support = true; + } else + *auxless_support = true; + } + + if (link->dpcd_caps.alpm_caps.bits.AUX_WAKE_ALPM_CAP) { + if (!lttpr_present) + *auxwake_support = true; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h index 940b147cc5d4..6e17f72a752f 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h @@ -26,7 +26,7 @@ #ifndef __DC_LINK_DP_CAPABILITY_H__ #define __DC_LINK_DP_CAPABILITY_H__ -#include "link.h" +#include "link_service.h" bool detect_dp_sink_caps(struct dc_link *link); @@ -108,4 +108,10 @@ uint32_t link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw); bool dp_overwrite_extended_receiver_cap(struct dc_link *link); +uint8_t dp_get_lttpr_count(struct dc_link *link); + +void edp_get_alpm_support(struct dc_link *link, + bool *auxless_support, + bool *auxwake_support); + #endif /* __DC_LINK_DP_CAPABILITY_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c index 22bfdced64ab..9b2f1a7da1d1 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c @@ -75,12 +75,15 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link) if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc) { status = core_link_read_dpcd(link, USB4_DRIVER_BW_CAPABILITY, - dpcd_dp_tun_data, 1); + dpcd_dp_tun_data, 2); if (status != DC_OK) goto err; - link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.raw = dpcd_dp_tun_data[0]; + link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.raw = + dpcd_dp_tun_data[USB4_DRIVER_BW_CAPABILITY - USB4_DRIVER_BW_CAPABILITY]; + link->dpcd_caps.usb4_dp_tun_info.dpia_tunnel_info.raw = + dpcd_dp_tun_data[DP_IN_ADAPTER_TUNNEL_INFO - USB4_DRIVER_BW_CAPABILITY]; } DC_LOG_DEBUG("%s: Link[%d] DP tunneling support (RouterId=%d AdapterId=%d) " @@ -155,8 +158,14 @@ void link_decide_dp_tunnel_settings(struct dc_stream_state *stream, 
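The new edp_get_alpm_support above reduces to a small decision table: AUX-less ALPM needs the sink capability plus, when an LTTPR sits in the path, the LTTPR's own AUX-less ALPM support, while AUX-wake ALPM is only reported when no LTTPR is present. A stand-alone model of that logic (toy booleans instead of the DPCD capability structs):

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the edp_get_alpm_support decision above. */
static void toy_get_alpm_support(bool is_edp, bool lttpr_present,
				 bool sink_auxless_cap, bool lttpr_auxless_cap,
				 bool sink_auxwake_cap,
				 bool *auxless, bool *auxwake)
{
	*auxless = false;
	*auxwake = false;

	if (!is_edp)
		return;

	/* AUX-less: sink cap, and any LTTPR in the path must support it too. */
	if (sink_auxless_cap)
		*auxless = !lttpr_present || lttpr_auxless_cap;

	/* AUX-wake is only reported when there is no LTTPR. */
	if (sink_auxwake_cap && !lttpr_present)
		*auxwake = true;
}

int main(void)
{
	bool auxless, auxwake;

	toy_get_alpm_support(true, true, true, false, true, &auxless, &auxwake);
	/* LTTPR present without AUX-less support: both end up false. */
	printf("auxless=%d auxwake=%d\n", auxless, auxwake);
	return 0;
}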
link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling; if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc - && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support) + && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support) { dp_tunnel_setting->should_use_dp_bw_allocation = true; + dp_tunnel_setting->cm_id = link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id & 0x0F; + dp_tunnel_setting->group_id = link->dpcd_caps.usb4_dp_tun_info.dpia_tunnel_info.bits.group_id; + dp_tunnel_setting->estimated_bw = link->dpia_bw_alloc_config.estimated_bw; + dp_tunnel_setting->allocated_bw = link->dpia_bw_alloc_config.allocated_bw; + dp_tunnel_setting->bw_granularity = link->dpia_bw_alloc_config.bw_granularity; + } } } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h index a61edfc9ca7a..7cd03fa4892b 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h @@ -27,7 +27,7 @@ #ifndef __DC_LINK_DPIA_H__ #define __DC_LINK_DPIA_H__ -#include "link.h" +#include "link_service.h" /* Read tunneling device capability from DPCD and update link capability * accordingly. diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c index 3af7564a84f1..8a3c18ae97a7 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c @@ -35,6 +35,8 @@ #define Kbps_TO_Gbps (1000 * 1000) +#define MST_TIME_SLOT_COUNT 64 + // ------------------------------------------------------------------ // PRIVATE FUNCTIONS // ------------------------------------------------------------------ @@ -46,8 +48,7 @@ */ static bool link_dp_is_bw_alloc_available(struct dc_link *link) { - return (link && link->hpd_status - && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling + return (link && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support); } @@ -160,78 +161,6 @@ static void retrieve_usb4_dp_bw_allocation_info(struct dc_link *link) link->dpia_bw_alloc_config.nrd_max_lane_count); } -static uint8_t get_lowest_dpia_index(struct dc_link *link) -{ - const struct dc *dc_struct = link->dc; - uint8_t idx = 0xFF; - int i; - - for (i = 0; i < MAX_LINKS; ++i) { - - if (!dc_struct->links[i] || - dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) - continue; - - if (idx > dc_struct->links[i]->link_index) { - idx = dc_struct->links[i]->link_index; - break; - } - } - - return idx; -} - -/* - * Get the maximum dp tunnel banwidth of host router - * - * @dc: pointer to the dc struct instance - * @hr_index: host router index - * - * return: host router maximum dp tunnel bandwidth - */ -static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_index) -{ - uint8_t lowest_dpia_index = get_lowest_dpia_index(dc->links[0]); - uint8_t hr_index_temp = 0; - struct dc_link *link_dpia_primary, *link_dpia_secondary; - int total_bw = 0; - - for (uint8_t i = 0; i < MAX_LINKS - 1; ++i) { - - if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) - continue; - - hr_index_temp = (dc->links[i]->link_index - lowest_dpia_index) / 2; - - if (hr_index_temp == hr_index) { - 
link_dpia_primary = dc->links[i]; - link_dpia_secondary = dc->links[i + 1]; - - /** - * If BW allocation enabled on both DPIAs, then - * HR BW = Estimated(dpia_primary) + Allocated(dpia_secondary) - * otherwise HR BW = Estimated(bw alloc enabled dpia) - */ - if ((link_dpia_primary->hpd_status && - link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) && - (link_dpia_secondary->hpd_status && - link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled)) { - total_bw += link_dpia_primary->dpia_bw_alloc_config.estimated_bw + - link_dpia_secondary->dpia_bw_alloc_config.allocated_bw; - } else if (link_dpia_primary->hpd_status && - link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) { - total_bw = link_dpia_primary->dpia_bw_alloc_config.estimated_bw; - } else if (link_dpia_secondary->hpd_status && - link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled) { - total_bw += link_dpia_secondary->dpia_bw_alloc_config.estimated_bw; - } - break; - } - } - - return total_bw; -} - /* * Cleanup function for when the dpia is unplugged to reset struct * and perform any required clean up @@ -251,32 +180,40 @@ static void dpia_bw_alloc_unplug(struct dc_link *link) static void link_dpia_send_bw_alloc_request(struct dc_link *link, int req_bw) { - uint8_t requested_bw; - uint32_t temp; + uint8_t request_reg_val; + uint32_t temp, request_bw; - /* Error check whether request bw greater than allocated */ - if (req_bw > link->dpia_bw_alloc_config.estimated_bw) { - DC_LOG_ERROR("%s: Request BW greater than estimated BW for link(%d)\n", - __func__, link->link_index); - req_bw = link->dpia_bw_alloc_config.estimated_bw; + if (link->dpia_bw_alloc_config.bw_granularity == 0) { + DC_LOG_ERROR("%s: Link[%d]: bw_granularity is zero!", __func__, link->link_index); + return; } temp = req_bw * link->dpia_bw_alloc_config.bw_granularity; - requested_bw = temp / Kbps_TO_Gbps; + request_reg_val = temp / Kbps_TO_Gbps; /* Always make sure to add more to account for floating points */ if (temp % Kbps_TO_Gbps) - ++requested_bw; + ++request_reg_val; - /* Error check whether requested and allocated are equal */ - req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); - if (req_bw && (req_bw == link->dpia_bw_alloc_config.allocated_bw)) { - DC_LOG_ERROR("%s: Request BW equals to allocated BW for link(%d)\n", - __func__, link->link_index); + request_bw = request_reg_val * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); + + if (request_bw > link->dpia_bw_alloc_config.estimated_bw) { + DC_LOG_ERROR("%s: Link[%d]: Request BW (%d --> %d) > Estimated BW (%d)... 
Set to Estimated BW!", + __func__, link->link_index, + req_bw, request_bw, link->dpia_bw_alloc_config.estimated_bw); + req_bw = link->dpia_bw_alloc_config.estimated_bw; + + temp = req_bw * link->dpia_bw_alloc_config.bw_granularity; + request_reg_val = temp / Kbps_TO_Gbps; + if (temp % Kbps_TO_Gbps) + ++request_reg_val; } + link->dpia_bw_alloc_config.allocated_bw = request_bw; + DC_LOG_DC("%s: Link[%d]: Request BW: %d", __func__, link->link_index, request_bw); + core_link_write_dpcd(link, REQUESTED_BW, - &requested_bw, + &request_reg_val, sizeof(uint8_t)); } @@ -288,33 +225,40 @@ bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link) bool ret = false; uint8_t val; - if (link->hpd_status) { - val = DPTX_BW_ALLOC_MODE_ENABLE | DPTX_BW_ALLOC_UNMASK_IRQ; + if (link->dc->debug.dpia_debug.bits.enable_bw_allocation_mode == false) { + DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode disabled", __func__, link->link_index); + return false; + } - if (core_link_write_dpcd(link, DPTX_BW_ALLOCATION_MODE_CONTROL, &val, sizeof(uint8_t)) == DC_OK) { - DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode enabled", __func__, link->link_index); + val = DPTX_BW_ALLOC_MODE_ENABLE | DPTX_BW_ALLOC_UNMASK_IRQ; - retrieve_usb4_dp_bw_allocation_info(link); + if (core_link_write_dpcd(link, DPTX_BW_ALLOCATION_MODE_CONTROL, &val, sizeof(uint8_t)) == DC_OK) { + DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode enabled", __func__, link->link_index); - if (link->dpia_bw_alloc_config.nrd_max_link_rate && link->dpia_bw_alloc_config.nrd_max_lane_count) { - link->reported_link_cap.link_rate = link->dpia_bw_alloc_config.nrd_max_link_rate; - link->reported_link_cap.lane_count = link->dpia_bw_alloc_config.nrd_max_lane_count; - } + retrieve_usb4_dp_bw_allocation_info(link); - link->dpia_bw_alloc_config.bw_alloc_enabled = true; - ret = true; + if ( + link->dpia_bw_alloc_config.nrd_max_link_rate + && link->dpia_bw_alloc_config.nrd_max_lane_count) { + link->reported_link_cap.link_rate = link->dpia_bw_alloc_config.nrd_max_link_rate; + link->reported_link_cap.lane_count = link->dpia_bw_alloc_config.nrd_max_lane_count; + } + + link->dpia_bw_alloc_config.bw_alloc_enabled = true; + ret = true; + if (link->dc->debug.dpia_debug.bits.enable_usb4_bw_zero_alloc_patch) { /* - * During DP tunnel creation, CM preallocates BW and reduces estimated BW of other - * DPIA. CM release preallocation only when allocation is complete. Do zero alloc - * to make the CM to release preallocation and update estimated BW correctly for - * all DPIAs per host router + * During DP tunnel creation, the CM preallocates BW + * and reduces the estimated BW of other DPIAs. + * The CM releases the preallocation only when the allocation is complete. + * Perform a zero allocation to make the CM release the preallocation + * and correctly update the estimated BW for all DPIAs per host router. 
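link_dpia_send_bw_alloc_request above converts a request in kbps into the DPCD REQUESTED_BW register value by rounding up to the link's bw_granularity, then clamps against the estimated bandwidth. The stand-alone model below restates only the rounding; the granularity value of 4 (one register unit per 0.25 Gbps) is an illustrative assumption.

#include <stdint.h>
#include <stdio.h>

#define KBPS_TO_GBPS (1000 * 1000)	/* mirrors the driver's Kbps_TO_Gbps */

int main(void)
{
	uint32_t req_bw = 8100000;	/* 8.1 Gbps requested, in kbps */
	uint32_t bw_granularity = 4;	/* assumed: 1 Gbps / 4 = 250000 kbps per unit */

	uint32_t temp = req_bw * bw_granularity;
	uint32_t reg_val = temp / KBPS_TO_GBPS;	/* single-byte register in the driver */

	if (temp % KBPS_TO_GBPS)
		++reg_val;		/* round up to the next granularity unit */

	uint32_t granted = reg_val * (KBPS_TO_GBPS / bw_granularity);

	/* 8.1 Gbps rounds up to 33 units of 0.25 Gbps, i.e. 8.25 Gbps. */
	printf("reg_val=%u granted=%u kbps\n", reg_val, granted);
	return 0;
}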
*/ - // TODO: Zero allocation can be removed once the MSFT CM fix has been released link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0); - } else - DC_LOG_DEBUG("%s: link[%d] failed to enable DPTX BW allocation mode", __func__, link->link_index); - } + } + } else + DC_LOG_DEBUG("%s: link[%d] failed to enable DPTX BW allocation mode", __func__, link->link_index); return ret; } @@ -329,19 +273,17 @@ bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link) */ void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status) { + link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link); + if (status & DP_TUNNELING_BW_REQUEST_SUCCEEDED) { DC_LOG_DEBUG("%s: BW Allocation request succeeded on link(%d)", __func__, link->link_index); } else if (status & DP_TUNNELING_BW_REQUEST_FAILED) { - link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link); - DC_LOG_DEBUG("%s: BW Allocation request failed on link(%d) allocated/estimated BW=%d", __func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw); link_dpia_send_bw_alloc_request(link, link->dpia_bw_alloc_config.estimated_bw); } else if (status & DP_TUNNELING_ESTIMATED_BW_CHANGED) { - link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link); - DC_LOG_DEBUG("%s: Estimated BW changed on link(%d) new estimated BW=%d", __func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw); } @@ -359,24 +301,25 @@ void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pe { if (link && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling && link->dpia_bw_alloc_config.bw_alloc_enabled) { - //1. Hot Plug - if (link->hpd_status && peak_bw > 0) { + if (peak_bw > 0) { // If DP over USB4 then we need to check BW allocation link->dpia_bw_alloc_config.link_max_bw = peak_bw; link_dpia_send_bw_alloc_request(link, peak_bw); - } - //2. 
Cold Unplug - else if (!link->hpd_status) + } else dpia_bw_alloc_unplug(link); } } void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw) { - DC_LOG_DEBUG("%s: ENTER: link(%d), hpd_status(%d), current allocated_bw(%d), req_bw(%d)\n", + link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link); + + DC_LOG_DEBUG("%s: ENTER: link[%d] hpd(%d) Allocated_BW: %d Estimated_BW: %d Req_BW: %d", __func__, link->link_index, link->hpd_status, - link->dpia_bw_alloc_config.allocated_bw, req_bw); + link->dpia_bw_alloc_config.allocated_bw, + link->dpia_bw_alloc_config.estimated_bw, + req_bw); if (link_dp_is_bw_alloc_available(link)) link_dpia_send_bw_alloc_request(link, req_bw); @@ -384,73 +327,116 @@ void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r DC_LOG_DEBUG("%s: BW Allocation mode not available", __func__); } -bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias) +uint32_t link_dpia_get_dp_overhead(const struct dc_link *link) { - bool ret = true; - int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 }, host_router_total_dp_bw = 0; - uint8_t lowest_dpia_index, i, hr_index; + uint32_t link_dp_overhead = 0; - if (!num_dpias || num_dpias > MAX_DPIA_NUM) - return ret; + if ((link->type == dc_connection_mst_branch) && + !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { + /* For 8b/10b encoding: MTP is 64 time slots long, slot 0 is used for MTPH + * MST overhead is 1/64 of link bandwidth (excluding any overhead) + */ + const struct dc_link_settings *link_cap = dc_link_get_link_cap(link); - lowest_dpia_index = get_lowest_dpia_index(link[0]); + if (link_cap) { + uint32_t link_bw_in_kbps = (uint32_t)link_cap->link_rate * + (uint32_t)link_cap->lane_count * + LINK_RATE_REF_FREQ_IN_KHZ * 8; + link_dp_overhead = (link_bw_in_kbps / MST_TIME_SLOT_COUNT) + + ((link_bw_in_kbps % MST_TIME_SLOT_COUNT) ? 1 : 0); + } + } - /* get total Host Router BW with granularity for the given modes */ - for (i = 0; i < num_dpias; ++i) { - int granularity_Gbps = 0; - int bw_granularity = 0; + return link_dp_overhead; +} - if (!link[i]->dpia_bw_alloc_config.bw_alloc_enabled) - continue; +/* + * Aggregates the DPIA bandwidth usage for the respective USB4 Router. + * And then validate if the required bandwidth is within the router's capacity. + * + * @dc_validation_dpia_set: pointer to the dc_validation_dpia_set + * @count: number of DPIA validation sets + * + * return: true if validation is succeeded + */ +bool link_dpia_validate_dp_tunnel_bandwidth(const struct dc_validation_dpia_set *dpia_link_sets, uint8_t count) +{ + uint32_t granularity_Gbps; + const struct dc_link *link; + uint32_t link_bw_granularity; + uint32_t link_required_bw; + struct usb4_router_validation_set router_sets[MAX_HOST_ROUTERS_NUM] = { 0 }; + uint8_t i; + bool is_success = true; + uint8_t router_count = 0; + + if ((dpia_link_sets == NULL) || (count == 0)) + return is_success; + + // Iterate through each DP tunneling link (DPIA). + // Aggregate its bandwidth requirements onto the respective USB4 router. 
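For 8b/10b MST links, link_dpia_get_dp_overhead above charges one of the 64 MTP time slots to the MTP header and rounds up. A stand-alone worked example follows; the link_rate encoding in 0.27 Gbps units (HBR2 = 20) and LINK_RATE_REF_FREQ_IN_KHZ = 27000 are restated here as assumptions, since neither is defined in this hunk.

#include <stdint.h>
#include <stdio.h>

#define LINK_RATE_REF_FREQ_IN_KHZ 27000	/* assumed 27 MHz reference */
#define MST_TIME_SLOT_COUNT 64

int main(void)
{
	uint32_t link_rate = 20;	/* HBR2: 20 * 0.27 Gbps = 5.4 Gbps raw per lane */
	uint32_t lane_count = 4;

	/* Effective (post 8b/10b) data bandwidth of the link, in kbps. */
	uint32_t link_bw_kbps = link_rate * lane_count *
				LINK_RATE_REF_FREQ_IN_KHZ * 8;

	/* One of 64 MTP slots is the MTP header; round up like the driver. */
	uint32_t overhead_kbps = link_bw_kbps / MST_TIME_SLOT_COUNT +
				 (link_bw_kbps % MST_TIME_SLOT_COUNT ? 1 : 0);

	/* HBR2 x4: 17,280,000 kbps total, 270,000 kbps of MTP header overhead. */
	printf("link_bw=%u kbps, mst_overhead=%u kbps\n",
	       link_bw_kbps, overhead_kbps);
	return 0;
}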
+ for (i = 0; i < count; i++) { + link = dpia_link_sets[i].link; + link_required_bw = dpia_link_sets[i].required_bw; + const struct dc_tunnel_settings *dp_tunnel_settings = dpia_link_sets[i].tunnel_settings; + + if ((link == NULL) || (dp_tunnel_settings == NULL) || dp_tunnel_settings->bw_granularity == 0) + break; - if (link[i]->link_index < lowest_dpia_index) - continue; + if (link->type == dc_connection_mst_branch) + link_required_bw += link_dpia_get_dp_overhead(link); - granularity_Gbps = (Kbps_TO_Gbps / link[i]->dpia_bw_alloc_config.bw_granularity); - bw_granularity = (bw_needed_per_dpia[i] / granularity_Gbps) * granularity_Gbps + - ((bw_needed_per_dpia[i] % granularity_Gbps) ? granularity_Gbps : 0); + granularity_Gbps = (Kbps_TO_Gbps / dp_tunnel_settings->bw_granularity); + link_bw_granularity = (link_required_bw / granularity_Gbps) * granularity_Gbps + + ((link_required_bw % granularity_Gbps) ? granularity_Gbps : 0); - hr_index = (link[i]->link_index - lowest_dpia_index) / 2; - bw_needed_per_hr[hr_index] += bw_granularity; - } + // Find or add the USB4 router associated with the current DPIA link + for (uint8_t j = 0; j < MAX_HOST_ROUTERS_NUM; j++) { + if (router_sets[j].is_valid == false) { + router_sets[j].is_valid = true; + router_sets[j].cm_id = dp_tunnel_settings->cm_id; + router_count++; + } + + if (router_sets[j].cm_id == dp_tunnel_settings->cm_id) { + uint32_t remaining_bw = + dp_tunnel_settings->estimated_bw - dp_tunnel_settings->allocated_bw; + + router_sets[j].allocated_bw += dp_tunnel_settings->allocated_bw; + + if (remaining_bw > router_sets[j].remaining_bw) + router_sets[j].remaining_bw = remaining_bw; - /* validate against each Host Router max BW */ - for (hr_index = 0; hr_index < MAX_HR_NUM; ++hr_index) { - if (bw_needed_per_hr[hr_index]) { - host_router_total_dp_bw = get_host_router_total_dp_tunnel_bw(link[0]->dc, hr_index); - if (bw_needed_per_hr[hr_index] > host_router_total_dp_bw) { - ret = false; + // Get the max estimated BW within the same CM_ID + if (dp_tunnel_settings->estimated_bw > router_sets[j].estimated_bw) + router_sets[j].estimated_bw = dp_tunnel_settings->estimated_bw; + + router_sets[j].required_bw += link_bw_granularity; + router_sets[j].dpia_count++; break; } } } - return ret; -} + // Validate bandwidth for each unique router found. + for (i = 0; i < router_count; i++) { + uint32_t total_bw = 0; -int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link) -{ - int dp_overhead = 0, link_mst_overhead = 0; + if (router_sets[i].is_valid == false) + break; - if (!link_dp_is_bw_alloc_available(link)) - return dp_overhead; + // Determine the total available bandwidth for the current router based on aggregated data + if ((router_sets[i].dpia_count == 1) || (router_sets[i].allocated_bw == 0)) + total_bw = router_sets[i].estimated_bw; + else + total_bw = router_sets[i].allocated_bw + router_sets[i].remaining_bw; - /* if its mst link, add MTPH overhead */ - if ((link->type == dc_connection_mst_branch) && - !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { - /* For 8b/10b encoding: MTP is 64 time slots long, slot 0 is used for MTPH - * MST overhead is 1/64 of link bandwidth (excluding any overhead) - */ - const struct dc_link_settings *link_cap = - dc_link_get_link_cap(link); - uint32_t link_bw_in_kbps = (uint32_t)link_cap->link_rate * - (uint32_t)link_cap->lane_count * - LINK_RATE_REF_FREQ_IN_KHZ * 8; - link_mst_overhead = (link_bw_in_kbps / 64) + ((link_bw_in_kbps % 64) ? 
1 : 0); + if (router_sets[i].required_bw > total_bw) { + is_success = false; + break; + } } - /* add all the overheads */ - dp_overhead = link_mst_overhead; - - return dp_overhead; + return is_success; } + diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h index 801965b5f9a4..30cd8e2b9d35 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h @@ -26,12 +26,8 @@ #ifndef DC_INC_LINK_DP_DPIA_BW_H_ #define DC_INC_LINK_DP_DPIA_BW_H_ -#include "link.h" +#include "link_service.h" -/* Number of Host Routers per motherboard is 2 */ -#define MAX_HR_NUM 2 -/* Number of DPIA per host router is 2 */ -#define MAX_DPIA_NUM (MAX_HR_NUM * 2) /* * Host Router BW type @@ -42,6 +38,16 @@ enum bw_type { HOST_ROUTER_BW_INVALID, }; +struct usb4_router_validation_set { + bool is_valid; + uint8_t cm_id; + uint8_t dpia_count; + uint32_t required_bw; + uint32_t allocated_bw; + uint32_t estimated_bw; + uint32_t remaining_bw; +}; + /* * Enable USB4 DP BW allocation mode * @@ -74,25 +80,13 @@ void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw); /* - * Handle the validation of total BW here and confirm that the bw used by each - * DPIA doesn't exceed available BW for each host router (HR) - * - * @link[]: array of link pointer to all possible DPIA links - * @bw_needed[]: bw needed for each DPIA link based on timing - * @num_dpias: Number of DPIAs for the above 2 arrays. Should always be <= MAX_DPIA_NUM - * - * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE - */ -bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed, const unsigned int num_dpias); - -/* * Obtain all the DP overheads in dp tunneling for the dpia link * * @link: pointer to the dc_link struct instance * * return: DP overheads in DP tunneling */ -int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link); +uint32_t link_dpia_get_dp_overhead(const struct dc_link *link); /* * Handle DP BW allocation status register @@ -104,4 +98,15 @@ int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link); */ void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status); +/* + * Aggregates the DPIA bandwidth usage for the respective USB4 Router. 
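The router-level check in link_dpia_validate_dp_tunnel_bandwidth above sums granularity-rounded stream bandwidth per USB4 router (keyed by cm_id) and compares it against the router's capacity: the estimated bandwidth when only one DPIA is active or nothing has been allocated yet, otherwise the sum of allocations plus the largest remaining headroom. A stand-alone model of that final comparison, with illustrative kbps values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of the per-router capacity decision above. */
struct toy_router {
	uint8_t dpia_count;
	uint32_t required_bw;	/* sum of granularity-rounded stream BW */
	uint32_t allocated_bw;	/* sum of BW already allocated to its DPIAs */
	uint32_t estimated_bw;	/* largest estimated BW reported for this cm_id */
	uint32_t remaining_bw;	/* largest (estimated - allocated) headroom */
};

static bool toy_router_fits(const struct toy_router *r)
{
	uint32_t total_bw;

	if (r->dpia_count == 1 || r->allocated_bw == 0)
		total_bw = r->estimated_bw;
	else
		total_bw = r->allocated_bw + r->remaining_bw;

	return r->required_bw <= total_bw;
}

int main(void)
{
	/* Two tunnels already hold 8 Gbps with 4 Gbps headroom left;
	 * asking for 13 Gbps exceeds 12 Gbps and fails validation. */
	struct toy_router r = {
		.dpia_count = 2,
		.required_bw = 13000000,
		.allocated_bw = 8000000,
		.estimated_bw = 10000000,
		.remaining_bw = 4000000,
	};

	printf("fits: %s\n", toy_router_fits(&r) ? "yes" : "no");
	return 0;
}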
+ * + * @dc_validation_dpia_set: pointer to the dc_validation_dpia_set + * @count: number of DPIA validation sets + * + * return: true if validation is succeeded + */ +bool link_dpia_validate_dp_tunnel_bandwidth(const struct dc_validation_dpia_set *dpia_link_sets, uint8_t count); + #endif /* DC_INC_LINK_DP_DPIA_BW_H_ */ + diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h index ac33730fedd4..87516fb3b45a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h @@ -26,7 +26,7 @@ #ifndef __DC_LINK_DP_IRQ_HANDLER_H__ #define __DC_LINK_DP_IRQ_HANDLER_H__ -#include "link.h" +#include "link_service.h" bool dp_parse_link_loss_status( struct dc_link *link, union hpd_irq_data *hpd_irq_dpcd_data); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h index ab1c1f8f1f8b..58e154494582 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h @@ -26,7 +26,7 @@ #ifndef __DC_LINK_DP_PHY_H__ #define __DC_LINK_DP_PHY_H__ -#include "link.h" +#include "link_service.h" void dp_enable_link_phy( struct dc_link *link, const struct link_resource *link_res, diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c index 2dc1a660e504..08e2b572e0ff 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c @@ -1018,7 +1018,12 @@ static enum link_training_result dpcd_exit_training_mode(struct dc_link *link, e { enum dc_status status; uint8_t sink_status = 0; - uint8_t i; + uint32_t i; + uint8_t lttpr_count = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + uint32_t intra_hop_disable_time_ms = (lttpr_count > 0 ? 
lttpr_count * 300 : 10); + + // Each hop could theoretically take over 256ms (max 128b/132b AUX RD INTERVAL) + // To be safe, allow 300ms per LTTPR and 10ms for no LTTPR case /* clear training pattern set */ status = dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE); @@ -1028,7 +1033,7 @@ static enum link_training_result dpcd_exit_training_mode(struct dc_link *link, e if (encoding == DP_128b_132b_ENCODING) { /* poll for intra-hop disable */ - for (i = 0; i < 10; i++) { + for (i = 0; i < intra_hop_disable_time_ms; i++) { if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) && (sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0) break; @@ -1724,6 +1729,15 @@ bool perform_link_training_with_retries( break; } + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && + stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST && + !link->dc->config.enable_dpia_pre_training) { + if (j == (attempts - 1)) + do_fallback = true; + else + do_fallback = false; + } + if (j == (attempts - 1)) { DC_LOG_WARNING( "%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) @ spread = %x : fail reason:(%d)\n", diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h index 574b083e0936..ce52de22ab7a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h @@ -26,7 +26,7 @@ #ifndef __DC_LINK_DP_TRAINING_H__ #define __DC_LINK_DP_TRAINING_H__ -#include "link.h" +#include "link_service.h" bool perform_link_training_with_retries( const struct dc_link_settings *link_setting, diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h index 08d787a1e451..c2717c678c72 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h @@ -25,7 +25,7 @@ #ifndef __LINK_DPCD_H__ #define __LINK_DPCD_H__ -#include "link.h" +#include "link_service.h" #include "dpcd_defs.h" enum dc_status core_link_read_dpcd( diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index da74c2b5854f..5e806edbb9f6 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -161,6 +161,9 @@ bool edp_set_backlight_level_nits(struct dc_link *link, link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) return false; + if (link->is_dds && !link->dpcd_caps.panel_luminance_control) + return true; + // use internal backlight control if dmub capabilities are not present if (link->backlight_control_type == BACKLIGHT_CONTROL_VESA_AUX && !link->dc->caps.dmub_caps.aux_backlight_support) { @@ -173,6 +176,15 @@ bool edp_set_backlight_level_nits(struct dc_link *link, target_luminance = (struct target_luminance_value *)&backlight_millinits; + //make sure we disable AMD ABC first. 
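The 128b/132b exit-training poll in dpcd_exit_training_mode above now scales its budget with the repeater count: 300 ms per LTTPR hop, or 10 ms when the path has none. A stand-alone restatement of that budget; the DPCD PHY_REPEATER_CNT decoding is left out and lttpr_count is taken as already parsed.

#include <stdint.h>
#include <stdio.h>

/* Poll budget used while waiting for the intra-hop AUX reply indication
 * to clear: 300 ms per LTTPR hop, 10 ms when no LTTPR is present. */
static uint32_t intra_hop_disable_budget_ms(uint8_t lttpr_count)
{
	return lttpr_count > 0 ? lttpr_count * 300u : 10u;
}

int main(void)
{
	printf("0 LTTPRs -> %u ms\n", intra_hop_disable_budget_ms(0)); /* 10 */
	printf("2 LTTPRs -> %u ms\n", intra_hop_disable_budget_ms(2)); /* 600 */
	return 0;
}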
+ core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL, + &backlight_enable, sizeof(uint8_t)); + if (backlight_enable) { + backlight_enable = 0; + core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL, + &backlight_enable, 1); + } + core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &backlight_enable, sizeof(uint8_t)); @@ -193,10 +205,22 @@ bool edp_set_backlight_level_nits(struct dc_link *link, *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms; uint8_t backlight_control = isHDR ? 1 : 0; + uint8_t backlight_enable = 0; + // OLEDs have no PWM, they can only use AUX if (link->dpcd_sink_ext_caps.bits.oled == 1) backlight_control = 1; + //make sure we disable VESA ABC first. + core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, + &backlight_enable, sizeof(uint8_t)); + + if (backlight_enable & DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE) { + backlight_enable &= ~DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE; + core_link_write_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, + &backlight_enable, sizeof(backlight_enable)); + } + if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, (uint8_t *)(&dpcd_backlight_set), sizeof(dpcd_backlight_set)) != DC_OK) @@ -222,6 +246,8 @@ bool edp_get_backlight_level_nits(struct dc_link *link, link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) return false; + if (link->is_dds) + return false; if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK, dpcd_backlight_get.raw, sizeof(union dpcd_source_backlight_get))) @@ -248,6 +274,8 @@ bool edp_backlight_enable_aux(struct dc_link *link, bool enable) link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) return false; + if (link->is_dds) + return true; if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE, &backlight_enable, 1) != DC_OK) return false; @@ -675,6 +703,20 @@ bool edp_setup_psr(struct dc_link *link, if (!link) return false; + /* This is a workaround: some vendors require the source to + * read the PSR cap; otherwise, the vendor's PSR feature will + * fall back to its default behavior, causing a misconfiguration + * of this feature. + */ + if (link->panel_config.psr.read_psrcap_again) { + dm_helpers_dp_read_dpcd( + link->ctx, + link, + DP_PSR_SUPPORT, + &link->dpcd_caps.psr_info.psr_version, + sizeof(link->dpcd_caps.psr_info.psr_version)); + } + //Clear PSR cfg memset(&psr_configuration, 0, sizeof(psr_configuration)); dm_helpers_dp_write_dpcd( @@ -842,6 +884,8 @@ bool edp_setup_psr(struct dc_link *link, psr_context->dsc_slice_height = psr_config->dsc_slice_height; + psr_context->os_request_force_ffu = psr_config->os_request_force_ffu; + if (psr) { link->psr_settings.psr_feature_enabled = psr->funcs->psr_copy_settings(psr, link, psr_context, panel_inst); @@ -1001,6 +1045,8 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream replay_context.line_time_in_ns = lineTimeInNs; + replay_context.os_request_force_ffu = link->replay_settings.config.os_request_force_ffu; + link->replay_settings.replay_feature_enabled = replay->funcs->replay_copy_settings(replay, link, &replay_context, panel_inst); if (link->replay_settings.replay_feature_enabled) { @@ -1014,7 +1060,13 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream (uint8_t *)&(replay_config.raw), sizeof(uint8_t)); memset(&alpm_config, 0, sizeof(alpm_config)); - alpm_config.bits.ENABLE = 1; + alpm_config.bits.ENABLE = link->replay_settings.config.alpm_mode != DC_ALPM_UNSUPPORTED ? 
1 : 0; + + if (link->replay_settings.config.alpm_mode == DC_ALPM_AUXLESS) { + alpm_config.bits.ALPM_MODE_SEL = 1; + alpm_config.bits.ACDS_PERIOD_DURATION = 0; + } + dm_helpers_dp_write_dpcd( link->ctx, link, @@ -1173,6 +1225,16 @@ int edp_get_target_backlight_pwm(const struct dc_link *link) return (int) abm->funcs->get_target_backlight(abm); } +bool is_smartmux_suported(struct dc_link *link) +{ + if (link->dc->caps.is_apu) + return false; + if (!link->dc->config.smart_mux_version) + return false; + + return true; +} + static void edp_set_assr_enable(const struct dc *pDC, struct dc_link *link, struct link_resource *link_res, bool enable) { diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h index bcfa6ac5d4e7..62a6344e613e 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h @@ -25,11 +25,12 @@ #ifndef __DC_LINK_EDP_PANEL_CONTROL_H__ #define __DC_LINK_EDP_PANEL_CONTROL_H__ -#include "link.h" +#include "link_service.h" enum dp_panel_mode dp_get_panel_mode(struct dc_link *link); void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode); bool set_default_brightness_aux(struct dc_link *link); +bool is_smartmux_suported(struct dc_link *link); void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd); int edp_get_backlight_level(const struct dc_link *link); bool edp_get_backlight_level_nits(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h index 4fb526b264f9..af529328ba17 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h @@ -26,7 +26,7 @@ #ifndef __DC_LINK_HPD_H__ #define __DC_LINK_HPD_H__ -#include "link.h" +#include "link_service.h" enum hpd_source_id get_hpd_line(struct dc_link *link); /* diff --git a/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c index 259a98e4ee2c..2a422e223bf2 100644 --- a/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c +++ b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c @@ -284,7 +284,7 @@ void mcifwb2_dump_frame(struct mcif_wb *mcif_wb, REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, 0xf); - memcpy(dest_luma_buffer, luma_buffer, mcif_params->luma_pitch * dest_height); + memcpy(dest_luma_buffer, luma_buffer, (size_t)mcif_params->luma_pitch * dest_height); memcpy(dest_chroma_buffer, chroma_buffer, mcif_params->chroma_pitch * dest_height / 2); REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, 0x0); diff --git a/drivers/gpu/drm/amd/display/dc/mpc/Makefile b/drivers/gpu/drm/amd/display/dc/mpc/Makefile index 1e2e66508192..5402c3529f5e 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/mpc/Makefile @@ -68,5 +68,5 @@ MPC_DCN401 = dcn401_mpc.o AMD_DAL_MPC_DCN401 = $(addprefix $(AMDDALPATH)/dc/mpc/dcn401/,$(MPC_DCN401)) AMD_DISPLAY_FILES += $(AMD_DAL_MPC_DCN401) -endif +endif diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c index b4cea2b8cb2a..6f0e017a8ae2 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c @@ -30,7 +30,6 @@ #include 
"basics/conversion.h" #include "dcn10/dcn10_cm_common.h" #include "dc.h" -#include "dcn401/dcn401_mpc.h" #define REG(reg)\ mpc30->mpc_regs->reg @@ -879,7 +878,7 @@ void mpc32_set3dlut_ram10( } -static void mpc32_set_3dlut_mode( +void mpc32_set_3dlut_mode( struct mpc *mpc, enum dc_lut_mode mode, bool is_color_channel_12bits, @@ -1022,8 +1021,6 @@ static const struct mpc_funcs dcn32_mpc_funcs = { .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut, .get_mpc_out_mux = mpc1_get_mpc_out_mux, .set_bg_color = mpc1_set_bg_color, - .set_movable_cm_location = mpc401_set_movable_cm_location, - .populate_lut = mpc401_populate_lut, }; diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h index 9622518826c9..8c9b20bcca85 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h @@ -391,4 +391,12 @@ void mpc32_select_3dlut_ram( enum dc_lut_mode mode, bool is_color_channel_12bits, uint32_t mpcc_id); + +void mpc32_set_3dlut_mode( + struct mpc *mpc, + enum dc_lut_mode mode, + bool is_color_channel_12bits, + bool is_lut_size17x17x17, + uint32_t mpcc_id); + #endif //__DC_MPCC_DCN32_H__ diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c index 98cf0cbd59ba..e1a0308dee57 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c @@ -287,14 +287,7 @@ void mpc401_program_lut_read_write_control(struct mpc *mpc, const enum MCM_LUT_I } } -void mpc401_program_3dlut_size(struct mpc *mpc, bool is_17x17x17, int mpcc_id) -{ - struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc); - - REG_UPDATE(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_SIZE, is_17x17x17 ? 
0 : 1); -} - -static void program_gamut_remap( +void mpc_program_gamut_remap( struct mpc *mpc, unsigned int mpcc_id, const uint16_t *regval, @@ -426,7 +419,7 @@ void mpc401_set_gamut_remap( if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) { /* Bypass / Disable if type is bypass or hw */ - program_gamut_remap(mpc, mpcc_id, NULL, + mpc_program_gamut_remap(mpc, mpcc_id, NULL, adjust->mpcc_gamut_remap_block_id, MPCC_GAMUT_REMAP_MODE_SELECT_0); } else { struct fixed31_32 arr_matrix[12]; @@ -460,12 +453,12 @@ void mpc401_set_gamut_remap( else mode_select = MPCC_GAMUT_REMAP_MODE_SELECT_2; - program_gamut_remap(mpc, mpcc_id, arr_reg_val, + mpc_program_gamut_remap(mpc, mpcc_id, arr_reg_val, adjust->mpcc_gamut_remap_block_id, mode_select); } } -static void read_gamut_remap(struct mpc *mpc, +void mpc_read_gamut_remap(struct mpc *mpc, int mpcc_id, uint16_t *regval, enum mpcc_gamut_remap_id gamut_remap_block_id, @@ -561,9 +554,9 @@ void mpc401_get_gamut_remap(struct mpc *mpc, struct mpc_grph_gamut_adjustment *adjust) { uint16_t arr_reg_val[12] = {0}; - uint32_t mode_select; + uint32_t mode_select = MPCC_GAMUT_REMAP_MODE_SELECT_0; - read_gamut_remap(mpc, mpcc_id, arr_reg_val, adjust->mpcc_gamut_remap_block_id, &mode_select); + mpc_read_gamut_remap(mpc, mpcc_id, arr_reg_val, adjust->mpcc_gamut_remap_block_id, &mode_select); if (mode_select == MPCC_GAMUT_REMAP_MODE_SELECT_0) { adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; @@ -611,7 +604,6 @@ static const struct mpc_funcs dcn401_mpc_funcs = { .populate_lut = mpc401_populate_lut, .program_lut_read_write_control = mpc401_program_lut_read_write_control, .program_lut_mode = mpc401_program_lut_mode, - .program_3dlut_size = mpc401_program_3dlut_size, }; diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h index 8e35ebc603a9..fdc42f8ab3ff 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h @@ -221,11 +221,6 @@ void mpc401_program_lut_read_write_control( bool lut_bank_a, int mpcc_id); -void mpc401_program_3dlut_size( - struct mpc *mpc, - bool is_17x17x17, - int mpcc_id); - void mpc401_set_gamut_remap( struct mpc *mpc, int mpcc_id, @@ -241,6 +236,19 @@ void mpc401_update_3dlut_fast_load_select( int mpcc_id, int hubp_idx); +void mpc_program_gamut_remap( + struct mpc *mpc, + unsigned int mpcc_id, + const uint16_t *regval, + enum mpcc_gamut_remap_id gamut_remap_block_id, + enum mpcc_gamut_remap_mode_select mode_select); + +void mpc_read_gamut_remap(struct mpc *mpc, + int mpcc_id, + uint16_t *regval, + enum mpcc_gamut_remap_id gamut_remap_block_id, + uint32_t *mode_select); + void mpc401_update_3dlut_fast_load_select( struct mpc *mpc, int mpcc_id, diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h index d159e3ed3bb3..ead92ad78a23 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h @@ -62,6 +62,7 @@ SF(OTG0_OTG_CONTROL, OTG_DISABLE_POINT_CNTL, mask_sh),\ SF(OTG0_OTG_CONTROL, OTG_FIELD_NUMBER_CNTL, mask_sh),\ SF(OTG0_OTG_CONTROL, OTG_OUT_MUX, mask_sh),\ + SF(OTG0_OTG_CONTROL, OTG_CURRENT_MASTER_EN_STATE, mask_sh),\ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EN, mask_sh),\ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_LINE_NUM, mask_sh),\ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_POLARITY, mask_sh),\ diff --git 
a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c index 72bff94cb57d..52d5ea98c86b 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c @@ -162,6 +162,8 @@ static bool optc35_disable_crtc(struct timing_generator *optc) REG_WAIT(OTG_CLOCK_CONTROL, OTG_BUSY, 0, 1, 100000); + REG_WAIT(OTG_CONTROL, OTG_CURRENT_MASTER_EN_STATE, 0, 1, 100000); + optc1_clear_optc_underflow(optc); return true; @@ -428,6 +430,21 @@ static void optc35_set_long_vtotal( } } +static void optc35_wait_otg_disable(struct timing_generator *optc) +{ + struct optc *optc1; + uint32_t is_master_en; + + if (!optc || !optc->ctx) + return; + + optc1 = DCN10TG_FROM_TG(optc); + + REG_GET(OTG_CONTROL, OTG_MASTER_EN, &is_master_en); + if (!is_master_en) + REG_WAIT(OTG_CLOCK_CONTROL, OTG_CURRENT_MASTER_EN_STATE, 0, 1, 100000); +} + static const struct timing_generator_funcs dcn35_tg_funcs = { .validate_timing = optc1_validate_timing, .program_timing = optc1_program_timing, @@ -479,6 +496,7 @@ static const struct timing_generator_funcs dcn35_tg_funcs = { .set_odm_bypass = optc32_set_odm_bypass, .set_odm_combine = optc35_set_odm_combine, .get_optc_source = optc2_get_optc_source, + .wait_otg_disable = optc35_wait_otg_disable, .set_h_timing_div_manual_mode = optc32_set_h_timing_div_manual_mode, .set_out_mux = optc3_set_out_mux, .set_drr_trigger_window = optc3_set_drr_trigger_window, diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c index ff79c38287df..5af13706e601 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c @@ -226,6 +226,11 @@ bool optc401_disable_crtc(struct timing_generator *optc) REG_UPDATE(CONTROL, VTG0_ENABLE, 0); + // wait until CRTC_CURRENT_MASTER_EN_STATE == 0 + REG_WAIT(OTG_CONTROL, + OTG_CURRENT_MASTER_EN_STATE, + 0, 10, 15000); + /* CRTC disabled, so disable clock. */ REG_WAIT(OTG_CLOCK_CONTROL, OTG_BUSY, 0, diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h index f2ba76c1e0c0..782316348941 100644 --- a/drivers/gpu/drm/amd/display/dc/os_types.h +++ b/drivers/gpu/drm/amd/display/dc/os_types.h @@ -31,6 +31,7 @@ #include <linux/kgdb.h> #include <linux/delay.h> #include <linux/mm.h> +#include <linux/vmalloc.h> #include <asm/byteorder.h> diff --git a/drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.c b/drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.c index af21c0a27f86..72bd43f9bbe2 100644 --- a/drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.c @@ -79,16 +79,12 @@ void pg_cntl35_dsc_pg_control(struct pg_cntl *pg_cntl, unsigned int dsc_inst, bo uint32_t power_gate = power_on ? 0 : 1; uint32_t pwr_status = power_on ? 
0 : 2; uint32_t org_ip_request_cntl = 0; - bool block_enabled; - - /*need to enable dscclk regardless DSC_PG*/ - if (pg_cntl->ctx->dc->res_pool->dccg->funcs->enable_dsc && power_on) - pg_cntl->ctx->dc->res_pool->dccg->funcs->enable_dsc( - pg_cntl->ctx->dc->res_pool->dccg, dsc_inst); + bool block_enabled = false; + bool skip_pg = pg_cntl->ctx->dc->debug.ignore_pg || + pg_cntl->ctx->dc->debug.disable_dsc_power_gate || + pg_cntl->ctx->dc->idle_optimizations_allowed; - if (pg_cntl->ctx->dc->debug.ignore_pg || - pg_cntl->ctx->dc->debug.disable_dsc_power_gate || - pg_cntl->ctx->dc->idle_optimizations_allowed) + if (skip_pg && !power_on) return; block_enabled = pg_cntl35_dsc_pg_status(pg_cntl, dsc_inst); @@ -111,7 +107,7 @@ void pg_cntl35_dsc_pg_control(struct pg_cntl *pg_cntl, unsigned int dsc_inst, bo REG_WAIT(DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, - 1, 1000); + 1, 10000); break; case 1: /* DSC1 */ REG_UPDATE(DOMAIN17_PG_CONFIG, @@ -119,7 +115,7 @@ void pg_cntl35_dsc_pg_control(struct pg_cntl *pg_cntl, unsigned int dsc_inst, bo REG_WAIT(DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, - 1, 1000); + 1, 10000); break; case 2: /* DSC2 */ REG_UPDATE(DOMAIN18_PG_CONFIG, @@ -127,7 +123,7 @@ void pg_cntl35_dsc_pg_control(struct pg_cntl *pg_cntl, unsigned int dsc_inst, bo REG_WAIT(DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, - 1, 1000); + 1, 10000); break; case 3: /* DSC3 */ REG_UPDATE(DOMAIN19_PG_CONFIG, @@ -135,7 +131,7 @@ void pg_cntl35_dsc_pg_control(struct pg_cntl *pg_cntl, unsigned int dsc_inst, bo REG_WAIT(DOMAIN19_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, - 1, 1000); + 1, 10000); break; default: BREAK_TO_DEBUGGER(); @@ -144,12 +140,6 @@ void pg_cntl35_dsc_pg_control(struct pg_cntl *pg_cntl, unsigned int dsc_inst, bo if (dsc_inst < MAX_PIPES) pg_cntl->pg_pipe_res_enable[PG_DSC][dsc_inst] = power_on; - - if (pg_cntl->ctx->dc->res_pool->dccg->funcs->disable_dsc && !power_on) { - /*this is to disable dscclk*/ - pg_cntl->ctx->dc->res_pool->dccg->funcs->disable_dsc( - pg_cntl->ctx->dc->res_pool->dccg, dsc_inst); - } } static bool pg_cntl35_hubp_dpp_pg_status(struct pg_cntl *pg_cntl, unsigned int hubp_dpp_inst) @@ -189,11 +179,12 @@ void pg_cntl35_hubp_dpp_pg_control(struct pg_cntl *pg_cntl, unsigned int hubp_dp uint32_t pwr_status = power_on ? 
0 : 2; uint32_t org_ip_request_cntl; bool block_enabled; + bool skip_pg = pg_cntl->ctx->dc->debug.ignore_pg || + pg_cntl->ctx->dc->debug.disable_hubp_power_gate || + pg_cntl->ctx->dc->debug.disable_dpp_power_gate || + pg_cntl->ctx->dc->idle_optimizations_allowed; - if (pg_cntl->ctx->dc->debug.ignore_pg || - pg_cntl->ctx->dc->debug.disable_hubp_power_gate || - pg_cntl->ctx->dc->debug.disable_dpp_power_gate || - pg_cntl->ctx->dc->idle_optimizations_allowed) + if (skip_pg && !power_on) return; block_enabled = pg_cntl35_hubp_dpp_pg_status(pg_cntl, hubp_dpp_inst); @@ -213,22 +204,22 @@ void pg_cntl35_hubp_dpp_pg_control(struct pg_cntl *pg_cntl, unsigned int hubp_dp case 0: /* DPP0 & HUBP0 */ REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, power_gate); - REG_WAIT(DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000); + REG_WAIT(DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 10000); break; case 1: /* DPP1 & HUBP1 */ REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, power_gate); - REG_WAIT(DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000); + REG_WAIT(DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 10000); break; case 2: /* DPP2 & HUBP2 */ REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, power_gate); - REG_WAIT(DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000); + REG_WAIT(DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 10000); break; case 3: /* DPP3 & HUBP3 */ REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, power_gate); - REG_WAIT(DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000); + REG_WAIT(DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 10000); break; default: BREAK_TO_DEBUGGER(); @@ -501,6 +492,36 @@ void pg_cntl35_init_pg_status(struct pg_cntl *pg_cntl) pg_cntl->pg_res_enable[PG_DWB] = block_enabled; } +static void pg_cntl35_print_pg_status(struct pg_cntl *pg_cntl, const char *debug_func, const char *debug_log) +{ + int i = 0; + bool block_enabled = false; + + DC_LOG_DEBUG("%s: %s", debug_func, debug_log); + + DC_LOG_DEBUG("PG_CNTL status:\n"); + + block_enabled = pg_cntl35_io_clk_status(pg_cntl); + DC_LOG_DEBUG("ONO0=%d (DCCG, DIO, DCIO)\n", block_enabled ? 1 : 0); + + block_enabled = pg_cntl35_mem_status(pg_cntl); + DC_LOG_DEBUG("ONO1=%d (DCHUBBUB, DCHVM, DCHUBBUBMEM)\n", block_enabled ? 1 : 0); + + block_enabled = pg_cntl35_plane_otg_status(pg_cntl); + DC_LOG_DEBUG("ONO2=%d (MPC, OPP, OPTC, DWB)\n", block_enabled ? 1 : 0); + + block_enabled = pg_cntl35_hpo_pg_status(pg_cntl); + DC_LOG_DEBUG("ONO3=%d (HPO)\n", block_enabled ? 1 : 0); + + for (i = 0; i < pg_cntl->ctx->dc->res_pool->pipe_count; i++) { + block_enabled = pg_cntl35_hubp_dpp_pg_status(pg_cntl, i); + DC_LOG_DEBUG("ONO%d=%d (DCHUBP%d, DPP%d)\n", 4 + i * 2, block_enabled ? 1 : 0, i, i); + + block_enabled = pg_cntl35_dsc_pg_status(pg_cntl, i); + DC_LOG_DEBUG("ONO%d=%d (DSC%d)\n", 5 + i * 2, block_enabled ? 
1 : 0, i); + } +} + static const struct pg_cntl_funcs pg_cntl35_funcs = { .init_pg_status = pg_cntl35_init_pg_status, .dsc_pg_control = pg_cntl35_dsc_pg_control, @@ -511,7 +532,8 @@ static const struct pg_cntl_funcs pg_cntl35_funcs = { .mpcc_pg_control = pg_cntl35_mpcc_pg_control, .opp_pg_control = pg_cntl35_opp_pg_control, .optc_pg_control = pg_cntl35_optc_pg_control, - .dwb_pg_control = pg_cntl35_dwb_pg_control + .dwb_pg_control = pg_cntl35_dwb_pg_control, + .print_pg_status = pg_cntl35_print_pg_status }; struct pg_cntl *pg_cntl35_create( diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c index 84f73fdb0f95..c4b4dc3ad8c9 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c @@ -29,6 +29,7 @@ #include "stream_encoder.h" #include "resource.h" +#include "clk_mgr.h" #include "include/irq_service_interface.h" #include "virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" @@ -836,17 +837,24 @@ static enum dc_status build_mapped_resource( return DC_OK; } -static enum dc_status dce100_validate_bandwidth( +enum dc_status dce100_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i; bool at_least_one_pipe = false; + struct dc_stream_state *stream = NULL; + const uint32_t max_pix_clk_khz = max(dc->clk_mgr->clks.max_supported_dispclk_khz, 400000); for (i = 0; i < dc->res_pool->pipe_count; i++) { - if (context->res_ctx.pipe_ctx[i].stream) + stream = context->res_ctx.pipe_ctx[i].stream; + if (stream) { at_least_one_pipe = true; + + if (stream->timing.pix_clk_100hz >= max_pix_clk_khz * 10) + return DC_FAIL_BANDWIDTH_VALIDATE; + } } if (at_least_one_pipe) { @@ -854,7 +862,16 @@ static enum dc_status dce100_validate_bandwidth( context->bw_ctx.bw.dce.dispclk_khz = 681000; context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; } else { - context->bw_ctx.bw.dce.dispclk_khz = 0; + /* On DCE 6.0 and 6.4 the PLL0 is both the display engine clock and + * the DP clock, and shouldn't be turned off. Just select the display + * clock value from its low power mode. 
+ */ + if (dc->ctx->dce_version == DCE_VERSION_6_0 || + dc->ctx->dce_version == DCE_VERSION_6_4) + context->bw_ctx.bw.dce.dispclk_khz = 352000; + else + context->bw_ctx.bw.dce.dispclk_khz = 0; + context->bw_ctx.bw.dce.yclk_khz = 0; } @@ -881,7 +898,7 @@ static bool dce100_validate_surface_sets( return true; } -static enum dc_status dce100_validate_global( +enum dc_status dce100_validate_global( struct dc *dc, struct dc_state *context) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h index fecab7c560f5..dd150a4b4610 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h @@ -41,6 +41,15 @@ struct resource_pool *dce100_create_resource_pool( enum dc_status dce100_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps); +enum dc_status dce100_validate_global( + struct dc *dc, + struct dc_state *context); + +enum dc_status dce100_validate_bandwidth( + struct dc *dc, + struct dc_state *context, + enum dc_validate_mode validate_mode); + enum dc_status dce100_add_stream_to_ctx( struct dc *dc, struct dc_state *new_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c index f3d5baac11bf..cccde5a6f3cd 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c @@ -963,7 +963,7 @@ static enum dc_status build_mapped_resource( static enum dc_status dce110_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool result = false; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c index 4225cae68c10..869a8e515fc0 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c @@ -886,7 +886,7 @@ static enum dc_status build_mapped_resource( enum dc_status dce112_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool result = false; @@ -1111,12 +1111,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) &clks); dc->bw_vbios->low_yclk = bw_frc_to_fixed( - clks.clocks_in_khz[0] * memory_type_multiplier, 1000); + (int64_t)clks.clocks_in_khz[0] * memory_type_multiplier, 1000); dc->bw_vbios->mid_yclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels>>1] * memory_type_multiplier, + (int64_t)clks.clocks_in_khz[clks.num_levels>>1] * memory_type_multiplier, 1000); dc->bw_vbios->high_yclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels-1] * memory_type_multiplier, + (int64_t)clks.clocks_in_khz[clks.num_levels-1] * memory_type_multiplier, 1000); return; @@ -1152,12 +1152,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) * YCLK = UMACLK*m_memoryTypeMultiplier */ dc->bw_vbios->low_yclk = bw_frc_to_fixed( - mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000); + (int64_t)mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000); dc->bw_vbios->mid_yclk = bw_frc_to_fixed( - mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier, + (int64_t)mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier, 1000); 
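/* Editor's sketch, not part of the patch: the (int64_t) casts in this hunk
 * widen the clocks_in_khz * memory_type_multiplier product itself, so the
 * multiplication runs in 64-bit arithmetic before bw_frc_to_fixed() receives
 * it, instead of a 32-bit product being computed first and only then widened.
 * A minimal, self-contained illustration of that pattern (function name and
 * values made up for the example):
 */
#include <stdint.h>

static int64_t scaled_yclk_khz(uint32_t clocks_in_khz, uint32_t multiplier)
{
	/* Promoting one operand makes the whole multiplication 64-bit. */
	return (int64_t)clocks_in_khz * multiplier;
}
/* e.g. scaled_yclk_khz(1200000, 4) == 4800000, with no 32-bit intermediate. */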
dc->bw_vbios->high_yclk = bw_frc_to_fixed( - mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier, + (int64_t)mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier, 1000); /* Now notify PPLib/SMU about which Watermarks sets they should select diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h index 6221d749246d..3efc4c55d2d2 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h @@ -45,7 +45,7 @@ enum dc_status dce112_validate_with_context( enum dc_status dce112_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); enum dc_status dce112_add_stream_to_ctx( struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c index eb1e158d3436..540e04ec1e2d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c @@ -67,7 +67,7 @@ #include "reg_helper.h" #include "dce100/dce100_resource.h" -#include "link.h" +#include "link_service.h" #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f @@ -990,12 +990,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) memory_type_multiplier = MEMORY_TYPE_HBM; dc->bw_vbios->low_yclk = bw_frc_to_fixed( - mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000); + (int64_t)mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000); dc->bw_vbios->mid_yclk = bw_frc_to_fixed( - mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier, + (int64_t)mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier, 1000); dc->bw_vbios->high_yclk = bw_frc_to_fixed( - mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier, + (int64_t)mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier, 1000); /* Now notify PPLib/SMU about which Watermarks sets they should select diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c index d9ffdded5ce1..53c67ebe779f 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c @@ -34,6 +34,7 @@ #include "stream_encoder.h" #include "resource.h" +#include "clk_mgr.h" #include "include/irq_service_interface.h" #include "irq/dce60/irq_service_dce60.h" #include "dce110/dce110_timing_generator.h" @@ -373,7 +374,7 @@ static const struct resource_caps res_cap = { .num_timing_generator = 6, .num_audio = 6, .num_stream_encoder = 6, - .num_pll = 2, + .num_pll = 3, .num_ddc = 6, }; @@ -389,7 +390,7 @@ static const struct resource_caps res_cap_64 = { .num_timing_generator = 2, .num_audio = 2, .num_stream_encoder = 2, - .num_pll = 2, + .num_pll = 3, .num_ddc = 2, }; @@ -863,61 +864,6 @@ static void dce60_resource_destruct(struct dce110_resource_pool *pool) } } -static enum dc_status dce60_validate_bandwidth( - struct dc *dc, - struct dc_state *context, - bool fast_validate) -{ - int i; - bool at_least_one_pipe = false; - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - if (context->res_ctx.pipe_ctx[i].stream) - at_least_one_pipe = true; - } - - if (at_least_one_pipe) { - /* TODO 
implement when needed but for now hardcode max value*/ - context->bw_ctx.bw.dce.dispclk_khz = 681000; - context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; - } else { - context->bw_ctx.bw.dce.dispclk_khz = 0; - context->bw_ctx.bw.dce.yclk_khz = 0; - } - - return DC_OK; -} - -static bool dce60_validate_surface_sets( - struct dc_state *context) -{ - int i; - - for (i = 0; i < context->stream_count; i++) { - if (context->stream_status[i].plane_count == 0) - continue; - - if (context->stream_status[i].plane_count > 1) - return false; - - if (context->stream_status[i].plane_states[0]->format - >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) - return false; - } - - return true; -} - -static enum dc_status dce60_validate_global( - struct dc *dc, - struct dc_state *context) -{ - if (!dce60_validate_surface_sets(context)) - return DC_FAIL_SURFACE_VALIDATE; - - return DC_OK; -} - static void dce60_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); @@ -931,10 +877,10 @@ static const struct resource_funcs dce60_res_pool_funcs = { .destroy = dce60_destroy_resource_pool, .link_enc_create = dce60_link_encoder_create, .panel_cntl_create = dce60_panel_cntl_create, - .validate_bandwidth = dce60_validate_bandwidth, + .validate_bandwidth = dce100_validate_bandwidth, .validate_plane = dce100_validate_plane, .add_stream_to_ctx = dce100_add_stream_to_ctx, - .validate_global = dce60_validate_global, + .validate_global = dce100_validate_global, .find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link }; @@ -973,21 +919,24 @@ static bool dce60_construct( if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { pool->base.dp_clock_source = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); + /* DCE 6.0 and 6.4: PLL0 can only be used with DP. Don't initialize it here. */ pool->base.clock_sources[0] = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false); + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[1] = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); pool->base.clk_src_count = 2; } else { pool->base.dp_clock_source = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true); + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true); pool->base.clock_sources[0] = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); - pool->base.clk_src_count = 1; + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); + pool->base.clock_sources[1] = + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); + pool->base.clk_src_count = 2; } if (pool->base.dp_clock_source == NULL) { @@ -1365,21 +1314,24 @@ static bool dce64_construct( if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { pool->base.dp_clock_source = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); + /* DCE 6.0 and 6.4: PLL0 can only be used with DP. Don't initialize it here. 
*/ pool->base.clock_sources[0] = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], false); + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[1] = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false); + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); pool->base.clk_src_count = 2; } else { pool->base.dp_clock_source = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], true); + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true); pool->base.clock_sources[0] = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false); - pool->base.clk_src_count = 1; + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); + pool->base.clock_sources[1] = + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); + pool->base.clk_src_count = 2; } if (pool->base.dp_clock_source == NULL) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c index bd5811f97531..5b7769745202 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c @@ -32,6 +32,7 @@ #include "stream_encoder.h" #include "resource.h" +#include "clk_mgr.h" #include "include/irq_service_interface.h" #include "irq/dce80/irq_service_dce80.h" #include "dce110/dce110_timing_generator.h" @@ -869,61 +870,6 @@ static void dce80_resource_destruct(struct dce110_resource_pool *pool) } } -static enum dc_status dce80_validate_bandwidth( - struct dc *dc, - struct dc_state *context, - bool fast_validate) -{ - int i; - bool at_least_one_pipe = false; - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - if (context->res_ctx.pipe_ctx[i].stream) - at_least_one_pipe = true; - } - - if (at_least_one_pipe) { - /* TODO implement when needed but for now hardcode max value*/ - context->bw_ctx.bw.dce.dispclk_khz = 681000; - context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; - } else { - context->bw_ctx.bw.dce.dispclk_khz = 0; - context->bw_ctx.bw.dce.yclk_khz = 0; - } - - return DC_OK; -} - -static bool dce80_validate_surface_sets( - struct dc_state *context) -{ - int i; - - for (i = 0; i < context->stream_count; i++) { - if (context->stream_status[i].plane_count == 0) - continue; - - if (context->stream_status[i].plane_count > 1) - return false; - - if (context->stream_status[i].plane_states[0]->format - >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) - return false; - } - - return true; -} - -static enum dc_status dce80_validate_global( - struct dc *dc, - struct dc_state *context) -{ - if (!dce80_validate_surface_sets(context)) - return DC_FAIL_SURFACE_VALIDATE; - - return DC_OK; -} - static void dce80_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); @@ -937,10 +883,10 @@ static const struct resource_funcs dce80_res_pool_funcs = { .destroy = dce80_destroy_resource_pool, .link_enc_create = dce80_link_encoder_create, .panel_cntl_create = dce80_panel_cntl_create, - .validate_bandwidth = dce80_validate_bandwidth, + .validate_bandwidth = dce100_validate_bandwidth, .validate_plane = dce100_validate_plane, .add_stream_to_ctx = dce100_add_stream_to_ctx, - .validate_global = dce80_validate_global, + .validate_global = dce100_validate_global, 
.find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link }; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c index be4ade0853e9..652c05c35494 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c @@ -1129,12 +1129,12 @@ static void dcn10_destroy_resource_pool(struct resource_pool **pool) static enum dc_status dcn10_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool voltage_supported; DC_FP_START(); - voltage_supported = dcn_validate_bandwidth(dc, context, fast_validate); + voltage_supported = dcn_validate_bandwidth(dc, context, validate_mode); DC_FP_END(); return voltage_supported ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c index 3405be07f5e3..84b38d2d6967 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c @@ -85,7 +85,7 @@ #include "vm_helper.h" #include "link_enc_cfg.h" -#include "link.h" +#include "link_service.h" #define DC_LOGGER_INIT(logger) @@ -2007,7 +2007,7 @@ bool dcn20_fast_validate_bw( int *pipe_cnt_out, int *pipe_split_from, int *vlevel_out, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; int split[MAX_PIPES] = { 0 }; @@ -2021,7 +2021,7 @@ bool dcn20_fast_validate_bw( dcn20_merge_pipes_for_validate(dc, context); DC_FP_START(); - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode); DC_FP_END(); *pipe_cnt_out = pipe_cnt; @@ -2125,7 +2125,7 @@ validate_out: } enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool voltage_supported; display_e2e_pipe_params_st *pipes; @@ -2135,7 +2135,7 @@ enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, return DC_FAIL_BANDWIDTH_VALIDATE; DC_FP_START(); - voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate, pipes); + voltage_supported = dcn20_validate_bandwidth_fp(dc, context, validate_mode, pipes); DC_FP_END(); kfree(pipes); @@ -2736,6 +2736,8 @@ static bool dcn20_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 2; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h index c0e062c7407d..e997d35a8b86 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h @@ -119,7 +119,7 @@ void dcn20_set_mcif_arb_params( struct dc_state *context, display_e2e_pipe_params_st *pipes, int pipe_cnt); -enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate); +enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, enum dc_validate_mode validate_mode); void dcn20_merge_pipes_for_validate( struct dc *dc, struct dc_state *context); @@ -158,7 +158,7 @@ bool 
dcn20_fast_validate_bw( int *pipe_cnt_out, int *pipe_split_from, int *vlevel_out, - bool fast_validate); + enum dc_validate_mode validate_mode); enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream); enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c index 43fa2cb117f3..e4a1338d21e0 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c @@ -1285,6 +1285,8 @@ static bool dcn201_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 2; + dc->cap_funcs = cap_funcs; return true; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c index 9ab01b65b177..918742a42ded 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c @@ -769,7 +769,7 @@ bool dcn21_fast_validate_bw(struct dc *dc, int *pipe_cnt_out, int *pipe_split_from, int *vlevel_out, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; int split[MAX_PIPES] = { 0 }; @@ -783,7 +783,7 @@ bool dcn21_fast_validate_bw(struct dc *dc, dcn20_merge_pipes_for_validate(dc, context); DC_FP_START(); - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode); DC_FP_END(); *pipe_cnt_out = pipe_cnt; @@ -924,7 +924,7 @@ validate_out: * dcn20_validate_bandwidth in dcn20_resource.c. 
*/ static enum dc_status dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool voltage_supported; display_e2e_pipe_params_st *pipes; @@ -934,7 +934,7 @@ static enum dc_status dcn21_validate_bandwidth(struct dc *dc, struct dc_state *c return DC_FAIL_BANDWIDTH_VALIDATE; DC_FP_START(); - voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate, pipes); + voltage_supported = dcn21_validate_bandwidth_fp(dc, context, validate_mode, pipes); DC_FP_END(); kfree(pipes); @@ -1684,6 +1684,8 @@ static bool dcn21_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 2; + dc->cap_funcs = cap_funcs; return true; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h index f7ecc002c2f7..a017fd9854d1 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h @@ -51,6 +51,6 @@ bool dcn21_fast_validate_bw( int *pipe_cnt_out, int *pipe_split_from, int *vlevel_out, - bool fast_validate); + enum dc_validate_mode validate_mode); #endif /* _DCN21_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c index f631ae34e320..ff63f59ff928 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c @@ -60,7 +60,7 @@ #include "dml/display_mode_vba.h" #include "dcn30/dcn30_dccg.h" #include "dcn10/dcn10_resource.h" -#include "link.h" +#include "link_service.h" #include "dce/dce_panel_cntl.h" #include "dcn30/dcn30_dwb.h" @@ -1319,13 +1319,13 @@ static struct clock_source *dcn30_clock_source_create( int dcn30_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; DC_FP_START(); - dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); DC_FP_END(); for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { @@ -1627,7 +1627,7 @@ noinline bool dcn30_internal_validate_bw( display_e2e_pipe_params_st *pipes, int *pipe_cnt_out, int *vlevel_out, - bool fast_validate, + enum dc_validate_mode validate_mode, bool allow_self_refresh_only) { bool out = false; @@ -1646,7 +1646,7 @@ noinline bool dcn30_internal_validate_bw( context->bw_ctx.dml.vba.VoltageLevel = 0; context->bw_ctx.dml.vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; dc->res_pool->funcs->update_soc_for_wm_a(dc, context); - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode); if (!pipe_cnt) { out = true; @@ -1655,7 +1655,7 @@ noinline bool dcn30_internal_validate_bw( dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt); - if (!fast_validate || !allow_self_refresh_only) { + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING || !allow_self_refresh_only) { /* * DML favors voltage over p-state, but we're more interested in * supporting p-state over voltage. 
We can't support p-state in @@ -1669,7 +1669,7 @@ noinline bool dcn30_internal_validate_bw( vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge); } if (allow_self_refresh_only && - (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states || + (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING || vlevel == context->bw_ctx.dml.soc.num_states || vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported)) { /* * If mode is unsupported or there's still no p-state support @@ -1678,7 +1678,7 @@ noinline bool dcn30_internal_validate_bw( * We don't actually support prefetch mode 2, so require that we * at least support prefetch mode 1. */ - context->bw_ctx.dml.validate_max_state = fast_validate; + context->bw_ctx.dml.validate_max_state = (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING); context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank = dm_allow_self_refresh; @@ -1865,7 +1865,7 @@ noinline bool dcn30_internal_validate_bw( } if (repopulate_pipes) - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode); context->bw_ctx.dml.vba.VoltageLevel = vlevel; *vlevel_out = vlevel; *pipe_cnt_out = pipe_cnt; @@ -2037,7 +2037,7 @@ void dcn30_calculate_wm_and_dlg( enum dc_status dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; @@ -2055,7 +2055,7 @@ enum dc_status dcn30_validate_bandwidth(struct dc *dc, goto validate_fail; DC_FP_START(); - out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true); + out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode, true); DC_FP_END(); if (pipe_cnt == 0) @@ -2066,7 +2066,7 @@ enum dc_status dcn30_validate_bandwidth(struct dc *dc, BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (fast_validate) { + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } @@ -2192,7 +2192,7 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params j = 0; // create the final dcfclk and uclk table while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) { - if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) { + if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) { dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; } else { @@ -2586,6 +2586,8 @@ static bool dcn30_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h index 689d9bdace81..2c967fe55712 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h @@ -57,14 +57,14 @@ unsigned int dcn30_calc_max_scaled_time( unsigned int urgent_watermark); enum dc_status dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); bool dcn30_internal_validate_bw( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st 
*pipes, int *pipe_cnt_out, int *vlevel_out, - bool fast_validate, + enum dc_validate_mode validate_mode, bool allow_self_refresh_only); void dcn30_calculate_wm_and_dlg( struct dc *dc, struct dc_state *context, @@ -78,7 +78,7 @@ void dcn30_populate_dml_writeback_from_context( int dcn30_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); bool dcn30_acquire_post_bldn_3dlut( struct resource_context *res_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c index 121a86a59833..82a205a7c25c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c @@ -1706,6 +1706,8 @@ static bool dcn301_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; return true; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c index 012c5fd52cb1..61623cb518d9 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c @@ -47,7 +47,8 @@ #include "dcn10/dcn10_resource.h" -#include "link.h" +#include "link_service.h" + #include "dce/dce_abm.h" #include "dce/dce_audio.h" #include "dce/dce_aux.h" @@ -1481,6 +1482,8 @@ static bool dcn302_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c index a8d0b4686f9a..02b9a84f2db3 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c @@ -47,7 +47,7 @@ #include "dcn10/dcn10_resource.h" -#include "link.h" +#include "link_service.h" #include "dce/dce_abm.h" #include "dce/dce_audio.h" @@ -1414,6 +1414,8 @@ static bool dcn303_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c index 51ca0b2959fc..3ed7f50554e2 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c @@ -1616,14 +1616,14 @@ static bool is_dual_plane(enum surface_pixel_format format) int dcn31x_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { uint32_t pipe_cnt; int i; dc_assert_fp_enabled(); - pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); for (i = 0; i < pipe_cnt; i++) { pipes[i].pipe.src.gpuvm = 1; @@ -1641,7 +1641,7 @@ int dcn31x_populate_dml_pipes_from_context(struct dc *dc, int dcn31_populate_dml_pipes_from_context( 
struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; @@ -1649,7 +1649,7 @@ int dcn31_populate_dml_pipes_from_context( bool upscaled = false; DC_FP_START(); - dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); DC_FP_END(); for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { @@ -1760,7 +1760,7 @@ dcn31_set_mcif_arb_params(struct dc *dc, enum dc_status dcn31_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; @@ -1778,19 +1778,19 @@ enum dc_status dcn31_validate_bandwidth(struct dc *dc, goto validate_fail; DC_FP_START(); - out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true); + out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode, true); DC_FP_END(); - // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg + // Disable DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX to set min dcfclk in calculate_wm_and_dlg if (pipe_cnt == 0) - fast_validate = false; + validate_mode = DC_VALIDATE_MODE_AND_PROGRAMMING; if (!out) goto validate_fail; BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (fast_validate) { + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } @@ -1850,7 +1850,9 @@ static struct resource_funcs dcn31_res_pool_funcs = { .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn31_get_panel_config_defaults, .get_det_buffer_size = dcn31_get_det_buffer_size, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static struct clock_source *dcn30_clock_source_create( @@ -2202,6 +2204,8 @@ static bool dcn31_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = dcn3_1_ip.max_num_dpp; @@ -2231,3 +2235,35 @@ struct resource_pool *dcn31_create_resource_pool( kfree(pool); return NULL; } + +enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link, + struct dc_link_settings *link_setting, + uint8_t pipe_count, + struct pipe_ctx *pipes, + struct audio_output *audio_output) +{ + struct dc_state *state = link->dc->current_state; + int i; + +#if defined(CONFIG_DRM_AMD_DC_FP) + for (i = 0; i < state->stream_count; i++) + if (state->streams[i] && state->streams[i]->link && state->streams[i]->link == link) + link->dc->hwss.calculate_pix_rate_divider((struct dc *)link->dc, state, state->streams[i]); + + for (i = 0; i < pipe_count; i++) { + link->dc->res_pool->funcs->build_pipe_pix_clk_params(&pipes[i]); + + // Setup audio + if (pipes[i].stream_res.audio != NULL) + build_audio_output(state, &pipes[i], &audio_output[i]); + } +#else + /* This DCN requires rate divider updates and audio reprogramming to allow DP1<-->DP2 link rate switching, + * but the above will not compile on architectures without an FPU. 
+ */ + DC_LOG_WARNING("%s: DP1<-->DP2 link retraining will not work on this DCN on non-FPU platforms", __func__); + ASSERT(0); +#endif + + return DC_OK; +} diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h index dd82815d7efe..c32c85ef0ba4 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h @@ -39,7 +39,7 @@ struct dcn31_resource_pool { enum dc_status dcn31_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn31_calculate_wm_and_dlg( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, @@ -48,7 +48,7 @@ void dcn31_calculate_wm_and_dlg( int dcn31_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn31_populate_dml_writeback_from_context(struct dc *dc, struct resource_context *res_ctx, @@ -66,6 +66,12 @@ struct resource_pool *dcn31_create_resource_pool( unsigned int dcn31_get_det_buffer_size( const struct dc_state *context); +enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link, + struct dc_link_settings *link_setting, + uint8_t pipe_count, + struct pipe_ctx *pipes, + struct audio_output *audio_output); + /*temp: B0 specific before switch to dcn313 headers*/ #ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL #define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c index 8383e2e59be5..d4917a35b991 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c @@ -926,6 +926,8 @@ static const struct dc_debug_options debug_defaults_drv = { .seamless_boot_odm_combine = true, .enable_legacy_fast_update = true, .using_dml2 = false, + .disable_dsc_power_gate = true, + .min_disp_clk_khz = 100000, }; static const struct dc_panel_config panel_config_defaults = { @@ -1667,12 +1669,12 @@ static struct clock_source *dcn31_clock_source_create( static int dcn314_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int pipe_cnt; DC_FP_START(); - pipe_cnt = dcn314_populate_dml_pipes_from_context_fpu(dc, context, pipes, fast_validate); + pipe_cnt = dcn314_populate_dml_pipes_from_context_fpu(dc, context, pipes, validate_mode); DC_FP_END(); return pipe_cnt; @@ -1696,7 +1698,7 @@ static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_confi enum dc_status dcn314_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; @@ -1715,19 +1717,19 @@ enum dc_status dcn314_validate_bandwidth(struct dc *dc, DC_FP_START(); // do not support self refresh only - out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, false); + out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode, false); DC_FP_END(); - // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg + // Disable DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX to set min dcfclk in calculate_wm_and_dlg if (pipe_cnt == 0) - fast_validate 
= false; + validate_mode = DC_VALIDATE_MODE_AND_PROGRAMMING; if (!out) goto validate_fail; BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (fast_validate) { + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } @@ -1779,7 +1781,9 @@ static struct resource_funcs dcn314_res_pool_funcs = { .get_panel_config_defaults = dcn314_get_panel_config_defaults, .get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia, .get_det_buffer_size = dcn31_get_det_buffer_size, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static struct clock_source *dcn30_clock_source_create( @@ -2117,6 +2121,8 @@ static bool dcn314_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = dcn3_14_ip.max_num_dpp; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h index f8ba531d6342..ac9bb7f097d5 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h @@ -41,7 +41,7 @@ struct dcn314_resource_pool { enum dc_status dcn314_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); struct resource_pool *dcn314_create_resource_pool( const struct dc_init_data *init_data, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c index 6c2bb3f63be1..82cc78c291d8 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c @@ -1664,7 +1664,7 @@ static bool allow_pixel_rate_crb(struct dc *dc, struct dc_state *context) static int dcn315_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt, crb_idx, crb_pipes; struct resource_context *res_ctx = &context->res_ctx; @@ -1674,7 +1674,7 @@ static int dcn315_populate_dml_pipes_from_context( bool pixel_rate_crb = allow_pixel_rate_crb(dc, context); DC_FP_START(); - dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); DC_FP_END(); for (i = 0, pipe_cnt = 0, crb_pipes = 0; i < dc->res_pool->pipe_count; i++) { @@ -1844,7 +1844,9 @@ static struct resource_funcs dcn315_res_pool_funcs = { .get_panel_config_defaults = dcn315_get_panel_config_defaults, .get_power_profile = dcn315_get_power_profile, .get_det_buffer_size = dcn31_get_det_buffer_size, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static bool dcn315_resource_construct( @@ -2140,6 +2142,8 @@ static bool dcn315_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = 
dcn3_15_ip.max_num_dpp; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c index 568094827212..636110e48d01 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c @@ -1610,7 +1610,7 @@ static bool is_dual_plane(enum surface_pixel_format format) static int dcn316_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; @@ -1618,7 +1618,7 @@ static int dcn316_populate_dml_pipes_from_context( const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_16_MIN_COMPBUF_SIZE_KB; DC_FP_START(); - dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); DC_FP_END(); for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { @@ -1720,7 +1720,9 @@ static struct resource_funcs dcn316_res_pool_funcs = { .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn316_get_panel_config_defaults, .get_det_buffer_size = dcn31_get_det_buffer_size, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static bool dcn316_resource_construct( @@ -2008,6 +2010,8 @@ static bool dcn316_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = dcn3_16_ip.max_num_dpp; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index bb0dae0be5b8..3965a7f1b64b 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -69,7 +69,7 @@ #include "dml/display_mode_vba.h" #include "dcn32/dcn32_dccg.h" #include "dcn10/dcn10_resource.h" -#include "link.h" +#include "link_service.h" #include "dcn31/dcn31_panel_cntl.h" #include "dcn30/dcn30_dwb.h" @@ -739,6 +739,7 @@ static const struct dc_debug_options debug_defaults_drv = { .fpo_vactive_min_active_margin_us = 200, .fpo_vactive_max_blank_us = 1000, .enable_legacy_fast_update = false, + .disable_stutter_for_wm_program = true }; static struct dce_aux *dcn32_aux_engine_create( @@ -1742,7 +1743,7 @@ void dcn32_add_phantom_pipes(struct dc *dc, struct dc_state *context, } } -static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_validate) +static bool dml1_validate(struct dc *dc, struct dc_state *context, enum dc_validate_mode validate_mode) { bool out = false; @@ -1767,7 +1768,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val goto validate_fail; DC_FP_START(); - out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate); + out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode); DC_FP_END(); if (pipe_cnt == 0) @@ -1778,7 +1779,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val 
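The fast_validate conversion running through the dcn31x/dcn32 hunks above and below replaces the old boolean with a three-valued enum dc_validate_mode, where DC_VALIDATE_MODE_AND_PROGRAMMING corresponds to the former full-validation path and the other two modes cover the former fast path. The sketch below is illustrative only: the enumerator names appear in the patch, but their order, their numeric values and the mapping helper are assumptions, not part of the change.

/* illustrative sketch, not from the patch: enumerator order/values are assumed,
 * bool is the kernel's bool from <linux/types.h>
 */
enum dc_validate_mode {
	DC_VALIDATE_MODE_ONLY,			/* mode-support check only */
	DC_VALIDATE_MODE_AND_STATE_INDEX,	/* also selects a state/voltage index */
	DC_VALIDATE_MODE_AND_PROGRAMMING,	/* full validation incl. wm/dlg programming */
};

/* hypothetical shim showing how the old bool maps onto the new enum */
static inline enum dc_validate_mode dc_validate_mode_from_fast(bool fast_validate)
{
	return fast_validate ? DC_VALIDATE_MODE_ONLY :
			       DC_VALIDATE_MODE_AND_PROGRAMMING;
}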
BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (fast_validate) { + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } @@ -1809,7 +1810,7 @@ validate_out: enum dc_status dcn32_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { unsigned int i; enum dc_status status; @@ -1827,11 +1828,11 @@ enum dc_status dcn32_validate_bandwidth(struct dc *dc, if (dc->debug.using_dml2) status = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; else - status = dml1_validate(dc, context, fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + status = dml1_validate(dc, context, validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; - if (!fast_validate && status == DC_OK && dc_state_is_subvp_in_use(context)) { + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_OK && dc_state_is_subvp_in_use(context)) { /* check new stream configuration still supports cursor if subvp used */ for (i = 0; i < context->stream_count; i++) { stream = context->streams[i]; @@ -1846,14 +1847,14 @@ enum dc_status dcn32_validate_bandwidth(struct dc *dc, }; } - if (!fast_validate && status == DC_FAIL_HW_CURSOR_SUPPORT) { + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_FAIL_HW_CURSOR_SUPPORT) { /* attempt to validate again with subvp disabled due to cursor */ if (dc->debug.using_dml2) status = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; else - status = dml1_validate(dc, context, fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + status = dml1_validate(dc, context, validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; } return status; @@ -1862,7 +1863,7 @@ enum dc_status dcn32_validate_bandwidth(struct dc *dc, int dcn32_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; @@ -1878,7 +1879,7 @@ int dcn32_populate_dml_pipes_from_context( int num_subvp_none = 0; int odm_slice_count; - dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); /* For single display subvp, look for subvp main so if we have phantom * pipe, we can set odm policy to match main pipe @@ -1960,7 +1961,7 @@ int dcn32_populate_dml_pipes_from_context( /* Only populate DML input with subvp info for full updates. * This is just a workaround -- needs a proper fix. 
*/ - if (!fast_validate) { + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) { switch (dc_state_get_pipe_subvp_type(context, pipe)) { case SUBVP_MAIN: pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_sub_viewport; @@ -2061,21 +2062,15 @@ void dcn32_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context, static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { - struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp; - - memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options)); - DC_FP_START(); dcn32_update_bw_bounding_box_fpu(dc, bw_params); - dml2_opt->use_clock_dc_limits = false; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) - dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2); + dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); - dml2_opt->use_clock_dc_limits = true; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source) - dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source); + dml2_reinit(dc, &dc->dml2_dc_power_options, &dc->current_state->bw_ctx.dml2_dc_power_source); DC_FP_END(); } @@ -2257,7 +2252,7 @@ static bool dcn32_resource_construct( dc->caps.color.dpp.gamma_corr = 1; dc->caps.color.dpp.dgam_rom_for_yuv = 0; - dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.hw_3d_lut = 0; dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1 // no OGAM ROM on DCN2 and later ASICs dc->caps.color.dpp.ogam_rom_caps.srgb = 0; @@ -2276,6 +2271,7 @@ static bool dcn32_resource_construct( dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.color.mpc.preblend = true; /* Use pipe context based otg sync logic */ dc->config.use_pipe_ctx_sync_logic = true; @@ -2505,6 +2501,8 @@ static bool dcn32_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { @@ -2519,7 +2517,6 @@ static bool dcn32_resource_construct( } dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; - dc->dml2_options.use_native_pstate_optimization = false; dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = true; @@ -2551,6 +2548,10 @@ static bool dcn32_resource_construct( if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev) && (dc->config.sdpif_request_limit_words_per_umc == 0)) dc->config.sdpif_request_limit_words_per_umc = 16; + /* init DC limited DML2 options */ + memcpy(&dc->dml2_dc_power_options, &dc->dml2_options, sizeof(struct dml2_configuration_options)); + dc->dml2_dc_power_options.use_clock_dc_limits = true; + return true; create_fail: @@ -2852,7 +2853,7 @@ struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_opp_head( free_pipe->plane_res.xfm = pool->transforms[free_pipe_idx]; free_pipe->plane_res.dpp = pool->dpps[free_pipe_idx]; free_pipe->plane_res.mpcc_inst = pool->dpps[free_pipe_idx]->inst; - free_pipe->hblank_borrow = otg_master->hblank_borrow; + free_pipe->dsc_padding_params = otg_master->dsc_padding_params; if (free_pipe->stream->timing.flags.DSC == 1) { dcn20_acquire_dsc(free_pipe->stream->ctx->dc, &new_ctx->res_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h index d60ed77eda80..99f0432288b4 100644 --- 
a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h @@ -100,12 +100,12 @@ void dcn32_add_phantom_pipes(struct dc *dc, enum dc_status dcn32_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); int dcn32_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn32_calculate_wm_and_dlg( struct dc *dc, struct dc_state *context, @@ -1141,7 +1141,8 @@ unsigned int dcn32_get_max_hw_cursor_size(const struct dc *dc, SRI_ARR(DCN_SURF1_TTU_CNTL1, HUBPREQ, id), \ SRI_ARR(DCN_CUR0_TTU_CNTL0, HUBPREQ, id), \ SRI_ARR(DCN_CUR0_TTU_CNTL1, HUBPREQ, id), \ - SRI_ARR(HUBP_CLK_CNTL, HUBP, id) + SRI_ARR(HUBP_CLK_CNTL, HUBP, id), \ + SRI_ARR(HUBPRET_READ_LINE_VALUE, HUBPRET, id) #define HUBP_REG_LIST_DCN2_COMMON_RI(id) \ HUBP_REG_LIST_DCN_RI(id), HUBP_REG_LIST_DCN_VM_RI(id), \ SRI_ARR(PREFETCH_SETTINGS, HUBPREQ, id), \ @@ -1229,7 +1230,8 @@ unsigned int dcn32_get_max_hw_cursor_size(const struct dc *dc, SR(DCHUBBUB_ARB_MALL_CNTL), \ SR(DCN_VM_FAULT_ADDR_MSB), SR(DCN_VM_FAULT_ADDR_LSB), \ SR(DCN_VM_FAULT_CNTL), SR(DCN_VM_FAULT_STATUS), \ - SR(SDPIF_REQUEST_RATE_LIMIT) + SR(SDPIF_REQUEST_RATE_LIMIT), \ + SR(DCHUBBUB_SDPIF_CFG0) /* DCCG */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c index 7db1f7a5613f..ad214986f7ac 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -72,7 +72,7 @@ #include "dml/display_mode_vba.h" #include "dcn32/dcn32_dccg.h" #include "dcn10/dcn10_resource.h" -#include "link.h" +#include "link_service.h" #include "dcn31/dcn31_panel_cntl.h" #include "dcn30/dcn30_dwb.h" @@ -1580,21 +1580,15 @@ static struct dc_cap_funcs cap_funcs = { static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { - struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp; - - memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options)); - DC_FP_START(); dcn321_update_bw_bounding_box_fpu(dc, bw_params); - dml2_opt->use_clock_dc_limits = false; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) - dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2); + dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); - dml2_opt->use_clock_dc_limits = true; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source) - dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source); + dml2_reinit(dc, &dc->dml2_dc_power_options, &dc->current_state->bw_ctx.dml2_dc_power_source); DC_FP_END(); } @@ -1761,8 +1755,8 @@ static bool dcn321_resource_construct( dc->caps.color.dpp.gamma_corr = 1; dc->caps.color.dpp.dgam_rom_for_yuv = 0; - dc->caps.color.dpp.hw_3d_lut = 1; - dc->caps.color.dpp.ogam_ram = 1; + dc->caps.color.dpp.hw_3d_lut = 0; + dc->caps.color.dpp.ogam_ram = 0; // no OGAM ROM on DCN2 and later ASICs dc->caps.color.dpp.ogam_rom_caps.srgb = 0; dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; @@ -1780,6 +1774,7 @@ static bool dcn321_resource_construct( dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.color.mpc.preblend = true; /* Use pipe context based otg sync logic */ 
dc->config.use_pipe_ctx_sync_logic = true; @@ -2004,6 +1999,8 @@ static bool dcn321_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { @@ -2018,7 +2015,6 @@ static bool dcn321_resource_construct( } dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; - dc->dml2_options.use_native_pstate_optimization = false; dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = true; @@ -2046,6 +2042,10 @@ static bool dcn321_resource_construct( dc->dml2_options.max_segments_per_hubp = 18; dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE; + /* init DC limited DML2 options */ + memcpy(&dc->dml2_dc_power_options, &dc->dml2_options, sizeof(struct dml2_configuration_options)); + dc->dml2_dc_power_options.use_clock_dc_limits = true; + return true; create_fail: diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index e01aa2f2e13e..07552445e424 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -61,7 +61,7 @@ #include "dcn31/dcn31_hpo_dp_stream_encoder.h" #include "dcn31/dcn31_hpo_dp_link_encoder.h" #include "dcn32/dcn32_hpo_dp_link_encoder.h" -#include "link.h" +#include "link_service.h" #include "dcn31/dcn31_apg.h" #include "dcn32/dcn32_dio_link_encoder.h" #include "dcn31/dcn31_vpg.h" @@ -1734,15 +1734,15 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config static enum dc_status dcn35_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; out = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate); + validate_mode); - if (fast_validate) + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) return out ? 
DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; DC_FP_START(); @@ -1786,7 +1786,9 @@ static struct resource_funcs dcn35_res_pool_funcs = { .get_panel_config_defaults = dcn35_get_panel_config_defaults, .get_preferred_eng_id_dpia = dcn35_get_preferred_eng_id_dpia, .get_det_buffer_size = dcn31_get_det_buffer_size, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static bool dcn35_resource_construct( @@ -1874,7 +1876,7 @@ static bool dcn35_resource_construct( dc->caps.color.dpp.gamma_corr = 1; dc->caps.color.dpp.dgam_rom_for_yuv = 0; - dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.hw_3d_lut = 0; dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1 // no OGAM ROM on DCN301 dc->caps.color.dpp.ogam_rom_caps.srgb = 0; @@ -1893,6 +1895,7 @@ static bool dcn35_resource_construct( dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.color.mpc.preblend = true; dc->caps.num_of_host_routers = 2; dc->caps.num_of_dpias_per_host_router = 2; @@ -2151,12 +2154,13 @@ static bool dcn35_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = pool->base.pipe_count; dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; - dc->dml2_options.use_native_pstate_optimization = true; dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = false; if (dc->config.EnableMinDispClkODM) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index 4ebe4e00a4f8..cb0478a9a34d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -40,7 +40,7 @@ #include "dcn31/dcn31_hpo_dp_stream_encoder.h" #include "dcn31/dcn31_hpo_dp_link_encoder.h" #include "dcn32/dcn32_hpo_dp_link_encoder.h" -#include "link.h" +#include "link_service.h" #include "dcn31/dcn31_apg.h" #include "dcn32/dcn32_dio_link_encoder.h" #include "dcn31/dcn31_vpg.h" @@ -1714,15 +1714,15 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config static enum dc_status dcn351_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; out = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate); + validate_mode); - if (fast_validate) + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) return out ? 
DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; DC_FP_START(); @@ -1758,7 +1758,9 @@ static struct resource_funcs dcn351_res_pool_funcs = { .get_panel_config_defaults = dcn35_get_panel_config_defaults, .get_preferred_eng_id_dpia = dcn351_get_preferred_eng_id_dpia, .get_det_buffer_size = dcn31_get_det_buffer_size, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static bool dcn351_resource_construct( @@ -1846,7 +1848,7 @@ static bool dcn351_resource_construct( dc->caps.color.dpp.gamma_corr = 1; dc->caps.color.dpp.dgam_rom_for_yuv = 0; - dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.hw_3d_lut = 0; dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1 // no OGAM ROM on DCN301 dc->caps.color.dpp.ogam_rom_caps.srgb = 0; @@ -1865,6 +1867,7 @@ static bool dcn351_resource_construct( dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.color.mpc.preblend = true; dc->caps.num_of_host_routers = 2; dc->caps.num_of_dpias_per_host_router = 2; @@ -2122,13 +2125,14 @@ static bool dcn351_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = pool->base.pipe_count; dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; - dc->dml2_options.use_native_pstate_optimization = true; dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = false; if (dc->config.EnableMinDispClkODM) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c index db36b8f9ce65..126090c9bb8a 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c @@ -40,7 +40,7 @@ #include "dcn31/dcn31_hpo_dp_stream_encoder.h" #include "dcn31/dcn31_hpo_dp_link_encoder.h" #include "dcn32/dcn32_hpo_dp_link_encoder.h" -#include "link.h" +#include "link_service.h" #include "dcn31/dcn31_apg.h" #include "dcn32/dcn32_dio_link_encoder.h" #include "dcn31/dcn31_vpg.h" @@ -1715,15 +1715,15 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config static enum dc_status dcn35_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; out = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate); + validate_mode); - if (fast_validate) + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) return out ? 
DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; DC_FP_START(); @@ -1759,7 +1759,9 @@ static struct resource_funcs dcn36_res_pool_funcs = { .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn35_get_panel_config_defaults, .get_preferred_eng_id_dpia = dcn36_get_preferred_eng_id_dpia, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static bool dcn36_resource_construct( @@ -1847,7 +1849,7 @@ static bool dcn36_resource_construct( dc->caps.color.dpp.gamma_corr = 1; dc->caps.color.dpp.dgam_rom_for_yuv = 0; - dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.hw_3d_lut = 0; dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1 // no OGAM ROM on DCN301 dc->caps.color.dpp.ogam_rom_caps.srgb = 0; @@ -1866,6 +1868,7 @@ static bool dcn36_resource_construct( dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.color.mpc.preblend = true; dc->caps.num_of_host_routers = 2; dc->caps.num_of_dpias_per_host_router = 2; @@ -2124,12 +2127,13 @@ static bool dcn36_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = pool->base.pipe_count; dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; - dc->dml2_options.use_native_pstate_optimization = true; dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = false; if (dc->config.EnableMinDispClkODM) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c index f420c4dafa03..1d18807e4749 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c @@ -50,7 +50,7 @@ #include "dml/display_mode_vba.h" #include "dcn401/dcn401_dccg.h" #include "dcn10/dcn10_resource.h" -#include "link.h" +#include "link_service.h" #include "link_enc_cfg.h" #include "dcn31/dcn31_panel_cntl.h" @@ -70,7 +70,6 @@ #include "dml/dcn30/display_mode_vba_30.h" #include "vm_helper.h" #include "dcn20/dcn20_vmid.h" -#include "dml/dcn401/dcn401_fpu.h" #include "dc_state_priv.h" @@ -709,6 +708,7 @@ static const struct dc_debug_options debug_defaults_drv = { }, .use_max_lb = true, .force_disable_subvp = false, + .disable_force_pstate_allow_on_hw_release = false, .exit_idle_opt_for_cursor_updates = true, .using_dml2 = true, .using_dml21 = true, @@ -1608,10 +1608,6 @@ static struct dc_cap_funcs cap_funcs = { static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { - struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp; - - memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options)); - /* re-calculate the available MALL size if required */ if (bw_params->num_channels > 0) { dc->caps.max_cab_allocation_bytes = dcn401_calc_num_avail_chans_for_mall( @@ -1622,15 +1618,11 @@ static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b DC_FP_START(); - dcn401_update_bw_bounding_box_fpu(dc, bw_params); - - dml2_opt->use_clock_dc_limits = false; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) - dml2_reinit(dc, dml2_opt, 
&dc->current_state->bw_ctx.dml2); + dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); - dml2_opt->use_clock_dc_limits = true; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source) - dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source); + dml2_reinit(dc, &dc->dml2_dc_power_options, &dc->current_state->bw_ctx.dml2_dc_power_source); DC_FP_END(); } @@ -1644,7 +1636,7 @@ enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_sta enum dc_status dcn401_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { unsigned int i; enum dc_status status = DC_OK; @@ -1662,9 +1654,9 @@ enum dc_status dcn401_validate_bandwidth(struct dc *dc, if (dc->debug.using_dml2) status = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; - if (!fast_validate && status == DC_OK && dc_state_is_subvp_in_use(context)) { + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_OK && dc_state_is_subvp_in_use(context)) { /* check new stream configuration still supports cursor if subvp used */ for (i = 0; i < context->stream_count; i++) { stream = context->streams[i]; @@ -1679,12 +1671,12 @@ enum dc_status dcn401_validate_bandwidth(struct dc *dc, }; } - if (!fast_validate && status == DC_FAIL_HW_CURSOR_SUPPORT) { + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_FAIL_HW_CURSOR_SUPPORT) { /* attempt to validate again with subvp disabled due to cursor */ if (dc->debug.using_dml2) status = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + validate_mode) ? 
DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; } return status; @@ -1707,6 +1699,9 @@ static void dcn401_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx) pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; + if (pipe_ctx->dsc_padding_params.dsc_hactive_padding != 0) + pixel_clk_params->requested_pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz; + if (!pipe_ctx->stream->ctx->dc->config.unify_link_enc_assignment) link_enc = link_enc_cfg_get_link_enc(link); if (link_enc) @@ -1957,8 +1952,30 @@ static bool dcn401_resource_construct( dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.color.mpc.preblend = true; dc->config.use_spl = true; dc->config.prefer_easf = true; + + dc->config.dcn_sharpness_range.sdr_rgb_min = 0; + dc->config.dcn_sharpness_range.sdr_rgb_max = 1750; + dc->config.dcn_sharpness_range.sdr_rgb_mid = 750; + dc->config.dcn_sharpness_range.sdr_yuv_min = 0; + dc->config.dcn_sharpness_range.sdr_yuv_max = 3500; + dc->config.dcn_sharpness_range.sdr_yuv_mid = 1500; + dc->config.dcn_sharpness_range.hdr_rgb_min = 0; + dc->config.dcn_sharpness_range.hdr_rgb_max = 2750; + dc->config.dcn_sharpness_range.hdr_rgb_mid = 1500; + + dc->config.dcn_override_sharpness_range.sdr_rgb_min = 0; + dc->config.dcn_override_sharpness_range.sdr_rgb_max = 3250; + dc->config.dcn_override_sharpness_range.sdr_rgb_mid = 1250; + dc->config.dcn_override_sharpness_range.sdr_yuv_min = 0; + dc->config.dcn_override_sharpness_range.sdr_yuv_max = 3500; + dc->config.dcn_override_sharpness_range.sdr_yuv_mid = 1500; + dc->config.dcn_override_sharpness_range.hdr_rgb_min = 0; + dc->config.dcn_override_sharpness_range.hdr_rgb_max = 2750; + dc->config.dcn_override_sharpness_range.hdr_rgb_mid = 1500; + dc->config.dc_mode_clk_limit_support = true; dc->config.enable_windowed_mpo_odm = true; dc->config.set_pipe_unlock_order = true; /* Need to ensure DET gets freed before allocating */ @@ -2177,6 +2194,8 @@ static bool dcn401_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { @@ -2195,7 +2214,6 @@ static bool dcn401_resource_construct( dc->config.sdpif_request_limit_words_per_umc = 16; dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; - dc->dml2_options.use_native_pstate_optimization = false; dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = true; dc->dml2_options.map_dc_pipes_with_callbacks = true; @@ -2228,6 +2246,10 @@ static bool dcn401_resource_construct( /* SPL */ dc->caps.scl_caps.sharpener_support = true; + /* init DC limited DML2 options */ + memcpy(&dc->dml2_dc_power_options, &dc->dml2_options, sizeof(struct dml2_configuration_options)); + dc->dml2_dc_power_options.use_clock_dc_limits = true; + return true; create_fail: diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h index dc52a30991af..0fc66487d800 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h @@ -24,7 +24,7 @@ enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_sta enum dc_status dcn401_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); void 
dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context); @@ -140,7 +140,8 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context); SRI_ARR(UCLK_PSTATE_FORCE, HUBPREQ, id), \ HUBP_3DLUT_FL_REG_LIST_DCN401(id), \ SRI_ARR(DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE, HUBP, id), \ - SRI_ARR(DCHUBP_MCACHEID_CONFIG, HUBP, id) + SRI_ARR(DCHUBP_MCACHEID_CONFIG, HUBP, id), \ + SRI_ARR(HUBPRET_READ_LINE_VALUE, HUBPRET, id) /* ABM */ #define ABM_DCN401_REG_LIST_RI(id) \ diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile new file mode 100644 index 000000000000..bc93356a0b5b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: MIT +# +# Copyright 2025 Advanced Micro Devices, Inc. +# Makefile for bounding box component. +# Floating point required due to nature of bounding box values + +soc_and_ip_translator_ccflags := $(CC_FLAGS_FPU) +soc_and_ip_translator_rcflags := $(CC_FLAGS_NO_FPU) + +CFLAGS_$(AMDDALPATH)/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.o := $(soc_and_ip_translator_ccflags) + +CFLAGS_REMOVE_$(AMDDALPATH)/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.o := $(soc_and_ip_translator_rcflags) + +soc_and_ip_translator := soc_and_ip_translator.o +soc_and_ip_translator += dcn401/dcn401_soc_and_ip_translator.o + +AMD_DAL_soc_and_ip_translator := $(addprefix $(AMDDALPATH)/dc/soc_and_ip_translator/, $(soc_and_ip_translator)) + +AMD_DISPLAY_FILES += $(AMD_DAL_soc_and_ip_translator) diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c new file mode 100644 index 000000000000..3190c76eb482 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c @@ -0,0 +1,304 @@ +// SPDX-License-Identifier: MIT +// +// Copyright 2025 Advanced Micro Devices, Inc. + +#include "dcn401_soc_and_ip_translator.h" +#include "bounding_boxes/dcn4_soc_bb.h" + +/* soc_and_ip_translator component used to get up-to-date values for bounding box. + * Bounding box values are stored in several locations and locations can vary with DCN revision. + * This component provides an interface to get DCN-specific bounding box values. + */ + +static void get_default_soc_bb(struct dml2_soc_bb *soc_bb) +{ + memcpy(soc_bb, &dml2_socbb_dcn401, sizeof(struct dml2_soc_bb)); + memcpy(&soc_bb->qos_parameters, &dml_dcn4_variant_a_soc_qos_params, sizeof(struct dml2_soc_qos_parameters)); +} + +/* + * DC clock table is obtained from SMU during runtime. + * SMU stands for System Management Unit. It is a power management processor. + * It owns the initialization of dc's clock table and programming of clock values + * based on dc's requests. + * Our clock values in base soc bb is a dummy placeholder. The real clock values + * are retrieved from SMU firmware to dc clock table at runtime. + * This function overrides our dummy placeholder values with real values in dc + * clock table. 
+ */ +static void dcn401_convert_dc_clock_table_to_soc_bb_clock_table( + struct dml2_soc_state_table *dml_clk_table, + const struct clk_bw_params *dc_bw_params, + bool use_clock_dc_limits) +{ + int i; + const struct clk_limit_table *dc_clk_table; + + if (dc_bw_params == NULL) + /* skip if bw params could not be obtained from smu */ + return; + + dc_clk_table = &dc_bw_params->clk_table; + + /* dcfclk */ + if (dc_clk_table->num_entries_per_clk.num_dcfclk_levels) { + dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->dcfclk.num_clk_values) { + if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.dcfclk_mhz && + dc_clk_table->entries[i].dcfclk_mhz > dc_bw_params->dc_mode_limit.dcfclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].dcfclk_mhz < dc_bw_params->dc_mode_limit.dcfclk_mhz) { + dml_clk_table->dcfclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dcfclk_mhz * 1000; + dml_clk_table->dcfclk.num_clk_values = i + 1; + } else { + dml_clk_table->dcfclk.clk_values_khz[i] = 0; + dml_clk_table->dcfclk.num_clk_values = i; + } + } else { + dml_clk_table->dcfclk.clk_values_khz[i] = dc_clk_table->entries[i].dcfclk_mhz * 1000; + } + } else { + dml_clk_table->dcfclk.clk_values_khz[i] = 0; + } + } + } + + /* fclk */ + if (dc_clk_table->num_entries_per_clk.num_fclk_levels) { + dml_clk_table->fclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_fclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->fclk.num_clk_values) { + if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.fclk_mhz && + dc_clk_table->entries[i].fclk_mhz > dc_bw_params->dc_mode_limit.fclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].fclk_mhz < dc_bw_params->dc_mode_limit.fclk_mhz) { + dml_clk_table->fclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.fclk_mhz * 1000; + dml_clk_table->fclk.num_clk_values = i + 1; + } else { + dml_clk_table->fclk.clk_values_khz[i] = 0; + dml_clk_table->fclk.num_clk_values = i; + } + } else { + dml_clk_table->fclk.clk_values_khz[i] = dc_clk_table->entries[i].fclk_mhz * 1000; + } + } else { + dml_clk_table->fclk.clk_values_khz[i] = 0; + } + } + } + + /* uclk */ + if (dc_clk_table->num_entries_per_clk.num_memclk_levels) { + dml_clk_table->uclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_memclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->uclk.num_clk_values) { + if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.memclk_mhz && + dc_clk_table->entries[i].memclk_mhz > dc_bw_params->dc_mode_limit.memclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].memclk_mhz < dc_bw_params->dc_mode_limit.memclk_mhz) { + dml_clk_table->uclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.memclk_mhz * 1000; + dml_clk_table->uclk.num_clk_values = i + 1; + } else { + dml_clk_table->uclk.clk_values_khz[i] = 0; + dml_clk_table->uclk.num_clk_values = i; + } + } else { + dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000; + } + } else { + dml_clk_table->uclk.clk_values_khz[i] = 0; + } + } + } + + /* dispclk */ + if (dc_clk_table->num_entries_per_clk.num_dispclk_levels) { + dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->dispclk.num_clk_values) { + if (use_clock_dc_limits && 
dc_bw_params->dc_mode_limit.dispclk_mhz && + dc_clk_table->entries[i].dispclk_mhz > dc_bw_params->dc_mode_limit.dispclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].dispclk_mhz < dc_bw_params->dc_mode_limit.dispclk_mhz) { + dml_clk_table->dispclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dispclk_mhz * 1000; + dml_clk_table->dispclk.num_clk_values = i + 1; + } else { + dml_clk_table->dispclk.clk_values_khz[i] = 0; + dml_clk_table->dispclk.num_clk_values = i; + } + } else { + dml_clk_table->dispclk.clk_values_khz[i] = dc_clk_table->entries[i].dispclk_mhz * 1000; + } + } else { + dml_clk_table->dispclk.clk_values_khz[i] = 0; + } + } + } + + /* dppclk */ + if (dc_clk_table->num_entries_per_clk.num_dppclk_levels) { + dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->dppclk.num_clk_values) { + if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.dppclk_mhz && + dc_clk_table->entries[i].dppclk_mhz > dc_bw_params->dc_mode_limit.dppclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].dppclk_mhz < dc_bw_params->dc_mode_limit.dppclk_mhz) { + dml_clk_table->dppclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dppclk_mhz * 1000; + dml_clk_table->dppclk.num_clk_values = i + 1; + } else { + dml_clk_table->dppclk.clk_values_khz[i] = 0; + dml_clk_table->dppclk.num_clk_values = i; + } + } else { + dml_clk_table->dppclk.clk_values_khz[i] = dc_clk_table->entries[i].dppclk_mhz * 1000; + } + } else { + dml_clk_table->dppclk.clk_values_khz[i] = 0; + } + } + } + + /* dtbclk */ + if (dc_clk_table->num_entries_per_clk.num_dtbclk_levels) { + dml_clk_table->dtbclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dtbclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->dtbclk.num_clk_values) { + if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.dtbclk_mhz && + dc_clk_table->entries[i].dtbclk_mhz > dc_bw_params->dc_mode_limit.dtbclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].dtbclk_mhz < dc_bw_params->dc_mode_limit.dtbclk_mhz) { + dml_clk_table->dtbclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dtbclk_mhz * 1000; + dml_clk_table->dtbclk.num_clk_values = i + 1; + } else { + dml_clk_table->dtbclk.clk_values_khz[i] = 0; + dml_clk_table->dtbclk.num_clk_values = i; + } + } else { + dml_clk_table->dtbclk.clk_values_khz[i] = dc_clk_table->entries[i].dtbclk_mhz * 1000; + } + } else { + dml_clk_table->dtbclk.clk_values_khz[i] = 0; + } + } + } + + /* socclk */ + if (dc_clk_table->num_entries_per_clk.num_socclk_levels) { + dml_clk_table->socclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_socclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->socclk.num_clk_values) { + if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.socclk_mhz && + dc_clk_table->entries[i].socclk_mhz > dc_bw_params->dc_mode_limit.socclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].socclk_mhz < dc_bw_params->dc_mode_limit.socclk_mhz) { + dml_clk_table->socclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.socclk_mhz * 1000; + dml_clk_table->socclk.num_clk_values = i + 1; + } else { + dml_clk_table->socclk.clk_values_khz[i] = 0; + dml_clk_table->socclk.num_clk_values = i; + } + } else { + dml_clk_table->socclk.clk_values_khz[i] = dc_clk_table->entries[i].socclk_mhz * 1000; + } + } else { + dml_clk_table->socclk.clk_values_khz[i] = 0; + } + } + } + + /* dram 
config */ + dml_clk_table->dram_config.channel_count = dc_bw_params->num_channels; + dml_clk_table->dram_config.channel_width_bytes = dc_bw_params->dram_channel_width_bytes; +} + +void dcn401_update_soc_bb_with_values_from_clk_mgr(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config) +{ + soc_bb->dprefclk_mhz = dc->clk_mgr->dprefclk_khz / 1000; + soc_bb->dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; + soc_bb->mall_allocated_for_dcn_mbytes = dc->caps.mall_size_total / (1024 * 1024); + + if (dc->clk_mgr->funcs->is_smu_present && + dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr)) { + dcn401_convert_dc_clock_table_to_soc_bb_clock_table(&soc_bb->clk_table, + dc->clk_mgr->bw_params, + config->use_clock_dc_limits); + } +} + +void dcn401_update_soc_bb_with_values_from_vbios(struct dml2_soc_bb *soc_bb, const struct dc *dc) +{ + soc_bb->dchub_refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000; + soc_bb->xtalclk_mhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000; + + /* latencies in vbios are platform specific and should be used if provided */ + if (dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns) + soc_bb->power_management_parameters.dram_clk_change_blackout_us = + dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns / 10.0; + + if (dc->ctx->dc_bios->bb_info.dram_sr_enter_exit_latency_100ns) + soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us = + dc->ctx->dc_bios->bb_info.dram_sr_enter_exit_latency_100ns / 10.0; + + if (dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns) + soc_bb->power_management_parameters.stutter_exit_latency_us = + dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns / 10.0; +} + +void dcn401_update_soc_bb_with_values_from_software_policy(struct dml2_soc_bb *soc_bb, const struct dc *dc) +{ + /* set if the value is provided */ + if (dc->bb_overrides.sr_exit_time_ns) + soc_bb->power_management_parameters.stutter_exit_latency_us = + dc->bb_overrides.sr_exit_time_ns / 1000.0; + + if (dc->bb_overrides.sr_enter_plus_exit_time_ns) + soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us = + dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; + + if (dc->bb_overrides.dram_clock_change_latency_ns) + soc_bb->power_management_parameters.dram_clk_change_blackout_us = + dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; + + if (dc->bb_overrides.fclk_clock_change_latency_ns) + soc_bb->power_management_parameters.fclk_change_blackout_us = + dc->bb_overrides.fclk_clock_change_latency_ns / 1000.0; + + //Z8 values not expected nor used on DCN401 but still added for completeness + if (dc->bb_overrides.sr_exit_z8_time_ns) + soc_bb->power_management_parameters.z8_stutter_exit_latency_us = + dc->bb_overrides.sr_exit_z8_time_ns / 1000.0; + + if (dc->bb_overrides.sr_enter_plus_exit_z8_time_ns) + soc_bb->power_management_parameters.z8_stutter_enter_plus_exit_latency_us = + dc->bb_overrides.sr_enter_plus_exit_z8_time_ns / 1000.0; +} + +static void apply_soc_bb_updates(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config) +{ + /* Individual modification can be overwritten even if it was obtained by a previous function. + * Modifications are acquired in order of priority (lowest to highest). 
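The dcfclk/fclk/uclk/dispclk/dppclk/dtbclk/socclk loops in dcn401_convert_dc_clock_table_to_soc_bb_clock_table above all apply the same rule per clock domain: copy the SMU levels converted to kHz, clamp the first level that exceeds the DC-mode limit to that limit, and drop every level after it. A minimal sketch of that rule for a single clock follows; the helper name and parameter list are assumptions and not code from the patch.

/* illustrative only: mirrors the per-clock pattern above for one domain */
static void convert_one_clk_mhz_to_khz(unsigned int *out_khz,
				       unsigned int *out_num_levels,
				       const unsigned int *in_mhz,
				       unsigned int in_levels,
				       unsigned int dc_mode_limit_mhz,
				       bool use_clock_dc_limits,
				       unsigned int table_size)
{
	unsigned int i;

	if (!in_levels)
		return;	/* keep placeholder values if SMU gave us nothing */

	*out_num_levels = in_levels;
	for (i = 0; i < table_size; i++) {
		if (i >= *out_num_levels) {
			out_khz[i] = 0;
		} else if (use_clock_dc_limits && dc_mode_limit_mhz &&
			   in_mhz[i] > dc_mode_limit_mhz) {
			if (i == 0 || in_mhz[i - 1] < dc_mode_limit_mhz) {
				/* first level above the DC limit is clamped to it */
				out_khz[i] = dc_mode_limit_mhz * 1000;
				*out_num_levels = i + 1;
			} else {
				/* later levels above the limit are dropped */
				out_khz[i] = 0;
				*out_num_levels = i;
			}
		} else {
			out_khz[i] = in_mhz[i] * 1000;
		}
	}
}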
+ */ + dc_assert_fp_enabled(); + + dcn401_update_soc_bb_with_values_from_clk_mgr(soc_bb, dc, config); + dcn401_update_soc_bb_with_values_from_vbios(soc_bb, dc); + dcn401_update_soc_bb_with_values_from_software_policy(soc_bb, dc); +} + +void dcn401_get_soc_bb(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config) +{ + //get default soc_bb with static values + get_default_soc_bb(soc_bb); + //update soc_bb values with more accurate values + apply_soc_bb_updates(soc_bb, dc, config); +} + +static void dcn401_get_ip_caps(struct dml2_ip_capabilities *ip_caps) +{ + *ip_caps = dml2_dcn401_max_ip_caps; +} + +static struct soc_and_ip_translator_funcs dcn401_translator_funcs = { + .get_soc_bb = dcn401_get_soc_bb, + .get_ip_caps = dcn401_get_ip_caps, +}; + +void dcn401_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator) +{ + soc_and_ip_translator->translator_funcs = &dcn401_translator_funcs; +} diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h new file mode 100644 index 000000000000..21d842857601 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: MIT +// +// Copyright 2025 Advanced Micro Devices, Inc. + +#ifndef _DCN401_SOC_AND_IP_TRANSLATOR_H_ +#define _DCN401_SOC_AND_IP_TRANSLATOR_H_ + +#include "core_types.h" +#include "dc.h" +#include "clk_mgr.h" +#include "soc_and_ip_translator.h" +#include "dml2/dml21/inc/dml_top_soc_parameter_types.h" + +void dcn401_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator); + +/* Functions that can be re-used by higher DCN revisions of this component */ +void dcn401_get_soc_bb(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config); +void dcn401_update_soc_bb_with_values_from_clk_mgr(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config); +void dcn401_update_soc_bb_with_values_from_vbios(struct dml2_soc_bb *soc_bb, const struct dc *dc); +void dcn401_update_soc_bb_with_values_from_software_policy(struct dml2_soc_bb *soc_bb, const struct dc *dc); + +#endif /* _DCN401_SOC_AND_IP_TRANSLATOR_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c new file mode 100644 index 000000000000..c9e224d262c9 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: MIT +// +// Copyright 2025 Advanced Micro Devices, Inc. + +#include "dcn42_soc_and_ip_translator.h" +#include "soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h" +#include "bounding_boxes/dcn42_soc_bb.h" + +/* soc_and_ip_translator component used to get up-to-date values for bounding box. + * Bounding box values are stored in several locations and locations can vary with DCN revision. + * This component provides an interface to get DCN-specific bounding box values. 
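The DCN42 translator below reuses dcn401_get_soc_bb and only overrides get_ip_caps, which is the intended extension pattern for this component. For orientation, a hedged sketch of how a caller might consume the interface; the wrapper function, the dce_version lookup via dc->ctx and the FP-context handling are assumptions, only the translator API itself comes from the patch.

/* hedged usage sketch, not part of the patch */
static void example_query_soc_bb(const struct dc *dc,
				 const struct dml2_configuration_options *config)
{
	struct soc_and_ip_translator *xlat;
	struct dml2_soc_bb soc_bb;
	struct dml2_ip_capabilities ip_caps;

	xlat = dc_create_soc_and_ip_translator(dc->ctx->dce_version);
	if (!xlat || !xlat->translator_funcs)
		goto out;

	DC_FP_START();	/* dcn401_get_soc_bb asserts FP context via apply_soc_bb_updates() */
	xlat->translator_funcs->get_soc_bb(&soc_bb, dc, config);
	DC_FP_END();

	xlat->translator_funcs->get_ip_caps(&ip_caps);
out:
	dc_destroy_soc_and_ip_translator(&xlat);	/* safe when creation failed: kfree(NULL) is a no-op */
}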
+ */ + +static void dcn42_get_ip_caps(struct dml2_ip_capabilities *ip_caps) +{ + *ip_caps = dml2_dcn42_max_ip_caps; +} + +static struct soc_and_ip_translator_funcs dcn42_translator_funcs = { + .get_soc_bb = dcn401_get_soc_bb, + .get_ip_caps = dcn42_get_ip_caps, +}; + +void dcn42_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator) +{ + soc_and_ip_translator->translator_funcs = &dcn42_translator_funcs; +} diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h new file mode 100644 index 000000000000..914dcbb369a7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: MIT +// +// Copyright 2025 Advanced Micro Devices, Inc. + +#ifndef _DCN42_SOC_AND_IP_TRANSLATOR_H_ +#define _DCN42_SOC_AND_IP_TRANSLATOR_H_ + +#include "core_types.h" +#include "dc.h" +#include "clk_mgr.h" +#include "dml_top_soc_parameter_types.h" +#include "soc_and_ip_translator.h" + +void dcn42_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator); + +#endif /* _DCN42_SOC_AND_IP_TRANSLATOR_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c new file mode 100644 index 000000000000..0fc0e5a6c171 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: MIT +// +// Copyright 2025 Advanced Micro Devices, Inc. + +#include "soc_and_ip_translator.h" +#include "soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h" + +static void dc_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator, + enum dce_version dc_version) +{ + switch (dc_version) { + case DCN_VERSION_4_01: + dcn401_construct_soc_and_ip_translator(soc_and_ip_translator); + break; + default: + break; + } +} + +struct soc_and_ip_translator *dc_create_soc_and_ip_translator(enum dce_version dc_version) +{ + struct soc_and_ip_translator *soc_and_ip_translator; + + soc_and_ip_translator = kzalloc(sizeof(*soc_and_ip_translator), GFP_KERNEL); + if (!soc_and_ip_translator) + return NULL; + + dc_construct_soc_and_ip_translator(soc_and_ip_translator, dc_version); + + return soc_and_ip_translator; +} + +void dc_destroy_soc_and_ip_translator(struct soc_and_ip_translator **soc_and_ip_translator) +{ + kfree(*soc_and_ip_translator); + *soc_and_ip_translator = NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c index e0008c5f08ad..55b929ca7982 100644 --- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c +++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c @@ -196,7 +196,12 @@ static struct spl_rect calculate_mpc_slice_in_timing_active( int epimo = mpc_slice_count - plane_clip_rec->width % mpc_slice_count - 1; struct spl_rect mpc_rec; - if (use_recout_width_aligned) { + if (spl_in->basic_in.custom_width != 0) { + mpc_rec.width = spl_in->basic_in.custom_width; + mpc_rec.x = spl_in->basic_in.custom_x; + mpc_rec.height = plane_clip_rec->height; + mpc_rec.y = plane_clip_rec->y; + } else if (use_recout_width_aligned) { mpc_rec.width = recout_width_align; if ((mpc_rec.width * (mpc_slice_idx + 1)) > plane_clip_rec->width) { mpc_rec.width = plane_clip_rec->width % recout_width_align; @@ -219,7 +224,7 @@ 
static struct spl_rect calculate_mpc_slice_in_timing_active( /* extra pixels in the division remainder need to go to pipes after * the extra pixel index minus one(epimo) defined here as: */ - if (mpc_slice_idx > epimo) { + if (mpc_slice_idx > epimo && spl_in->basic_in.custom_width == 0) { mpc_rec.x += mpc_slice_idx - epimo - 1; mpc_rec.width += 1; } @@ -252,10 +257,10 @@ static struct spl_rect calculate_odm_slice_in_timing_active(struct spl_in *spl_i odm_rec.x = odm_slice_width * odm_slice_idx; odm_rec.width = is_last_odm_slice ? - /* last slice width is the reminder of h_active */ - h_active - odm_slice_width * (odm_slice_count - 1) : - /* odm slice width is the floor of h_active / count */ - odm_slice_width; + /* last slice width is the reminder of h_active */ + h_active - odm_slice_width * (odm_slice_count - 1) : + /* odm slice width is the floor of h_active / count */ + odm_slice_width; odm_rec.y = 0; odm_rec.height = v_active; @@ -884,7 +889,9 @@ static bool spl_get_isharp_en(struct spl_in *spl_in, /* Calculate number of tap with adaptive scaling off */ static void spl_get_taps_non_adaptive_scaler( - struct spl_scratch *spl_scratch, const struct spl_taps *in_taps, bool always_scale) + struct spl_scratch *spl_scratch, + const struct spl_taps *in_taps, + bool is_subsampled) { bool check_max_downscale = false; @@ -945,14 +952,15 @@ static void spl_get_taps_non_adaptive_scaler( SPL_ASSERT(check_max_downscale); - if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz) && !always_scale) + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz)) spl_scratch->scl_data.taps.h_taps = 1; - if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert) && !always_scale) + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert)) spl_scratch->scl_data.taps.v_taps = 1; - if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !always_scale) + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !is_subsampled) spl_scratch->scl_data.taps.h_taps_c = 1; - if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c) && !always_scale) + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c) && !is_subsampled) spl_scratch->scl_data.taps.v_taps_c = 1; + } /* Calculate optimal number of taps */ @@ -965,15 +973,13 @@ static bool spl_get_optimal_number_of_taps( unsigned int max_taps_y, max_taps_c; unsigned int min_taps_y, min_taps_c; enum lb_memory_config lb_config; - bool skip_easf = false; - bool always_scale = spl_in->basic_out.always_scale; + bool skip_easf = false; bool is_subsampled = spl_is_subsampled_format(spl_in->basic_in.format); - if (spl_scratch->scl_data.viewport.width > spl_scratch->scl_data.h_active && max_downscale_src_width != 0 && spl_scratch->scl_data.viewport.width > max_downscale_src_width) { - spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, always_scale); + spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, is_subsampled); *enable_easf_v = false; *enable_easf_h = false; *enable_isharp = false; @@ -982,7 +988,7 @@ static bool spl_get_optimal_number_of_taps( /* Disable adaptive scaler and sharpener when integer scaling is enabled */ if (spl_in->scaling_quality.integer_scaling) { - spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, always_scale); + spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, is_subsampled); *enable_easf_v = false; *enable_easf_h = false; *enable_isharp = false; @@ -997,8 +1003,9 @@ static bool spl_get_optimal_number_of_taps( * From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling * taps = 4 for upscaling */ - if (skip_easf) - 
spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, always_scale); + if (skip_easf) { + spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, is_subsampled); + } else { if (spl_is_video_format(spl_in->basic_in.format)) { spl_scratch->scl_data.taps.h_taps = 6; @@ -1124,7 +1131,6 @@ static bool spl_get_optimal_number_of_taps( (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert))) { spl_scratch->scl_data.taps.h_taps = 1; spl_scratch->scl_data.taps.v_taps = 1; - if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !is_subsampled) spl_scratch->scl_data.taps.h_taps_c = 1; @@ -1149,6 +1155,7 @@ static bool spl_get_optimal_number_of_taps( if ((!*enable_easf_v) && !is_subsampled && (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c))) spl_scratch->scl_data.taps.v_taps_c = 1; + } } return true; diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h index 36a284305a70..23d254dea18f 100644 --- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h +++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h @@ -460,6 +460,8 @@ struct basic_in { enum spl_color_space color_space; // Color Space unsigned int max_luminance; // Max Luminance TODO: Is determined in dc_hw_sequencer.c is_sdr bool film_grain_applied; // Film Grain Applied // TODO: To check from where to get this? + int custom_width; // Width for non-standard segmentation - used when != 0 + int custom_x; // Start x for non-standard segmentation - used when custom_width != 0 }; // Basic output information diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c index ad088d70e189..6ffc74fc9dcd 100644 --- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c @@ -44,6 +44,11 @@ static void virtual_stream_encoder_dvi_set_stream_attribute( struct dc_crtc_timing *crtc_timing, bool is_dual_link) {} +static void virtual_stream_encoder_lvds_set_stream_attribute( + struct stream_encoder *enc, + struct dc_crtc_timing *crtc_timing) +{} + static void virtual_stream_encoder_set_throttled_vcp_size( struct stream_encoder *enc, struct fixed31_32 avg_time_slots_per_mtp) @@ -115,6 +120,8 @@ static const struct stream_encoder_funcs virtual_str_enc_funcs = { virtual_stream_encoder_hdmi_set_stream_attribute, .dvi_set_stream_attribute = virtual_stream_encoder_dvi_set_stream_attribute, + .lvds_set_stream_attribute = + virtual_stream_encoder_lvds_set_stream_attribute, .set_throttled_vcp_size = virtual_stream_encoder_set_throttled_vcp_size, .update_hdmi_info_packets = diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index 3f3fa1b6a69e..338fdc651f2c 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -129,7 +129,9 @@ enum dmub_window_id { DMUB_WINDOW_5_TRACEBUFF, DMUB_WINDOW_6_FW_STATE, DMUB_WINDOW_7_SCRATCH_MEM, + DMUB_WINDOW_IB_MEM, DMUB_WINDOW_SHARED_STATE, + DMUB_WINDOW_LSDMA_BUFFER, DMUB_WINDOW_TOTAL, }; @@ -314,6 +316,7 @@ struct dmub_srv_hw_params { bool disable_sldo_opt; bool enable_non_transparent_setconfig; bool lower_hbr3_phy_ssc; + bool override_hbr3_pll_vco; }; /** @@ -355,6 +358,7 @@ struct dmub_diagnostic_data { uint8_t is_traceport_en : 1; uint8_t is_cw0_enabled : 1; uint8_t is_cw6_enabled : 1; + uint8_t is_pwait : 1; }; struct dmub_srv_inbox { @@ -539,6 +543,7 @@ struct dmub_srv { uint32_t fw_version; bool 
is_virtual; struct dmub_fb scratch_mem_fb; + struct dmub_fb ib_mem_gart; volatile struct dmub_shared_state_feature_block *shared_state; volatile const struct dmub_fw_state *fw_state; @@ -563,6 +568,7 @@ struct dmub_srv { bool sw_init; bool hw_init; + bool dpia_supported; uint64_t fb_base; uint64_t fb_offset; @@ -576,6 +582,7 @@ struct dmub_srv { enum dmub_srv_power_state_type power_state; struct dmub_diagnostic_data debug; + struct dmub_fb lsdma_rb_fb; }; /** @@ -592,6 +599,8 @@ struct dmub_notification { enum dmub_notification_type type; uint8_t link_index; uint8_t result; + /* notify instance from DMUB */ + uint8_t instance; bool pending_notification; union { struct aux_reply_data aux_reply; @@ -602,14 +611,6 @@ struct dmub_notification { }; }; -/* enum dmub_ips_mode - IPS mode identifier */ -enum dmub_ips_mode { - DMUB_IPS_MODE_IPS1_MAX = 0, - DMUB_IPS_MODE_IPS2, - DMUB_IPS_MODE_IPS1_RCG, - DMUB_IPS_MODE_IPS1_ONO2_ON -}; - /** * DMUB firmware version helper macro - useful for checking if the version * of a firmware to know if feature or functionality is supported or present. diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index b66bd10cdc9b..92248224b713 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -104,6 +104,14 @@ */ #define DMUB_MAX_FPO_STREAMS 4 +/* Define to ensure that the "common" members always appear in the same + * order in different structs for back compat purposes + */ +#define COMMON_STREAM_STATIC_SUB_STATE \ + struct dmub_fams2_cmd_legacy_stream_static_state legacy; \ + struct dmub_fams2_cmd_subvp_stream_static_state subvp; \ + struct dmub_fams2_cmd_drr_stream_static_state drr; + /* Maximum number of streams on any ASIC. */ #define DMUB_MAX_STREAMS 6 @@ -291,6 +299,31 @@ union dmub_addr { } u; /*<< Low/high bit access */ uint64_t quad_part; /*<< 64 bit address */ }; + +/* Flattened structure containing SOC BB parameters stored in the VBIOS + * It is not practical to store the entire bounding box in VBIOS since the bounding box struct can gain new parameters. + * This also prevents alignment issues when new parameters are added to the SoC BB.
+ * The following parameters should be added since these values can't be obtained elsewhere: + * -dml2_soc_power_management_parameters + * -dml2_soc_vmin_clock_limits + */ +struct dmub_soc_bb_params { + uint32_t dram_clk_change_blackout_ns; + uint32_t dram_clk_change_read_only_ns; + uint32_t dram_clk_change_write_only_ns; + uint32_t fclk_change_blackout_ns; + uint32_t g7_ppt_blackout_ns; + uint32_t stutter_enter_plus_exit_latency_ns; + uint32_t stutter_exit_latency_ns; + uint32_t z8_stutter_enter_plus_exit_latency_ns; + uint32_t z8_stutter_exit_latency_ns; + uint32_t z8_min_idle_time_ns; + uint32_t type_b_dram_clk_change_blackout_ns; + uint32_t type_b_ppt_blackout_ns; + uint32_t vmin_limit_dispclk_khz; + uint32_t vmin_limit_dcfclk_khz; + uint32_t g7_temperature_read_blackout_ns; +}; #pragma pack(pop) /** @@ -757,11 +790,29 @@ enum dmub_ips_rcg_disable_type { DMUB_IPS_RCG_DISABLE = 3 }; +enum dmub_ips_in_vpb_disable_type { + DMUB_IPS_VPB_RCG_ONLY = 0, // Legacy behaviour + DMUB_IPS_VPB_DISABLE_ALL = 1, + DMUB_IPS_VPB_ENABLE_IPS1_AND_RCG = 2, + DMUB_IPS_VPB_ENABLE_ALL = 3 // Enable IPS1 Z8, IPS1 and RCG +}; + #define DMUB_IPS1_ALLOW_MASK 0x00000001 #define DMUB_IPS2_ALLOW_MASK 0x00000002 #define DMUB_IPS1_COMMIT_MASK 0x00000004 #define DMUB_IPS2_COMMIT_MASK 0x00000008 +enum dmub_ips_comand_type { + /** + * Start/stop IPS residency measurements for a given IPS mode + */ + DMUB_CMD__IPS_RESIDENCY_CNTL = 0, + /** + * Query IPS residency information for a given IPS mode + */ + DMUB_CMD__IPS_QUERY_RESIDENCY_INFO = 1, +}; + /** * union dmub_fw_boot_options - Boot option definitions for SCRATCH14 */ @@ -792,7 +843,8 @@ union dmub_fw_boot_options { uint32_t ips_sequential_ono: 1; /**< 1 to enable sequential ONO IPS sequence */ uint32_t disable_sldo_opt: 1; /**< 1 to disable SLDO optimizations */ uint32_t lower_hbr3_phy_ssc: 1; /**< 1 to lower hbr3 phy ssc to 0.125 percent */ - uint32_t reserved : 6; /**< reserved */ + uint32_t override_hbr3_pll_vco: 1; /**< 1 to override the hbr3 pll vco to 0 */ + uint32_t reserved : 5; /**< reserved */ } bits; /**< boot bits */ uint32_t all; /**< 32-bit access to bits */ }; @@ -856,7 +908,9 @@ union dmub_shared_state_ips_driver_signals { uint32_t allow_ips0_rcg : 1; /**< 1 is IPS0 RCG is allowed */ uint32_t allow_ips1_rcg : 1; /**< 1 is IPS1 RCG is allowed */ uint32_t allow_ips1z8 : 1; /**< 1 is IPS1 Z8 Retention is allowed */ - uint32_t reserved_bits : 24; /**< Reversed bits */ + uint32_t allow_dynamic_ips1 : 1; /**< 1 if IPS1 is allowed in dynamic use cases such as VPB */ + uint32_t allow_dynamic_ips1_z8: 1; /**< 1 if IPS1 z8 ret is allowed in dynamic use cases such as VPB */ + uint32_t reserved_bits : 22; /**< Reversed bits */ } bits; uint32_t all; }; @@ -1508,6 +1562,16 @@ enum dmub_cmd_type { */ DMUB_CMD__FUSED_IO = 89, + /** + * Command type used for all LSDMA commands. + */ + DMUB_CMD__LSDMA = 90, + + /** + * Command type used for all IPS commands.
+ */ + DMUB_CMD__IPS = 91, + DMUB_CMD__VBIOS = 128, }; @@ -1918,6 +1982,154 @@ struct dmub_rb_cmd_fams2_flip { struct dmub_fams2_flip_info flip_info; }; +struct dmub_cmd_lsdma_data { + union { + struct lsdma_init_data { + union dmub_addr gpu_addr_base; + uint32_t ring_size; + } init_data; + struct lsdma_tiled_copy_data { + uint32_t src_addr_lo; + uint32_t src_addr_hi; + + uint32_t dst_addr_lo; + uint32_t dst_addr_hi; + + uint32_t src_x : 16; + uint32_t src_y : 16; + + uint32_t dst_x : 16; + uint32_t dst_y : 16; + + uint32_t src_width : 16; + uint32_t src_height : 16; + + uint32_t dst_width : 16; + uint32_t dst_height : 16; + + uint32_t rect_x : 16; + uint32_t rect_y : 16; + + uint32_t src_swizzle_mode : 5; + uint32_t src_mip_max : 5; + uint32_t src_mip_id : 5; + uint32_t dst_mip_max : 5; + uint32_t dst_swizzle_mode : 5; + uint32_t dst_mip_id : 5; + uint32_t tmz : 1; + uint32_t dcc : 1; + + uint32_t data_format : 6; + uint32_t padding1 : 4; + uint32_t dst_element_size : 3; + uint32_t num_type : 3; + uint32_t src_element_size : 3; + uint32_t write_compress : 2; + uint32_t cache_policy_dst : 2; + uint32_t cache_policy_src : 2; + uint32_t read_compress : 2; + uint32_t src_dim : 2; + uint32_t dst_dim : 2; + uint32_t max_uncom : 1; + + uint32_t max_com : 2; + uint32_t padding : 30; + } tiled_copy_data; + struct lsdma_linear_copy_data { + uint32_t src_lo; + uint32_t src_hi; + + uint32_t dst_lo; + uint32_t dst_hi; + + uint32_t count : 30; + uint32_t cache_policy_dst : 2; + + uint32_t tmz : 1; + uint32_t cache_policy_src : 2; + uint32_t padding : 29; + } linear_copy_data; + struct lsdma_linear_sub_window_copy_data { + uint32_t src_lo; + uint32_t src_hi; + + uint32_t dst_lo; + uint32_t dst_hi; + + uint32_t src_x : 16; + uint32_t src_y : 16; + + uint32_t dst_x : 16; + uint32_t dst_y : 16; + + uint32_t rect_x : 16; + uint32_t rect_y : 16; + + uint32_t src_pitch : 16; + uint32_t dst_pitch : 16; + + uint32_t src_slice_pitch; + uint32_t dst_slice_pitch; + + uint32_t tmz : 1; + uint32_t element_size : 3; + uint32_t src_cache_policy : 3; + uint32_t dst_cache_policy : 3; + uint32_t reserved0 : 22; + } linear_sub_window_copy_data; + struct lsdma_reg_write_data { + uint32_t reg_addr; + uint32_t reg_data; + } reg_write_data; + struct lsdma_pio_copy_data { + uint32_t src_lo; + uint32_t src_hi; + + uint32_t dst_lo; + uint32_t dst_hi; + + union { + struct { + uint32_t byte_count : 26; + uint32_t src_loc : 1; + uint32_t dst_loc : 1; + uint32_t src_addr_inc : 1; + uint32_t dst_addr_inc : 1; + uint32_t overlap_disable : 1; + uint32_t constant_fill : 1; + } fields; + uint32_t raw; + } packet; + } pio_copy_data; + struct lsdma_pio_constfill_data { + uint32_t dst_lo; + uint32_t dst_hi; + + union { + struct { + uint32_t byte_count : 26; + uint32_t src_loc : 1; + uint32_t dst_loc : 1; + uint32_t src_addr_inc : 1; + uint32_t dst_addr_inc : 1; + uint32_t overlap_disable : 1; + uint32_t constant_fill : 1; + } fields; + uint32_t raw; + } packet; + + uint32_t data; + } pio_constfill_data; + + uint32_t all[14]; + } u; +}; + +struct dmub_rb_cmd_lsdma { + struct dmub_cmd_header header; + struct dmub_cmd_lsdma_data lsdma_data; +}; + struct dmub_optc_state_v2 { uint32_t v_total_min; uint32_t v_total_max; @@ -1949,6 +2161,28 @@ enum fams2_stream_type { FAMS2_STREAM_TYPE_SUBVP = 4, }; +struct dmub_rect16 { + /** + * Dirty rect x offset. + */ + uint16_t x; + + /** + * Dirty rect y offset. + */ + uint16_t y; + + /** + * Dirty rect width. + */ + uint16_t width; + + /** + * Dirty rect height. 
+ */ + uint16_t height; +}; + /* static stream state */ struct dmub_fams2_legacy_stream_static_state { uint8_t vactive_det_fill_delay_otg_vlines; @@ -2021,11 +2255,13 @@ union dmub_fams2_stream_static_sub_state { }; //v0 union dmub_fams2_cmd_stream_static_sub_state { - struct dmub_fams2_cmd_legacy_stream_static_state legacy; - struct dmub_fams2_cmd_subvp_stream_static_state subvp; - struct dmub_fams2_cmd_drr_stream_static_state drr; + COMMON_STREAM_STATIC_SUB_STATE }; //v1 +union dmub_fams2_stream_static_sub_state_v2 { + COMMON_STREAM_STATIC_SUB_STATE +}; //v2 + struct dmub_fams2_stream_static_state { enum fams2_stream_type type; uint32_t otg_vline_time_ns; @@ -2091,7 +2327,7 @@ struct dmub_fams2_cmd_stream_static_base_state { struct dmub_fams2_stream_static_state_v1 { struct dmub_fams2_cmd_stream_static_base_state base; - union dmub_fams2_cmd_stream_static_sub_state sub_state; + union dmub_fams2_stream_static_sub_state_v2 sub_state; }; //v1 /** @@ -2128,6 +2364,7 @@ struct dmub_cmd_fams2_global_config { union dmub_fams2_global_feature_config features; uint32_t recovery_timeout_us; uint32_t hwfq_flip_programming_delay_us; + uint32_t max_allow_to_target_delta_us; // how early DCN could assert P-State allow compared to the P-State target }; union dmub_cmd_fams2_config { @@ -2139,6 +2376,11 @@ union dmub_cmd_fams2_config { } stream_v1; //v1 }; +struct dmub_fams2_config_v2 { + struct dmub_cmd_fams2_global_config global; + struct dmub_fams2_stream_static_state_v1 stream_v1[DMUB_MAX_STREAMS]; //v1 +}; + /** * DMUB rb command definition for FAMS2 (merged SubVP, FPO, Legacy) */ @@ -2148,6 +2390,22 @@ struct dmub_rb_cmd_fams2 { }; /** + * Indirect buffer descriptor + */ +struct dmub_ib_data { + union dmub_addr src; // location of indirect buffer in memory + uint16_t size; // indirect buffer size in bytes +}; + +/** + * DMUB rb command definition for commands passed over indirect buffer + */ +struct dmub_rb_cmd_ib { + struct dmub_cmd_header header; + struct dmub_ib_data ib_data; +}; + +/** * enum dmub_cmd_idle_opt_type - Idle optimization command type. */ enum dmub_cmd_idle_opt_type { @@ -2170,6 +2428,11 @@ enum dmub_cmd_idle_opt_type { * DCN hardware notify power state. */ DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE = 3, + + /** + * DCN notify to release HW. + */ + DMUB_CMD__IDLE_OPT_RELEASE_HW = 4, }; /** @@ -2315,7 +2578,8 @@ struct dmub_dig_transmitter_control_data_v1_7 { uint8_t connobj_id; /**< Connector Object Id defined in ObjectId.h */ uint8_t HPO_instance; /**< HPO instance (0: inst0, 1: inst1) */ uint8_t reserved1; /**< For future use */ - uint8_t reserved2[3]; /**< For future use */ + uint8_t skip_phy_ssc_reduction; + uint8_t reserved2[2]; /**< For future use */ uint32_t reserved3[11]; /**< For future use */ }; @@ -2933,6 +3197,7 @@ enum dmub_cmd_fams_type { DMUB_CMD__FAMS2_CONFIG = 4, DMUB_CMD__FAMS2_DRR_UPDATE = 5, DMUB_CMD__FAMS2_FLIP = 6, + DMUB_CMD__FAMS2_IB_CONFIG = 7, }; /** @@ -3756,6 +4021,10 @@ enum dmub_cmd_replay_type { */ DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP = 8, /** + * Set version + */ + DMUB_CMD__REPLAY_SET_VERSION = 9, + /** * Set Replay General command. */ DMUB_CMD__REPLAY_SET_GENERAL_CMD = 16, @@ -3785,6 +4054,10 @@ struct dmub_alpm_auxless_data { uint16_t lfps_t1_t2_override_us; short lfps_t1_t2_offset_us; uint8_t lttpr_count; + /* + * Padding to align structure to 4 byte boundary. 
+ */ + uint8_t pad[1]; }; /** @@ -3861,6 +4134,79 @@ struct dmub_cmd_replay_copy_settings_data { * Use for AUX-less ALPM LFPS wake operation */ struct dmub_alpm_auxless_data auxless_alpm_data; + /** + * @hpo_stream_enc_inst: HPO stream encoder instance + */ + uint8_t hpo_stream_enc_inst; + /** + * @hpo_link_enc_inst: HPO link encoder instance + */ + uint8_t hpo_link_enc_inst; + /** + * Determines if fast resync in ultra sleep mode is enabled/disabled. + */ + uint8_t replay_support_fast_resync_in_ultra_sleep_mode; + /** + * @pad: Align structure to 4 byte boundary. + */ + uint8_t pad[1]; +}; + + +/** + * Replay versions. + */ +enum replay_version { + /** + * FreeSync Replay + */ + REPLAY_VERSION_FREESYNC_REPLAY = 0, + /** + * Panel Replay + */ + REPLAY_VERSION_PANEL_REPLAY = 1, + /** + * Replay not supported. + */ + REPLAY_VERSION_UNSUPPORTED = 0xFF, +}; + +/** + * Data passed from driver to FW in a DMUB_CMD__REPLAY_SET_VERSION command. + */ +struct dmub_cmd_replay_set_version_data { + /** + * Panel Instance. + * Panel instance to identify which psr_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + /** + * Replay version that FW should implement. + */ + enum replay_version version; + /** + * Replay control version. + */ + uint8_t cmd_version; + /** + * Explicit padding to 4 byte boundary. + */ + uint8_t pad[2]; +}; + +/** + * Definition of a DMUB_CMD__REPLAY_SET_VERSION command. + */ +struct dmub_rb_cmd_replay_set_version { + /** + * Command header. + */ + struct dmub_cmd_header header; + /** + * Data passed from driver to FW in a DMUB_CMD__REPLAY_SET_VERSION command. + */ + struct dmub_cmd_replay_set_version_data replay_set_version_data; }; /** @@ -4215,6 +4561,10 @@ union dmub_replay_cmd_set { */ struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data disabled_adaptive_sync_sdp_data; /** + * Definition of DMUB_CMD__REPLAY_SET_VERSION command data. + */ + struct dmub_cmd_replay_set_version_data version_data; + /** * Definition of DMUB_CMD__REPLAY_SET_GENERAL_CMD command data. */ struct dmub_cmd_replay_set_general_cmd_data set_general_cmd_data; @@ -4416,6 +4766,41 @@ enum dmub_cmd_abm_type { DMUB_CMD__ABM_GET_HISTOGRAM_DATA = 11, }; +/** + * LSDMA command sub-types. + */ +enum dmub_cmd_lsdma_type { + /** + * Initialize parameters for LSDMA. + * Ring buffer is mapped to the ring buffer + */ + DMUB_CMD__LSDMA_INIT_CONFIG = 0, + /** + * LSDMA copies data from source to destination linearly + */ + DMUB_CMD__LSDMA_LINEAR_COPY = 1, + /** + * LSDMA copies data from source to destination linearly in sub window + */ + DMUB_CMD__LSDMA_LINEAR_SUB_WINDOW_COPY = 2, + /** + * Send the tiled-to-tiled copy command + */ + DMUB_CMD__LSDMA_TILED_TO_TILED_COPY = 3, + /** + * Send the poll reg write command + */ + DMUB_CMD__LSDMA_POLL_REG_WRITE = 4, + /** + * Send the pio copy command + */ + DMUB_CMD__LSDMA_PIO_COPY = 5, + /** + * Send the pio constfill command + */ + DMUB_CMD__LSDMA_PIO_CONSTFILL = 6, +}; + struct abm_ace_curve { /** * @offsets: ACE curve offsets.
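The LSDMA sub-types above are consumed together with the dmub_cmd_lsdma_data payloads defined earlier in this header. As a rough illustration only, a driver-side caller might fill a linear-copy request along these lines; the dmub_cmd_header field names (type/sub_type) follow the existing DMUB command pattern, and src_gpu_addr, dst_gpu_addr and copy_count are hypothetical caller-provided values, not part of this patch:

/* Illustrative sketch, not from the patch: build a DMUB LSDMA linear copy. */
union dmub_rb_cmd cmd;

memset(&cmd, 0, sizeof(cmd));
cmd.lsdma.header.type = DMUB_CMD__LSDMA;                 /* top-level command type (90) */
cmd.lsdma.header.sub_type = DMUB_CMD__LSDMA_LINEAR_COPY; /* linear copy sub-type (1) */
cmd.lsdma.lsdma_data.u.linear_copy_data.src_lo = lower_32_bits(src_gpu_addr);
cmd.lsdma.lsdma_data.u.linear_copy_data.src_hi = upper_32_bits(src_gpu_addr);
cmd.lsdma.lsdma_data.u.linear_copy_data.dst_lo = lower_32_bits(dst_gpu_addr);
cmd.lsdma.lsdma_data.u.linear_copy_data.dst_hi = upper_32_bits(dst_gpu_addr);
cmd.lsdma.lsdma_data.u.linear_copy_data.count = copy_count; /* 30-bit field */
cmd.lsdma.lsdma_data.u.linear_copy_data.tmz = 0;            /* non-protected copy */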
@@ -5621,6 +6006,64 @@ struct dmub_rb_cmd_assr_enable { }; /** + * Current definition of "ips_mode" from driver + */ +enum ips_residency_mode { + IPS_RESIDENCY__IPS1_MAX, + IPS_RESIDENCY__IPS2, + IPS_RESIDENCY__IPS1_RCG, + IPS_RESIDENCY__IPS1_ONO2_ON, + IPS_RESIDENCY__IPS1_Z8_RETENTION, + IPS_RESIDENCY__PG_ONO_LAST_SEEN_IN_IPS, + IPS_RESIDENCY__PG_ONO_CURRENT_STATE +}; + +#define NUM_IPS_HISTOGRAM_BUCKETS 16 + +/** + * IPS residency statistics to be sent to driver - subset of struct dmub_ips_residency_stats + */ +struct dmub_ips_residency_info { + uint32_t residency_millipercent; + uint32_t entry_counter; + uint32_t histogram[NUM_IPS_HISTOGRAM_BUCKETS]; + uint64_t total_time_us; + uint64_t total_inactive_time_us; + uint32_t ono_pg_state_at_collection; + uint32_t ono_pg_state_last_seen_in_ips; +}; + +/** + * Data passed from driver to FW in a DMUB_CMD__IPS_RESIDENCY_CNTL command. + */ +struct dmub_cmd_ips_residency_cntl_data { + uint8_t panel_inst; + uint8_t start_measurement; + uint8_t padding[2]; // align to 4-byte boundary +}; + +struct dmub_rb_cmd_ips_residency_cntl { + struct dmub_cmd_header header; + struct dmub_cmd_ips_residency_cntl_data cntl_data; +}; + +/** + * Data passed from FW to driver in a DMUB_CMD__IPS_QUERY_RESIDENCY_INFO command. + */ +struct dmub_cmd_ips_query_residency_info_data { + union dmub_addr dest; + uint32_t size; + uint32_t ips_mode; + uint8_t panel_inst; + uint8_t padding[3]; // align to 4-byte boundary +}; + +struct dmub_rb_cmd_ips_query_residency_info { + struct dmub_cmd_header header; + struct dmub_cmd_ips_query_residency_info_data info_data; +}; + +/** * union dmub_rb_cmd - DMUB inbox command. */ union dmub_rb_cmd { @@ -5884,6 +6327,10 @@ union dmub_rb_cmd { * Definition of a DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE command. */ struct dmub_rb_cmd_idle_opt_set_dc_power_state idle_opt_set_dc_power_state; + /** + * Definition of a DMUB_CMD__REPLAY_SET_VERSION command. + */ + struct dmub_rb_cmd_replay_set_version replay_set_version; /* * Definition of a DMUB_CMD__REPLAY_COPY_SETTINGS command. */ @@ -5926,13 +6373,25 @@ union dmub_rb_cmd { * Definition of a DMUB_CMD__PSP_ASSR_ENABLE command. */ struct dmub_rb_cmd_assr_enable assr_enable; + struct dmub_rb_cmd_fams2 fams2_config; + struct dmub_rb_cmd_ib ib_fams2_config; + struct dmub_rb_cmd_fams2_drr_update fams2_drr_update; struct dmub_rb_cmd_fams2_flip fams2_flip; struct dmub_rb_cmd_fused_io fused_io; + + /** + * Definition of a DMUB_CMD__LSDMA command. 
+ */ + struct dmub_rb_cmd_lsdma lsdma; + + struct dmub_rb_cmd_ips_residency_cntl ips_residency_cntl; + + struct dmub_rb_cmd_ips_query_residency_info ips_query_residency_info; }; /** @@ -6080,15 +6539,18 @@ static inline bool dmub_rb_full(struct dmub_rb *rb) static inline bool dmub_rb_push_front(struct dmub_rb *rb, const union dmub_rb_cmd *cmd) { - uint64_t volatile *dst = (uint64_t volatile *)((uint8_t *)(rb->base_address) + rb->wrpt); - const uint64_t *src = (const uint64_t *)cmd; + uint8_t *dst = (uint8_t *)(rb->base_address) + rb->wrpt; + const uint8_t *src = (const uint8_t *)cmd; uint8_t i; + if (rb->capacity == 0) + return false; + if (dmub_rb_full(rb)) return false; // copying data - for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++) + for (i = 0; i < DMUB_RB_CMD_SIZE; i++) *dst++ = *src++; rb->wrpt += DMUB_RB_CMD_SIZE; @@ -6113,6 +6575,9 @@ static inline bool dmub_rb_out_push_front(struct dmub_rb *rb, uint8_t *dst = (uint8_t *)(rb->base_address) + rb->wrpt; const uint8_t *src = (const uint8_t *)cmd; + if (rb->capacity == 0) + return false; + if (dmub_rb_full(rb)) return false; @@ -6158,6 +6623,9 @@ static inline void dmub_rb_get_rptr_with_offset(struct dmub_rb *rb, uint32_t num_cmds, uint32_t *next_rptr) { + if (rb->capacity == 0) + return; + *next_rptr = rb->rptr + DMUB_RB_CMD_SIZE * num_cmds; if (*next_rptr >= rb->capacity) @@ -6221,6 +6689,9 @@ static inline bool dmub_rb_out_front(struct dmub_rb *rb, */ static inline bool dmub_rb_pop_front(struct dmub_rb *rb) { + if (rb->capacity == 0) + return false; + if (dmub_rb_empty(rb)) return false; @@ -6245,6 +6716,9 @@ static inline void dmub_rb_flush_pending(const struct dmub_rb *rb) uint32_t rptr = rb->rptr; uint32_t wptr = rb->wrpt; + if (rb->capacity == 0) + return; + while (rptr != wptr) { uint64_t *data = (uint64_t *)((uint8_t *)(rb->base_address) + rptr); uint8_t i; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index a308bd604677..4777c7203b2c 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -377,6 +377,7 @@ void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu boot_options.bits.dpia_hpd_int_enable_supported = params->dpia_hpd_int_enable_supported; boot_options.bits.power_optimization = params->power_optimization; boot_options.bits.lower_hbr3_phy_ssc = params->lower_hbr3_phy_ssc; + boot_options.bits.override_hbr3_pll_vco = params->override_hbr3_pll_vco; boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 
1 : 0; @@ -416,7 +417,7 @@ uint32_t dmub_dcn31_get_current_time(struct dmub_srv *dmub) void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub) { - uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset; + uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset, is_pwait; uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled; struct dmub_timeout_info timeout = {0}; @@ -466,6 +467,9 @@ void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub) REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); dmub->debug.is_dmcub_enabled = is_dmub_enabled; + REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait); + dmub->debug.is_pwait = is_pwait; + REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset); dmub->debug.is_dmcub_soft_reset = is_soft_reset; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c index e7056205b050..ce041f6239dc 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c @@ -89,44 +89,50 @@ static inline void dmub_dcn32_translate_addr(const union dmub_addr *addr_in, void dmub_dcn32_reset(struct dmub_srv *dmub) { union dmub_gpint_data_register cmd; - const uint32_t timeout = 30; - uint32_t in_reset, scratch, i; + const uint32_t timeout = 100000; + uint32_t in_reset, is_enabled, scratch, i, pwait_mode; REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset); + REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled); - if (in_reset == 0) { + if (in_reset == 0 && is_enabled != 0) { cmd.bits.status = 1; cmd.bits.command_code = DMUB_GPINT__STOP_FW; cmd.bits.param = 0; dmub->hw_funcs.set_gpint(dmub, cmd); - /** - * Timeout covers both the ACK and the wait - * for remaining work to finish. - * - * This is mostly bound by the PHY disable sequence. - * Each register check will be greater than 1us, so - * don't bother using udelay. - */ - for (i = 0; i < timeout; ++i) { if (dmub->hw_funcs.is_gpint_acked(dmub, cmd)) break; + + udelay(1); } for (i = 0; i < timeout; ++i) { - scratch = dmub->hw_funcs.get_gpint_response(dmub); + scratch = REG_READ(DMCUB_SCRATCH7); if (scratch == DMUB_GPINT__STOP_FW_RESPONSE) break; + + udelay(1); } + for (i = 0; i < timeout; ++i) { + REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &pwait_mode); + if (pwait_mode & (1 << 0)) + break; + + udelay(1); + } /* Force reset in case we timed out, DMCUB is likely hung. */ } - REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1); - REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); - REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); + if (is_enabled) { + REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1); + udelay(1); + REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); + } + REG_WRITE(DMCUB_INBOX1_RPTR, 0); REG_WRITE(DMCUB_INBOX1_WPTR, 0); REG_WRITE(DMCUB_OUTBOX1_RPTR, 0); @@ -135,7 +141,7 @@ void dmub_dcn32_reset(struct dmub_srv *dmub) REG_WRITE(DMCUB_OUTBOX0_WPTR, 0); REG_WRITE(DMCUB_SCRATCH0, 0); - /* Clear the GPINT command manually so we don't reset again. */ + /* Clear the GPINT command manually so we don't send anything during boot. 
*/ cmd.all = 0; dmub->hw_funcs.set_gpint(dmub, cmd); } @@ -419,8 +425,8 @@ uint32_t dmub_dcn32_get_current_time(struct dmub_srv *dmub) void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub) { - uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset; - uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled; + uint32_t is_dmub_enabled, is_soft_reset, is_pwait; + uint32_t is_traceport_enabled, is_cw6_enabled; struct dmub_timeout_info timeout = {0}; if (!dmub) @@ -470,18 +476,15 @@ void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub) REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); dmub->debug.is_dmcub_enabled = is_dmub_enabled; + REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait); + dmub->debug.is_pwait = is_pwait; + REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset); dmub->debug.is_dmcub_soft_reset = is_soft_reset; - REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset); - dmub->debug.is_dmcub_secure_reset = is_sec_reset; - REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled); dmub->debug.is_traceport_en = is_traceport_enabled; - REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled); - dmub->debug.is_cw0_enabled = is_cw0_enabled; - REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); dmub->debug.is_cw6_enabled = is_cw6_enabled; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h index 1a229450c53d..daf81027d663 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h @@ -89,6 +89,9 @@ struct dmub_srv; DMUB_SR(DMCUB_REGION5_OFFSET) \ DMUB_SR(DMCUB_REGION5_OFFSET_HIGH) \ DMUB_SR(DMCUB_REGION5_TOP_ADDRESS) \ + DMUB_SR(DMCUB_REGION6_OFFSET) \ + DMUB_SR(DMCUB_REGION6_OFFSET_HIGH) \ + DMUB_SR(DMCUB_REGION6_TOP_ADDRESS) \ DMUB_SR(DMCUB_SCRATCH0) \ DMUB_SR(DMCUB_SCRATCH1) \ DMUB_SR(DMCUB_SCRATCH2) \ @@ -155,6 +158,8 @@ struct dmub_srv; DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \ DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_TOP_ADDRESS) \ DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_ENABLE) \ + DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_TOP_ADDRESS) \ + DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_ENABLE) \ DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \ DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \ DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \ @@ -162,7 +167,8 @@ struct dmub_srv; DMUB_SF(DMCUB_INBOX0_WPTR, DMCUB_INBOX0_WPTR) \ DMUB_SF(DMCUB_REGION3_TMR_AXI_SPACE, DMCUB_REGION3_TMR_AXI_SPACE) \ DMUB_SF(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN) \ - DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK) + DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK) \ + DMUB_SF(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS) struct dmub_srv_dcn32_reg_offset { #define DMUB_SR(reg) uint32_t reg; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c index 72a0f078cd1a..834e5434ccb8 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c @@ -92,19 +92,15 @@ void dmub_dcn35_reset(struct dmub_srv *dmub) uint32_t in_reset, is_enabled, scratch, i, pwait_mode; REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset); + REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled); - if (in_reset == 0) { + if (in_reset == 0 && is_enabled != 0) { cmd.bits.status = 1; cmd.bits.command_code = DMUB_GPINT__STOP_FW; cmd.bits.param = 0; dmub->hw_funcs.set_gpint(dmub, cmd); - /** - 
* Timeout covers both the ACK and the wait - * for remaining work to finish. - */ - for (i = 0; i < timeout; ++i) { if (dmub->hw_funcs.is_gpint_acked(dmub, cmd)) break; @@ -130,11 +126,9 @@ void dmub_dcn35_reset(struct dmub_srv *dmub) /* Force reset in case we timed out, DMCUB is likely hung. */ } - REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled); - if (is_enabled) { REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1); - REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); + udelay(1); REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); } @@ -160,11 +154,7 @@ void dmub_dcn35_reset_release(struct dmub_srv *dmub) LONO_SOCCLK_GATE_DISABLE, 1, LONO_DMCUBCLK_GATE_DISABLE, 1); - REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); - udelay(1); REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1); - REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1); - udelay(1); REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 0); REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 0); } @@ -410,13 +400,14 @@ union dmub_fw_boot_options dmub_dcn35_get_fw_boot_option(struct dmub_srv *dmub) void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params) { union dmub_fw_boot_options boot_options = {0}; - union dmub_fw_boot_options cur_boot_options = {0}; - cur_boot_options = dmub_dcn35_get_fw_boot_option(dmub); + if (!dmub->dpia_supported) { + dmub->dpia_supported = dmub_dcn35_get_fw_boot_option(dmub).bits.enable_dpia; + } boot_options.bits.z10_disable = params->disable_z10; boot_options.bits.dpia_supported = params->dpia_supported; - boot_options.bits.enable_dpia = cur_boot_options.bits.enable_dpia && !params->disable_dpia; + boot_options.bits.enable_dpia = dmub->dpia_supported && !params->disable_dpia; boot_options.bits.usb4_cm_version = params->usb4_cm_version; boot_options.bits.dpia_hpd_int_enable_supported = params->dpia_hpd_int_enable_supported; boot_options.bits.power_optimization = params->power_optimization; @@ -464,7 +455,7 @@ uint32_t dmub_dcn35_get_current_time(struct dmub_srv *dmub) void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub) { - uint32_t is_dmub_enabled, is_soft_reset; + uint32_t is_dmub_enabled, is_soft_reset, is_pwait; uint32_t is_traceport_enabled, is_cw6_enabled; struct dmub_timeout_info timeout = {0}; @@ -515,6 +506,9 @@ void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub) REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); dmub->debug.is_dmcub_enabled = is_dmub_enabled; + REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait); + dmub->debug.is_pwait = is_pwait; + REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset); dmub->debug.is_dmcub_soft_reset = is_soft_reset; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c index 2575dbc448f7..b31adbd0d685 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c @@ -413,7 +413,7 @@ uint32_t dmub_dcn401_get_current_time(struct dmub_srv *dmub) void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub) { - uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset; + uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset, is_pwait; uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled; struct dmub_timeout_info timeout = {0}; @@ -464,6 +464,9 @@ void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub) REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); dmub->debug.is_dmcub_enabled = is_dmub_enabled; + REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait); + dmub->debug.is_pwait 
= is_pwait; + REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset); dmub->debug.is_dmcub_soft_reset = is_soft_reset; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index acca7943a8c8..b17a19400c06 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -65,6 +65,12 @@ /* Default scratch mem size. */ #define DMUB_SCRATCH_MEM_SIZE (1024) +/* Default indirect buffer size. */ +#define DMUB_IB_MEM_SIZE (1280) + +/* Default LSDMA ring buffer size. */ +#define DMUB_LSDMA_RB_SIZE (64 * 1024) + /* Number of windows in use. */ #define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL) /* Base addresses. */ @@ -559,7 +565,9 @@ enum dmub_status window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size; window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size; window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE; + window_sizes[DMUB_WINDOW_IB_MEM] = DMUB_IB_MEM_SIZE; window_sizes[DMUB_WINDOW_SHARED_STATE] = max(DMUB_FW_HEADER_SHARED_STATE_SIZE, shared_state_size); + window_sizes[DMUB_WINDOW_LSDMA_BUFFER] = DMUB_LSDMA_RB_SIZE; out->fb_size = dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB); @@ -645,6 +653,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF]; struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE]; struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM]; + struct dmub_fb *ib_mem_gart = params->fb[DMUB_WINDOW_IB_MEM]; struct dmub_fb *shared_state_fb = params->fb[DMUB_WINDOW_SHARED_STATE]; struct dmub_rb_init_params rb_params, outbox0_rb_params; @@ -655,7 +664,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, return DMUB_STATUS_INVALID; if (!inst_fb || !stack_fb || !data_fb || !bios_fb || !mail_fb || - !tracebuff_fb || !fw_state_fb || !scratch_mem_fb) { + !tracebuff_fb || !fw_state_fb || !scratch_mem_fb || !ib_mem_gart) { ASSERT(0); return DMUB_STATUS_INVALID; } @@ -741,6 +750,8 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, dmub->scratch_mem_fb = *scratch_mem_fb; + dmub->ib_mem_gart = *ib_mem_gart; + if (dmub->hw_funcs.setup_windows) dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6, ®ion6); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c index 567c5b1aeb7a..e7a58b140388 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c @@ -71,7 +71,7 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub, switch (cmd.cmd_common.header.type) { case DMUB_OUT_CMD__DP_AUX_REPLY: notify->type = DMUB_NOTIFICATION_AUX_REPLY; - notify->link_index = cmd.dp_aux_reply.control.instance; + notify->instance = cmd.dp_aux_reply.control.instance; notify->result = cmd.dp_aux_reply.control.result; dmub_memcpy((void *)¬ify->aux_reply, (void *)&cmd.dp_aux_reply.reply_data, sizeof(struct aux_reply_data)); @@ -84,17 +84,17 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub, notify->type = DMUB_NOTIFICATION_HPD_IRQ; } - notify->link_index = cmd.dp_hpd_notify.hpd_data.instance; + notify->instance = cmd.dp_hpd_notify.hpd_data.instance; notify->result = AUX_RET_SUCCESS; break; case DMUB_OUT_CMD__SET_CONFIG_REPLY: notify->type = DMUB_NOTIFICATION_SET_CONFIG_REPLY; - notify->link_index = cmd.set_config_reply.set_config_reply_control.instance; + 
notify->instance = cmd.set_config_reply.set_config_reply_control.instance; notify->sc_status = cmd.set_config_reply.set_config_reply_control.status; break; case DMUB_OUT_CMD__DPIA_NOTIFICATION: notify->type = DMUB_NOTIFICATION_DPIA_NOTIFICATION; - notify->link_index = cmd.dpia_notification.payload.header.instance; + notify->instance = cmd.dpia_notification.payload.header.instance; break; case DMUB_OUT_CMD__HPD_SENSE_NOTIFY: notify->type = DMUB_NOTIFICATION_HPD_SENSE_NOTIFY; diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index 5fc29164e4b4..8aea50aa9533 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -213,6 +213,11 @@ enum { #endif #define DEVICE_ID_NV_13FE 0x13FE // CYAN_SKILLFISH #define DEVICE_ID_NV_143F 0x143F +#define DEVICE_ID_NV_13F9 0x13F9 +#define DEVICE_ID_NV_13FA 0x13FA +#define DEVICE_ID_NV_13FB 0x13FB +#define DEVICE_ID_NV_13FC 0x13FC +#define DEVICE_ID_NV_13DB 0x13DB #define FAMILY_VGH 144 #define DEVICE_ID_VGH_163F 0x163F #define DEVICE_ID_VGH_1435 0x1435 diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 250f09922d2f..ce421bcddcb0 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -147,7 +147,7 @@ unsigned int mod_freesync_calc_v_total_from_refresh( ((unsigned int)(div64_u64((1000000000ULL * 1000000), refresh_in_uhz))); - if (MICRO_HZ_TO_HZ(refresh_in_uhz) <= stream->timing.min_refresh_in_uhz) { + if (refresh_in_uhz <= stream->timing.min_refresh_in_uhz) { /* When the target refresh rate is the minimum panel refresh rate, * round down the vtotal value to avoid stretching vblank over * panel's vtotal boundary. @@ -226,8 +226,8 @@ static void update_v_total_for_static_ramp( unsigned int target_duration_in_us = calc_duration_in_us_from_refresh_in_uhz( in_out_vrr->fixed.target_refresh_in_uhz); - bool ramp_direction_is_up = (current_duration_in_us > - target_duration_in_us) ? 
true : false; + bool ramp_direction_is_up = current_duration_in_us > + target_duration_in_us; /* Calculate ratio between new and current frame duration with 3 digit */ unsigned int frame_duration_ratio = div64_u64(1000000, diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c index 5e01c6e24cbc..c760216a6240 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c @@ -29,6 +29,7 @@ static void push_error_status(struct mod_hdcp *hdcp, enum mod_hdcp_status status) { struct mod_hdcp_trace *trace = &hdcp->connection.trace; + const uint8_t retry_limit = hdcp->connection.link.adjust.retry_limit; if (trace->error_count < MAX_NUM_OF_ERROR_TRACE) { trace->errors[trace->error_count].status = status; @@ -39,11 +40,11 @@ static void push_error_status(struct mod_hdcp *hdcp, if (is_hdcp1(hdcp)) { hdcp->connection.hdcp1_retry_count++; - if (hdcp->connection.hdcp1_retry_count == MAX_NUM_OF_ATTEMPTS) + if (hdcp->connection.hdcp1_retry_count == retry_limit) hdcp->connection.link.adjust.hdcp1.disable = 1; } else if (is_hdcp2(hdcp)) { hdcp->connection.hdcp2_retry_count++; - if (hdcp->connection.hdcp2_retry_count == MAX_NUM_OF_ATTEMPTS) + if (hdcp->connection.hdcp2_retry_count == retry_limit) hdcp->connection.link.adjust.hdcp2.disable = 1; } } diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index e58e7b93810b..6b7db8ec9a53 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -260,6 +260,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp) return MOD_HDCP_STATUS_FAILURE; } + if (!display) + return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf; mutex_lock(&psp->hdcp_context.mutex); diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h index c42468bb70ac..b51ddf2846df 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h @@ -220,6 +220,7 @@ struct mod_hdcp_link_adjustment_hdcp2 { struct mod_hdcp_link_adjustment { uint8_t auth_delay; + uint8_t retry_limit; struct mod_hdcp_link_adjustment_hdcp1 hdcp1; struct mod_hdcp_link_adjustment_hdcp2 hdcp2; }; diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h index 758a8aa31fbe..391209a3bf29 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h @@ -79,4 +79,6 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link, bool fill_custom_backlight_caps(unsigned int config_no, struct dm_acpi_atif_backlight_caps *caps); void reset_replay_dsync_error_count(struct dc_link *link); +void change_replay_to_psr(struct dc_link *link); +void change_psr_to_replay(struct dc_link *link); #endif /* MODULES_POWER_POWER_HELPERS_H_ */ diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 11374a2cbab8..75efda2969cf 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -239,18 +239,51 @@ enum amd_harvest_ip_mask { AMD_HARVEST_IP_DMU_MASK = 0x4, }; +/** + * enum DC_FEATURE_MASK - Bits that control DC feature defaults + */ enum DC_FEATURE_MASK 
{ //Default value can be found at "uint amdgpu_dc_feature_mask" - DC_FBC_MASK = (1 << 0), //0x1, disabled by default - DC_MULTI_MON_PP_MCLK_SWITCH_MASK = (1 << 1), //0x2, enabled by default - DC_DISABLE_FRACTIONAL_PWM_MASK = (1 << 2), //0x4, disabled by default - DC_PSR_MASK = (1 << 3), //0x8, disabled by default for dcn < 3.1 - DC_EDP_NO_POWER_SEQUENCING = (1 << 4), //0x10, disabled by default - DC_DISABLE_LTTPR_DP1_4A = (1 << 5), //0x20, disabled by default - DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default - DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default - DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default - DC_REPLAY_MASK = (1 << 9), //0x200, disabled by default for dcn < 3.1.4 + /** + * @DC_FBC_MASK: (0x1) disabled by default + */ + DC_FBC_MASK = (1 << 0), + /** + * @DC_MULTI_MON_PP_MCLK_SWITCH_MASK: (0x2) enabled by default + */ + DC_MULTI_MON_PP_MCLK_SWITCH_MASK = (1 << 1), + /** + * @DC_DISABLE_FRACTIONAL_PWM_MASK: (0x4) disabled by default + */ + DC_DISABLE_FRACTIONAL_PWM_MASK = (1 << 2), + /** + * @DC_PSR_MASK: (0x8) disabled by default for DCN < 3.1 + */ + DC_PSR_MASK = (1 << 3), + /** + * @DC_EDP_NO_POWER_SEQUENCING: (0x10) disabled by default + */ + DC_EDP_NO_POWER_SEQUENCING = (1 << 4), + /** + * @DC_DISABLE_LTTPR_DP1_4A: (0x20) disabled by default + */ + DC_DISABLE_LTTPR_DP1_4A = (1 << 5), + /** + * @DC_DISABLE_LTTPR_DP2_0: (0x40) disabled by default + */ + DC_DISABLE_LTTPR_DP2_0 = (1 << 6), + /** + * @DC_PSR_ALLOW_SMU_OPT: (0x80) disabled by default + */ + DC_PSR_ALLOW_SMU_OPT = (1 << 7), + /** + * @DC_PSR_ALLOW_MULTI_DISP_OPT: (0x100) disabled by default + */ + DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), + /** + * @DC_REPLAY_MASK: (0x200) disabled by default for DCN < 3.1.4 + */ + DC_REPLAY_MASK = (1 << 9), }; /** @@ -258,64 +291,64 @@ enum DC_FEATURE_MASK { */ enum DC_DEBUG_MASK { /** - * @DC_DISABLE_PIPE_SPLIT: If set, disable pipe-splitting + * @DC_DISABLE_PIPE_SPLIT: (0x1) If set, disable pipe-splitting */ DC_DISABLE_PIPE_SPLIT = 0x1, /** - * @DC_DISABLE_STUTTER: If set, disable memory stutter mode + * @DC_DISABLE_STUTTER: (0x2) If set, disable memory stutter mode */ DC_DISABLE_STUTTER = 0x2, /** - * @DC_DISABLE_DSC: If set, disable display stream compression + * @DC_DISABLE_DSC: (0x4) If set, disable display stream compression */ DC_DISABLE_DSC = 0x4, /** - * @DC_DISABLE_CLOCK_GATING: If set, disable clock gating optimizations + * @DC_DISABLE_CLOCK_GATING: (0x8) If set, disable clock gating optimizations */ DC_DISABLE_CLOCK_GATING = 0x8, /** - * @DC_DISABLE_PSR: If set, disable Panel self refresh v1 and PSR-SU + * @DC_DISABLE_PSR: (0x10) If set, disable Panel self refresh v1 and PSR-SU */ DC_DISABLE_PSR = 0x10, /** - * @DC_FORCE_SUBVP_MCLK_SWITCH: If set, force mclk switch in subvp, even + * @DC_FORCE_SUBVP_MCLK_SWITCH: (0x20) If set, force mclk switch in subvp, even * if mclk switch in vblank is possible */ DC_FORCE_SUBVP_MCLK_SWITCH = 0x20, /** - * @DC_DISABLE_MPO: If set, disable multi-plane offloading + * @DC_DISABLE_MPO: (0x40) If set, disable multi-plane offloading */ DC_DISABLE_MPO = 0x40, /** - * @DC_ENABLE_DPIA_TRACE: If set, enable trace logging for DPIA + * @DC_ENABLE_DPIA_TRACE: (0x80) If set, enable trace logging for DPIA */ DC_ENABLE_DPIA_TRACE = 0x80, /** - * @DC_ENABLE_DML2: If set, force usage of DML2, even if the DCN version + * @DC_ENABLE_DML2: (0x100) If set, force usage of DML2, even if the DCN version * does not default to it. 
*/ DC_ENABLE_DML2 = 0x100, /** - * @DC_DISABLE_PSR_SU: If set, disable PSR SU + * @DC_DISABLE_PSR_SU: (0x200) If set, disable PSR SU */ DC_DISABLE_PSR_SU = 0x200, /** - * @DC_DISABLE_REPLAY: If set, disable Panel Replay + * @DC_DISABLE_REPLAY: (0x400) If set, disable Panel Replay */ DC_DISABLE_REPLAY = 0x400, /** - * @DC_DISABLE_IPS: If set, disable all Idle Power States, all the time. + * @DC_DISABLE_IPS: (0x800) If set, disable all Idle Power States, all the time. * If more than one IPS debug bit is set, the lowest bit takes * precedence. For example, if DC_FORCE_IPS_ENABLE and * DC_DISABLE_IPS_DYNAMIC are set, then DC_DISABLE_IPS_DYNAMIC takes @@ -324,56 +357,57 @@ enum DC_DEBUG_MASK { DC_DISABLE_IPS = 0x800, /** - * @DC_DISABLE_IPS_DYNAMIC: If set, disable all IPS, all the time, + * @DC_DISABLE_IPS_DYNAMIC: (0x1000) If set, disable all IPS, all the time, * *except* when driver goes into suspend. */ DC_DISABLE_IPS_DYNAMIC = 0x1000, /** - * @DC_DISABLE_IPS2_DYNAMIC: If set, disable IPS2 (IPS1 allowed) if + * @DC_DISABLE_IPS2_DYNAMIC: (0x2000) If set, disable IPS2 (IPS1 allowed) if * there is an enabled display. Otherwise, enable all IPS. */ DC_DISABLE_IPS2_DYNAMIC = 0x2000, /** - * @DC_FORCE_IPS_ENABLE: If set, force enable all IPS, all the time. + * @DC_FORCE_IPS_ENABLE: (0x4000) If set, force enable all IPS, all the time. */ DC_FORCE_IPS_ENABLE = 0x4000, /** - * @DC_DISABLE_ACPI_EDID: If set, don't attempt to fetch EDID for + * @DC_DISABLE_ACPI_EDID: (0x8000) If set, don't attempt to fetch EDID for * eDP display from ACPI _DDC method. */ DC_DISABLE_ACPI_EDID = 0x8000, /** - * @DC_DISABLE_HDMI_CEC: If set, disable HDMI-CEC feature in amdgpu driver. + * @DC_DISABLE_HDMI_CEC: (0x10000) If set, disable HDMI-CEC feature in amdgpu driver. */ DC_DISABLE_HDMI_CEC = 0x10000, /** - * @DC_DISABLE_SUBVP_FAMS: If set, disable DCN Sub-Viewport & Firmware Assisted + * @DC_DISABLE_SUBVP_FAMS: (0x20000) If set, disable DCN Sub-Viewport & Firmware Assisted * Memory Clock Switching (FAMS) feature in amdgpu driver. */ DC_DISABLE_SUBVP_FAMS = 0x20000, /** - * @DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE: If set, disable support for custom brightness curves + * @DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE: (0x40000) If set, disable support for custom + * brightness curves */ DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE = 0x40000, /** - * @DC_HDCP_LC_FORCE_FW_ENABLE: If set, use HDCP Locality Check FW + * @DC_HDCP_LC_FORCE_FW_ENABLE: (0x80000) If set, use HDCP Locality Check FW * path regardless of reported HW capabilities. */ DC_HDCP_LC_FORCE_FW_ENABLE = 0x80000, /** - * @DC_HDCP_LC_ENABLE_SW_FALLBACK: If set, upon HDCP Locality Check FW + * @DC_HDCP_LC_ENABLE_SW_FALLBACK: (0x100000) If set, upon HDCP Locality Check FW * path failure, retry using legacy SW path. 
*/ DC_HDCP_LC_ENABLE_SW_FALLBACK = 0x100000, /** - * @DC_SKIP_DETECTION_LT: If set, skip detection link training + * @DC_SKIP_DETECTION_LT: (0x200000) If set, skip detection link training */ DC_SKIP_DETECTION_LT = 0x200000, }; @@ -396,6 +430,7 @@ enum amd_dpm_forced_level; * (such as allocating any required memory) * @suspend: handles IP specific hw/sw changes for suspend * @resume: handles IP specific hw/sw changes for resume + * @complete: handles IP specific changes after resume * @is_idle: returns current IP block idle status * @wait_for_idle: poll for idle * @check_soft_reset: check soft reset the IP block @@ -427,6 +462,7 @@ struct amd_ip_funcs { int (*prepare_suspend)(struct amdgpu_ip_block *ip_block); int (*suspend)(struct amdgpu_ip_block *ip_block); int (*resume)(struct amdgpu_ip_block *ip_block); + void (*complete)(struct amdgpu_ip_block *ip_block); bool (*is_idle)(struct amdgpu_ip_block *ip_block); int (*wait_for_idle)(struct amdgpu_ip_block *ip_block); bool (*check_soft_reset)(struct amdgpu_ip_block *ip_block); diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index 5c86423c2e92..3d083010e734 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -211,7 +211,7 @@ atom_bios_string = "ATOM" }; */ -#pragma pack(1) /* BIOS data must use byte aligment*/ +#pragma pack(1) /* BIOS data must use byte alignment*/ enum atombios_image_offset{ OFFSET_TO_ATOM_ROM_HEADER_POINTER = 0x00000048, @@ -255,8 +255,8 @@ struct atom_rom_header_v2_2 uint16_t subsystem_vendor_id; uint16_t subsystem_id; uint16_t pci_info_offset; - uint16_t masterhwfunction_offset; //Offest for SW to get all command function offsets, Don't change the position - uint16_t masterdatatable_offset; //Offest for SW to get all data table offsets, Don't change the position + uint16_t masterhwfunction_offset; //Offset for SW to get all command function offsets, Don't change the position + uint16_t masterdatatable_offset; //Offset for SW to get all data table offsets, Don't change the position uint16_t reserved; uint32_t pspdirtableoffset; }; @@ -453,7 +453,7 @@ struct atom_dtd_format uint8_t refreshrate; }; -/* atom_dtd_format.modemiscinfo defintion */ +/* atom_dtd_format.modemiscinfo definition */ enum atom_dtd_format_modemiscinfo{ ATOM_HSYNC_POLARITY = 0x0002, ATOM_VSYNC_POLARITY = 0x0004, @@ -678,7 +678,7 @@ struct lcd_info_v2_1 uint32_t reserved1[8]; }; -/* lcd_info_v2_1.panel_misc defintion */ +/* lcd_info_v2_1.panel_misc definition */ enum atom_lcd_info_panel_misc{ ATOM_PANEL_MISC_FPDI =0x0002, }; @@ -716,7 +716,7 @@ enum atom_gpio_pin_assignment_gpio_id { /* gpio_id pre-define id for multiple usage */ /* GPIO use to control PCIE_VDDC in certain SLT board */ PCIE_VDDC_CONTROL_GPIO_PINID = 56, - /* if PP_AC_DC_SWITCH_GPIO_PINID in Gpio_Pin_LutTable, AC/DC swithing feature is enable */ + /* if PP_AC_DC_SWITCH_GPIO_PINID in Gpio_Pin_LutTable, AC/DC switching feature is enable */ PP_AC_DC_SWITCH_GPIO_PINID = 60, /* VDDC_REGULATOR_VRHOT_GPIO_PINID in Gpio_Pin_LutTable, VRHot feature is enable */ VDDC_VRHOT_GPIO_PINID = 61, @@ -734,7 +734,7 @@ enum atom_gpio_pin_assignment_gpio_id { struct atom_gpio_pin_lut_v2_1 { struct atom_common_table_header table_header; - /*the real number of this included in the structure is calcualted by using the (whole structure size - the header size)/size of atom_gpio_pin_lut */ + /*the real number of this included in the structure is calculated by using the (whole structure size - the header 
size)/size of atom_gpio_pin_lut */ struct atom_gpio_pin_assignment gpio_pin[]; }; @@ -997,7 +997,7 @@ enum atom_connector_layout_info_mini_type_def { enum atom_display_device_tag_def{ ATOM_DISPLAY_LCD1_SUPPORT = 0x0002, //an embedded display is either an LVDS or eDP signal type of display - ATOM_DISPLAY_LCD2_SUPPORT = 0x0020, //second edp device tag 0x0020 for backward compability + ATOM_DISPLAY_LCD2_SUPPORT = 0x0020, //second edp device tag 0x0020 for backward compatibility ATOM_DISPLAY_DFP1_SUPPORT = 0x0008, ATOM_DISPLAY_DFP2_SUPPORT = 0x0080, ATOM_DISPLAY_DFP3_SUPPORT = 0x0200, @@ -1011,7 +1011,7 @@ struct atom_display_object_path_v2 { uint16_t display_objid; //Connector Object ID or Misc Object ID uint16_t disp_recordoffset; - uint16_t encoderobjid; //first encoder closer to the connector, could be either an external or intenal encoder + uint16_t encoderobjid; //first encoder closer to the connector, could be either an external or internal encoder uint16_t extencoderobjid; //2nd encoder after the first encoder, from the connector point of view; uint16_t encoder_recordoffset; uint16_t extencoder_recordoffset; @@ -1023,7 +1023,7 @@ struct atom_display_object_path_v2 struct atom_display_object_path_v3 { uint16_t display_objid; //Connector Object ID or Misc Object ID uint16_t disp_recordoffset; - uint16_t encoderobjid; //first encoder closer to the connector, could be either an external or intenal encoder + uint16_t encoderobjid; //first encoder closer to the connector, could be either an external or internal encoder uint16_t reserved1; //only on USBC case, otherwise always = 0 uint16_t reserved2; //reserved and always = 0 uint16_t reserved3; //reserved and always = 0 @@ -3547,7 +3547,7 @@ struct atom_voltage_object_header_v4{ enum atom_voltage_object_mode { VOLTAGE_OBJ_GPIO_LUT = 0, //VOLTAGE and GPIO Lookup table ->atom_gpio_voltage_object_v4 - VOLTAGE_OBJ_VR_I2C_INIT_SEQ = 3, //VOLTAGE REGULATOR INIT sequece through I2C -> atom_i2c_voltage_object_v4 + VOLTAGE_OBJ_VR_I2C_INIT_SEQ = 3, //VOLTAGE REGULATOR INIT sequence through I2C -> atom_i2c_voltage_object_v4 VOLTAGE_OBJ_PHASE_LUT = 4, //Set Vregulator Phase lookup table ->atom_gpio_voltage_object_v4 VOLTAGE_OBJ_SVID2 = 7, //Indicate voltage control by SVID2 ->atom_svid2_voltage_object_v4 VOLTAGE_OBJ_EVV = 8, @@ -3585,7 +3585,7 @@ struct atom_gpio_voltage_object_v4 { struct atom_voltage_object_header_v4 header; // voltage mode = VOLTAGE_OBJ_GPIO_LUT or VOLTAGE_OBJ_PHASE_LUT uint8_t gpio_control_id; // default is 0 which indicate control through CG VID mode - uint8_t gpio_entry_num; // indiate the entry numbers of Votlage/Gpio value Look up table + uint8_t gpio_entry_num; // indicate the entry numbers of Votlage/Gpio value Look up table uint8_t phase_delay_us; // phase delay in unit of micro second uint8_t reserved; uint32_t gpio_mask_val; // GPIO Mask value @@ -4507,8 +4507,8 @@ struct amd_acpi_description_header{ struct uefi_acpi_vfct{ struct amd_acpi_description_header sheader; uint8_t tableUUID[16]; //0x24 - uint32_t vbiosimageoffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the stucture. - uint32_t lib1Imageoffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the stucture. + uint32_t vbiosimageoffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the structure. + uint32_t lib1Imageoffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the structure. 
uint32_t reserved[4]; //0x3C }; @@ -4540,7 +4540,7 @@ struct gop_lib1_content { /* *************************************************************************** Scratch Register definitions - Each number below indicates which scratch regiser request, Active and + Each number below indicates which scratch register request, Active and Connect all share the same definitions as display_device_tag defines *************************************************************************** */ diff --git a/drivers/gpu/drm/amd/include/dm_pp_interface.h b/drivers/gpu/drm/amd/include/dm_pp_interface.h index acd1cef61b7c..349544504c93 100644 --- a/drivers/gpu/drm/amd/include/dm_pp_interface.h +++ b/drivers/gpu/drm/amd/include/dm_pp_interface.h @@ -65,6 +65,7 @@ struct single_display_configuration { uint32_t view_resolution_cy; enum amd_pp_display_config_type displayconfigtype; uint32_t vertical_refresh; /* for active display */ + uint32_t pixel_clock; /* Pixel clock in KHz (for HDMI only: normalized) */ }; #define MAX_NUM_DISPLAY 32 diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index f4d914dc731f..2b0cdb2a2775 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -30,6 +30,12 @@ extern const struct amdgpu_ip_block_version smu_v12_0_ip_block; extern const struct amdgpu_ip_block_version smu_v13_0_ip_block; extern const struct amdgpu_ip_block_version smu_v14_0_ip_block; +enum smu_temp_metric_type { + SMU_TEMP_METRIC_BASEBOARD, + SMU_TEMP_METRIC_GPUBOARD, + SMU_TEMP_METRIC_MAX, +}; + enum smu_event_type { SMU_EVENT_RESET_COMPLETE = 0, }; @@ -108,6 +114,8 @@ enum pp_clock_type { PP_VCLK1, PP_DCLK, PP_DCLK1, + PP_ISPICLK, + PP_ISPXCLK, OD_SCLK, OD_MCLK, OD_VDDC_CURVE, @@ -154,6 +162,10 @@ enum amd_pp_sensors { AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK, AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK, AMDGPU_PP_SENSOR_VCN_LOAD, + AMDGPU_PP_SENSOR_NODEPOWERLIMIT, + AMDGPU_PP_SENSOR_NODEPOWER, + AMDGPU_PP_SENSOR_GPPTRESIDENCY, + AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT, }; enum amd_pp_task { @@ -494,6 +506,8 @@ struct amd_pm_funcs { int (*set_df_cstate)(void *handle, enum pp_df_cstate state); int (*set_xgmi_pstate)(void *handle, uint32_t pstate); ssize_t (*get_gpu_metrics)(void *handle, void **table); + ssize_t (*get_temp_metrics)(void *handle, enum smu_temp_metric_type type, void *table); + bool (*temp_metrics_is_supported)(void *handle, enum smu_temp_metric_type type); ssize_t (*get_xcp_metrics)(void *handle, int xcp_id, void *table); ssize_t (*get_pm_metrics)(void *handle, void *pmmetrics, size_t size); int (*set_watermarks_for_clock_ranges)(void *handle, @@ -1593,6 +1607,79 @@ struct amdgpu_pm_metrics { uint8_t data[]; }; +enum amdgpu_vr_temp { + AMDGPU_VDDCR_VDD0_TEMP, + AMDGPU_VDDCR_VDD1_TEMP, + AMDGPU_VDDCR_VDD2_TEMP, + AMDGPU_VDDCR_VDD3_TEMP, + AMDGPU_VDDCR_SOC_A_TEMP, + AMDGPU_VDDCR_SOC_C_TEMP, + AMDGPU_VDDCR_SOCIO_A_TEMP, + AMDGPU_VDDCR_SOCIO_C_TEMP, + AMDGPU_VDD_085_HBM_TEMP, + AMDGPU_VDDCR_11_HBM_B_TEMP, + AMDGPU_VDDCR_11_HBM_D_TEMP, + AMDGPU_VDD_USR_TEMP, + AMDGPU_VDDIO_11_E32_TEMP, + AMDGPU_VR_MAX_TEMP_ENTRIES, +}; + +enum amdgpu_system_temp { + AMDGPU_UBB_FPGA_TEMP, + AMDGPU_UBB_FRONT_TEMP, + AMDGPU_UBB_BACK_TEMP, + AMDGPU_UBB_OAM7_TEMP, + AMDGPU_UBB_IBC_TEMP, + AMDGPU_UBB_UFPGA_TEMP, + AMDGPU_UBB_OAM1_TEMP, + AMDGPU_OAM_0_1_HSC_TEMP, + AMDGPU_OAM_2_3_HSC_TEMP, + AMDGPU_OAM_4_5_HSC_TEMP, + AMDGPU_OAM_6_7_HSC_TEMP, + AMDGPU_UBB_FPGA_0V72_VR_TEMP, + AMDGPU_UBB_FPGA_3V3_VR_TEMP, + 
AMDGPU_RETIMER_0_1_2_3_1V2_VR_TEMP, + AMDGPU_RETIMER_4_5_6_7_1V2_VR_TEMP, + AMDGPU_RETIMER_0_1_0V9_VR_TEMP, + AMDGPU_RETIMER_4_5_0V9_VR_TEMP, + AMDGPU_RETIMER_2_3_0V9_VR_TEMP, + AMDGPU_RETIMER_6_7_0V9_VR_TEMP, + AMDGPU_OAM_0_1_2_3_3V3_VR_TEMP, + AMDGPU_OAM_4_5_6_7_3V3_VR_TEMP, + AMDGPU_IBC_HSC_TEMP, + AMDGPU_IBC_TEMP, + AMDGPU_SYSTEM_MAX_TEMP_ENTRIES = 32, +}; + +enum amdgpu_node_temp { + AMDGPU_RETIMER_X_TEMP, + AMDGPU_OAM_X_IBC_TEMP, + AMDGPU_OAM_X_IBC_2_TEMP, + AMDGPU_OAM_X_VDD18_VR_TEMP, + AMDGPU_OAM_X_04_HBM_B_VR_TEMP, + AMDGPU_OAM_X_04_HBM_D_VR_TEMP, + AMDGPU_NODE_MAX_TEMP_ENTRIES = 12, +}; + +struct amdgpu_gpuboard_temp_metrics_v1_0 { + struct metrics_table_header common_header; + uint16_t label_version; + uint16_t node_id; + uint64_t accumulation_counter; + /* Encoded temperature in Celsius, 24:31 is sensor id, 0:23 is temp value */ + uint32_t node_temp[AMDGPU_NODE_MAX_TEMP_ENTRIES]; + uint32_t vr_temp[AMDGPU_VR_MAX_TEMP_ENTRIES]; +}; + +struct amdgpu_baseboard_temp_metrics_v1_0 { + struct metrics_table_header common_header; + uint16_t label_version; + uint16_t node_id; + uint64_t accumulation_counter; + /* Encoded temperature in Celsius, 24:31 is sensor id, 0:23 is temp value */ + uint32_t system_temp[AMDGPU_SYSTEM_MAX_TEMP_ENTRIES]; +}; + struct amdgpu_partition_metrics_v1_0 { struct metrics_table_header common_header; /* Current clocks (Mhz) */ diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h index 15680c3f4970..ab1cfc92dbeb 100644 --- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h +++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h @@ -238,7 +238,8 @@ union MESAPI_SET_HW_RESOURCES { uint32_t enable_mes_sch_stb_log : 1; uint32_t limit_single_process : 1; uint32_t is_strix_tmz_wa_enabled :1; - uint32_t reserved : 13; + uint32_t enable_lr_compute_wa : 1; + uint32_t reserved : 12; }; uint32_t uint32_t_all; }; diff --git a/drivers/gpu/drm/amd/include/mes_v12_api_def.h b/drivers/gpu/drm/amd/include/mes_v12_api_def.h index d85ffab2aff9..69611c7e30e3 100644 --- a/drivers/gpu/drm/amd/include/mes_v12_api_def.h +++ b/drivers/gpu/drm/amd/include/mes_v12_api_def.h @@ -66,6 +66,7 @@ enum MES_SCH_API_OPCODE { MES_SCH_API_SET_SE_MODE = 17, MES_SCH_API_SET_GANG_SUBMIT = 18, MES_SCH_API_SET_HW_RSRC_1 = 19, + MES_SCH_API_INV_TLBS = 20, MES_SCH_API_MAX = 0xFF }; @@ -286,7 +287,8 @@ union MESAPI_SET_HW_RESOURCES { uint32_t limit_single_process : 1; uint32_t unmapped_doorbell_handling: 2; uint32_t enable_mes_fence_int: 1; - uint32_t reserved : 10; + uint32_t enable_lr_compute_wa : 1; + uint32_t reserved : 9; }; uint32_t uint32_all; }; @@ -870,6 +872,35 @@ union MESAPI__SET_GANG_SUBMIT { uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; }; +/* + * @inv_sel 0-select pasid as input to do the invalidation, 1-select vmid + * @flush_type 0-old style, 1-light weight, 2-heavyweight, 3-heavyweight2 + * @inv_sel_id specific pasid when inv_sel is 0 and specific vmid if inv_sel is 1 + * @hub_id 0-gc_hub, 1-mm_hub + */ +struct INV_TLBS { + uint8_t inv_sel; + uint8_t flush_type; + uint16_t inv_sel_id; + uint32_t hub_id; + /* If the following two inv_range settings are both 0, the whole VM will be invalidated, + * otherwise only the required range will be invalidated + */ + uint64_t inv_range_va_start; + uint64_t inv_range_size; + uint64_t reserved; +}; + +union MESAPI__INV_TLBS { + struct { + union MES_API_HEADER header; + struct MES_API_STATUS api_status; + struct INV_TLBS invalidate_tlbs; + }; + + uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; +}; + #pragma
pack(pop) #endif diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 5c1cbdc122d2..518d07afc7df 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -98,6 +98,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, case AMD_IP_BLOCK_TYPE_GMC: case AMD_IP_BLOCK_TYPE_ACP: case AMD_IP_BLOCK_TYPE_VPE: + case AMD_IP_BLOCK_TYPE_ISP: if (pp_funcs && pp_funcs->set_powergating_by_smu) ret = (pp_funcs->set_powergating_by_smu( (adev)->powerplay.pp_handle, block_type, gate, 0)); @@ -763,10 +764,6 @@ int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev) ret = smu_send_rma_reason(smu); mutex_unlock(&adev->pm.mutex); - if (adev->cper.enabled) - if (amdgpu_cper_generate_bp_threshold_record(adev)) - dev_warn(adev->dev, "fail to generate bad page threshold cper records\n"); - return ret; } @@ -823,6 +820,21 @@ int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask) return ret; } +bool amdgpu_dpm_reset_vcn_is_supported(struct amdgpu_device *adev) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + bool ret; + + if (!is_support_sw_smu(adev)) + return false; + + mutex_lock(&adev->pm.mutex); + ret = smu_reset_vcn_is_supported(smu); + mutex_unlock(&adev->pm.mutex); + + return ret; +} + int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t *min, @@ -852,22 +864,16 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev, uint32_t max) { struct smu_context *smu = adev->powerplay.pp_handle; - int ret = 0; - - if (type != PP_SCLK) - return -EINVAL; if (!is_support_sw_smu(adev)) return -EOPNOTSUPP; - mutex_lock(&adev->pm.mutex); - ret = smu_set_soft_freq_range(smu, - SMU_SCLK, + guard(mutex)(&adev->pm.mutex); + + return smu_set_soft_freq_range(smu, + type, min, max); - mutex_unlock(&adev->pm.mutex); - - return ret; } int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev) @@ -2043,6 +2049,66 @@ int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev, } /** + * amdgpu_dpm_get_temp_metrics - Retrieve temperature metrics for a specific + * temperature type + * @adev: Pointer to the device. + * @type: Identifier for the temperature type metrics to be fetched. + * @table: Pointer to a buffer where the metrics will be stored. If NULL, the + * function returns the size of the metrics structure. + * + * This function retrieves metrics for a specific temperature type. If the + * table parameter is NULL, the function returns the size of the metrics + * structure without populating it. + * + * Return: Size of the metrics structure on success, or a negative error code on failure. + */ +ssize_t amdgpu_dpm_get_temp_metrics(struct amdgpu_device *adev, + enum smu_temp_metric_type type, void *table) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret; + + if (!pp_funcs->get_temp_metrics || + !amdgpu_dpm_is_temp_metrics_supported(adev, type)) + return -EOPNOTSUPP; + + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->get_temp_metrics(adev->powerplay.pp_handle, type, table); + mutex_unlock(&adev->pm.mutex); + + return ret; +} + +/** + * amdgpu_dpm_is_temp_metrics_supported - Return whether specific temperature metrics support + * is available + * @adev: Pointer to the device. + * @type: Identifier for the temperature type metrics to be fetched. + * + * This function returns whether the specified temperature metrics type is supported. + * + * Return: True if the metrics type is supported, false otherwise.
+ */ +bool amdgpu_dpm_is_temp_metrics_supported(struct amdgpu_device *adev, + enum smu_temp_metric_type type) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + bool support_temp_metrics = false; + + if (!pp_funcs->temp_metrics_is_supported) + return support_temp_metrics; + + if (is_support_sw_smu(adev)) { + mutex_lock(&adev->pm.mutex); + support_temp_metrics = + pp_funcs->temp_metrics_is_supported(adev->powerplay.pp_handle, type); + mutex_unlock(&adev->pm.mutex); + } + + return support_temp_metrics; +} + +/** * amdgpu_dpm_get_xcp_metrics - Retrieve metrics for a specific compute * partition * @adev: Pointer to the device. diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c index 42efe838fa85..b5e9c3ecf703 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c @@ -27,69 +27,69 @@ #include "amdgpu_smu.h" #include "amdgpu_dpm_internal.h" -void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev) +void amdgpu_dpm_get_display_cfg(struct amdgpu_device *adev) { struct drm_device *ddev = adev_to_drm(adev); + struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg; + struct single_display_configuration *display_cfg; struct drm_crtc *crtc; struct amdgpu_crtc *amdgpu_crtc; + struct amdgpu_connector *conn; + int num_crtcs = 0; + int vrefresh; + u32 vblank_in_pixels, vblank_time_us; + + cfg->min_vblank_time = 0xffffffff; /* if the displays are off, vblank time is max */ - adev->pm.dpm.new_active_crtcs = 0; - adev->pm.dpm.new_active_crtc_count = 0; if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { - list_for_each_entry(crtc, - &ddev->mode_config.crtc_list, head) { + list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { amdgpu_crtc = to_amdgpu_crtc(crtc); - if (amdgpu_crtc->enabled) { - adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id); - adev->pm.dpm.new_active_crtc_count++; - } - } - } -} -u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev) -{ - struct drm_device *dev = adev_to_drm(adev); - struct drm_crtc *crtc; - struct amdgpu_crtc *amdgpu_crtc; - u32 vblank_in_pixels; - u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */ + /* The array should only contain active displays. */ + if (!amdgpu_crtc->enabled) + continue; + + conn = to_amdgpu_connector(amdgpu_crtc->connector); + display_cfg = &adev->pm.pm_display_cfg.displays[num_crtcs++]; + + if (amdgpu_crtc->hw_mode.clock) { + vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode); - if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - amdgpu_crtc = to_amdgpu_crtc(crtc); - if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { vblank_in_pixels = amdgpu_crtc->hw_mode.crtc_htotal * (amdgpu_crtc->hw_mode.crtc_vblank_end - amdgpu_crtc->hw_mode.crtc_vdisplay + (amdgpu_crtc->v_border * 2)); - vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock; - break; - } - } - } + vblank_time_us = + vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock; - return vblank_time_us; -} + /* The legacy (non-DC) code has issues with mclk switching + * with refresh rates over 120 Hz. Disable mclk switching. 
+ */ + if (vrefresh > 120) + vblank_time_us = 0; -u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev) -{ - struct drm_device *dev = adev_to_drm(adev); - struct drm_crtc *crtc; - struct amdgpu_crtc *amdgpu_crtc; - u32 vrefresh = 0; + /* Find minimum vblank time. */ + if (vblank_time_us < cfg->min_vblank_time) + cfg->min_vblank_time = vblank_time_us; - if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - amdgpu_crtc = to_amdgpu_crtc(crtc); - if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { - vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode); - break; + /* Find vertical refresh rate of first active display. */ + if (!cfg->vrefresh) + cfg->vrefresh = vrefresh; } + + if (amdgpu_crtc->crtc_id < cfg->crtc_index) { + /* Find first active CRTC and its line time. */ + cfg->crtc_index = amdgpu_crtc->crtc_id; + cfg->line_time_in_us = amdgpu_crtc->line_time; + } + + display_cfg->controller_id = amdgpu_crtc->crtc_id; + display_cfg->pixel_clock = conn->pixelclock_for_modeset; } } - return vrefresh; + cfg->display_clk = adev->clock.default_dispclk; + cfg->num_display = num_crtcs; } diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index edd9895b46c0..b5fbb0fd1dc0 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -110,9 +110,10 @@ static int amdgpu_pm_dev_state_check(struct amdgpu_device *adev, bool runpm) bool runpm_check = runpm ? adev->in_runpm : false; if (amdgpu_in_reset(adev)) - return -EPERM; + return -EBUSY; + if (adev->in_suspend && !runpm_check) - return -EPERM; + return -EBUSY; return 0; } @@ -1398,6 +1399,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, if (ret) return -EINVAL; parameter_size++; + if (!tmp_str) + break; while (isspace(*tmp_str)) tmp_str++; } @@ -1418,9 +1421,9 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, return -EINVAL; } -static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev, - enum amd_pp_sensors sensor, - void *query) +static int amdgpu_pm_get_sensor_generic(struct amdgpu_device *adev, + enum amd_pp_sensors sensor, + void *query) { int r, size = sizeof(uint32_t); @@ -1453,7 +1456,7 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev, unsigned int value; int r; - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value); + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value); if (r) return r; @@ -1477,7 +1480,7 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev, unsigned int value; int r; - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value); + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value); if (r) return r; @@ -1501,7 +1504,7 @@ static ssize_t amdgpu_get_vcn_busy_percent(struct device *dev, unsigned int value; int r; - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value); + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value); if (r) return r; @@ -1780,7 +1783,7 @@ static int amdgpu_show_powershift_percent(struct device *dev, uint32_t ss_power; int r = 0, i; - r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power); + r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&ss_power); if (r == -EOPNOTSUPP) { /* sensor not available on dGPU, try to read from APU */ adev = NULL; @@ -1793,7 +1796,7 @@ static int 
amdgpu_show_powershift_percent(struct device *dev, } mutex_unlock(&mgpu_info.mutex); if (adev) - r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power); + r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&ss_power); } if (r) @@ -1890,7 +1893,7 @@ out: static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, uint32_t mask, enum amdgpu_device_attr_states *states) { - if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev))) + if (!amdgpu_device_supports_smart_shift(adev)) *states = ATTR_STATE_UNSUPPORTED; return 0; @@ -1901,13 +1904,13 @@ static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ { uint32_t ss_power; - if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev))) + if (!amdgpu_device_supports_smart_shift(adev)) *states = ATTR_STATE_UNSUPPORTED; - else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE, - (void *)&ss_power)) + else if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE, + (void *)&ss_power)) *states = ATTR_STATE_UNSUPPORTED; - else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE, - (void *)&ss_power)) + else if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE, + (void *)&ss_power)) *states = ATTR_STATE_UNSUPPORTED; return 0; @@ -2071,6 +2074,265 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd return 0; } +/** + * DOC: board + * + * Certain SOCs can support reporting of various board attributes. This is useful + * for user applications to monitor various board related attributes. + * + * The amdgpu driver provides a sysfs API for reporting board attributes. Presently, + * seven types of attributes are reported. Baseboard temperature and + * gpu board temperature are reported as binary files. Npm status, current node power limit, + * max node power limit, node power and global ppt residency are reported as ASCII text files. + * + * * .. code-block:: console + * + * hexdump /sys/bus/pci/devices/.../board/baseboard_temp + * + * hexdump /sys/bus/pci/devices/.../board/gpuboard_temp + * + * hexdump /sys/bus/pci/devices/.../board/npm_status + * + * hexdump /sys/bus/pci/devices/.../board/cur_node_power_limit + * + * hexdump /sys/bus/pci/devices/.../board/max_node_power_limit + * + * hexdump /sys/bus/pci/devices/.../board/node_power + * + * hexdump /sys/bus/pci/devices/.../board/global_ppt_resid + */ + +/** + * DOC: baseboard_temp + * + * The amdgpu driver provides a sysfs API for retrieving current baseboard + * temperature metrics data. The file baseboard_temp is used for this. + * Reading the file will dump all the current baseboard temperature metrics data. + */ +static ssize_t amdgpu_get_baseboard_temp_metrics(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + ssize_t size; + int ret; + + ret = amdgpu_pm_get_access_if_active(adev); + if (ret) + return ret; + + size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, NULL); + if (size <= 0) + goto out; + if (size >= PAGE_SIZE) { + ret = -ENOSPC; + goto out; + } + + amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, buf); + +out: + amdgpu_pm_put_access(adev); + + if (ret) + return ret; + + return size; +} + +/** + * DOC: gpuboard_temp + * + * The amdgpu driver provides a sysfs API for retrieving current gpuboard + * temperature metrics data. The file gpuboard_temp is used for this.
+ * Reading the file will dump all the current gpuboard temperature metrics data. + */ +static ssize_t amdgpu_get_gpuboard_temp_metrics(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + ssize_t size; + int ret; + + ret = amdgpu_pm_get_access_if_active(adev); + if (ret) + return ret; + + size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, NULL); + if (size <= 0) + goto out; + if (size >= PAGE_SIZE) { + ret = -ENOSPC; + goto out; + } + + amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, buf); + +out: + amdgpu_pm_put_access(adev); + + if (ret) + return ret; + + return size; +} + +/** + * DOC: cur_node_power_limit + * + * The amdgpu driver provides a sysfs API for retrieving current node power limit. + * The file cur_node_power_limit is used for this. + */ +static ssize_t amdgpu_show_cur_node_power_limit(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + u32 nplimit; + int r; + + /* get the current node power limit */ + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWERLIMIT, + (void *)&nplimit); + if (r) + return r; + + return sysfs_emit(buf, "%u\n", nplimit); +} + +/** + * DOC: node_power + * + * The amdgpu driver provides a sysfs API for retrieving current node power. + * The file node_power is used for this. + */ +static ssize_t amdgpu_show_node_power(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + u32 npower; + int r; + + /* get the node power */ + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWER, + (void *)&npower); + if (r) + return r; + + return sysfs_emit(buf, "%u\n", npower); +} + +/** + * DOC: npm_status + * + * The amdgpu driver provides a sysfs API for retrieving current node power management status. + * The file npm_status is used for this. It shows the status as enabled or disabled based on + * current node power value. If node power is zero, status is disabled else enabled. + */ +static ssize_t amdgpu_show_npm_status(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + u32 npower; + int r; + + /* get the node power */ + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWER, + (void *)&npower); + if (r) + return r; + + return sysfs_emit(buf, "%s\n", npower ? "enabled" : "disabled"); +} + +/** + * DOC: global_ppt_resid + * + * The amdgpu driver provides a sysfs API for retrieving global ppt residency. + * The file global_ppt_resid is used for this. + */ +static ssize_t amdgpu_show_global_ppt_resid(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + u32 gpptresid; + int r; + + /* get the global ppt residency */ + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPPTRESIDENCY, + (void *)&gpptresid); + if (r) + return r; + + return sysfs_emit(buf, "%u\n", gpptresid); +} + +/** + * DOC: max_node_power_limit + * + * The amdgpu driver provides a sysfs API for retrieving maximum node power limit. + * The file max_node_power_limit is used for this. 
+ */ +static ssize_t amdgpu_show_max_node_power_limit(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + u32 max_nplimit; + int r; + + /* get the max node power limit */ + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT, + (void *)&max_nplimit); + if (r) + return r; + + return sysfs_emit(buf, "%u\n", max_nplimit); +} + +static DEVICE_ATTR(baseboard_temp, 0444, amdgpu_get_baseboard_temp_metrics, NULL); +static DEVICE_ATTR(gpuboard_temp, 0444, amdgpu_get_gpuboard_temp_metrics, NULL); +static DEVICE_ATTR(cur_node_power_limit, 0444, amdgpu_show_cur_node_power_limit, NULL); +static DEVICE_ATTR(node_power, 0444, amdgpu_show_node_power, NULL); +static DEVICE_ATTR(global_ppt_resid, 0444, amdgpu_show_global_ppt_resid, NULL); +static DEVICE_ATTR(max_node_power_limit, 0444, amdgpu_show_max_node_power_limit, NULL); +static DEVICE_ATTR(npm_status, 0444, amdgpu_show_npm_status, NULL); + +static struct attribute *board_attrs[] = { + &dev_attr_baseboard_temp.attr, + &dev_attr_gpuboard_temp.attr, + NULL +}; + +static umode_t amdgpu_board_attr_visible(struct kobject *kobj, struct attribute *attr, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + if (attr == &dev_attr_baseboard_temp.attr) { + if (!amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_BASEBOARD)) + return 0; + } + + if (attr == &dev_attr_gpuboard_temp.attr) { + if (!amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD)) + return 0; + } + + return attr->mode; +} + +const struct attribute_group amdgpu_board_attr_group = { + .name = "board", + .attrs = board_attrs, + .is_visible = amdgpu_board_attr_visible, +}; + /* pm policy attributes */ struct amdgpu_pm_policy_attr { struct device_attribute dev_attr; @@ -2505,18 +2767,18 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev, switch (channel) { case PP_TEMP_JUNCTION: /* get current junction temperature */ - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP, - (void *)&temp); + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP, + (void *)&temp); break; case PP_TEMP_EDGE: /* get current edge temperature */ - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP, - (void *)&temp); + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP, + (void *)&temp); break; case PP_TEMP_MEM: /* get current memory temperature */ - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP, - (void *)&temp); + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP, + (void *)&temp); break; default: r = -EINVAL; @@ -2778,8 +3040,8 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev, u32 min_rpm = 0; int r; - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM, - (void *)&min_rpm); + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM, + (void *)&min_rpm); if (r) return r; @@ -2795,8 +3057,8 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev, u32 max_rpm = 0; int r; - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM, - (void *)&max_rpm); + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM, + (void *)&max_rpm); if (r) return r; @@ -2929,8 +3191,8 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev, int r; /* get the voltage */ - r = 
amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX, - (void *)&vddgfx); + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX, + (void *)&vddgfx); if (r) return r; @@ -2946,8 +3208,8 @@ static ssize_t amdgpu_hwmon_show_vddboard(struct device *dev, int r; /* get the voltage */ - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD, - (void *)&vddboard); + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD, + (void *)&vddboard); if (r) return r; @@ -2980,8 +3242,8 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev, return -EINVAL; /* get the voltage */ - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB, - (void *)&vddnb); + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB, + (void *)&vddnb); if (r) return r; @@ -3003,7 +3265,7 @@ static int amdgpu_hwmon_get_power(struct device *dev, u32 query = 0; int r; - r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&query); + r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&query); if (r) return r; @@ -3123,9 +3385,6 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, int err; u32 value; - if (amdgpu_sriov_vf(adev)) - return -EINVAL; - err = kstrtou32(buf, 10, &value); if (err) return err; @@ -3156,8 +3415,8 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev, int r; /* get the sclk */ - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK, - (void *)&sclk); + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK, + (void *)&sclk); if (r) return r; @@ -3180,8 +3439,8 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev, int r; /* get the sclk */ - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK, - (void *)&mclk); + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK, + (void *)&mclk); if (r) return r; @@ -3456,14 +3715,20 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, effective_mode &= ~S_IWUSR; /* not implemented yet for APUs other than GC 10.3.1 (vangogh) and 9.4.3 */ - if (((adev->family == AMDGPU_FAMILY_SI) || - ((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)) && - (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4)))) && - (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || - attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr || - attr == &sensor_dev_attr_power1_cap.dev_attr.attr || - attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr)) - return 0; + if (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || + attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr || + attr == &sensor_dev_attr_power1_cap.dev_attr.attr || + attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr) { + if (adev->family == AMDGPU_FAMILY_SI || + ((adev->flags & AMD_IS_APU) && gc_ver != IP_VERSION(10, 3, 1) && + (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4))) || + (amdgpu_sriov_vf(adev) && gc_ver == IP_VERSION(11, 0, 3))) + return 0; + } + + if (attr == &sensor_dev_attr_power1_cap.dev_attr.attr && + amdgpu_virt_cap_is_rw(&adev->virt.virt_caps, AMDGPU_VIRT_CAP_POWER_LIMIT)) + effective_mode |= S_IWUSR; /* not implemented yet for APUs having < GC 9.3.0 (Renoir) */ if (((adev->family == AMDGPU_FAMILY_SI) || @@ -3473,10 +3738,12 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, /* not all products support both average and instantaneous */ if (attr == &sensor_dev_attr_power1_average.dev_attr.attr && - amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&tmp) == 
-EOPNOTSUPP) + amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, + (void *)&tmp) == -EOPNOTSUPP) return 0; if (attr == &sensor_dev_attr_power1_input.dev_attr.attr && - amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&tmp) == -EOPNOTSUPP) + amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, + (void *)&tmp) == -EOPNOTSUPP) return 0; /* hide max/min values if we can't both query and manage the fan */ @@ -3515,8 +3782,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, /* only few boards support vddboard */ if ((attr == &sensor_dev_attr_in2_input.dev_attr.attr || attr == &sensor_dev_attr_in2_label.dev_attr.attr) && - amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD, - (void *)&tmp) == -EOPNOTSUPP) + amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD, + (void *)&tmp) == -EOPNOTSUPP) return 0; /* no mclk on APUs other than gc 9,4,3*/ @@ -3645,6 +3912,9 @@ static int parse_input_od_command_lines(const char *buf, return -EINVAL; parameter_size++; + if (!tmp_str) + break; + while (isspace(*tmp_str)) tmp_str++; } @@ -4395,6 +4665,7 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) { enum amdgpu_sriov_vf_mode mode; uint32_t mask = 0; + uint32_t tmp; int ret; if (adev->pm.sysfs_initialized) @@ -4456,6 +4727,28 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) goto err_out0; } + if (amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD)) { + ret = devm_device_add_group(adev->dev, + &amdgpu_board_attr_group); + if (ret) + goto err_out0; + if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT, + (void *)&tmp) != -EOPNOTSUPP) { + sysfs_add_file_to_group(&adev->dev->kobj, + &dev_attr_cur_node_power_limit.attr, + amdgpu_board_attr_group.name); + sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_node_power.attr, + amdgpu_board_attr_group.name); + sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_global_ppt_resid.attr, + amdgpu_board_attr_group.name); + sysfs_add_file_to_group(&adev->dev->kobj, + &dev_attr_max_node_power_limit.attr, + amdgpu_board_attr_group.name); + sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_npm_status.attr, + amdgpu_board_attr_group.name); + } + } + adev->pm.sysfs_initialized = true; return 0; diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index 768317ee1486..65c1d98af26c 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -263,10 +263,6 @@ struct amdgpu_dpm { u32 voltage_response_time; u32 backbias_response_time; void *priv; - u32 new_active_crtcs; - int new_active_crtc_count; - u32 current_active_crtcs; - int current_active_crtc_count; struct amdgpu_dpm_dynamic_state dyn_state; struct amdgpu_dpm_fan fan; u32 tdp_limit; @@ -526,6 +522,8 @@ int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev, int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table); ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id, void *table); +ssize_t amdgpu_dpm_get_temp_metrics(struct amdgpu_device *adev, + enum smu_temp_metric_type type, void *table); /** * @get_pm_metrics: Get one snapshot of power management metrics from PMFW. 
The @@ -613,5 +611,8 @@ ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev, int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask); bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev); int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask); +bool amdgpu_dpm_reset_vcn_is_supported(struct amdgpu_device *adev); +bool amdgpu_dpm_is_temp_metrics_supported(struct amdgpu_device *adev, + enum smu_temp_metric_type type); #endif diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h index 5c2a89f0d5d5..cc6d7ba040e9 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h @@ -23,10 +23,6 @@ #ifndef __AMDGPU_DPM_INTERNAL_H__ #define __AMDGPU_DPM_INTERNAL_H__ -void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev); - -u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev); - -u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev); +void amdgpu_dpm_get_display_cfg(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c index 34e71727b27d..33eb85dd68e9 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c @@ -1242,7 +1242,7 @@ static void kv_dpm_enable_bapm(void *handle, bool enable) if (pi->bapm_enable) { ret = amdgpu_kv_smc_bapm_enable(adev, enable); if (ret) - DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); + drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n"); } } @@ -1266,40 +1266,40 @@ static int kv_dpm_enable(struct amdgpu_device *adev) ret = kv_process_firmware_header(adev); if (ret) { - DRM_ERROR("kv_process_firmware_header failed\n"); + drm_err(adev_to_drm(adev), "kv_process_firmware_header failed\n"); return ret; } kv_init_fps_limits(adev); kv_init_graphics_levels(adev); ret = kv_program_bootup_state(adev); if (ret) { - DRM_ERROR("kv_program_bootup_state failed\n"); + drm_err(adev_to_drm(adev), "kv_program_bootup_state failed\n"); return ret; } kv_calculate_dfs_bypass_settings(adev); ret = kv_upload_dpm_settings(adev); if (ret) { - DRM_ERROR("kv_upload_dpm_settings failed\n"); + drm_err(adev_to_drm(adev), "kv_upload_dpm_settings failed\n"); return ret; } ret = kv_populate_uvd_table(adev); if (ret) { - DRM_ERROR("kv_populate_uvd_table failed\n"); + drm_err(adev_to_drm(adev), "kv_populate_uvd_table failed\n"); return ret; } ret = kv_populate_vce_table(adev); if (ret) { - DRM_ERROR("kv_populate_vce_table failed\n"); + drm_err(adev_to_drm(adev), "kv_populate_vce_table failed\n"); return ret; } ret = kv_populate_samu_table(adev); if (ret) { - DRM_ERROR("kv_populate_samu_table failed\n"); + drm_err(adev_to_drm(adev), "kv_populate_samu_table failed\n"); return ret; } ret = kv_populate_acp_table(adev); if (ret) { - DRM_ERROR("kv_populate_acp_table failed\n"); + drm_err(adev_to_drm(adev), "kv_populate_acp_table failed\n"); return ret; } kv_program_vc(adev); @@ -1310,39 +1310,39 @@ static int kv_dpm_enable(struct amdgpu_device *adev) if (pi->enable_auto_thermal_throttling) { ret = kv_enable_auto_thermal_throttling(adev); if (ret) { - DRM_ERROR("kv_enable_auto_thermal_throttling failed\n"); + drm_err(adev_to_drm(adev), "kv_enable_auto_thermal_throttling failed\n"); return ret; } } ret = kv_enable_dpm_voltage_scaling(adev); if (ret) { - DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n"); + drm_err(adev_to_drm(adev), "kv_enable_dpm_voltage_scaling failed\n"); return 
ret; } ret = kv_set_dpm_interval(adev); if (ret) { - DRM_ERROR("kv_set_dpm_interval failed\n"); + drm_err(adev_to_drm(adev), "kv_set_dpm_interval failed\n"); return ret; } ret = kv_set_dpm_boot_state(adev); if (ret) { - DRM_ERROR("kv_set_dpm_boot_state failed\n"); + drm_err(adev_to_drm(adev), "kv_set_dpm_boot_state failed\n"); return ret; } ret = kv_enable_ulv(adev, true); if (ret) { - DRM_ERROR("kv_enable_ulv failed\n"); + drm_err(adev_to_drm(adev), "kv_enable_ulv failed\n"); return ret; } kv_start_dpm(adev); ret = kv_enable_didt(adev, true); if (ret) { - DRM_ERROR("kv_enable_didt failed\n"); + drm_err(adev_to_drm(adev), "kv_enable_didt failed\n"); return ret; } ret = kv_enable_smc_cac(adev, true); if (ret) { - DRM_ERROR("kv_enable_smc_cac failed\n"); + drm_err(adev_to_drm(adev), "kv_enable_smc_cac failed\n"); return ret; } @@ -1350,7 +1350,7 @@ static int kv_dpm_enable(struct amdgpu_device *adev) ret = amdgpu_kv_smc_bapm_enable(adev, false); if (ret) { - DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); + drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n"); return ret; } @@ -1358,7 +1358,7 @@ static int kv_dpm_enable(struct amdgpu_device *adev) kv_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); if (ret) { - DRM_ERROR("kv_set_thermal_temperature_range failed\n"); + drm_err(adev_to_drm(adev), "kv_set_thermal_temperature_range failed\n"); return ret; } amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, @@ -1382,7 +1382,7 @@ static void kv_dpm_disable(struct amdgpu_device *adev) err = amdgpu_kv_smc_bapm_enable(adev, false); if (err) - DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); + drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n"); if (adev->asic_type == CHIP_MULLINS) kv_enable_nb_dpm(adev, false); @@ -1920,7 +1920,7 @@ static int kv_dpm_set_power_state(void *handle) if (pi->bapm_enable) { ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power); if (ret) { - DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); + drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n"); return ret; } } @@ -1931,7 +1931,7 @@ static int kv_dpm_set_power_state(void *handle) kv_update_dfs_bypass_settings(adev, new_ps); ret = kv_calculate_ds_divider(adev); if (ret) { - DRM_ERROR("kv_calculate_ds_divider failed\n"); + drm_err(adev_to_drm(adev), "kv_calculate_ds_divider failed\n"); return ret; } kv_calculate_nbps_level_settings(adev); @@ -1947,7 +1947,7 @@ static int kv_dpm_set_power_state(void *handle) ret = kv_update_vce_dpm(adev, new_ps, old_ps); if (ret) { - DRM_ERROR("kv_update_vce_dpm failed\n"); + drm_err(adev_to_drm(adev), "kv_update_vce_dpm failed\n"); return ret; } kv_update_sclk_t(adev); @@ -1960,7 +1960,7 @@ static int kv_dpm_set_power_state(void *handle) kv_update_dfs_bypass_settings(adev, new_ps); ret = kv_calculate_ds_divider(adev); if (ret) { - DRM_ERROR("kv_calculate_ds_divider failed\n"); + drm_err(adev_to_drm(adev), "kv_calculate_ds_divider failed\n"); return ret; } kv_calculate_nbps_level_settings(adev); @@ -1972,7 +1972,7 @@ static int kv_dpm_set_power_state(void *handle) kv_set_enabled_levels(adev); ret = kv_update_vce_dpm(adev, new_ps, old_ps); if (ret) { - DRM_ERROR("kv_update_vce_dpm failed\n"); + drm_err(adev_to_drm(adev), "kv_update_vce_dpm failed\n"); return ret; } kv_update_acp_boot_level(adev); @@ -2299,7 +2299,7 @@ static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, if (pi->sys_info.nb_dpm_enable) { force_high = (mclk >= 
pi->sys_info.nbp_memory_clock[3]) || - pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) || + pi->video_start || (adev->pm.pm_display_cfg.num_display >= 3) || pi->disable_nb_ps3_in_battery; ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3; ps->dpm0_pg_nb_ps_hi = 0x2; @@ -2358,7 +2358,7 @@ static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev) return 0; force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) || - (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start); + (adev->pm.pm_display_cfg.num_display >= 3) || pi->video_start); if (force_high) { for (i = pi->lowest_valid; i <= pi->highest_valid; i++) @@ -2521,7 +2521,7 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, if (high_temp > max_temp) high_temp = max_temp; if (high_temp < low_temp) { - DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); + drm_err(adev_to_drm(adev), "invalid thermal range: %d - %d\n", low_temp, high_temp); return -EINVAL; } @@ -2563,7 +2563,7 @@ static int kv_parse_sys_info_table(struct amdgpu_device *adev) data_offset); if (crev != 8) { - DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); + drm_err(adev_to_drm(adev), "Unsupported IGP table: %d %d\n", frev, crev); return -EINVAL; } pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock); @@ -2579,7 +2579,7 @@ static int kv_parse_sys_info_table(struct amdgpu_device *adev) else pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt; if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) { - DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n"); + drm_err(adev_to_drm(adev), "The htcTmpLmt should be larger than htcHystLmt.\n"); } if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3)) @@ -2886,16 +2886,18 @@ kv_dpm_print_power_state(void *handle, void *request_ps) struct kv_ps *ps = kv_get_ps(rps); struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_dpm_print_class_info(rps->class, rps->class2); - amdgpu_dpm_print_cap_info(rps->caps); - printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); + amdgpu_dpm_dbg_print_class_info(adev, rps->class, rps->class2); + amdgpu_dpm_dbg_print_cap_info(adev, rps->caps); + drm_dbg(adev_to_drm(adev), "vclk: %d, dclk: %d\n", + rps->vclk, rps->dclk); for (i = 0; i < ps->num_levels; i++) { struct kv_pl *pl = &ps->levels[i]; - printk("\t\tpower level %d sclk: %u vddc: %u\n", - i, pl->sclk, - kv_convert_8bit_index_to_voltage(adev, pl->vddc_index)); + drm_dbg(adev_to_drm(adev), + "power level %d sclk: %u vddc: %u\n", + i, pl->sclk, + kv_convert_8bit_index_to_voltage(adev, pl->vddc_index)); } - amdgpu_dpm_print_ps_status(adev, rps); + amdgpu_dpm_dbg_print_ps_status(adev, rps); } static void kv_dpm_fini(struct amdgpu_device *adev) @@ -3013,13 +3015,13 @@ static int kv_dpm_sw_init(struct amdgpu_ip_block *ip_block) adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; if (amdgpu_dpm == 1) amdgpu_pm_print_power_states(adev); - DRM_INFO("amdgpu: dpm initialized\n"); + drm_info(adev_to_drm(adev), "dpm initialized\n"); return 0; dpm_failed: kv_dpm_fini(adev); - DRM_ERROR("amdgpu: dpm initialization failed\n"); + drm_err(adev_to_drm(adev), "dpm initialization failed: %d\n", ret); return ret; } diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c index c7518b13e787..c7ed0b457129 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c @@ -47,7 +47,7 @@ #define 
amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \ ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal))) -void amdgpu_dpm_print_class_info(u32 class, u32 class2) +void amdgpu_dpm_dbg_print_class_info(struct amdgpu_device *adev, u32 class, u32 class2) { const char *s; @@ -66,71 +66,45 @@ void amdgpu_dpm_print_class_info(u32 class, u32 class2) s = "performance"; break; } - printk("\tui class: %s\n", s); - printk("\tinternal class:"); + drm_dbg(adev_to_drm(adev), "\tui class: %s\n", s); if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) && (class2 == 0)) - pr_cont(" none"); - else { - if (class & ATOM_PPLIB_CLASSIFICATION_BOOT) - pr_cont(" boot"); - if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL) - pr_cont(" thermal"); - if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) - pr_cont(" limited_pwr"); - if (class & ATOM_PPLIB_CLASSIFICATION_REST) - pr_cont(" rest"); - if (class & ATOM_PPLIB_CLASSIFICATION_FORCED) - pr_cont(" forced"); - if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) - pr_cont(" 3d_perf"); - if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) - pr_cont(" ovrdrv"); - if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) - pr_cont(" uvd"); - if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW) - pr_cont(" 3d_low"); - if (class & ATOM_PPLIB_CLASSIFICATION_ACPI) - pr_cont(" acpi"); - if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) - pr_cont(" uvd_hd2"); - if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) - pr_cont(" uvd_hd"); - if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) - pr_cont(" uvd_sd"); - if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) - pr_cont(" limited_pwr2"); - if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) - pr_cont(" ulv"); - if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) - pr_cont(" uvd_mvc"); - } - pr_cont("\n"); + drm_dbg(adev_to_drm(adev), "\tinternal class: none\n"); + else + drm_dbg(adev_to_drm(adev), "\tinternal class: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", + (class & ATOM_PPLIB_CLASSIFICATION_BOOT) ? " boot" : "", + (class & ATOM_PPLIB_CLASSIFICATION_THERMAL) ? " thermal" : "", + (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) ? " limited_pwr" : "", + (class & ATOM_PPLIB_CLASSIFICATION_REST) ? " rest" : "", + (class & ATOM_PPLIB_CLASSIFICATION_FORCED) ? " forced" : "", + (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) ? " 3d_perf" : "", + (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) ? " ovrdrv" : "", + (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) ? " uvd" : "", + (class & ATOM_PPLIB_CLASSIFICATION_3DLOW) ? " 3d_low" : "", + (class & ATOM_PPLIB_CLASSIFICATION_ACPI) ? " acpi" : "", + (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) ? " uvd_hd2" : "", + (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) ? " uvd_hd" : "", + (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) ? " uvd_sd" : "", + (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) ? " limited_pwr2" : "", + (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) ? " ulv" : "", + (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) ? " uvd_mvc" : ""); } -void amdgpu_dpm_print_cap_info(u32 caps) +void amdgpu_dpm_dbg_print_cap_info(struct amdgpu_device *adev, u32 caps) { - printk("\tcaps:"); - if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) - pr_cont(" single_disp"); - if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) - pr_cont(" video"); - if (caps & ATOM_PPLIB_DISALLOW_ON_DC) - pr_cont(" no_dc"); - pr_cont("\n"); + drm_dbg(adev_to_drm(adev), "\tcaps: %s%s%s\n", + (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) ? 
" single_disp" : "", + (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) ? " video" : "", + (caps & ATOM_PPLIB_DISALLOW_ON_DC) ? " no_dc" : ""); } -void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, +void amdgpu_dpm_dbg_print_ps_status(struct amdgpu_device *adev, struct amdgpu_ps *rps) { - printk("\tstatus:"); - if (rps == adev->pm.dpm.current_ps) - pr_cont(" c"); - if (rps == adev->pm.dpm.requested_ps) - pr_cont(" r"); - if (rps == adev->pm.dpm.boot_ps) - pr_cont(" b"); - pr_cont("\n"); + drm_dbg(adev_to_drm(adev), "\tstatus:%s%s%s\n", + rps == adev->pm.dpm.current_ps ? " c" : "", + rps == adev->pm.dpm.requested_ps ? " r" : "", + rps == adev->pm.dpm.boot_ps ? " b" : ""); } void amdgpu_pm_print_power_states(struct amdgpu_device *adev) @@ -699,64 +673,64 @@ void amdgpu_add_thermal_controller(struct amdgpu_device *adev) adev->pm.fan_max_rpm = controller->ucFanMaxRPM; } if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_RV770; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_SUMO; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_NI; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_SI; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_CI; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? 
"without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_KV; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) { - DRM_INFO("External GPIO thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "External GPIO thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) { - DRM_INFO("ADT7473 with internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "ADT7473 with internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) { - DRM_INFO("EMC2103 with internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "EMC2103 with internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL; } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) { - DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", + drm_info(adev_to_drm(adev), "Possible %s thermal controller at 0x%02x %s fan control\n", pp_lib_thermal_controller_names[controller->ucType], controller->ucI2cAddress >> 1, (controller->ucFanParameters & @@ -772,7 +746,7 @@ void amdgpu_add_thermal_controller(struct amdgpu_device *adev) i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info); } } else { - DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n", + drm_info(adev_to_drm(adev), "Unknown thermal controller type %d at 0x%02x %s fan control\n", controller->ucType, controller->ucI2cAddress >> 1, (controller->ucFanParameters & @@ -797,8 +771,7 @@ static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev, int i; struct amdgpu_ps *ps; u32 ui_class; - bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ? 
- true : false; + bool single_display = adev->pm.pm_display_cfg.num_display < 2; /* check if the vblank period is too short to adjust the mclk */ if (single_display && adev->powerplay.pp_funcs->vblank_too_short) { @@ -943,9 +916,9 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) return -EINVAL; if (amdgpu_dpm == 1 && pp_funcs->print_power_state) { - printk("switching from power state:\n"); + drm_dbg(adev_to_drm(adev), "switching from power state\n"); amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps); - printk("switching to power state:\n"); + drm_dbg(adev_to_drm(adev), "switching to power state\n"); amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps); } @@ -971,9 +944,6 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) amdgpu_dpm_post_set_power_state(adev); - adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; - adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; - if (pp_funcs->force_performance_level) { if (adev->pm.dpm.thermal_active) { enum amd_dpm_forced_level level = adev->pm.dpm.forced_level; @@ -994,7 +964,8 @@ void amdgpu_legacy_dpm_compute_clocks(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_dpm_get_active_displays(adev); + if (!adev->dc_enabled) + amdgpu_dpm_get_display_cfg(adev); amdgpu_dpm_change_power_state_locked(adev); } diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h index 93bd3973330c..7120eef30509 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h @@ -23,10 +23,9 @@ #ifndef __LEGACY_DPM_H__ #define __LEGACY_DPM_H__ -void amdgpu_dpm_print_class_info(u32 class, u32 class2); -void amdgpu_dpm_print_cap_info(u32 caps); -void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, - struct amdgpu_ps *rps); +void amdgpu_dpm_dbg_print_class_info(struct amdgpu_device *adev, u32 class, u32 class2); +void amdgpu_dpm_dbg_print_cap_info(struct amdgpu_device *adev, u32 caps); +void amdgpu_dpm_dbg_print_ps_status(struct amdgpu_device *adev, struct amdgpu_ps *rps); int amdgpu_get_platform_caps(struct amdgpu_device *adev); int amdgpu_parse_extended_power_table(struct amdgpu_device *adev); void amdgpu_free_extended_power_table(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index 4c0e976004ba..cf9932e68055 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -3081,11 +3081,17 @@ static int si_get_vce_clock_voltage(struct amdgpu_device *adev, static bool si_dpm_vblank_too_short(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); + u32 vblank_time = adev->pm.pm_display_cfg.min_vblank_time; /* we never hit the non-gddr5 limit so disable it */ u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0; - if (vblank_time < switch_limit) + /* Consider zero vblank time too short and disable MCLK switching. + * Note that the vblank time is set to maximum when no displays are attached, + * so we'll still enable MCLK switching in that case. 
+ */ + if (vblank_time == 0) + return true; + else if (vblank_time < switch_limit) return true; else return false; @@ -3441,6 +3447,8 @@ static void rv770_get_engine_memory_ss(struct amdgpu_device *adev) static void si_apply_state_adjust_rules(struct amdgpu_device *adev, struct amdgpu_ps *rps) { + const struct amd_pp_display_configuration *display_cfg = + &adev->pm.pm_display_cfg; struct si_ps *ps = si_get_ps(rps); struct amdgpu_clock_and_voltage_limits *max_limits; bool disable_mclk_switching = false; @@ -3449,6 +3457,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, u16 vddc, vddci, min_vce_voltage = 0; u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; u32 max_sclk = 0, max_mclk = 0; + u32 high_pixelclock_count = 0; int i; if (adev->asic_type == CHIP_HAINAN) { @@ -3476,6 +3485,30 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, } } + /* We define "high pixelclock" for SI as higher than necessary for 4K 30Hz. + * For example, 4K 60Hz and 1080p 144Hz fall into this category. + * Find number of such displays connected. + */ + for (i = 0; i < display_cfg->num_display; i++) { + /* The array only contains active displays. */ + if (display_cfg->displays[i].pixel_clock > 297000) + high_pixelclock_count++; + } + + /* These are some ad-hoc fixes to some issues observed with SI GPUs. + * They are necessary because we don't have something like dce_calcs + * for these GPUs to calculate bandwidth requirements. + */ + if (high_pixelclock_count) { + /* On Oland, we observe some flickering when two 4K 60Hz + * displays are connected, possibly because voltage is too low. + * Raise the voltage by requiring a higher SCLK. + * (Voltage cannot be adjusted independently without also SCLK.) + */ + if (high_pixelclock_count > 1 && adev->asic_type == CHIP_OLAND) + disable_sclk_switching = true; + } + if (rps->vce_active) { rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk; rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk; @@ -3486,7 +3519,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, rps->ecclk = 0; } - if ((adev->pm.dpm.new_active_crtc_count > 1) || + if ((adev->pm.pm_display_cfg.num_display > 1) || si_dpm_vblank_too_short(adev)) disable_mclk_switching = true; @@ -3634,7 +3667,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, ps->performance_levels[i].mclk, max_limits->vddc, &ps->performance_levels[i].vddc); btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk, - adev->clock.current_dispclk, + display_cfg->display_clk, max_limits->vddc, &ps->performance_levels[i].vddc); } @@ -4159,16 +4192,16 @@ static void si_program_ds_registers(struct amdgpu_device *adev) static void si_program_display_gap(struct amdgpu_device *adev) { + const struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg; u32 tmp, pipe; - int i; tmp = RREG32(mmCG_DISPLAY_GAP_CNTL) & ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK); - if (adev->pm.dpm.new_active_crtc_count > 0) + if (cfg->num_display > 0) tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT; else tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT; - if (adev->pm.dpm.new_active_crtc_count > 1) + if (cfg->num_display > 1) tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT; else tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT; @@ -4178,17 +4211,8 @@ static void 
si_program_display_gap(struct amdgpu_device *adev) tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG); pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT; - if ((adev->pm.dpm.new_active_crtc_count > 0) && - (!(adev->pm.dpm.new_active_crtcs & (1 << pipe)))) { - /* find the first active crtc */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - if (adev->pm.dpm.new_active_crtcs & (1 << i)) - break; - } - if (i == adev->mode_info.num_crtc) - pipe = 0; - else - pipe = i; + if (cfg->num_display > 0 && pipe != cfg->crtc_index) { + pipe = cfg->crtc_index; tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK; tmp |= DCCG_DISP1_SLOW_SELECT(pipe); @@ -4199,7 +4223,7 @@ static void si_program_display_gap(struct amdgpu_device *adev) * This can be a problem on PowerXpress systems or if you want to use the card * for offscreen rendering or compute if there are no crtcs enabled. */ - si_notify_smc_display_change(adev, adev->pm.dpm.new_active_crtc_count > 0); + si_notify_smc_display_change(adev, cfg->num_display > 0); } static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable) @@ -5508,7 +5532,7 @@ static int si_convert_power_level_to_smc(struct amdgpu_device *adev, (pl->mclk <= pi->mclk_stutter_mode_threshold) && !eg_pi->uvd_enabled && (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) && - (adev->pm.dpm.new_active_crtc_count <= 2)) { + (adev->pm.pm_display_cfg.num_display <= 2)) { level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN; } @@ -5637,14 +5661,10 @@ static int si_populate_smc_t(struct amdgpu_device *adev, static int si_disable_ulv(struct amdgpu_device *adev) { - struct si_power_info *si_pi = si_get_pi(adev); - struct si_ulv_param *ulv = &si_pi->ulv; - - if (ulv->supported) - return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ? - 0 : -EINVAL; + PPSMC_Result r; - return 0; + r = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV); + return (r == PPSMC_Result_OK) ? 0 : -EINVAL; } static bool si_is_state_ulv_compatible(struct amdgpu_device *adev, @@ -5661,7 +5681,7 @@ static bool si_is_state_ulv_compatible(struct amdgpu_device *adev, /* XXX validate against display requirements! */ for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) { - if (adev->clock.current_dispclk <= + if (adev->pm.pm_display_cfg.display_clk <= adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) { if (ulv->pl.vddc < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v) @@ -5815,39 +5835,36 @@ static int si_upload_ulv_state(struct amdgpu_device *adev) static int si_upload_smc_data(struct amdgpu_device *adev) { - struct amdgpu_crtc *amdgpu_crtc = NULL; - int i; + const struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg; + u32 crtc_index = 0; + u32 mclk_change_block_cp_min = 0; + u32 mclk_change_block_cp_max = 0; - if (adev->pm.dpm.new_active_crtc_count == 0) - return 0; + /* When a display is plugged in, program these so that the SMC + * performs MCLK switching when it doesn't cause flickering. + * When no display is plugged in, there is no need to restrict + * MCLK switching, so program them to zero. 
+ */ + if (cfg->num_display) { + crtc_index = cfg->crtc_index; - for (i = 0; i < adev->mode_info.num_crtc; i++) { - if (adev->pm.dpm.new_active_crtcs & (1 << i)) { - amdgpu_crtc = adev->mode_info.crtcs[i]; - break; + if (cfg->line_time_in_us) { + mclk_change_block_cp_min = 200 / cfg->line_time_in_us; + mclk_change_block_cp_max = 100 / cfg->line_time_in_us; } } - if (amdgpu_crtc == NULL) - return 0; - - if (amdgpu_crtc->line_time <= 0) - return 0; - - if (si_write_smc_soft_register(adev, - SI_SMC_SOFT_REGISTER_crtc_index, - amdgpu_crtc->crtc_id) != PPSMC_Result_OK) - return 0; + si_write_smc_soft_register(adev, + SI_SMC_SOFT_REGISTER_crtc_index, + crtc_index); - if (si_write_smc_soft_register(adev, - SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min, - amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK) - return 0; + si_write_smc_soft_register(adev, + SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min, + mclk_change_block_cp_min); - if (si_write_smc_soft_register(adev, - SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max, - amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK) - return 0; + si_write_smc_soft_register(adev, + SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max, + mclk_change_block_cp_max); return 0; } @@ -7951,15 +7968,16 @@ static void si_dpm_print_power_state(void *handle, struct rv7xx_pl *pl; int i; - amdgpu_dpm_print_class_info(rps->class, rps->class2); - amdgpu_dpm_print_cap_info(rps->caps); - DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); + amdgpu_dpm_dbg_print_class_info(adev, rps->class, rps->class2); + amdgpu_dpm_dbg_print_cap_info(adev, rps->caps); + drm_dbg(adev_to_drm(adev), "\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); + drm_dbg(adev_to_drm(adev), "\tvce evclk: %d ecclk: %d\n", rps->evclk, rps->ecclk); for (i = 0; i < ps->performance_level_count; i++) { pl = &ps->performance_levels[i]; - DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n", + drm_dbg(adev_to_drm(adev), "\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n", i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1); } - amdgpu_dpm_print_ps_status(adev, rps); + amdgpu_dpm_dbg_print_ps_status(adev, rps); } static int si_dpm_early_init(struct amdgpu_ip_block *ip_block) diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c index 4e65ab9e931c..281a5e377aee 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c @@ -172,20 +172,42 @@ PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev, { u32 tmp; int i; + int usec_timeout; + + /* SMC seems to process some messages exceptionally slowly. 
*/ + switch (msg) { + case PPSMC_MSG_NoForcedLevel: + case PPSMC_MSG_SetEnabledLevels: + case PPSMC_MSG_SetForcedLevels: + case PPSMC_MSG_DisableULV: + case PPSMC_MSG_SwitchToSwState: + usec_timeout = 1000000; /* 1 sec */ + break; + default: + usec_timeout = 200000; /* 200 ms */ + break; + } if (!amdgpu_si_is_smc_running(adev)) return PPSMC_Result_Failed; WREG32(mmSMC_MESSAGE_0, msg); - for (i = 0; i < adev->usec_timeout; i++) { + for (i = 0; i < usec_timeout; i++) { tmp = RREG32(mmSMC_RESP_0); if (tmp != 0) break; udelay(1); } - return (PPSMC_Result)RREG32(mmSMC_RESP_0); + tmp = RREG32(mmSMC_RESP_0); + if (tmp == 0) { + drm_warn(adev_to_drm(adev), + "%s timeout on message: %x (SMC_SCRATCH0: %x)\n", + __func__, msg, RREG32(mmSMC_SCRATCH0)); + } + + return (PPSMC_Result)tmp; } PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index b48a031cbba0..554492dfa3c0 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -1554,16 +1554,7 @@ static void pp_pm_compute_clocks(void *handle) struct amdgpu_device *adev = hwmgr->adev; if (!adev->dc_enabled) { - amdgpu_dpm_get_active_displays(adev); - adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count; - adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev); - adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev); - /* we have issues with mclk switching with - * refresh rates over 120 hz on the non-DC code. - */ - if (adev->pm.pm_display_cfg.vrefresh > 120) - adev->pm.pm_display_cfg.min_vblank_time = 0; - + amdgpu_dpm_get_display_cfg(adev); pp_display_configuration_change(handle, &adev->pm.pm_display_cfg); } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c index 8d40ed0f0e83..ce166a7f8e42 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c @@ -563,8 +563,8 @@ bool atomctrl_is_voltage_controlled_by_gpio_v3( PP_ASSERT_WITH_CODE((NULL != voltage_info), "Could not find Voltage Table in BIOS.", return false;); - ret = (NULL != atomctrl_lookup_voltage_type_v3 - (voltage_info, voltage_type, voltage_mode)) ? 
true : false; + ret = atomctrl_lookup_voltage_type_v3 + (voltage_info, voltage_type, voltage_mode) != NULL; return ret; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c index 9a821563bc8e..14ccd743ca1d 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c @@ -1032,7 +1032,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, data->clock_vol_info.vdd_dep_on_fclk; uint32_t i, now, size = 0; uint32_t min_freq, max_freq = 0; - uint32_t ret = 0; + int ret = 0; switch (type) { case PP_SCLK: diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c index 79a566f3564a..c305ea4ec17d 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c @@ -149,7 +149,7 @@ int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, } cgs_write_register(hwmgr->device, indirect_port, index); - return phm_wait_on_register(hwmgr, indirect_port + 1, mask, value); + return phm_wait_on_register(hwmgr, indirect_port + 1, value, mask); } int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr, diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c index 5e43ad2b2956..d2dbd90bb427 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c @@ -2540,9 +2540,8 @@ static int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) static bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr) { - return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, - CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON)) - ? true : false; + return PHM_READ_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) == 1; } static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr, diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c index 17d2f5bff4a7..1f50f1e74c48 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c @@ -2655,9 +2655,8 @@ static int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) static bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr) { - return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, - CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON)) - ? true : false; + return PHM_READ_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) == 1; } const struct pp_smumgr_func iceland_smu_funcs = { diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c index ff6b563ecbf5..bf6d09572cfc 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c @@ -2578,9 +2578,8 @@ static int polaris10_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr) { - return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, - CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON)) - ? 
true : false; + return PHM_READ_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) == 1; } static int polaris10_update_dpm_settings(struct pp_hwmgr *hwmgr, diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c index baf51cd82a35..0d4cbe4113a0 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c @@ -401,7 +401,7 @@ failed: int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type) { struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); - uint32_t ret; + int ret; ret = phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX_11, smu_data->soft_regs_start + smum_get_offsetof(hwmgr, diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c index 6fe6e6abb5d8..2e21f9d066cb 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c @@ -3139,9 +3139,8 @@ static int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) static bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr) { - return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, - CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON)) - ? true : false; + return PHM_READ_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) == 1; } static int tonga_update_dpm_settings(struct pp_hwmgr *hwmgr, diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index d79a1d94661a..fb8086859857 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -76,6 +76,10 @@ static void smu_power_profile_mode_get(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile_mode); static void smu_power_profile_mode_put(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile_mode); +static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type); +static int smu_od_edit_dpm_table(void *handle, + enum PP_OD_DPM_TABLE_COMMAND type, + long *input, uint32_t size); static int smu_sys_get_pp_feature_mask(void *handle, char *buf) @@ -134,12 +138,17 @@ int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value) } int smu_set_soft_freq_range(struct smu_context *smu, - enum smu_clk_type clk_type, + enum pp_clock_type type, uint32_t min, uint32_t max) { + enum smu_clk_type clk_type; int ret = 0; + clk_type = smu_convert_to_smuclk(type); + if (clk_type == SMU_CLK_COUNT) + return -EINVAL; + if (smu->ppt_funcs->set_soft_freq_limited_range) ret = smu->ppt_funcs->set_soft_freq_limited_range(smu, clk_type, @@ -307,6 +316,26 @@ static int smu_dpm_set_vpe_enable(struct smu_context *smu, return ret; } +static int smu_dpm_set_isp_enable(struct smu_context *smu, + bool enable) +{ + struct smu_power_context *smu_power = &smu->smu_power; + struct smu_power_gate *power_gate = &smu_power->power_gate; + int ret; + + if (!smu->ppt_funcs->dpm_set_isp_enable) + return 0; + + if (atomic_read(&power_gate->isp_gated) ^ enable) + return 0; + + ret = smu->ppt_funcs->dpm_set_isp_enable(smu, enable); + if (!ret) + atomic_set(&power_gate->isp_gated, !enable); + + return ret; +} + static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu, bool enable) { @@ -408,6 +437,12 @@ static int smu_dpm_set_power_gate(void *handle, dev_err(smu->adev->dev, "Failed to power %s VPE!\n", gate ? 
"gate" : "ungate"); break; + case AMD_IP_BLOCK_TYPE_ISP: + ret = smu_dpm_set_isp_enable(smu, !gate); + if (ret) + dev_err(smu->adev->dev, "Failed to power %s ISP!\n", + gate ? "gate" : "ungate"); + break; default: dev_err(smu->adev->dev, "Unsupported block type!\n"); return -EINVAL; @@ -1004,6 +1039,21 @@ static int smu_fini_fb_allocations(struct smu_context *smu) return 0; } +static void smu_update_gpu_addresses(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *pm_status_table = smu_table->tables + SMU_TABLE_PMSTATUSLOG; + struct smu_table *driver_table = &(smu_table->driver_table); + struct smu_table *dummy_read_1_table = &smu_table->dummy_read_1_table; + + if (pm_status_table->bo) + pm_status_table->mc_address = amdgpu_bo_fb_aper_addr(pm_status_table->bo); + if (driver_table->bo) + driver_table->mc_address = amdgpu_bo_fb_aper_addr(driver_table->bo); + if (dummy_read_1_table->bo) + dummy_read_1_table->mc_address = amdgpu_bo_fb_aper_addr(dummy_read_1_table->bo); +} + /** * smu_alloc_memory_pool - allocate memory pool in the system memory * @@ -1265,6 +1315,33 @@ static void smu_init_power_profile(struct smu_context *smu) smu_power_profile_mode_get(smu, smu->power_profile_mode); } +void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id) +{ + struct smu_feature_cap *fea_cap = &smu->fea_cap; + + if (fea_id >= SMU_FEATURE_CAP_ID__COUNT) + return; + + set_bit(fea_id, fea_cap->cap_map); +} + +bool smu_feature_cap_test(struct smu_context *smu, enum smu_feature_cap_id fea_id) +{ + struct smu_feature_cap *fea_cap = &smu->fea_cap; + + if (fea_id >= SMU_FEATURE_CAP_ID__COUNT) + return false; + + return test_bit(fea_id, fea_cap->cap_map); +} + +static void smu_feature_cap_init(struct smu_context *smu) +{ + struct smu_feature_cap *fea_cap = &smu->fea_cap; + + bitmap_zero(fea_cap->cap_map, SMU_FEATURE_CAP_ID__COUNT); +} + static int smu_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; @@ -1285,6 +1362,7 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block) atomic_set(&smu->smu_power.power_gate.vcn_gated[i], 1); atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1); atomic_set(&smu->smu_power.power_gate.vpe_gated, 1); + atomic_set(&smu->smu_power.power_gate.isp_gated, 1); atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1); smu_init_power_profile(smu); @@ -1296,6 +1374,8 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block) INIT_DELAYED_WORK(&smu->swctf_delayed_work, smu_swctf_delayed_work_handler); + smu_feature_cap_init(smu); + ret = smu_smc_table_sw_init(smu); if (ret) { dev_err(adev->dev, "Failed to sw init smc table!\n"); @@ -1672,37 +1752,6 @@ static int smu_smc_hw_setup(struct smu_context *smu) } } - ret = smu_system_features_control(smu, true); - if (ret) { - dev_err(adev->dev, "Failed to enable requested dpm features!\n"); - return ret; - } - - smu_init_xgmi_plpd_mode(smu); - - ret = smu_feature_get_enabled_mask(smu, &features_supported); - if (ret) { - dev_err(adev->dev, "Failed to retrieve supported dpm features!\n"); - return ret; - } - bitmap_copy(feature->supported, - (unsigned long *)&features_supported, - feature->feature_num); - - if (!smu_is_dpm_running(smu)) - dev_info(adev->dev, "dpm has been disabled\n"); - - /* - * Set initialized values (get from vbios) to dpm tables context such as - * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each - * type of clks. 
- */ - ret = smu_set_default_dpm_table(smu); - if (ret) { - dev_err(adev->dev, "Failed to setup default dpm clock tables!\n"); - return ret; - } - if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5) pcie_gen = 4; else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) @@ -1738,6 +1787,37 @@ static int smu_smc_hw_setup(struct smu_context *smu) return ret; } + ret = smu_system_features_control(smu, true); + if (ret) { + dev_err(adev->dev, "Failed to enable requested dpm features!\n"); + return ret; + } + + smu_init_xgmi_plpd_mode(smu); + + ret = smu_feature_get_enabled_mask(smu, &features_supported); + if (ret) { + dev_err(adev->dev, "Failed to retrieve supported dpm features!\n"); + return ret; + } + bitmap_copy(feature->supported, + (unsigned long *)&features_supported, + feature->feature_num); + + if (!smu_is_dpm_running(smu)) + dev_info(adev->dev, "dpm has been disabled\n"); + + /* + * Set initialized values (get from vbios) to dpm tables context such as + * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each + * type of clks. + */ + ret = smu_set_default_dpm_table(smu); + if (ret) { + dev_err(adev->dev, "Failed to setup default dpm clock tables!\n"); + return ret; + } + ret = smu_get_thermal_temperature_range(smu); if (ret) { dev_err(adev->dev, "Failed to get thermal temperature ranges!\n"); @@ -1780,6 +1860,9 @@ static int smu_start_smc_engine(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; int ret = 0; + if (amdgpu_virt_xgmi_migrate_enabled(adev)) + smu_update_gpu_addresses(smu); + smu->smc_fw_state = SMU_FW_INIT; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { @@ -1842,7 +1925,6 @@ static int smu_hw_init(struct amdgpu_ip_block *ip_block) for (i = 0; i < adev->vcn.num_vcn_inst; i++) smu_dpm_set_vcn_enable(smu, true, i); smu_dpm_set_jpeg_enable(smu, true); - smu_dpm_set_vpe_enable(smu, true); smu_dpm_set_umsch_mm_enable(smu, true); smu_set_mall_enable(smu); smu_set_gfx_cgpg(smu, true); @@ -2050,7 +2132,6 @@ static int smu_hw_fini(struct amdgpu_ip_block *ip_block) } smu_dpm_set_jpeg_enable(smu, false); adev->jpeg.cur_state = AMD_PG_STATE_GATE; - smu_dpm_set_vpe_enable(smu, false); smu_dpm_set_umsch_mm_enable(smu, false); if (!smu->pm_enabled) @@ -2144,6 +2225,7 @@ static int smu_resume(struct amdgpu_ip_block *ip_block) int ret; struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; + struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); if (amdgpu_sriov_multi_vf_mode(adev)) return 0; @@ -2175,6 +2257,18 @@ static int smu_resume(struct amdgpu_ip_block *ip_block) adev->pm.dpm_enabled = true; + if (smu->current_power_limit) { + ret = smu_set_power_limit(smu, smu->current_power_limit); + if (ret && ret != -EOPNOTSUPP) + return ret; + } + + if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL && smu->od_enabled) { + ret = smu_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0); + if (ret) + return ret; + } + dev_info(adev->dev, "SMU is resumed successfully!\n"); return 0; @@ -2935,6 +3029,12 @@ static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type) clk_type = SMU_DCLK; break; case PP_DCLK1: clk_type = SMU_DCLK1; break; + case PP_ISPICLK: + clk_type = SMU_ISPICLK; + break; + case PP_ISPXCLK: + clk_type = SMU_ISPXCLK; + break; case OD_SCLK: clk_type = SMU_OD_SCLK; break; case OD_MCLK: @@ -3434,15 +3534,10 @@ bool smu_mode1_reset_is_support(struct smu_context *smu) bool smu_link_reset_is_support(struct smu_context *smu) { - bool ret = false; - if 
(!smu->pm_enabled) return false; - if (smu->ppt_funcs && smu->ppt_funcs->link_reset_is_support) - ret = smu->ppt_funcs->link_reset_is_support(smu); - - return ret; + return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__LINK_RESET); } int smu_mode1_reset(struct smu_context *smu) @@ -3758,6 +3853,51 @@ int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type, return ret; } +static ssize_t smu_sys_get_temp_metrics(void *handle, enum smu_temp_metric_type type, void *table) +{ + struct smu_context *smu = handle; + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *tables = smu_table->tables; + enum smu_table_id table_id; + + if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) + return -EOPNOTSUPP; + + if (!smu->smu_temp.temp_funcs || !smu->smu_temp.temp_funcs->get_temp_metrics) + return -EOPNOTSUPP; + + table_id = smu_metrics_get_temp_table_id(type); + + if (table_id == SMU_TABLE_COUNT) + return -EINVAL; + + /* If the request is to get size alone, return the cached table size */ + if (!table && tables[table_id].cache.size) + return tables[table_id].cache.size; + + if (smu_table_cache_is_valid(&tables[table_id])) { + memcpy(table, tables[table_id].cache.buffer, + tables[table_id].cache.size); + return tables[table_id].cache.size; + } + + return smu->smu_temp.temp_funcs->get_temp_metrics(smu, type, table); +} + +static bool smu_temp_metrics_is_supported(void *handle, enum smu_temp_metric_type type) +{ + struct smu_context *smu = handle; + bool ret = false; + + if (!smu->pm_enabled) + return false; + + if (smu->smu_temp.temp_funcs && smu->smu_temp.temp_funcs->temp_metrics_is_supported) + ret = smu->smu_temp.temp_funcs->temp_metrics_is_supported(smu, type); + + return ret; +} + static ssize_t smu_sys_get_xcp_metrics(void *handle, int xcp_id, void *table) { struct smu_context *smu = handle; @@ -3830,6 +3970,8 @@ static const struct amd_pm_funcs swsmu_pm_funcs = { .get_dpm_clock_table = smu_get_dpm_clock_table, .get_smu_prv_buf_details = smu_get_prv_buffer_details, .get_xcp_metrics = smu_sys_get_xcp_metrics, + .get_temp_metrics = smu_sys_get_temp_metrics, + .temp_metrics_is_supported = smu_temp_metrics_is_supported, }; int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event, @@ -3985,12 +4127,7 @@ int smu_send_rma_reason(struct smu_context *smu) */ bool smu_reset_sdma_is_supported(struct smu_context *smu) { - bool ret = false; - - if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma_is_supported) - ret = smu->ppt_funcs->reset_sdma_is_supported(smu); - - return ret; + return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__SDMA_RESET); } int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask) @@ -4003,6 +4140,11 @@ int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask) return ret; } +bool smu_reset_vcn_is_supported(struct smu_context *smu) +{ + return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__VCN_RESET); +} + int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask) { if (smu->ppt_funcs && smu->ppt_funcs->dpm_reset_vcn) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 9aacc7bc1c69..582c186d8b62 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -249,6 +249,14 @@ struct smu_user_dpm_profile { tables[table_id].domain = d; \ } while (0) +struct smu_table_cache { + void *buffer; + size_t size; + /* interval in ms*/ + uint32_t interval; + unsigned long last_cache_time; +}; + struct smu_table { uint64_t 
size; uint32_t align; @@ -257,6 +265,7 @@ struct smu_table { void *cpu_addr; struct amdgpu_bo *bo; uint32_t version; + struct smu_table_cache cache; }; enum smu_perf_level_designation { @@ -322,6 +331,9 @@ enum smu_table_id { SMU_TABLE_ECCINFO, SMU_TABLE_COMBO_PPTABLE, SMU_TABLE_WIFIBAND, + SMU_TABLE_GPUBOARD_TEMP_METRICS, + SMU_TABLE_BASEBOARD_TEMP_METRICS, + SMU_TABLE_PMFW_SYSTEM_METRICS, SMU_TABLE_COUNT, }; @@ -396,12 +408,17 @@ struct smu_dpm_context { struct smu_dpm_policy_ctxt *dpm_policies; }; +struct smu_temp_context { + const struct smu_temp_funcs *temp_funcs; +}; + struct smu_power_gate { bool uvd_gated; bool vce_gated; atomic_t vcn_gated[AMDGPU_MAX_VCN_INSTANCES]; atomic_t jpeg_gated; atomic_t vpe_gated; + atomic_t isp_gated; atomic_t umsch_mm_gated; }; @@ -511,6 +528,17 @@ enum smu_fw_status { */ #define SMU_WBRF_EVENT_HANDLING_PACE 10 +enum smu_feature_cap_id { + SMU_FEATURE_CAP_ID__LINK_RESET = 0, + SMU_FEATURE_CAP_ID__SDMA_RESET, + SMU_FEATURE_CAP_ID__VCN_RESET, + SMU_FEATURE_CAP_ID__COUNT, +}; + +struct smu_feature_cap { + DECLARE_BITMAP(cap_map, SMU_FEATURE_CAP_ID__COUNT); +}; + struct smu_context { struct amdgpu_device *adev; struct amdgpu_irq_src irq_source; @@ -528,10 +556,12 @@ struct smu_context { struct smu_table_context smu_table; struct smu_dpm_context smu_dpm; struct smu_power_context smu_power; + struct smu_temp_context smu_temp; struct smu_feature smu_feature; struct amd_pp_display_configuration *display_config; struct smu_baco_context smu_baco; struct smu_temperature_range thermal_range; + struct smu_feature_cap fea_cap; void *od_settings; struct smu_umd_pstate_table pstate_table; @@ -623,6 +653,28 @@ struct smu_context { struct i2c_adapter; /** + * struct smu_temp_funcs - Callbacks used to get temperature data. + */ +struct smu_temp_funcs { + /** + * @get_temp_metrics: Calibrate voltage/frequency curve to fit the system's + * power delivery and voltage margins. Required for adaptive + * @type Temperature metrics type(baseboard/gpuboard) + * Return: Size of &table + */ + ssize_t (*get_temp_metrics)(struct smu_context *smu, + enum smu_temp_metric_type type, void *table); + + /** + * @temp_metrics_is_support: Get if specific temperature metrics is supported + * @type Temperature metrics type(baseboard/gpuboard) + * Return: true if supported else false + */ + bool (*temp_metrics_is_supported)(struct smu_context *smu, enum smu_temp_metric_type type); + +}; + +/** * struct pptable_funcs - Callbacks used to interact with the SMU. */ struct pptable_funcs { @@ -1233,11 +1285,6 @@ struct pptable_funcs { bool (*mode1_reset_is_support)(struct smu_context *smu); /** - * @link_reset_is_support: Check if GPU supports link reset. - */ - bool (*link_reset_is_support)(struct smu_context *smu); - - /** * @mode1_reset: Perform mode1 reset. * * Complete GPU reset. @@ -1387,10 +1434,6 @@ struct pptable_funcs { * @reset_sdma: message SMU to soft reset sdma instance. */ int (*reset_sdma)(struct smu_context *smu, uint32_t inst_mask); - /** - * @reset_sdma_is_supported: Check if support resets the SDMA engine. - */ - bool (*reset_sdma_is_supported)(struct smu_context *smu); /** * @reset_vcn: message SMU to soft reset vcn instance. @@ -1436,6 +1479,12 @@ struct pptable_funcs { int (*dpm_set_vpe_enable)(struct smu_context *smu, bool enable); /** + * @dpm_set_isp_enable: Enable/disable ISP engine dynamic power + * management. + */ + int (*dpm_set_isp_enable)(struct smu_context *smu, bool enable); + + /** * @dpm_set_umsch_mm_enable: Enable/disable UMSCH engine dynamic power * management. 
*/ @@ -1615,6 +1664,71 @@ typedef struct { struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type); +static inline enum smu_table_id +smu_metrics_get_temp_table_id(enum smu_temp_metric_type type) +{ + switch (type) { + case SMU_TEMP_METRIC_BASEBOARD: + return SMU_TABLE_BASEBOARD_TEMP_METRICS; + case SMU_TEMP_METRIC_GPUBOARD: + return SMU_TABLE_GPUBOARD_TEMP_METRICS; + default: + return SMU_TABLE_COUNT; + } + + return SMU_TABLE_COUNT; +} + +static inline void smu_table_cache_update_time(struct smu_table *table, + unsigned long time) +{ + table->cache.last_cache_time = time; +} + +static inline bool smu_table_cache_is_valid(struct smu_table *table) +{ + if (!table->cache.buffer || !table->cache.last_cache_time || + !table->cache.interval || !table->cache.size || + time_after(jiffies, + table->cache.last_cache_time + + msecs_to_jiffies(table->cache.interval))) + return false; + + return true; +} + +static inline int smu_table_cache_init(struct smu_context *smu, + enum smu_table_id table_id, size_t size, + uint32_t cache_interval) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *tables = smu_table->tables; + + tables[table_id].cache.buffer = kzalloc(size, GFP_KERNEL); + if (!tables[table_id].cache.buffer) + return -ENOMEM; + + tables[table_id].cache.last_cache_time = 0; + tables[table_id].cache.interval = cache_interval; + tables[table_id].cache.size = size; + + return 0; +} + +static inline void smu_table_cache_fini(struct smu_context *smu, + enum smu_table_id table_id) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *tables = smu_table->tables; + + if (tables[table_id].cache.buffer) { + kfree(tables[table_id].cache.buffer); + tables[table_id].cache.buffer = NULL; + tables[table_id].cache.last_cache_time = 0; + tables[table_id].cache.interval = 0; + } +} + #if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4) int smu_get_power_limit(void *handle, uint32_t *limit, @@ -1635,7 +1749,7 @@ int smu_write_watermarks_table(struct smu_context *smu); int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max); -int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, +int smu_set_soft_freq_range(struct smu_context *smu, enum pp_clock_type clk_type, uint32_t min, uint32_t max); int smu_set_gfx_power_up_by_imu(struct smu_context *smu); @@ -1666,10 +1780,14 @@ int smu_send_rma_reason(struct smu_context *smu); int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask); bool smu_reset_sdma_is_supported(struct smu_context *smu); int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask); +bool smu_reset_vcn_is_supported(struct smu_context *smu); int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type, int level); ssize_t smu_get_pm_policy_info(struct smu_context *smu, enum pp_pm_policy p_type, char *sysbuf); #endif + +void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id); +bool smu_feature_cap_test(struct smu_context *smu, enum smu_feature_cap_id fea_id); #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h index 1bc30db22f9c..cd44f4254134 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h @@ -106,6 +106,7 @@ typedef struct { #define 
NUM_FCLK_DPM_LEVELS 8 #define NUM_MEM_PSTATE_LEVELS 4 +#define ISP_ALL_TILES_MASK 0x7FF typedef struct { uint32_t UClk; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h index d7505cfc433a..bf6aa9620911 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h @@ -86,8 +86,10 @@ typedef enum { /*36*/ FEATURE_PIT = 36, /*37*/ FEATURE_DVO = 37, /*38*/ FEATURE_XVMINORPSM_CLKSTOP_DS = 38, +/*39*/ FEATURE_GLOBAL_DPM = 39, +/*40*/ FEATURE_NODE_POWER_MANAGER = 40, -/*39*/ NUM_FEATURES = 39 +/*41*/ NUM_FEATURES = 41 } FEATURE_LIST_e; //enum for MPIO PCIe gen speed msgs @@ -133,7 +135,63 @@ typedef enum { GFX_DVM_MARGIN_COUNT } GFX_DVM_MARGIN_e; -#define SMU_METRICS_TABLE_VERSION 0x12 +typedef enum{ + SYSTEM_TEMP_UBB_FPGA, + SYSTEM_TEMP_UBB_FRONT, + SYSTEM_TEMP_UBB_BACK, + SYSTEM_TEMP_UBB_OAM7, + SYSTEM_TEMP_UBB_IBC, + SYSTEM_TEMP_UBB_UFPGA, + SYSTEM_TEMP_UBB_OAM1, + SYSTEM_TEMP_OAM_0_1_HSC, + SYSTEM_TEMP_OAM_2_3_HSC, + SYSTEM_TEMP_OAM_4_5_HSC, + SYSTEM_TEMP_OAM_6_7_HSC, + SYSTEM_TEMP_UBB_FPGA_0V72_VR, + SYSTEM_TEMP_UBB_FPGA_3V3_VR, + SYSTEM_TEMP_RETIMER_0_1_2_3_1V2_VR, + SYSTEM_TEMP_RETIMER_4_5_6_7_1V2_VR, + SYSTEM_TEMP_RETIMER_0_1_0V9_VR, + SYSTEM_TEMP_RETIMER_4_5_0V9_VR, + SYSTEM_TEMP_RETIMER_2_3_0V9_VR, + SYSTEM_TEMP_RETIMER_6_7_0V9_VR, + SYSTEM_TEMP_OAM_0_1_2_3_3V3_VR, + SYSTEM_TEMP_OAM_4_5_6_7_3V3_VR, + SYSTEM_TEMP_IBC_HSC, + SYSTEM_TEMP_IBC, + SYSTEM_TEMP_MAX_ENTRIES = 32 +} SYSTEM_TEMP_e; + +typedef enum{ + NODE_TEMP_RETIMER, + NODE_TEMP_IBC_TEMP, + NODE_TEMP_IBC_2_TEMP, + NODE_TEMP_VDD18_VR_TEMP, + NODE_TEMP_04_HBM_B_VR_TEMP, + NODE_TEMP_04_HBM_D_VR_TEMP, + NODE_TEMP_MAX_TEMP_ENTRIES = 12 +} NODE_TEMP_e; + +typedef enum { + SVI_VDDCR_VDD0_TEMP, + SVI_VDDCR_VDD1_TEMP, + SVI_VDDCR_VDD2_TEMP, + SVI_VDDCR_VDD3_TEMP, + SVI_VDDCR_SOC_A_TEMP, + SVI_VDDCR_SOC_C_TEMP, + SVI_VDDCR_SOCIO_A_TEMP, + SVI_VDDCR_SOCIO_C_TEMP, + SVI_VDD_085_HBM_TEMP, + SVI_VDDCR_11_HBM_B_TEMP, + SVI_VDDCR_11_HBM_D_TEMP, + SVI_VDD_USR_TEMP, + SVI_VDDIO_11_E32_TEMP, + SVI_MAX_TEMP_ENTRIES, // 13 +} SVI_TEMP_e; + +#define SMU_METRICS_TABLE_VERSION 0x14 + +#define SMU_SYSTEM_METRICS_TABLE_VERSION 0x1 typedef struct __attribute__((packed, aligned(4))) { uint64_t AccumulationCounter; @@ -229,11 +287,32 @@ typedef struct __attribute__((packed, aligned(4))) { uint64_t GfxclkBelowHostLimitThmAcc[8]; uint64_t GfxclkBelowHostLimitTotalAcc[8]; uint64_t GfxclkLowUtilizationAcc[8]; + + uint32_t AidTemperature[4]; + uint32_t XcdTemperature[8]; + uint32_t HbmTemperature[8]; } MetricsTable_t; #define SMU_VF_METRICS_TABLE_MASK (1 << 31) #define SMU_VF_METRICS_TABLE_VERSION (0x6 | SMU_VF_METRICS_TABLE_MASK) +#pragma pack(push, 4) +typedef struct { + uint64_t AccumulationCounter; // Last update timestamp + uint16_t LabelVersion; // Defaults to 0. + uint16_t NodeIdentifier; // Unique identifier to each node on system. 
+ int16_t SystemTemperatures[SYSTEM_TEMP_MAX_ENTRIES]; // Signed integer temperature value in Celsius, unused fields are set to 0xFFFF + int16_t NodeTemperatures[NODE_TEMP_MAX_TEMP_ENTRIES]; // Signed integer temperature value in Celsius, unused fields are set to 0xFFFF + int16_t VrTemperatures[SVI_MAX_TEMP_ENTRIES]; // Signed integer temperature value in Celsius + int16_t spare[7]; + + //NPM: NODE POWER MANAGEMENT + uint32_t NodePowerLimit; + uint32_t NodePower; + uint32_t GlobalPPTResidencyAcc; +} SystemMetricsTable_t; +#pragma pack(pop) + typedef struct __attribute__((packed, aligned(4))) { uint32_t AccumulationCounter; uint32_t InstGfxclk_TargFreq; @@ -275,6 +354,19 @@ typedef struct { //PSNs uint64_t PublicSerialNumber_AID[4]; uint64_t PublicSerialNumber_XCD[8]; + + //XGMI + uint32_t MaxXgmiWidth; + uint32_t MaxXgmiBitrate; + + // Telemetry + uint32_t InputTelemetryVoltageInmV; + + // General info + uint32_t pldmVersion[2]; + + //Node Power Limit + uint32_t MaxNodePowerLimit; } StaticMetricsTable_t; #pragma pack(pop) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h index e1f490b6ce64..4b066c42e0ec 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h @@ -116,7 +116,12 @@ #define PPSMC_MSG_DumpErrorRecord 0x57 #define PPSMC_MSG_EraseRasTable 0x58 #define PPSMC_MSG_GetStaticMetricsTable 0x59 -#define PPSMC_Message_Count 0x5A +#define PPSMC_MSG_ResetVfArbitersByIndex 0x5A +#define PPSMC_MSG_GetBadPageSeverity 0x5B +#define PPSMC_MSG_GetSystemMetricsTable 0x5C +#define PPSMC_MSG_GetSystemMetricsVersion 0x5D +#define PPSMC_MSG_ResetVCN 0x5E +#define PPSMC_Message_Count 0x5F //PPSMC Reset Types for driver msg argument #define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1 diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h index 41f268313613..63a088ef7169 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h @@ -94,9 +94,9 @@ #define PPSMC_MSG_RmaDueToBadPageThreshold 0x43 #define PPSMC_MSG_SetThrottlingPolicy 0x44 #define PPSMC_MSG_ResetSDMA 0x4D -#define PPSMC_MSG_ResetVCN 0x4E #define PPSMC_MSG_GetStaticMetricsTable 0x59 -#define PPSMC_Message_Count 0x5A +#define PPSMC_MSG_ResetVCN 0x5B +#define PPSMC_Message_Count 0x5C //PPSMC Reset Types for driver msg argument #define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1 diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h index eefdaa0b5df6..2256c77da636 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h @@ -278,7 +278,8 @@ __SMU_DUMMY_MAP(MALLPowerState), \ __SMU_DUMMY_MAP(ResetSDMA), \ __SMU_DUMMY_MAP(ResetVCN), \ - __SMU_DUMMY_MAP(GetStaticMetricsTable), + __SMU_DUMMY_MAP(GetStaticMetricsTable), \ + __SMU_DUMMY_MAP(GetSystemMetricsTable), #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) SMU_MSG_##type @@ -305,6 +306,8 @@ enum smu_clk_type { SMU_MCLK, SMU_PCIE, SMU_LCLK, + SMU_ISPICLK, + SMU_ISPXCLK, SMU_OD_CCLK, SMU_OD_SCLK, SMU_OD_MCLK, @@ -467,6 +470,7 @@ enum smu_feature_mask { /* Message category flags */ #define SMU_MSG_VF_FLAG (1U << 0) #define SMU_MSG_RAS_PRI (1U << 1) +#define SMU_MSG_NO_PRECHECK (1U << 2) /* Firmware capability flags */ #define SMU_FW_CAP_RAS_PRI (1U << 0) diff --git 
a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_0_pptable.h index 251ed011b3b0..251ed011b3b0 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_0_pptable.h diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 9ad46f545d15..4fff78da81ff 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -1745,10 +1745,10 @@ static int arcturus_i2c_control_init(struct smu_context *smu) snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i); i2c_set_adapdata(control, smu_i2c); - res = i2c_add_adapter(control); + res = devm_i2c_add_adapter(adev->dev, control); if (res) { DRM_ERROR("Failed to register hw i2c, err: %d\n", res); - goto Out_err; + return res; } } @@ -1756,27 +1756,12 @@ static int arcturus_i2c_control_init(struct smu_context *smu) adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter; return 0; -Out_err: - for ( ; i >= 0; i--) { - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; - struct i2c_adapter *control = &smu_i2c->adapter; - - i2c_del_adapter(control); - } - return res; } static void arcturus_i2c_control_fini(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - int i; - - for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; - struct i2c_adapter *control = &smu_i2c->adapter; - i2c_del_adapter(control); - } adev->pm.ras_eeprom_i2c_bus = NULL; adev->pm.fru_eeprom_i2c_bus = NULL; } @@ -1897,7 +1882,7 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu, ret = smu_cmn_get_metrics_table(smu, &metrics, - true); + false); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 7fad5dfb39c4..0028f10ead42 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -2444,7 +2444,8 @@ static int navi10_update_pcie_parameters(struct smu_context *smu, struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; PPTable_t *pptable = smu->smu_table.driver_pptable; uint32_t smu_pcie_arg; - int ret, i; + int ret = 0; + int i; /* lclk dpm table setup */ for (i = 0; i < MAX_PCIE_CONF; i++) { @@ -2453,25 +2454,27 @@ static int navi10_update_pcie_parameters(struct smu_context *smu, } for (i = 0; i < NUM_LINK_LEVELS; i++) { - smu_pcie_arg = (i << 16) | - ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) : - (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ? - pptable->PcieLaneCount[i] : pcie_width_cap); - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_OverridePcieParameters, - smu_pcie_arg, - NULL); - - if (ret) - return ret; - - if (pptable->PcieGenSpeed[i] > pcie_gen_cap) - dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap; - if (pptable->PcieLaneCount[i] > pcie_width_cap) - dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap; + if (pptable->PcieGenSpeed[i] > pcie_gen_cap || + pptable->PcieLaneCount[i] > pcie_width_cap) { + dpm_context->dpm_tables.pcie_table.pcie_gen[i] = + pptable->PcieGenSpeed[i] > pcie_gen_cap ? + pcie_gen_cap : pptable->PcieGenSpeed[i]; + dpm_context->dpm_tables.pcie_table.pcie_lane[i] = + pptable->PcieLaneCount[i] > pcie_width_cap ? 
+ pcie_width_cap : pptable->PcieLaneCount[i]; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_gen_cap << 8; + smu_pcie_arg |= pcie_width_cap; + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } } - return 0; + return ret; } static inline void navi10_dump_od_table(struct smu_context *smu, @@ -3142,10 +3145,10 @@ static int navi10_i2c_control_init(struct smu_context *smu) control->quirks = &navi10_i2c_control_quirks; i2c_set_adapdata(control, smu_i2c); - res = i2c_add_adapter(control); + res = devm_i2c_add_adapter(adev->dev, control); if (res) { DRM_ERROR("Failed to register hw i2c, err: %d\n", res); - goto Out_err; + return res; } } @@ -3153,27 +3156,12 @@ static int navi10_i2c_control_init(struct smu_context *smu) adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter; return 0; -Out_err: - for ( ; i >= 0; i--) { - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; - struct i2c_adapter *control = &smu_i2c->adapter; - - i2c_del_adapter(control); - } - return res; } static void navi10_i2c_control_fini(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - int i; - for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; - struct i2c_adapter *control = &smu_i2c->adapter; - - i2c_del_adapter(control); - } adev->pm.ras_eeprom_i2c_bus = NULL; adev->pm.fru_eeprom_i2c_bus = NULL; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 115e3fa456bc..31c2c0386b1f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -2145,7 +2145,8 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu, uint8_t min_gen_speed, max_gen_speed; uint8_t min_lane_width, max_lane_width; uint32_t smu_pcie_arg; - int ret, i; + int ret = 0; + int i; GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1); GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2); @@ -2170,19 +2171,22 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu, pcie_table->pcie_lane[1] = max_lane_width; for (i = 0; i < NUM_LINK_LEVELS; i++) { - smu_pcie_arg = (i << 16 | + if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK) || + table_member1[i] > pcie_gen_cap || table_member2[i] > pcie_width_cap) { + smu_pcie_arg = (i << 16 | pcie_table->pcie_gen[i] << 8 | pcie_table->pcie_lane[i]); - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_OverridePcieParameters, - smu_pcie_arg, - NULL); - if (ret) - return ret; + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } } - return 0; + return ret; } static int sienna_cichlid_get_dpm_ultimate_freq(struct smu_context *smu, @@ -2644,10 +2648,10 @@ static int sienna_cichlid_i2c_control_init(struct smu_context *smu) control->quirks = &sienna_cichlid_i2c_control_quirks; i2c_set_adapdata(control, smu_i2c); - res = i2c_add_adapter(control); + res = devm_i2c_add_adapter(adev->dev, control); if (res) { DRM_ERROR("Failed to register hw i2c, err: %d\n", res); - goto Out_err; + return res; } } /* assign the buses used for the FRU EEPROM and RAS EEPROM */ @@ -2656,27 +2660,12 @@ static int sienna_cichlid_i2c_control_init(struct smu_context *smu) adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; return 0; -Out_err: - for ( ; i >= 0; i--) { - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; - struct 
i2c_adapter *control = &smu_i2c->adapter; - - i2c_del_adapter(control); - } - return res; } static void sienna_cichlid_i2c_control_fini(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - int i; - for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; - struct i2c_adapter *control = &smu_i2c->adapter; - - i2c_del_adapter(control); - } adev->pm.ras_eeprom_i2c_bus = NULL; adev->pm.fru_eeprom_i2c_bus = NULL; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index a55ea76d7399..2c9869feba61 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -666,7 +666,6 @@ static int vangogh_print_clk_levels(struct smu_context *smu, { DpmClocks_t *clk_table = smu->smu_table.clocks_table; SmuMetrics_t metrics; - struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; @@ -682,31 +681,25 @@ static int vangogh_print_clk_levels(struct smu_context *smu, switch (clk_type) { case SMU_OD_SCLK: - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); - size += sysfs_emit_at(buf, size, "0: %10uMhz\n", - (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); - size += sysfs_emit_at(buf, size, "1: %10uMhz\n", - (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); - } + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "0: %10uMhz\n", + (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); + size += sysfs_emit_at(buf, size, "1: %10uMhz\n", + (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); break; case SMU_OD_CCLK: - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); - size += sysfs_emit_at(buf, size, "0: %10uMhz\n", - (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); - size += sysfs_emit_at(buf, size, "1: %10uMhz\n", - (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq); - } + size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); + size += sysfs_emit_at(buf, size, "0: %10uMhz\n", + (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); + size += sysfs_emit_at(buf, size, "1: %10uMhz\n", + (smu->cpu_actual_soft_max_freq > 0) ? 
smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq); break; case SMU_OD_RANGE: - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); - size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", - smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); - size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", - smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq); - } + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", + smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); + size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", + smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq); break; case SMU_SOCCLK: /* the level 3 ~ 6 of socclk use the same frequency for vangogh */ diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 9481f897432d..3baf20f4c373 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -470,7 +470,7 @@ static int renoir_od_edit_dpm_table(struct smu_context *smu, static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) { uint32_t min = 0, max = 0; - uint32_t ret = 0; + int ret = 0; ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinGfxclkFrequency, @@ -497,7 +497,6 @@ static int renoir_print_clk_levels(struct smu_context *smu, int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0; SmuMetrics_t metrics; - struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); bool cur_value_match_level = false; memset(&metrics, 0, sizeof(metrics)); @@ -510,28 +509,24 @@ static int renoir_print_clk_levels(struct smu_context *smu, switch (clk_type) { case SMU_OD_RANGE: - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_GetMinGfxclkFrequency, - 0, &min); - if (ret) - return ret; - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_GetMaxGfxclkFrequency, - 0, &max); - if (ret) - return ret; - size += sysfs_emit_at(buf, size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max); - } + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetMinGfxclkFrequency, + 0, &min); + if (ret) + return ret; + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetMaxGfxclkFrequency, + 0, &max); + if (ret) + return ret; + size += sysfs_emit_at(buf, size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max); break; case SMU_OD_SCLK: - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; - max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; - size += sysfs_emit_at(buf, size, "OD_SCLK\n"); - size += sysfs_emit_at(buf, size, "0:%10uMhz\n", min); - size += sysfs_emit_at(buf, size, "1:%10uMhz\n", max); - } + min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; + max = (smu->gfx_actual_soft_max_freq > 0) ? 
smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; + size += sysfs_emit_at(buf, size, "OD_SCLK\n"); + size += sysfs_emit_at(buf, size, "0:%10uMhz\n", min); + size += sysfs_emit_at(buf, size, "1:%10uMhz\n", max); break; case SMU_GFXCLK: case SMU_SCLK: diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index 6de653d2ed62..18d5d0704509 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -342,6 +342,61 @@ static int aldebaran_get_allowed_feature_mask(struct smu_context *smu, return 0; } +static int aldebaran_get_dpm_ultimate_freq(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *min, uint32_t *max) +{ + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + struct smu_13_0_dpm_table *dpm_table; + uint32_t min_clk, max_clk; + + if (amdgpu_sriov_vf(smu->adev)) { + switch (clk_type) { + case SMU_MCLK: + case SMU_UCLK: + dpm_table = &dpm_context->dpm_tables.uclk_table; + break; + case SMU_GFXCLK: + case SMU_SCLK: + dpm_table = &dpm_context->dpm_tables.gfx_table; + break; + case SMU_SOCCLK: + dpm_table = &dpm_context->dpm_tables.soc_table; + break; + case SMU_FCLK: + dpm_table = &dpm_context->dpm_tables.fclk_table; + break; + case SMU_VCLK: + dpm_table = &dpm_context->dpm_tables.vclk_table; + break; + case SMU_DCLK: + dpm_table = &dpm_context->dpm_tables.dclk_table; + break; + default: + return -EINVAL; + } + + min_clk = dpm_table->min; + max_clk = dpm_table->max; + + if (min) { + if (!min_clk) + return -ENODATA; + *min = min_clk; + } + if (max) { + if (!max_clk) + return -ENODATA; + *max = max_clk; + } + + } else { + return smu_v13_0_get_dpm_ultimate_freq(smu, clk_type, min, max); + } + + return 0; +} + static int aldebaran_set_default_dpm_table(struct smu_context *smu) { struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; @@ -1586,33 +1641,22 @@ static int aldebaran_i2c_control_init(struct smu_context *smu) control->quirks = &aldebaran_i2c_control_quirks; i2c_set_adapdata(control, smu_i2c); - res = i2c_add_adapter(control); + res = devm_i2c_add_adapter(adev->dev, control); if (res) { DRM_ERROR("Failed to register hw i2c, err: %d\n", res); - goto Out_err; + return res; } adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; return 0; -Out_err: - i2c_del_adapter(control); - - return res; } static void aldebaran_i2c_control_fini(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - int i; - for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; - struct i2c_adapter *control = &smu_i2c->adapter; - - i2c_del_adapter(control); - } adev->pm.ras_eeprom_i2c_bus = NULL; adev->pm.fru_eeprom_i2c_bus = NULL; } @@ -1726,7 +1770,7 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu, ret = smu_cmn_get_metrics_table(smu, &metrics, - true); + false); if (ret) return ret; @@ -2081,7 +2125,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = { .set_azalia_d3_pme = smu_v13_0_set_azalia_d3_pme, .get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc, .get_bamaco_support = aldebaran_get_bamaco_support, - .get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq, + .get_dpm_ultimate_freq = aldebaran_get_dpm_ultimate_freq, .set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range, .od_edit_dpm_table = aldebaran_usr_edit_dpm_table, 
.set_df_cstate = aldebaran_set_df_cstate, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 1c7235935d14..1a1f2a6b2e52 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -2386,7 +2386,8 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu, &dpm_context->dpm_tables.pcie_table; int num_of_levels = pcie_table->num_of_link_levels; uint32_t smu_pcie_arg; - int ret, i; + int ret = 0; + int i; if (!num_of_levels) return 0; @@ -2402,30 +2403,38 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu, for (i = 0; i < num_of_levels; i++) { pcie_table->pcie_gen[i] = pcie_gen_cap; pcie_table->pcie_lane[i] = pcie_width_cap; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; } } else { for (i = 0; i < num_of_levels; i++) { - if (pcie_table->pcie_gen[i] > pcie_gen_cap) + if (pcie_table->pcie_gen[i] > pcie_gen_cap || + pcie_table->pcie_lane[i] > pcie_width_cap) { pcie_table->pcie_gen[i] = pcie_gen_cap; - if (pcie_table->pcie_lane[i] > pcie_width_cap) pcie_table->pcie_lane[i] = pcie_width_cap; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } } } - for (i = 0; i < num_of_levels; i++) { - smu_pcie_arg = i << 16; - smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; - smu_pcie_arg |= pcie_table->pcie_lane[i]; - - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_OverridePcieParameters, - smu_pcie_arg, - NULL); - if (ret) - return ret; - } - - return 0; + return ret; } int smu_v13_0_disable_pmfw_state(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 5a9711e8cf68..c1062e5f0393 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -572,8 +572,6 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu) PPTable_t *pptable = table_context->driver_pptable; SkuTable_t *skutable = &pptable->SkuTable; struct smu_13_0_dpm_table *dpm_table; - struct smu_13_0_pcie_table *pcie_table; - uint32_t link_level; int ret = 0; /* socclk dpm table setup */ @@ -689,24 +687,6 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu) dpm_table->max = dpm_table->dpm_levels[0].value; } - /* lclk dpm table setup */ - pcie_table = &dpm_context->dpm_tables.pcie_table; - pcie_table->num_of_link_levels = 0; - for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { - if (!skutable->PcieGenSpeed[link_level] && - !skutable->PcieLaneCount[link_level] && - !skutable->LclkFreq[link_level]) - continue; - - pcie_table->pcie_gen[pcie_table->num_of_link_levels] = - skutable->PcieGenSpeed[link_level]; - pcie_table->pcie_lane[pcie_table->num_of_link_levels] = - skutable->PcieLaneCount[link_level]; - pcie_table->clk_freq[pcie_table->num_of_link_levels] = - skutable->LclkFreq[link_level]; - pcie_table->num_of_link_levels++; - } - /* dcefclk dpm table setup */ dpm_table = &dpm_context->dpm_tables.dcef_table; if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) { @@ -2845,10 +2825,10 @@ static int 
smu_v13_0_0_i2c_control_init(struct smu_context *smu) control->quirks = &smu_v13_0_0_i2c_control_quirks; i2c_set_adapdata(control, smu_i2c); - res = i2c_add_adapter(control); + res = devm_i2c_add_adapter(adev->dev, control); if (res) { DRM_ERROR("Failed to register hw i2c, err: %d\n", res); - goto Out_err; + return res; } } @@ -2858,27 +2838,12 @@ static int smu_v13_0_0_i2c_control_init(struct smu_context *smu) adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; return 0; -Out_err: - for ( ; i >= 0; i--) { - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; - struct i2c_adapter *control = &smu_i2c->adapter; - - i2c_del_adapter(control); - } - return res; } static void smu_v13_0_0_i2c_control_fini(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - int i; - for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; - struct i2c_adapter *control = &smu_i2c->adapter; - - i2c_del_adapter(control); - } adev->pm.ras_eeprom_i2c_bus = NULL; adev->pm.fru_eeprom_i2c_bus = NULL; } @@ -3150,6 +3115,90 @@ static int smu_v13_0_0_set_power_limit(struct smu_context *smu, return 0; } +static int smu_v13_0_0_update_pcie_parameters(struct smu_context *smu, + uint8_t pcie_gen_cap, + uint8_t pcie_width_cap) +{ + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + struct smu_13_0_pcie_table *pcie_table = + &dpm_context->dpm_tables.pcie_table; + int num_of_levels; + uint32_t smu_pcie_arg; + uint32_t link_level; + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + SkuTable_t *skutable = &pptable->SkuTable; + int ret = 0; + int i; + + pcie_table->num_of_link_levels = 0; + + for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { + if (!skutable->PcieGenSpeed[link_level] && + !skutable->PcieLaneCount[link_level] && + !skutable->LclkFreq[link_level]) + continue; + + pcie_table->pcie_gen[pcie_table->num_of_link_levels] = + skutable->PcieGenSpeed[link_level]; + pcie_table->pcie_lane[pcie_table->num_of_link_levels] = + skutable->PcieLaneCount[link_level]; + pcie_table->clk_freq[pcie_table->num_of_link_levels] = + skutable->LclkFreq[link_level]; + pcie_table->num_of_link_levels++; + } + + num_of_levels = pcie_table->num_of_link_levels; + if (!num_of_levels) + return 0; + + if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) { + if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap) + pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1]; + + if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap) + pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1]; + + /* Force all levels to use the same settings */ + for (i = 0; i < num_of_levels; i++) { + pcie_table->pcie_gen[i] = pcie_gen_cap; + pcie_table->pcie_lane[i] = pcie_width_cap; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } + } else { + for (i = 0; i < num_of_levels; i++) { + if (pcie_table->pcie_gen[i] > pcie_gen_cap || + pcie_table->pcie_lane[i] > pcie_width_cap) { + pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ? + pcie_gen_cap : pcie_table->pcie_gen[i]; + pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ? 
+ pcie_width_cap : pcie_table->pcie_lane[i]; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } + } + } + + return ret; +} + static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask, .set_default_dpm_table = smu_v13_0_0_set_default_dpm_table, @@ -3179,7 +3228,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .feature_is_enabled = smu_cmn_feature_is_enabled, .print_clk_levels = smu_v13_0_0_print_clk_levels, .force_clk_levels = smu_v13_0_0_force_clk_levels, - .update_pcie_parameters = smu_v13_0_update_pcie_parameters, + .update_pcie_parameters = smu_v13_0_0_update_pcie_parameters, .get_thermal_temperature_range = smu_v13_0_0_get_thermal_temperature_range, .register_irq_handler = smu_v13_0_register_irq_handler, .enable_thermal_alert = smu_v13_0_enable_thermal_alert, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c index e0d356f93ab0..cb3fea9e8cf3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c @@ -83,7 +83,6 @@ const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[SMU_FEATURE_COUNT] = SMU_13_0_12_FEA_MAP(SMU_FEATURE_PIT_BIT, FEATURE_PIT), }; -// clang-format off const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), @@ -106,7 +105,7 @@ const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1), MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1), - MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI), + MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI | SMU_MSG_NO_PRECHECK), MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0), MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0), MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0), @@ -137,9 +136,57 @@ const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0), MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0), MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0), + MSG_MAP(ResetVCN, PPSMC_MSG_ResetVCN, 0), MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 1), + MSG_MAP(GetSystemMetricsTable, PPSMC_MSG_GetSystemMetricsTable, 1), }; +int smu_v13_0_12_tables_init(struct smu_context *smu) +{ + struct amdgpu_baseboard_temp_metrics_v1_0 *baseboard_temp_metrics; + struct amdgpu_gpuboard_temp_metrics_v1_0 *gpuboard_temp_metrics; + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *tables = smu_table->tables; + struct smu_table_cache *cache; + int ret; + + ret = smu_table_cache_init(smu, SMU_TABLE_PMFW_SYSTEM_METRICS, + smu_v13_0_12_get_system_metrics_size(), 5); + + if (ret) + return ret; + + ret = smu_table_cache_init(smu, SMU_TABLE_BASEBOARD_TEMP_METRICS, + sizeof(*baseboard_temp_metrics), 50); + if (ret) + return ret; + /* Initialize base board temperature metrics */ + cache = &(tables[SMU_TABLE_BASEBOARD_TEMP_METRICS].cache); + baseboard_temp_metrics = + 
(struct amdgpu_baseboard_temp_metrics_v1_0 *) cache->buffer; + smu_cmn_init_baseboard_temp_metrics(baseboard_temp_metrics, 1, 0); + /* Initialize GPU board temperature metrics */ + ret = smu_table_cache_init(smu, SMU_TABLE_GPUBOARD_TEMP_METRICS, + sizeof(*gpuboard_temp_metrics), 50); + if (ret) { + smu_table_cache_fini(smu, SMU_TABLE_PMFW_SYSTEM_METRICS); + smu_table_cache_fini(smu, SMU_TABLE_BASEBOARD_TEMP_METRICS); + return ret; + } + cache = &(tables[SMU_TABLE_GPUBOARD_TEMP_METRICS].cache); + gpuboard_temp_metrics = (struct amdgpu_gpuboard_temp_metrics_v1_0 *)cache->buffer; + smu_cmn_init_gpuboard_temp_metrics(gpuboard_temp_metrics, 1, 0); + + return 0; +} + +void smu_v13_0_12_tables_fini(struct smu_context *smu) +{ + smu_table_cache_fini(smu, SMU_TABLE_BASEBOARD_TEMP_METRICS); + smu_table_cache_fini(smu, SMU_TABLE_GPUBOARD_TEMP_METRICS); + smu_table_cache_fini(smu, SMU_TABLE_PMFW_SYSTEM_METRICS); +} + static int smu_v13_0_12_get_enabled_mask(struct smu_context *smu, uint64_t *feature_mask) { @@ -187,14 +234,45 @@ int smu_v13_0_12_get_max_metrics_size(void) return max(sizeof(StaticMetricsTable_t), sizeof(MetricsTable_t)); } +size_t smu_v13_0_12_get_system_metrics_size(void) +{ + return sizeof(SystemMetricsTable_t); +} + +static void smu_v13_0_12_init_xgmi_data(struct smu_context *smu, + StaticMetricsTable_t *static_metrics) +{ + struct smu_table_context *smu_table = &smu->smu_table; + uint16_t max_speed; + uint8_t max_width; + int ret; + + if (smu_table->tables[SMU_TABLE_SMU_METRICS].version >= 0x13) { + max_width = (uint8_t)static_metrics->MaxXgmiWidth; + max_speed = (uint16_t)static_metrics->MaxXgmiBitrate; + ret = 0; + } else { + MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table; + + ret = smu_v13_0_6_get_metrics_table(smu, NULL, true); + if (!ret) { + max_width = (uint8_t)metrics->XgmiWidth; + max_speed = (uint16_t)metrics->XgmiBitrate; + } + } + if (!ret) + amgpu_xgmi_set_max_speed_width(smu->adev, max_speed, max_width); +} + int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu) { + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; struct smu_table_context *smu_table = &smu->smu_table; StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table; struct PPTable_t *pptable = (struct PPTable_t *)smu_table->driver_pptable; uint32_t table_version; - int ret, i; + int ret, i, n; if (!pptable->Init) { ret = smu_v13_0_6_get_static_metrics_table(smu); @@ -233,10 +311,41 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu) /* use AID0 serial number by default */ pptable->PublicSerialNumber_AID = static_metrics->PublicSerialNumber_AID[0]; + + amdgpu_device_set_uid(smu->adev->uid_info, AMDGPU_UID_TYPE_SOC, + 0, pptable->PublicSerialNumber_AID); + n = ARRAY_SIZE(static_metrics->PublicSerialNumber_AID); + for (i = 0; i < n; i++) { + amdgpu_device_set_uid( + smu->adev->uid_info, AMDGPU_UID_TYPE_AID, i, + static_metrics->PublicSerialNumber_AID[i]); + } + n = ARRAY_SIZE(static_metrics->PublicSerialNumber_XCD); + for (i = 0; i < n; i++) { + amdgpu_device_set_uid( + smu->adev->uid_info, AMDGPU_UID_TYPE_XCD, i, + static_metrics->PublicSerialNumber_XCD[i]); + } + ret = smu_v13_0_12_fru_get_product_info(smu, static_metrics); if (ret) return ret; + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(BOARD_VOLTAGE))) { + if (!static_metrics->InputTelemetryVoltageInmV) { + dev_warn(smu->adev->dev, "Invalid board voltage %d\n", + static_metrics->InputTelemetryVoltageInmV); + } + dpm_context->board_volt = 
static_metrics->InputTelemetryVoltageInmV; + } + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PLDM_VERSION)) && + static_metrics->pldmVersion[0] != 0xFFFFFFFF) + smu->adev->firmware.pldm_version = + static_metrics->pldmVersion[0]; + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(NPM_METRICS))) + pptable->MaxNodePowerLimit = + SMUQ10_ROUND(static_metrics->MaxNodePowerLimit); + smu_v13_0_12_init_xgmi_data(smu, static_metrics); pptable->Init = true; } @@ -263,7 +372,6 @@ int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, struct smu_table_context *smu_table = &smu->smu_table; MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table; struct amdgpu_device *adev = smu->adev; - int ret = 0; int xcc_id; /* For clocks with multiple instances, only report the first one */ @@ -319,9 +427,295 @@ int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, break; } + return 0; +} + +static int smu_v13_0_12_get_system_metrics_table(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *table = &smu_table->driver_table; + struct smu_table *tables = smu_table->tables; + struct smu_table *sys_table; + int ret; + + sys_table = &tables[SMU_TABLE_PMFW_SYSTEM_METRICS]; + if (smu_table_cache_is_valid(sys_table)) + return 0; + + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSystemMetricsTable, NULL); + if (ret) { + dev_info(smu->adev->dev, + "Failed to export system metrics table!\n"); + return ret; + } + + amdgpu_asic_invalidate_hdp(smu->adev, NULL); + smu_table_cache_update_time(sys_table, jiffies); + memcpy(sys_table->cache.buffer, table->cpu_addr, + smu_v13_0_12_get_system_metrics_size()); + + return 0; +} + +static enum amdgpu_node_temp smu_v13_0_12_get_node_sensor_type(NODE_TEMP_e type) +{ + switch (type) { + case NODE_TEMP_RETIMER: + return AMDGPU_RETIMER_X_TEMP; + case NODE_TEMP_IBC_TEMP: + return AMDGPU_OAM_X_IBC_TEMP; + case NODE_TEMP_IBC_2_TEMP: + return AMDGPU_OAM_X_IBC_2_TEMP; + case NODE_TEMP_VDD18_VR_TEMP: + return AMDGPU_OAM_X_VDD18_VR_TEMP; + case NODE_TEMP_04_HBM_B_VR_TEMP: + return AMDGPU_OAM_X_04_HBM_B_VR_TEMP; + case NODE_TEMP_04_HBM_D_VR_TEMP: + return AMDGPU_OAM_X_04_HBM_D_VR_TEMP; + default: + return -EINVAL; + } +} + +static enum amdgpu_vr_temp smu_v13_0_12_get_vr_sensor_type(SVI_TEMP_e type) +{ + switch (type) { + case SVI_VDDCR_VDD0_TEMP: + return AMDGPU_VDDCR_VDD0_TEMP; + case SVI_VDDCR_VDD1_TEMP: + return AMDGPU_VDDCR_VDD1_TEMP; + case SVI_VDDCR_VDD2_TEMP: + return AMDGPU_VDDCR_VDD2_TEMP; + case SVI_VDDCR_VDD3_TEMP: + return AMDGPU_VDDCR_VDD3_TEMP; + case SVI_VDDCR_SOC_A_TEMP: + return AMDGPU_VDDCR_SOC_A_TEMP; + case SVI_VDDCR_SOC_C_TEMP: + return AMDGPU_VDDCR_SOC_C_TEMP; + case SVI_VDDCR_SOCIO_A_TEMP: + return AMDGPU_VDDCR_SOCIO_A_TEMP; + case SVI_VDDCR_SOCIO_C_TEMP: + return AMDGPU_VDDCR_SOCIO_C_TEMP; + case SVI_VDD_085_HBM_TEMP: + return AMDGPU_VDD_085_HBM_TEMP; + case SVI_VDDCR_11_HBM_B_TEMP: + return AMDGPU_VDDCR_11_HBM_B_TEMP; + case SVI_VDDCR_11_HBM_D_TEMP: + return AMDGPU_VDDCR_11_HBM_D_TEMP; + case SVI_VDD_USR_TEMP: + return AMDGPU_VDD_USR_TEMP; + case SVI_VDDIO_11_E32_TEMP: + return AMDGPU_VDDIO_11_E32_TEMP; + default: + return -EINVAL; + } +} + +static enum amdgpu_system_temp smu_v13_0_12_get_system_sensor_type(SYSTEM_TEMP_e type) +{ + switch (type) { + case SYSTEM_TEMP_UBB_FPGA: + return AMDGPU_UBB_FPGA_TEMP; + case SYSTEM_TEMP_UBB_FRONT: + return AMDGPU_UBB_FRONT_TEMP; + case SYSTEM_TEMP_UBB_BACK: + return AMDGPU_UBB_BACK_TEMP; + case SYSTEM_TEMP_UBB_OAM7: + return AMDGPU_UBB_OAM7_TEMP; + case 
SYSTEM_TEMP_UBB_IBC: + return AMDGPU_UBB_IBC_TEMP; + case SYSTEM_TEMP_UBB_UFPGA: + return AMDGPU_UBB_UFPGA_TEMP; + case SYSTEM_TEMP_UBB_OAM1: + return AMDGPU_UBB_OAM1_TEMP; + case SYSTEM_TEMP_OAM_0_1_HSC: + return AMDGPU_OAM_0_1_HSC_TEMP; + case SYSTEM_TEMP_OAM_2_3_HSC: + return AMDGPU_OAM_2_3_HSC_TEMP; + case SYSTEM_TEMP_OAM_4_5_HSC: + return AMDGPU_OAM_4_5_HSC_TEMP; + case SYSTEM_TEMP_OAM_6_7_HSC: + return AMDGPU_OAM_6_7_HSC_TEMP; + case SYSTEM_TEMP_UBB_FPGA_0V72_VR: + return AMDGPU_UBB_FPGA_0V72_VR_TEMP; + case SYSTEM_TEMP_UBB_FPGA_3V3_VR: + return AMDGPU_UBB_FPGA_3V3_VR_TEMP; + case SYSTEM_TEMP_RETIMER_0_1_2_3_1V2_VR: + return AMDGPU_RETIMER_0_1_2_3_1V2_VR_TEMP; + case SYSTEM_TEMP_RETIMER_4_5_6_7_1V2_VR: + return AMDGPU_RETIMER_4_5_6_7_1V2_VR_TEMP; + case SYSTEM_TEMP_RETIMER_0_1_0V9_VR: + return AMDGPU_RETIMER_0_1_0V9_VR_TEMP; + case SYSTEM_TEMP_RETIMER_4_5_0V9_VR: + return AMDGPU_RETIMER_4_5_0V9_VR_TEMP; + case SYSTEM_TEMP_RETIMER_2_3_0V9_VR: + return AMDGPU_RETIMER_2_3_0V9_VR_TEMP; + case SYSTEM_TEMP_RETIMER_6_7_0V9_VR: + return AMDGPU_RETIMER_6_7_0V9_VR_TEMP; + case SYSTEM_TEMP_OAM_0_1_2_3_3V3_VR: + return AMDGPU_OAM_0_1_2_3_3V3_VR_TEMP; + case SYSTEM_TEMP_OAM_4_5_6_7_3V3_VR: + return AMDGPU_OAM_4_5_6_7_3V3_VR_TEMP; + case SYSTEM_TEMP_IBC_HSC: + return AMDGPU_IBC_HSC_TEMP; + case SYSTEM_TEMP_IBC: + return AMDGPU_IBC_TEMP; + default: + return -EINVAL; + } +} + +static bool smu_v13_0_12_is_temp_metrics_supported(struct smu_context *smu, + enum smu_temp_metric_type type) +{ + switch (type) { + case SMU_TEMP_METRIC_BASEBOARD: + if (smu->adev->gmc.xgmi.physical_node_id == 0 && + smu->adev->gmc.xgmi.num_physical_nodes > 1 && + smu_v13_0_6_cap_supported(smu, SMU_CAP(TEMP_METRICS))) + return true; + break; + case SMU_TEMP_METRIC_GPUBOARD: + return smu_v13_0_6_cap_supported(smu, SMU_CAP(TEMP_METRICS)); + default: + break; + } + + return false; +} + +int smu_v13_0_12_get_npm_data(struct smu_context *smu, + enum amd_pp_sensors sensor, + uint32_t *value) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct PPTable_t *pptable = + (struct PPTable_t *)smu_table->driver_pptable; + struct smu_table *tables = smu_table->tables; + SystemMetricsTable_t *metrics; + struct smu_table *sys_table; + int ret; + + if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(NPM_METRICS))) + return -EOPNOTSUPP; + + if (sensor == AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT) { + *value = pptable->MaxNodePowerLimit; + return 0; + } + + ret = smu_v13_0_12_get_system_metrics_table(smu); + if (ret) + return ret; + + sys_table = &tables[SMU_TABLE_PMFW_SYSTEM_METRICS]; + metrics = (SystemMetricsTable_t *)sys_table->cache.buffer; + + switch (sensor) { + case AMDGPU_PP_SENSOR_NODEPOWERLIMIT: + *value = SMUQ10_ROUND(metrics->NodePowerLimit); + break; + case AMDGPU_PP_SENSOR_NODEPOWER: + *value = SMUQ10_ROUND(metrics->NodePower); + break; + case AMDGPU_PP_SENSOR_GPPTRESIDENCY: + *value = SMUQ10_ROUND(metrics->GlobalPPTResidencyAcc); + break; + default: + return -EINVAL; + } + return ret; } +static ssize_t smu_v13_0_12_get_temp_metrics(struct smu_context *smu, + enum smu_temp_metric_type type, void *table) +{ + struct amdgpu_baseboard_temp_metrics_v1_0 *baseboard_temp_metrics; + struct amdgpu_gpuboard_temp_metrics_v1_0 *gpuboard_temp_metrics; + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *tables = smu_table->tables; + SystemMetricsTable_t *metrics; + struct smu_table *data_table; + struct smu_table *sys_table; + int ret, sensor_type; + u32 idx, sensors; + ssize_t size; + + if (type == 
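
The node-power values read in smu_v13_0_12_get_npm_data() above come back from PMFW as Q10 fixed-point numbers and are converted with SMUQ10_ROUND before being reported. The driver's own macro is the authoritative definition; as a stand-alone illustration, round-to-nearest with 10 fractional bits looks like this (the half-LSB bias is an assumption about the macro, not copied from it):

    #include <stdint.h>
    #include <stdio.h>

    /* Round a Q10 fixed-point value (10 fractional bits) to an integer. */
    static uint64_t q10_round(uint64_t v)
    {
            return (v + (1u << 9)) >> 10;
    }

    int main(void)
    {
            printf("%llu\n", (unsigned long long)q10_round(1536)); /* 1.5 -> 2 */
            return 0;
    }
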
SMU_TEMP_METRIC_BASEBOARD) { + /* Initialize base board temperature metrics */ + data_table = + &smu->smu_table.tables[SMU_TABLE_BASEBOARD_TEMP_METRICS]; + baseboard_temp_metrics = + (struct amdgpu_baseboard_temp_metrics_v1_0 *) + data_table->cache.buffer; + size = sizeof(*baseboard_temp_metrics); + } else { + data_table = + &smu->smu_table.tables[SMU_TABLE_GPUBOARD_TEMP_METRICS]; + gpuboard_temp_metrics = + (struct amdgpu_gpuboard_temp_metrics_v1_0 *) + data_table->cache.buffer; + size = sizeof(*baseboard_temp_metrics); + } + + ret = smu_v13_0_12_get_system_metrics_table(smu); + if (ret) + return ret; + + sys_table = &tables[SMU_TABLE_PMFW_SYSTEM_METRICS]; + metrics = (SystemMetricsTable_t *)sys_table->cache.buffer; + smu_table_cache_update_time(data_table, jiffies); + + if (type == SMU_TEMP_METRIC_GPUBOARD) { + gpuboard_temp_metrics->accumulation_counter = metrics->AccumulationCounter; + gpuboard_temp_metrics->label_version = metrics->LabelVersion; + gpuboard_temp_metrics->node_id = metrics->NodeIdentifier; + + idx = 0; + for (sensors = 0; sensors < NODE_TEMP_MAX_TEMP_ENTRIES; sensors++) { + if (metrics->NodeTemperatures[sensors] != -1) { + sensor_type = smu_v13_0_12_get_node_sensor_type(sensors); + gpuboard_temp_metrics->node_temp[idx] = + ((int)metrics->NodeTemperatures[sensors]) & 0xFFFFFF; + gpuboard_temp_metrics->node_temp[idx] |= (sensor_type << 24); + idx++; + } + } + + idx = 0; + + for (sensors = 0; sensors < SVI_MAX_TEMP_ENTRIES; sensors++) { + if (metrics->VrTemperatures[sensors] != -1) { + sensor_type = smu_v13_0_12_get_vr_sensor_type(sensors); + gpuboard_temp_metrics->vr_temp[idx] = + ((int)metrics->VrTemperatures[sensors]) & 0xFFFFFF; + gpuboard_temp_metrics->vr_temp[idx] |= (sensor_type << 24); + idx++; + } + } + } else if (type == SMU_TEMP_METRIC_BASEBOARD) { + baseboard_temp_metrics->accumulation_counter = metrics->AccumulationCounter; + baseboard_temp_metrics->label_version = metrics->LabelVersion; + baseboard_temp_metrics->node_id = metrics->NodeIdentifier; + + idx = 0; + for (sensors = 0; sensors < SYSTEM_TEMP_MAX_ENTRIES; sensors++) { + if (metrics->SystemTemperatures[sensors] != -1) { + sensor_type = smu_v13_0_12_get_system_sensor_type(sensors); + baseboard_temp_metrics->system_temp[idx] = + ((int)metrics->SystemTemperatures[sensors]) & 0xFFFFFF; + baseboard_temp_metrics->system_temp[idx] |= (sensor_type << 24); + idx++; + } + } + } + + memcpy(table, data_table->cache.buffer, size); + + return size; +} + ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics) { const u8 num_jpeg_rings = NUM_JPEG_RINGS_FW; @@ -535,3 +929,8 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void return sizeof(*gpu_metrics); } + +const struct smu_temp_funcs smu_v13_0_12_temp_funcs = { + .temp_metrics_is_supported = smu_v13_0_12_is_temp_metrics_supported, + .get_temp_metrics = smu_v13_0_12_get_temp_metrics, +}; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index f00ef7f3f355..cbe5b06438c1 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -143,9 +143,9 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1), MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1), - MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), + 
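
The temperature metrics assembled above store each reading as a packed word: the translated sensor type in the top byte and the raw temperature in the low 24 bits. A small encode/decode pair for that layout (plain C, no driver types):

    #include <stdint.h>
    #include <stdio.h>

    /* Entry layout used above: [31:24] sensor type, [23:0] temperature. */
    static uint32_t pack_temp(int type, int32_t temp)
    {
            return ((uint32_t)temp & 0xFFFFFF) | ((uint32_t)type << 24);
    }

    static void unpack_temp(uint32_t v, int *type, int32_t *temp)
    {
            *type = v >> 24;
            *temp = v & 0xFFFFFF;   /* sign-extend here if readings can be negative */
    }

    int main(void)
    {
            int type;
            int32_t temp;

            unpack_temp(pack_temp(3, 56), &type, &temp);
            printf("type=%d temp=%d\n", type, temp);
            return 0;
    }
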
MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 1), MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1), - MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI), + MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI | SMU_MSG_NO_PRECHECK), MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0), MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0), MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0), @@ -177,7 +177,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0), MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0), MSG_MAP(ResetVCN, PPSMC_MSG_ResetVCN, 0), - MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 0), + MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 1), }; // clang-format on @@ -312,6 +312,8 @@ static void smu_v13_0_14_init_caps(struct smu_context *smu) smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS)); if (fw_ver >= 0x5551200) smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET)); + if (fw_ver >= 0x5551800) + smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET)); if (fw_ver >= 0x5551600) { smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS)); smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE)); @@ -345,6 +347,25 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu) if (fw_ver >= 0x00562500) smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS)); + + if (fw_ver >= 0x04560100) { + smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE)); + smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION)); + } + + if (fw_ver > 0x04560900) + smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET)); + + if (fw_ver >= 0x04560700) { + if (fw_ver >= 0x04560900) { + smu_v13_0_6_cap_set(smu, SMU_CAP(TEMP_METRICS)); + if (smu->adev->gmc.xgmi.physical_node_id == 0) + smu_v13_0_6_cap_set(smu, SMU_CAP(NPM_METRICS)); + } else if (!amdgpu_sriov_vf(smu->adev)) + smu_v13_0_6_cap_set(smu, SMU_CAP(TEMP_METRICS)); + } else { + smu_v13_0_12_tables_fini(smu); + } } static void smu_v13_0_6_init_caps(struct smu_context *smu) @@ -397,19 +418,41 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu) if ((pgm == 7 && fw_ver >= 0x7550E00) || (pgm == 0 && fw_ver >= 0x00557E00)) smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS)); - if ((pgm == 0 && fw_ver >= 0x00557F01) || - (pgm == 7 && fw_ver >= 0x7551000)) { - smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS)); - smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE)); + + if (amdgpu_sriov_vf(adev)) { + if (fw_ver >= 0x00558200) + amdgpu_virt_attr_set(&adev->virt.virt_caps, + AMDGPU_VIRT_CAP_POWER_LIMIT, + AMDGPU_CAP_ATTR_RW); + if ((pgm == 0 && fw_ver >= 0x00558000) || + (pgm == 7 && fw_ver >= 0x7551000)) { + smu_v13_0_6_cap_set(smu, + SMU_CAP(STATIC_METRICS)); + smu_v13_0_6_cap_set(smu, + SMU_CAP(BOARD_VOLTAGE)); + smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION)); + } + } else { + if ((pgm == 0 && fw_ver >= 0x00557F01) || + (pgm == 7 && fw_ver >= 0x7551000)) { + smu_v13_0_6_cap_set(smu, + SMU_CAP(STATIC_METRICS)); + smu_v13_0_6_cap_set(smu, + SMU_CAP(BOARD_VOLTAGE)); + } + if ((pgm == 0 && fw_ver >= 0x00558000) || + (pgm == 7 && fw_ver >= 0x7551000)) + smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION)); } - if ((pgm == 0 && fw_ver >= 0x00558000) || - (pgm == 7 && fw_ver >= 0x7551000)) - smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION)); } if (((pgm == 7) && (fw_ver >= 0x7550700)) || ((pgm == 0) && (fw_ver >= 0x00557900)) || ((pgm == 4) && (fw_ver >= 0x4557000))) smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET)); + + if (((pgm == 
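
The capability setup above gates features on packed firmware version numbers, with separate thresholds per program (pgm) variant and a dedicated branch for SR-IOV. Because the version is a plain hex-packed integer, ordinary comparisons work; a sketch using the STATIC_METRICS thresholds quoted in the hunk:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Version is a packed hex number, so ordinary >= comparisons suffice. */
    static bool static_metrics_supported(int pgm, uint32_t fw_ver, bool vf)
    {
            if (vf)
                    return (pgm == 0 && fw_ver >= 0x00558000) ||
                           (pgm == 7 && fw_ver >= 0x7551000);
            return (pgm == 0 && fw_ver >= 0x00557F01) ||
                   (pgm == 7 && fw_ver >= 0x7551000);
    }

    int main(void)
    {
            printf("%d\n", static_metrics_supported(0, 0x00558200, true));
            return 0;
    }
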
0) && (fw_ver >= 0x00558200)) || + ((pgm == 4) && (fw_ver >= 0x04557100))) + smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET)); } static void smu_v13_0_x_init_caps(struct smu_context *smu) @@ -506,8 +549,12 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *tables = smu_table->tables; + void *gpu_metrics_table __free(kfree) = NULL; + void *driver_pptable __free(kfree) = NULL; + void *metrics_table __free(kfree) = NULL; struct amdgpu_device *adev = smu->adev; int gpu_metrcs_size = METRICS_TABLE_SIZE; + int ret; if (!(adev->flags & AMD_IS_APU)) SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE, @@ -523,27 +570,36 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu) PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT); - smu_table->metrics_table = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); - if (!smu_table->metrics_table) + SMU_TABLE_INIT(tables, SMU_TABLE_PMFW_SYSTEM_METRICS, + smu_v13_0_12_get_system_metrics_size(), PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT); + + metrics_table = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); + if (!metrics_table) return -ENOMEM; smu_table->metrics_time = 0; smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_8); - smu_table->gpu_metrics_table = + gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); - if (!smu_table->gpu_metrics_table) { - kfree(smu_table->metrics_table); + if (!gpu_metrics_table) return -ENOMEM; - } - smu_table->driver_pptable = - kzalloc(sizeof(struct PPTable_t), GFP_KERNEL); - if (!smu_table->driver_pptable) { - kfree(smu_table->metrics_table); - kfree(smu_table->gpu_metrics_table); + driver_pptable = kzalloc(sizeof(struct PPTable_t), GFP_KERNEL); + if (!driver_pptable) return -ENOMEM; + + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == + IP_VERSION(13, 0, 12)) { + ret = smu_v13_0_12_tables_init(smu); + if (ret) + return ret; } + smu_table->gpu_metrics_table = no_free_ptr(gpu_metrics_table); + smu_table->metrics_table = no_free_ptr(metrics_table); + smu_table->driver_pptable = no_free_ptr(driver_pptable); + return 0; } @@ -672,6 +728,13 @@ static int smu_v13_0_6_init_smc_tables(struct smu_context *smu) return ret; } +static int smu_v13_0_6_fini_smc_tables(struct smu_context *smu) +{ + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) + smu_v13_0_12_tables_fini(smu); + return smu_v13_0_fini_smc_tables(smu); +} + static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu, uint32_t *feature_mask, uint32_t num) @@ -685,8 +748,8 @@ static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu, return 0; } -static int smu_v13_0_6_get_metrics_table(struct smu_context *smu, - void *metrics_table, bool bypass_cache) +int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table, + bool bypass_cache) { struct smu_table_context *smu_table = &smu->smu_table; uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size; @@ -798,8 +861,10 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) struct PPTable_t *pptable = (struct PPTable_t *)smu_table->driver_pptable; int version = smu_v13_0_6_get_metrics_version(smu); - int ret, i, retry = 100; + int ret, i, retry = 100, n; uint32_t table_version; + uint16_t max_speed; + uint8_t max_width; if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) && smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) @@ -835,6 +900,9 @@ static int 
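
smu_v13_0_6_tables_init() above now uses the kernel's scope-based cleanup helpers: the __free(kfree) annotation frees a temporary allocation automatically on any early return, and no_free_ptr() hands ownership out by clearing the variable so the automatic free becomes a no-op. Those helpers are built on the compiler's cleanup attribute; a stand-alone illustration of the same idea (names are local to this example, GCC/Clang only):

    #include <stdio.h>
    #include <stdlib.h>

    static void free_p(void *pp)            /* runs when the variable leaves scope */
    {
            free(*(void **)pp);
    }
    #define auto_free __attribute__((cleanup(free_p)))

    static void *take(void **pp)            /* transfer ownership: disarm the cleanup */
    {
            void *v = *pp;
            *pp = NULL;
            return v;
    }

    static char *make_buffer(int fail)
    {
            auto_free void *buf = malloc(64);

            if (!buf || fail)
                    return NULL;            /* buf freed automatically here */
            return take(&buf);              /* survives the scope exit */
    }

    int main(void)
    {
            char *b = make_buffer(0);

            printf("%p\n", (void *)b);
            free(b);
            return 0;
    }

This is why the error paths no longer need the chained kfree() calls removed above.
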
smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency, version)); pptable->MinGfxclkFrequency = SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency, version)); + max_width = (uint8_t)GET_METRIC_FIELD(XgmiWidth, version); + max_speed = (uint16_t)GET_METRIC_FIELD(XgmiBitrate, version); + amgpu_xgmi_set_max_speed_width(smu->adev, max_speed, max_width); for (i = 0; i < 4; ++i) { pptable->FclkFrequencyTable[i] = @@ -855,6 +923,23 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) pptable->PublicSerialNumber_AID = GET_METRIC_FIELD(PublicSerialNumber_AID, version)[0]; + amdgpu_device_set_uid(smu->adev->uid_info, AMDGPU_UID_TYPE_SOC, + 0, pptable->PublicSerialNumber_AID); + n = ARRAY_SIZE(metrics_v0->PublicSerialNumber_AID); + for (i = 0; i < n; i++) { + amdgpu_device_set_uid( + smu->adev->uid_info, AMDGPU_UID_TYPE_AID, i, + GET_METRIC_FIELD(PublicSerialNumber_AID, + version)[i]); + } + n = ARRAY_SIZE(metrics_v0->PublicSerialNumber_XCD); + for (i = 0; i < n; i++) { + amdgpu_device_set_uid( + smu->adev->uid_info, AMDGPU_UID_TYPE_XCD, i, + GET_METRIC_FIELD(PublicSerialNumber_XCD, + version)[i]); + } + pptable->Init = true; if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) { ret = smu_v13_0_6_get_static_metrics_table(smu); @@ -871,51 +956,51 @@ static int smu_v13_0_6_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max) { + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; struct smu_table_context *smu_table = &smu->smu_table; struct PPTable_t *pptable = (struct PPTable_t *)smu_table->driver_pptable; - uint32_t clock_limit = 0, param; + struct smu_13_0_dpm_table *dpm_table; + uint32_t min_clk, max_clk, param; int ret = 0, clk_id = 0; - if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) { + /* Use dpm tables, if data is already fetched */ + if (pptable->Init) { switch (clk_type) { case SMU_MCLK: case SMU_UCLK: - if (pptable->Init) - clock_limit = pptable->UclkFrequencyTable[0]; + dpm_table = &dpm_context->dpm_tables.uclk_table; break; case SMU_GFXCLK: case SMU_SCLK: - if (pptable->Init) - clock_limit = pptable->MinGfxclkFrequency; + dpm_table = &dpm_context->dpm_tables.gfx_table; break; case SMU_SOCCLK: - if (pptable->Init) - clock_limit = pptable->SocclkFrequencyTable[0]; + dpm_table = &dpm_context->dpm_tables.soc_table; break; case SMU_FCLK: - if (pptable->Init) - clock_limit = pptable->FclkFrequencyTable[0]; + dpm_table = &dpm_context->dpm_tables.fclk_table; break; case SMU_VCLK: - if (pptable->Init) - clock_limit = pptable->VclkFrequencyTable[0]; + dpm_table = &dpm_context->dpm_tables.vclk_table; break; case SMU_DCLK: - if (pptable->Init) - clock_limit = pptable->DclkFrequencyTable[0]; + dpm_table = &dpm_context->dpm_tables.dclk_table; break; default: - break; + return -EINVAL; } - if (min) - *min = clock_limit; + min_clk = dpm_table->min; + max_clk = dpm_table->max; + if (min) + *min = min_clk; if (max) - *max = clock_limit; + *max = max_clk; - return 0; + if (min_clk && max_clk) + return 0; } if (!(clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)) { @@ -1377,8 +1462,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, return ret; } - min_clk = pstate_table->gfxclk_pstate.curr.min; - max_clk = pstate_table->gfxclk_pstate.curr.max; + single_dpm_table = &(dpm_context->dpm_tables.gfx_table); + min_clk = single_dpm_table->min; + max_clk = single_dpm_table->max; if (now < SMU_13_0_6_DSCLK_THRESHOLD) { size += sysfs_emit_at(buf, size, "S: 
%uMhz *\n", @@ -1720,6 +1806,15 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu, ret = -EOPNOTSUPP; break; } + case AMDGPU_PP_SENSOR_NODEPOWERLIMIT: + case AMDGPU_PP_SENSOR_NODEPOWER: + case AMDGPU_PP_SENSOR_GPPTRESIDENCY: + case AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT: + ret = smu_v13_0_12_get_npm_data(smu, sensor, (uint32_t *)data); + if (ret) + return ret; + *size = 4; + break; case AMDGPU_PP_SENSOR_GPU_AVG_POWER: default: ret = -EOPNOTSUPP; @@ -2415,10 +2510,10 @@ static int smu_v13_0_6_i2c_control_init(struct smu_context *smu) control->quirks = &smu_v13_0_6_i2c_control_quirks; i2c_set_adapdata(control, smu_i2c); - res = i2c_add_adapter(control); + res = devm_i2c_add_adapter(adev->dev, control); if (res) { DRM_ERROR("Failed to register hw i2c, err: %d\n", res); - goto Out_err; + return res; } } @@ -2426,27 +2521,12 @@ static int smu_v13_0_6_i2c_control_init(struct smu_context *smu) adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; return 0; -Out_err: - for ( ; i >= 0; i--) { - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; - struct i2c_adapter *control = &smu_i2c->adapter; - - i2c_del_adapter(control); - } - return res; } static void smu_v13_0_6_i2c_control_fini(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - int i; - for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; - struct i2c_adapter *control = &smu_i2c->adapter; - - i2c_del_adapter(control); - } adev->pm.ras_eeprom_i2c_bus = NULL; adev->pm.fru_eeprom_i2c_bus = NULL; } @@ -2549,9 +2629,9 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id, const u8 num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3; int version = smu_v13_0_6_get_metrics_version(smu); struct amdgpu_partition_metrics_v1_0 *xcp_metrics; + MetricsTableV0_t *metrics_v0 __free(kfree) = NULL; struct amdgpu_device *adev = smu->adev; int ret, inst, i, j, k, idx; - MetricsTableV0_t *metrics_v0; MetricsTableV1_t *metrics_v1; MetricsTableV2_t *metrics_v2; struct amdgpu_xcp *xcp; @@ -2576,17 +2656,14 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id, return -ENOMEM; ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false); - if (ret) { - kfree(metrics_v0); + if (ret) return ret; - } if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) && - smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) { - ret = smu_v13_0_12_get_xcp_metrics(smu, xcp, table, metrics_v0); - goto out; - } + smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) + return smu_v13_0_12_get_xcp_metrics(smu, xcp, table, + metrics_v0); metrics_v1 = (MetricsTableV1_t *)metrics_v0; metrics_v2 = (MetricsTableV2_t *)metrics_v0; @@ -2657,8 +2734,6 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id, idx++; } } -out: - kfree(metrics_v0); return sizeof(*xcp_metrics); } @@ -2669,31 +2744,26 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table struct gpu_metrics_v1_8 *gpu_metrics = (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table; int version = smu_v13_0_6_get_metrics_version(smu); + MetricsTableV0_t *metrics_v0 __free(kfree) = NULL; int ret = 0, xcc_id, inst, i, j, k, idx; struct amdgpu_device *adev = smu->adev; - MetricsTableV0_t *metrics_v0; MetricsTableV1_t *metrics_v1; MetricsTableV2_t *metrics_v2; struct amdgpu_xcp *xcp; u16 link_width_level; - ssize_t num_bytes; u8 num_jpeg_rings; u32 inst_mask; bool per_inst; metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); - 
ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, true); - if (ret) { - kfree(metrics_v0); + ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false); + if (ret) return ret; - } - if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) && - smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) { - num_bytes = smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0); - kfree(metrics_v0); - return num_bytes; - } + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == + IP_VERSION(13, 0, 12) && + smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) + return smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0); metrics_v1 = (MetricsTableV1_t *)metrics_v0; metrics_v2 = (MetricsTableV2_t *)metrics_v0; @@ -2879,7 +2949,6 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, version); *table = (void *)gpu_metrics; - kfree(metrics_v0); return sizeof(*gpu_metrics); } @@ -3065,7 +3134,7 @@ static inline bool smu_v13_0_6_is_link_reset_supported(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; int var = (adev->pdev->device & 0xF); - if (var == 0x1) + if (var == 0x0 || var == 0x1 || var == 0x3) return true; return false; @@ -3141,6 +3210,11 @@ static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask) return ret; } +static bool smu_v13_0_6_reset_vcn_is_supported(struct smu_context *smu) +{ + return smu_v13_0_6_cap_supported(smu, SMU_CAP(VCN_RESET)); +} + static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask) { int ret = 0; @@ -3154,6 +3228,20 @@ static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask) } +static int smu_v13_0_6_post_init(struct smu_context *smu) +{ + if (smu_v13_0_6_is_link_reset_supported(smu)) + smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__LINK_RESET); + + if (smu_v13_0_6_reset_sdma_is_supported(smu)) + smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__SDMA_RESET); + + if (smu_v13_0_6_reset_vcn_is_supported(smu)) + smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__VCN_RESET); + + return 0; +} + static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable) { struct smu_context *smu = adev->powerplay.pp_handle; @@ -3770,6 +3858,12 @@ static const struct aca_smu_funcs smu_v13_0_6_aca_smu_funcs = { .parse_error_code = aca_smu_parse_error_code, }; +static void smu_v13_0_6_set_temp_funcs(struct smu_context *smu) +{ + smu->smu_temp.temp_funcs = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) + == IP_VERSION(13, 0, 12)) ? 
&smu_v13_0_12_temp_funcs : NULL; +} + static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { /* init dpm */ .get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask, @@ -3786,7 +3880,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .init_microcode = smu_v13_0_6_init_microcode, .fini_microcode = smu_v13_0_fini_microcode, .init_smc_tables = smu_v13_0_6_init_smc_tables, - .fini_smc_tables = smu_v13_0_fini_smc_tables, + .fini_smc_tables = smu_v13_0_6_fini_smc_tables, .init_power = smu_v13_0_init_power, .fini_power = smu_v13_0_fini_power, .check_fw_status = smu_v13_0_6_check_fw_status, @@ -3817,7 +3911,6 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .get_xcp_metrics = smu_v13_0_6_get_xcp_metrics, .get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range, .mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported, - .link_reset_is_support = smu_v13_0_6_is_link_reset_supported, .mode1_reset = smu_v13_0_6_mode1_reset, .mode2_reset = smu_v13_0_6_mode2_reset, .link_reset = smu_v13_0_6_link_reset, @@ -3827,8 +3920,8 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num, .send_rma_reason = smu_v13_0_6_send_rma_reason, .reset_sdma = smu_v13_0_6_reset_sdma, - .reset_sdma_is_supported = smu_v13_0_6_reset_sdma_is_supported, .dpm_reset_vcn = smu_v13_0_6_reset_vcn, + .post_init = smu_v13_0_6_post_init, }; void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu) @@ -3843,6 +3936,8 @@ void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu) smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION; smu->smc_fw_caps |= SMU_FW_CAP_RAS_PRI; smu_v13_0_set_smu_mailbox_registers(smu); + smu_v13_0_6_set_temp_funcs(smu); amdgpu_mca_smu_init_funcs(smu->adev, &smu_v13_0_6_mca_smu_funcs); amdgpu_aca_set_smu_funcs(smu->adev, &smu_v13_0_6_aca_smu_funcs); } + diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h index d38d6d76b1e7..7ef5f3e66c27 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h @@ -49,6 +49,7 @@ struct PPTable_t { uint32_t MaxLclkDpmRange; uint32_t MinLclkDpmRange; uint64_t PublicSerialNumber_AID; + uint32_t MaxNodePowerLimit; bool Init; }; @@ -64,19 +65,25 @@ enum smu_v13_0_6_caps { SMU_CAP(RMA_MSG), SMU_CAP(ACA_SYND), SMU_CAP(SDMA_RESET), + SMU_CAP(VCN_RESET), SMU_CAP(STATIC_METRICS), SMU_CAP(HST_LIMIT_METRICS), SMU_CAP(BOARD_VOLTAGE), SMU_CAP(PLDM_VERSION), + SMU_CAP(TEMP_METRICS), + SMU_CAP(NPM_METRICS), SMU_CAP(ALL), }; extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu); bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap); int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu); +int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table, + bool bypass_cache); bool smu_v13_0_12_is_dpm_running(struct smu_context *smu); int smu_v13_0_12_get_max_metrics_size(void); +size_t smu_v13_0_12_get_system_metrics_size(void); int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu); int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, MetricsMember_t member, uint32_t *value); @@ -84,6 +91,12 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics); +int smu_v13_0_12_tables_init(struct smu_context *smu); +void 
smu_v13_0_12_tables_fini(struct smu_context *smu); +int smu_v13_0_12_get_npm_data(struct smu_context *smu, + enum amd_pp_sensors sensor, + uint32_t *value); extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[]; extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[]; +extern const struct smu_temp_funcs smu_v13_0_12_temp_funcs; #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index c8f4f6fb4083..c96fa5e49ed6 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -579,8 +579,6 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu) PPTable_t *driver_ppt = smu->smu_table.driver_pptable; SkuTable_t *skutable = &driver_ppt->SkuTable; struct smu_13_0_dpm_table *dpm_table; - struct smu_13_0_pcie_table *pcie_table; - uint32_t link_level; int ret = 0; /* socclk dpm table setup */ @@ -687,24 +685,6 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu) dpm_table->max = dpm_table->dpm_levels[0].value; } - /* lclk dpm table setup */ - pcie_table = &dpm_context->dpm_tables.pcie_table; - pcie_table->num_of_link_levels = 0; - for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { - if (!skutable->PcieGenSpeed[link_level] && - !skutable->PcieLaneCount[link_level] && - !skutable->LclkFreq[link_level]) - continue; - - pcie_table->pcie_gen[pcie_table->num_of_link_levels] = - skutable->PcieGenSpeed[link_level]; - pcie_table->pcie_lane[pcie_table->num_of_link_levels] = - skutable->PcieLaneCount[link_level]; - pcie_table->clk_freq[pcie_table->num_of_link_levels] = - skutable->LclkFreq[link_level]; - pcie_table->num_of_link_levels++; - } - /* dcefclk dpm table setup */ dpm_table = &dpm_context->dpm_tables.dcef_table; if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) { @@ -2739,6 +2719,89 @@ static int smu_v13_0_7_set_power_limit(struct smu_context *smu, return 0; } +static int smu_v13_0_7_update_pcie_parameters(struct smu_context *smu, + uint8_t pcie_gen_cap, + uint8_t pcie_width_cap) +{ + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + struct smu_13_0_pcie_table *pcie_table = + &dpm_context->dpm_tables.pcie_table; + int num_of_levels; + int link_level; + uint32_t smu_pcie_arg; + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + SkuTable_t *skutable = &pptable->SkuTable; + int ret = 0; + int i; + + pcie_table->num_of_link_levels = 0; + for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { + if (!skutable->PcieGenSpeed[link_level] && + !skutable->PcieLaneCount[link_level] && + !skutable->LclkFreq[link_level]) + continue; + + pcie_table->pcie_gen[pcie_table->num_of_link_levels] = + skutable->PcieGenSpeed[link_level]; + pcie_table->pcie_lane[pcie_table->num_of_link_levels] = + skutable->PcieLaneCount[link_level]; + pcie_table->clk_freq[pcie_table->num_of_link_levels] = + skutable->LclkFreq[link_level]; + pcie_table->num_of_link_levels++; + } + + num_of_levels = pcie_table->num_of_link_levels; + if (!num_of_levels) + return 0; + + if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) { + if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap) + pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1]; + + if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap) + pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1]; + + /* Force all levels to use the same settings */ + 
for (i = 0; i < num_of_levels; i++) { + pcie_table->pcie_gen[i] = pcie_gen_cap; + pcie_table->pcie_lane[i] = pcie_width_cap; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } + } else { + for (i = 0; i < num_of_levels; i++) { + if (pcie_table->pcie_gen[i] > pcie_gen_cap || + pcie_table->pcie_lane[i] > pcie_width_cap) { + pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ? + pcie_gen_cap : pcie_table->pcie_gen[i]; + pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ? + pcie_width_cap : pcie_table->pcie_lane[i]; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } + } + } + + return ret; +} + static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask, .set_default_dpm_table = smu_v13_0_7_set_default_dpm_table, @@ -2768,7 +2831,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .feature_is_enabled = smu_cmn_feature_is_enabled, .print_clk_levels = smu_v13_0_7_print_clk_levels, .force_clk_levels = smu_v13_0_7_force_clk_levels, - .update_pcie_parameters = smu_v13_0_update_pcie_parameters, + .update_pcie_parameters = smu_v13_0_7_update_pcie_parameters, .get_thermal_temperature_range = smu_v13_0_7_get_thermal_temperature_range, .register_irq_handler = smu_v13_0_register_irq_handler, .enable_thermal_alert = smu_v13_0_enable_thermal_alert, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 76c1adda83db..f9b0938c57ea 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -62,13 +62,14 @@ const int decoded_link_width[8] = {0, 1, 2, 4, 8, 12, 16, 32}; MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin"); MODULE_FIRMWARE("amdgpu/smu_14_0_3.bin"); +MODULE_FIRMWARE("amdgpu/smu_14_0_3_kicker.bin"); #define ENABLE_IMU_ARG_GFXOFF_ENABLE 1 int smu_v14_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - char ucode_prefix[15]; + char ucode_prefix[30]; int err = 0; const struct smc_firmware_header_v1_0 *hdr; const struct common_firmware_header *header; @@ -79,8 +80,12 @@ int smu_v14_0_init_microcode(struct smu_context *smu) return 0; amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix)); - err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, - "amdgpu/%s.bin", ucode_prefix); + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_kicker.bin", ucode_prefix); + else + err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s.bin", ucode_prefix); if (err) goto out; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c index 84f9b007b59f..fe00c84b1cc6 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c @@ -1207,11 +1207,13 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu, static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, 
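
The smu_v14_0_init_microcode() change further up in this hunk selects a separate "_kicker" firmware image when amdgpu_is_kicker_fw() reports one is required; only the requested file name changes. A trivial sketch of the name selection (the prefix value is illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    static void smu_fw_name(char *buf, size_t len, const char *prefix, bool kicker)
    {
            snprintf(buf, len, kicker ? "amdgpu/%s_kicker.bin" : "amdgpu/%s.bin",
                     prefix);
    }

    int main(void)
    {
            char name[64];

            smu_fw_name(name, sizeof(name), "smu_14_0_3", true);
            puts(name);     /* amdgpu/smu_14_0_3_kicker.bin */
            return 0;
    }
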
- uint32_t max) + u32 min, + u32 max, + bool __always_unused automatic) { - enum smu_message_type msg_set_min, msg_set_max; - int ret = 0; + enum smu_message_type msg_set_min = SMU_MSG_MAX_COUNT; + enum smu_message_type msg_set_max = SMU_MSG_MAX_COUNT; + int ret = -EINVAL; if (!smu_v14_0_0_clk_dpm_is_enabled(smu, clk_type)) return -EINVAL; @@ -1240,16 +1242,23 @@ static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu, msg_set_min = SMU_MSG_SetHardMinVcn1; msg_set_max = SMU_MSG_SetSoftMaxVcn1; break; + case SMU_ISPICLK: + msg_set_min = SMU_MSG_SetHardMinIspiclkByFreq; + break; + case SMU_ISPXCLK: + msg_set_min = SMU_MSG_SetHardMinIspxclkByFreq; + break; default: return -EINVAL; } - ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL); - if (ret) - return ret; + if (min && msg_set_min != SMU_MSG_MAX_COUNT) + ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL); + + if (max && msg_set_max != SMU_MSG_MAX_COUNT) + ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_max, max, NULL); - return smu_cmn_send_smc_msg_with_param(smu, msg_set_max, - max, NULL); + return ret; } static int smu_v14_0_0_force_clk_levels(struct smu_context *smu, @@ -1278,7 +1287,7 @@ static int smu_v14_0_0_force_clk_levels(struct smu_context *smu, if (ret) break; - ret = smu_v14_0_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = smu_v14_0_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); break; default: ret = -EINVAL; @@ -1426,7 +1435,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk_min, - sclk_max); + sclk_max, + false); if (ret) return ret; @@ -1438,7 +1448,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_min, - fclk_max); + fclk_max, + false); if (ret) return ret; } @@ -1447,7 +1458,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_min, - socclk_max); + socclk_max, + false); if (ret) return ret; } @@ -1456,7 +1468,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_min, - vclk_max); + vclk_max, + false); if (ret) return ret; } @@ -1465,7 +1478,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_VCLK1, vclk1_min, - vclk1_max); + vclk1_max, + false); if (ret) return ret; } @@ -1474,7 +1488,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_min, - dclk_max); + dclk_max, + false); if (ret) return ret; } @@ -1483,7 +1498,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_DCLK1, dclk1_min, - dclk1_max); + dclk1_max, + false); if (ret) return ret; } @@ -1533,6 +1549,14 @@ static int smu_v14_0_0_set_vpe_enable(struct smu_context *smu, 0, NULL); } +static int smu_v14_0_0_set_isp_enable(struct smu_context *smu, + bool enable) +{ + return smu_cmn_send_smc_msg_with_param(smu, enable ? 
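
smu_v14_0_0_set_soft_freq_limited_range() above now sends only the limit messages that exist for the given clock and only for non-zero values, which is what lets the new ISP clocks define a hard-min message without a matching soft-max. A compact sketch of that send-if-defined logic (the message IDs and the send stub are stand-ins, not driver symbols):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MSG_NONE (-1)

    /* Stand-in for the SMC mailbox call. */
    static int send_msg(int msg, uint32_t arg)
    {
            printf("msg %d arg %u\n", msg, arg);
            return 0;
    }

    /* Send only the limits that are both requested (non-zero) and backed by
     * a message for this clock; -EINVAL if nothing was sent at all. */
    static int set_soft_range(int msg_min, int msg_max, uint32_t min, uint32_t max)
    {
            int ret = -EINVAL;

            if (min && msg_min != MSG_NONE)
                    ret = send_msg(msg_min, min);
            if (max && msg_max != MSG_NONE)
                    ret = send_msg(msg_max, max);
            return ret;
    }

    int main(void)
    {
            return set_soft_range(10, MSG_NONE, 400, 0) ? 1 : 0;
    }
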
+ SMU_MSG_PowerUpIspByTile : SMU_MSG_PowerDownIspByTile, + ISP_ALL_TILES_MASK, NULL); +} + static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu, bool enable) { @@ -1662,6 +1686,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = { .gfx_off_control = smu_v14_0_gfx_off_control, .mode2_reset = smu_v14_0_0_mode2_reset, .get_dpm_ultimate_freq = smu_v14_0_common_get_dpm_ultimate_freq, + .set_soft_freq_limited_range = smu_v14_0_0_set_soft_freq_limited_range, .od_edit_dpm_table = smu_v14_0_od_edit_dpm_table, .print_clk_levels = smu_v14_0_0_print_clk_levels, .force_clk_levels = smu_v14_0_0_force_clk_levels, @@ -1669,6 +1694,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = { .set_fine_grain_gfx_freq_parameters = smu_v14_0_common_set_fine_grain_gfx_freq_parameters, .set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu, .dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable, + .dpm_set_isp_enable = smu_v14_0_0_set_isp_enable, .dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable, .get_dpm_clock_table = smu_v14_0_common_get_dpm_table, .set_mall_enable = smu_v14_0_common_set_mall_enable, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c index 82c2db972491..086501cc5213 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -502,8 +502,6 @@ static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu) PPTable_t *pptable = table_context->driver_pptable; SkuTable_t *skutable = &pptable->SkuTable; struct smu_14_0_dpm_table *dpm_table; - struct smu_14_0_pcie_table *pcie_table; - uint32_t link_level; int ret = 0; /* socclk dpm table setup */ @@ -619,27 +617,6 @@ static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu) dpm_table->max = dpm_table->dpm_levels[0].value; } - /* lclk dpm table setup */ - pcie_table = &dpm_context->dpm_tables.pcie_table; - pcie_table->num_of_link_levels = 0; - for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { - if (!skutable->PcieGenSpeed[link_level] && - !skutable->PcieLaneCount[link_level] && - !skutable->LclkFreq[link_level]) - continue; - - pcie_table->pcie_gen[pcie_table->num_of_link_levels] = - skutable->PcieGenSpeed[link_level]; - pcie_table->pcie_lane[pcie_table->num_of_link_levels] = - skutable->PcieLaneCount[link_level]; - pcie_table->clk_freq[pcie_table->num_of_link_levels] = - skutable->LclkFreq[link_level]; - pcie_table->num_of_link_levels++; - - if (link_level == 0) - link_level++; - } - /* dcefclk dpm table setup */ dpm_table = &dpm_context->dpm_tables.dcef_table; if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) { @@ -1487,10 +1464,31 @@ static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu, struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; struct smu_14_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table; - int num_of_levels = pcie_table->num_of_link_levels; + int num_of_levels; uint32_t smu_pcie_arg; - int ret, i; + uint32_t link_level; + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + SkuTable_t *skutable = &pptable->SkuTable; + int ret = 0; + int i; + pcie_table->num_of_link_levels = 0; + for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { + if (!skutable->PcieGenSpeed[link_level] && + !skutable->PcieLaneCount[link_level] && + !skutable->LclkFreq[link_level]) + continue; + + 
pcie_table->pcie_gen[pcie_table->num_of_link_levels] = + skutable->PcieGenSpeed[link_level]; + pcie_table->pcie_lane[pcie_table->num_of_link_levels] = + skutable->PcieLaneCount[link_level]; + pcie_table->clk_freq[pcie_table->num_of_link_levels] = + skutable->LclkFreq[link_level]; + pcie_table->num_of_link_levels++; + } + num_of_levels = pcie_table->num_of_link_levels; if (!num_of_levels) return 0; @@ -1505,30 +1503,40 @@ static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu, for (i = 0; i < num_of_levels; i++) { pcie_table->pcie_gen[i] = pcie_gen_cap; pcie_table->pcie_lane[i] = pcie_width_cap; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; } } else { for (i = 0; i < num_of_levels; i++) { - if (pcie_table->pcie_gen[i] > pcie_gen_cap) - pcie_table->pcie_gen[i] = pcie_gen_cap; - if (pcie_table->pcie_lane[i] > pcie_width_cap) - pcie_table->pcie_lane[i] = pcie_width_cap; - } - } - - for (i = 0; i < num_of_levels; i++) { - smu_pcie_arg = i << 16; - smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; - smu_pcie_arg |= pcie_table->pcie_lane[i]; - - ret = smu_cmn_send_smc_msg_with_param(smu, + if (pcie_table->pcie_gen[i] > pcie_gen_cap || + pcie_table->pcie_lane[i] > pcie_width_cap) { + pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ? + pcie_gen_cap : pcie_table->pcie_gen[i]; + pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ? + pcie_width_cap : pcie_table->pcie_lane[i]; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_OverridePcieParameters, smu_pcie_arg, NULL); - if (ret) - return ret; + if (ret) + break; + } + } } - return 0; + return ret; } static const struct smu_temperature_range smu14_thermal_policy[] = { @@ -1689,9 +1697,11 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu, uint32_t *min_power_limit) { struct smu_table_context *table_context = &smu->smu_table; + struct smu_14_0_2_powerplay_table *powerplay_table = + table_context->power_play_table; PPTable_t *pptable = table_context->driver_pptable; CustomSkuTable_t *skutable = &pptable->CustomSkuTable; - uint32_t power_limit; + uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0; uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC]; if (smu_v14_0_get_current_power_limit(smu, &power_limit)) @@ -1704,11 +1714,29 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu, if (default_power_limit) *default_power_limit = power_limit; - if (max_power_limit) - *max_power_limit = msg_limit; + if (powerplay_table) { + if (smu->od_enabled && + smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) { + od_percent_upper = pptable->SkuTable.OverDriveLimitsBasicMax.Ppt; + od_percent_lower = pptable->SkuTable.OverDriveLimitsBasicMin.Ppt; + } else if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) { + od_percent_upper = 0; + od_percent_lower = pptable->SkuTable.OverDriveLimitsBasicMin.Ppt; + } + } - if (min_power_limit) - *min_power_limit = 0; + dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", + od_percent_upper, od_percent_lower, power_limit); + + if (max_power_limit) { + *max_power_limit = msg_limit * (100 + od_percent_upper); + *max_power_limit /= 
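
With overdrive supported, smu_v14_0_2_get_power_limit() above widens the reported limits by the SkuTable overdrive percentages; the arithmetic is a plain integer percentage scale, with the lower-bound percentage typically being negative. For example:

    #include <stdint.h>
    #include <stdio.h>

    /* limit scaled by (100 + pct) percent, as in the power-limit hunk above */
    static uint32_t scale_limit(uint32_t limit, int pct)
    {
            return limit * (100 + pct) / 100;
    }

    int main(void)
    {
            printf("max: %u\n", scale_limit(360, 10));   /* +10 percent -> 396 */
            printf("min: %u\n", scale_limit(300, -30));  /* -30 percent -> 210 */
            return 0;
    }
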
100; + } + + if (min_power_limit) { + *min_power_limit = power_limit * (100 + od_percent_lower); + *min_power_limit /= 100; + } return 0; } @@ -2059,10 +2087,10 @@ static int smu_v14_0_2_i2c_control_init(struct smu_context *smu) control->quirks = &smu_v14_0_2_i2c_control_quirks; i2c_set_adapdata(control, smu_i2c); - res = i2c_add_adapter(control); + res = devm_i2c_add_adapter(adev->dev, control); if (res) { DRM_ERROR("Failed to register hw i2c, err: %d\n", res); - goto Out_err; + return res; } } @@ -2072,27 +2100,12 @@ static int smu_v14_0_2_i2c_control_init(struct smu_context *smu) adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; return 0; -Out_err: - for ( ; i >= 0; i--) { - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; - struct i2c_adapter *control = &smu_i2c->adapter; - - i2c_del_adapter(control); - } - return res; } static void smu_v14_0_2_i2c_control_fini(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - int i; - for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; - struct i2c_adapter *control = &smu_i2c->adapter; - - i2c_del_adapter(control); - } adev->pm.ras_eeprom_i2c_bus = NULL; adev->pm.fru_eeprom_i2c_bus = NULL; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 7eaf58fd7f9a..f532f7c69259 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -86,6 +86,7 @@ static void smu_cmn_read_arg(struct smu_context *smu, #define SMU_RESP_BUSY_OTHER 0xFC #define SMU_RESP_DEBUG_END 0xFB +#define SMU_RESP_UNEXP (~0U) /** * __smu_cmn_poll_stat -- poll for a status from the SMU * @smu: a pointer to SMU context @@ -171,6 +172,15 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu, dev_err_ratelimited(adev->dev, "SMU: I'm debugging!"); break; + case SMU_RESP_UNEXP: + if (amdgpu_device_bus_status_check(smu->adev)) { + /* print error immediately if device is off the bus */ + dev_err(adev->dev, + "SMU: response:0x%08X for index:%d param:0x%08X message:%s?", + reg_c2pmsg_90, msg_index, param, message); + break; + } + fallthrough; default: dev_err_ratelimited(adev->dev, "SMU: response:0x%08X for index:%d param:0x%08X message:%s?", @@ -246,11 +256,12 @@ static int __smu_cmn_ras_filter_msg(struct smu_context *smu, { struct amdgpu_device *adev = smu->adev; uint32_t flags, resp; - bool fed_status; + bool fed_status, pri; flags = __smu_cmn_get_msg_flags(smu, msg); *poll = true; + pri = !!(flags & SMU_MSG_NO_PRECHECK); /* When there is RAS fatal error, FW won't process non-RAS priority * messages. Don't allow any messages other than RAS priority messages. */ @@ -262,15 +273,18 @@ static int __smu_cmn_ras_filter_msg(struct smu_context *smu, smu_get_message_name(smu, msg)); return -EACCES; } + } + if (pri || fed_status) { /* FW will ignore non-priority messages when a RAS fatal error - * is detected. Hence it is possible that a previous message - * wouldn't have got response. Allow to continue without polling - * for response status for priority messages. + * or reset condition is detected. Hence it is possible that a + * previous message wouldn't have got response. Allow to + * continue without polling for response status for priority + * messages. 
*/ resp = RREG32(smu->resp_reg); dev_dbg(adev->dev, - "Sending RAS priority message %s response status: %x", + "Sending priority message %s response status: %x", smu_get_message_name(smu, msg), resp); if (resp == 0) *poll = false; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h index 7473672abd2a..d588f74b98de 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h @@ -40,28 +40,55 @@ #define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8 #define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9 -#define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev) \ - do { \ - typecheck(struct gpu_metrics_v##frev##_##crev, \ - typeof(*(ptr))); \ - struct metrics_table_header *header = \ - (struct metrics_table_header *)(ptr); \ - memset(header, 0xFF, sizeof(*(ptr))); \ - header->format_revision = frev; \ - header->content_revision = crev; \ - header->structure_size = sizeof(*(ptr)); \ +#define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev) \ + do { \ + typecheck(struct gpu_metrics_v##frev##_##crev *, (ptr)); \ + struct gpu_metrics_v##frev##_##crev *tmp = (ptr); \ + struct metrics_table_header *header = \ + (struct metrics_table_header *)tmp; \ + memset(header, 0xFF, sizeof(*tmp)); \ + header->format_revision = frev; \ + header->content_revision = crev; \ + header->structure_size = sizeof(*tmp); \ } while (0) -#define smu_cmn_init_partition_metrics(ptr, frev, crev) \ - do { \ - typecheck(struct amdgpu_partition_metrics_v##frev##_##crev, \ - typeof(*(ptr))); \ - struct metrics_table_header *header = \ - (struct metrics_table_header *)(ptr); \ - memset(header, 0xFF, sizeof(*(ptr))); \ - header->format_revision = frev; \ - header->content_revision = crev; \ - header->structure_size = sizeof(*(ptr)); \ +#define smu_cmn_init_partition_metrics(ptr, fr, cr) \ + do { \ + typecheck(struct amdgpu_partition_metrics_v##fr##_##cr *, \ + (ptr)); \ + struct amdgpu_partition_metrics_v##fr##_##cr *tmp = (ptr); \ + struct metrics_table_header *header = \ + (struct metrics_table_header *)tmp; \ + memset(header, 0xFF, sizeof(*tmp)); \ + header->format_revision = fr; \ + header->content_revision = cr; \ + header->structure_size = sizeof(*tmp); \ + } while (0) + +#define smu_cmn_init_baseboard_temp_metrics(ptr, fr, cr) \ + do { \ + typecheck(struct amdgpu_baseboard_temp_metrics_v##fr##_##cr *, \ + (ptr)); \ + struct amdgpu_baseboard_temp_metrics_v##fr##_##cr *tmp = (ptr); \ + struct metrics_table_header *header = \ + (struct metrics_table_header *)tmp; \ + memset(header, 0xFF, sizeof(*tmp)); \ + header->format_revision = fr; \ + header->content_revision = cr; \ + header->structure_size = sizeof(*tmp); \ + } while (0) + +#define smu_cmn_init_gpuboard_temp_metrics(ptr, fr, cr) \ + do { \ + typecheck(struct amdgpu_gpuboard_temp_metrics_v##fr##_##cr *, \ + (ptr)); \ + struct amdgpu_gpuboard_temp_metrics_v##fr##_##cr *tmp = (ptr); \ + struct metrics_table_header *header = \ + (struct metrics_table_header *)tmp; \ + memset(header, 0xFF, sizeof(*tmp)); \ + header->format_revision = fr; \ + header->content_revision = cr; \ + header->structure_size = sizeof(*tmp); \ } while (0) extern const int link_speed[]; diff --git a/drivers/gpu/drm/amd/ras/rascore/Makefile b/drivers/gpu/drm/amd/ras/rascore/Makefile new file mode 100644 index 000000000000..e69de29bb2d1 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/Makefile diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_core_status.h b/drivers/gpu/drm/amd/ras/rascore/ras_core_status.h new file mode 100644 index 
000000000000..144fbe4ceb9a --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_core_status.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __RAS_CORE_STATUS_H__ +#define __RAS_CORE_STATUS_H__ + +#define RAS_CORE_OK 0 +#define RAS_CORE_NOT_SUPPORTED 248 +#define RAS_CORE_FAIL_ERROR_QUERY 249 +#define RAS_CORE_FAIL_ERROR_INJECTION 250 +#define RAS_CORE_FAIL_FATAL_RECOVERY 251 +#define RAS_CORE_FAIL_POISON_CONSUMPTION 252 +#define RAS_CORE_FAIL_POISON_CREATION 253 +#define RAS_CORE_FAIL_NO_VALID_BANKS 254 +#define RAS_CORE_GPU_IN_MODE1_RESET 255 +#endif |
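
The OverridePcieParameters argument built in the smu_v14_0_2_update_pcie_parameters() hunks above packs the link level into bits 16 and up, the gen speed into bits 15:8 and the lane count into bits 7:0, while the reworked smu_v14_0_2_get_power_limit() scales the stock limits by the overdrive percentages read from the SKU table. A minimal standalone sketch of those two calculations follows; the helper names and the sample numbers are illustrative only and are not part of the driver.

/*
 * Illustrative sketch only: these helpers do not exist in the driver; they
 * mirror the argument packing and the overdrive scaling shown in the hunks
 * above, using made-up sample values.
 */
#include <stdint.h>
#include <stdio.h>

/* OverridePcieParameters: link level in bits 16+, gen speed in 15:8, lanes in 7:0. */
static uint32_t pack_pcie_override_arg(uint32_t level, uint32_t gen, uint32_t lanes)
{
        return (level << 16) | (gen << 8) | lanes;
}

/* Scale a power limit by an overdrive percentage, as smu_v14_0_2_get_power_limit() now does. */
static uint32_t scale_power_limit(uint32_t limit, uint32_t od_percent)
{
        return limit * (100 + od_percent) / 100;
}

int main(void)
{
        uint32_t pcie_gen_cap = 3, pcie_width_cap = 4;  /* sample platform caps */
        uint32_t gen = 4, lanes = 6;                    /* sample pptable entry */

        /* Clamp the table entry to the caps, as the patched else-branch does. */
        if (gen > pcie_gen_cap)
                gen = pcie_gen_cap;
        if (lanes > pcie_width_cap)
                lanes = pcie_width_cap;

        printf("level 0 arg: 0x%08x\n", (unsigned int)pack_pcie_override_arg(0, gen, lanes));

        /* A 220 W message limit with a +10 percent overdrive ceiling becomes 242 W. */
        printf("max power limit: %u\n", (unsigned int)scale_power_limit(220, 10));
        return 0;
}

One message is sent per link level, so the level index in the upper bits lets firmware apply each gen/lane pair to the matching DPM entry.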