author | Linus Torvalds <torvalds@linux-foundation.org> | 2023-07-07 08:42:54 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-07-07 08:42:54 +0300
commit | 5133c9e51de41bfa902153888e11add3342ede18 (patch)
tree | 5fc95774298aef777e4700db323976c69142527b /drivers
parent | 94e0d43e51ff8577ad273032bb1cacfd68e9297b (diff)
parent | 6725f33228077902ddac2a05e0ab361dee36e4ba (diff)
download | linux-5133c9e51de41bfa902153888e11add3342ede18.tar.xz
Merge tag 'drm-next-2023-07-07' of git://anongit.freedesktop.org/drm/drm
Pull drm fixes from Dave Airlie:
"Lots of fixes, mostly i915 and amdgpu. It's two weeks of i915, and I
think three weeks of amdgpu.

fbdev:
- Fix module infos on sparc

panel:
- Fix mode on Starry-ili9882t

i915:
- Allow DC states along with PW2 only for PWB functionality [adlp+]
- Fix SSC selection for MPLLA [mtl]
- Use hw.adjusted mode when calculating io/fast wake times [psr]
- Apply min softlimit correctly [guc/slpc]
- Assign correct hdcp content type [hdcp]
- Add missing forward declarations/includes to display power headers
- Fix BDW PSR AUX CH data register offsets [psr]
- Use mock device info for creating mock device

amdgpu:
- Misc cleanups
- GFX 9.4.3 fixes
- DEBUGFS build fix
- Fix LPDDR5 reporting
- ASPM fixes
- DCN 3.1.4 fixes
- DP MST fixes
- DCN 3.2.x fixes
- Display PSR TCON fixes
- SMU 13.x fixes
- RAS fixes
- Vega12/20 SMU fixes
- PSP flashing cleanup
- GFX9 MCBP fixes
- SR-IOV fixes
- GPUVM clear mappings fix for always valid BOs
- Add FAMS quirk for problematic monitor
- Fix possible UAF
- Better handle momentary temperature fluctuations (sketched just below)
- SDMA 4.4.2 fixes
- Fencing fix"
* tag 'drm-next-2023-07-07' of git://anongit.freedesktop.org/drm/drm: (83 commits)
drm/i915: use mock device info for creating mock device
drm/i915/psr: Fix BDW PSR AUX CH data register offsets
drm/amdgpu: Fix potential fence use-after-free v2
drm/amd/pm: avoid unintentional shutdown due to temperature momentary fluctuation
drm/amd/pm: expose swctf threshold setting for legacy powerplay
drm/amd/display: 3.2.241
drm/amd/display: Take full update path if number of planes changed
drm/amd/display: Create debugging mechanism for Gaming FAMS
drm/amd/display: Add monitor specific edid quirk
drm/amd/display: For new fast update path, loop through each surface
drm/amd/display: Remove Phantom Pipe Check When Calculating K1 and K2
drm/amd/display: Limit new fast update path to addr and gamma / color
drm/amd/display: Fix the delta clamping for shaper LUT
drm/amdgpu: Keep non-psp path for partition switch
drm/amd/display: program DPP shaper and 3D LUT if updated
Revert "drm/amd/display: edp do not add non-edid timings"
drm/amdgpu: share drm device for pci amdgpu device with 1st partition device
drm/amd/pm: Add GFX v9.4.3 unique id to sysfs
drm/amd/pm: Enable pp_feature attribute
drm/amdgpu/vcn: Need to unpause dpg before stop dpg
...
Diffstat (limited to 'drivers')
113 files changed, 1249 insertions, 349 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index a84bd4a0c421..2f9c14aca73c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -286,6 +286,9 @@ extern int amdgpu_user_partt_mode; #define AMDGPU_SMARTSHIFT_MAX_BIAS (100) #define AMDGPU_SMARTSHIFT_MIN_BIAS (-100) +/* Extra time delay(in ms) to eliminate the influence of temperature momentary fluctuation */ +#define AMDGPU_SWCTF_EXTRA_DELAY 50 + struct amdgpu_xcp_mgr; struct amdgpu_device; struct amdgpu_irq_src; @@ -1277,9 +1280,10 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter)); -#define for_each_inst(i, inst_mask) \ - for (i = ffs(inst_mask) - 1; inst_mask; \ - inst_mask &= ~(1U << i), i = ffs(inst_mask) - 1) +#define BIT_MASK_UPPER(i) ((i) >= BITS_PER_LONG ? 0 : ~0UL << (i)) +#define for_each_inst(i, inst_mask) \ + for (i = ffs(inst_mask); i-- != 0; \ + i = ffs(inst_mask & BIT_MASK_UPPER(i + 1))) #define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 9ba4817a9148..f4e3c133a16c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -1791,6 +1791,15 @@ const struct attribute_group amdgpu_vbios_version_attr_group = { .attrs = amdgpu_vbios_version_attrs }; +int amdgpu_atombios_sysfs_init(struct amdgpu_device *adev) +{ + if (adev->mode_info.atom_context) + return devm_device_add_group(adev->dev, + &amdgpu_vbios_version_attr_group); + + return 0; +} + /** * amdgpu_atombios_fini - free the driver info and callbacks for atombios * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h index 4153d520e2a3..b639a80ee3fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h @@ -217,5 +217,6 @@ int amdgpu_atombios_get_data_table(struct amdgpu_device *adev, void amdgpu_atombios_fini(struct amdgpu_device *adev); int amdgpu_atombios_init(struct amdgpu_device *adev); +int amdgpu_atombios_sysfs_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index ef4b9a41f20a..0b7f4c4d58e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -327,10 +327,13 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev, mem_channel_number = igp_info->v11.umachannelnumber; if (!mem_channel_number) mem_channel_number = 1; - /* channel width is 64 */ - if (vram_width) - *vram_width = mem_channel_number * 64; mem_type = igp_info->v11.memorytype; + if (mem_type == LpDdr5MemType) + mem_channel_width = 32; + else + mem_channel_width = 64; + if (vram_width) + *vram_width = mem_channel_number * mem_channel_width; if (vram_type) *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); break; @@ -345,10 +348,13 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev, mem_channel_number = igp_info->v21.umachannelnumber; if (!mem_channel_number) mem_channel_number = 1; - /* channel width is 64 */ - if (vram_width) - *vram_width = mem_channel_number * 64; mem_type = igp_info->v21.memorytype; + if (mem_type == LpDdr5MemType) + mem_channel_width = 32; + else + mem_channel_width = 64; + if (vram_width) + *vram_width = mem_channel_number * mem_channel_width; if (vram_type) *vram_type 
= convert_atom_mem_type_to_vram_type(adev, mem_type); break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index d9503882ea97..040f4cb6ab2d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -136,9 +136,6 @@ static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p, bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); p->uf_entry.priority = 0; p->uf_entry.tv.bo = &bo->tbo; - /* One for TTM and two for the CS job */ - p->uf_entry.tv.num_shared = 3; - drm_gem_object_put(gobj); size = amdgpu_bo_size(bo); @@ -912,15 +909,19 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, mutex_lock(&p->bo_list->bo_list_mutex); - /* One for TTM and one for the CS job */ + /* One for TTM and one for each CS job */ amdgpu_bo_list_for_each_entry(e, p->bo_list) - e->tv.num_shared = 2; + e->tv.num_shared = 1 + p->gang_size; + p->uf_entry.tv.num_shared = 1 + p->gang_size; amdgpu_bo_list_get_list(p->bo_list, &p->validated); INIT_LIST_HEAD(&duplicates); amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd); + /* Two for VM updates, one for TTM and one for each CS job */ + p->vm_pd.tv.num_shared = 3 + p->gang_size; + if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent) list_add(&p->uf_entry.tv.head, &p->validated); @@ -1653,15 +1654,15 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev, continue; r = dma_fence_wait_timeout(fence, true, timeout); + if (r > 0 && fence->error) + r = fence->error; + dma_fence_put(fence); if (r < 0) return r; if (r == 0) break; - - if (fence->error) - return fence->error; } memset(wait, 0, sizeof(*wait)); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index e25f085ee886..a92c6189b4b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2552,7 +2552,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) adev->ip_blocks[i].status.hw = true; /* right after GMC hw init, we create CSA */ - if (amdgpu_mcbp) { + if (adev->gfx.mcbp) { r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj, AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT, @@ -3673,6 +3673,23 @@ static const struct attribute *amdgpu_dev_attributes[] = { NULL }; +static void amdgpu_device_set_mcbp(struct amdgpu_device *adev) +{ + if (amdgpu_mcbp == 1) + adev->gfx.mcbp = true; + + if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 0, 0)) && + (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 0, 0)) && + adev->gfx.num_gfx_rings) + adev->gfx.mcbp = true; + + if (amdgpu_sriov_vf(adev)) + adev->gfx.mcbp = true; + + if (adev->gfx.mcbp) + DRM_INFO("MCBP is enabled\n"); +} + /** * amdgpu_device_init - initialize the driver * @@ -3824,9 +3841,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); - if (amdgpu_mcbp) - DRM_INFO("MCBP is enabled\n"); - /* * Reset domain needs to be present early, before XGMI hive discovered * (if any) and intitialized to use reset sem and in_gpu reset flag @@ -3852,6 +3866,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (r) return r; + amdgpu_device_set_mcbp(adev); + /* Get rid of things like offb */ r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver); if (r) @@ -4018,6 +4034,11 @@ fence_driver_init: /* Get a log2 for easy divisions. 
*/ adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps)); + r = amdgpu_atombios_sysfs_init(adev); + if (r) + drm_err(&adev->ddev, + "registering atombios sysfs failed (%d).\n", r); + r = amdgpu_pm_sysfs_init(adev); if (r) DRM_ERROR("registering pm sysfs failed (%d).\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 3b711babd4e2..0593ef8fe0a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -180,7 +180,7 @@ uint amdgpu_dc_feature_mask = 2; uint amdgpu_dc_debug_mask; uint amdgpu_dc_visual_confirm; int amdgpu_async_gfx_ring = 1; -int amdgpu_mcbp; +int amdgpu_mcbp = -1; int amdgpu_discovery = -1; int amdgpu_mes; int amdgpu_mes_kiq; @@ -634,10 +634,10 @@ module_param_named(async_gfx_ring, amdgpu_async_gfx_ring, int, 0444); /** * DOC: mcbp (int) - * It is used to enable mid command buffer preemption. (0 = disabled (default), 1 = enabled) + * It is used to enable mid command buffer preemption. (0 = disabled, 1 = enabled, -1 auto (default)) */ MODULE_PARM_DESC(mcbp, - "Enable Mid-command buffer preemption (0 = disabled (default), 1 = enabled)"); + "Enable Mid-command buffer preemption (0 = disabled, 1 = enabled), -1 = auto (default)"); module_param_named(mcbp, amdgpu_mcbp, int, 0444); /** @@ -2899,12 +2899,10 @@ static struct pci_error_handlers amdgpu_pci_err_handler = { extern const struct attribute_group amdgpu_vram_mgr_attr_group; extern const struct attribute_group amdgpu_gtt_mgr_attr_group; -extern const struct attribute_group amdgpu_vbios_version_attr_group; static const struct attribute_group *amdgpu_sysfs_groups[] = { &amdgpu_vram_mgr_attr_group, &amdgpu_gtt_mgr_attr_group, - &amdgpu_vbios_version_attr_group, NULL, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index ce0f7a8ad4b8..a4ff515ce896 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -434,6 +434,7 @@ struct amdgpu_gfx { uint16_t xcc_mask; uint32_t num_xcc_per_xcp; struct mutex partition_mutex; + bool mcbp; /* mid command buffer preemption */ }; struct amdgpu_gfx_ras_reg_entry { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c index 3add4b4f0667..2ff2897fd1db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c @@ -255,7 +255,8 @@ int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev, struct ras_common_if * if (amdgpu_ras_is_supported(adev, ras_block->block)) { for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - if (adev->jpeg.harvest_config & (1 << i)) + if (adev->jpeg.harvest_config & (1 << i) || + !adev->jpeg.inst[i].ras_poison_irq.funcs) continue; r = amdgpu_irq_get(adev, &adev->jpeg.inst[i].ras_poison_irq, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index e3531aa3c8bd..cca5a495611f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -805,7 +805,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) dev_info->ids_flags = 0; if (adev->flags & AMD_IS_APU) dev_info->ids_flags |= AMDGPU_IDS_FLAGS_FUSION; - if (amdgpu_mcbp) + if (adev->gfx.mcbp) dev_info->ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION; if (amdgpu_is_tmz(adev)) dev_info->ids_flags |= AMDGPU_IDS_FLAGS_TMZ; @@ -1247,7 +1247,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) goto error_vm; } - if 
(amdgpu_mcbp) { + if (adev->gfx.mcbp) { uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK; r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index e15c27e05564..6d676bdd1505 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -839,6 +839,7 @@ static bool psp_skip_tmr(struct psp_context *psp) case IP_VERSION(11, 0, 9): case IP_VERSION(11, 0, 7): case IP_VERSION(13, 0, 2): + case IP_VERSION(13, 0, 6): case IP_VERSION(13, 0, 10): return true; default: @@ -2039,6 +2040,8 @@ static int psp_securedisplay_initialize(struct psp_context *psp) psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n", securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret); + /* don't try again */ + psp->securedisplay_context.context.bin_desc.size_bytes = 0; } return 0; @@ -3703,7 +3706,6 @@ static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL); int amdgpu_psp_sysfs_init(struct amdgpu_device *adev) { int ret = 0; - struct psp_context *psp = &adev->psp; if (amdgpu_sriov_vf(adev)) return -EINVAL; @@ -3712,10 +3714,6 @@ int amdgpu_psp_sysfs_init(struct amdgpu_device *adev) case IP_VERSION(13, 0, 0): case IP_VERSION(13, 0, 7): case IP_VERSION(13, 0, 10): - if (!psp->adev) { - psp->adev = adev; - psp_v13_0_set_psp_funcs(psp); - } ret = sysfs_create_bin_file(&adev->dev->kobj, &psp_vbflash_bin_attr); if (ret) dev_err(adev->dev, "Failed to create device file psp_vbflash"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c index 12010c988c8b..123bcf5c2bb1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c @@ -116,7 +116,6 @@ static const struct file_operations amdgpu_rap_debugfs_ops = { void amdgpu_rap_debugfs_init(struct amdgpu_device *adev) { -#if defined(CONFIG_DEBUG_FS) struct drm_minor *minor = adev_to_drm(adev)->primary; if (!adev->psp.rap_context.context.initialized) @@ -124,5 +123,4 @@ void amdgpu_rap_debugfs_init(struct amdgpu_device *adev) debugfs_create_file("rap_test", S_IWUSR, minor->debugfs_root, adev, &amdgpu_rap_debugfs_ops); -#endif } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 4769a18304d7..8aaa427f8c0f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -2065,6 +2065,14 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET; reset_context.method = AMD_RESET_METHOD_MODE2; } + + /* Fatal error occurs in poison mode, mode1 reset is used to + * recover gpu. 
+ */ + if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) { + ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET; + set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); + } } amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context); @@ -2955,9 +2963,12 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) return; if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) { + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + dev_info(adev->dev, "uncorrectable hardware error" "(ERREVENT_ATHUB_INTERRUPT) detected!\n"); + ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET; amdgpu_ras_reset_gpu(adev); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 46bf1889a9d7..ffb49b2d533a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -340,6 +340,7 @@ enum amdgpu_ras_ret { #define AMDGPU_RAS_ERR_ADDRESS_VALID (1 << 2) #define AMDGPU_RAS_GPU_RESET_MODE2_RESET (0x1 << 0) +#define AMDGPU_RAS_GPU_RESET_MODE1_RESET (0x1 << 1) struct amdgpu_ras_err_status_reg_entry { uint32_t hwip; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c index 73516abef662..b779ee4bbaa7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c @@ -423,6 +423,9 @@ void amdgpu_sw_ring_ib_mark_offset(struct amdgpu_ring *ring, enum amdgpu_ring_mu struct amdgpu_ring_mux *mux = &adev->gfx.muxer; unsigned offset; + if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT) + return; + offset = ring->wptr & ring->buf_mask; amdgpu_ring_mux_ib_mark_offset(mux, ring, offset, type); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index 78ec3420ef85..dacf281d2b21 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -72,7 +72,7 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, int r; /* don't enable OS preemption on SDMA under SRIOV */ - if (amdgpu_sriov_vf(adev) || vmid == 0 || !amdgpu_mcbp) + if (amdgpu_sriov_vf(adev) || vmid == 0 || !adev->gfx.mcbp) return 0; if (ring->is_mes_queue) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index acbef1a24b9c..ae455aab5d29 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -1198,7 +1198,8 @@ int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r if (amdgpu_ras_is_supported(adev, ras_block->block)) { for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - if (adev->vcn.harvest_config & (1 << i)) + if (adev->vcn.harvest_config & (1 << i) || + !adev->vcn.inst[i].ras_poison_irq.funcs) continue; r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 25b4d7f0bd35..41aa853a07d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -66,9 +66,6 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev) adev->cg_flags = 0; adev->pg_flags = 0; - /* enable mcbp for sriov */ - amdgpu_mcbp = 1; - /* Reduce kcq number to 2 to reduce latency */ if (amdgpu_num_kcq == -1) amdgpu_num_kcq = 2; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 143d11afe0e5..291977b93b1d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1771,18 +1771,30 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, /* Insert partial mapping before the range */ if (!list_empty(&before->list)) { + struct amdgpu_bo *bo = before->bo_va->base.bo; + amdgpu_vm_it_insert(before, &vm->va); if (before->flags & AMDGPU_PTE_PRT) amdgpu_vm_prt_get(adev); + + if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv && + !before->bo_va->base.moved) + amdgpu_vm_bo_moved(&before->bo_va->base); } else { kfree(before); } /* Insert partial mapping after the range */ if (!list_empty(&after->list)) { + struct amdgpu_bo *bo = after->bo_va->base.bo; + amdgpu_vm_it_insert(after, &vm->va); if (after->flags & AMDGPU_PTE_PRT) amdgpu_vm_prt_get(adev); + + if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv && + !after->bo_va->base.moved) + amdgpu_vm_bo_moved(&after->bo_va->base); } else { kfree(after); } @@ -2233,16 +2245,16 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) if (r) return r; - /* Sanity checks */ - if (!amdgpu_vm_pt_is_root_clean(adev, vm)) { - r = -EINVAL; - goto unreserve_bo; - } - /* Check if PD needs to be reinitialized and do it before * changing any other state, in case it fails. */ if (pte_support_ats != vm->pte_support_ats) { + /* Sanity checks */ + if (!amdgpu_vm_pt_is_root_clean(adev, vm)) { + r = -EINVAL; + goto unreserve_bo; + } + vm->pte_support_ats = pte_support_ats; r = amdgpu_vm_pt_clear(adev, vm, to_amdgpu_bo_vm(vm->root.bo), false); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index d733fa6e7477..d175e862f222 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -132,6 +132,9 @@ int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode) for (i = 0; i < MAX_XCP; ++i) xcp_mgr->xcp[i].valid = false; + /* This is needed for figuring out memory id of xcp */ + xcp_mgr->num_xcp_per_mem_partition = num_xcps / xcp_mgr->adev->gmc.num_mem_partitions; + for (i = 0; i < num_xcps; ++i) { for (j = AMDGPU_XCP_GFXHUB; j < AMDGPU_XCP_MAX_BLOCKS; ++j) { ret = xcp_mgr->funcs->get_ip_details(xcp_mgr, i, j, @@ -157,7 +160,6 @@ int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode) xcp_mgr->num_xcps = num_xcps; amdgpu_xcp_update_partition_sched_list(adev); - xcp_mgr->num_xcp_per_mem_partition = num_xcps / xcp_mgr->adev->gmc.num_mem_partitions; return 0; } @@ -232,7 +234,10 @@ static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev) ddev = adev_to_drm(adev); - for (i = 0; i < MAX_XCP; i++) { + /* xcp #0 shares drm device setting with adev */ + adev->xcp_mgr->xcp->ddev = ddev; + + for (i = 1; i < MAX_XCP; i++) { ret = amdgpu_xcp_drm_dev_alloc(&p_ddev); if (ret) return ret; @@ -322,7 +327,7 @@ int amdgpu_xcp_dev_register(struct amdgpu_device *adev, if (!adev->xcp_mgr) return 0; - for (i = 0; i < MAX_XCP; i++) { + for (i = 1; i < MAX_XCP; i++) { ret = drm_dev_register(adev->xcp_mgr->xcp[i].ddev, ent->driver_data); if (ret) return ret; @@ -339,7 +344,7 @@ void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev) if (!adev->xcp_mgr) return; - for (i = 0; i < MAX_XCP; i++) { + for (i = 1; i < MAX_XCP; i++) { p_ddev = adev->xcp_mgr->xcp[i].ddev; drm_dev_unplug(p_ddev); p_ddev->render->dev = adev->xcp_mgr->xcp[i].rdev; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index be984f8c71c7..44af8022b89f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ 
b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -8307,7 +8307,7 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, control |= ib->length_dw | (vmid << 24); - if (amdgpu_mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { + if (ring->adev->gfx.mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { control |= INDIRECT_BUFFER_PRE_ENB(1); if (flags & AMDGPU_IB_PREEMPTED) @@ -8482,7 +8482,7 @@ static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, { uint32_t dw2 = 0; - if (amdgpu_mcbp) + if (ring->adev->gfx.mcbp) gfx_v10_0_ring_emit_ce_meta(ring, (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 690e121d9dda..3a7af59e83ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -5311,7 +5311,7 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, control |= ib->length_dw | (vmid << 24); - if (amdgpu_mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { + if (ring->adev->gfx.mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { control |= INDIRECT_BUFFER_PRE_ENB(1); if (flags & AMDGPU_IB_PREEMPTED) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index c1ee54d4c3d3..9e3b835bdbb2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -623,12 +623,28 @@ static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev, static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev, int num_xccs_per_xcp) { - int ret; - - ret = psp_spatial_partition(&adev->psp, NUM_XCC(adev->gfx.xcc_mask) / - num_xccs_per_xcp); - if (ret) - return ret; + int ret, i, num_xcc; + u32 tmp = 0; + + if (adev->psp.funcs) { + ret = psp_spatial_partition(&adev->psp, + NUM_XCC(adev->gfx.xcc_mask) / + num_xccs_per_xcp); + if (ret) + return ret; + } else { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + + for (i = 0; i < num_xcc; i++) { + tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP, + num_xccs_per_xcp); + tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID, + i % num_xccs_per_xcp); + WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL, + tmp); + } + ret = 0; + } adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp; @@ -1762,6 +1778,8 @@ static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id) ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; mutex_lock(&adev->srbm_mutex); + if (amdgpu_sriov_vf(adev) && adev->in_suspend) + amdgpu_ring_clear_ring(ring); soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id)); gfx_v9_4_3_xcc_mqd_init(ring, xcc_id); gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id); @@ -1960,6 +1978,16 @@ static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id) if (amdgpu_gfx_disable_kcq(adev, xcc_id)) DRM_ERROR("XCD %d KCQ disable failed\n", xcc_id); + if (amdgpu_sriov_vf(adev)) { + /* must disable polling for SRIOV when hw finished, otherwise + * CPC engine may still keep fetching WB address which is already + * invalid after sw finished and trigger DMAR reading error in + * hypervisor side. 
+ */ + WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0); + return; + } + /* Use deinitialize sequence from CAIL when unbinding device * from driver, otherwise KIQ is hanging when binding back */ @@ -1984,7 +2012,8 @@ static int gfx_v9_4_3_hw_init(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - gfx_v9_4_3_init_golden_registers(adev); + if (!amdgpu_sriov_vf(adev)) + gfx_v9_4_3_init_golden_registers(adev); gfx_v9_4_3_constants_init(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c index aa761ff3a5fa..4038455d7998 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c @@ -345,8 +345,8 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev) } #define NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT 0x00000000 // off by default, no gains over L1 -#define NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT 0x00000009 // 1=1us, 9=1ms -#define NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT 0x0000000E // 4ms +#define NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT 0x0000000A // 1=1us, 9=1ms, 10=4ms +#define NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT 0x0000000E // 400ms static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev, bool enable) @@ -479,9 +479,12 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev) WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5, data); def = data = RREG32_PCIE(smnPCIE_LC_CNTL); - data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; - data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; - data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT; + data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT; + if (pci_is_thunderbolt_attached(adev->pdev)) + data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; + else + data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; + data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK; if (def != data) WREG32_PCIE(smnPCIE_LC_CNTL, data); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index ea5e12390d18..f413898dda37 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -578,6 +578,9 @@ static void sdma_v4_4_2_inst_enable(struct amdgpu_device *adev, bool enable, return; } + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) + return; + for_each_inst(i, inst_mask) { f32_cntl = RREG32_SDMA(i, regSDMA_F32_CNTL); f32_cntl = REG_SET_FIELD(f32_cntl, SDMA_F32_CNTL, HALT, enable ? 
0 : 1); @@ -899,15 +902,12 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev, WREG32_SDMA(i, regSDMA_CNTL, temp); if (!amdgpu_sriov_vf(adev)) { - ring = &adev->sdma.instance[i].ring; - adev->nbio.funcs->sdma_doorbell_range(adev, i, - ring->use_doorbell, ring->doorbell_index, - adev->doorbell_index.sdma_doorbell_range); - - /* unhalt engine */ - temp = RREG32_SDMA(i, regSDMA_F32_CNTL); - temp = REG_SET_FIELD(temp, SDMA_F32_CNTL, HALT, 0); - WREG32_SDMA(i, regSDMA_F32_CNTL, temp); + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { + /* unhalt engine */ + temp = RREG32_SDMA(i, regSDMA_F32_CNTL); + temp = REG_SET_FIELD(temp, SDMA_F32_CNTL, HALT, 0); + WREG32_SDMA(i, regSDMA_F32_CNTL, temp); + } } } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index b48bb5212488..259795098173 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -1424,8 +1424,10 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev) */ static void vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx) { + struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE}; uint32_t tmp; + vcn_v4_0_pause_dpg_mode(adev, inst_idx, &state); /* Wait for power status to be 1 */ SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 9d4abfd8b55e..0b3dc754e06b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -138,9 +138,12 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd) case IP_VERSION(9, 4, 0): /* VEGA20 */ case IP_VERSION(9, 4, 1): /* ARCTURUS */ case IP_VERSION(9, 4, 2): /* ALDEBARAN */ - case IP_VERSION(9, 4, 3): /* GC 9.4.3 */ kfd->device_info.event_interrupt_class = &event_interrupt_class_v9; break; + case IP_VERSION(9, 4, 3): /* GC 9.4.3 */ + kfd->device_info.event_interrupt_class = + &event_interrupt_class_v9_4_3; + break; case IP_VERSION(10, 3, 1): /* VANGOGH */ case IP_VERSION(10, 3, 3): /* YELLOW_CARP */ case IP_VERSION(10, 3, 6): /* GC 10.3.6 */ @@ -518,6 +521,7 @@ static int kfd_gws_init(struct kfd_node *node) && kfd->mec2_fw_version >= 0x30) || (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 2) && kfd->mec2_fw_version >= 0x28) || + (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 3)) || (KFD_GC_VERSION(node) >= IP_VERSION(10, 3, 0) && KFD_GC_VERSION(node) < IP_VERSION(11, 0, 0) && kfd->mec2_fw_version >= 0x6b)))) @@ -598,6 +602,41 @@ static void kfd_cleanup_nodes(struct kfd_dev *kfd, unsigned int num_nodes) } } +static void kfd_setup_interrupt_bitmap(struct kfd_node *node, + unsigned int kfd_node_idx) +{ + struct amdgpu_device *adev = node->adev; + uint32_t xcc_mask = node->xcc_mask; + uint32_t xcc, mapped_xcc; + /* + * Interrupt bitmap is setup for processing interrupts from + * different XCDs and AIDs. + * Interrupt bitmap is defined as follows: + * 1. Bits 0-15 - correspond to the NodeId field. + * Each bit corresponds to NodeId number. For example, if + * a KFD node has interrupt bitmap set to 0x7, then this + * KFD node will process interrupts with NodeId = 0, 1 and 2 + * in the IH cookie. + * 2. Bits 16-31 - unused. + * + * Please note that the kfd_node_idx argument passed to this + * function is not related to NodeId field received in the + * IH cookie. 
+ * + * In CPX mode, a KFD node will process an interrupt if: + * - the Node Id matches the corresponding bit set in + * Bits 0-15. + * - AND VMID reported in the interrupt lies within the + * VMID range of the node. + */ + for_each_inst(xcc, xcc_mask) { + mapped_xcc = GET_INST(GC, xcc); + node->interrupt_bitmap |= (mapped_xcc % 2 ? 5 : 3) << (4 * (mapped_xcc / 2)); + } + dev_info(kfd_device, "Node: %d, interrupt_bitmap: %x\n", kfd_node_idx, + node->interrupt_bitmap); +} + bool kgd2kfd_device_init(struct kfd_dev *kfd, const struct kgd2kfd_shared_resources *gpu_resources) { @@ -797,6 +836,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, amdgpu_amdkfd_get_local_mem_info(kfd->adev, &node->local_mem_info, node->xcp); + if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3)) + kfd_setup_interrupt_bitmap(node, i); + /* Initialize the KFD node */ if (kfd_init_node(node)) { dev_err(kfd_device, "Error initializing KFD node\n"); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index d5c9f30552e3..f0731a6a5306 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -446,7 +446,36 @@ static void event_interrupt_wq_v9(struct kfd_node *dev, } } +static bool event_interrupt_isr_v9_4_3(struct kfd_node *node, + const uint32_t *ih_ring_entry, + uint32_t *patched_ihre, + bool *patched_flag) +{ + uint16_t node_id, vmid; + + /* + * For GFX 9.4.3, process the interrupt if: + * - NodeID field in IH entry matches the corresponding bit + * set in interrupt_bitmap Bits 0-15. + * OR + * - If partition mode is CPX and interrupt came from + * Node_id 0,4,8,12, then check if the Bit (16 + client id) + * is set in interrupt bitmap Bits 16-31. + */ + node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_ring_entry); + vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry); + if (kfd_irq_is_from_node(node, node_id, vmid)) + return event_interrupt_isr_v9(node, ih_ring_entry, + patched_ihre, patched_flag); + return false; +} + const struct kfd_event_interrupt_class event_interrupt_class_v9 = { .interrupt_isr = event_interrupt_isr_v9, .interrupt_wq = event_interrupt_wq_v9, }; + +const struct kfd_event_interrupt_class event_interrupt_class_v9_4_3 = { + .interrupt_isr = event_interrupt_isr_v9_4_3, + .interrupt_wq = event_interrupt_wq_v9, +}; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 7364a5d77c6e..d4c9ee3f9953 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -1444,6 +1444,7 @@ uint64_t kfd_get_number_elems(struct kfd_dev *kfd); /* Events */ extern const struct kfd_event_interrupt_class event_interrupt_class_cik; extern const struct kfd_event_interrupt_class event_interrupt_class_v9; +extern const struct kfd_event_interrupt_class event_interrupt_class_v9_4_3; extern const struct kfd_event_interrupt_class event_interrupt_class_v10; extern const struct kfd_event_interrupt_class event_interrupt_class_v11; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 3d3611705d41..a844e68211ac 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -2142,6 +2142,7 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type) int kfd_process_drain_interrupts(struct kfd_process_device *pdd) { uint32_t irq_drain_fence[8]; + uint8_t node_id = 0; int r = 0; if (!KFD_IS_SOC15(pdd->dev)) @@ -2154,6 +2155,14 @@ int 
kfd_process_drain_interrupts(struct kfd_process_device *pdd) KFD_IRQ_FENCE_CLIENTID; irq_drain_fence[3] = pdd->process->pasid; + /* + * For GFX 9.4.3, send the NodeId also in IH cookie DW[3] + */ + if (KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 3)) { + node_id = ffs(pdd->dev->interrupt_bitmap) - 1; + irq_drain_fence[3] |= node_id << 16; + } + /* ensure stale irqs scheduled KFD interrupts and send drain fence. */ if (amdgpu_amdkfd_send_close_event_drain_irq(pdd->dev->adev, irq_drain_fence)) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 9ad1a2186a24..ba9d69054119 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -123,16 +123,24 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid, if (!gws && pdd->qpd.num_gws == 0) return -EINVAL; - if (gws) - ret = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info, - gws, &mem); - else - ret = amdgpu_amdkfd_remove_gws_from_process(pdd->process->kgd_process_info, - pqn->q->gws); - if (unlikely(ret)) - return ret; + if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3)) { + if (gws) + ret = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info, + gws, &mem); + else + ret = amdgpu_amdkfd_remove_gws_from_process(pdd->process->kgd_process_info, + pqn->q->gws); + if (unlikely(ret)) + return ret; + pqn->q->gws = mem; + } else { + /* + * Intentionally set GWS to a non-NULL value + * for GFX 9.4.3. + */ + pqn->q->gws = gws ? ERR_PTR(-ENOMEM) : NULL; + } - pqn->q->gws = mem; pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0; return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm, @@ -164,7 +172,8 @@ void pqm_uninit(struct process_queue_manager *pqm) struct process_queue_node *pqn, *next; list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) { - if (pqn->q && pqn->q->gws) + if (pqn->q && pqn->q->gws && + KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3)) amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info, pqn->q->gws); kfd_procfs_del_queue(pqn->q); @@ -446,8 +455,10 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) } if (pqn->q->gws) { - amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info, - pqn->q->gws); + if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3)) + amdgpu_amdkfd_remove_gws_from_process( + pqm->process->kgd_process_info, + pqn->q->gws); pdd->qpd.num_gws = 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 90b86a6ac7bd..61fc62f3e003 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -2107,6 +2107,10 @@ int kfd_topology_add_device(struct kfd_node *gpu) if (KFD_IS_SVM_API_SUPPORTED(dev->gpu->adev)) dev->node_props.capability |= HSA_CAP_SVMAPI_SUPPORTED; + if (dev->gpu->adev->gmc.is_app_apu || + dev->gpu->adev->gmc.xgmi.connected_to_cpu) + dev->node_props.capability |= HSA_CAP_FLAGS_COHERENTHOSTACCESS; + kfd_debug_print_topology(); kfd_notify_gpu_change(gpu_id, 1); diff --git a/drivers/gpu/drm/amd/amdkfd/soc15_int.h b/drivers/gpu/drm/amd/amdkfd/soc15_int.h index e3f3b0b93a59..10138676f27f 100644 --- a/drivers/gpu/drm/amd/amdkfd/soc15_int.h +++ b/drivers/gpu/drm/amd/amdkfd/soc15_int.h @@ -40,6 +40,7 @@ #define SOC15_VMID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[0]) >> 24 & 0xf) #define SOC15_VMID_TYPE_FROM_IH_ENTRY(entry) 
(le32_to_cpu(entry[0]) >> 31 & 0x1) #define SOC15_PASID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[3]) & 0xffff) +#define SOC15_NODEID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[3]) >> 16 & 0xff) #define SOC15_CONTEXT_ID0_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[4])) #define SOC15_CONTEXT_ID1_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[5])) #define SOC15_CONTEXT_ID2_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[6])) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 514f6785a020..ff0a217b9d56 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -5063,11 +5063,7 @@ static inline void fill_dc_dirty_rect(struct drm_plane *plane, s32 y, s32 width, s32 height, int *i, bool ffu) { - if (*i > DC_MAX_DIRTY_RECTS) - return; - - if (*i == DC_MAX_DIRTY_RECTS) - goto out; + WARN_ON(*i >= DC_MAX_DIRTY_RECTS); dirty_rect->x = x; dirty_rect->y = y; @@ -5083,7 +5079,6 @@ static inline void fill_dc_dirty_rect(struct drm_plane *plane, "[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)", plane->base.id, x, y, width, height); -out: (*i)++; } @@ -5170,6 +5165,9 @@ static void fill_dc_dirty_rects(struct drm_plane *plane, *dirty_regions_changed = bb_changed; + if ((num_clips + (bb_changed ? 2 : 0)) > DC_MAX_DIRTY_RECTS) + goto ffu; + if (bb_changed) { fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], new_plane_state->crtc_x, @@ -5199,9 +5197,6 @@ static void fill_dc_dirty_rects(struct drm_plane *plane, new_plane_state->crtc_h, &i, false); } - if (i > DC_MAX_DIRTY_RECTS) - goto ffu; - flip_addrs->dirty_rect_count = i; return; @@ -7258,13 +7253,7 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) drm_add_modes_noedid(connector, 1920, 1080); } else { amdgpu_dm_connector_ddc_get_modes(connector, edid); - /* most eDP supports only timings from its edid, - * usually only detailed timings are available - * from eDP edid. 
timings which are not from edid - * may damage eDP - */ - if (connector->connector_type != DRM_MODE_CONNECTOR_eDP) - amdgpu_dm_connector_add_common_modes(encoder, connector); + amdgpu_dm_connector_add_common_modes(encoder, connector); amdgpu_dm_connector_add_freesync_modes(connector, edid); } amdgpu_dm_fbc_init(connector); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 5ea3284b2b77..d63ee636483b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -336,6 +336,153 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf, return size; } +static bool dp_mst_is_end_device(struct amdgpu_dm_connector *aconnector) +{ + bool is_end_device = false; + struct drm_dp_mst_topology_mgr *mgr = NULL; + struct drm_dp_mst_port *port = NULL; + + if (aconnector->mst_root && aconnector->mst_root->mst_mgr.mst_state) { + mgr = &aconnector->mst_root->mst_mgr; + port = aconnector->mst_output_port; + + drm_modeset_lock(&mgr->base.lock, NULL); + if (port->pdt == DP_PEER_DEVICE_SST_SINK || + port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV) + is_end_device = true; + drm_modeset_unlock(&mgr->base.lock); + } + + return is_end_device; +} + +/* Change MST link setting + * + * valid lane count value: 1, 2, 4 + * valid link rate value: + * 06h = 1.62Gbps per lane + * 0Ah = 2.7Gbps per lane + * 0Ch = 3.24Gbps per lane + * 14h = 5.4Gbps per lane + * 1Eh = 8.1Gbps per lane + * 3E8h = 10.0Gbps per lane + * 546h = 13.5Gbps per lane + * 7D0h = 20.0Gbps per lane + * + * debugfs is located at /sys/kernel/debug/dri/0/DP-x/mst_link_settings + * + * for example, to force to 2 lane, 10.0GHz, + * echo 2 0x3e8 > /sys/kernel/debug/dri/0/DP-x/mst_link_settings + * + * Valid input will trigger hotplug event to get new link setting applied + * Invalid input will trigger training setting reset + * + * The usage can be referred to link_settings entry + * + */ +static ssize_t dp_mst_link_setting(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; + struct dc_link *link = aconnector->dc_link; + struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev); + struct dc *dc = (struct dc *)link->dc; + struct dc_link_settings prefer_link_settings; + char *wr_buf = NULL; + const uint32_t wr_buf_size = 40; + /* 0: lane_count; 1: link_rate */ + int max_param_num = 2; + uint8_t param_nums = 0; + long param[2]; + bool valid_input = true; + + if (!dp_mst_is_end_device(aconnector)) + return -EINVAL; + + if (size == 0) + return -EINVAL; + + wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); + if (!wr_buf) + return -ENOSPC; + + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, + (long *)param, buf, + max_param_num, + ¶m_nums)) { + kfree(wr_buf); + return -EINVAL; + } + + if (param_nums <= 0) { + kfree(wr_buf); + DRM_DEBUG_DRIVER("user data not be read\n"); + return -EINVAL; + } + + switch (param[0]) { + case LANE_COUNT_ONE: + case LANE_COUNT_TWO: + case LANE_COUNT_FOUR: + break; + default: + valid_input = false; + break; + } + + switch (param[1]) { + case LINK_RATE_LOW: + case LINK_RATE_HIGH: + case LINK_RATE_RBR2: + case LINK_RATE_HIGH2: + case LINK_RATE_HIGH3: + case LINK_RATE_UHBR10: + case LINK_RATE_UHBR13_5: + case LINK_RATE_UHBR20: + break; + default: + valid_input = false; + break; + } + + if (!valid_input) { + kfree(wr_buf); + DRM_DEBUG_DRIVER("Invalid Input 
value No HW will be programmed\n"); + mutex_lock(&adev->dm.dc_lock); + dc_link_set_preferred_training_settings(dc, NULL, NULL, link, false); + mutex_unlock(&adev->dm.dc_lock); + return -EINVAL; + } + + /* save user force lane_count, link_rate to preferred settings + * spread spectrum will not be changed + */ + prefer_link_settings.link_spread = link->cur_link_settings.link_spread; + prefer_link_settings.use_link_rate_set = false; + prefer_link_settings.lane_count = param[0]; + prefer_link_settings.link_rate = param[1]; + + /* skip immediate retrain, and train to new link setting after hotplug event triggered */ + mutex_lock(&adev->dm.dc_lock); + dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, true); + mutex_unlock(&adev->dm.dc_lock); + + mutex_lock(&aconnector->base.dev->mode_config.mutex); + aconnector->base.force = DRM_FORCE_OFF; + mutex_unlock(&aconnector->base.dev->mode_config.mutex); + drm_kms_helper_hotplug_event(aconnector->base.dev); + + msleep(100); + + mutex_lock(&aconnector->base.dev->mode_config.mutex); + aconnector->base.force = DRM_FORCE_UNSPECIFIED; + mutex_unlock(&aconnector->base.dev->mode_config.mutex); + drm_kms_helper_hotplug_event(aconnector->base.dev); + + kfree(wr_buf); + return size; +} + /* function: get current DP PHY settings: voltage swing, pre-emphasis, * post-cursor2 (defined by VESA DP specification) * @@ -2668,6 +2815,12 @@ static const struct file_operations dp_dsc_disable_passthrough_debugfs_fops = { .llseek = default_llseek }; +static const struct file_operations dp_mst_link_settings_debugfs_fops = { + .owner = THIS_MODULE, + .write = dp_mst_link_setting, + .llseek = default_llseek +}; + static const struct { char *name; const struct file_operations *fops; @@ -2691,7 +2844,8 @@ static const struct { {"dsc_disable_passthrough", &dp_dsc_disable_passthrough_debugfs_fops}, {"is_mst_connector", &dp_is_mst_connector_fops}, {"mst_progress_status", &dp_mst_progress_status_fops}, - {"is_dpia_link", &is_dpia_link_fops} + {"is_dpia_link", &is_dpia_link_fops}, + {"mst_link_settings", &dp_mst_link_settings_debugfs_fops} }; static const struct { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index cd20cfc04996..d9a482908380 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -44,6 +44,30 @@ #include "dm_helpers.h" #include "ddc_service_types.h" +static u32 edid_extract_panel_id(struct edid *edid) +{ + return (u32)edid->mfg_id[0] << 24 | + (u32)edid->mfg_id[1] << 16 | + (u32)EDID_PRODUCT_ID(edid); +} + +static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps) +{ + uint32_t panel_id = edid_extract_panel_id(edid); + + switch (panel_id) { + /* Workaround for some monitors which does not work well with FAMS */ + case drm_edid_encode_panel_id('S', 'A', 'M', 0x0E5E): + case drm_edid_encode_panel_id('S', 'A', 'M', 0x7053): + case drm_edid_encode_panel_id('S', 'A', 'M', 0x71AC): + DRM_DEBUG_DRIVER("Disabling FAMS on monitor with panel id %X\n", panel_id); + edid_caps->panel_patch.disable_fams = true; + break; + default: + return; + } +} + /* dm_helpers_parse_edid_caps * * Parse edid caps @@ -115,6 +139,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps( else edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION; + apply_edid_quirks(edid_buf, edid_caps); + kfree(sads); kfree(sadb); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index d647f68fd563..4f61d4f257cd 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -24,6 +24,7 @@ */ #include "amdgpu_dm_psr.h" +#include "dc_dmub_srv.h" #include "dc.h" #include "dm_helpers.h" #include "amdgpu_dm.h" @@ -50,7 +51,7 @@ static bool link_supports_psrsu(struct dc_link *link) !link->dpcd_caps.psr_info.psr2_su_y_granularity_cap) return false; - return true; + return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub); } /* diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index 6a811755e2e6..cb992aca760d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -541,9 +541,18 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, clk_mgr_base->clks.p_state_change_support = p_state_change_support; /* to disable P-State switching, set UCLK min = max */ - if (!clk_mgr_base->clks.p_state_change_support) - dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, - clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz); + if (!clk_mgr_base->clks.p_state_change_support) { + if (dc->clk_mgr->dc_mode_softmax_enabled) { + /* On DCN32x we will never have the functional UCLK min above the softmax + * since we calculate mode support based on softmax being the max UCLK + * frequency. + */ + dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, + dc->clk_mgr->bw_params->dc_mode_softmax_memclk); + } else { + dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz); + } + } } /* Always update saved value, even if new value not set due to P-State switching unsupported. Also check safe_to_lower for FCLK */ @@ -808,8 +817,7 @@ static void dcn32_set_hard_max_memclk(struct clk_mgr *clk_mgr_base) if (!clk_mgr->smu_present) return; - dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, - clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz); + dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, clk_mgr_base->bw_params->max_memclk_mhz); } /* Get current memclk states, update bounding box */ @@ -827,6 +835,7 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base) &clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz, &num_entries_per_clk->num_memclk_levels); clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK); + clk_mgr_base->bw_params->dc_mode_softmax_memclk = clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz; /* memclk must have at least one level */ num_entries_per_clk->num_memclk_levels = num_entries_per_clk->num_memclk_levels ? num_entries_per_clk->num_memclk_levels : 1; @@ -841,7 +850,8 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base) } else { num_levels = num_entries_per_clk->num_fclk_levels; } - + clk_mgr_base->bw_params->max_memclk_mhz = + clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_memclk_levels - 1].memclk_mhz; clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? 
num_levels : 1; if (clk_mgr->dpm_present && !num_levels) @@ -894,6 +904,25 @@ static bool dcn32_is_smu_present(struct clk_mgr *clk_mgr_base) return clk_mgr->smu_present; } +static void dcn32_set_max_memclk(struct clk_mgr *clk_mgr_base, unsigned int memclk_mhz) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + if (!clk_mgr->smu_present) + return; + + dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, memclk_mhz); +} + +static void dcn32_set_min_memclk(struct clk_mgr *clk_mgr_base, unsigned int memclk_mhz) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + if (!clk_mgr->smu_present) + return; + + dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, memclk_mhz); +} static struct clk_mgr_funcs dcn32_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, @@ -904,6 +933,8 @@ static struct clk_mgr_funcs dcn32_funcs = { .notify_wm_ranges = dcn32_notify_wm_ranges, .set_hard_min_memclk = dcn32_set_hard_min_memclk, .set_hard_max_memclk = dcn32_set_hard_max_memclk, + .set_max_memclk = dcn32_set_max_memclk, + .set_min_memclk = dcn32_set_min_memclk, .get_memclk_states_from_smu = dcn32_get_memclk_states_from_smu, .are_clock_states_equal = dcn32_are_clock_states_equal, .enable_pme_wa = dcn32_enable_pme_wa, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index dd3a9d06c6e2..d133e4186a52 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1629,6 +1629,9 @@ bool dc_validate_boot_timing(const struct dc *dc, return false; } + if (dc->debug.force_odm_combine) + return false; + /* Check for enabled DIG to identify enabled display */ if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) return false; @@ -3577,6 +3580,13 @@ static void commit_planes_for_stream_fast(struct dc *dc, hwss_execute_sequence(dc, context->block_sequence, context->block_sequence_steps); + /* Clear update flags so next flip doesn't have redundant programming + * (if there's no stream update, the update flags are not cleared). 
+ */ + if (top_pipe_to_program->plane_state) + top_pipe_to_program->plane_state->update_flags.raw = 0; + if (top_pipe_to_program->stream) + top_pipe_to_program->stream->update_flags.raw = 0; } static void commit_planes_for_stream(struct dc *dc, @@ -4233,6 +4243,117 @@ static void update_seamless_boot_flags(struct dc *dc, } } +static void populate_fast_updates(struct dc_fast_update *fast_update, + struct dc_surface_update *srf_updates, + int surface_count, + struct dc_stream_update *stream_update) +{ + int i = 0; + + if (stream_update) { + fast_update[0].out_transfer_func = stream_update->out_transfer_func; + fast_update[0].output_csc_transform = stream_update->output_csc_transform; + } + + for (i = 0; i < surface_count; i++) { + fast_update[i].flip_addr = srf_updates[i].flip_addr; + fast_update[i].gamma = srf_updates[i].gamma; + fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix; + fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix; + fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor; + } +} + +static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count) +{ + int i; + + if (fast_update[0].out_transfer_func || + fast_update[0].output_csc_transform) + return true; + + for (i = 0; i < surface_count; i++) { + if (fast_update[i].flip_addr || + fast_update[i].gamma || + fast_update[i].gamut_remap_matrix || + fast_update[i].input_csc_color_matrix || + fast_update[i].coeff_reduction_factor) + return true; + } + + return false; +} + +static bool full_update_required(struct dc_surface_update *srf_updates, + int surface_count, + struct dc_stream_update *stream_update, + struct dc_stream_state *stream) +{ + + int i; + struct dc_stream_status *stream_status; + + for (i = 0; i < surface_count; i++) { + if (srf_updates && + (srf_updates[i].plane_info || + srf_updates[i].scaling_info || + (srf_updates[i].hdr_mult.value && + srf_updates[i].hdr_mult.value != srf_updates->surface->hdr_mult.value) || + srf_updates[i].in_transfer_func || + srf_updates[i].func_shaper || + srf_updates[i].lut3d_func || + srf_updates[i].blend_tf)) + return true; + } + + if (stream_update && + (((stream_update->src.height != 0 && stream_update->src.width != 0) || + (stream_update->dst.height != 0 && stream_update->dst.width != 0) || + stream_update->integer_scaling_update) || + stream_update->hdr_static_metadata || + stream_update->abm_level || + stream_update->periodic_interrupt || + stream_update->vrr_infopacket || + stream_update->vsc_infopacket || + stream_update->vsp_infopacket || + stream_update->hfvsif_infopacket || + stream_update->vtem_infopacket || + stream_update->adaptive_sync_infopacket || + stream_update->dpms_off || + stream_update->allow_freesync || + stream_update->vrr_active_variable || + stream_update->vrr_active_fixed || + stream_update->gamut_remap || + stream_update->output_color_space || + stream_update->dither_option || + stream_update->wb_update || + stream_update->dsc_config || + stream_update->mst_bw_update || + stream_update->func_shaper || + stream_update->lut3d_func || + stream_update->pending_test_pattern || + stream_update->crtc_timing_adjust)) + return true; + + if (stream) { + stream_status = dc_stream_get_status(stream); + if (stream_status == NULL || stream_status->plane_count != surface_count) + return true; + } + + return false; +} + +static bool fast_update_only(struct dc_fast_update *fast_update, + struct dc_surface_update *srf_updates, + int surface_count, + struct dc_stream_update 
*stream_update, + struct dc_stream_state *stream) +{ + return fast_updates_exist(fast_update, surface_count) + && !full_update_required(srf_updates, surface_count, stream_update, stream); +} + bool dc_update_planes_and_stream(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, @@ -4242,6 +4363,7 @@ bool dc_update_planes_and_stream(struct dc *dc, enum surface_update_type update_type; int i; struct mall_temp_config mall_temp_config; + struct dc_fast_update fast_update[MAX_SURFACES] = {0}; /* In cases where MPO and split or ODM are used transitions can * cause underflow. Apply stream configuration with minimal pipe @@ -4250,6 +4372,7 @@ bool dc_update_planes_and_stream(struct dc *dc, bool force_minimal_pipe_splitting; bool is_plane_addition; + populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes( dc, stream, @@ -4300,7 +4423,8 @@ bool dc_update_planes_and_stream(struct dc *dc, } update_seamless_boot_flags(dc, context, surface_count, stream); - if (!dc->debug.enable_legacy_fast_update && update_type == UPDATE_TYPE_FAST) { + if (fast_update_only(fast_update, srf_updates, surface_count, stream_update, stream) && + !dc->debug.enable_legacy_fast_update) { commit_planes_for_stream_fast(dc, srf_updates, surface_count, @@ -4357,7 +4481,9 @@ void dc_commit_updates_for_stream(struct dc *dc, struct dc_state *context; struct dc_context *dc_ctx = dc->ctx; int i, j; + struct dc_fast_update fast_update[MAX_SURFACES] = {0}; + populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); stream_status = dc_stream_get_status(stream); context = dc->current_state; @@ -4443,7 +4569,8 @@ void dc_commit_updates_for_stream(struct dc *dc, TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES); update_seamless_boot_flags(dc, context, surface_count, stream); - if (!dc->debug.enable_legacy_fast_update && update_type == UPDATE_TYPE_FAST) { + if (fast_update_only(fast_update, srf_updates, surface_count, stream_update, stream) && + !dc->debug.enable_legacy_fast_update) { commit_planes_for_stream_fast(dc, srf_updates, surface_count, @@ -4753,15 +4880,17 @@ static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memcl */ void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable) { - uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev; - unsigned int softMax, maxDPM, funcMin; + unsigned int softMax = 0, maxDPM = 0, funcMin = 0, i; bool p_state_change_support; - if (!ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev)) + if (!dc->config.dc_mode_clk_limit_support) return; softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk; - maxDPM = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz; + for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries; i++) { + if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz > maxDPM) + maxDPM = dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz; + } funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000; p_state_change_support = dc->clk_mgr->clks.p_state_change_support; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index d7d00fefaab9..cb2bf9a466f5 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -610,7 +610,7 @@ void hwss_build_fast_sequence(struct dc *dc, current_mpc_pipe = current_pipe; while 
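Two things change in the dc_enable_dcmode_clk_limit() hunk below: the gate becomes the dc_mode_clk_limit_support config flag rather than an ASIC revision check, and maxDPM is found by scanning the whole clock table instead of assuming the last entry is the largest. A standalone sketch of the scan, with illustrative types and names:

#include <stdio.h>

struct clk_entry { unsigned int memclk_mhz; };

/* The table is not guaranteed to be sorted by memclk, so examine every
 * entry rather than trusting entries[num_entries - 1]. */
static unsigned int max_memclk_mhz(const struct clk_entry *e, unsigned int n)
{
    unsigned int max = 0;

    for (unsigned int i = 0; i < n; i++)
        if (e[i].memclk_mhz > max)
            max = e[i].memclk_mhz;
    return max;
}

int main(void)
{
    struct clk_entry tbl[] = { { 400 }, { 1000 }, { 800 } };

    printf("maxDPM = %u MHz\n", max_memclk_mhz(tbl, 3));   /* 1000 */
    return 0;
}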
(current_mpc_pipe) { - if (!current_mpc_pipe->bottom_pipe && !pipe_ctx->next_odm_pipe && + if (!current_mpc_pipe->bottom_pipe && !current_mpc_pipe->next_odm_pipe && current_mpc_pipe->stream && current_mpc_pipe->plane_state && current_mpc_pipe->plane_state->update_flags.bits.addr_update && !current_mpc_pipe->plane_state->skip_manual_trigger) { diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 26d05e225088..63948170fd6d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -45,7 +45,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.239" +#define DC_VER "3.2.241" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -416,7 +416,7 @@ struct dc_config { uint8_t force_bios_fixed_vs; int sdpif_request_limit_words_per_umc; bool use_old_fixed_vs_sequence; - bool disable_subvp_drr; + bool dc_mode_clk_limit_support; }; enum visual_confirm { @@ -850,6 +850,7 @@ struct dc_debug_options { /* Enable dmub aux for legacy ddc */ bool enable_dmub_aux_for_legacy_ddc; bool disable_fams; + bool disable_fams_gaming; /* FEC/PSR1 sequence enable delay in 100us */ uint8_t fec_enable_delay_in100us; bool enable_driver_sequence_debug; @@ -1264,6 +1265,16 @@ struct dc_scaling_info { struct scaling_taps scaling_quality; }; +struct dc_fast_update { + const struct dc_flip_addrs *flip_addr; + const struct dc_gamma *gamma; + const struct colorspace_transform *gamut_remap_matrix; + const struct dc_csc_transform *input_csc_color_matrix; + const struct fixed31_32 *coeff_reduction_factor; + struct dc_transfer_func *out_transfer_func; + struct dc_csc_transform *output_csc_transform; +}; + struct dc_surface_update { struct dc_plane_state *surface; @@ -1525,6 +1536,7 @@ struct dc_link { bool dpia_forced_tbt3_mode; bool dongle_mode_timing_override; bool blank_stream_on_ocs_change; + bool read_dpcd204h_on_irq_hpd; } wa_flags; struct link_mst_stream_allocation_table mst_stream_alloc_table; diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index c52c40b16387..c753c6f30dd7 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -1011,3 +1011,10 @@ void dc_send_update_cursor_info_to_dmu( dm_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT); } } + +bool dc_dmub_check_min_version(struct dmub_srv *srv) +{ + if (!srv->hw_funcs.is_psrsu_supported) + return true; + return srv->hw_funcs.is_psrsu_supported(srv); +} diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index a5196a9292b3..099f94b6107c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -86,4 +86,5 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, struct dc_state *context, b void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv); void dc_send_update_cursor_info_to_dmu(struct pipe_ctx *pCtx, uint8_t pipe_idx); +bool dc_dmub_check_min_version(struct dmub_srv *srv); #endif /* _DMUB_DC_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h index e6c06325742a..168cb7094c95 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h @@ -266,7 +266,24 @@ type MASTER_COMM_INTERRUPT; \ type MASTER_COMM_CMD_REG_BYTE0; \ type MASTER_COMM_CMD_REG_BYTE1; \ - type MASTER_COMM_CMD_REG_BYTE2 + type 
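The new dc_dmub_check_min_version() below treats the version hook as optional: if the service never registered is_psrsu_supported, the check passes by default. A small sketch of that default-true capability pattern; the struct name is illustrative:

#include <stdbool.h>
#include <stdio.h>

struct dmub_srv_sketch {
    bool (*is_psrsu_supported)(struct dmub_srv_sketch *srv);
};

/* Same shape as dc_dmub_check_min_version(): a missing hook means no
 * minimum firmware version is enforced. */
static bool check_min_version(struct dmub_srv_sketch *srv)
{
    if (!srv->is_psrsu_supported)
        return true;
    return srv->is_psrsu_supported(srv);
}

int main(void)
{
    struct dmub_srv_sketch srv = { 0 };

    printf("gate passes: %d\n", check_min_version(&srv));   /* 1 */
    return 0;
}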
MASTER_COMM_CMD_REG_BYTE2; \ + type ABM1_HG_BIN_33_40_SHIFT_INDEX; \ + type ABM1_HG_BIN_33_64_SHIFT_FLAG; \ + type ABM1_HG_BIN_41_48_SHIFT_INDEX; \ + type ABM1_HG_BIN_49_56_SHIFT_INDEX; \ + type ABM1_HG_BIN_57_64_SHIFT_INDEX; \ + type ABM1_HG_RESULT_DATA; \ + type ABM1_HG_RESULT_INDEX; \ + type ABM1_ACE_SLOPE_DATA; \ + type ABM1_ACE_OFFSET_DATA; \ + type ABM1_ACE_OFFSET_SLOPE_INDEX; \ + type ABM1_ACE_THRES_INDEX; \ + type ABM1_ACE_IGNORE_MASTER_LOCK_EN; \ + type ABM1_ACE_READBACK_DB_REG_VALUE_EN; \ + type ABM1_ACE_DBUF_REG_UPDATE_PENDING; \ + type ABM1_ACE_LOCK; \ + type ABM1_ACE_THRES_DATA_1; \ + type ABM1_ACE_THRES_DATA_2 struct dce_abm_shift { ABM_REG_FIELD_LIST(uint8_t); @@ -288,6 +305,16 @@ struct dce_abm_registers { uint32_t DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES; uint32_t DC_ABM1_HGLS_REG_READ_PROGRESS; uint32_t DC_ABM1_ACE_OFFSET_SLOPE_0; + uint32_t DC_ABM1_ACE_OFFSET_SLOPE_DATA; + uint32_t DC_ABM1_ACE_PWL_CNTL; + uint32_t DC_ABM1_HG_BIN_33_40_SHIFT_INDEX; + uint32_t DC_ABM1_HG_BIN_33_64_SHIFT_FLAG; + uint32_t DC_ABM1_HG_BIN_41_48_SHIFT_INDEX; + uint32_t DC_ABM1_HG_BIN_49_56_SHIFT_INDEX; + uint32_t DC_ABM1_HG_BIN_57_64_SHIFT_INDEX; + uint32_t DC_ABM1_HG_RESULT_DATA; + uint32_t DC_ABM1_HG_RESULT_INDEX; + uint32_t DC_ABM1_ACE_THRES_DATA; uint32_t DC_ABM1_ACE_THRES_12; uint32_t MASTER_COMM_CNTL_REG; uint32_t MASTER_COMM_CMD_REG; diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 808855886183..e115ff91aaaa 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -974,10 +974,12 @@ enum dc_status resource_map_phy_clock_resources( || dc_is_virtual_signal(pipe_ctx->stream->signal)) pipe_ctx->clock_source = dc->res_pool->dp_clock_source; - else - pipe_ctx->clock_source = find_matching_pll( - &context->res_ctx, dc->res_pool, - stream); + else { + if (stream && stream->link && stream->link->link_enc) + pipe_ctx->clock_source = find_matching_pll( + &context->res_ctx, dc->res_pool, + stream); + } if (pipe_ctx->clock_source == NULL) return DC_NO_CLOCK_SOURCE_RESOURCE; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c index 7a00fe525dfb..3538973bd0c6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c @@ -308,7 +308,10 @@ bool cm_helper_convert_to_custom_float( #define NUMBER_REGIONS 32 #define NUMBER_SW_SEGMENTS 16 -bool cm_helper_translate_curve_to_hw_format( +#define DC_LOGGER \ + ctx->logger + +bool cm_helper_translate_curve_to_hw_format(struct dc_context *ctx, const struct dc_transfer_func *output_tf, struct pwl_params *lut_params, bool fixpoint) { @@ -482,10 +485,18 @@ bool cm_helper_translate_curve_to_hw_format( rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green); rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue); + if (fixpoint == true) { - rgb->delta_red_reg = dc_fixpt_clamp_u0d10(rgb->delta_red); - rgb->delta_green_reg = dc_fixpt_clamp_u0d10(rgb->delta_green); - rgb->delta_blue_reg = dc_fixpt_clamp_u0d10(rgb->delta_blue); + uint32_t red_clamp = dc_fixpt_clamp_u0d14(rgb->delta_red); + uint32_t green_clamp = dc_fixpt_clamp_u0d14(rgb->delta_green); + uint32_t blue_clamp = dc_fixpt_clamp_u0d14(rgb->delta_blue); + + if (red_clamp >> 10 || green_clamp >> 10 || blue_clamp >> 10) + DC_LOG_WARNING("Losing delta precision while programming shaper LUT."); + + 
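The ABM additions here extend a field-list macro: one macro names every register field, and each instantiation supplies the member type (uint8_t for the shift struct in this hunk, uint32_t for masks), so the parallel structs can never drift out of sync. A tiny compilable version reusing a few field names from the hunk:

#include <stdint.h>
#include <stdio.h>

#define ABM_FIELD_LIST(type) \
    type ABM1_HG_RESULT_INDEX; \
    type ABM1_ACE_LOCK; \
    type ABM1_ACE_THRES_DATA_1

struct abm_shift { ABM_FIELD_LIST(uint8_t); };   /* bit positions */
struct abm_mask  { ABM_FIELD_LIST(uint32_t); };  /* bit masks */

int main(void)
{
    printf("shift struct: %zu bytes, mask struct: %zu bytes\n",
           sizeof(struct abm_shift), sizeof(struct abm_mask));
    return 0;
}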
rgb->delta_red_reg = red_clamp & 0x3ff; + rgb->delta_green_reg = green_clamp & 0x3ff; + rgb->delta_blue_reg = blue_clamp & 0x3ff; rgb->red_reg = dc_fixpt_clamp_u0d14(rgb->red); rgb->green_reg = dc_fixpt_clamp_u0d14(rgb->green); rgb->blue_reg = dc_fixpt_clamp_u0d14(rgb->blue); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h index 3b8cd7410498..0a68b63d6126 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h @@ -106,6 +106,7 @@ bool cm_helper_convert_to_custom_float( bool fixpoint); bool cm_helper_translate_curve_to_hw_format( + struct dc_context *ctx, const struct dc_transfer_func *output_tf, struct pwl_params *lut_params, bool fixpoint); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 20a1582be0b1..a50309039d08 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1843,7 +1843,7 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, /* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full * update. */ - else if (cm_helper_translate_curve_to_hw_format( + else if (cm_helper_translate_curve_to_hw_format(dc->ctx, stream->out_transfer_func, &dpp->regamma_params, false)) { dpp->funcs->dpp_program_regamma_pwl( diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index eaf9e9ccad2a..4492bc2392b6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -867,7 +867,7 @@ bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, params = &stream->out_transfer_func->pwl; else if (pipe_ctx->stream->out_transfer_func->type == TF_TYPE_DISTRIBUTED_POINTS && - cm_helper_translate_curve_to_hw_format( + cm_helper_translate_curve_to_hw_format(dc->ctx, stream->out_transfer_func, &mpc->blender_params, false)) params = &mpc->blender_params; @@ -896,7 +896,7 @@ bool dcn20_set_blend_lut( if (plane_state->blend_tf->type == TF_TYPE_HWPWL) blend_lut = &plane_state->blend_tf->pwl; else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) { - cm_helper_translate_curve_to_hw_format( + cm_helper_translate_curve_to_hw_format(plane_state->ctx, plane_state->blend_tf, &dpp_base->regamma_params, false); blend_lut = &dpp_base->regamma_params; @@ -918,7 +918,7 @@ bool dcn20_set_shaper_3dlut( if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL) shaper_lut = &plane_state->in_shaper_func->pwl; else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) { - cm_helper_translate_curve_to_hw_format( + cm_helper_translate_curve_to_hw_format(plane_state->ctx, plane_state->in_shaper_func, &dpp_base->shaper_params, true); shaper_lut = &dpp_base->shaper_params; @@ -1764,8 +1764,9 @@ static void dcn20_program_pipe( hws->funcs.set_hdr_multiplier(pipe_ctx); if (pipe_ctx->update_flags.bits.enable || - pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || - pipe_ctx->plane_state->update_flags.bits.gamma_change) + pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || + pipe_ctx->plane_state->update_flags.bits.gamma_change || + pipe_ctx->plane_state->update_flags.bits.lut_3d) hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state); /* 
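The corrected shaper-LUT path above clamps each delta at 14 fractional bits, warns when the value will not fit the 10-bit delta register, and only then masks it down to the bits the hardware stores. A hedged standalone sketch of that flow:

#include <stdint.h>
#include <stdio.h>

/* clamped_u0d14 is assumed to already be the result of a U0.14 clamp,
 * as dc_fixpt_clamp_u0d14() produces in the hunk above. */
static uint32_t delta_to_reg(uint32_t clamped_u0d14)
{
    if (clamped_u0d14 >> 10)
        fprintf(stderr, "Losing delta precision while programming shaper LUT.\n");

    return clamped_u0d14 & 0x3ff;   /* only 10 bits reach the register */
}

int main(void)
{
    printf("0x%03x\n", delta_to_reg(0x7ff));   /* warns, then prints 0x3ff */
    return 0;
}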
dcn10_translate_regamma_to_hw_format takes 750us to finish diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c index 6a3d3a0ec0a3..701c7d8bc038 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c @@ -280,7 +280,7 @@ bool dwb3_ogam_set_input_transfer_func( dwb_ogam_lut = kzalloc(sizeof(*dwb_ogam_lut), GFP_KERNEL); if (dwb_ogam_lut) { - cm_helper_translate_curve_to_hw_format( + cm_helper_translate_curve_to_hw_format(dwbc->ctx, in_transfer_func_dwb_ogam, dwb_ogam_lut, false); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index b9753867d97b..bf8864bc8a99 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -106,7 +106,7 @@ static bool dcn30_set_mpc_shaper_3dlut(struct pipe_ctx *pipe_ctx, if (stream->func_shaper->type == TF_TYPE_HWPWL) { shaper_lut = &stream->func_shaper->pwl; } else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) { - cm_helper_translate_curve_to_hw_format(stream->func_shaper, + cm_helper_translate_curve_to_hw_format(stream->ctx, stream->func_shaper, &dpp_base->shaper_params, true); shaper_lut = &dpp_base->shaper_params; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index 1a0284a068b2..abe4c12a10b5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -725,7 +725,8 @@ static const struct dc_debug_options debug_defaults_drv = { .dwb_fi_phase = -1, // -1 = disable, .dmub_command_table = true, .use_max_lb = true, - .exit_idle_opt_for_cursor_updates = true + .exit_idle_opt_for_cursor_updates = true, + .enable_legacy_fast_update = false, }; static const struct dc_panel_config panel_config_defaults = { @@ -1986,11 +1987,10 @@ bool dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, if (!is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(context)) return false; - // check if freesync enabled if (!context->streams[0]->allow_freesync) return false; - if (context->streams[0]->vrr_active_variable) + if (context->streams[0]->vrr_active_variable && dc->debug.disable_fams_gaming) return false; context->streams[0]->fpo_in_use = true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c index 7dc065ea247a..5ad6a22ee47d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c @@ -95,7 +95,8 @@ static const struct dc_debug_options debug_defaults_drv = { .dwb_fi_phase = -1, // -1 = disable, .dmub_command_table = true, .use_max_lb = true, - .exit_idle_opt_for_cursor_updates = true + .exit_idle_opt_for_cursor_updates = true, + .enable_legacy_fast_update = false, }; static const struct dc_panel_config panel_config_defaults = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c index 6d9761395288..45956ef6f3f9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c @@ -1190,6 +1190,7 @@ static bool dcn303_resource_construct( dc->caps.dp_hdmi21_pcon_support = true; + dc->config.dc_mode_clk_limit_support = true; /* read VBIOS LTTPR 
caps */ if (ctx->dc_bios->funcs->get_lttpr_caps) { enum bp_result bp_query_result; diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c index cf23d7bc560a..0746ed31d1d1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c @@ -332,7 +332,7 @@ static void dccg314_dpp_root_clock_control( { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); - if (dccg->dpp_clock_gated[dpp_inst] == clock_on) + if (dccg->dpp_clock_gated[dpp_inst] != clock_on) return; if (clock_on) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c index 7a43f8868500..4d2820ffe468 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c @@ -337,13 +337,14 @@ void dcn314_enable_power_gating_plane(struct dce_hwseq *hws, bool enable) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); } -void dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div) +unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div) { struct dc_stream_state *stream = pipe_ctx->stream; + unsigned int odm_combine_factor = 0; bool two_pix_per_container = false; two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing); - get_odm_config(pipe_ctx, NULL); + odm_combine_factor = get_odm_config(pipe_ctx, NULL); if (stream->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { *k1_div = PIXEL_RATE_DIV_BY_1; @@ -361,11 +362,15 @@ void dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int } else { *k1_div = PIXEL_RATE_DIV_BY_1; *k2_div = PIXEL_RATE_DIV_BY_4; + if (odm_combine_factor == 2) + *k2_div = PIXEL_RATE_DIV_BY_2; } } if ((*k1_div == PIXEL_RATE_DIV_NA) && (*k2_div == PIXEL_RATE_DIV_NA)) ASSERT(false); + + return odm_combine_factor; } void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx) @@ -424,27 +429,6 @@ void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, hws->ctx->dc->res_pool->dccg, dpp_inst, clock_on); } -void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on) -{ - struct dc_context *ctx = hws->ctx; - union dmub_rb_cmd cmd; - - if (hws->ctx->dc->debug.disable_hubp_power_gate) - return; - - PERF_TRACE(); - - memset(&cmd, 0, sizeof(cmd)); - cmd.domain_control.header.type = DMUB_CMD__VBIOS; - cmd.domain_control.header.sub_type = DMUB_CMD__VBIOS_DOMAIN_CONTROL; - cmd.domain_control.header.payload_bytes = sizeof(cmd.domain_control.data); - cmd.domain_control.data.inst = hubp_inst; - cmd.domain_control.data.power_gate = !power_on; - - dm_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); - - PERF_TRACE(); -} static void apply_symclk_on_tx_off_wa(struct dc_link *link) { /* There are use cases where SYMCLK is referenced by OTG. 
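The one-line dccg314_dpp_root_clock_control() fix below inverts an early-return guard. Assuming dpp_clock_gated tracks whether the root clock is currently gated, as the name suggests, the corrected condition bails out when the clock is already in the requested state and does work only when the gated flag and the clock_on request coincide (gated and asked on, or ungated and asked off). A standalone sketch of that guard, with the state kept in a file-scope flag purely for illustration:

#include <stdbool.h>
#include <stdio.h>

static bool dpp_clock_gated = true;   /* true: root clock currently gated off */

static void dpp_root_clock_control(bool clock_on)
{
    if (dpp_clock_gated != clock_on)
        return;                       /* already on, or already gated */

    printf("%s root clock\n", clock_on ? "ungating" : "gating");
    dpp_clock_gated = !clock_on;      /* the driver programs DCCG here */
}

int main(void)
{
    dpp_root_clock_control(true);     /* ungates */
    dpp_root_clock_control(true);     /* no-op: already ungated */
    return 0;
}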
For instance diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h index 96035c75e0df..eafcc4ea6d24 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h @@ -37,14 +37,12 @@ void dcn314_dsc_pg_control(struct dce_hwseq *hws, unsigned int dsc_inst, bool po void dcn314_enable_power_gating_plane(struct dce_hwseq *hws, bool enable); -void dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div); +unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div); void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx); void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context); -void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on); - void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on); void dcn314_disable_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal); diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c index 86d6a514dec0..ca8fe55c33b8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c @@ -139,7 +139,7 @@ static const struct hwseq_private_funcs dcn314_private_funcs = { .plane_atomic_power_down = dcn10_plane_atomic_power_down, .enable_power_gating_plane = dcn314_enable_power_gating_plane, .dpp_root_clock_control = dcn314_dpp_root_clock_control, - .hubp_pg_control = dcn314_hubp_pg_control, + .hubp_pg_control = dcn31_hubp_pg_control, .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree, .update_odm = dcn314_update_odm, .dsc_pg_control = dcn314_dsc_pg_control, diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c index a840b008d660..6a9024aa3285 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c @@ -1883,13 +1883,6 @@ static bool dcn314_resource_construct( /* Use pipe context based otg sync logic */ dc->config.use_pipe_ctx_sync_logic = true; - /* Disable pipe power gating when unsupported */ - if (ctx->asic_id.hw_internal_rev == 0x01 || - ctx->asic_id.hw_internal_rev == 0x80) { - dc->debug.disable_dpp_power_gate = true; - dc->debug.disable_hubp_power_gate = true; - } - /* read VBIOS LTTPR caps */ { if (ctx->dc_bios->funcs->get_lttpr_caps) { @@ -1910,6 +1903,14 @@ static bool dcn314_resource_construct( dc->debug = debug_defaults_drv; else dc->debug = debug_defaults_diags; + + /* Disable pipe power gating */ + dc->debug.disable_dpp_power_gate = true; + dc->debug.disable_hubp_power_gate = true; + + /* Disable root clock optimization */ + dc->debug.root_clock_optimization.u32All = 0; + // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c index f1153941907e..df3a438abda8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c @@ -1610,7 +1610,7 @@ static int source_format_to_bpp (enum source_format_class SourcePixelFormat) { if (SourcePixelFormat == dm_444_64) 
return 8; - else if (SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_16) + else if (SourcePixelFormat == dm_444_16) return 2; else if (SourcePixelFormat == dm_444_8) return 1; diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c index 2d604f7ee782..ca5b4b28a664 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c @@ -179,6 +179,7 @@ static struct hubp_funcs dcn32_hubp_funcs = { .hubp_setup_interdependent = hubp2_setup_interdependent, .hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings, .set_blank = hubp2_set_blank, + .set_blank_regs = hubp2_set_blank_regs, .dcc_control = hubp3_dcc_control, .mem_program_viewport = min_set_viewport, .set_cursor_attributes = hubp32_cursor_set_attributes, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c index c586468872e2..d52d5feeb311 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c @@ -448,7 +448,7 @@ bool dcn32_set_mpc_shaper_3dlut( if (stream->func_shaper->type == TF_TYPE_HWPWL) shaper_lut = &stream->func_shaper->pwl; else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) { - cm_helper_translate_curve_to_hw_format( + cm_helper_translate_curve_to_hw_format(stream->ctx, stream->func_shaper, &dpp_base->shaper_params, true); shaper_lut = &dpp_base->shaper_params; @@ -484,7 +484,7 @@ bool dcn32_set_mcm_luts( if (plane_state->blend_tf->type == TF_TYPE_HWPWL) lut_params = &plane_state->blend_tf->pwl; else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) { - cm_helper_translate_curve_to_hw_format( + cm_helper_translate_curve_to_hw_format(plane_state->ctx, plane_state->blend_tf, &dpp_base->regamma_params, false); lut_params = &dpp_base->regamma_params; @@ -499,7 +499,7 @@ bool dcn32_set_mcm_luts( else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) { // TODO: dpp_base replace ASSERT(false); - cm_helper_translate_curve_to_hw_format( + cm_helper_translate_curve_to_hw_format(plane_state->ctx, plane_state->in_shaper_func, &dpp_base->shaper_params, true); lut_params = &dpp_base->shaper_params; @@ -1141,16 +1141,14 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx * } } -void dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div) +unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div) { struct dc_stream_state *stream = pipe_ctx->stream; + unsigned int odm_combine_factor = 0; bool two_pix_per_container = false; - // For phantom pipes, use the same programming as the main pipes - if (pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM) { - stream = pipe_ctx->stream->mall_stream_config.paired_stream; - } two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing); + odm_combine_factor = get_odm_config(pipe_ctx, NULL); if (stream->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { *k1_div = PIXEL_RATE_DIV_BY_1; @@ -1168,13 +1166,15 @@ void dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int * } else { *k1_div = PIXEL_RATE_DIV_BY_1; *k2_div = PIXEL_RATE_DIV_BY_4; - if (dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx)) + if ((odm_combine_factor == 2) || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx)) *k2_div = PIXEL_RATE_DIV_BY_2; } } if 
((*k1_div == PIXEL_RATE_DIV_NA) && (*k2_div == PIXEL_RATE_DIV_NA)) ASSERT(false); + + return odm_combine_factor; } void dcn32_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h index bf9bffabe0c0..2d2628f31bed 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h @@ -71,7 +71,7 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context); void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx); -void dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div); +unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div); void dcn32_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c index c2490e16a66a..777b2fac20c4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c @@ -56,6 +56,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .enable_audio_stream = dce110_enable_audio_stream, .disable_audio_stream = dce110_disable_audio_stream, .disable_plane = dcn20_disable_plane, + .disable_pixel_data = dcn20_disable_pixel_data, .pipe_control_lock = dcn20_pipe_control_lock, .interdependent_update_lock = dcn10_lock_all_pipes, .cursor_lock = dcn10_cursor_lock, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index 19f134caa8ad..1cc09799f92d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -732,6 +732,7 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_dp_plus_plus_wa = true, .fpo_vactive_min_active_margin_us = 200, .fpo_vactive_max_blank_us = 1000, + .enable_legacy_fast_update = false, }; static struct dce_aux *dcn32_aux_engine_create( @@ -2214,6 +2215,7 @@ static bool dcn32_resource_construct( /* Use pipe context based otg sync logic */ dc->config.use_pipe_ctx_sync_logic = true; + dc->config.dc_mode_clk_limit_support = true; /* read VBIOS LTTPR caps */ { if (ctx->dc_bios->funcs->get_lttpr_caps) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c index a9c41ef0751f..5be242a1b82c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c @@ -595,11 +595,10 @@ struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stre if (!is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(fpo_candidate_stream, fpo_vactive_margin_us)) return NULL; - // check if freesync enabled if (!fpo_candidate_stream->allow_freesync) return NULL; - if (fpo_candidate_stream->vrr_active_variable) + if (fpo_candidate_stream->vrr_active_variable && dc->debug.disable_fams_gaming) return NULL; return fpo_candidate_stream; diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c index ea204742ad35..a53478e15ce3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c @@ -730,6 +730,8 @@ static const struct 
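Both the DCN314 and DCN32 variants of calculate_dccg_k1_k2_values() now return the ODM combine factor and feed it into the fallback branch: k2 defaults to divide-by-4, but a 2:1 ODM combine drops it to divide-by-2 (in DCN32's case OR'd with the existing pixel-rate-div policy check). A reduced sketch of that divider selection; the constants stand in for the real PIXEL_RATE_DIV_* values in the DCN headers:

#include <stdio.h>

enum { DIV_BY_1 = 1, DIV_BY_2 = 2, DIV_BY_4 = 4 };

static unsigned int pick_k_dividers(unsigned int odm_combine_factor,
                                    unsigned int *k1_div, unsigned int *k2_div)
{
    *k1_div = DIV_BY_1;
    *k2_div = (odm_combine_factor == 2) ? DIV_BY_2 : DIV_BY_4;

    return odm_combine_factor;   /* callers now receive the factor as well */
}

int main(void)
{
    unsigned int k1, k2;

    pick_k_dividers(2, &k1, &k2);
    printf("k1=/%u k2=/%u\n", k1, k2);   /* k1=/1 k2=/2 for a 2:1 combine */
    return 0;
}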
dc_debug_options debug_defaults_drv = { .disable_subvp_high_refresh = false, .fpo_vactive_min_active_margin_us = 200, .fpo_vactive_max_blank_us = 1000, + .enable_legacy_fast_update = false, + .disable_dc_mode_overwrite = true, }; static struct dce_aux *dcn321_aux_engine_create( @@ -1754,6 +1756,7 @@ static bool dcn321_resource_construct( dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->config.dc_mode_clk_limit_support = true; /* read VBIOS LTTPR caps */ { if (ctx->dc_bios->funcs->get_lttpr_caps) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c index 6266b0788387..7bf4bb7ad044 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c @@ -4356,12 +4356,16 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i][0], locals->EffectiveLBLatencyHidingSourceLinesLuma), locals->SwathHeightYPerState[i][j][k]); - - locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + dml_min( - locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETC[k] * - locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0], - locals->EffectiveLBLatencyHidingSourceLinesChroma), - locals->SwathHeightCPerState[i][j][k]); + if (locals->LinesInDETChroma) { + locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + + dml_min(locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * + locals->BytePerPixelInDETC[k] * + locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0], + locals->EffectiveLBLatencyHidingSourceLinesChroma), + locals->SwathHeightCPerState[i][j][k]); + } else { + locals->EffectiveDETLBLinesChroma = 0; + } if (locals->BytePerPixelInDETC[k] == 0) { locals->UrgentLatencySupportUsPerState[i][j][k] = locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k]) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c index c9afddd11589..d9e049e7ff0a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c @@ -33,7 +33,7 @@ #include "dml/display_mode_vba.h" struct _vcs_dpi_ip_params_st dcn3_14_ip = { - .VBlankNomDefaultUS = 800, + .VBlankNomDefaultUS = 668, .gpuvm_enable = 1, .gpuvm_max_page_table_levels = 1, .hostvm_enable = 1, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index e2bb2b9971f3..a95034801712 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -485,24 +485,20 @@ static void get_optimal_ntuple(struct _vcs_dpi_voltage_scaling_st *entry) } } -void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table, +static void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries, struct _vcs_dpi_voltage_scaling_st *entry) { int i = 0; int index = 0; - float net_bw_of_new_state = 0; dc_assert_fp_enabled(); - get_optimal_ntuple(entry); - if (*num_entries == 0) { table[0] = *entry; (*num_entries)++; } else { - net_bw_of_new_state = calculate_net_bw_in_kbytes_sec(entry); - while (net_bw_of_new_state > calculate_net_bw_in_kbytes_sec(&table[index])) { + while (entry->net_bw_in_kbytes_sec > 
table[index].net_bw_in_kbytes_sec) { index++; if (index >= *num_entries) break; @@ -2349,6 +2345,63 @@ void dcn32_patch_dpm_table(struct clk_bw_params *bw_params) bw_params->clk_table.entries[0].memclk_mhz = dcn3_2_soc.clock_limits[0].dram_speed_mts / 16; } +static void swap_table_entries(struct _vcs_dpi_voltage_scaling_st *first_entry, + struct _vcs_dpi_voltage_scaling_st *second_entry) +{ + struct _vcs_dpi_voltage_scaling_st temp_entry = *first_entry; + *first_entry = *second_entry; + *second_entry = temp_entry; +} + +/* + * sort_entries_with_same_bw - Sort entries sharing the same bandwidth by DCFCLK + */ +static void sort_entries_with_same_bw(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries) +{ + unsigned int start_index = 0; + unsigned int end_index = 0; + unsigned int current_bw = 0; + + for (int i = 0; i < (*num_entries - 1); i++) { + if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) { + current_bw = table[i].net_bw_in_kbytes_sec; + start_index = i; + end_index = ++i; + + while ((i < (*num_entries - 1)) && (table[i+1].net_bw_in_kbytes_sec == current_bw)) + end_index = ++i; + } + + if (start_index != end_index) { + for (int j = start_index; j < end_index; j++) { + for (int k = start_index; k < end_index; k++) { + if (table[k].dcfclk_mhz > table[k+1].dcfclk_mhz) + swap_table_entries(&table[k], &table[k+1]); + } + } + } + + start_index = 0; + end_index = 0; + + } +} + +/* + * remove_inconsistent_entries - Ensure entries with the same bandwidth have MEMCLK and FCLK monotonically increasing + * and remove entries that do not + */ +static void remove_inconsistent_entries(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries) +{ + for (int i = 0; i < (*num_entries - 1); i++) { + if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) { + if ((table[i].dram_speed_mts > table[i+1].dram_speed_mts) || + (table[i].fabricclk_mhz > table[i+1].fabricclk_mhz)) + remove_entry_from_table_at_index(table, num_entries, i); + } + } +} + /* * override_max_clk_values - Overwrite the max clock frequencies with the max DC mode timings * Input: @@ -2480,6 +2533,8 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk entry.fabricclk_mhz = 0; entry.dram_speed_mts = 0; + get_optimal_ntuple(&entry); + entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry); insert_entry_into_table_sorted(table, num_entries, &entry); } @@ -2488,6 +2543,8 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk entry.fabricclk_mhz = 0; entry.dram_speed_mts = 0; + get_optimal_ntuple(&entry); + entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry); insert_entry_into_table_sorted(table, num_entries, &entry); // Insert the UCLK DPMS @@ -2496,6 +2553,8 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk entry.fabricclk_mhz = 0; entry.dram_speed_mts = bw_params->clk_table.entries[i].memclk_mhz * 16; + get_optimal_ntuple(&entry); + entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry); insert_entry_into_table_sorted(table, num_entries, &entry); } @@ -2506,6 +2565,8 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk entry.fabricclk_mhz = bw_params->clk_table.entries[i].fclk_mhz; entry.dram_speed_mts = 0; + get_optimal_ntuple(&entry); + entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry); insert_entry_into_table_sorted(table, num_entries, &entry); } } @@ -2515,6 +2576,8 @@ static int 
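The sorted-insert change above (the DCN321 copy further below gets the same treatment) moves the bandwidth computation out of the comparison loop: callers run get_optimal_ntuple() and cache calculate_net_bw_in_kbytes_sec() into the new net_bw_in_kbytes_sec field before inserting, and the insert compares only the cached key. A compilable sketch of that insert, assuming the caller guarantees free space in the table:

#include <stdio.h>
#include <string.h>

struct state {
    float net_bw_in_kbytes_sec;   /* cached by the caller before inserting */
};

static void insert_sorted(struct state *table, unsigned int *num_entries,
                          const struct state *entry)
{
    unsigned int index = 0;

    while (index < *num_entries &&
           entry->net_bw_in_kbytes_sec > table[index].net_bw_in_kbytes_sec)
        index++;

    /* Shift the tail up one slot and drop the new entry in. */
    memmove(&table[index + 1], &table[index],
            (*num_entries - index) * sizeof(*table));
    table[index] = *entry;
    (*num_entries)++;
}

int main(void)
{
    struct state tbl[8];
    unsigned int n = 0;
    struct state a = { 300.0f }, b = { 100.0f }, c = { 200.0f };

    insert_sorted(tbl, &n, &a);
    insert_sorted(tbl, &n, &b);
    insert_sorted(tbl, &n, &c);

    for (unsigned int i = 0; i < n; i++)
        printf("%.0f ", tbl[i].net_bw_in_kbytes_sec);   /* 100 200 300 */
    printf("\n");
    return 0;
}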
build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk entry.fabricclk_mhz = max_clk_data.fclk_mhz; entry.dram_speed_mts = 0; + get_optimal_ntuple(&entry); + entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry); insert_entry_into_table_sorted(table, num_entries, &entry); } @@ -2530,6 +2593,21 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk remove_entry_from_table_at_index(table, num_entries, i); } + // Insert entry with all max dc limits without bandwidth matching + if (!disable_dc_mode_overwrite) { + struct _vcs_dpi_voltage_scaling_st max_dc_limits_entry = entry; + + max_dc_limits_entry.dcfclk_mhz = max_clk_data.dcfclk_mhz; + max_dc_limits_entry.fabricclk_mhz = max_clk_data.fclk_mhz; + max_dc_limits_entry.dram_speed_mts = max_clk_data.memclk_mhz * 16; + + max_dc_limits_entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&max_dc_limits_entry); + insert_entry_into_table_sorted(table, num_entries, &max_dc_limits_entry); + + sort_entries_with_same_bw(table, num_entries); + remove_inconsistent_entries(table, num_entries); + } + // At this point, the table only contains supported points of interest // it could be used as is, but some states may be redundant due to // coarse grained nature of some clocks, so we want to round up to diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h index a4206b71d650..defbee866be6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h @@ -39,10 +39,6 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc, uint8_t dcn32_predict_pipe_split(struct dc_state *context, display_e2e_pipe_params_st *pipe_e2e); -void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table, - unsigned int *num_entries, - struct _vcs_dpi_voltage_scaling_st *entry); - void dcn32_set_phantom_stream_timing(struct dc *dc, struct dc_state *context, struct pipe_ctx *ref_pipe, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c index f0683fd9d3f0..b26fcf86014c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c @@ -207,24 +207,20 @@ static float calculate_net_bw_in_kbytes_sec(struct _vcs_dpi_voltage_scaling_st * return limiting_bw_kbytes_sec; } -void dcn321_insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table, +static void dcn321_insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries, struct _vcs_dpi_voltage_scaling_st *entry) { int i = 0; int index = 0; - float net_bw_of_new_state = 0; dc_assert_fp_enabled(); - get_optimal_ntuple(entry); - if (*num_entries == 0) { table[0] = *entry; (*num_entries)++; } else { - net_bw_of_new_state = calculate_net_bw_in_kbytes_sec(entry); - while (net_bw_of_new_state > calculate_net_bw_in_kbytes_sec(&table[index])) { + while (entry->net_bw_in_kbytes_sec > table[index].net_bw_in_kbytes_sec) { index++; if (index >= *num_entries) break; @@ -252,6 +248,63 @@ static void remove_entry_from_table_at_index(struct _vcs_dpi_voltage_scaling_st memset(&table[--(*num_entries)], 0, sizeof(struct _vcs_dpi_voltage_scaling_st)); } +static void swap_table_entries(struct _vcs_dpi_voltage_scaling_st *first_entry, + struct _vcs_dpi_voltage_scaling_st *second_entry) +{ + struct _vcs_dpi_voltage_scaling_st temp_entry = *first_entry; + 
*first_entry = *second_entry; + *second_entry = temp_entry; +} + +/* + * sort_entries_with_same_bw - Sort entries sharing the same bandwidth by DCFCLK + */ +static void sort_entries_with_same_bw(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries) +{ + unsigned int start_index = 0; + unsigned int end_index = 0; + unsigned int current_bw = 0; + + for (int i = 0; i < (*num_entries - 1); i++) { + if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) { + current_bw = table[i].net_bw_in_kbytes_sec; + start_index = i; + end_index = ++i; + + while ((i < (*num_entries - 1)) && (table[i+1].net_bw_in_kbytes_sec == current_bw)) + end_index = ++i; + } + + if (start_index != end_index) { + for (int j = start_index; j < end_index; j++) { + for (int k = start_index; k < end_index; k++) { + if (table[k].dcfclk_mhz > table[k+1].dcfclk_mhz) + swap_table_entries(&table[k], &table[k+1]); + } + } + } + + start_index = 0; + end_index = 0; + + } +} + +/* + * remove_inconsistent_entries - Ensure entries with the same bandwidth have MEMCLK and FCLK monotonically increasing + * and remove entries that do not follow this order + */ +static void remove_inconsistent_entries(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries) +{ + for (int i = 0; i < (*num_entries - 1); i++) { + if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) { + if ((table[i].dram_speed_mts > table[i+1].dram_speed_mts) || + (table[i].fabricclk_mhz > table[i+1].fabricclk_mhz)) + remove_entry_from_table_at_index(table, num_entries, i); + } + } +} + /* * override_max_clk_values - Overwrite the max clock frequencies with the max DC mode timings * Input: @@ -362,11 +415,11 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk if (max_clk_data.fclk_mhz == 0) max_clk_data.fclk_mhz = max_clk_data.dcfclk_mhz * - dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / - dcn3_2_soc.pct_ideal_fabric_bw_after_urgent; + dcn3_21_soc.pct_ideal_sdp_bw_after_urgent / + dcn3_21_soc.pct_ideal_fabric_bw_after_urgent; if (max_clk_data.phyclk_mhz == 0) - max_clk_data.phyclk_mhz = dcn3_2_soc.clock_limits[0].phyclk_mhz; + max_clk_data.phyclk_mhz = dcn3_21_soc.clock_limits[0].phyclk_mhz; *num_entries = 0; entry.dispclk_mhz = max_clk_data.dispclk_mhz; @@ -374,8 +427,8 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk entry.dppclk_mhz = max_clk_data.dppclk_mhz; entry.dtbclk_mhz = max_clk_data.dtbclk_mhz; entry.phyclk_mhz = max_clk_data.phyclk_mhz; - entry.phyclk_d18_mhz = dcn3_2_soc.clock_limits[0].phyclk_d18_mhz; - entry.phyclk_d32_mhz = dcn3_2_soc.clock_limits[0].phyclk_d32_mhz; + entry.phyclk_d18_mhz = dcn3_21_soc.clock_limits[0].phyclk_d18_mhz; + entry.phyclk_d32_mhz = dcn3_21_soc.clock_limits[0].phyclk_d32_mhz; // Insert all the DCFCLK STAs for (i = 0; i < num_dcfclk_stas; i++) { @@ -383,6 +436,8 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk entry.fabricclk_mhz = 0; entry.dram_speed_mts = 0; + get_optimal_ntuple(&entry); + entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry); dcn321_insert_entry_into_table_sorted(table, num_entries, &entry); } @@ -391,6 +446,8 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk entry.fabricclk_mhz = 0; entry.dram_speed_mts = 0; + get_optimal_ntuple(&entry); + entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry); dcn321_insert_entry_into_table_sorted(table, num_entries, &entry); // Insert the UCLK DPMS @@ -399,6 +456,8 @@ 
static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk entry.fabricclk_mhz = 0; entry.dram_speed_mts = bw_params->clk_table.entries[i].memclk_mhz * 16; + get_optimal_ntuple(&entry); + entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry); dcn321_insert_entry_into_table_sorted(table, num_entries, &entry); } @@ -409,6 +468,8 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk entry.fabricclk_mhz = bw_params->clk_table.entries[i].fclk_mhz; entry.dram_speed_mts = 0; + get_optimal_ntuple(&entry); + entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry); dcn321_insert_entry_into_table_sorted(table, num_entries, &entry); } } @@ -418,6 +479,8 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk entry.fabricclk_mhz = max_clk_data.fclk_mhz; entry.dram_speed_mts = 0; + get_optimal_ntuple(&entry); + entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry); dcn321_insert_entry_into_table_sorted(table, num_entries, &entry); } @@ -433,6 +496,23 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk remove_entry_from_table_at_index(table, num_entries, i); } + // Insert entry with all max dc limits without bandwidth matching + if (!disable_dc_mode_overwrite) { + struct _vcs_dpi_voltage_scaling_st max_dc_limits_entry = entry; + + max_dc_limits_entry.dcfclk_mhz = max_clk_data.dcfclk_mhz; + max_dc_limits_entry.fabricclk_mhz = max_clk_data.fclk_mhz; + max_dc_limits_entry.dram_speed_mts = max_clk_data.memclk_mhz * 16; + + max_dc_limits_entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&max_dc_limits_entry); + dcn321_insert_entry_into_table_sorted(table, num_entries, &max_dc_limits_entry); + + sort_entries_with_same_bw(table, num_entries); + remove_inconsistent_entries(table, num_entries); + } + + + // At this point, the table only contains supported points of interest // it could be used as is, but some states may be redundant due to // coarse grained nature of some clocks, so we want to round up to diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.h index e8fad9b4be69..c6623b3705ca 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.h @@ -29,10 +29,6 @@ #include "dml/display_mode_vba.h" -void dcn321_insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table, - unsigned int *num_entries, - struct _vcs_dpi_voltage_scaling_st *entry); - void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index ff0246a9458f..fb17f8868cb4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -167,6 +167,7 @@ struct _vcs_dpi_voltage_scaling_st { double phyclk_mhz; double dppclk_mhz; double dtbclk_mhz; + float net_bw_in_kbytes_sec; }; /** diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index 6faf40fa5c69..ecb7bcc39469 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -230,6 +230,7 @@ struct clk_bw_params { unsigned int dram_channel_width_bytes; unsigned int dispclk_vco_khz; unsigned int dc_mode_softmax_memclk; + unsigned int
max_memclk_mhz; struct clk_limit_table clk_table; struct wm_table wm_table; struct dummy_pstate_entry dummy_pstate_table[4]; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h index a151865a3a20..4ca4192c1e12 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h @@ -156,7 +156,7 @@ struct hwseq_private_funcs { void (*program_mall_pipe_config)(struct dc *dc, struct dc_state *context); void (*update_force_pstate)(struct dc *dc, struct dc_state *context); void (*update_mall_sel)(struct dc *dc, struct dc_state *context); - void (*calculate_dccg_k1_k2_values)(struct pipe_ctx *pipe_ctx, + unsigned int (*calculate_dccg_k1_k2_values)(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div); void (*set_pixels_per_cycle)(struct pipe_ctx *pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c index ba95facc4ee8..ef8739df91bc 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c @@ -82,8 +82,15 @@ bool dp_parse_link_loss_status( } /* Check interlane align.*/ - if (sink_status_changed || - !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) { + if (link_dp_get_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING && + (!hpd_irq_dpcd_data->bytes.lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b || + !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b)) { + sink_status_changed = true; + } else if (!hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) { + sink_status_changed = true; + } + + if (sink_status_changed) { DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__); @@ -201,6 +208,25 @@ void dp_handle_link_loss(struct dc_link *link) } } +static void read_dpcd204h_on_irq_hpd(struct dc_link *link, union hpd_irq_data *irq_data) +{ + enum dc_status retval; + union lane_align_status_updated dpcd_lane_status_updated; + + retval = core_link_read_dpcd( + link, + DP_LANE_ALIGN_STATUS_UPDATED, + &dpcd_lane_status_updated.raw, + sizeof(union lane_align_status_updated)); + + if (retval == DC_OK) { + irq_data->bytes.lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b = + dpcd_lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b; + irq_data->bytes.lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b = + dpcd_lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b; + } +} + enum dc_status dp_read_hpd_rx_irq_data( struct dc_link *link, union hpd_irq_data *irq_data) @@ -242,6 +268,13 @@ enum dc_status dp_read_hpd_rx_irq_data( irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI]; irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI]; irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI]; + + /* + * This display doesn't have correct values in DPCD200Eh. + * Read and check DPCD204h instead. 
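The dp_parse_link_loss_status() hunk above splits the interlane-align check by encoding: a 128b/132b link must verify both the EQ and CDS align-done bits, while only 8b/10b links may rely on the legacy INTERLANE_ALIGN_DONE bit. A standalone sketch of that decision; the struct below is an illustrative flattening, not the DPCD register layout:

#include <stdbool.h>
#include <stdio.h>

enum dp_encoding { DP_8B_10B, DP_128B_132B };

struct lane_align_bits {
    bool interlane_align_done;        /* legacy 8b/10b bit */
    bool eq_align_done_128b_132b;
    bool cds_align_done_128b_132b;
};

static bool link_alignment_lost(enum dp_encoding enc,
                                const struct lane_align_bits *s)
{
    if (enc == DP_128B_132B)
        return !s->eq_align_done_128b_132b ||
               !s->cds_align_done_128b_132b;

    return !s->interlane_align_done;
}

int main(void)
{
    struct lane_align_bits s = {
        .interlane_align_done = true,
        .eq_align_done_128b_132b = true,
        .cds_align_done_128b_132b = false,
    };

    printf("link status changed: %d\n", link_alignment_lost(DP_128B_132B, &s));
    return 0;
}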
+ */ + if (link->wa_flags.read_dpcd204h_on_irq_hpd) + read_dpcd204h_on_irq_hpd(link, irq_data); } return retval; diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index 7c9a2b34bd05..4585e0419da6 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -367,6 +367,8 @@ struct dmub_srv_hw_funcs { bool (*is_supported)(struct dmub_srv *dmub); + bool (*is_psrsu_supported)(struct dmub_srv *dmub); + bool (*is_hw_init)(struct dmub_srv *dmub); void (*enable_dmub_boot_options)(struct dmub_srv *dmub, @@ -492,7 +494,7 @@ struct dmub_notification { * of a firmware to know if feature or functionality is supported or present. */ #define DMUB_FW_VERSION(major, minor, revision) \ - ((((major) & 0xFF) << 24) | (((minor) & 0xFF) << 16) | ((revision) & 0xFFFF)) + ((((major) & 0xFF) << 24) | (((minor) & 0xFF) << 16) | (((revision) & 0xFF) << 8)) /** * dmub_srv_create() - creates the DMUB service. diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index ebf7aeec4029..5e952541e72d 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -302,6 +302,11 @@ bool dmub_dcn31_is_supported(struct dmub_srv *dmub) return supported; } +bool dmub_dcn31_is_psrsu_supported(struct dmub_srv *dmub) +{ + return dmub->fw_version >= DMUB_FW_VERSION(4, 0, 59); +} + void dmub_dcn31_set_gpint(struct dmub_srv *dmub, union dmub_gpint_data_register reg) { diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h index 7d5c10ee539b..89c5a948b67d 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h @@ -221,6 +221,8 @@ bool dmub_dcn31_is_hw_init(struct dmub_srv *dmub); bool dmub_dcn31_is_supported(struct dmub_srv *dmub); +bool dmub_dcn31_is_psrsu_supported(struct dmub_srv *dmub); + void dmub_dcn31_set_gpint(struct dmub_srv *dmub, union dmub_gpint_data_register reg); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.c index 48a06dbd9be7..f161aeb7e7c4 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.c @@ -60,3 +60,8 @@ const struct dmub_srv_dcn31_regs dmub_srv_dcn314_regs = { { DMUB_DCN31_FIELDS() }, #undef DMUB_SF }; + +bool dmub_dcn314_is_psrsu_supported(struct dmub_srv *dmub) +{ + return dmub->fw_version >= DMUB_FW_VERSION(8, 0, 16); +} diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.h index 674267a2940e..f213bd82c911 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.h @@ -30,4 +30,6 @@ extern const struct dmub_srv_dcn31_regs dmub_srv_dcn314_regs; +bool dmub_dcn314_is_psrsu_supported(struct dmub_srv *dmub); + #endif /* _DMUB_DCN314_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 9e9a6a44a7ac..bdaf43892f47 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -226,14 +226,17 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) case DMUB_ASIC_DCN314: case DMUB_ASIC_DCN315: case DMUB_ASIC_DCN316: - if (asic == DMUB_ASIC_DCN314) + if (asic == DMUB_ASIC_DCN314) { 
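The DMUB_FW_VERSION fix above changes the packing so major, minor, and revision each occupy 8 bits, with revision at bit 8 rather than filling the low 16 bits; the PSR-SU gates added in this series (4.0.59 for DCN31, 8.0.16 for DCN314) compare against exactly this encoding. A small demonstration of the packing and the threshold comparison:

#include <stdint.h>
#include <stdio.h>

#define FW_VERSION(major, minor, revision) \
    ((((major) & 0xFF) << 24) | (((minor) & 0xFF) << 16) | (((revision) & 0xFF) << 8))

int main(void)
{
    uint32_t running = FW_VERSION(4, 0, 60);

    /* The comparison dmub_dcn31_is_psrsu_supported() performs. */
    printf("PSR-SU supported: %d\n", running >= FW_VERSION(4, 0, 59));   /* 1 */
    return 0;
}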
dmub->regs_dcn31 = &dmub_srv_dcn314_regs; - else if (asic == DMUB_ASIC_DCN315) + funcs->is_psrsu_supported = dmub_dcn314_is_psrsu_supported; + } else if (asic == DMUB_ASIC_DCN315) { dmub->regs_dcn31 = &dmub_srv_dcn315_regs; - else if (asic == DMUB_ASIC_DCN316) + } else if (asic == DMUB_ASIC_DCN316) { dmub->regs_dcn31 = &dmub_srv_dcn316_regs; - else + } else { dmub->regs_dcn31 = &dmub_srv_dcn31_regs; + funcs->is_psrsu_supported = dmub_dcn31_is_psrsu_supported; + } funcs->reset = dmub_dcn31_reset; funcs->reset_release = dmub_dcn31_reset_release; funcs->backdoor_load = dmub_dcn31_backdoor_load; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index a57952b93e73..9ef88a0b1b57 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -35,44 +35,6 @@ #include <linux/pm_runtime.h> #include <asm/processor.h> -static const struct cg_flag_name clocks[] = { - {AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"}, - {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"}, - {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"}, - {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"}, - {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"}, - {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"}, - {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"}, - {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"}, - {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"}, - {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"}, - {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"}, - {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"}, - {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"}, - {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"}, - {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"}, - {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"}, - {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"}, - {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"}, - {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"}, - {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"}, - {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"}, - {AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"}, - {AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"}, - {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"}, - {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"}, - {AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"}, - {AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"}, - {AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"}, - {AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"}, - {AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"}, - {AMD_CG_SUPPORT_REPEATER_FGCG, "Repeater Fine Grain Clock Gating"}, - {AMD_CG_SUPPORT_GFX_PERF_CLK, "Perfmon Clock Gating"}, - {AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"}, - {AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"}, - {0, NULL}, -}; - static const struct hwmon_temp_label { enum PP_HWMON_TEMP channel; const char *label; @@ -2110,6 +2072,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ case IP_VERSION(9, 4, 0): case IP_VERSION(9, 
4, 1): case IP_VERSION(9, 4, 2): + case IP_VERSION(9, 4, 3): case IP_VERSION(10, 3, 0): case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 1): @@ -2120,7 +2083,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ *states = ATTR_STATE_UNSUPPORTED; } } else if (DEVICE_ATTR_IS(pp_features)) { - if (adev->flags & AMD_IS_APU || gc_ver < IP_VERSION(9, 0, 0)) + if ((adev->flags & AMD_IS_APU && + gc_ver != IP_VERSION(9, 4, 3)) || + gc_ver < IP_VERSION(9, 0, 0)) *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(gpu_metrics)) { if (gc_ver < IP_VERSION(9, 1, 0)) @@ -3684,6 +3649,44 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a return 0; } +static const struct cg_flag_name clocks[] = { + {AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"}, + {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"}, + {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"}, + {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"}, + {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"}, + {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"}, + {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"}, + {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"}, + {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"}, + {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"}, + {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"}, + {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"}, + {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"}, + {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"}, + {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"}, + {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"}, + {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"}, + {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"}, + {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"}, + {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"}, + {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"}, + {AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"}, + {AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"}, + {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"}, + {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"}, + {AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"}, + {AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"}, + {AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"}, + {AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"}, + {AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"}, + {AMD_CG_SUPPORT_REPEATER_FGCG, "Repeater Fine Grain Clock Gating"}, + {AMD_CG_SUPPORT_GFX_PERF_CLK, "Perfmon Clock Gating"}, + {AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"}, + {AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"}, + {0, NULL}, +}; + static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags) { int i; diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index d178f3f44081..42172b00be66 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -89,6 +89,8 @@ struct amdgpu_dpm_thermal { int max_mem_crit_temp; /* memory max 
emergency(shutdown) temp */ int max_mem_emergency_temp; + /* SWCTF threshold */ + int sw_ctf_threshold; /* was last interrupt low to high or high to low */ bool high_to_low; /* interrupt source */ diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index 11b7b4cffaae..ff360c699171 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -26,6 +26,7 @@ #include <linux/gfp.h> #include <linux/slab.h> #include <linux/firmware.h> +#include <linux/reboot.h> #include "amd_shared.h" #include "amd_powerplay.h" #include "power_state.h" @@ -91,6 +92,45 @@ static int pp_early_init(void *handle) return 0; } +static void pp_swctf_delayed_work_handler(struct work_struct *work) +{ + struct pp_hwmgr *hwmgr = + container_of(work, struct pp_hwmgr, swctf_delayed_work.work); + struct amdgpu_device *adev = hwmgr->adev; + struct amdgpu_dpm_thermal *range = + &adev->pm.dpm.thermal; + uint32_t gpu_temperature, size; + int ret; + + /* + * If the hotspot/edge temperature is confirmed as below SW CTF setting point + * after the delay enforced, nothing will be done. + * Otherwise, a graceful shutdown will be performed to prevent further damage. + */ + if (range->sw_ctf_threshold && + hwmgr->hwmgr_func->read_sensor) { + ret = hwmgr->hwmgr_func->read_sensor(hwmgr, + AMDGPU_PP_SENSOR_HOTSPOT_TEMP, + &gpu_temperature, + &size); + /* + * For some legacy ASICs, hotspot temperature retrieving might be not + * supported. Check the edge temperature instead then. + */ + if (ret == -EOPNOTSUPP) + ret = hwmgr->hwmgr_func->read_sensor(hwmgr, + AMDGPU_PP_SENSOR_EDGE_TEMP, + &gpu_temperature, + &size); + if (!ret && gpu_temperature / 1000 < range->sw_ctf_threshold) + return; + } + + dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n"); + dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n"); + orderly_poweroff(true); +} + static int pp_sw_init(void *handle) { struct amdgpu_device *adev = handle; @@ -101,6 +141,10 @@ static int pp_sw_init(void *handle) pr_debug("powerplay sw init %s\n", ret ? 
"failed" : "successfully"); + if (!ret) + INIT_DELAYED_WORK(&hwmgr->swctf_delayed_work, + pp_swctf_delayed_work_handler); + return ret; } @@ -135,6 +179,8 @@ static int pp_hw_fini(void *handle) struct amdgpu_device *adev = handle; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; + cancel_delayed_work_sync(&hwmgr->swctf_delayed_work); + hwmgr_hw_fini(hwmgr); return 0; @@ -221,6 +267,8 @@ static int pp_suspend(void *handle) struct amdgpu_device *adev = handle; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; + cancel_delayed_work_sync(&hwmgr->swctf_delayed_work); + return hwmgr_suspend(hwmgr); } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c index 981dc8c7112d..90452b66e107 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c @@ -241,7 +241,8 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr) TEMP_RANGE_MAX, TEMP_RANGE_MIN, TEMP_RANGE_MAX, - TEMP_RANGE_MAX}; + TEMP_RANGE_MAX, + 0}; struct amdgpu_device *adev = hwmgr->adev; if (!hwmgr->not_vf) @@ -265,6 +266,7 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr) adev->pm.dpm.thermal.min_mem_temp = range.mem_min; adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max; adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max; + adev->pm.dpm.thermal.sw_ctf_threshold = range.sw_ctf_threshold; return ret; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index e10cc5e7928e..6841a4bce186 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -5432,6 +5432,8 @@ static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr, thermal_data->max = data->thermal_temp_setting.temperature_shutdown * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + thermal_data->sw_ctf_threshold = thermal_data->max; + return 0; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c index bfe80ac0ad8c..d0b1ab6c4523 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c @@ -603,21 +603,17 @@ int phm_irq_process(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { + struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; uint32_t client_id = entry->client_id; uint32_t src_id = entry->src_id; if (client_id == AMDGPU_IRQ_CLIENTID_LEGACY) { if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH) { - dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n"); - /* - * SW CTF just occurred. - * Try to do a graceful shutdown to prevent further damage. - */ - dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n"); - orderly_poweroff(true); - } else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW) + schedule_delayed_work(&hwmgr->swctf_delayed_work, + msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY)); + } else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW) { dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n"); - else if (src_id == VISLANDS30_IV_SRCID_GPIO_19) { + } else if (src_id == VISLANDS30_IV_SRCID_GPIO_19) { dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n"); /* * HW CTF just occurred. Shutdown to prevent further damage. 
@@ -626,15 +622,10 @@ int phm_irq_process(struct amdgpu_device *adev, orderly_poweroff(true); } } else if (client_id == SOC15_IH_CLIENTID_THM) { - if (src_id == 0) { - dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n"); - /* - * SW CTF just occurred. - * Try to do a graceful shutdown to prevent further damage. - */ - dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n"); - orderly_poweroff(true); - } else + if (src_id == 0) + schedule_delayed_work(&hwmgr->swctf_delayed_work, + msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY)); + else dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n"); } else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) { dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n"); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c index 99cd2e63afdd..c51dd4c74fe9 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c @@ -5241,6 +5241,9 @@ static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr, { struct vega10_hwmgr *data = hwmgr->backend; PPTable_t *pp_table = &(data->smc_state_table.pp_table); + struct phm_ppt_v2_information *pp_table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct phm_tdp_table *tdp_table = pp_table_info->tdp_table; memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange)); @@ -5257,6 +5260,13 @@ static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr, thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)* PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + if (tdp_table->usSoftwareShutdownTemp > pp_table->ThotspotLimit && + tdp_table->usSoftwareShutdownTemp < VEGA10_THERMAL_MAXIMUM_ALERT_TEMP) + thermal_data->sw_ctf_threshold = tdp_table->usSoftwareShutdownTemp; + else + thermal_data->sw_ctf_threshold = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP; + thermal_data->sw_ctf_threshold *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + return 0; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c index e9db137cd1c6..1937be1cf5b4 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c @@ -2763,6 +2763,8 @@ static int vega12_notify_cac_buffer_info(struct pp_hwmgr *hwmgr, static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *thermal_data) { + struct phm_ppt_v3_information *pptable_information = + (struct phm_ppt_v3_information *)hwmgr->pptable; struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); PPTable_t *pp_table = &(data->smc_state_table.pp_table); @@ -2781,6 +2783,8 @@ static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr, PP_TEMPERATURE_UNITS_PER_CENTIGRADES; thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)* PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + thermal_data->sw_ctf_threshold = pptable_information->us_software_shutdown_temp * + PP_TEMPERATURE_UNITS_PER_CENTIGRADES; return 0; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c index ed3dff0b52d2..ae342c58cd3e 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c @@ -192,7 +192,9 @@ static int 
vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high); val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low); - val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); + val &= ~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK; + val &= ~THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; + val &= ~THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c index 0d4d4811527c..4e19ccbdb807 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c @@ -4206,6 +4206,8 @@ static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr, static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *thermal_data) { + struct phm_ppt_v3_information *pptable_information = + (struct phm_ppt_v3_information *)hwmgr->pptable; struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); PPTable_t *pp_table = &(data->smc_state_table.pp_table); @@ -4224,6 +4226,8 @@ static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr, PP_TEMPERATURE_UNITS_PER_CENTIGRADES; thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)* PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + thermal_data->sw_ctf_threshold = pptable_information->us_software_shutdown_temp * + PP_TEMPERATURE_UNITS_PER_CENTIGRADES; return 0; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c index f4f4efdbda79..e9737ca8418a 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c @@ -263,7 +263,9 @@ static int vega20_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high); val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low); - val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); + val &= ~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK; + val &= ~THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; + val &= ~THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h index f1580a26a850..612d66aeaab9 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h @@ -811,6 +811,8 @@ struct pp_hwmgr { bool gfxoff_state_changed_by_workload; uint32_t pstate_sclk_peak; uint32_t pstate_mclk_peak; + + struct delayed_work swctf_delayed_work; }; int hwmgr_early_init(struct pp_hwmgr *hwmgr); diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h b/drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h index a5f2227a3971..0ffc2347829d 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h @@ -131,6 +131,7 @@ struct PP_TemperatureRange { int mem_min; int mem_crit_max; int mem_emergency_max; + int sw_ctf_threshold; }; struct PP_StateValidationBlock { diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 
4dea79a0c5b5..ce41a8309582 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -24,6 +24,7 @@ #include <linux/firmware.h> #include <linux/pci.h> +#include <linux/reboot.h> #include "amdgpu.h" #include "amdgpu_smu.h" @@ -1078,6 +1079,34 @@ static void smu_interrupt_work_fn(struct work_struct *work) smu->ppt_funcs->interrupt_work(smu); } +static void smu_swctf_delayed_work_handler(struct work_struct *work) +{ + struct smu_context *smu = + container_of(work, struct smu_context, swctf_delayed_work.work); + struct smu_temperature_range *range = + &smu->thermal_range; + struct amdgpu_device *adev = smu->adev; + uint32_t hotspot_tmp, size; + + /* + * If the hotspot temperature is confirmed as below SW CTF setting point + * after the delay enforced, nothing will be done. + * Otherwise, a graceful shutdown will be performed to prevent further damage. + */ + if (range->software_shutdown_temp && + smu->ppt_funcs->read_sensor && + !smu->ppt_funcs->read_sensor(smu, + AMDGPU_PP_SENSOR_HOTSPOT_TEMP, + &hotspot_tmp, + &size) && + hotspot_tmp / 1000 < range->software_shutdown_temp) + return; + + dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n"); + dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n"); + orderly_poweroff(true); +} + static int smu_sw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1120,6 +1149,9 @@ static int smu_sw_init(void *handle) smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; + INIT_DELAYED_WORK(&smu->swctf_delayed_work, + smu_swctf_delayed_work_handler); + ret = smu_smc_table_sw_init(smu); if (ret) { dev_err(adev->dev, "Failed to sw init smc table!\n"); @@ -1600,6 +1632,8 @@ static int smu_smc_hw_cleanup(struct smu_context *smu) return ret; } + cancel_delayed_work_sync(&smu->swctf_delayed_work); + ret = smu_disable_dpms(smu); if (ret) { dev_err(adev->dev, "Fail to disable dpm features!\n"); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 09469c750a96..6e2069dcb6b9 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -573,6 +573,8 @@ struct smu_context u32 debug_param_reg; u32 debug_msg_reg; u32 debug_resp_reg; + + struct delayed_work swctf_delayed_work; }; struct i2c_adapter; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 275f708db636..c94d825a871b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -1654,7 +1654,7 @@ static int navi10_force_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t mask) { - int ret = 0, size = 0; + int ret = 0; uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0; soft_min_level = mask ? 
(ffs(mask) - 1) : 0; @@ -1675,15 +1675,15 @@ static int navi10_force_clk_levels(struct smu_context *smu, ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq); if (ret) - return size; + return 0; ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq); if (ret) - return size; + return 0; ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); if (ret) - return size; + return 0; break; case SMU_DCEFCLK: dev_info(smu->adev->dev,"Setting DCEFCLK min/max dpm level is not supported!\n"); @@ -1693,7 +1693,7 @@ static int navi10_force_clk_levels(struct smu_context *smu, break; } - return size; + return 0; } static int navi10_populate_umd_state_clk(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index e1ef88ee1ed3..aa4a5498a12f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1412,13 +1412,8 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev, if (client_id == SOC15_IH_CLIENTID_THM) { switch (src_id) { case THM_11_0__SRCID__THM_DIG_THERM_L2H: - dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n"); - /* - * SW CTF just occurred. - * Try to do a graceful shutdown to prevent further damage. - */ - dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n"); - orderly_poweroff(true); + schedule_delayed_work(&smu->swctf_delayed_work, + msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY)); break; case THM_11_0__SRCID__THM_DIG_THERM_H2L: dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n"); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index e52c563f0dac..3856da6c3f3d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -1353,13 +1353,8 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev, if (client_id == SOC15_IH_CLIENTID_THM) { switch (src_id) { case THM_11_0__SRCID__THM_DIG_THERM_L2H: - dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n"); - /* - * SW CTF just occurred. - * Try to do a graceful shutdown to prevent further damage. 
- */ - dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n"); - orderly_poweroff(true); + schedule_delayed_work(&smu->swctf_delayed_work, + msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY)); break; case THM_11_0__SRCID__THM_DIG_THERM_H2L: dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n"); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index a6083957ae51..124287cbbff8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -1710,6 +1710,7 @@ static int smu_v13_0_0_get_thermal_temperature_range(struct smu_context *smu, range->mem_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM)* SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; range->software_shutdown_temp = powerplay_table->software_shutdown_temp; + range->software_shutdown_temp_offset = pptable->SkuTable.FanAbnormalTempLimitOffset; return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index a92ea4601ea4..6ef12252beb5 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -200,7 +200,6 @@ struct PPTable_t { }; #define SMUQ10_TO_UINT(x) ((x) >> 10) -#define SMUQ16_TO_UINT(x) ((x) >> 16) struct smu_v13_0_6_dpm_map { enum smu_clk_type clk_type; @@ -1994,8 +1993,9 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table gpu_metrics->average_socket_power = SMUQ10_TO_UINT(metrics->SocketPower); + /* Energy is reported in 15.625mJ units */ gpu_metrics->energy_accumulator = - SMUQ16_TO_UINT(metrics->SocketEnergyAcc); + SMUQ10_TO_UINT(metrics->SocketEnergyAcc); gpu_metrics->current_gfxclk = SMUQ10_TO_UINT(metrics->GfxclkFrequency[xcc0]); diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c index 0600fdcd06ef..719447ce86e7 100644 --- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c @@ -2435,7 +2435,8 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder, intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE | - XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_SSC_ENABLE_PLLB, val); + XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_SSC_ENABLE_PLLA | + XELPDP_SSC_ENABLE_PLLB, val); } static u32 intel_cx0_get_powerdown_update(u8 lane_mask) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index be1a87bde0c9..df38632c6237 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -6,6 +6,9 @@ #ifndef __INTEL_DISPLAY_POWER_H__ #define __INTEL_DISPLAY_POWER_H__ +#include <linux/mutex.h> +#include <linux/workqueue.h> + #include "intel_wakeref.h" enum aux_ch; @@ -16,6 +19,7 @@ enum port; struct drm_i915_private; struct i915_power_well; struct intel_encoder; +struct seq_file; /* * Keep the pipe, transcoder, port (DDI_LANES,DDI_IO,AUX) domain instances diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c index 1118ee9d224c..5ad04cd42c15 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_map.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c @@ -1252,10 +1252,18 @@ 
I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_a, POWER_DOMAIN_INIT); #define XELPD_DC_OFF_PORT_POWER_DOMAINS \ + POWER_DOMAIN_PORT_DDI_LANES_C, \ + POWER_DOMAIN_PORT_DDI_LANES_D, \ + POWER_DOMAIN_PORT_DDI_LANES_E, \ POWER_DOMAIN_PORT_DDI_LANES_TC1, \ POWER_DOMAIN_PORT_DDI_LANES_TC2, \ POWER_DOMAIN_PORT_DDI_LANES_TC3, \ POWER_DOMAIN_PORT_DDI_LANES_TC4, \ + POWER_DOMAIN_VGA, \ + POWER_DOMAIN_AUDIO_PLAYBACK, \ + POWER_DOMAIN_AUX_IO_C, \ + POWER_DOMAIN_AUX_IO_D, \ + POWER_DOMAIN_AUX_IO_E, \ POWER_DOMAIN_AUX_C, \ POWER_DOMAIN_AUX_D, \ POWER_DOMAIN_AUX_E, \ @@ -1272,14 +1280,6 @@ I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_a, XELPD_PW_B_POWER_DOMAINS, \ XELPD_PW_C_POWER_DOMAINS, \ XELPD_PW_D_POWER_DOMAINS, \ - POWER_DOMAIN_PORT_DDI_LANES_C, \ - POWER_DOMAIN_PORT_DDI_LANES_D, \ - POWER_DOMAIN_PORT_DDI_LANES_E, \ - POWER_DOMAIN_VGA, \ - POWER_DOMAIN_AUDIO_PLAYBACK, \ - POWER_DOMAIN_AUX_IO_C, \ - POWER_DOMAIN_AUX_IO_D, \ - POWER_DOMAIN_AUX_IO_E, \ XELPD_DC_OFF_PORT_POWER_DOMAINS I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_2, diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h index e494df379e6c..1015bba4af01 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.h +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h @@ -12,6 +12,8 @@ struct drm_i915_private; struct i915_power_well; +struct i915_power_well_ops; +struct intel_encoder; #define for_each_power_well(__dev_priv, __power_well) \ for ((__power_well) = (__dev_priv)->display.power.domains.power_wells; \ diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 5ed450111f77..34fabadefaf6 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -2358,7 +2358,7 @@ int intel_hdcp_enable(struct intel_atomic_state *state, mutex_lock(&dig_port->hdcp_mutex); drm_WARN_ON(&i915->drm, hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED); - hdcp->content_type = (u8)conn_state->content_type; + hdcp->content_type = (u8)conn_state->hdcp_content_type; if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) { hdcp->cpu_transcoder = pipe_config->mst_master_transcoder; diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index d58ed9b62e67..56c17283ba2d 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -933,9 +933,9 @@ static bool _compute_psr2_wake_times(struct intel_dp *intel_dp, } io_wake_lines = intel_usecs_to_scanlines( - &crtc_state->uapi.adjusted_mode, io_wake_time); + &crtc_state->hw.adjusted_mode, io_wake_time); fast_wake_lines = intel_usecs_to_scanlines( - &crtc_state->uapi.adjusted_mode, fast_wake_time); + &crtc_state->hw.adjusted_mode, fast_wake_time); if (io_wake_lines > max_wake_lines || fast_wake_lines > max_wake_lines) diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h index 0f7db617425a..8750cb0d8d9d 100644 --- a/drivers/gpu/drm/i915/display/intel_psr_regs.h +++ b/drivers/gpu/drm/i915/display/intel_psr_regs.h @@ -81,7 +81,7 @@ #define _SRD_AUX_DATA_A 0x60814 #define _SRD_AUX_DATA_EDP 0x6f814 -#define EDP_PSR_AUX_DATA(tran, i) _MMIO_TRANS2(tran, _SRD_AUX_DATA_A + (i) + 4) /* 5 registers */ +#define EDP_PSR_AUX_DATA(tran, i) _MMIO_TRANS2(tran, _SRD_AUX_DATA_A + (i) * 4) /* 5 registers */ #define _SRD_STATUS_A 0x60840 #define _SRD_STATUS_EDP 0x6f840 diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c 
b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c index 01b75529311c..ee9f83af7cf6 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c @@ -606,7 +606,7 @@ static int slpc_set_softlimits(struct intel_guc_slpc *slpc) if (unlikely(ret)) return ret; slpc_to_gt(slpc)->defaults.min_freq = slpc->min_freq_softlimit; - } else if (slpc->min_freq_softlimit != slpc->min_freq) { + } else { return intel_guc_slpc_set_min_freq(slpc, slpc->min_freq_softlimit); } diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 09d4bbcdcdbf..4de6a4e8280d 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -118,15 +118,31 @@ static void mock_gt_probe(struct drm_i915_private *i915) i915->gt[0]->name = "Mock GT"; } +static const struct intel_device_info mock_info = { + .__runtime.graphics.ip.ver = -1, + .__runtime.page_sizes = (I915_GTT_PAGE_SIZE_4K | + I915_GTT_PAGE_SIZE_64K | + I915_GTT_PAGE_SIZE_2M), + .__runtime.memory_regions = REGION_SMEM, + .__runtime.platform_engine_mask = BIT(0), + + /* simply use legacy cache level for mock device */ + .max_pat_index = 3, + .cachelevel_to_pat = { + [I915_CACHE_NONE] = 0, + [I915_CACHE_LLC] = 1, + [I915_CACHE_L3_LLC] = 2, + [I915_CACHE_WT] = 3, + }, +}; + struct drm_i915_private *mock_gem_device(void) { #if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU) static struct dev_iommu fake_iommu = { .priv = (void *)-1 }; #endif struct drm_i915_private *i915; - struct intel_device_info *i915_info; struct pci_dev *pdev; - unsigned int i; int ret; pdev = kzalloc(sizeof(*pdev), GFP_KERNEL); @@ -159,15 +175,18 @@ struct drm_i915_private *mock_gem_device(void) pci_set_drvdata(pdev, i915); + /* Device parameters start as a copy of module parameters. */ + i915_params_copy(&i915->params, &i915_modparams); + + /* Set up device info and initial runtime info. 
*/ + intel_device_info_driver_create(i915, pdev->device, &mock_info); + dev_pm_domain_set(&pdev->dev, &pm_domain); pm_runtime_enable(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); if (pm_runtime_enabled(&pdev->dev)) WARN_ON(pm_runtime_get_sync(&pdev->dev)); - - i915_params_copy(&i915->params, &i915_modparams); - intel_runtime_pm_init_early(&i915->runtime_pm); /* wakeref tracking has significant overhead */ i915->runtime_pm.no_wakeref_tracking = true; @@ -175,21 +194,6 @@ struct drm_i915_private *mock_gem_device(void) /* Using the global GTT may ask questions about KMS users, so prepare */ drm_mode_config_init(&i915->drm); - RUNTIME_INFO(i915)->graphics.ip.ver = -1; - - RUNTIME_INFO(i915)->page_sizes = - I915_GTT_PAGE_SIZE_4K | - I915_GTT_PAGE_SIZE_64K | - I915_GTT_PAGE_SIZE_2M; - - RUNTIME_INFO(i915)->memory_regions = REGION_SMEM; - - /* simply use legacy cache level for mock device */ - i915_info = (struct intel_device_info *)INTEL_INFO(i915); - i915_info->max_pat_index = 3; - for (i = 0; i < I915_MAX_CACHE_LEVEL; i++) - i915_info->cachelevel_to_pat[i] = i; - intel_memory_regions_hw_probe(i915); spin_lock_init(&i915->gpu_error.lock); @@ -223,7 +227,6 @@ struct drm_i915_private *mock_gem_device(void) mock_init_ggtt(to_gt(i915)); to_gt(i915)->vm = i915_vm_get(&to_gt(i915)->ggtt->vm); - RUNTIME_INFO(i915)->platform_engine_mask = BIT(0); to_gt(i915)->info.engine_mask = BIT(0); to_gt(i915)->engine[RCS0] = mock_engine(i915, "mock", RCS0); diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c index 3cc9fb0d4f5d..dc276c346fd1 100644 --- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c +++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c @@ -2139,9 +2139,9 @@ static const struct panel_desc starry_himax83102_j02_desc = { static const struct drm_display_mode starry_ili9882t_default_mode = { .clock = 165280, .hdisplay = 1200, - .hsync_start = 1200 + 32, - .hsync_end = 1200 + 32 + 30, - .htotal = 1200 + 32 + 30 + 32, + .hsync_start = 1200 + 72, + .hsync_end = 1200 + 72 + 30, + .htotal = 1200 + 72 + 30 + 72, .vdisplay = 1920, .vsync_start = 1920 + 68, .vsync_end = 1920 + 68 + 2, |