commit 7f0020417b7bceaa10cf9e1efe2b3f5c554d972e (linux-rolling-stable)
Merge: 616e143c9fd8 ea061bad207e
Author:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
AuthorDate: 2025-04-25 11:51:21 +0300
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: 2025-04-25 11:51:21 +0300
Tree:       69d11757acfb4bacfb5f82f6f0fbf04339b3cd38

    Merge v6.14.4 into linux-rolling-stable

    Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Diffstat (limited to 'drivers/gpu'):
 58 files changed, 510 insertions(+), 230 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 423fd2eebe1e..892a5315677f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -439,6 +439,13 @@ success:
 	return true;
 }
 
+static bool amdgpu_prefer_rom_resource(struct amdgpu_device *adev)
+{
+	struct resource *res = &adev->pdev->resource[PCI_ROM_RESOURCE];
+
+	return (res->flags & IORESOURCE_ROM_SHADOW);
+}
+
 static bool amdgpu_get_bios_dgpu(struct amdgpu_device *adev)
 {
 	if (amdgpu_atrm_get_bios(adev)) {
@@ -457,14 +464,27 @@ static bool amdgpu_get_bios_dgpu(struct amdgpu_device *adev)
 		goto success;
 	}
 
-	if (amdgpu_read_platform_bios(adev)) {
-		dev_info(adev->dev, "Fetched VBIOS from platform\n");
-		goto success;
-	}
+	if (amdgpu_prefer_rom_resource(adev)) {
+		if (amdgpu_read_bios(adev)) {
+			dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
+			goto success;
+		}
 
-	if (amdgpu_read_bios(adev)) {
-		dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
-		goto success;
+		if (amdgpu_read_platform_bios(adev)) {
+			dev_info(adev->dev, "Fetched VBIOS from platform\n");
+			goto success;
+		}
+
+	} else {
+		if (amdgpu_read_platform_bios(adev)) {
+			dev_info(adev->dev, "Fetched VBIOS from platform\n");
+			goto success;
+		}
+
+		if (amdgpu_read_bios(adev)) {
+			dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
+			goto success;
+		}
 	}
 
 	if (amdgpu_read_bios_from_rom(adev)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 9a8f6cb2b836..71e8a76180ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3442,6 +3442,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 			amdgpu_device_mem_scratch_fini(adev);
 			amdgpu_ib_pool_fini(adev);
 			amdgpu_seq64_fini(adev);
+			amdgpu_doorbell_fini(adev);
 		}
 		if (adev->ip_blocks[i].version->funcs->sw_fini) {
 			r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]);
@@ -4770,7 +4771,6 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
 
 		iounmap(adev->rmmio);
 		adev->rmmio = NULL;
-		amdgpu_doorbell_fini(adev);
 		drm_dev_exit(idx);
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 9f627caedc3f..c9842a0e2a1c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -184,7 +184,7 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
 				 struct sg_table *sgt,
 				 enum dma_data_direction dir)
 {
-	if (sgt->sgl->page_link) {
+	if (sg_page(sgt->sgl)) {
 		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
 		sg_free_table(sgt);
 		kfree(sgt);
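The amdgpu_dma_buf.c hunk above replaces a raw truthiness test of sgt->sgl->page_link with sg_page(). The distinction matters because page_link is not a plain pointer: its low bits encode the scatterlist chain/end markers, while entries describing VRAM carry only DMA addresses and no struct page at all. A minimal standalone sketch of the accessor pattern — the flag values and struct layout below are simplified stand-ins for <linux/scatterlist.h>, not the kernel's exact definitions:

#include <stdint.h>

#define SG_CHAIN 0x01UL	/* low-bit markers, as in the kernel's scatterlist */
#define SG_END   0x02UL

struct scatterlist_like {
	unsigned long page_link;	/* page pointer with flag bits OR'ed in */
};

/* mask off the marker bits to recover the page pointer, as sg_page() does */
static inline void *sg_page_like(const struct scatterlist_like *sg)
{
	return (void *)(sg->page_link & ~(SG_CHAIN | SG_END));
}

Testing page_link directly conflates the pointer with its marker bits; sg_page() cleanly answers "does this entry reference system pages that were dma-mapped", which is the question both this hunk and the xe_dma_buf.c hunk further down actually need.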
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index c0ddbe7d6f0b..24c255e05079 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1791,7 +1791,6 @@ static const u16 amdgpu_unsupported_pciidlist[] = {
 };
 
 static const struct pci_device_id pciidlist[] = {
-#ifdef CONFIG_DRM_AMDGPU_SI
 	{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
 	{0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
 	{0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
@@ -1864,8 +1863,6 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
 	{0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
 	{0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
 	/* Kaveri */
 	{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
 	{0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
@@ -1948,7 +1945,6 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
 	{0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
 	{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
-#endif
 	/* topaz */
 	{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
 	{0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
@@ -2280,14 +2276,14 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 		return -ENOTSUPP;
 	}
 
+	switch (flags & AMD_ASIC_MASK) {
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+	case CHIP_VERDE:
+	case CHIP_OLAND:
+	case CHIP_HAINAN:
 #ifdef CONFIG_DRM_AMDGPU_SI
-	if (!amdgpu_si_support) {
-		switch (flags & AMD_ASIC_MASK) {
-		case CHIP_TAHITI:
-		case CHIP_PITCAIRN:
-		case CHIP_VERDE:
-		case CHIP_OLAND:
-		case CHIP_HAINAN:
+		if (!amdgpu_si_support) {
 			dev_info(&pdev->dev,
 				 "SI support provided by radeon.\n");
 			dev_info(&pdev->dev,
@@ -2295,16 +2291,18 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 				);
 			return -ENODEV;
 		}
-	}
+		break;
+#else
+		dev_info(&pdev->dev, "amdgpu is built without SI support.\n");
+		return -ENODEV;
 #endif
+	case CHIP_KAVERI:
+	case CHIP_BONAIRE:
+	case CHIP_HAWAII:
+	case CHIP_KABINI:
+	case CHIP_MULLINS:
 #ifdef CONFIG_DRM_AMDGPU_CIK
-	if (!amdgpu_cik_support) {
-		switch (flags & AMD_ASIC_MASK) {
-		case CHIP_KAVERI:
-		case CHIP_BONAIRE:
-		case CHIP_HAWAII:
-		case CHIP_KABINI:
-		case CHIP_MULLINS:
+		if (!amdgpu_cik_support) {
 			dev_info(&pdev->dev,
 				 "CIK support provided by radeon.\n");
 			dev_info(&pdev->dev,
@@ -2312,8 +2310,14 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 				);
 			return -ENODEV;
 		}
-	}
+		break;
+#else
+		dev_info(&pdev->dev, "amdgpu is built without CIK support.\n");
+		return -ENODEV;
 #endif
+	default:
+		break;
+	}
 
 	adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev);
 	if (IS_ERR(adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 96f4b8904e9a..00752e3f9d8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -163,8 +163,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 		 * When GTT is just an alternative to VRAM make sure that we
 		 * only use it as fallback and still try to fill up VRAM first.
 		 */
-		if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
-		    !(adev->flags & AMD_IS_APU))
+		if (abo->tbo.resource && !(adev->flags & AMD_IS_APU) &&
+		    domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
 			places[c].flags |= TTM_PL_FLAG_FALLBACK;
 		c++;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index f9a4d08eef92..0f808ffcab94 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -899,6 +899,10 @@ static void mes_v11_0_get_fw_version(struct amdgpu_device *adev)
 {
 	int pipe;
 
+	/* return early if we have already fetched these */
+	if (adev->mes.sched_version && adev->mes.kiq_version)
+		return;
+
 	/* get MES scheduler/KIQ versions */
 	mutex_lock(&adev->srbm_mutex);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 0fd0fa6ed518..6b121c2723d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -1390,17 +1390,20 @@ static int mes_v12_0_queue_init(struct amdgpu_device *adev,
 		mes_v12_0_queue_init_register(ring);
 	}
 
-	/* get MES scheduler/KIQ versions */
-	mutex_lock(&adev->srbm_mutex);
-	soc21_grbm_select(adev, 3, pipe, 0, 0);
+	if (((pipe == AMDGPU_MES_SCHED_PIPE) && !adev->mes.sched_version) ||
+	    ((pipe == AMDGPU_MES_KIQ_PIPE) && !adev->mes.kiq_version)) {
+		/* get MES scheduler/KIQ versions */
+		mutex_lock(&adev->srbm_mutex);
+		soc21_grbm_select(adev, 3, pipe, 0, 0);
 
-	if (pipe == AMDGPU_MES_SCHED_PIPE)
-		adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
-	else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
-		adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
+		if (pipe == AMDGPU_MES_SCHED_PIPE)
+			adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
+		else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
+			adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
 
-	soc21_grbm_select(adev, 0, 0, 0, 0);
-	mutex_unlock(&adev->srbm_mutex);
+		soc21_grbm_select(adev, 0, 0, 0, 0);
+		mutex_unlock(&adev->srbm_mutex);
+	}
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 39df45f652b3..80a3cbd2cbe5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1719,9 +1719,30 @@ static const struct dmi_system_id dmi_quirk_table[] = {
 		.callback = edp0_on_dp1_callback,
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 645 14 inch G11 Notebook PC"),
+		},
+	},
+	{
+		.callback = edp0_on_dp1_callback,
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
 		},
 	},
+	{
+		.callback = edp0_on_dp1_callback,
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 445 14 inch G11 Notebook PC"),
+		},
+	},
+	{
+		.callback = edp0_on_dp1_callback,
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 465 16 inch G11 Notebook PC"),
+		},
+	},
 	{}
 	/* TODO: refactor this from a fixed table to a dynamic option */
 };
@@ -8577,14 +8598,39 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
 		int offdelay;
 
 		if (acrtc_state) {
-			if (amdgpu_ip_version(adev, DCE_HWIP, 0) <
-			    IP_VERSION(3, 5, 0) ||
-			    acrtc_state->stream->link->psr_settings.psr_version <
-			    DC_PSR_VERSION_UNSUPPORTED ||
-			    !(adev->flags & AMD_IS_APU)) {
-				timing = &acrtc_state->stream->timing;
-
-				/* at least 2 frames */
+			timing = &acrtc_state->stream->timing;
+
+			/*
+			 * Depending on when the HW latching event of double-buffered
+			 * registers happen relative to the PSR SDP deadline, and how
+			 * bad the Panel clock has drifted since the last ALPM off
+			 * event, there can be up to 3 frames of delay between sending
+			 * the PSR exit cmd to DMUB fw, and when the panel starts
+			 * displaying live frames.
+			 *
+			 * We can set:
+			 *
+			 * 20/100 * offdelay_ms = 3_frames_ms
+			 * => offdelay_ms = 5 * 3_frames_ms
+			 *
+			 * This ensures that `3_frames_ms` will only be experienced as a
+			 * 20% delay on top how long the display has been static, and
+			 * thus make the delay less perceivable.
+			 */
+			if (acrtc_state->stream->link->psr_settings.psr_version <
+			    DC_PSR_VERSION_UNSUPPORTED) {
+				offdelay = DIV64_U64_ROUND_UP((u64)5 * 3 * 10 *
+							      timing->v_total *
+							      timing->h_total,
+							      timing->pix_clk_100hz);
+				config.offdelay_ms = offdelay ?: 30;
+			} else if (amdgpu_ip_version(adev, DCE_HWIP, 0) <
+				   IP_VERSION(3, 5, 0) ||
+				   !(adev->flags & AMD_IS_APU)) {
+				/*
+				 * Older HW and DGPU have issues with instant off;
+				 * use a 2 frame offdelay.
+				 */
 				offdelay = DIV64_U64_ROUND_UP((u64)20 *
 							      timing->v_total *
 							      timing->h_total,
@@ -8592,6 +8638,8 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
 
 				config.offdelay_ms = offdelay ?: 30;
 			} else {
+				/* offdelay_ms = 0 will never disable vblank */
+				config.offdelay_ms = 1;
 				config.disable_immediate = true;
 			}
 
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 36a830a7440f..87058271b00c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -113,6 +113,7 @@ bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state)
  *
  * Panel Replay and PSR SU
  * - Enable when:
+ * - VRR is disabled
  * - vblank counter is disabled
  * - entry is allowed: usermode demonstrates an adequate number of fast
  *   commits)
@@ -131,19 +132,20 @@ static void amdgpu_dm_crtc_set_panel_sr_feature(
 	bool is_sr_active = (link->replay_settings.replay_allow_active ||
 			     link->psr_settings.psr_allow_active);
 	bool is_crc_window_active = false;
+	bool vrr_active = amdgpu_dm_crtc_vrr_active_irq(vblank_work->acrtc);
 
 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
 	is_crc_window_active =
 		amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base);
 #endif
 
-	if (link->replay_settings.replay_feature_enabled &&
+	if (link->replay_settings.replay_feature_enabled && !vrr_active &&
 		allow_sr_entry && !is_sr_active && !is_crc_window_active) {
 		amdgpu_dm_replay_enable(vblank_work->stream, true);
 	} else if (vblank_enabled) {
 		if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active)
 			amdgpu_dm_psr_disable(vblank_work->stream, false);
-	} else if (link->psr_settings.psr_feature_enabled &&
+	} else if (link->psr_settings.psr_feature_enabled && !vrr_active &&
 		allow_sr_entry && !is_sr_active && !is_crc_window_active) {
 
 		struct amdgpu_dm_connector *aconn =
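The offdelay expression introduced in manage_dm_interrupts() above is easy to sanity-check with a concrete timing. A small userspace sketch, assuming a standard 1080p60 CEA mode (h_total 2200, v_total 1125, 148.5 MHz pixel clock, i.e. pix_clk_100hz = 1485000): one frame is v_total * h_total / pix_clk, about 16.7 ms, so 3 frames are about 50 ms and 5 * 3 frames = 250 ms.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t v_total = 1125, h_total = 2200, pix_clk_100hz = 1485000;
	/* mirrors DIV64_U64_ROUND_UP((u64)5 * 3 * 10 * v_total * h_total, pix_clk_100hz) */
	uint64_t num = 5 * 3 * 10 * v_total * h_total;
	uint64_t offdelay_ms = (num + pix_clk_100hz - 1) / pix_clk_100hz;

	printf("offdelay_ms = %llu\n", (unsigned long long)offdelay_ms); /* prints 250 */
	return 0;
}

The factor of 10 converts the 100 Hz pixel-clock units into a millisecond result, and the ?: 30 fallback in the driver covers a zero result from degenerate timings.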
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
index fb80ba9287b6..d6fd13f43c08 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
@@ -2,6 +2,7 @@
 //
 // Copyright 2024 Advanced Micro Devices, Inc.
 
+#include <linux/vmalloc.h>
 
 #include "dml2_internal_types.h"
 #include "dml_top.h"
@@ -13,11 +14,11 @@ static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
 {
-	*dml_ctx = kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
+	*dml_ctx = vzalloc(sizeof(struct dml2_context));
 	if (!(*dml_ctx))
 		return false;
 
-	(*dml_ctx)->v21.dml_init.dml2_instance = kzalloc(sizeof(struct dml2_instance), GFP_KERNEL);
+	(*dml_ctx)->v21.dml_init.dml2_instance = vzalloc(sizeof(struct dml2_instance));
 	if (!((*dml_ctx)->v21.dml_init.dml2_instance))
 		return false;
 
@@ -27,7 +28,7 @@ static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
 	(*dml_ctx)->v21.mode_support.display_config = &(*dml_ctx)->v21.display_config;
 	(*dml_ctx)->v21.mode_programming.display_config = (*dml_ctx)->v21.mode_support.display_config;
 
-	(*dml_ctx)->v21.mode_programming.programming = kzalloc(sizeof(struct dml2_display_cfg_programming), GFP_KERNEL);
+	(*dml_ctx)->v21.mode_programming.programming = vzalloc(sizeof(struct dml2_display_cfg_programming));
 	if (!((*dml_ctx)->v21.mode_programming.programming))
 		return false;
 
@@ -86,6 +87,8 @@ static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, co
 	/* Store configuration options */
 	(*dml_ctx)->config = *config;
 
+	DC_FP_START();
+
 	/*Initialize SOCBB and DCNIP params */
 	dml21_initialize_soc_bb_params(&(*dml_ctx)->v21.dml_init, config, in_dc);
 	dml21_initialize_ip_params(&(*dml_ctx)->v21.dml_init, config, in_dc);
@@ -96,6 +99,8 @@ static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, co
 
 	/*Initialize DML21 instance */
 	dml2_initialize_instance(&(*dml_ctx)->v21.dml_init);
+
+	DC_FP_END();
 }
 
 bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config)
@@ -111,8 +116,8 @@ bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const s
 
 void dml21_destroy(struct dml2_context *dml2)
 {
-	kfree(dml2->v21.dml_init.dml2_instance);
-	kfree(dml2->v21.mode_programming.programming);
+	vfree(dml2->v21.dml_init.dml2_instance);
+	vfree(dml2->v21.mode_programming.programming);
 }
 
 static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *context, struct resource_context *out_new_hw_state,
@@ -269,11 +274,16 @@ bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml
 {
 	bool out = false;
 
+	DC_FP_START();
+
 	/* Use dml_validate_only for fast_validate path */
-	if (fast_validate) {
+	if (fast_validate)
 		out = dml21_check_mode_support(in_dc, context, dml_ctx);
-	} else
+	else
 		out = dml21_mode_check_and_programming(in_dc, context, dml_ctx);
+
+	DC_FP_END();
+
 	return out;
 }
 
@@ -412,8 +422,12 @@ void dml21_copy(struct dml2_context *dst_dml_ctx,
 
 	dst_dml_ctx->v21.mode_programming.programming = dst_dml2_programming;
 
+	DC_FP_START();
+
 	/* need to initialize copied instance for internal references to be correct */
 	dml2_initialize_instance(&dst_dml_ctx->v21.dml_init);
+
+	DC_FP_END();
 }
 
 bool dml21_create_copy(struct dml2_context **dst_dml_ctx,
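The dml21_wrapper.c change above (and the dml2_wrapper.c change that follows) swaps kzalloc() for vzalloc() because the DML2 state structures span many pages: kzalloc() demands physically contiguous memory and can fail on a fragmented system, while vzalloc() only needs virtually contiguous pages, which is sufficient for CPU-only bookkeeping data. A minimal sketch of the pairing — struct big_ctx is a hypothetical stand-in for the DML2 context:

#include <linux/vmalloc.h>

struct big_ctx { char state[512 * 1024]; };	/* hypothetical multi-page blob */

static struct big_ctx *big_ctx_alloc(void)
{
	return vzalloc(sizeof(struct big_ctx));	/* zeroed, virtually contiguous */
}

static void big_ctx_free(struct big_ctx *ctx)
{
	vfree(ctx);	/* vzalloc/vmalloc memory must go back via vfree, not kfree */
}

The matching dml21_destroy()/dml2_destroy() hunks make the same kfree-to-vfree switch, since mixing the two allocators corrupts the heap.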
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 68b882d28195..d0f9df2daeb4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -24,6 +24,8 @@
  *
  */
 
+#include <linux/vmalloc.h>
+
 #include "display_mode_core.h"
 #include "dml2_internal_types.h"
 #include "dml2_utils.h"
@@ -732,17 +734,22 @@ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2
 		return out;
 	}
 
+	DC_FP_START();
+
 	/* Use dml_validate_only for fast_validate path */
 	if (fast_validate)
 		out = dml2_validate_only(context);
 	else
 		out = dml2_validate_and_build_resource(in_dc, context);
+
+	DC_FP_END();
+
 	return out;
 }
 
 static inline struct dml2_context *dml2_allocate_memory(void)
 {
-	return (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
+	return (struct dml2_context *) vzalloc(sizeof(struct dml2_context));
 }
 
 static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
@@ -776,11 +783,15 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op
 		break;
 	}
 
+	DC_FP_START();
+
 	initialize_dml2_ip_params(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.ip);
 
 	initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc);
 
 	initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states);
+
+	DC_FP_END();
 }
 
 bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
@@ -806,7 +817,7 @@ void dml2_destroy(struct dml2_context *dml2)
 	if (dml2->architecture == dml2_architecture_21)
 		dml21_destroy(dml2);
 
-	kfree(dml2);
+	vfree(dml2);
 }
 
 void dml2_extract_dram_and_fclk_change_support(struct dml2_context *dml2,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index a5e18ab72394..b78096a7690e 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -3027,7 +3027,11 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
 		dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst);
 
 		phyd32clk = get_phyd32clk_src(link);
-		dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
+		if (link->cur_link_settings.link_rate == LINK_RATE_UNKNOWN) {
+			dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
+		} else {
+			dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
+		}
 	} else {
 		if (dccg->funcs->enable_symclk_se)
 			dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 555a9f590cd7..89af3e4afbc2 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -936,8 +936,11 @@ void dcn401_enable_stream(struct pipe_ctx *pipe_ctx)
 	if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) {
 		if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
 			dccg->funcs->set_dpstreamclk(dccg, DPREFCLK, tg->inst, dp_hpo_inst);
-
-			dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
+			if (link->cur_link_settings.link_rate == LINK_RATE_UNKNOWN) {
+				dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
+			} else {
+				dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
+			}
 		} else {
 			dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,
 						      link_enc->transmitter - TRANSMITTER_UNIPHY_A);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index 911bd60d4fbc..3c42ba8566cf 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -890,7 +890,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.disable_z10 = true,
 	.enable_legacy_fast_update = true,
 	.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
-	.dml_hostvm_override = DML_HOSTVM_NO_OVERRIDE,
+	.dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
 	.using_dml2 = false,
 };
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
index a8fc0fa44db6..ba5c1237fcfe 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
@@ -267,10 +267,10 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
 	if (hwmgr->thermal_controller.fanInfo.bNoFan ||
 	    (hwmgr->thermal_controller.fanInfo.
 	     ucTachometerPulsesPerRevolution == 0) ||
-	    speed == 0 ||
+	    (!speed || speed > UINT_MAX/8) ||
 	    (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
 	    (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
-		return 0;
+		return -EINVAL;
 
 	if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
 		smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
index 379012494da5..56423aedf3fa 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
@@ -307,10 +307,10 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
 	int result = 0;
 
 	if (hwmgr->thermal_controller.fanInfo.bNoFan ||
-	    speed == 0 ||
+	    (!speed || speed > UINT_MAX/8) ||
 	    (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
 	    (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
-		return -1;
+		return -EINVAL;
 
 	if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
 		result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
index a3331ffb2daf..1b1c88590156 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
@@ -191,7 +191,7 @@ int vega20_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
 	uint32_t tach_period, crystal_clock_freq;
 	int result = 0;
 
-	if (!speed)
+	if (!speed || speed > UINT_MAX/8)
 		return -EINVAL;
 
 	if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 8aa61a9f7778..453952cdc353 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1267,6 +1267,9 @@ static int arcturus_set_fan_speed_rpm(struct smu_context *smu,
 	uint32_t crystal_clock_freq = 2500;
 	uint32_t tach_period;
 
+	if (!speed || speed > UINT_MAX/8)
+		return -EINVAL;
+
 	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
 	WREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT,
 		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT),
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 189c6a32b6bd..54229b991858 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -1200,7 +1200,7 @@ int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
 	uint32_t crystal_clock_freq = 2500;
 	uint32_t tach_period;
 
-	if (speed == 0)
+	if (!speed || speed > UINT_MAX/8)
 		return -EINVAL;
 	/*
 	 * To prevent from possible overheat, some ASICs may have requirement
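All of the fan-RPM paths patched above share the same tachometer-period computation, and the new speed > UINT_MAX/8 bound exists because that computation multiplies the caller-supplied RPM by 8 in 32-bit arithmetic. A minimal sketch of the failure mode, using the constants visible in arcturus_set_fan_speed_rpm():

#include <stdint.h>

static uint32_t tach_period(uint32_t speed)
{
	uint32_t crystal_clock_freq = 2500;
	/* 60 * 2500 * 10000 = 1,500,000,000 still fits in 32 bits ... */
	return 60 * crystal_clock_freq * 10000 / (8 * speed);
}

/*
 * ... but with speed = 536870912 (one more than UINT_MAX/8 = 536870911),
 * 8 * speed wraps to exactly 0 and the division traps; slightly smaller
 * out-of-range values silently produce a bogus period instead.
 * Hence the shared guard: if (!speed || speed > UINT_MAX/8) return -EINVAL;
 */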
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index fbbdfa54f6a2..b508d475d2e1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -1229,7 +1229,7 @@ int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
 	uint32_t tach_period;
 	int ret;
 
-	if (!speed)
+	if (!speed || speed > UINT_MAX/8)
 		return -EINVAL;
 
 	ret = smu_v13_0_auto_fan_control(smu, 0);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 3f1fcf8c4ee8..5b5971f750a6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -79,6 +79,7 @@
 #define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET	8
 #define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE	9
 #define PP_OD_FEATURE_FAN_MINIMUM_PWM		10
+#define PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE	11
 
 static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = {
 	MSG_MAP(TestMessage,			PPSMC_MSG_TestMessage,			1),
@@ -1042,6 +1043,10 @@ static void smu_v14_0_2_get_od_setting_limits(struct smu_context *smu,
 		od_min_setting = overdrive_lowerlimits->FanMinimumPwm;
 		od_max_setting = overdrive_upperlimits->FanMinimumPwm;
 		break;
+	case PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE:
+		od_min_setting = overdrive_lowerlimits->FanZeroRpmEnable;
+		od_max_setting = overdrive_upperlimits->FanZeroRpmEnable;
+		break;
 	default:
 		od_min_setting = od_max_setting = INT_MAX;
 		break;
@@ -1320,6 +1325,24 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
 				      min_value, max_value);
 		break;
 
+	case SMU_OD_FAN_ZERO_RPM_ENABLE:
+		if (!smu_v14_0_2_is_od_feature_supported(smu,
+							 PP_OD_FEATURE_ZERO_FAN_BIT))
+			break;
+
+		size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_ENABLE:\n");
+		size += sysfs_emit_at(buf, size, "%d\n",
+				      (int)od_table->OverDriveTable.FanZeroRpmEnable);
+
+		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+		smu_v14_0_2_get_od_setting_limits(smu,
+						  PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE,
+						  &min_value,
+						  &max_value);
+		size += sysfs_emit_at(buf, size, "ZERO_RPM_ENABLE: %u %u\n",
+				      min_value, max_value);
+		break;
+
 	case SMU_OD_RANGE:
 		if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) &&
 		    !smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) &&
@@ -2260,7 +2283,9 @@ static void smu_v14_0_2_set_supported_od_feature_mask(struct smu_context *smu)
 					    OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE |
 					    OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET |
 					    OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE |
-					    OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET;
+					    OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET |
+					    OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE |
+					    OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET;
 }
 
 static int smu_v14_0_2_get_overdrive_table(struct smu_context *smu,
@@ -2339,6 +2364,8 @@ static int smu_v14_0_2_set_default_od_settings(struct smu_context *smu)
 			user_od_table_bak.OverDriveTable.FanTargetTemperature;
 		user_od_table->OverDriveTable.FanMinimumPwm =
 			user_od_table_bak.OverDriveTable.FanMinimumPwm;
+		user_od_table->OverDriveTable.FanZeroRpmEnable =
+			user_od_table_bak.OverDriveTable.FanZeroRpmEnable;
 	}
 
 	smu_v14_0_2_set_supported_od_feature_mask(smu);
@@ -2386,6 +2413,11 @@ static int smu_v14_0_2_od_restore_table_single(struct smu_context *smu, long inp
 		od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
 		od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
 		break;
+	case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE:
+		od_table->OverDriveTable.FanZeroRpmEnable =
+			boot_overdrive_table->OverDriveTable.FanZeroRpmEnable;
+		od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
+		break;
 	case PP_OD_EDIT_ACOUSTIC_LIMIT:
 		od_table->OverDriveTable.AcousticLimitRpmThreshold =
 			boot_overdrive_table->OverDriveTable.AcousticLimitRpmThreshold;
@@ -2668,6 +2700,27 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
 			od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
 		break;
 
+	case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE:
+		if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) {
+			dev_warn(adev->dev, "Zero RPM setting not supported!\n");
+			return -ENOTSUPP;
+		}
+
+		smu_v14_0_2_get_od_setting_limits(smu,
+						  PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE,
+						  &minimum,
+						  &maximum);
+		if (input[0] < minimum ||
+		    input[0] > maximum) {
+			dev_info(adev->dev, "zero RPM enable setting(%ld) must be within [%d, %d]!\n",
+				 input[0], minimum, maximum);
+			return -EINVAL;
+		}
+
+		od_table->OverDriveTable.FanZeroRpmEnable = input[0];
+		od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
+		break;
+
 	case PP_OD_RESTORE_DEFAULT_TABLE:
 		if (size == 1) {
 			ret = smu_v14_0_2_od_restore_table_single(smu, input[0]);
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index b9eb67e3fa90..2d7482a65f62 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -17,6 +17,12 @@ static bool ast_astdp_is_connected(struct ast_device *ast)
 {
 	if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, AST_IO_VGACRDF_HPD))
 		return false;
+	/*
+	 * HPD might be set even if no monitor is connected, so also check that
+	 * the link training was successful.
+	 */
+	if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC, AST_IO_VGACRDC_LINK_SUCCESS))
+		return false;
 	return true;
 }
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 23edc81741de..dd1cae3021af 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -244,6 +244,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
 		qi->deinterleave = 4;
 		break;
 	case INTEL_DRAM_GDDR:
+	case INTEL_DRAM_GDDR_ECC:
 		qi->channel_width = 32;
 		break;
 	default:
@@ -398,6 +399,12 @@ static const struct intel_sa_info xe2_hpd_sa_info = {
 	/* Other values not used by simplified algorithm */
 };
 
+static const struct intel_sa_info xe2_hpd_ecc_sa_info = {
+	.derating = 45,
+	.deprogbwlimit = 53,
+	/* Other values not used by simplified algorithm */
+};
+
 static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
 {
 	struct intel_qgv_info qi = {};
@@ -740,10 +747,15 @@ static unsigned int icl_qgv_bw(struct drm_i915_private *i915,
 
 void intel_bw_init_hw(struct drm_i915_private *dev_priv)
 {
+	const struct dram_info *dram_info = &dev_priv->dram_info;
+
 	if (!HAS_DISPLAY(dev_priv))
 		return;
 
-	if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv))
+	if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv) &&
+	    dram_info->type == INTEL_DRAM_GDDR_ECC)
+		xe2_hpd_get_bw_info(dev_priv, &xe2_hpd_ecc_sa_info);
+	else if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv))
 		xe2_hpd_get_bw_info(dev_priv, &xe2_hpd_sa_info);
 	else if (DISPLAY_VER(dev_priv) >= 14)
 		tgl_get_bw_info(dev_priv, &mtl_sa_info);
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index c9dcf2bbd4c7..c974f86eaf93 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -1109,7 +1109,9 @@ static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state,
 		old_crtc_state->vrr.vmin != new_crtc_state->vrr.vmin ||
 		old_crtc_state->vrr.vmax != new_crtc_state->vrr.vmax ||
 		old_crtc_state->vrr.guardband != new_crtc_state->vrr.guardband ||
-		old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full;
+		old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full ||
+		old_crtc_state->vrr.vsync_start != new_crtc_state->vrr.vsync_start ||
+		old_crtc_state->vrr.vsync_end != new_crtc_state->vrr.vsync_end;
 }
 
 static bool cmrr_params_changed(const struct intel_crtc_state *old_crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 9a333d9e6601..3b1e8e4c86e7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -159,6 +159,7 @@ struct intel_display_platforms {
 #define HAS_DPT(__display)		(DISPLAY_VER(__display) >= 13)
 #define HAS_DSB(__display)		(DISPLAY_INFO(__display)->has_dsb)
 #define HAS_DSC(__display)		(DISPLAY_RUNTIME_INFO(__display)->has_dsc)
+#define HAS_DSC_3ENGINES(__display)	(DISPLAY_VERx100(__display) == 1401 && HAS_DSC(__display))
 #define HAS_DSC_MST(__display)		(DISPLAY_VER(__display) >= 12 && HAS_DSC(__display))
 #define HAS_FBC(__display)		(DISPLAY_RUNTIME_INFO(__display)->fbc_mask != 0)
 #define HAS_FPGA_DBG_UNCLAIMED(__display)	(DISPLAY_INFO(__display)->has_fpga_dbg)
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index aa77ddcee42c..219ee9b7f976 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -172,10 +172,28 @@ int intel_dp_link_symbol_clock(int rate)
 
 static int max_dprx_rate(struct intel_dp *intel_dp)
 {
+	struct intel_display *display = to_intel_display(intel_dp);
+	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+	int max_rate;
+
 	if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
-		return drm_dp_tunnel_max_dprx_rate(intel_dp->tunnel);
+		max_rate = drm_dp_tunnel_max_dprx_rate(intel_dp->tunnel);
+	else
+		max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
 
-	return drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
+	/*
+	 * Some broken eDP sinks illegally declare support for
+	 * HBR3 without TPS4, and are unable to produce a stable
+	 * output. Reject HBR3 when TPS4 is not available.
+	 */
+	if (max_rate >= 810000 && !drm_dp_tps4_supported(intel_dp->dpcd)) {
+		drm_dbg_kms(display->drm,
+			    "[ENCODER:%d:%s] Rejecting HBR3 due to missing TPS4 support\n",
+			    encoder->base.base.id, encoder->base.name);
+		max_rate = 540000;
+	}
+
+	return max_rate;
 }
 
 static int max_dprx_lane_count(struct intel_dp *intel_dp)
@@ -1032,10 +1050,11 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
 		u8 test_slice_count = valid_dsc_slicecount[i] * num_joined_pipes;
 
 		/*
-		 * 3 DSC Slices per pipe need 3 DSC engines,
-		 * which is supported only with Ultrajoiner.
+		 * 3 DSC Slices per pipe need 3 DSC engines, which is supported only
+		 * with Ultrajoiner only for some platforms.
 		 */
-		if (valid_dsc_slicecount[i] == 3 && num_joined_pipes != 4)
+		if (valid_dsc_slicecount[i] == 3 &&
+		    (!HAS_DSC_3ENGINES(display) || num_joined_pipes != 4))
 			continue;
 
 		if (test_slice_count >
@@ -4188,6 +4207,9 @@ static void intel_edp_mso_init(struct intel_dp *intel_dp)
 static void
 intel_edp_set_sink_rates(struct intel_dp *intel_dp)
 {
+	struct intel_display *display = to_intel_display(intel_dp);
+	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
 	intel_dp->num_sink_rates = 0;
 
 	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
@@ -4198,10 +4220,7 @@ intel_edp_set_sink_rates(struct intel_dp *intel_dp)
 				 sink_rates, sizeof(sink_rates));
 
 		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
-			int val = le16_to_cpu(sink_rates[i]);
-
-			if (val == 0)
-				break;
+			int rate;
 
 			/* Value read multiplied by 200kHz gives the per-lane
 			 * link rate in kHz. The source rates are, however,
@@ -4209,7 +4228,24 @@ intel_edp_set_sink_rates(struct intel_dp *intel_dp)
 			 * back to symbols is
 			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
 			 */
-			intel_dp->sink_rates[i] = (val * 200) / 10;
+			rate = le16_to_cpu(sink_rates[i]) * 200 / 10;
+
+			if (rate == 0)
+				break;
+
+			/*
+			 * Some broken eDP sinks illegally declare support for
+			 * HBR3 without TPS4, and are unable to produce a stable
+			 * output. Reject HBR3 when TPS4 is not available.
+			 */
+			if (rate >= 810000 && !drm_dp_tps4_supported(intel_dp->dpcd)) {
+				drm_dbg_kms(display->drm,
+					    "[ENCODER:%d:%s] Rejecting HBR3 due to missing TPS4 support\n",
+					    encoder->base.base.id, encoder->base.name);
+				break;
+			}
+
+			intel_dp->sink_rates[i] = rate;
 		}
 		intel_dp->num_sink_rates = i;
 	}
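The intel_edp_set_sink_rates() loop above converts raw eDP DPCD table entries (per-lane link rate in 200 kHz units) into the driver's internal rate values via val * 200 / 10, and the new quirk drops entries at or above the HBR3 value when TPS4 is absent. A worked example with assumed table contents for the four standard rates:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical DPCD values, in 200 kHz units: RBR, HBR, HBR2, HBR3 */
	const uint16_t vals[] = { 8100, 13500, 27000, 40500 };

	for (int i = 0; i < 4; i++) {
		int rate = vals[i] * 200 / 10;	/* the driver's conversion */
		/* yields 162000, 270000, 540000, 810000 */
		printf("%u -> %d%s\n", vals[i], rate,
		       rate >= 810000 ? "  (rejected without TPS4)" : "");
	}
	return 0;
}

The same 810000 threshold appears in max_dprx_rate(), which clamps to 540000 (HBR2) instead of dropping the entry.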
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
index a95fb3349eba..9e024d153e83 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -222,7 +222,9 @@ int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state)
 	 * However if queried just before the start of vblank we'll get an
 	 * answer that's slightly in the future.
 	 */
-	if (DISPLAY_VER(display) == 2)
+	if (DISPLAY_VER(display) >= 20 || display->platform.battlemage)
+		return 1;
+	else if (DISPLAY_VER(display) == 2)
 		return -1;
 	else if (HAS_DDI(display) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
 		return 2;
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 509f9ccae3a9..dbad4d853d3a 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -222,7 +222,6 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
 	u8 *buf;
 	struct opregion_header *header;
 	struct vbt v;
-	const char opregion_signature[16] = OPREGION_SIGNATURE;
 
 	gvt_dbg_core("init vgpu%d opregion\n", vgpu->id);
 	vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
@@ -236,8 +235,10 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
 	/* emulated opregion with VBT mailbox only */
 	buf = (u8 *)vgpu_opregion(vgpu)->va;
 	header = (struct opregion_header *)buf;
-	memcpy(header->signature, opregion_signature,
-	       sizeof(opregion_signature));
+
+	static_assert(sizeof(header->signature) == sizeof(OPREGION_SIGNATURE) - 1);
+	memcpy(header->signature, OPREGION_SIGNATURE, sizeof(header->signature));
+
 	header->size = 0x8;
 	header->opregion_ver = 0x02000000;
 	header->mboxes = MBOX_VBT;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b96b8de12756..def73ce0c3ba 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -306,6 +306,7 @@ struct drm_i915_private {
 			INTEL_DRAM_DDR5,
 			INTEL_DRAM_LPDDR5,
 			INTEL_DRAM_GDDR,
+			INTEL_DRAM_GDDR_ECC,
 		} type;
 		u8 num_qgv_points;
 		u8 num_psf_gv_points;
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c
index 9e310f4099f4..f60eedb0e92c 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.c
+++ b/drivers/gpu/drm/i915/soc/intel_dram.c
@@ -687,6 +687,10 @@ static int xelpdp_get_dram_info(struct drm_i915_private *i915)
 		drm_WARN_ON(&i915->drm, !IS_DGFX(i915));
 		dram_info->type = INTEL_DRAM_GDDR;
 		break;
+	case 9:
+		drm_WARN_ON(&i915->drm, !IS_DGFX(i915));
+		dram_info->type = INTEL_DRAM_GDDR_ECC;
+		break;
 	default:
 		MISSING_CASE(val);
 		return -EINVAL;
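The gvt/opregion.c hunk above is a compact idiom worth noting: it copies a string-literal signature into a fixed, non-NUL-terminated header field and lets static_assert() verify at compile time that the field length equals the literal length minus the terminator. A standalone sketch of the same pattern — the 16-byte "IntelGraphicsMem" signature matches the OpRegion spec, but the struct here is illustrative:

#include <assert.h>
#include <string.h>

#define SIGNATURE "IntelGraphicsMem"	/* 16 characters + implicit NUL */

struct header {
	char signature[16];	/* deliberately not NUL-terminated */
};

static void write_signature(struct header *h)
{
	/* sizeof(SIGNATURE) counts the NUL, hence the -1 */
	static_assert(sizeof(h->signature) == sizeof(SIGNATURE) - 1,
		      "signature field must match the literal length");
	memcpy(h->signature, SIGNATURE, sizeof(h->signature));
}

Compared with the removed local array, this keeps the size relationship checked rather than assumed.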
diff --git a/drivers/gpu/drm/imagination/pvr_fw.c b/drivers/gpu/drm/imagination/pvr_fw.c
index 3debc9870a82..d09c4c684116 100644
--- a/drivers/gpu/drm/imagination/pvr_fw.c
+++ b/drivers/gpu/drm/imagination/pvr_fw.c
@@ -732,7 +732,7 @@ pvr_fw_process(struct pvr_device *pvr_dev)
 					       fw_mem->core_data, fw_mem->core_code_alloc_size);
 
 		if (err)
-			goto err_free_fw_core_data_obj;
+			goto err_free_kdata;
 
 	memcpy(fw_code_ptr, fw_mem->code, fw_mem->code_alloc_size);
 	memcpy(fw_data_ptr, fw_mem->data, fw_mem->data_alloc_size);
@@ -742,10 +742,14 @@ pvr_fw_process(struct pvr_device *pvr_dev)
 		memcpy(fw_core_data_ptr, fw_mem->core_data, fw_mem->core_data_alloc_size);
 
 	/* We're finished with the firmware section memory on the CPU, unmap. */
-	if (fw_core_data_ptr)
+	if (fw_core_data_ptr) {
 		pvr_fw_object_vunmap(fw_mem->core_data_obj);
-	if (fw_core_code_ptr)
+		fw_core_data_ptr = NULL;
+	}
+	if (fw_core_code_ptr) {
 		pvr_fw_object_vunmap(fw_mem->core_code_obj);
+		fw_core_code_ptr = NULL;
+	}
 	pvr_fw_object_vunmap(fw_mem->data_obj);
 	fw_data_ptr = NULL;
 	pvr_fw_object_vunmap(fw_mem->code_obj);
@@ -753,7 +757,7 @@ pvr_fw_process(struct pvr_device *pvr_dev)
 
 	err = pvr_fw_create_fwif_connection_ctl(pvr_dev);
 	if (err)
-		goto err_free_fw_core_data_obj;
+		goto err_free_kdata;
 
 	return 0;
 
@@ -763,13 +767,16 @@ err_free_kdata:
 	kfree(fw_mem->data);
 	kfree(fw_mem->code);
 
-err_free_fw_core_data_obj:
 	if (fw_core_data_ptr)
-		pvr_fw_object_unmap_and_destroy(fw_mem->core_data_obj);
+		pvr_fw_object_vunmap(fw_mem->core_data_obj);
+	if (fw_mem->core_data_obj)
+		pvr_fw_object_destroy(fw_mem->core_data_obj);
 
 err_free_fw_core_code_obj:
 	if (fw_core_code_ptr)
-		pvr_fw_object_unmap_and_destroy(fw_mem->core_code_obj);
+		pvr_fw_object_vunmap(fw_mem->core_code_obj);
+	if (fw_mem->core_code_obj)
+		pvr_fw_object_destroy(fw_mem->core_code_obj);
 
 err_free_fw_data_obj:
 	if (fw_data_ptr)
@@ -836,6 +843,12 @@ pvr_fw_cleanup(struct pvr_device *pvr_dev)
 	struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;
 
 	pvr_fw_fini_fwif_connection_ctl(pvr_dev);
+
+	kfree(fw_mem->core_data);
+	kfree(fw_mem->core_code);
+	kfree(fw_mem->data);
+	kfree(fw_mem->code);
+
 	if (fw_mem->core_code_obj)
 		pvr_fw_object_destroy(fw_mem->core_code_obj);
 	if (fw_mem->core_data_obj)
diff --git a/drivers/gpu/drm/imagination/pvr_job.c b/drivers/gpu/drm/imagination/pvr_job.c
index 618503a212a7..aad183a57371 100644
--- a/drivers/gpu/drm/imagination/pvr_job.c
+++ b/drivers/gpu/drm/imagination/pvr_job.c
@@ -677,6 +677,13 @@ pvr_jobs_link_geom_frag(struct pvr_job_data *job_data, u32 *job_count)
 		geom_job->paired_job = frag_job;
 		frag_job->paired_job = geom_job;
 
+		/* The geometry job pvr_job structure is used when the fragment
+		 * job is being prepared by the GPU scheduler. Have the fragment
+		 * job hold a reference on the geometry job to prevent it being
+		 * freed until the fragment job has finished with it.
+		 */
+		pvr_job_get(geom_job);
+
 		/* Skip the fragment job we just paired to the geometry job. */
 		i++;
 	}
diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c
index 43411be930a2..fa1afe1193e1 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.c
+++ b/drivers/gpu/drm/imagination/pvr_queue.c
@@ -866,6 +866,10 @@ static void pvr_queue_free_job(struct drm_sched_job *sched_job)
 	struct pvr_job *job = container_of(sched_job, struct pvr_job, base);
 
 	drm_sched_job_cleanup(sched_job);
+
+	if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job)
+		pvr_job_put(job->paired_job);
+
 	job->paired_job = NULL;
 	pvr_job_put(job);
 }
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index fb71658c3117..6067d08aeee3 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -223,7 +223,7 @@ void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mod
 	vsyncstr = mode->crtc_vsync_start - 1;
 	vsyncend = mode->crtc_vsync_end - 1;
 	vtotal = mode->crtc_vtotal - 2;
-	vblkstr = mode->crtc_vblank_start;
+	vblkstr = mode->crtc_vblank_start - 1;
 	vblkend = vtotal + 1;
 	linecomp = vdispend;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 699b0dd34b18..38c94915d4c9 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1169,49 +1169,50 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	u32 val;
+	int ret;
 
 	/*
-	 * The GMU may still be in slumber unless the GPU started so check and
-	 * skip putting it back into slumber if so
+	 * GMU firmware's internal power state gets messed up if we send "prepare_slumber" hfi when
+	 * oob_gpu handshake wasn't done after the last wake up. So do a dummy handshake here when
+	 * required
 	 */
-	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
+	if (adreno_gpu->base.needs_hw_init) {
+		if (a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET))
+			goto force_off;
 
-	if (val != 0xf) {
-		int ret = a6xx_gmu_wait_for_idle(gmu);
+		a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+	}
 
-		/* If the GMU isn't responding assume it is hung */
-		if (ret) {
-			a6xx_gmu_force_off(gmu);
-			return;
-		}
+	ret = a6xx_gmu_wait_for_idle(gmu);
 
-		a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
+	/* If the GMU isn't responding assume it is hung */
+	if (ret)
+		goto force_off;
 
-		/* tell the GMU we want to slumber */
-		ret = a6xx_gmu_notify_slumber(gmu);
-		if (ret) {
-			a6xx_gmu_force_off(gmu);
-			return;
-		}
+	a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
 
-		ret = gmu_poll_timeout(gmu,
-		       REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
-		       !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
-		       100, 10000);
+	/* tell the GMU we want to slumber */
+	ret = a6xx_gmu_notify_slumber(gmu);
+	if (ret)
+		goto force_off;
 
-		/*
-		 * Let the user know we failed to slumber but don't worry too
-		 * much because we are powering down anyway
-		 */
+	ret = gmu_poll_timeout(gmu,
+	       REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
+	       !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
+	       100, 10000);
 
-		if (ret)
-			DRM_DEV_ERROR(gmu->dev,
-				"Unable to slumber GMU: status = 0%x/0%x\n",
-				gmu_read(gmu,
-					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
-				gmu_read(gmu,
-					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
-	}
+	/*
+	 * Let the user know we failed to slumber but don't worry too
+	 * much because we are powering down anyway
+	 */
+
+	if (ret)
+		DRM_DEV_ERROR(gmu->dev,
+			"Unable to slumber GMU: status = 0%x/0%x\n",
+			gmu_read(gmu,
+				REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
+			gmu_read(gmu,
+				REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
 
 	/* Turn off HFI */
 	a6xx_hfi_stop(gmu);
@@ -1221,6 +1222,11 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
 
 	/* Tell RPMh to power off the GPU */
 	a6xx_rpmh_stop(gmu);
+
+	return;
+
+force_off:
+	a6xx_gmu_force_off(gmu);
 }
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 0ae29a7c8a4d..2a317cdb8eaa 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -242,10 +242,10 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 				break;
 			fallthrough;
 		case MSM_SUBMIT_CMD_BUF:
-			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+			OUT_PKT7(ring, CP_INDIRECT_BUFFER, 3);
 			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
 			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
-			OUT_RING(ring, submit->cmd[i].size);
+			OUT_RING(ring, A5XX_CP_INDIRECT_BUFFER_2_IB_SIZE(submit->cmd[i].size));
 			ibs++;
 			break;
 		}
@@ -377,10 +377,10 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 				break;
 			fallthrough;
 		case MSM_SUBMIT_CMD_BUF:
-			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+			OUT_PKT7(ring, CP_INDIRECT_BUFFER, 3);
 			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
 			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
-			OUT_RING(ring, submit->cmd[i].size);
+			OUT_RING(ring, A5XX_CP_INDIRECT_BUFFER_2_IB_SIZE(submit->cmd[i].size));
 			ibs++;
 			break;
 		}
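The a6xx/a7xx submit hunks above stop writing the raw command-buffer size into the CP_INDIRECT_BUFFER packet and instead pass it through A5XX_CP_INDIRECT_BUFFER_2_IB_SIZE(), matching the register domain added to adreno_pm4.xml further down, where IB_SIZE occupies bits 0..19 of the third dword. A sketch of the implied packing — the mask is derived from that XML bitfield; the helper name and body here are illustrative, not the generated header:

#include <stdint.h>

#define IB_SIZE_MASK ((1u << 20) - 1)	/* bits [19:0] per the XML bitfield */

static inline uint32_t pack_ib_size(uint32_t size_dwords)
{
	/* keep only the 20-bit size field so stray upper bits never
	 * reach the command processor */
	return size_dwords & IB_SIZE_MASK;
}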
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
index ab3dfb0b374e..cb89d8cf300f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
@@ -132,7 +132,6 @@ static const struct dpu_intf_cfg msm8937_intf[] = {
 		.prog_fetch_lines_worst_case = 14,
 		.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
 		.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
-		.intr_tear_rd_ptr = -1,
 	}, {
 		.name = "intf_2", .id = INTF_2,
 		.base = 0x6b000, .len = 0x268,
@@ -141,7 +140,6 @@ static const struct dpu_intf_cfg msm8937_intf[] = {
 		.prog_fetch_lines_worst_case = 14,
 		.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
 		.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
-		.intr_tear_rd_ptr = -1,
 	},
 };
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
index 6bdaecca6761..b2a38e62d3a9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
@@ -118,7 +118,6 @@ static const struct dpu_intf_cfg msm8917_intf[] = {
 		.prog_fetch_lines_worst_case = 14,
 		.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
 		.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
-		.intr_tear_rd_ptr = -1,
 	},
 };
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
index 14f36ea6ad0e..06859984d2a5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
@@ -131,7 +131,6 @@ static const struct dpu_intf_cfg msm8953_intf[] = {
 		.prog_fetch_lines_worst_case = 14,
 		.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
 		.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
-		.intr_tear_rd_ptr = -1,
 	}, {
 		.name = "intf_1", .id = INTF_1,
 		.base = 0x6a800, .len = 0x268,
@@ -140,7 +139,6 @@ static const struct dpu_intf_cfg msm8953_intf[] = {
 		.prog_fetch_lines_worst_case = 14,
 		.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
 		.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
-		.intr_tear_rd_ptr = -1,
 	}, {
 		.name = "intf_2", .id = INTF_2,
 		.base = 0x6b000, .len = 0x268,
@@ -149,7 +147,6 @@ static const struct dpu_intf_cfg msm8953_intf[] = {
 		.prog_fetch_lines_worst_case = 14,
 		.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
 		.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
-		.intr_tear_rd_ptr = -1,
 	},
 };
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
index 491f6f5827d1..153e275513c3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
@@ -241,7 +241,6 @@ static const struct dpu_intf_cfg msm8996_intf[] = {
 		.prog_fetch_lines_worst_case = 25,
 		.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
 		.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
-		.intr_tear_rd_ptr = -1,
 	}, {
 		.name = "intf_1", .id = INTF_1,
 		.base = 0x6a800, .len = 0x268,
@@ -250,7 +249,6 @@ static const struct dpu_intf_cfg msm8996_intf[] = {
 		.prog_fetch_lines_worst_case = 25,
 		.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
 		.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
-		.intr_tear_rd_ptr = -1,
 	}, {
 		.name = "intf_2", .id = INTF_2,
 		.base = 0x6b000, .len = 0x268,
@@ -259,7 +257,6 @@ static const struct dpu_intf_cfg msm8996_intf[] = {
 		.prog_fetch_lines_worst_case = 25,
 		.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
 		.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
-		.intr_tear_rd_ptr = -1,
 	}, {
 		.name = "intf_3", .id = INTF_3,
 		.base = 0x6b800, .len = 0x268,
@@ -267,7 +264,6 @@ static const struct dpu_intf_cfg msm8996_intf[] = {
 		.prog_fetch_lines_worst_case = 25,
 		.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
 		.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
-		.intr_tear_rd_ptr = -1,
 	},
 };
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
index 424815e7fb7d..f247625ed7e5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
@@ -202,7 +202,6 @@ static const struct dpu_intf_cfg sdm660_intf[] = {
 		.prog_fetch_lines_worst_case = 21,
 		.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
 		.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
-		.intr_tear_rd_ptr = -1,
 	}, {
 		.name = "intf_1", .id = INTF_1,
 		.base = 0x6a800, .len = 0x280,
@@ -211,7 +210,6 @@ static const struct dpu_intf_cfg sdm660_intf[] = {
 		.prog_fetch_lines_worst_case = 21,
 		.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
 		.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
-		.intr_tear_rd_ptr = -1,
 	}, {
 		.name = "intf_2", .id = INTF_2,
 		.base = 0x6b000, .len = 0x280,
@@ -220,7 +218,6 @@ static const struct dpu_intf_cfg sdm660_intf[] = {
 		.prog_fetch_lines_worst_case = 21,
 		.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
 		.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
-		.intr_tear_rd_ptr = -1,
 	},
 };
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
index df01227fc364..5ddd2b3c0cd2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
@@ -147,7 +147,6 @@ static const struct dpu_intf_cfg sdm630_intf[] = {
 		.prog_fetch_lines_worst_case = 21,
 		.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
 		.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
-		.intr_tear_rd_ptr = -1,
 	}, {
 		.name = "intf_1", .id = INTF_1,
 		.base = 0x6a800, .len = 0x280,
@@ -156,7 +155,6 @@ static const struct dpu_intf_cfg sdm630_intf[] = {
 		.prog_fetch_lines_worst_case = 21,
 		.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
 		.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
-		.intr_tear_rd_ptr = -1,
 	},
 };
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index af3e541f60c3..b19193b02ab3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -1059,6 +1059,9 @@ static int dpu_plane_virtual_atomic_check(struct drm_plane *plane,
 	struct drm_crtc_state *crtc_state;
 	int ret;
 
+	if (IS_ERR(plane_state))
+		return PTR_ERR(plane_state);
+
 	if (plane_state->crtc)
 		crtc_state = drm_atomic_get_new_crtc_state(state,
 							   plane_state->crtc);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 42e100a8adca..e9b824ed4353 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1827,8 +1827,15 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
 				__func__, ret);
 			goto err;
 		}
-		if (!ret)
+		if (!ret) {
 			msm_dsi->te_source = devm_kstrdup(dev, te_source, GFP_KERNEL);
+			if (!msm_dsi->te_source) {
+				DRM_DEV_ERROR(dev, "%s: failed to allocate te_source\n",
+					      __func__);
+				ret = -ENOMEM;
+				goto err;
+			}
+		}
 		ret = 0;
 
 		if (of_property_present(np, "syscon-sfpb")) {
diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
index 55a35182858c..5a6ae9fc3194 100644
--- a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
@@ -2259,5 +2259,12 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
 	</reg32>
 </domain>
 
+<domain name="CP_INDIRECT_BUFFER" width="32" varset="chip" prefix="chip" variants="A5XX-">
+	<reg64 offset="0" name="IB_BASE" type="address"/>
+	<reg32 offset="2" name="2">
+		<bitfield name="IB_SIZE" low="0" high="19"/>
+	</reg32>
+</domain>
+
 </database>
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index db961eade225..2016c1e7242f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -144,6 +144,9 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 	nouveau_bo_del_io_reserve_lru(bo);
 	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
 
+	if (bo->base.import_attach)
+		drm_prime_gem_destroy(&bo->base, bo->sg);
+
 	/*
 	 * If nouveau_bo_new() allocated this buffer, the GEM object was never
 	 * initialized, so don't attempt to release it.
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 9ae2cee1c7c5..67e3c99de73a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -87,9 +87,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
 		return;
 	}
 
-	if (gem->import_attach)
-		drm_prime_gem_destroy(gem, nvbo->bo.sg);
-
 	ttm_bo_put(&nvbo->bo);
 
 	pm_runtime_mark_last_busy(dev);
diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile
index f203ac5514ae..f778a4eee7c9 100644
--- a/drivers/gpu/drm/sti/Makefile
+++ b/drivers/gpu/drm/sti/Makefile
@@ -7,8 +7,6 @@ sti-drm-y := \
 	sti_compositor.o \
 	sti_crtc.o \
 	sti_plane.o \
-	sti_crtc.o \
-	sti_plane.o \
 	sti_hdmi.o \
 	sti_hdmi_tx3g4c28phy.o \
 	sti_dvo.o \
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index 52ba6c699bc8..5c3b51eb0a97 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -456,7 +456,7 @@ static void repaper_frame_fixed_repeat(struct repaper_epd *epd, u8 fixed_value,
 				       enum repaper_stage stage)
 {
 	u64 start = local_clock();
-	u64 end = start + (epd->factored_stage_time * 1000 * 1000);
+	u64 end = start + ((u64)epd->factored_stage_time * 1000 * 1000);
 
 	do {
 		repaper_frame_fixed(epd, fixed_value, stage);
@@ -467,7 +467,7 @@ static void repaper_frame_data_repeat(struct repaper_epd *epd, const u8 *image,
 				      const u8 *mask, enum repaper_stage stage)
 {
 	u64 start = local_clock();
-	u64 end = start + (epd->factored_stage_time * 1000 * 1000);
+	u64 end = start + ((u64)epd->factored_stage_time * 1000 * 1000);
 
 	do {
 		repaper_frame_data(epd, image, mask, stage);
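The repaper.c fix above is the classic "widen before you multiply" bug: with a 32-bit factored_stage_time, the product time_ms * 1000 * 1000 is evaluated entirely in 32-bit arithmetic and wraps once time_ms reaches 4295 (4295 * 10^6 exceeds UINT32_MAX), even though the result is then added to a u64. Casting the first operand forces the whole product into 64 bits. A minimal sketch, assuming a u32 millisecond input:

#include <stdint.h>

static uint64_t stage_end_buggy(uint64_t start_ns, uint32_t time_ms)
{
	return start_ns + (time_ms * 1000 * 1000);           /* 32-bit wrap */
}

static uint64_t stage_end_fixed(uint64_t start_ns, uint32_t time_ms)
{
	return start_ns + ((uint64_t)time_ms * 1000 * 1000); /* 64-bit math */
}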
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 05608c894ed9..6db503a56918 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -428,7 +428,8 @@ v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
 	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
 	struct v3d_bo *indirect = to_v3d_bo(indirect_csd->indirect);
 	struct drm_v3d_submit_csd *args = &indirect_csd->job->args;
-	u32 *wg_counts;
+	struct v3d_dev *v3d = job->base.v3d;
+	u32 num_batches, *wg_counts;
 
 	v3d_get_bo_vaddr(bo);
 	v3d_get_bo_vaddr(indirect);
@@ -441,8 +442,17 @@ v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
 	args->cfg[0] = wg_counts[0] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
 	args->cfg[1] = wg_counts[1] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
 	args->cfg[2] = wg_counts[2] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
-	args->cfg[4] = DIV_ROUND_UP(indirect_csd->wg_size, 16) *
-		       (wg_counts[0] * wg_counts[1] * wg_counts[2]) - 1;
+
+	num_batches = DIV_ROUND_UP(indirect_csd->wg_size, 16) *
+		      (wg_counts[0] * wg_counts[1] * wg_counts[2]);
+
+	/* V3D 7.1.6 and later don't subtract 1 from the number of batches */
+	if (v3d->ver < 71 || (v3d->ver == 71 && v3d->rev < 6))
+		args->cfg[4] = num_batches - 1;
+	else
+		args->cfg[4] = num_batches;
+
+	WARN_ON(args->cfg[4] == ~0);
 
 	for (int i = 0; i < 3; i++) {
 		/* 0xffffffff indicates that the uniform rewrite is not needed */
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 5aab588fc400..3d6aa26fdb53 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -115,13 +115,14 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
 	if (!vgdev->has_context_init)
 		virtio_gpu_create_context(obj->dev, file);
 
-	objs = virtio_gpu_array_alloc(1);
-	if (!objs)
-		return -ENOMEM;
-	virtio_gpu_array_add_obj(objs, obj);
+	if (vfpriv->context_created) {
+		objs = virtio_gpu_array_alloc(1);
+		if (!objs)
+			return -ENOMEM;
+		virtio_gpu_array_add_obj(objs, obj);
 
-	if (vfpriv->ctx_id)
 		virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id, objs);
+	}
 
 out_notify:
 	virtio_gpu_notify(vgdev);
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 42aa554eca9f..26abe3d1b122 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -322,12 +322,6 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
 		return 0;
 
 	obj = new_state->fb->obj[0];
-	if (obj->import_attach) {
-		ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj);
-		if (ret)
-			return ret;
-	}
-
 	if (bo->dumb || obj->import_attach) {
 		vgplane_st->fence = virtio_gpu_fence_alloc(vgdev,
 							   vgdev->fence_drv.context,
@@ -336,7 +330,21 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
 			return -ENOMEM;
 	}
 
+	if (obj->import_attach) {
+		ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj);
+		if (ret)
+			goto err_fence;
+	}
+
 	return 0;
+
+err_fence:
+	if (vgplane_st->fence) {
+		dma_fence_put(&vgplane_st->fence->f);
+		vgplane_st->fence = NULL;
+	}
+
+	return ret;
 }
 
 static void virtio_gpu_cleanup_imported_obj(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 8a7b15972413..eaf7a5bf0a38 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -559,6 +559,7 @@ struct xe_device {
 			INTEL_DRAM_DDR5,
 			INTEL_DRAM_LPDDR5,
 			INTEL_DRAM_GDDR,
+			INTEL_DRAM_GDDR_ECC,
 		} type;
 		u8 num_qgv_points;
 		u8 num_psf_gv_points;
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
index f67803e15a0e..f7a20264ea33 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -145,10 +145,7 @@ static void xe_dma_buf_unmap(struct dma_buf_attachment *attach,
 			     struct sg_table *sgt,
 			     enum dma_data_direction dir)
 {
-	struct dma_buf *dma_buf = attach->dmabuf;
-	struct xe_bo *bo = gem_to_xe_bo(dma_buf->priv);
-
-	if (!xe_bo_is_vram(bo)) {
+	if (sg_page(sgt->sgl)) {
 		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
 		sg_free_table(sgt);
 		kfree(sgt);
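The xe_dma_buf hunk above changes how the unmap path decides what kind of sg_table it is tearing down: instead of re-deriving the BO's placement (which may have changed since map time), it inspects the table itself. A table built from system-memory pages has a valid first page pointer; a table carrying only DMA addresses (VRAM or peer-to-peer) does not. A hedged sketch of the pattern, with a helper name of our own invention:

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

/*
 * Hypothetical helper illustrating the technique: decide how to tear down
 * an sg_table by inspecting the table, not the buffer's current placement.
 */
static void example_unmap_sgt(struct device *dev, struct sg_table *sgt,
			      enum dma_data_direction dir)
{
	/*
	 * A table that maps struct pages was produced via dma_map_sgtable()
	 * and must be unmapped and freed here. A table holding only DMA
	 * addresses is owned and torn down by its resource manager instead.
	 */
	if (sg_page(sgt->sgl)) {
		dma_unmap_sgtable(dev, sgt, dir, 0);
		sg_free_table(sgt);
		kfree(sgt);
	}
}

The design point is that the sg_table encodes its own provenance, so the unmap path stays correct even if the exporter migrated the buffer between map and unmap.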
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 0a93831c0a02..9405d83d4db2 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -322,6 +322,13 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
 	return 0;
 }
 
+/*
+ * Ensure that roundup_pow_of_two(length) doesn't overflow.
+ * Note that roundup_pow_of_two() operates on unsigned long,
+ * not on u64.
+ */
+#define MAX_RANGE_TLB_INVALIDATION_LENGTH (rounddown_pow_of_two(ULONG_MAX))
+
 /**
  * xe_gt_tlb_invalidation_range - Issue a TLB invalidation on this GT for an
  * address range
@@ -346,6 +353,7 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
 	struct xe_device *xe = gt_to_xe(gt);
 #define MAX_TLB_INVALIDATION_LEN 7
 	u32 action[MAX_TLB_INVALIDATION_LEN];
+	u64 length = end - start;
 	int len = 0;
 
 	xe_gt_assert(gt, fence);
@@ -358,11 +366,11 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
 	action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
 	action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
 
-	if (!xe->info.has_range_tlb_invalidation) {
+	if (!xe->info.has_range_tlb_invalidation ||
+	    length > MAX_RANGE_TLB_INVALIDATION_LENGTH) {
 		action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
 	} else {
 		u64 orig_start = start;
-		u64 length = end - start;
 		u64 align;
 
 		if (length < SZ_4K)
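The guard above exists because the ranged-invalidation path later rounds the length up to a power of two, and roundup_pow_of_two() takes an unsigned long: a length above the largest representable power of two would overflow the rounding (and a u64 length is truncated outright on 32-bit builds). Any range larger than rounddown_pow_of_two(ULONG_MAX) therefore falls back to a full TLB flush. A short sketch of just that decision, using hypothetical names:

#include <linux/log2.h>
#include <linux/limits.h>
#include <linux/types.h>

/* Largest length for which roundup_pow_of_two() is still well defined. */
#define EXAMPLE_MAX_RANGE_LEN rounddown_pow_of_two(ULONG_MAX)

/* Hypothetical helper: pick between a ranged and a full invalidation. */
static bool example_use_full_invalidation(bool has_range_inval, u64 length)
{
	/*
	 * roundup_pow_of_two(length) would overflow for lengths above the
	 * top power of two of unsigned long, so treat those as "flush all".
	 */
	return !has_range_inval || length > EXAMPLE_MAX_RANGE_LEN;
}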
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index fab259adc380..bcbbe0d99306 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -490,24 +490,52 @@ static void fill_engine_enable_masks(struct xe_gt *gt,
 			   engine_enable_mask(gt, XE_ENGINE_CLASS_OTHER));
 }
 
-static void guc_prep_golden_lrc_null(struct xe_guc_ads *ads)
+/*
+ * Write the offsets corresponding to the golden LRCs. The actual data is
+ * populated later by guc_golden_lrc_populate()
+ */
+static void guc_golden_lrc_init(struct xe_guc_ads *ads)
 {
 	struct xe_device *xe = ads_to_xe(ads);
+	struct xe_gt *gt = ads_to_gt(ads);
 	struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
 			offsetof(struct __guc_ads_blob, system_info));
-	u8 guc_class;
+	size_t alloc_size, real_size;
+	u32 addr_ggtt, offset;
+	int class;
+
+	offset = guc_ads_golden_lrc_offset(ads);
+	addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset;
+
+	for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
+		u8 guc_class;
+
+		guc_class = xe_engine_class_to_guc_class(class);
 
-	for (guc_class = 0; guc_class <= GUC_MAX_ENGINE_CLASSES; ++guc_class) {
 		if (!info_map_read(xe, &info_map,
 				   engine_enabled_masks[guc_class]))
 			continue;
 
+		real_size = xe_gt_lrc_size(gt, class);
+		alloc_size = PAGE_ALIGN(real_size);
+
+		/*
+		 * This interface is slightly confusing. We need to pass the
+		 * base address of the full golden context and the size of just
+		 * the engine state, which is the section of the context image
+		 * that starts after the execlists LRC registers. This is
+		 * required to allow the GuC to restore just the engine state
+		 * when a watchdog reset occurs.
+		 * We calculate the engine state size by removing the size of
+		 * what comes before it in the context image (which is identical
+		 * on all engines).
+		 */
 		ads_blob_write(ads, ads.eng_state_size[guc_class],
-			       guc_ads_golden_lrc_size(ads) -
-			       xe_lrc_skip_size(xe));
+			       real_size - xe_lrc_skip_size(xe));
 		ads_blob_write(ads, ads.golden_context_lrca[guc_class],
-			       xe_bo_ggtt_addr(ads->bo) +
-			       guc_ads_golden_lrc_offset(ads));
+			       addr_ggtt);
+
+		addr_ggtt += alloc_size;
 	}
 }
 
@@ -857,7 +885,7 @@ void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads)
 
 	xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size);
 	guc_policies_init(ads);
-	guc_prep_golden_lrc_null(ads);
+	guc_golden_lrc_init(ads);
 	guc_mapping_table_init_invalid(gt, &info_map);
 	guc_doorbell_init(ads);
 
@@ -883,7 +911,7 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads)
 	guc_policies_init(ads);
 	fill_engine_enable_masks(gt, &info_map);
 	guc_mmio_reg_state_init(ads);
-	guc_prep_golden_lrc_null(ads);
+	guc_golden_lrc_init(ads);
 	guc_mapping_table_init(gt, &info_map);
 	guc_capture_prep_lists(ads);
 	guc_doorbell_init(ads);
@@ -903,18 +931,22 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads)
 			guc_ads_private_data_offset(ads));
 }
 
-static void guc_populate_golden_lrc(struct xe_guc_ads *ads)
+/*
+ * After the golden LRC's are recorded for each engine class by the first
+ * submission, copy them to the ADS, as initialized earlier by
+ * guc_golden_lrc_init().
+ */
+static void guc_golden_lrc_populate(struct xe_guc_ads *ads)
 {
 	struct xe_device *xe = ads_to_xe(ads);
 	struct xe_gt *gt = ads_to_gt(ads);
 	struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
 			offsetof(struct __guc_ads_blob, system_info));
 	size_t total_size = 0, alloc_size, real_size;
-	u32 addr_ggtt, offset;
+	u32 offset;
 	int class;
 
 	offset = guc_ads_golden_lrc_offset(ads);
-	addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset;
 
 	for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
 		u8 guc_class;
@@ -931,26 +963,9 @@ static void guc_populate_golden_lrc(struct xe_guc_ads *ads)
 		alloc_size = PAGE_ALIGN(real_size);
 		total_size += alloc_size;
 
-		/*
-		 * This interface is slightly confusing. We need to pass the
-		 * base address of the full golden context and the size of just
-		 * the engine state, which is the section of the context image
-		 * that starts after the execlists LRC registers. This is
-		 * required to allow the GuC to restore just the engine state
-		 * when a watchdog reset occurs.
-		 * We calculate the engine state size by removing the size of
-		 * what comes before it in the context image (which is identical
-		 * on all engines).
-		 */
-		ads_blob_write(ads, ads.eng_state_size[guc_class],
-			       real_size - xe_lrc_skip_size(xe));
-		ads_blob_write(ads, ads.golden_context_lrca[guc_class],
-			       addr_ggtt);
-
 		xe_map_memcpy_to(xe, ads_to_map(ads), offset,
 				 gt->default_lrc[class], real_size);
 
-		addr_ggtt += alloc_size;
 		offset += alloc_size;
 	}
 
@@ -959,7 +974,7 @@ static void guc_golden_lrc_populate(struct xe_guc_ads *ads)
 
 void xe_guc_ads_populate_post_load(struct xe_guc_ads *ads)
 {
-	guc_populate_golden_lrc(ads);
+	guc_golden_lrc_populate(ads);
 }
 
 static int guc_ads_action_update_policies(struct xe_guc_ads *ads, u32 policy_offset)
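The xe_guc_ads refactor above splits one function into two phases over the same ADS layout: guc_golden_lrc_init() writes each enabled class's GGTT address and engine-state size up front (advancing by PAGE_ALIGN(real_size) per slot), and guc_golden_lrc_populate() later copies the recorded context images into those same slots. The correctness hinge is that both phases walk the layout identically. A minimal sketch of that shared walk, with hypothetical types and names:

#include <linux/mm.h>	/* PAGE_ALIGN */
#include <linux/types.h>

/* Hypothetical slot description shared by an init and a populate phase. */
struct example_lrc_slot {
	u32	addr_ggtt;	/* where this class's golden context lives */
	size_t	real_size;	/* bytes the populate phase will copy */
};

static void example_layout_slots(struct example_lrc_slot *slots,
				 const size_t *lrc_size, int num_classes,
				 u32 base_ggtt)
{
	u32 addr_ggtt = base_ggtt;

	for (int class = 0; class < num_classes; ++class) {
		slots[class].addr_ggtt = addr_ggtt;
		slots[class].real_size = lrc_size[class];
		/* Slots are page aligned; both phases must advance alike. */
		addr_ggtt += PAGE_ALIGN(lrc_size[class]);
	}
}

The benefit visible in the diff is that addresses and sizes no longer depend on data that only exists after first submission, so they can be written into the ADS before the GuC is loaded.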
diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c
index c3cc0fa105e8..57b71956ddf4 100644
--- a/drivers/gpu/drm/xe/xe_hmm.c
+++ b/drivers/gpu/drm/xe/xe_hmm.c
@@ -19,29 +19,6 @@ static u64 xe_npages_in_range(unsigned long start, unsigned long end)
 	return (end - start) >> PAGE_SHIFT;
 }
 
-/**
- * xe_mark_range_accessed() - mark a range is accessed, so core mm
- * have such information for memory eviction or write back to
- * hard disk
- * @range: the range to mark
- * @write: if write to this range, we mark pages in this range
- * as dirty
- */
-static void xe_mark_range_accessed(struct hmm_range *range, bool write)
-{
-	struct page *page;
-	u64 i, npages;
-
-	npages = xe_npages_in_range(range->start, range->end);
-	for (i = 0; i < npages; i++) {
-		page = hmm_pfn_to_page(range->hmm_pfns[i]);
-		if (write)
-			set_page_dirty_lock(page);
-
-		mark_page_accessed(page);
-	}
-}
-
 static int xe_alloc_sg(struct xe_device *xe, struct sg_table *st,
 		       struct hmm_range *range, struct rw_semaphore *notifier_sem)
 {
@@ -331,7 +308,6 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
 	if (ret)
 		goto out_unlock;
 
-	xe_mark_range_accessed(&hmm_range, write);
 	userptr->sg = &userptr->sgt;
 	xe_hmm_userptr_set_mapped(uvma);
 	userptr->notifier_seq = hmm_range.notifier_seq;
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 278bc96cf593..fb93345b3770 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -1177,7 +1177,7 @@ err:
 err_sync:
 	/* Sync partial copies if any. FIXME: job_mutex? */
 	if (fence) {
-		dma_fence_wait(m->fence, false);
+		dma_fence_wait(fence, false);
 		dma_fence_put(fence);
 	}
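The xe_migrate hunk above corrects a mismatched wait/put pair on the error path: the code dropped the local fence tracking the partial copies but waited on a different, shared fence pointer (m->fence), so the work being released was not guaranteed to have finished. The general pattern, as a hedged sketch with a hypothetical helper name:

#include <linux/dma-fence.h>

/*
 * Hypothetical error-path helper: wait on and put the same local fence.
 * Waiting on some other fence does not guarantee that the partial work
 * tracked by this one has completed before its reference is released.
 */
static void example_sync_partial_copies(struct dma_fence *fence)
{
	if (fence) {
		dma_fence_wait(fence, false);	/* false: non-interruptible */
		dma_fence_put(fence);
	}
}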