| field     | value                                                  | date                      |
|-----------|--------------------------------------------------------|---------------------------|
| author    | Linus Torvalds <torvalds@linux-foundation.org>         | 2021-05-09 23:42:39 +0300 |
| committer | Linus Torvalds <torvalds@linux-foundation.org>         | 2021-05-09 23:42:39 +0300 |
| commit    | efc58a96adcd29cc37487a60582d9d08b34f6640 (patch)       |                           |
| tree      | a295fc3403d252bc9f41d60e55b23a8a2164dde8 /drivers/gpu  |                           |
| parent    | 506c30790f5409ce58aa21c14d7c2aa86df328f5 (diff)        |                           |
| parent    | 0844708ac3d2dbdace70f4a6020669d56958697f (diff)        |                           |
| download  | linux-efc58a96adcd29cc37487a60582d9d08b34f6640.tar.xz  |                           |
Merge tag 'drm-next-2021-05-10' of git://anongit.freedesktop.org/drm/drm
Pull drm fixes from Dave Airlie:
"Bit later than usual, I queued them all up on Friday then promptly
forgot to write the pull request email. This is mainly amdgpu fixes,
with some radeon/msm/fbdev and one i915 gvt fix thrown in.
amdgpu:
- MPO hang workaround
- Fix for concurrent VM flushes on vega/navi
- dcefclk is not adjustable on navi1x and newer
- MST HPD debugfs fix
- Suspend/resume fixes
- Register VGA clients late in case driver fails to load
- Fix GEM leak in user framebuffer create
- Add support for polaris12 with a 32-bit memory interface
- Fix duplicate cursor issue when using overlay
- Fix corruption with tiled surfaces on VCN3
- Add BO size and stride check to fix BO size verification
radeon:
- Fix off-by-one in power state parsing
- Fix possible memory leak in power state parsing
msm:
- NULL ptr dereference fix
fbdev:
- procfs disabled warning fix
i915:
- gvt: Fix a possible division by zero in vgpu display rate
calculation"
* tag 'drm-next-2021-05-10' of git://anongit.freedesktop.org/drm/drm:
drm/amdgpu: Use device specific BO size & stride check.
drm/amdgpu: Init GFX10_ADDR_CONFIG for VCN v3 in DPG mode.
drm/amd/pm: initialize variable
drm/radeon: Avoid power table parsing memory leaks
drm/radeon: Fix off-by-one power_state index heap overwrite
drm/amd/display: Fix two cursor duplication when using overlay
drm/amdgpu: add new MC firmware for Polaris12 32bit ASIC
fbmem: Mark proc_fb_seq_ops as __maybe_unused
drm/msm/dpu: Delete bonkers code
drm/i915/gvt: Prevent divided by zero when calculating refresh rate
amdgpu: fix GEM obj leak in amdgpu_display_user_framebuffer_create
drm/amdgpu: Register VGA clients after init can no longer fail
drm/amdgpu: Handling of amdgpu_device_resume return value for graceful teardown
drm/amdgpu: fix r initial values
drm/amd/display: fix wrong statement in mst hpd debugfs
amdgpu/pm: set pp_dpm_dcefclk to readonly on NAVI10 and newer gpus
amdgpu/pm: Prevent force of DCEFCLK on NAVI10 and SIENNA_CICHLID
drm/amdgpu: fix concurrent VM flushes on Vega/Navi v2
drm/amd/display: Reject non-zero src_y and src_x for video planes
Diffstat (limited to 'drivers/gpu')
| mode       | path                                                       | lines changed |
|------------|------------------------------------------------------------|---------------|
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_device.c                 | 28            |
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_display.c                | 184           |
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c                    | 3             |
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c                    | 19            |
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c                     | 6             |
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h                     | 1             |
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c                      | 13            |
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c                      | 4             |
| -rw-r--r-- | drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c          | 68            |
| -rw-r--r-- | drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c  | 2             |
| -rw-r--r-- | drivers/gpu/drm/amd/pm/amdgpu_pm.c                         | 10            |
| -rw-r--r-- | drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c            | 5             |
| -rw-r--r-- | drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c    | 4             |
| -rw-r--r-- | drivers/gpu/drm/i915/gvt/handlers.c                        | 6             |
| -rw-r--r-- | drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c                   | 10            |
| -rw-r--r-- | drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c                  | 16            |
| -rw-r--r-- | drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h                  | 6             |
| -rw-r--r-- | drivers/gpu/drm/radeon/radeon_atombios.c                   | 26            |
18 files changed, 331 insertions, 80 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index b4ad1c055c70..7d3b54615147 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3410,19 +3410,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, /* doorbell bar mapping and doorbell index init*/ amdgpu_device_doorbell_init(adev); - /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */ - /* this will fail for cards that aren't VGA class devices, just - * ignore it */ - if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) - vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode); - - if (amdgpu_device_supports_px(ddev)) { - px = true; - vga_switcheroo_register_client(adev->pdev, - &amdgpu_switcheroo_ops, px); - vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); - } - if (amdgpu_emu_mode == 1) { /* post the asic on emulation mode */ emu_soc_asic_init(adev); @@ -3619,6 +3606,19 @@ fence_driver_init: if (amdgpu_device_cache_pci_state(adev->pdev)) pci_restore_state(pdev); + /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */ + /* this will fail for cards that aren't VGA class devices, just + * ignore it */ + if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) + vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode); + + if (amdgpu_device_supports_px(ddev)) { + px = true; + vga_switcheroo_register_client(adev->pdev, + &amdgpu_switcheroo_ops, px); + vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); + } + if (adev->gmc.xgmi.pending_reset) queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work, msecs_to_jiffies(AMDGPU_RESUME_MS)); @@ -3630,8 +3630,6 @@ release_ras_con: failed: amdgpu_vf_error_trans_all(adev); - if (px) - vga_switcheroo_fini_domain_pm_ops(adev->dev); failed_unmap: iounmap(adev->rmmio); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 2e622c1675d7..8a1fb8b6606e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -837,6 +837,174 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb) return 0; } +static void get_block_dimensions(unsigned int block_log2, unsigned int cpp, + unsigned int *width, unsigned int *height) +{ + unsigned int cpp_log2 = ilog2(cpp); + unsigned int pixel_log2 = block_log2 - cpp_log2; + unsigned int width_log2 = (pixel_log2 + 1) / 2; + unsigned int height_log2 = pixel_log2 - width_log2; + + *width = 1 << width_log2; + *height = 1 << height_log2; +} + +static unsigned int get_dcc_block_size(uint64_t modifier, bool rb_aligned, + bool pipe_aligned) +{ + unsigned int ver = AMD_FMT_MOD_GET(TILE_VERSION, modifier); + + switch (ver) { + case AMD_FMT_MOD_TILE_VER_GFX9: { + /* + * TODO: for pipe aligned we may need to check the alignment of the + * total size of the surface, which may need to be bigger than the + * natural alignment due to some HW workarounds + */ + return max(10 + (rb_aligned ? (int)AMD_FMT_MOD_GET(RB, modifier) : 0), 12); + } + case AMD_FMT_MOD_TILE_VER_GFX10: + case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS: { + int pipes_log2 = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier); + + if (ver == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS && pipes_log2 > 1 && + AMD_FMT_MOD_GET(PACKERS, modifier) == pipes_log2) + ++pipes_log2; + + return max(8 + (pipe_aligned ? 
pipes_log2 : 0), 12); + } + default: + return 0; + } +} + +static int amdgpu_display_verify_plane(struct amdgpu_framebuffer *rfb, int plane, + const struct drm_format_info *format, + unsigned int block_width, unsigned int block_height, + unsigned int block_size_log2) +{ + unsigned int width = rfb->base.width / + ((plane && plane < format->num_planes) ? format->hsub : 1); + unsigned int height = rfb->base.height / + ((plane && plane < format->num_planes) ? format->vsub : 1); + unsigned int cpp = plane < format->num_planes ? format->cpp[plane] : 1; + unsigned int block_pitch = block_width * cpp; + unsigned int min_pitch = ALIGN(width * cpp, block_pitch); + unsigned int block_size = 1 << block_size_log2; + uint64_t size; + + if (rfb->base.pitches[plane] % block_pitch) { + drm_dbg_kms(rfb->base.dev, + "pitch %d for plane %d is not a multiple of block pitch %d\n", + rfb->base.pitches[plane], plane, block_pitch); + return -EINVAL; + } + if (rfb->base.pitches[plane] < min_pitch) { + drm_dbg_kms(rfb->base.dev, + "pitch %d for plane %d is less than minimum pitch %d\n", + rfb->base.pitches[plane], plane, min_pitch); + return -EINVAL; + } + + /* Force at least natural alignment. */ + if (rfb->base.offsets[plane] % block_size) { + drm_dbg_kms(rfb->base.dev, + "offset 0x%x for plane %d is not a multiple of block pitch 0x%x\n", + rfb->base.offsets[plane], plane, block_size); + return -EINVAL; + } + + size = rfb->base.offsets[plane] + + (uint64_t)rfb->base.pitches[plane] / block_pitch * + block_size * DIV_ROUND_UP(height, block_height); + + if (rfb->base.obj[0]->size < size) { + drm_dbg_kms(rfb->base.dev, + "BO size 0x%zx is less than 0x%llx required for plane %d\n", + rfb->base.obj[0]->size, size, plane); + return -EINVAL; + } + + return 0; +} + + +static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb) +{ + const struct drm_format_info *format_info = drm_format_info(rfb->base.format->format); + uint64_t modifier = rfb->base.modifier; + int ret; + unsigned int i, block_width, block_height, block_size_log2; + + if (!rfb->base.dev->mode_config.allow_fb_modifiers) + return 0; + + for (i = 0; i < format_info->num_planes; ++i) { + if (modifier == DRM_FORMAT_MOD_LINEAR) { + block_width = 256 / format_info->cpp[i]; + block_height = 1; + block_size_log2 = 8; + } else { + int swizzle = AMD_FMT_MOD_GET(TILE, modifier); + + switch ((swizzle & ~3) + 1) { + case DC_SW_256B_S: + block_size_log2 = 8; + break; + case DC_SW_4KB_S: + case DC_SW_4KB_S_X: + block_size_log2 = 12; + break; + case DC_SW_64KB_S: + case DC_SW_64KB_S_T: + case DC_SW_64KB_S_X: + block_size_log2 = 16; + break; + default: + drm_dbg_kms(rfb->base.dev, + "Swizzle mode with unknown block size: %d\n", swizzle); + return -EINVAL; + } + + get_block_dimensions(block_size_log2, format_info->cpp[i], + &block_width, &block_height); + } + + ret = amdgpu_display_verify_plane(rfb, i, format_info, + block_width, block_height, block_size_log2); + if (ret) + return ret; + } + + if (AMD_FMT_MOD_GET(DCC, modifier)) { + if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) { + block_size_log2 = get_dcc_block_size(modifier, false, false); + get_block_dimensions(block_size_log2 + 8, format_info->cpp[0], + &block_width, &block_height); + ret = amdgpu_display_verify_plane(rfb, i, format_info, + block_width, block_height, + block_size_log2); + if (ret) + return ret; + + ++i; + block_size_log2 = get_dcc_block_size(modifier, true, true); + } else { + bool pipe_aligned = AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier); + + block_size_log2 = get_dcc_block_size(modifier, true, 
pipe_aligned); + } + get_block_dimensions(block_size_log2 + 8, format_info->cpp[0], + &block_width, &block_height); + ret = amdgpu_display_verify_plane(rfb, i, format_info, + block_width, block_height, block_size_log2); + if (ret) + return ret; + } + + return 0; +} + static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb, uint64_t *tiling_flags, bool *tmz_surface) { @@ -902,10 +1070,8 @@ int amdgpu_display_gem_fb_verify_and_init( int ret; rfb->base.obj[0] = obj; - - /* Verify that bo size can fit the fb size. */ - ret = drm_gem_fb_init_with_funcs(dev, &rfb->base, file_priv, mode_cmd, - &amdgpu_fb_funcs); + drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd); + ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs); if (ret) goto err; /* Verify that the modifier is supported. */ @@ -967,9 +1133,12 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev, } } - for (i = 1; i < rfb->base.format->num_planes; ++i) { + ret = amdgpu_display_verify_sizes(rfb); + if (ret) + return ret; + + for (i = 0; i < rfb->base.format->num_planes; ++i) { drm_gem_object_get(rfb->base.obj[0]); - drm_gem_object_put(rfb->base.obj[i]); rfb->base.obj[i] = rfb->base.obj[0]; } @@ -999,6 +1168,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev, domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags); if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) { drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n"); + drm_gem_object_put(obj); return ERR_PTR(-EINVAL); } @@ -1412,7 +1582,7 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev) } } } - return r; + return 0; } int amdgpu_display_resume_helper(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 922938931e1a..f93883db2b46 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1573,6 +1573,9 @@ static int amdgpu_pmops_runtime_resume(struct device *dev) amdgpu_device_baco_exit(drm_dev); } ret = amdgpu_device_resume(drm_dev, false); + if (ret) + return ret; + if (amdgpu_device_supports_px(drm_dev)) drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; adev->in_runpm = false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 94b069630db3..b4971e90b98c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -215,7 +215,11 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm, /* Check if we have an idle VMID */ i = 0; list_for_each_entry((*idle), &id_mgr->ids_lru, list) { - fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring); + /* Don't use per engine and per process VMID at the same time */ + struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ? 
+ NULL : ring; + + fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r); if (!fences[i]) break; ++i; @@ -281,7 +285,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, if (updates && (*id)->flushed_updates && updates->context == (*id)->flushed_updates->context && !dma_fence_is_later(updates, (*id)->flushed_updates)) - updates = NULL; + updates = NULL; if ((*id)->owner != vm->immediate.fence_context || job->vm_pd_addr != (*id)->pd_gpu_addr || @@ -290,6 +294,10 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, !dma_fence_is_signaled((*id)->last_flush))) { struct dma_fence *tmp; + /* Don't use per engine and per process VMID at the same time */ + if (adev->vm_manager.concurrent_flush) + ring = NULL; + /* to prevent one context starved by another context */ (*id)->pd_gpu_addr = 0; tmp = amdgpu_sync_peek_fence(&(*id)->active, ring); @@ -365,12 +373,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm, if (updates && (!flushed || dma_fence_is_later(updates, flushed))) needs_flush = true; - /* Concurrent flushes are only possible starting with Vega10 and - * are broken on Navi10 and Navi14. - */ - if (needs_flush && (adev->asic_type < CHIP_VEGA10 || - adev->asic_type == CHIP_NAVI10 || - adev->asic_type == CHIP_NAVI14)) + if (needs_flush && !adev->vm_manager.concurrent_flush) continue; /* Good, we can use this VMID. Remember this submission as diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 0ffdf847cad0..9acee4a5b2ba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -3148,6 +3148,12 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) { unsigned i; + /* Concurrent flushes are only possible starting with Vega10 and + * are broken on Navi10 and Navi14. 
+ */ + adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 || + adev->asic_type == CHIP_NAVI10 || + adev->asic_type == CHIP_NAVI14); amdgpu_vmid_mgr_init(adev); adev->vm_manager.fence_context = diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 976a12e5a8b9..4e140288159c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -331,6 +331,7 @@ struct amdgpu_vm_manager { /* Handling of VMIDs */ struct amdgpu_vmid_mgr id_mgr[AMDGPU_MAX_VMHUBS]; unsigned int first_kfd_vmid; + bool concurrent_flush; /* Handling of VM fences */ u64 fence_context; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index c1bd190841f8..e4f27b3f28fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -59,6 +59,7 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris12_mc.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_32_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin"); @@ -243,10 +244,16 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev) chip_name = "polaris10"; break; case CHIP_POLARIS12: - if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) + if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) { chip_name = "polaris12_k"; - else - chip_name = "polaris12"; + } else { + WREG32(mmMC_SEQ_IO_DEBUG_INDEX, ixMC_IO_DEBUG_UP_159); + /* Polaris12 32bit ASIC needs a special MC firmware */ + if (RREG32(mmMC_SEQ_IO_DEBUG_DATA) == 0x05b4dc40) + chip_name = "polaris12_32"; + else + chip_name = "polaris12"; + } break; case CHIP_FIJI: case CHIP_CARRIZO: diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 3f15bf34123a..cf165ab5dd26 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -589,6 +589,10 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0), AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect); + + /* VCN global tiling registers */ + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); } static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index b34ab76c5f4c..389eff96fcf6 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4015,6 +4015,23 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state, scaling_info->src_rect.x = state->src_x >> 16; scaling_info->src_rect.y = state->src_y >> 16; + /* + * For reasons we don't (yet) fully understand a non-zero + * src_y coordinate into an NV12 buffer can cause a + * system hang. To avoid hangs (and maybe be overly cautious) + * let's reject both non-zero src_x and src_y. + * + * We currently know of only one use-case to reproduce a + * scenario with non-zero src_x and src_y for NV12, which + * is to gesture the YouTube Android app into full screen + * on ChromeOS. 
+ */ + if (state->fb && + state->fb->format->format == DRM_FORMAT_NV12 && + (scaling_info->src_rect.x != 0 || + scaling_info->src_rect.y != 0)) + return -EINVAL; + scaling_info->src_rect.width = state->src_w >> 16; if (scaling_info->src_rect.width == 0) return -EINVAL; @@ -9869,6 +9886,53 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm } #endif +static int validate_overlay(struct drm_atomic_state *state) +{ + int i; + struct drm_plane *plane; + struct drm_plane_state *old_plane_state, *new_plane_state; + struct drm_plane_state *primary_state, *overlay_state = NULL; + + /* Check if primary plane is contained inside overlay */ + for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { + if (plane->type == DRM_PLANE_TYPE_OVERLAY) { + if (drm_atomic_plane_disabling(plane->state, new_plane_state)) + return 0; + + overlay_state = new_plane_state; + continue; + } + } + + /* check if we're making changes to the overlay plane */ + if (!overlay_state) + return 0; + + /* check if overlay plane is enabled */ + if (!overlay_state->crtc) + return 0; + + /* find the primary plane for the CRTC that the overlay is enabled on */ + primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary); + if (IS_ERR(primary_state)) + return PTR_ERR(primary_state); + + /* check if primary plane is enabled */ + if (!primary_state->crtc) + return 0; + + /* Perform the bounds check to ensure the overlay plane covers the primary */ + if (primary_state->crtc_x < overlay_state->crtc_x || + primary_state->crtc_y < overlay_state->crtc_y || + primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w || + primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) { + DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n"); + return -EINVAL; + } + + return 0; +} + /** * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM. 
* @dev: The DRM device @@ -10043,6 +10107,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, goto fail; } + ret = validate_overlay(state); + if (ret) + goto fail; + /* Add new/modified planes */ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { ret = dm_update_plane_state(dc, state, plane, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 529545045a3e..1b6b15708b96 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -3012,7 +3012,7 @@ static int trigger_hpd_mst_set(void *data, u64 val) if (!aconnector->dc_link) continue; - if (!(aconnector->port && &aconnector->mst_port->mst_mgr)) + if (!aconnector->mst_port) continue; link = aconnector->dc_link; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 8128603ef495..9a54066ec0af 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -451,7 +451,7 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - struct pp_states_info data; + struct pp_states_info data = {0}; enum amd_pm_state_type pm = 0; int i = 0, ret = 0; @@ -1893,6 +1893,14 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ } } + if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) { + /* SMU MP1 does not support dcefclk level setting */ + if (asic_type >= CHIP_NAVI10) { + dev_attr->attr.mode &= ~S_IWUGO; + dev_attr->store = NULL; + } + } + #undef DEVICE_ATTR_IS return 0; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index f827096dc849..ac13042672ea 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -1443,7 +1443,6 @@ static int navi10_force_clk_levels(struct smu_context *smu, case SMU_SOCCLK: case SMU_MCLK: case SMU_UCLK: - case SMU_DCEFCLK: case SMU_FCLK: /* There is only 2 levels for fine grained DPM */ if (navi10_is_support_fine_grained_dpm(smu, clk_type)) { @@ -1463,6 +1462,10 @@ static int navi10_force_clk_levels(struct smu_context *smu, if (ret) return size; break; + case SMU_DCEFCLK: + dev_info(smu->adev->dev,"Setting DCEFCLK min/max dpm level is not supported!\n"); + break; + default: break; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 72d9c1be1835..d2fd44b903ca 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -1127,7 +1127,6 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu, case SMU_SOCCLK: case SMU_MCLK: case SMU_UCLK: - case SMU_DCEFCLK: case SMU_FCLK: /* There is only 2 levels for fine grained DPM */ if (sienna_cichlid_is_support_fine_grained_dpm(smu, clk_type)) { @@ -1147,6 +1146,9 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu, if (ret) goto forec_level_out; break; + case SMU_DCEFCLK: + dev_info(smu->adev->dev,"Setting DCEFCLK min/max dpm level is not supported!\n"); + break; default: break; } diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 477badfcb258..dda320749c65 100644 --- 
a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -669,8 +669,8 @@ static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu) link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)); /* Get H/V total from transcoder timing */ - htotal = (vgpu_vreg_t(vgpu, HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT) + 1; - vtotal = (vgpu_vreg_t(vgpu, VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT) + 1; + htotal = (vgpu_vreg_t(vgpu, HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT); + vtotal = (vgpu_vreg_t(vgpu, VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT); if (dp_br && link_n && htotal && vtotal) { u64 pixel_clk = 0; @@ -682,7 +682,7 @@ static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu) pixel_clk *= MSEC_PER_SEC; /* Calcuate refresh rate by (pixel_clk / (h_total * v_total)) */ - new_rate = DIV64_U64_ROUND_CLOSEST(pixel_clk, div64_u64(mul_u32_u32(htotal, vtotal), MSEC_PER_SEC)); + new_rate = DIV64_U64_ROUND_CLOSEST(mul_u64_u32_shr(pixel_clk, MSEC_PER_SEC, 0), mul_u32_u32(htotal + 1, vtotal + 1)); if (*old_rate != new_rate) *old_rate = new_rate; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 7c29976be243..18bc76b7f1a3 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -648,16 +648,6 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc, if (unlikely(!cstate->num_mixers)) return; - /* - * For planes without commit update, drm framework will not add - * those planes to current state since hardware update is not - * required. However, if those planes were power collapsed since - * last commit cycle, driver has to restore the hardware state - * of those planes explicitly here prior to plane flush. - */ - drm_atomic_crtc_for_each_plane(plane, crtc) - dpu_plane_restore(plane, state); - /* update performance setting before crtc kickoff */ dpu_core_perf_crtc_update(crtc, 1, false); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index df7f3d3afd8b..7a993547eb75 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -1258,22 +1258,6 @@ static void dpu_plane_atomic_update(struct drm_plane *plane, } } -void dpu_plane_restore(struct drm_plane *plane, struct drm_atomic_state *state) -{ - struct dpu_plane *pdpu; - - if (!plane || !plane->state) { - DPU_ERROR("invalid plane\n"); - return; - } - - pdpu = to_dpu_plane(plane); - - DPU_DEBUG_PLANE(pdpu, "\n"); - - dpu_plane_atomic_update(plane, state); -} - static void dpu_plane_destroy(struct drm_plane *plane) { struct dpu_plane *pdpu = plane ? 
to_dpu_plane(plane) : NULL; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h index 03b6365a750c..34e03ac05f4a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h @@ -85,12 +85,6 @@ void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl, u32 *flush_sspp); /** - * dpu_plane_restore - restore hw state if previously power collapsed - * @plane: Pointer to drm plane structure - */ -void dpu_plane_restore(struct drm_plane *plane, struct drm_atomic_state *state); - -/** * dpu_plane_flush - final plane operations before commit flush * @plane: Pointer to drm plane structure */ diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 42301b4e56f5..28c4413f4dc8 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -2120,11 +2120,14 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) return state_index; /* last mode is usually default, array is low to high */ for (i = 0; i < num_modes; i++) { - rdev->pm.power_state[state_index].clock_info = - kcalloc(1, sizeof(struct radeon_pm_clock_info), - GFP_KERNEL); + /* avoid memory leaks from invalid modes or unknown frev. */ + if (!rdev->pm.power_state[state_index].clock_info) { + rdev->pm.power_state[state_index].clock_info = + kzalloc(sizeof(struct radeon_pm_clock_info), + GFP_KERNEL); + } if (!rdev->pm.power_state[state_index].clock_info) - return state_index; + goto out; rdev->pm.power_state[state_index].num_clock_modes = 1; rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; switch (frev) { @@ -2243,17 +2246,24 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) break; } } +out: + /* free any unused clock_info allocation. */ + if (state_index && state_index < num_modes) { + kfree(rdev->pm.power_state[state_index].clock_info); + rdev->pm.power_state[state_index].clock_info = NULL; + } + /* last mode is usually default */ - if (rdev->pm.default_power_state_index == -1) { + if (state_index && rdev->pm.default_power_state_index == -1) { rdev->pm.power_state[state_index - 1].type = POWER_STATE_TYPE_DEFAULT; rdev->pm.default_power_state_index = state_index - 1; rdev->pm.power_state[state_index - 1].default_clock_mode = &rdev->pm.power_state[state_index - 1].clock_info[0]; - rdev->pm.power_state[state_index].flags &= + rdev->pm.power_state[state_index - 1].flags &= ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; - rdev->pm.power_state[state_index].misc = 0; - rdev->pm.power_state[state_index].misc2 = 0; + rdev->pm.power_state[state_index - 1].misc = 0; + rdev->pm.power_state[state_index - 1].misc2 = 0; } return state_index; } |
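
For reference, the block-dimension helper added in amdgpu_display.c above converts a byte-sized block (2^block_log2 bytes of cpp-byte pixels) into a pixel width and height, giving the extra bit to the width when the pixel-count log2 is odd. Below is a standalone restatement with a couple of worked inputs; ilog2_u32() and main() are local stand-ins for this sketch, not kernel helpers.

```c
#include <stdio.h>

/* Integer log2 for power-of-two inputs; stand-in for the kernel's ilog2(). */
static unsigned int ilog2_u32(unsigned int v)
{
    unsigned int r = 0;

    while (v >>= 1)
        ++r;
    return r;
}

/*
 * Restatement of the get_block_dimensions() helper from the diff above:
 * a block of 2^block_log2 bytes holding cpp-byte pixels is split into a
 * width x height tile of pixels.
 */
static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
                                 unsigned int *width, unsigned int *height)
{
    unsigned int cpp_log2 = ilog2_u32(cpp);
    unsigned int pixel_log2 = block_log2 - cpp_log2;
    unsigned int width_log2 = (pixel_log2 + 1) / 2;
    unsigned int height_log2 = pixel_log2 - width_log2;

    *width = 1u << width_log2;
    *height = 1u << height_log2;
}

int main(void)
{
    unsigned int w, h;

    /* 256-byte linear block, 32 bpp: 64 pixels -> 8 x 8 */
    get_block_dimensions(8, 4, &w, &h);
    printf("256B @ 4 cpp -> %u x %u\n", w, h);

    /* 64 KiB tiled block, 32 bpp: 16384 pixels -> 128 x 128 */
    get_block_dimensions(16, 4, &w, &h);
    printf("64KiB @ 4 cpp -> %u x %u\n", w, h);
    return 0;
}
```

A 256-byte linear block of 32 bpp pixels comes out as 8 x 8 and a 64 KiB tiled block as 128 x 128, which matches how amdgpu_display_verify_plane() in the diff derives its block pitch (block_width * cpp) and its per-block row count.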