Diffstat (limited to 'drivers/gpu')
133 files changed, 1869 insertions, 1140 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 8639a4f9c6e8..2eca58220550 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1293,6 +1293,7 @@ void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev, u32 reg, u32 v); struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev, struct dma_fence *gang); +bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev); /* atpx handler */ #if defined(CONFIG_VGA_SWITCHEROO) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 0561812aa0a4..5d9a34601a1a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -706,6 +706,13 @@ err: void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle) { + /* Temporary workaround to fix issues observed in some + * compute applications when GFXOFF is enabled on GFX11. + */ + if (IP_VERSION_MAJ(adev->ip_versions[GC_HWIP][0]) == 11) { + pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled"); + amdgpu_gfx_off_ctrl(adev, idle); + } amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_COMPUTE, !idle); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c index c8935d718207..4485bb29bec9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c @@ -41,5 +41,6 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = { .get_atc_vmid_pasid_mapping_info = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, + .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 84f44f7e4111..1f76e27f1a35 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -171,9 +171,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, (kfd_mem_limit.ttm_mem_used + ttm_mem_needed > kfd_mem_limit.max_ttm_mem_limit) || (adev && adev->kfd.vram_used + vram_needed > - adev->gmc.real_vram_size - - atomic64_read(&adev->vram_pin_size) - - reserved_for_pt)) { + adev->gmc.real_vram_size - reserved_for_pt)) { ret = -ENOMEM; goto release; } @@ -988,6 +986,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, struct amdkfd_process_info *process_info = mem->process_info; struct amdgpu_bo *bo = mem->bo; struct ttm_operation_ctx ctx = { true, false }; + struct hmm_range *range; int ret = 0; mutex_lock(&process_info->lock); @@ -1017,7 +1016,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, return 0; } - ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages); + ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range); if (ret) { pr_err("%s: Failed to get user pages: %d\n", __func__, ret); goto unregister_out; @@ -1035,7 +1034,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, amdgpu_bo_unreserve(bo); release_out: - amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm); + amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range); unregister_out: if (ret) amdgpu_mn_unregister(bo); @@ -2372,6 +2371,8 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, /* Go through 
userptr_inval_list and update any invalid user_pages */ list_for_each_entry(mem, &process_info->userptr_inval_list, validate_list.head) { + struct hmm_range *range; + invalid = atomic_read(&mem->invalid); if (!invalid) /* BO hasn't been invalidated since the last @@ -2382,7 +2383,8 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, bo = mem->bo; /* Get updated user pages */ - ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages); + ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, + &range); if (ret) { pr_debug("Failed %d to get user pages\n", ret); @@ -2401,7 +2403,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, * FIXME: Cannot ignore the return code, must hold * notifier_lock */ - amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm); + amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range); } /* Mark the BO as valid unless it was invalidated diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index 2168163aad2d..252a876b0725 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -209,6 +209,7 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list, list_add_tail(&e->tv.head, &bucket[priority]); e->user_pages = NULL; + e->range = NULL; } /* Connect the sorted buckets in the output list. */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h index 9caea1688fc3..e4d78491bcc7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h @@ -26,6 +26,8 @@ #include <drm/ttm/ttm_execbuf_util.h> #include <drm/amdgpu_drm.h> +struct hmm_range; + struct amdgpu_device; struct amdgpu_bo; struct amdgpu_bo_va; @@ -36,6 +38,7 @@ struct amdgpu_bo_list_entry { struct amdgpu_bo_va *bo_va; uint32_t priority; struct page **user_pages; + struct hmm_range *range; bool user_invalidated; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 491d4846fc02..cfb262911bfc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -328,7 +328,6 @@ static void amdgpu_connector_free_edid(struct drm_connector *connector) kfree(amdgpu_connector->edid); amdgpu_connector->edid = NULL; - drm_connector_update_edid_property(connector, NULL); } static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 1bbd39b3b0fc..365e3fb6a9e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -109,6 +109,7 @@ static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p, return r; ++(num_ibs[r]); + p->gang_leader_idx = r; return 0; } @@ -287,8 +288,10 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p, } } - if (!p->gang_size) - return -EINVAL; + if (!p->gang_size) { + ret = -EINVAL; + goto free_partial_kdata; + } for (i = 0; i < p->gang_size; ++i) { ret = amdgpu_job_alloc(p->adev, num_ibs[i], &p->jobs[i], vm); @@ -300,7 +303,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p, if (ret) goto free_all_kdata; } - p->gang_leader = p->jobs[p->gang_size - 1]; + p->gang_leader = p->jobs[p->gang_leader_idx]; if (p->ctx->vram_lost_counter != p->gang_leader->vram_lost_counter) { ret = -ECANCELED; @@ -910,7 +913,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, goto 
out_free_user_pages; } - r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages); + r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range); if (r) { kvfree(e->user_pages); e->user_pages = NULL; @@ -988,10 +991,12 @@ out_free_user_pages: if (!e->user_pages) continue; - amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm); + amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range); kvfree(e->user_pages); e->user_pages = NULL; + e->range = NULL; } + mutex_unlock(&p->bo_list->bo_list_mutex); return r; } @@ -1194,16 +1199,18 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) return r; } - for (i = 0; i < p->gang_size - 1; ++i) { + for (i = 0; i < p->gang_size; ++i) { + if (p->jobs[i] == leader) + continue; + r = amdgpu_sync_clone(&leader->sync, &p->jobs[i]->sync); if (r) return r; } - r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_size - 1]); + r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]); if (r && r != -ERESTARTSYS) DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n"); - return r; } @@ -1237,9 +1244,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, for (i = 0; i < p->gang_size; ++i) drm_sched_job_arm(&p->jobs[i]->base); - for (i = 0; i < (p->gang_size - 1); ++i) { + for (i = 0; i < p->gang_size; ++i) { struct dma_fence *fence; + if (p->jobs[i] == leader) + continue; + fence = &p->jobs[i]->base.s_fence->scheduled; r = amdgpu_sync_fence(&leader->sync, fence); if (r) @@ -1264,7 +1274,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); - r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm); + r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range); + e->range = NULL; } if (r) { r = -EAGAIN; @@ -1275,7 +1286,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, list_for_each_entry(e, &p->validated, tv.head) { /* Everybody except for the gang leader uses READ */ - for (i = 0; i < (p->gang_size - 1); ++i) { + for (i = 0; i < p->gang_size; ++i) { + if (p->jobs[i] == leader) + continue; + dma_resv_add_fence(e->tv.bo->base.resv, &p->jobs[i]->base.s_fence->finished, DMA_RESV_USAGE_READ); @@ -1285,7 +1299,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, e->tv.num_shared = 0; } - seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_size - 1], + seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx], p->fence); amdgpu_cs_post_dependencies(p); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h index cbaa19b2b8a3..f80adf9069ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h @@ -54,6 +54,7 @@ struct amdgpu_cs_parser { /* scheduler job objects */ unsigned int gang_size; + unsigned int gang_leader_idx; struct drm_sched_entity *entities[AMDGPU_CS_GANG_SIZE]; struct amdgpu_job *jobs[AMDGPU_CS_GANG_SIZE]; struct amdgpu_job *gang_leader; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index ddaecb2610c9..f1e9663b4051 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4060,15 +4060,18 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) * at suspend time. 
* */ -static void amdgpu_device_evict_resources(struct amdgpu_device *adev) +static int amdgpu_device_evict_resources(struct amdgpu_device *adev) { + int ret; + /* No need to evict vram on APUs for suspend to ram or s2idle */ if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU)) - return; + return 0; - if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM)) + ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM); + if (ret) DRM_WARN("evicting device resources failed\n"); - + return ret; } /* @@ -4118,7 +4121,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) if (!adev->in_s0ix) amdgpu_amdkfd_suspend(adev, adev->in_runpm); - amdgpu_device_evict_resources(adev); + r = amdgpu_device_evict_resources(adev); + if (r) + return r; amdgpu_fence_driver_hw_fini(adev); @@ -6039,3 +6044,44 @@ struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev, dma_fence_put(old); return NULL; } + +bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev) +{ + switch (adev->asic_type) { +#ifdef CONFIG_DRM_AMDGPU_SI + case CHIP_HAINAN: +#endif + case CHIP_TOPAZ: + /* chips with no display hardware */ + return false; +#ifdef CONFIG_DRM_AMDGPU_SI + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_VERDE: + case CHIP_OLAND: +#endif +#ifdef CONFIG_DRM_AMDGPU_CIK + case CHIP_BONAIRE: + case CHIP_HAWAII: + case CHIP_KAVERI: + case CHIP_KABINI: + case CHIP_MULLINS: +#endif + case CHIP_TONGA: + case CHIP_FIJI: + case CHIP_POLARIS10: + case CHIP_POLARIS11: + case CHIP_POLARIS12: + case CHIP_VEGAM: + case CHIP_CARRIZO: + case CHIP_STONEY: + /* chips with display hardware */ + return true; + default: + /* IP discovery */ + if (!adev->ip_versions[DCE_HWIP][0] || + (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)) + return false; + return true; + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 3c9fecdd6b2f..bf2d50c8c92a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -2201,7 +2201,8 @@ amdgpu_pci_remove(struct pci_dev *pdev) pm_runtime_forbid(dev->dev); } - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)) { + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2) && + !amdgpu_sriov_vf(adev)) { bool need_to_reset_gpu = false; if (adev->gmc.xgmi.num_physical_nodes > 1) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 8ef31d687ef3..91571b1324f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -378,6 +378,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, struct amdgpu_device *adev = drm_to_adev(dev); struct drm_amdgpu_gem_userptr *args = data; struct drm_gem_object *gobj; + struct hmm_range *range; struct amdgpu_bo *bo; uint32_t handle; int r; @@ -413,14 +414,13 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, if (r) goto release_object; - if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) { - r = amdgpu_mn_register(bo, args->addr); - if (r) - goto release_object; - } + r = amdgpu_mn_register(bo, args->addr); + if (r) + goto release_object; if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) { - r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages); + r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, + &range); if (r) goto release_object; @@ -443,7 +443,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, user_pages_done: if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) - 
amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm); + amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range); release_object: drm_gem_object_put(gobj); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 34233a74248c..28612e56d0d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -479,6 +479,12 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev) unsigned i; unsigned vmhub, inv_eng; + if (adev->enable_mes) { + /* reserve engine 5 for firmware */ + for (vmhub = 0; vmhub < AMDGPU_MAX_VMHUBS; vmhub++) + vm_inv_engs[vmhub] &= ~(1 << 5); + } + for (i = 0; i < adev->num_rings; ++i) { ring = adev->rings[i]; vmhub = ring->funcs->vmhub; @@ -656,7 +662,7 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev) } if (amdgpu_sriov_vf(adev) || - !amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) { + !amdgpu_device_has_display_hardware(adev)) { size = 0; } else { size = amdgpu_gmc_get_vbios_fb_size(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index cd968e781077..adac650cf544 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -169,7 +169,11 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job) amdgpu_sync_free(&job->sync); amdgpu_sync_free(&job->sched_sync); - dma_fence_put(&job->hw_fence); + /* only put the hw fence if has embedded fence */ + if (!job->hw_fence.ops) + kfree(job); + else + dma_fence_put(&job->hw_fence); } void amdgpu_job_set_gang_leader(struct amdgpu_job *job, @@ -254,6 +258,9 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job, DRM_ERROR("Error adding fence (%d)\n", r); } + if (!fence && job->gang_submit) + fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit); + while (fence == NULL && vm && !job->vmid) { r = amdgpu_vmid_grab(vm, ring, &job->sync, &job->base.s_fence->finished, @@ -264,9 +271,6 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job, fence = amdgpu_sync_get_fence(&job->sync); } - if (!fence && job->gang_submit) - fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit); - return fence; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index bf1ff8f0e712..4e42dcb1950f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -337,12 +337,14 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info, fw_info->feature = adev->psp.cap_feature_version; break; case AMDGPU_INFO_FW_MES_KIQ: - fw_info->ver = adev->mes.ucode_fw_version[0]; - fw_info->feature = 0; + fw_info->ver = adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK; + fw_info->feature = (adev->mes.kiq_version & AMDGPU_MES_FEAT_VERSION_MASK) + >> AMDGPU_MES_FEAT_VERSION_SHIFT; break; case AMDGPU_INFO_FW_MES: - fw_info->ver = adev->mes.ucode_fw_version[1]; - fw_info->feature = 0; + fw_info->ver = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK; + fw_info->feature = (adev->mes.sched_version & AMDGPU_MES_FEAT_VERSION_MASK) + >> AMDGPU_MES_FEAT_VERSION_SHIFT; break; case AMDGPU_INFO_FW_IMU: fw_info->ver = adev->gfx.imu_fw_version; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index effa7df3ddbf..7978307e1d6d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -172,6 +172,7 @@ void 
psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx) { amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr, &mem_ctx->shared_buf); + mem_ctx->shared_bo = NULL; } static void psp_free_shared_bufs(struct psp_context *psp) @@ -182,6 +183,7 @@ static void psp_free_shared_bufs(struct psp_context *psp) /* free TMR memory buffer */ pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr); + psp->tmr_bo = NULL; /* free xgmi shared memory */ psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context); @@ -743,7 +745,7 @@ static int psp_load_toc(struct psp_context *psp, /* Set up Trusted Memory Region */ static int psp_tmr_init(struct psp_context *psp) { - int ret; + int ret = 0; int tmr_size; void *tmr_buf; void **pptr; @@ -770,10 +772,12 @@ static int psp_tmr_init(struct psp_context *psp) } } - pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; - ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT, - AMDGPU_GEM_DOMAIN_VRAM, - &psp->tmr_bo, &psp->tmr_mc_addr, pptr); + if (!psp->tmr_bo) { + pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; + ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT, + AMDGPU_GEM_DOMAIN_VRAM, + &psp->tmr_bo, &psp->tmr_mc_addr, pptr); + } return ret; } @@ -2732,8 +2736,6 @@ static int psp_suspend(void *handle) } out: - psp_free_shared_bufs(psp); - return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 57277b1cf183..b64938ed8cb6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -643,9 +643,6 @@ struct amdgpu_ttm_tt { struct task_struct *usertask; uint32_t userflags; bool bound; -#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) - struct hmm_range *range; -#endif }; #define ttm_to_amdgpu_ttm_tt(ptr) container_of(ptr, struct amdgpu_ttm_tt, ttm) @@ -658,7 +655,8 @@ struct amdgpu_ttm_tt { * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only * once afterwards to stop HMM tracking */ -int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) +int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages, + struct hmm_range **range) { struct ttm_tt *ttm = bo->tbo.ttm; struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); @@ -668,16 +666,15 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) bool readonly; int r = 0; + /* Make sure get_user_pages_done() can cleanup gracefully */ + *range = NULL; + mm = bo->notifier.mm; if (unlikely(!mm)) { DRM_DEBUG_DRIVER("BO is not registered?\n"); return -EFAULT; } - /* Another get_user_pages is running at the same time?? 
*/ - if (WARN_ON(gtt->range)) - return -EFAULT; - if (!mmget_not_zero(mm)) /* Happens during process shutdown */ return -ESRCH; @@ -695,7 +692,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) readonly = amdgpu_ttm_tt_is_readonly(ttm); r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start, - ttm->num_pages, >t->range, readonly, + ttm->num_pages, range, readonly, true, NULL); out_unlock: mmap_read_unlock(mm); @@ -713,30 +710,24 @@ out_unlock: * * Returns: true if pages are still valid */ -bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm) +bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, + struct hmm_range *range) { struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); - bool r = false; - if (!gtt || !gtt->userptr) + if (!gtt || !gtt->userptr || !range) return false; DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n", gtt->userptr, ttm->num_pages); - WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns, - "No user pages to check\n"); + WARN_ONCE(!range->hmm_pfns, "No user pages to check\n"); - if (gtt->range) { - /* - * FIXME: Must always hold notifier_lock for this, and must - * not ignore the return code. - */ - r = amdgpu_hmm_range_get_pages_done(gtt->range); - gtt->range = NULL; - } - - return !r; + /* + * FIXME: Must always hold notifier_lock for this, and must + * not ignore the return code. + */ + return !amdgpu_hmm_range_get_pages_done(range); } #endif @@ -813,20 +804,6 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev, /* unmap the pages mapped to the device */ dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0); sg_free_table(ttm->sg); - -#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) - if (gtt->range) { - unsigned long i; - - for (i = 0; i < ttm->num_pages; i++) { - if (ttm->pages[i] != - hmm_pfn_to_page(gtt->range->hmm_pfns[i])) - break; - } - - WARN((i == ttm->num_pages), "Missing get_user_page_done\n"); - } -#endif } static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 6a70818039dd..a37207011a69 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -39,6 +39,8 @@ #define AMDGPU_POISON 0xd0bed0be +struct hmm_range; + struct amdgpu_gtt_mgr { struct ttm_resource_manager manager; struct drm_mm mm; @@ -149,15 +151,19 @@ void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo); uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type); #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) -int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages); -bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm); +int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages, + struct hmm_range **range); +bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, + struct hmm_range *range); #else static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, - struct page **pages) + struct page **pages, + struct hmm_range **range) { return -EPERM; } -static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm) +static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, + struct hmm_range *range) { return false; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 0b52af415b28..ce64ca1c6e66 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -156,6 +156,9 @@ int amdgpu_vcn_sw_init(struct 
amdgpu_device *adev) break; case IP_VERSION(3, 0, 2): fw_name = FIRMWARE_VANGOGH; + if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && + (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) + adev->vcn.indirect_sram = true; break; case IP_VERSION(3, 0, 16): fw_name = FIRMWARE_DIMGREY_CAVEFISH; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index f4b5301ea2a0..500a1dc4fe02 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -500,6 +500,8 @@ static int amdgpu_vkms_sw_init(void *handle) adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; + adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true; + r = amdgpu_display_modeset_create_props(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 2291aa14d888..003aa9e47085 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -143,32 +143,6 @@ int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm, return 0; } -/* - * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS - * happens while holding this lock anywhere to prevent deadlocks when - * an MMU notifier runs in reclaim-FS context. - */ -static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm) -{ - mutex_lock(&vm->eviction_lock); - vm->saved_flags = memalloc_noreclaim_save(); -} - -static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm) -{ - if (mutex_trylock(&vm->eviction_lock)) { - vm->saved_flags = memalloc_noreclaim_save(); - return 1; - } - return 0; -} - -static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm) -{ - memalloc_noreclaim_restore(vm->saved_flags); - mutex_unlock(&vm->eviction_lock); -} - /** * amdgpu_vm_bo_evicted - vm_bo is evicted * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 83acb7bd80fe..6546e786bf00 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -492,7 +492,48 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m); */ static inline uint64_t amdgpu_vm_tlb_seq(struct amdgpu_vm *vm) { + unsigned long flags; + spinlock_t *lock; + + /* + * Workaround to stop racing between the fence signaling and handling + * the cb. The lock is static after initially setting it up, just make + * sure that the dma_fence structure isn't freed up. + */ + rcu_read_lock(); + lock = vm->last_tlb_flush->lock; + rcu_read_unlock(); + + spin_lock_irqsave(lock, flags); + spin_unlock_irqrestore(lock, flags); + return atomic64_read(&vm->tlb_seq); } +/* + * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS + * happens while holding this lock anywhere to prevent deadlocks when + * an MMU notifier runs in reclaim-FS context. 
+ */ +static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm) +{ + mutex_lock(&vm->eviction_lock); + vm->saved_flags = memalloc_noreclaim_save(); +} + +static inline bool amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm) +{ + if (mutex_trylock(&vm->eviction_lock)) { + vm->saved_flags = memalloc_noreclaim_save(); + return true; + } + return false; +} + +static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm) +{ + memalloc_noreclaim_restore(vm->saved_flags); + mutex_unlock(&vm->eviction_lock); +} + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index 358b91243e37..b5f3bba851db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -597,7 +597,9 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev, if (entry->bo) return 0; + amdgpu_vm_eviction_unlock(vm); r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt); + amdgpu_vm_eviction_lock(vm); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 73a517bcf5c1..80dd1343594c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -435,7 +435,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, if (place->flags & TTM_PL_FLAG_TOPDOWN) vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION; - if (fpfn || lpfn != man->size) + if (fpfn || lpfn != mgr->mm.size) /* Allocate blocks in desired range */ vres->flags |= DRM_BUDDY_RANGE_ALLOCATION; diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h index f772bb499f3e..0312c71c3af9 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h +++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h @@ -32,7 +32,6 @@ #define RB_ENABLED (1 << 0) #define RB4_ENABLED (1 << 1) -#define MMSCH_DOORBELL_OFFSET 0x8 #define MMSCH_VF_ENGINE_STATUS__PASS 0x1 diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c index 21d822b1d589..88f9b327183a 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c @@ -45,6 +45,7 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_7_sos.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_7_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin"); +MODULE_FIRMWARE("amdgpu/psp_13_0_10_ta.bin"); /* For large FW files the time to complete can be very long */ #define USBC_PD_POLLING_LIMIT_S 240 diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index 897a5ce9c9da..dcc49b01bd59 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -100,7 +100,6 @@ static int vcn_v4_0_sw_init(void *handle) struct amdgpu_ring *ring; struct amdgpu_device *adev = (struct amdgpu_device *)handle; int i, r; - int vcn_doorbell_index = 0; r = amdgpu_vcn_sw_init(adev); if (r) @@ -112,12 +111,6 @@ static int vcn_v4_0_sw_init(void *handle) if (r) return r; - if (amdgpu_sriov_vf(adev)) { - vcn_doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 - MMSCH_DOORBELL_OFFSET; - /* get DWORD offset */ - vcn_doorbell_index = vcn_doorbell_index << 1; - } - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { volatile struct amdgpu_vcn4_fw_shared *fw_shared; @@ -135,7 +128,7 @@ static int vcn_v4_0_sw_init(void *handle) ring = &adev->vcn.inst[i].ring_enc[0]; ring->use_doorbell = true; if (amdgpu_sriov_vf(adev)) - ring->doorbell_index = vcn_doorbell_index + i * 
(adev->vcn.num_enc_rings + 1) + 1; + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + i * (adev->vcn.num_enc_rings + 1) + 1; else ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i; diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index c7118843db05..0c4c5499bb5c 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -2495,442 +2495,444 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0xbf9f0000, 0x00000000, }; static const uint32_t cwsr_trap_gfx11_hex[] = { - 0xbfa00001, 0xbfa0021e, + 0xbfa00001, 0xbfa00221, 0xb0804006, 0xb8f8f802, 0x9178ff78, 0x00020006, - 0xb8fbf803, 0xbf0d9f6d, - 0xbfa20006, 0x8b6eff78, - 0x00002000, 0xbfa10009, - 0x8b6eff6d, 0x00ff0000, - 0xbfa2001e, 0x8b6eff7b, - 0x00000400, 0xbfa20041, - 0xbf830010, 0xb8fbf803, - 0xbfa0fffa, 0x8b6eff7b, - 0x00000900, 0xbfa20015, - 0x8b6eff7b, 0x000071ff, - 0xbfa10008, 0x8b6fff7b, - 0x00007080, 0xbfa10001, - 0xbeee1287, 0xb8eff801, - 0x846e8c6e, 0x8b6e6f6e, - 0xbfa2000a, 0x8b6eff6d, - 0x00ff0000, 0xbfa20007, - 0xb8eef801, 0x8b6eff6e, - 0x00000800, 0xbfa20003, + 0xb8fbf803, 0xbf0d9e6d, + 0xbfa10001, 0xbfbd0000, + 0xbf0d9f6d, 0xbfa20006, + 0x8b6eff78, 0x00002000, + 0xbfa10009, 0x8b6eff6d, + 0x00ff0000, 0xbfa2001e, 0x8b6eff7b, 0x00000400, - 0xbfa20026, 0xbefa4d82, - 0xbf89fc07, 0x84fa887a, - 0xf4005bbd, 0xf8000010, - 0xbf89fc07, 0x846e976e, - 0x9177ff77, 0x00800000, - 0x8c776e77, 0xf4045bbd, - 0xf8000000, 0xbf89fc07, - 0xf4045ebd, 0xf8000008, - 0xbf89fc07, 0x8bee6e6e, - 0xbfa10001, 0xbe80486e, - 0x8b6eff6d, 0x01ff0000, - 0xbfa20005, 0x8c78ff78, - 0x00002000, 0x80ec886c, - 0x82ed806d, 0xbfa00005, - 0x8b6eff6d, 0x01000000, - 0xbfa20002, 0x806c846c, - 0x826d806d, 0x8b6dff6d, - 0x0000ffff, 0x8bfe7e7e, - 0x8bea6a6a, 0xb978f802, - 0xbe804a6c, 0x8b6dff6d, - 0x0000ffff, 0xbefa0080, - 0xb97a0283, 0xbeee007e, - 0xbeef007f, 0xbefe0180, - 0xbefe4d84, 0xbf89fc07, - 0x8b7aff7f, 0x04000000, - 0x847a857a, 0x8c6d7a6d, - 0xbefa007e, 0x8b7bff7f, - 0x0000ffff, 0xbefe00c1, - 0xbeff00c1, 0xdca6c000, - 0x007a0000, 0x7e000280, - 0xbefe007a, 0xbeff007b, - 0xb8fb02dc, 0x847b997b, - 0xb8fa3b05, 0x807a817a, - 0xbf0d997b, 0xbfa20002, - 0x847a897a, 0xbfa00001, - 0x847a8a7a, 0xb8fb1e06, - 0x847b8a7b, 0x807a7b7a, + 0xbfa20041, 0xbf830010, + 0xb8fbf803, 0xbfa0fffa, + 0x8b6eff7b, 0x00000900, + 0xbfa20015, 0x8b6eff7b, + 0x000071ff, 0xbfa10008, + 0x8b6fff7b, 0x00007080, + 0xbfa10001, 0xbeee1287, + 0xb8eff801, 0x846e8c6e, + 0x8b6e6f6e, 0xbfa2000a, + 0x8b6eff6d, 0x00ff0000, + 0xbfa20007, 0xb8eef801, + 0x8b6eff6e, 0x00000800, + 0xbfa20003, 0x8b6eff7b, + 0x00000400, 0xbfa20026, + 0xbefa4d82, 0xbf89fc07, + 0x84fa887a, 0xf4005bbd, + 0xf8000010, 0xbf89fc07, + 0x846e976e, 0x9177ff77, + 0x00800000, 0x8c776e77, + 0xf4045bbd, 0xf8000000, + 0xbf89fc07, 0xf4045ebd, + 0xf8000008, 0xbf89fc07, + 0x8bee6e6e, 0xbfa10001, + 0xbe80486e, 0x8b6eff6d, + 0x01ff0000, 0xbfa20005, + 0x8c78ff78, 0x00002000, + 0x80ec886c, 0x82ed806d, + 0xbfa00005, 0x8b6eff6d, + 0x01000000, 0xbfa20002, + 0x806c846c, 0x826d806d, + 0x8b6dff6d, 0x0000ffff, + 0x8bfe7e7e, 0x8bea6a6a, + 0xb978f802, 0xbe804a6c, + 0x8b6dff6d, 0x0000ffff, + 0xbefa0080, 0xb97a0283, + 0xbeee007e, 0xbeef007f, + 0xbefe0180, 0xbefe4d84, + 0xbf89fc07, 0x8b7aff7f, + 0x04000000, 0x847a857a, + 0x8c6d7a6d, 0xbefa007e, 0x8b7bff7f, 0x0000ffff, - 0x807aff7a, 0x00000200, - 0x807a7e7a, 0x827b807b, - 0xd7610000, 0x00010870, - 0xd7610000, 0x00010a71, - 0xd7610000, 0x00010c72, - 0xd7610000, 
0x00010e73, - 0xd7610000, 0x00011074, - 0xd7610000, 0x00011275, - 0xd7610000, 0x00011476, - 0xd7610000, 0x00011677, - 0xd7610000, 0x00011a79, - 0xd7610000, 0x00011c7e, - 0xd7610000, 0x00011e7f, - 0xbefe00ff, 0x00003fff, - 0xbeff0080, 0xdca6c040, - 0x007a0000, 0xd760007a, - 0x00011d00, 0xd760007b, - 0x00011f00, 0xbefe007a, - 0xbeff007b, 0xbef4007e, - 0x8b75ff7f, 0x0000ffff, - 0x8c75ff75, 0x00040000, - 0xbef60080, 0xbef700ff, - 0x10807fac, 0xbef1007d, - 0xbef00080, 0xb8f302dc, - 0x84739973, 0xbefe00c1, - 0x857d9973, 0x8b7d817d, - 0xbf06817d, 0xbfa20002, - 0xbeff0080, 0xbfa00002, - 0xbeff00c1, 0xbfa00009, + 0xbefe00c1, 0xbeff00c1, + 0xdca6c000, 0x007a0000, + 0x7e000280, 0xbefe007a, + 0xbeff007b, 0xb8fb02dc, + 0x847b997b, 0xb8fa3b05, + 0x807a817a, 0xbf0d997b, + 0xbfa20002, 0x847a897a, + 0xbfa00001, 0x847a8a7a, + 0xb8fb1e06, 0x847b8a7b, + 0x807a7b7a, 0x8b7bff7f, + 0x0000ffff, 0x807aff7a, + 0x00000200, 0x807a7e7a, + 0x827b807b, 0xd7610000, + 0x00010870, 0xd7610000, + 0x00010a71, 0xd7610000, + 0x00010c72, 0xd7610000, + 0x00010e73, 0xd7610000, + 0x00011074, 0xd7610000, + 0x00011275, 0xd7610000, + 0x00011476, 0xd7610000, + 0x00011677, 0xd7610000, + 0x00011a79, 0xd7610000, + 0x00011c7e, 0xd7610000, + 0x00011e7f, 0xbefe00ff, + 0x00003fff, 0xbeff0080, + 0xdca6c040, 0x007a0000, + 0xd760007a, 0x00011d00, + 0xd760007b, 0x00011f00, + 0xbefe007a, 0xbeff007b, + 0xbef4007e, 0x8b75ff7f, + 0x0000ffff, 0x8c75ff75, + 0x00040000, 0xbef60080, + 0xbef700ff, 0x10807fac, + 0xbef1007d, 0xbef00080, + 0xb8f302dc, 0x84739973, + 0xbefe00c1, 0x857d9973, + 0x8b7d817d, 0xbf06817d, + 0xbfa20002, 0xbeff0080, + 0xbfa00002, 0xbeff00c1, + 0xbfa00009, 0xbef600ff, + 0x01000000, 0xe0685080, + 0x701d0100, 0xe0685100, + 0x701d0200, 0xe0685180, + 0x701d0300, 0xbfa00008, 0xbef600ff, 0x01000000, - 0xe0685080, 0x701d0100, - 0xe0685100, 0x701d0200, - 0xe0685180, 0x701d0300, - 0xbfa00008, 0xbef600ff, - 0x01000000, 0xe0685100, - 0x701d0100, 0xe0685200, - 0x701d0200, 0xe0685300, - 0x701d0300, 0xb8f03b05, - 0x80708170, 0xbf0d9973, - 0xbfa20002, 0x84708970, - 0xbfa00001, 0x84708a70, - 0xb8fa1e06, 0x847a8a7a, - 0x80707a70, 0x8070ff70, - 0x00000200, 0xbef600ff, - 0x01000000, 0x7e000280, - 0x7e020280, 0x7e040280, - 0xbefd0080, 0xd7610002, - 0x0000fa71, 0x807d817d, - 0xd7610002, 0x0000fa6c, - 0x807d817d, 0x917aff6d, - 0x80000000, 0xd7610002, - 0x0000fa7a, 0x807d817d, - 0xd7610002, 0x0000fa6e, - 0x807d817d, 0xd7610002, - 0x0000fa6f, 0x807d817d, - 0xd7610002, 0x0000fa78, - 0x807d817d, 0xb8faf803, - 0xd7610002, 0x0000fa7a, - 0x807d817d, 0xd7610002, - 0x0000fa7b, 0x807d817d, - 0xb8f1f801, 0xd7610002, - 0x0000fa71, 0x807d817d, - 0xb8f1f814, 0xd7610002, - 0x0000fa71, 0x807d817d, - 0xb8f1f815, 0xd7610002, - 0x0000fa71, 0x807d817d, - 0xbefe00ff, 0x0000ffff, - 0xbeff0080, 0xe0685000, - 0x701d0200, 0xbefe00c1, + 0xe0685100, 0x701d0100, + 0xe0685200, 0x701d0200, + 0xe0685300, 0x701d0300, 0xb8f03b05, 0x80708170, 0xbf0d9973, 0xbfa20002, 0x84708970, 0xbfa00001, 0x84708a70, 0xb8fa1e06, 0x847a8a7a, 0x80707a70, + 0x8070ff70, 0x00000200, 0xbef600ff, 0x01000000, - 0xbef90080, 0xbefd0080, - 0xbf800000, 0xbe804100, - 0xbe824102, 0xbe844104, - 0xbe864106, 0xbe884108, - 0xbe8a410a, 0xbe8c410c, - 0xbe8e410e, 0xd7610002, - 0x0000f200, 0x80798179, - 0xd7610002, 0x0000f201, + 0x7e000280, 0x7e020280, + 0x7e040280, 0xbefd0080, + 0xd7610002, 0x0000fa71, + 0x807d817d, 0xd7610002, + 0x0000fa6c, 0x807d817d, + 0x917aff6d, 0x80000000, + 0xd7610002, 0x0000fa7a, + 0x807d817d, 0xd7610002, + 0x0000fa6e, 0x807d817d, + 0xd7610002, 0x0000fa6f, + 0x807d817d, 0xd7610002, + 0x0000fa78, 
0x807d817d, + 0xb8faf803, 0xd7610002, + 0x0000fa7a, 0x807d817d, + 0xd7610002, 0x0000fa7b, + 0x807d817d, 0xb8f1f801, + 0xd7610002, 0x0000fa71, + 0x807d817d, 0xb8f1f814, + 0xd7610002, 0x0000fa71, + 0x807d817d, 0xb8f1f815, + 0xd7610002, 0x0000fa71, + 0x807d817d, 0xbefe00ff, + 0x0000ffff, 0xbeff0080, + 0xe0685000, 0x701d0200, + 0xbefe00c1, 0xb8f03b05, + 0x80708170, 0xbf0d9973, + 0xbfa20002, 0x84708970, + 0xbfa00001, 0x84708a70, + 0xb8fa1e06, 0x847a8a7a, + 0x80707a70, 0xbef600ff, + 0x01000000, 0xbef90080, + 0xbefd0080, 0xbf800000, + 0xbe804100, 0xbe824102, + 0xbe844104, 0xbe864106, + 0xbe884108, 0xbe8a410a, + 0xbe8c410c, 0xbe8e410e, + 0xd7610002, 0x0000f200, 0x80798179, 0xd7610002, - 0x0000f202, 0x80798179, - 0xd7610002, 0x0000f203, + 0x0000f201, 0x80798179, + 0xd7610002, 0x0000f202, 0x80798179, 0xd7610002, - 0x0000f204, 0x80798179, - 0xd7610002, 0x0000f205, + 0x0000f203, 0x80798179, + 0xd7610002, 0x0000f204, 0x80798179, 0xd7610002, - 0x0000f206, 0x80798179, - 0xd7610002, 0x0000f207, + 0x0000f205, 0x80798179, + 0xd7610002, 0x0000f206, 0x80798179, 0xd7610002, - 0x0000f208, 0x80798179, - 0xd7610002, 0x0000f209, + 0x0000f207, 0x80798179, + 0xd7610002, 0x0000f208, 0x80798179, 0xd7610002, - 0x0000f20a, 0x80798179, - 0xd7610002, 0x0000f20b, + 0x0000f209, 0x80798179, + 0xd7610002, 0x0000f20a, 0x80798179, 0xd7610002, - 0x0000f20c, 0x80798179, - 0xd7610002, 0x0000f20d, + 0x0000f20b, 0x80798179, + 0xd7610002, 0x0000f20c, 0x80798179, 0xd7610002, - 0x0000f20e, 0x80798179, - 0xd7610002, 0x0000f20f, - 0x80798179, 0xbf06a079, - 0xbfa10006, 0xe0685000, - 0x701d0200, 0x8070ff70, - 0x00000080, 0xbef90080, - 0x7e040280, 0x807d907d, - 0xbf0aff7d, 0x00000060, - 0xbfa2ffbc, 0xbe804100, - 0xbe824102, 0xbe844104, - 0xbe864106, 0xbe884108, - 0xbe8a410a, 0xd7610002, - 0x0000f200, 0x80798179, - 0xd7610002, 0x0000f201, + 0x0000f20d, 0x80798179, + 0xd7610002, 0x0000f20e, 0x80798179, 0xd7610002, - 0x0000f202, 0x80798179, - 0xd7610002, 0x0000f203, + 0x0000f20f, 0x80798179, + 0xbf06a079, 0xbfa10006, + 0xe0685000, 0x701d0200, + 0x8070ff70, 0x00000080, + 0xbef90080, 0x7e040280, + 0x807d907d, 0xbf0aff7d, + 0x00000060, 0xbfa2ffbc, + 0xbe804100, 0xbe824102, + 0xbe844104, 0xbe864106, + 0xbe884108, 0xbe8a410a, + 0xd7610002, 0x0000f200, 0x80798179, 0xd7610002, - 0x0000f204, 0x80798179, - 0xd7610002, 0x0000f205, + 0x0000f201, 0x80798179, + 0xd7610002, 0x0000f202, 0x80798179, 0xd7610002, - 0x0000f206, 0x80798179, - 0xd7610002, 0x0000f207, + 0x0000f203, 0x80798179, + 0xd7610002, 0x0000f204, 0x80798179, 0xd7610002, - 0x0000f208, 0x80798179, - 0xd7610002, 0x0000f209, + 0x0000f205, 0x80798179, + 0xd7610002, 0x0000f206, 0x80798179, 0xd7610002, - 0x0000f20a, 0x80798179, - 0xd7610002, 0x0000f20b, - 0x80798179, 0xe0685000, - 0x701d0200, 0xbefe00c1, - 0x857d9973, 0x8b7d817d, - 0xbf06817d, 0xbfa20002, - 0xbeff0080, 0xbfa00001, - 0xbeff00c1, 0xb8fb4306, - 0x8b7bc17b, 0xbfa10044, - 0xbfbd0000, 0x8b7aff6d, - 0x80000000, 0xbfa10040, - 0x847b867b, 0x847b827b, - 0xbef6007b, 0xb8f03b05, - 0x80708170, 0xbf0d9973, - 0xbfa20002, 0x84708970, - 0xbfa00001, 0x84708a70, - 0xb8fa1e06, 0x847a8a7a, - 0x80707a70, 0x8070ff70, - 0x00000200, 0x8070ff70, - 0x00000080, 0xbef600ff, - 0x01000000, 0xd71f0000, - 0x000100c1, 0xd7200000, - 0x000200c1, 0x16000084, - 0x857d9973, 0x8b7d817d, - 0xbf06817d, 0xbefd0080, - 0xbfa20012, 0xbe8300ff, - 0x00000080, 0xbf800000, - 0xbf800000, 0xbf800000, - 0xd8d80000, 0x01000000, - 0xbf890000, 0xe0685000, - 0x701d0100, 0x807d037d, - 0x80700370, 0xd5250000, - 0x0001ff00, 0x00000080, - 0xbf0a7b7d, 0xbfa2fff4, - 0xbfa00011, 0xbe8300ff, - 
0x00000100, 0xbf800000, - 0xbf800000, 0xbf800000, - 0xd8d80000, 0x01000000, - 0xbf890000, 0xe0685000, - 0x701d0100, 0x807d037d, - 0x80700370, 0xd5250000, - 0x0001ff00, 0x00000100, - 0xbf0a7b7d, 0xbfa2fff4, + 0x0000f207, 0x80798179, + 0xd7610002, 0x0000f208, + 0x80798179, 0xd7610002, + 0x0000f209, 0x80798179, + 0xd7610002, 0x0000f20a, + 0x80798179, 0xd7610002, + 0x0000f20b, 0x80798179, + 0xe0685000, 0x701d0200, 0xbefe00c1, 0x857d9973, 0x8b7d817d, 0xbf06817d, - 0xbfa20004, 0xbef000ff, - 0x00000200, 0xbeff0080, - 0xbfa00003, 0xbef000ff, - 0x00000400, 0xbeff00c1, - 0xb8fb3b05, 0x807b817b, - 0x847b827b, 0x857d9973, + 0xbfa20002, 0xbeff0080, + 0xbfa00001, 0xbeff00c1, + 0xb8fb4306, 0x8b7bc17b, + 0xbfa10044, 0xbfbd0000, + 0x8b7aff6d, 0x80000000, + 0xbfa10040, 0x847b867b, + 0x847b827b, 0xbef6007b, + 0xb8f03b05, 0x80708170, + 0xbf0d9973, 0xbfa20002, + 0x84708970, 0xbfa00001, + 0x84708a70, 0xb8fa1e06, + 0x847a8a7a, 0x80707a70, + 0x8070ff70, 0x00000200, + 0x8070ff70, 0x00000080, + 0xbef600ff, 0x01000000, + 0xd71f0000, 0x000100c1, + 0xd7200000, 0x000200c1, + 0x16000084, 0x857d9973, 0x8b7d817d, 0xbf06817d, - 0xbfa20017, 0xbef600ff, - 0x01000000, 0xbefd0084, - 0xbf0a7b7d, 0xbfa10037, - 0x7e008700, 0x7e028701, - 0x7e048702, 0x7e068703, - 0xe0685000, 0x701d0000, - 0xe0685080, 0x701d0100, - 0xe0685100, 0x701d0200, - 0xe0685180, 0x701d0300, - 0x807d847d, 0x8070ff70, - 0x00000200, 0xbf0a7b7d, - 0xbfa2ffef, 0xbfa00025, + 0xbefd0080, 0xbfa20012, + 0xbe8300ff, 0x00000080, + 0xbf800000, 0xbf800000, + 0xbf800000, 0xd8d80000, + 0x01000000, 0xbf890000, + 0xe0685000, 0x701d0100, + 0x807d037d, 0x80700370, + 0xd5250000, 0x0001ff00, + 0x00000080, 0xbf0a7b7d, + 0xbfa2fff4, 0xbfa00011, + 0xbe8300ff, 0x00000100, + 0xbf800000, 0xbf800000, + 0xbf800000, 0xd8d80000, + 0x01000000, 0xbf890000, + 0xe0685000, 0x701d0100, + 0x807d037d, 0x80700370, + 0xd5250000, 0x0001ff00, + 0x00000100, 0xbf0a7b7d, + 0xbfa2fff4, 0xbefe00c1, + 0x857d9973, 0x8b7d817d, + 0xbf06817d, 0xbfa20004, + 0xbef000ff, 0x00000200, + 0xbeff0080, 0xbfa00003, + 0xbef000ff, 0x00000400, + 0xbeff00c1, 0xb8fb3b05, + 0x807b817b, 0x847b827b, + 0x857d9973, 0x8b7d817d, + 0xbf06817d, 0xbfa20017, 0xbef600ff, 0x01000000, 0xbefd0084, 0xbf0a7b7d, - 0xbfa10011, 0x7e008700, + 0xbfa10037, 0x7e008700, 0x7e028701, 0x7e048702, 0x7e068703, 0xe0685000, - 0x701d0000, 0xe0685100, - 0x701d0100, 0xe0685200, - 0x701d0200, 0xe0685300, + 0x701d0000, 0xe0685080, + 0x701d0100, 0xe0685100, + 0x701d0200, 0xe0685180, 0x701d0300, 0x807d847d, - 0x8070ff70, 0x00000400, + 0x8070ff70, 0x00000200, 0xbf0a7b7d, 0xbfa2ffef, - 0xb8fb1e06, 0x8b7bc17b, - 0xbfa1000c, 0x847b837b, - 0x807b7d7b, 0xbefe00c1, - 0xbeff0080, 0x7e008700, + 0xbfa00025, 0xbef600ff, + 0x01000000, 0xbefd0084, + 0xbf0a7b7d, 0xbfa10011, + 0x7e008700, 0x7e028701, + 0x7e048702, 0x7e068703, 0xe0685000, 0x701d0000, - 0x807d817d, 0x8070ff70, - 0x00000080, 0xbf0a7b7d, - 0xbfa2fff8, 0xbfa00146, - 0xbef4007e, 0x8b75ff7f, - 0x0000ffff, 0x8c75ff75, - 0x00040000, 0xbef60080, - 0xbef700ff, 0x10807fac, - 0xb8f202dc, 0x84729972, - 0x8b6eff7f, 0x04000000, - 0xbfa1003a, 0xbefe00c1, - 0x857d9972, 0x8b7d817d, - 0xbf06817d, 0xbfa20002, - 0xbeff0080, 0xbfa00001, - 0xbeff00c1, 0xb8ef4306, - 0x8b6fc16f, 0xbfa1002f, - 0x846f866f, 0x846f826f, - 0xbef6006f, 0xb8f83b05, - 0x80788178, 0xbf0d9972, - 0xbfa20002, 0x84788978, - 0xbfa00001, 0x84788a78, - 0xb8ee1e06, 0x846e8a6e, - 0x80786e78, 0x8078ff78, - 0x00000200, 0x8078ff78, - 0x00000080, 0xbef600ff, - 0x01000000, 0x857d9972, - 0x8b7d817d, 0xbf06817d, - 0xbefd0080, 0xbfa2000c, - 0xe0500000, 0x781d0000, - 0xbf8903f7, 
0xdac00000, - 0x00000000, 0x807dff7d, - 0x00000080, 0x8078ff78, - 0x00000080, 0xbf0a6f7d, - 0xbfa2fff5, 0xbfa0000b, - 0xe0500000, 0x781d0000, - 0xbf8903f7, 0xdac00000, - 0x00000000, 0x807dff7d, - 0x00000100, 0x8078ff78, - 0x00000100, 0xbf0a6f7d, - 0xbfa2fff5, 0xbef80080, + 0xe0685100, 0x701d0100, + 0xe0685200, 0x701d0200, + 0xe0685300, 0x701d0300, + 0x807d847d, 0x8070ff70, + 0x00000400, 0xbf0a7b7d, + 0xbfa2ffef, 0xb8fb1e06, + 0x8b7bc17b, 0xbfa1000c, + 0x847b837b, 0x807b7d7b, + 0xbefe00c1, 0xbeff0080, + 0x7e008700, 0xe0685000, + 0x701d0000, 0x807d817d, + 0x8070ff70, 0x00000080, + 0xbf0a7b7d, 0xbfa2fff8, + 0xbfa00146, 0xbef4007e, + 0x8b75ff7f, 0x0000ffff, + 0x8c75ff75, 0x00040000, + 0xbef60080, 0xbef700ff, + 0x10807fac, 0xb8f202dc, + 0x84729972, 0x8b6eff7f, + 0x04000000, 0xbfa1003a, 0xbefe00c1, 0x857d9972, 0x8b7d817d, 0xbf06817d, 0xbfa20002, 0xbeff0080, 0xbfa00001, 0xbeff00c1, - 0xb8ef3b05, 0x806f816f, - 0x846f826f, 0x857d9972, - 0x8b7d817d, 0xbf06817d, - 0xbfa20024, 0xbef600ff, - 0x01000000, 0xbeee0078, + 0xb8ef4306, 0x8b6fc16f, + 0xbfa1002f, 0x846f866f, + 0x846f826f, 0xbef6006f, + 0xb8f83b05, 0x80788178, + 0xbf0d9972, 0xbfa20002, + 0x84788978, 0xbfa00001, + 0x84788a78, 0xb8ee1e06, + 0x846e8a6e, 0x80786e78, 0x8078ff78, 0x00000200, - 0xbefd0084, 0xbf0a6f7d, - 0xbfa10050, 0xe0505000, - 0x781d0000, 0xe0505080, - 0x781d0100, 0xe0505100, - 0x781d0200, 0xe0505180, - 0x781d0300, 0xbf8903f7, - 0x7e008500, 0x7e028501, - 0x7e048502, 0x7e068503, - 0x807d847d, 0x8078ff78, - 0x00000200, 0xbf0a6f7d, - 0xbfa2ffee, 0xe0505000, - 0x6e1d0000, 0xe0505080, - 0x6e1d0100, 0xe0505100, - 0x6e1d0200, 0xe0505180, - 0x6e1d0300, 0xbf8903f7, - 0xbfa00034, 0xbef600ff, - 0x01000000, 0xbeee0078, - 0x8078ff78, 0x00000400, - 0xbefd0084, 0xbf0a6f7d, - 0xbfa10012, 0xe0505000, - 0x781d0000, 0xe0505100, - 0x781d0100, 0xe0505200, - 0x781d0200, 0xe0505300, - 0x781d0300, 0xbf8903f7, - 0x7e008500, 0x7e028501, - 0x7e048502, 0x7e068503, - 0x807d847d, 0x8078ff78, - 0x00000400, 0xbf0a6f7d, - 0xbfa2ffee, 0xb8ef1e06, - 0x8b6fc16f, 0xbfa1000e, - 0x846f836f, 0x806f7d6f, - 0xbefe00c1, 0xbeff0080, + 0x8078ff78, 0x00000080, + 0xbef600ff, 0x01000000, + 0x857d9972, 0x8b7d817d, + 0xbf06817d, 0xbefd0080, + 0xbfa2000c, 0xe0500000, + 0x781d0000, 0xbf8903f7, + 0xdac00000, 0x00000000, + 0x807dff7d, 0x00000080, + 0x8078ff78, 0x00000080, + 0xbf0a6f7d, 0xbfa2fff5, + 0xbfa0000b, 0xe0500000, + 0x781d0000, 0xbf8903f7, + 0xdac00000, 0x00000000, + 0x807dff7d, 0x00000100, + 0x8078ff78, 0x00000100, + 0xbf0a6f7d, 0xbfa2fff5, + 0xbef80080, 0xbefe00c1, + 0x857d9972, 0x8b7d817d, + 0xbf06817d, 0xbfa20002, + 0xbeff0080, 0xbfa00001, + 0xbeff00c1, 0xb8ef3b05, + 0x806f816f, 0x846f826f, + 0x857d9972, 0x8b7d817d, + 0xbf06817d, 0xbfa20024, + 0xbef600ff, 0x01000000, + 0xbeee0078, 0x8078ff78, + 0x00000200, 0xbefd0084, + 0xbf0a6f7d, 0xbfa10050, 0xe0505000, 0x781d0000, + 0xe0505080, 0x781d0100, + 0xe0505100, 0x781d0200, + 0xe0505180, 0x781d0300, 0xbf8903f7, 0x7e008500, - 0x807d817d, 0x8078ff78, - 0x00000080, 0xbf0a6f7d, - 0xbfa2fff7, 0xbeff00c1, + 0x7e028501, 0x7e048502, + 0x7e068503, 0x807d847d, + 0x8078ff78, 0x00000200, + 0xbf0a6f7d, 0xbfa2ffee, 0xe0505000, 0x6e1d0000, - 0xe0505100, 0x6e1d0100, - 0xe0505200, 0x6e1d0200, - 0xe0505300, 0x6e1d0300, - 0xbf8903f7, 0xb8f83b05, - 0x80788178, 0xbf0d9972, - 0xbfa20002, 0x84788978, - 0xbfa00001, 0x84788a78, - 0xb8ee1e06, 0x846e8a6e, - 0x80786e78, 0x8078ff78, - 0x00000200, 0x80f8ff78, - 0x00000050, 0xbef600ff, - 0x01000000, 0xbefd00ff, - 0x0000006c, 0x80f89078, - 0xf428403a, 0xf0000000, - 0xbf89fc07, 0x80fd847d, - 0xbf800000, 
0xbe804300, - 0xbe824302, 0x80f8a078, - 0xf42c403a, 0xf0000000, - 0xbf89fc07, 0x80fd887d, - 0xbf800000, 0xbe804300, - 0xbe824302, 0xbe844304, - 0xbe864306, 0x80f8c078, - 0xf430403a, 0xf0000000, - 0xbf89fc07, 0x80fd907d, - 0xbf800000, 0xbe804300, - 0xbe824302, 0xbe844304, - 0xbe864306, 0xbe884308, - 0xbe8a430a, 0xbe8c430c, - 0xbe8e430e, 0xbf06807d, - 0xbfa1fff0, 0xb980f801, - 0x00000000, 0xbfbd0000, + 0xe0505080, 0x6e1d0100, + 0xe0505100, 0x6e1d0200, + 0xe0505180, 0x6e1d0300, + 0xbf8903f7, 0xbfa00034, + 0xbef600ff, 0x01000000, + 0xbeee0078, 0x8078ff78, + 0x00000400, 0xbefd0084, + 0xbf0a6f7d, 0xbfa10012, + 0xe0505000, 0x781d0000, + 0xe0505100, 0x781d0100, + 0xe0505200, 0x781d0200, + 0xe0505300, 0x781d0300, + 0xbf8903f7, 0x7e008500, + 0x7e028501, 0x7e048502, + 0x7e068503, 0x807d847d, + 0x8078ff78, 0x00000400, + 0xbf0a6f7d, 0xbfa2ffee, + 0xb8ef1e06, 0x8b6fc16f, + 0xbfa1000e, 0x846f836f, + 0x806f7d6f, 0xbefe00c1, + 0xbeff0080, 0xe0505000, + 0x781d0000, 0xbf8903f7, + 0x7e008500, 0x807d817d, + 0x8078ff78, 0x00000080, + 0xbf0a6f7d, 0xbfa2fff7, + 0xbeff00c1, 0xe0505000, + 0x6e1d0000, 0xe0505100, + 0x6e1d0100, 0xe0505200, + 0x6e1d0200, 0xe0505300, + 0x6e1d0300, 0xbf8903f7, 0xb8f83b05, 0x80788178, 0xbf0d9972, 0xbfa20002, 0x84788978, 0xbfa00001, 0x84788a78, 0xb8ee1e06, 0x846e8a6e, 0x80786e78, 0x8078ff78, 0x00000200, + 0x80f8ff78, 0x00000050, 0xbef600ff, 0x01000000, - 0xf4205bfa, 0xf0000000, - 0x80788478, 0xf4205b3a, + 0xbefd00ff, 0x0000006c, + 0x80f89078, 0xf428403a, + 0xf0000000, 0xbf89fc07, + 0x80fd847d, 0xbf800000, + 0xbe804300, 0xbe824302, + 0x80f8a078, 0xf42c403a, + 0xf0000000, 0xbf89fc07, + 0x80fd887d, 0xbf800000, + 0xbe804300, 0xbe824302, + 0xbe844304, 0xbe864306, + 0x80f8c078, 0xf430403a, + 0xf0000000, 0xbf89fc07, + 0x80fd907d, 0xbf800000, + 0xbe804300, 0xbe824302, + 0xbe844304, 0xbe864306, + 0xbe884308, 0xbe8a430a, + 0xbe8c430c, 0xbe8e430e, + 0xbf06807d, 0xbfa1fff0, + 0xb980f801, 0x00000000, + 0xbfbd0000, 0xb8f83b05, + 0x80788178, 0xbf0d9972, + 0xbfa20002, 0x84788978, + 0xbfa00001, 0x84788a78, + 0xb8ee1e06, 0x846e8a6e, + 0x80786e78, 0x8078ff78, + 0x00000200, 0xbef600ff, + 0x01000000, 0xf4205bfa, 0xf0000000, 0x80788478, - 0xf4205b7a, 0xf0000000, - 0x80788478, 0xf4205c3a, + 0xf4205b3a, 0xf0000000, + 0x80788478, 0xf4205b7a, 0xf0000000, 0x80788478, - 0xf4205c7a, 0xf0000000, - 0x80788478, 0xf4205eba, + 0xf4205c3a, 0xf0000000, + 0x80788478, 0xf4205c7a, 0xf0000000, 0x80788478, - 0xf4205efa, 0xf0000000, - 0x80788478, 0xf4205e7a, + 0xf4205eba, 0xf0000000, + 0x80788478, 0xf4205efa, 0xf0000000, 0x80788478, - 0xf4205cfa, 0xf0000000, - 0x80788478, 0xf4205bba, + 0xf4205e7a, 0xf0000000, + 0x80788478, 0xf4205cfa, 0xf0000000, 0x80788478, - 0xbf89fc07, 0xb96ef814, 0xf4205bba, 0xf0000000, 0x80788478, 0xbf89fc07, - 0xb96ef815, 0xbefd006f, - 0xbefe0070, 0xbeff0071, - 0x8b6f7bff, 0x000003ff, - 0xb96f4803, 0x8b6f7bff, - 0xfffff800, 0x856f8b6f, - 0xb96fa2c3, 0xb973f801, - 0xb8ee3b05, 0x806e816e, - 0xbf0d9972, 0xbfa20002, - 0x846e896e, 0xbfa00001, - 0x846e8a6e, 0xb8ef1e06, - 0x846f8a6f, 0x806e6f6e, - 0x806eff6e, 0x00000200, - 0x806e746e, 0x826f8075, - 0x8b6fff6f, 0x0000ffff, - 0xf4085c37, 0xf8000050, - 0xf4085d37, 0xf8000060, - 0xf4005e77, 0xf8000074, - 0xbf89fc07, 0x8b6dff6d, - 0x0000ffff, 0x8bfe7e7e, - 0x8bea6a6a, 0xb8eef802, - 0xbf0d866e, 0xbfa20002, - 0xb97af802, 0xbe80486c, - 0xb97af802, 0xbe804a6c, - 0xbfb00000, 0xbf9f0000, + 0xb96ef814, 0xf4205bba, + 0xf0000000, 0x80788478, + 0xbf89fc07, 0xb96ef815, + 0xbefd006f, 0xbefe0070, + 0xbeff0071, 0x8b6f7bff, + 0x000003ff, 0xb96f4803, + 0x8b6f7bff, 0xfffff800, + 
0x856f8b6f, 0xb96fa2c3, + 0xb973f801, 0xb8ee3b05, + 0x806e816e, 0xbf0d9972, + 0xbfa20002, 0x846e896e, + 0xbfa00001, 0x846e8a6e, + 0xb8ef1e06, 0x846f8a6f, + 0x806e6f6e, 0x806eff6e, + 0x00000200, 0x806e746e, + 0x826f8075, 0x8b6fff6f, + 0x0000ffff, 0xf4085c37, + 0xf8000050, 0xf4085d37, + 0xf8000060, 0xf4005e77, + 0xf8000074, 0xbf89fc07, + 0x8b6dff6d, 0x0000ffff, + 0x8bfe7e7e, 0x8bea6a6a, + 0xb8eef802, 0xbf0d866e, + 0xbfa20002, 0xb97af802, + 0xbe80486c, 0xb97af802, + 0xbe804a6c, 0xbfb00000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, + 0xbf9f0000, 0x00000000, }; diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm index 0f81670f6f9c..8b92c33c2a7c 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm @@ -186,6 +186,12 @@ L_SKIP_RESTORE: s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS) #if SW_SA_TRAP + // If ttmp1[30] is set then issue s_barrier to unblock dependent waves. + s_bitcmp1_b32 s_save_pc_hi, 30 + s_cbranch_scc0 L_TRAP_NO_BARRIER + s_barrier + +L_TRAP_NO_BARRIER: // If ttmp1[31] is set then trap may occur early. // Spin wait until SAVECTX exception is raised. s_bitcmp1_b32 s_save_pc_hi, 31 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 5feaba6a77de..6d291aa6386b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1950,7 +1950,7 @@ static int criu_checkpoint(struct file *filep, { int ret; uint32_t num_devices, num_bos, num_objects; - uint64_t priv_size, priv_offset = 0; + uint64_t priv_size, priv_offset = 0, bo_priv_offset; if (!args->devices || !args->bos || !args->priv_data) return -EINVAL; @@ -1994,38 +1994,34 @@ static int criu_checkpoint(struct file *filep, if (ret) goto exit_unlock; - ret = criu_checkpoint_bos(p, num_bos, (uint8_t __user *)args->bos, - (uint8_t __user *)args->priv_data, &priv_offset); - if (ret) - goto exit_unlock; + /* Leave room for BOs in the private data. They need to be restored + * before events, but we checkpoint them last to simplify the error + * handling. + */ + bo_priv_offset = priv_offset; + priv_offset += num_bos * sizeof(struct kfd_criu_bo_priv_data); if (num_objects) { ret = kfd_criu_checkpoint_queues(p, (uint8_t __user *)args->priv_data, &priv_offset); if (ret) - goto close_bo_fds; + goto exit_unlock; ret = kfd_criu_checkpoint_events(p, (uint8_t __user *)args->priv_data, &priv_offset); if (ret) - goto close_bo_fds; + goto exit_unlock; ret = kfd_criu_checkpoint_svm(p, (uint8_t __user *)args->priv_data, &priv_offset); if (ret) - goto close_bo_fds; + goto exit_unlock; } -close_bo_fds: - if (ret) { - /* If IOCTL returns err, user assumes all FDs opened in criu_dump_bos are closed */ - uint32_t i; - struct kfd_criu_bo_bucket *bo_buckets = (struct kfd_criu_bo_bucket *) args->bos; - - for (i = 0; i < num_bos; i++) { - if (bo_buckets[i].alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) - close_fd(bo_buckets[i].dmabuf_fd); - } - } + /* This must be the last thing in this function that can fail. + * Otherwise we leak dmabuf file descriptors. 
+ */ + ret = criu_checkpoint_bos(p, num_bos, (uint8_t __user *)args->bos, + (uint8_t __user *)args->priv_data, &bo_priv_offset); exit_unlock: mutex_unlock(&p->mutex); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 83e3ce9f6049..729d26d648af 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -506,6 +506,7 @@ int kfd_criu_restore_event(struct file *devkfd, ret = create_other_event(p, ev, &ev_priv->event_id); break; } + mutex_unlock(&p->event_mutex); exit: if (ret) @@ -513,8 +514,6 @@ exit: kfree(ev_priv); - mutex_unlock(&p->event_mutex); - return ret; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 2797029bd500..22b077ac9a19 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -973,12 +973,10 @@ out_unlock_prange: out_unlock_svms: mutex_unlock(&p->svms.lock); out_unref_process: + pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr); kfd_unref_process(p); out_mmput: mmput(mm); - - pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr); - return r ? VM_FAULT_SIGBUS : 0; } diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index 6925e0280dbe..f4f3d2665a6b 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -5,6 +5,7 @@ menu "Display Engine Configuration" config DRM_AMD_DC bool "AMD DC - Enable new display engine" default y + depends on BROKEN || !CC_IS_CLANG || X86_64 || SPARC64 || ARM64 select SND_HDA_COMPONENT if SND_HDA_CORE select DRM_AMD_DC_DCN if (X86 || PPC_LONG_DOUBLE_128) help @@ -12,6 +13,12 @@ config DRM_AMD_DC support for AMDGPU. This adds required support for Vega and Raven ASICs. + calculate_bandwidth() is presently broken on all !(X86_64 || SPARC64 || ARM64) + architectures built with Clang (all released versions), whereby the stack + frame gets blown up to well over 5k. This would cause an immediate kernel + panic on most architectures. We'll revert this when the following bug report + has been resolved: https://github.com/llvm/llvm-project/issues/41896. + config DRM_AMD_DC_DCN def_bool n help diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index c053cb79cd06..512c32327eb1 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -147,6 +147,14 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU); /* Number of bytes in PSP footer for firmware. 
*/ #define PSP_FOOTER_BYTES 0x100 +/* + * DMUB Async to Sync Mechanism Status + */ +#define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1 +#define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2 +#define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3 +#define DMUB_ASYNC_TO_SYNC_ACCESS_INVALID 4 + /** * DOC: overview * @@ -1364,7 +1372,44 @@ static const struct dmi_system_id hpd_disconnect_quirk_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"), }, }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"), + }, + }, {} + /* TODO: refactor this from a fixed table to a dynamic option */ }; static void retrieve_dmi_info(struct amdgpu_display_manager *dm) @@ -1549,6 +1594,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm; + /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */ + adev->dm.dc->debug.ignore_cable_id = true; + r = dm_dmub_hw_init(adev); if (r) { DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); @@ -1634,12 +1682,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) } } - if (amdgpu_dm_initialize_drm_device(adev)) { - DRM_ERROR( - "amdgpu: failed to initialize sw for display support.\n"); - goto error; - } - /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive. * It is expected that DMUB will resend any pending notifications at this point, for * example HPD from DPIA. 
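The new quirk entries above are matched against the platform's DMI vendor and product strings; a simplified standalone model of that table lookup (plain strcmp() matching instead of the kernel's DMI helpers, table contents abbreviated) might look like:

#include <stdio.h>
#include <string.h>

struct dmi_id {
        const char *sys_vendor;
        const char *product_name;
};

/* Abbreviated stand-in for the hpd_disconnect_quirk_table entries. */
static const struct dmi_id quirk_table[] = {
        { "Dell Inc.", "OptiPlex Tower 7010" },
        { "Dell Inc.", "OptiPlex SFF 7010" },
        { NULL, NULL }          /* sentinel, like the trailing {} above */
};

static int needs_hpd_disconnect_quirk(const char *vendor, const char *product)
{
        const struct dmi_id *id;

        for (id = quirk_table; id->sys_vendor; id++)
                if (!strcmp(vendor, id->sys_vendor) &&
                    !strcmp(product, id->product_name))
                        return 1;
        return 0;
}

int main(void)
{
        printf("%d\n", needs_hpd_disconnect_quirk("Dell Inc.", "OptiPlex SFF 7010"));
        return 0;
}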
@@ -1647,6 +1689,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (dc_is_dmub_outbox_supported(adev->dm.dc)) dc_enable_dmub_outbox(adev->dm.dc); + if (amdgpu_dm_initialize_drm_device(adev)) { + DRM_ERROR( + "amdgpu: failed to initialize sw for display support.\n"); + goto error; + } + /* create fake encoders for MST */ dm_dp_create_fake_mst_encoders(adev); @@ -6464,7 +6512,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, struct drm_connector_state *new_con_state; struct amdgpu_dm_connector *aconnector; struct dm_connector_state *dm_conn_state; - int i, j; + int i, j, ret; int vcpi, pbn_div, pbn, slot_num = 0; for_each_new_connector_in_state(state, connector, new_con_state, i) { @@ -6511,8 +6559,11 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, dm_conn_state->pbn = pbn; dm_conn_state->vcpi_slots = slot_num; - drm_dp_mst_atomic_enable_dsc(state, aconnector->port, dm_conn_state->pbn, - false); + ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->port, + dm_conn_state->pbn, false); + if (ret < 0) + return ret; + continue; } @@ -7619,9 +7670,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->surface_updates[planes_count].plane_info = &bundle->plane_infos[planes_count]; - fill_dc_dirty_rects(plane, old_plane_state, new_plane_state, - new_crtc_state, - &bundle->flip_addrs[planes_count]); + if (acrtc_state->stream->link->psr_settings.psr_feature_enabled) + fill_dc_dirty_rects(plane, old_plane_state, + new_plane_state, new_crtc_state, + &bundle->flip_addrs[planes_count]); /* * Only allow immediate flips for fast updates that don't @@ -9525,10 +9577,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, #if defined(CONFIG_DRM_AMD_DC_DCN) if (dc_resource_is_dsc_encoding_supported(dc)) { - if (!pre_validate_dsc(state, &dm_state, vars)) { - ret = -EINVAL; + ret = pre_validate_dsc(state, &dm_state, vars); + if (ret != 0) goto fail; - } } #endif @@ -9623,9 +9674,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, } #if defined(CONFIG_DRM_AMD_DC_DCN) - if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) { + ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars); + if (ret) { DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n"); - ret = -EINVAL; goto fail; } @@ -10105,6 +10156,8 @@ static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, *operation_result = AUX_RET_ERROR_TIMEOUT; } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) { *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE; + } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_INVALID) { + *operation_result = AUX_RET_ERROR_INVALID_REPLY; } else { *operation_result = AUX_RET_ERROR_UNKNOWN; } @@ -10152,6 +10205,16 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context payload->reply[0] = adev->dm.dmub_notify->aux_reply.command; if (!payload->write && adev->dm.dmub_notify->aux_reply.length && payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) { + + if (payload->length != adev->dm.dmub_notify->aux_reply.length) { + DRM_WARN("invalid read from DPIA AUX %x(%d) got length %d!\n", + payload->address, payload->length, + adev->dm.dmub_notify->aux_reply.length); + return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux, ctx, + DMUB_ASYNC_TO_SYNC_ACCESS_INVALID, + (uint32_t *)operation_result); + } + memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data, adev->dm.dmub_notify->aux_reply.length); } diff --git 
a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index b5ce15c43bcc..635c398fcefe 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -51,12 +51,6 @@ #define AMDGPU_DMUB_NOTIFICATION_MAX 5 /* - * DMUB Async to Sync Mechanism Status - */ -#define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1 -#define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2 -#define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3 -/* #include "include/amdgpu_dal_power_if.h" #include "amdgpu_dm_irq.h" */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 594fe8a4d02b..64dd02970292 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -412,7 +412,7 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, { struct amdgpu_crtc *acrtc = NULL; struct drm_plane *cursor_plane; - + bool is_dcn; int res = -ENOMEM; cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL); @@ -450,8 +450,14 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, acrtc->otg_inst = -1; dm->adev->mode_info.crtcs[crtc_index] = acrtc; - drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES, + + /* Don't enable DRM CRTC degamma property for DCE since it doesn't + * support programmable degamma anywhere. + */ + is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch; + drm_crtc_enable_color_mgmt(&acrtc->base, is_dcn ? MAX_COLOR_LUT_ENTRIES : 0, true, MAX_COLOR_LUT_ENTRIES); + drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES); return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 6ff96b4bdda5..6483ba266893 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -703,13 +703,13 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn) return dsc_config.bits_per_pixel; } -static bool increase_dsc_bpp(struct drm_atomic_state *state, - struct drm_dp_mst_topology_state *mst_state, - struct dc_link *dc_link, - struct dsc_mst_fairness_params *params, - struct dsc_mst_fairness_vars *vars, - int count, - int k) +static int increase_dsc_bpp(struct drm_atomic_state *state, + struct drm_dp_mst_topology_state *mst_state, + struct dc_link *dc_link, + struct dsc_mst_fairness_params *params, + struct dsc_mst_fairness_vars *vars, + int count, + int k) { int i; bool bpp_increased[MAX_PIPES]; @@ -719,6 +719,7 @@ static bool increase_dsc_bpp(struct drm_atomic_state *state, int remaining_to_increase = 0; int link_timeslots_used; int fair_pbn_alloc; + int ret = 0; for (i = 0; i < count; i++) { if (vars[i + k].dsc_enabled) { @@ -757,52 +758,60 @@ static bool increase_dsc_bpp(struct drm_atomic_state *state, if (initial_slack[next_index] > fair_pbn_alloc) { vars[next_index].pbn += fair_pbn_alloc; - if (drm_dp_atomic_find_time_slots(state, - params[next_index].port->mgr, - params[next_index].port, - vars[next_index].pbn) < 0) - return false; - if (!drm_dp_mst_atomic_check(state)) { + ret = drm_dp_atomic_find_time_slots(state, + params[next_index].port->mgr, + params[next_index].port, + vars[next_index].pbn); + if (ret < 0) + return ret; + + ret = drm_dp_mst_atomic_check(state); + if (ret == 0) { vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn); } else { 
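These helpers are being converted from bool to int so that the exact errno (for example -ENOSPC from the MST atomic check versus -EINVAL) reaches the atomic core instead of being collapsed to false. A minimal sketch of the return-code pattern, with a hypothetical find_time_slots() helper:

#include <errno.h>
#include <stdio.h>

/* Hypothetical helper: 0 on success, negative errno on failure. */
static int find_time_slots(int pbn)
{
        return pbn > 0 ? 0 : -EINVAL;
}

/* Old style: the reason for the failure is lost. */
static int try_allocate_bool(int pbn)
{
        return find_time_slots(pbn) == 0;       /* 1 = ok, 0 = some error */
}

/* New style: the errno is propagated unchanged to the caller. */
static int try_allocate(int pbn)
{
        int ret = find_time_slots(pbn);

        if (ret < 0)
                return ret;
        return 0;
}

int main(void)
{
        printf("bool style: %d, errno style: %d\n",
               try_allocate_bool(-1), try_allocate(-1));
        return 0;
}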
vars[next_index].pbn -= fair_pbn_alloc; - if (drm_dp_atomic_find_time_slots(state, - params[next_index].port->mgr, - params[next_index].port, - vars[next_index].pbn) < 0) - return false; + ret = drm_dp_atomic_find_time_slots(state, + params[next_index].port->mgr, + params[next_index].port, + vars[next_index].pbn); + if (ret < 0) + return ret; } } else { vars[next_index].pbn += initial_slack[next_index]; - if (drm_dp_atomic_find_time_slots(state, - params[next_index].port->mgr, - params[next_index].port, - vars[next_index].pbn) < 0) - return false; - if (!drm_dp_mst_atomic_check(state)) { + ret = drm_dp_atomic_find_time_slots(state, + params[next_index].port->mgr, + params[next_index].port, + vars[next_index].pbn); + if (ret < 0) + return ret; + + ret = drm_dp_mst_atomic_check(state); + if (ret == 0) { vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16; } else { vars[next_index].pbn -= initial_slack[next_index]; - if (drm_dp_atomic_find_time_slots(state, - params[next_index].port->mgr, - params[next_index].port, - vars[next_index].pbn) < 0) - return false; + ret = drm_dp_atomic_find_time_slots(state, + params[next_index].port->mgr, + params[next_index].port, + vars[next_index].pbn); + if (ret < 0) + return ret; } } bpp_increased[next_index] = true; remaining_to_increase--; } - return true; + return 0; } -static bool try_disable_dsc(struct drm_atomic_state *state, - struct dc_link *dc_link, - struct dsc_mst_fairness_params *params, - struct dsc_mst_fairness_vars *vars, - int count, - int k) +static int try_disable_dsc(struct drm_atomic_state *state, + struct dc_link *dc_link, + struct dsc_mst_fairness_params *params, + struct dsc_mst_fairness_vars *vars, + int count, + int k) { int i; bool tried[MAX_PIPES]; @@ -810,6 +819,7 @@ static bool try_disable_dsc(struct drm_atomic_state *state, int max_kbps_increase; int next_index; int remaining_to_try = 0; + int ret; for (i = 0; i < count; i++) { if (vars[i + k].dsc_enabled @@ -840,49 +850,52 @@ static bool try_disable_dsc(struct drm_atomic_state *state, break; vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps); - if (drm_dp_atomic_find_time_slots(state, - params[next_index].port->mgr, - params[next_index].port, - vars[next_index].pbn) < 0) - return false; + ret = drm_dp_atomic_find_time_slots(state, + params[next_index].port->mgr, + params[next_index].port, + vars[next_index].pbn); + if (ret < 0) + return ret; - if (!drm_dp_mst_atomic_check(state)) { + ret = drm_dp_mst_atomic_check(state); + if (ret == 0) { vars[next_index].dsc_enabled = false; vars[next_index].bpp_x16 = 0; } else { vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps); - if (drm_dp_atomic_find_time_slots(state, - params[next_index].port->mgr, - params[next_index].port, - vars[next_index].pbn) < 0) - return false; + ret = drm_dp_atomic_find_time_slots(state, + params[next_index].port->mgr, + params[next_index].port, + vars[next_index].pbn); + if (ret < 0) + return ret; } tried[next_index] = true; remaining_to_try--; } - return true; + return 0; } -static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, - struct dc_state *dc_state, - struct dc_link *dc_link, - struct dsc_mst_fairness_vars *vars, - struct drm_dp_mst_topology_mgr *mgr, - int *link_vars_start_index) +static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, + struct dc_state *dc_state, + struct dc_link *dc_link, + struct dsc_mst_fairness_vars *vars, + struct drm_dp_mst_topology_mgr *mgr, + int 
*link_vars_start_index) { struct dc_stream_state *stream; struct dsc_mst_fairness_params params[MAX_PIPES]; struct amdgpu_dm_connector *aconnector; struct drm_dp_mst_topology_state *mst_state = drm_atomic_get_mst_topology_state(state, mgr); int count = 0; - int i, k; + int i, k, ret; bool debugfs_overwrite = false; memset(params, 0, sizeof(params)); if (IS_ERR(mst_state)) - return false; + return PTR_ERR(mst_state); mst_state->pbn_div = dm_mst_get_pbn_divider(dc_link); #if defined(CONFIG_DRM_AMD_DC_DCN) @@ -933,7 +946,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, if (count == 0) { ASSERT(0); - return true; + return 0; } /* k is start index of vars for current phy link used by mst hub */ @@ -947,13 +960,17 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); vars[i + k].dsc_enabled = false; vars[i + k].bpp_x16 = 0; - if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port, - vars[i + k].pbn) < 0) - return false; + ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port, + vars[i + k].pbn); + if (ret < 0) + return ret; } - if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) { + ret = drm_dp_mst_atomic_check(state); + if (ret == 0 && !debugfs_overwrite) { set_dsc_configs_from_fairness_vars(params, vars, count, k); - return true; + return 0; + } else if (ret != -ENOSPC) { + return ret; } /* Try max compression */ @@ -962,31 +979,36 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps); vars[i + k].dsc_enabled = true; vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16; - if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr, - params[i].port, vars[i + k].pbn) < 0) - return false; + ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, + params[i].port, vars[i + k].pbn); + if (ret < 0) + return ret; } else { vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); vars[i + k].dsc_enabled = false; vars[i + k].bpp_x16 = 0; - if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr, - params[i].port, vars[i + k].pbn) < 0) - return false; + ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, + params[i].port, vars[i + k].pbn); + if (ret < 0) + return ret; } } - if (drm_dp_mst_atomic_check(state)) - return false; + ret = drm_dp_mst_atomic_check(state); + if (ret != 0) + return ret; /* Optimize degree of compression */ - if (!increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k)) - return false; + ret = increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k); + if (ret < 0) + return ret; - if (!try_disable_dsc(state, dc_link, params, vars, count, k)) - return false; + ret = try_disable_dsc(state, dc_link, params, vars, count, k); + if (ret < 0) + return ret; set_dsc_configs_from_fairness_vars(params, vars, count, k); - return true; + return 0; } static bool is_dsc_need_re_compute( @@ -1087,15 +1109,17 @@ static bool is_dsc_need_re_compute( return is_dsc_need_re_compute; } -bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, - struct dc_state *dc_state, - struct dsc_mst_fairness_vars *vars) +int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, + struct dc_state *dc_state, + struct dsc_mst_fairness_vars *vars) { int i, j; struct dc_stream_state *stream; bool computed_streams[MAX_PIPES]; struct amdgpu_dm_connector 
*aconnector; + struct drm_dp_mst_topology_mgr *mst_mgr; int link_vars_start_index = 0; + int ret = 0; for (i = 0; i < dc_state->stream_count; i++) computed_streams[i] = false; @@ -1108,7 +1132,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; - if (!aconnector || !aconnector->dc_sink) + if (!aconnector || !aconnector->dc_sink || !aconnector->port) continue; if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported) @@ -1118,19 +1142,16 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, continue; if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK) - return false; + return -EINVAL; if (!is_dsc_need_re_compute(state, dc_state, stream->link)) continue; - mutex_lock(&aconnector->mst_mgr.lock); - if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, - &aconnector->mst_mgr, - &link_vars_start_index)) { - mutex_unlock(&aconnector->mst_mgr.lock); - return false; - } - mutex_unlock(&aconnector->mst_mgr.lock); + mst_mgr = aconnector->port->mgr; + ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr, + &link_vars_start_index); + if (ret != 0) + return ret; for (j = 0; j < dc_state->stream_count; j++) { if (dc_state->streams[j]->link == stream->link) @@ -1143,22 +1164,23 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, if (stream->timing.flags.DSC == 1) if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK) - return false; + return -EINVAL; } - return true; + return ret; } -static bool - pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, - struct dc_state *dc_state, - struct dsc_mst_fairness_vars *vars) +static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, + struct dc_state *dc_state, + struct dsc_mst_fairness_vars *vars) { int i, j; struct dc_stream_state *stream; bool computed_streams[MAX_PIPES]; struct amdgpu_dm_connector *aconnector; + struct drm_dp_mst_topology_mgr *mst_mgr; int link_vars_start_index = 0; + int ret = 0; for (i = 0; i < dc_state->stream_count; i++) computed_streams[i] = false; @@ -1171,7 +1193,7 @@ static bool aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; - if (!aconnector || !aconnector->dc_sink) + if (!aconnector || !aconnector->dc_sink || !aconnector->port) continue; if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported) @@ -1183,14 +1205,11 @@ static bool if (!is_dsc_need_re_compute(state, dc_state, stream->link)) continue; - mutex_lock(&aconnector->mst_mgr.lock); - if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, - &aconnector->mst_mgr, - &link_vars_start_index)) { - mutex_unlock(&aconnector->mst_mgr.lock); - return false; - } - mutex_unlock(&aconnector->mst_mgr.lock); + mst_mgr = aconnector->port->mgr; + ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr, + &link_vars_start_index); + if (ret != 0) + return ret; for (j = 0; j < dc_state->stream_count; j++) { if (dc_state->streams[j]->link == stream->link) @@ -1198,7 +1217,7 @@ static bool } } - return true; + return ret; } static int find_crtc_index_in_state_by_stream(struct drm_atomic_state *state, @@ -1253,9 +1272,9 @@ static bool is_dsc_precompute_needed(struct drm_atomic_state *state) return ret; } -bool pre_validate_dsc(struct drm_atomic_state *state, - struct dm_atomic_state **dm_state_ptr, - struct dsc_mst_fairness_vars *vars) +int 
pre_validate_dsc(struct drm_atomic_state *state, + struct dm_atomic_state **dm_state_ptr, + struct dsc_mst_fairness_vars *vars) { int i; struct dm_atomic_state *dm_state; @@ -1264,11 +1283,12 @@ bool pre_validate_dsc(struct drm_atomic_state *state, if (!is_dsc_precompute_needed(state)) { DRM_INFO_ONCE("DSC precompute is not needed.\n"); - return true; + return 0; } - if (dm_atomic_get_state(state, dm_state_ptr)) { + ret = dm_atomic_get_state(state, dm_state_ptr); + if (ret != 0) { DRM_INFO_ONCE("dm_atomic_get_state() failed\n"); - return false; + return ret; } dm_state = *dm_state_ptr; @@ -1280,7 +1300,7 @@ bool pre_validate_dsc(struct drm_atomic_state *state, local_dc_state = kmemdup(dm_state->context, sizeof(struct dc_state), GFP_KERNEL); if (!local_dc_state) - return false; + return -ENOMEM; for (i = 0; i < local_dc_state->stream_count; i++) { struct dc_stream_state *stream = dm_state->context->streams[i]; @@ -1316,9 +1336,9 @@ bool pre_validate_dsc(struct drm_atomic_state *state, if (ret != 0) goto clean_exit; - if (!pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars)) { + ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars); + if (ret != 0) { DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n"); - ret = -EINVAL; goto clean_exit; } @@ -1349,7 +1369,7 @@ clean_exit: kfree(local_dc_state); - return (ret == 0); + return ret; } static unsigned int kbps_from_pbn(unsigned int pbn) @@ -1392,6 +1412,7 @@ enum dc_status dm_dp_mst_is_port_support_mode( unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0; unsigned int max_compressed_bw_in_kbps = 0; struct dc_dsc_bw_range bw_range = {0}; + struct drm_dp_mst_topology_mgr *mst_mgr; /* * check if the mode could be supported if DSC pass-through is supported @@ -1400,7 +1421,8 @@ enum dc_status dm_dp_mst_is_port_support_mode( */ if (is_dsc_common_config_possible(stream, &bw_range) && aconnector->port->passthrough_aux) { - mutex_lock(&aconnector->mst_mgr.lock); + mst_mgr = aconnector->port->mgr; + mutex_lock(&mst_mgr->lock); cur_link_settings = stream->link->verified_link_cap; @@ -1413,7 +1435,7 @@ enum dc_status dm_dp_mst_is_port_support_mode( end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps, down_link_bw_in_kbps); - mutex_unlock(&aconnector->mst_mgr.lock); + mutex_unlock(&mst_mgr->lock); /* * use the maximum dsc compression bandwidth as the required diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h index b92a7c5671aa..97fd70df531b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h @@ -53,15 +53,15 @@ struct dsc_mst_fairness_vars { struct amdgpu_dm_connector *aconnector; }; -bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, - struct dc_state *dc_state, - struct dsc_mst_fairness_vars *vars); +int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, + struct dc_state *dc_state, + struct dsc_mst_fairness_vars *vars); bool needs_dsc_aux_workaround(struct dc_link *link); -bool pre_validate_dsc(struct drm_atomic_state *state, - struct dm_atomic_state **dm_state_ptr, - struct dsc_mst_fairness_vars *vars); +int pre_validate_dsc(struct drm_atomic_state *state, + struct dm_atomic_state **dm_state_ptr, + struct dsc_mst_fairness_vars *vars); enum dc_status dm_dp_mst_is_port_support_mode( struct amdgpu_dm_connector *aconnector, diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c 
b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index ee0456b5e14e..e0c8d6f09bb4 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -2393,6 +2393,26 @@ static enum bp_result get_vram_info_v25( return result; } +static enum bp_result get_vram_info_v30( + struct bios_parser *bp, + struct dc_vram_info *info) +{ + struct atom_vram_info_header_v3_0 *info_v30; + enum bp_result result = BP_RESULT_OK; + + info_v30 = GET_IMAGE(struct atom_vram_info_header_v3_0, + DATA_TABLES(vram_info)); + + if (info_v30 == NULL) + return BP_RESULT_BADBIOSTABLE; + + info->num_chans = info_v30->channel_num; + info->dram_channel_width_bytes = (1 << info_v30->channel_width) / 8; + + return result; +} + + /* * get_integrated_info_v11 * @@ -3060,6 +3080,16 @@ static enum bp_result bios_parser_get_vram_info( } break; + case 3: + switch (revision.minor) { + case 0: + result = get_vram_info_v30(bp, info); + break; + default: + break; + } + break; + default: return result; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c index 1131c6d73f6c..20a06c04e4a1 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c @@ -363,32 +363,32 @@ static struct wm_table ddr5_wm_table = { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 9, - .sr_enter_plus_exit_time_us = 11, + .sr_exit_time_us = 12.5, + .sr_enter_plus_exit_time_us = 14.5, .valid = true, }, { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 9, - .sr_enter_plus_exit_time_us = 11, + .sr_exit_time_us = 12.5, + .sr_enter_plus_exit_time_us = 14.5, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 9, - .sr_enter_plus_exit_time_us = 11, + .sr_exit_time_us = 12.5, + .sr_enter_plus_exit_time_us = 14.5, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 9, - .sr_enter_plus_exit_time_us = 11, + .sr_exit_time_us = 12.5, + .sr_enter_plus_exit_time_us = 14.5, .valid = true, }, } @@ -400,32 +400,32 @@ static struct wm_table lpddr5_wm_table = { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 11.5, - .sr_enter_plus_exit_time_us = 14.5, + .sr_exit_time_us = 16.5, + .sr_enter_plus_exit_time_us = 18.5, .valid = true, }, { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 11.5, - .sr_enter_plus_exit_time_us = 14.5, + .sr_exit_time_us = 16.5, + .sr_enter_plus_exit_time_us = 18.5, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 11.5, - .sr_enter_plus_exit_time_us = 14.5, + .sr_exit_time_us = 16.5, + .sr_enter_plus_exit_time_us = 18.5, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 11.5, - .sr_enter_plus_exit_time_us = 14.5, + .sr_exit_time_us = 16.5, + .sr_enter_plus_exit_time_us = 18.5, .valid = true, }, } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c index ef0795b14a1f..2db595672a46 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c +++ 
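In get_vram_info_v30() above, channel_width appears to be stored as a log2 bit width, so the byte width is (1 << channel_width) / 8; a value of 5, for instance, gives 32 bits, i.e. 4 bytes per channel. A quick standalone check of that conversion:

#include <stdio.h>

/* Same conversion as the new v3.0 parser: log2(bits) -> bytes. */
static unsigned int channel_width_bytes(unsigned int channel_width)
{
        return (1u << channel_width) / 8;
}

int main(void)
{
        unsigned int w;

        for (w = 3; w <= 6; w++)
                printf("channel_width=%u -> %u bytes\n", w, channel_width_bytes(w));
        return 0;
}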
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c @@ -123,9 +123,10 @@ static int dcn314_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uint32_t result; result = dcn314_smu_wait_for_response(clk_mgr, 10, 200000); - ASSERT(result == VBIOSSMC_Result_OK); - smu_print("SMU response after wait: %d\n", result); + if (result != VBIOSSMC_Result_OK) + smu_print("SMU Response was not OK. SMU response after wait received is: %d\n", + result); if (result == VBIOSSMC_Status_BUSY) return -1; @@ -216,6 +217,12 @@ int dcn314_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request VBIOSSMC_MSG_SetHardMinDcfclkByFreq, khz_to_mhz_ceil(requested_dcfclk_khz)); +#ifdef DBG + smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", + actual_dcfclk_set_mhz, + actual_dcfclk_set_mhz * 1000); +#endif + return actual_dcfclk_set_mhz * 1000; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index 1c612ccf1944..6f77d8e538ab 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -157,6 +157,7 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base) struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); unsigned int num_levels; struct clk_limit_num_entries *num_entries_per_clk = &clk_mgr_base->bw_params->clk_table.num_entries_per_clk; + unsigned int i; memset(&(clk_mgr_base->clks), 0, sizeof(struct dc_clocks)); clk_mgr_base->clks.p_state_change_support = true; @@ -205,18 +206,17 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base) clk_mgr->dpm_present = true; if (clk_mgr_base->ctx->dc->debug.min_disp_clk_khz) { - unsigned int i; - for (i = 0; i < num_levels; i++) if (clk_mgr_base->bw_params->clk_table.entries[i].dispclk_mhz < khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_disp_clk_khz)) clk_mgr_base->bw_params->clk_table.entries[i].dispclk_mhz = khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_disp_clk_khz); } + for (i = 0; i < num_levels; i++) + if (clk_mgr_base->bw_params->clk_table.entries[i].dispclk_mhz > 1950) + clk_mgr_base->bw_params->clk_table.entries[i].dispclk_mhz = 1950; if (clk_mgr_base->ctx->dc->debug.min_dpp_clk_khz) { - unsigned int i; - for (i = 0; i < num_levels; i++) if (clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz < khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_dpp_clk_khz)) @@ -669,6 +669,9 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base) &clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz, &num_entries_per_clk->num_memclk_levels); + /* memclk must have at least one level */ + num_entries_per_clk->num_memclk_levels = num_entries_per_clk->num_memclk_levels ? 
num_entries_per_clk->num_memclk_levels : 1; + dcn32_init_single_clock(clk_mgr, PPCLK_FCLK, &clk_mgr_base->bw_params->clk_table.entries[0].fclk_mhz, &num_entries_per_clk->num_fclk_levels); diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index bfc5474c0f4c..0598465fd1a1 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -852,6 +852,8 @@ struct dc_debug_options { bool enable_double_buffered_dsc_pg_support; bool enable_dp_dig_pixel_rate_div_policy; enum lttpr_mode lttpr_mode_override; + unsigned int dsc_delay_factor_wa_x1000; + unsigned int min_prefetch_in_strobe_ns; }; struct gpu_info_soc_bounding_box_v1_0; diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 1b70b78e2fa1..af631085e88c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -359,7 +359,8 @@ static const struct dce_audio_registers audio_regs[] = { audio_regs(2), audio_regs(3), audio_regs(4), - audio_regs(5) + audio_regs(5), + audio_regs(6), }; #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index b9765b3899e1..ef52e6b6eccf 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -436,34 +436,48 @@ void dpp1_set_cursor_position( uint32_t height) { struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x; - int src_y_offset = pos->y - pos->y_hotspot - param->viewport.y; + int x_pos = pos->x - param->viewport.x; + int y_pos = pos->y - param->viewport.y; + int x_hotspot = pos->x_hotspot; + int y_hotspot = pos->y_hotspot; + int src_x_offset = x_pos - pos->x_hotspot; + int src_y_offset = y_pos - pos->y_hotspot; + int cursor_height = (int)height; + int cursor_width = (int)width; uint32_t cur_en = pos->enable ? 
1 : 0; - // Cursor width/height and hotspots need to be rotated for offset calculation + // Transform cursor width / height and hotspots for offset calculations if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) { - swap(width, height); + swap(cursor_height, cursor_width); + swap(x_hotspot, y_hotspot); + if (param->rotation == ROTATION_ANGLE_90) { - src_x_offset = pos->x - pos->y_hotspot - param->viewport.x; - src_y_offset = pos->y - pos->x_hotspot - param->viewport.y; + // hotspot = (-y, x) + src_x_offset = x_pos - (cursor_width - x_hotspot); + src_y_offset = y_pos - y_hotspot; + } else if (param->rotation == ROTATION_ANGLE_270) { + // hotspot = (y, -x) + src_x_offset = x_pos - x_hotspot; + src_y_offset = y_pos - (cursor_height - y_hotspot); } } else if (param->rotation == ROTATION_ANGLE_180) { + // hotspot = (-x, -y) if (!param->mirror) - src_x_offset = pos->x - param->viewport.x; + src_x_offset = x_pos - (cursor_width - x_hotspot); - src_y_offset = pos->y - param->viewport.y; + src_y_offset = y_pos - (cursor_height - y_hotspot); } if (src_x_offset >= (int)param->viewport.width) cur_en = 0; /* not visible beyond right edge*/ - if (src_x_offset + (int)width <= 0) + if (src_x_offset + cursor_width <= 0) cur_en = 0; /* not visible beyond left edge*/ if (src_y_offset >= (int)param->viewport.height) cur_en = 0; /* not visible beyond bottom edge*/ - if (src_y_offset + (int)height <= 0) + if (src_y_offset + cursor_height <= 0) cur_en = 0; /* not visible beyond top edge*/ REG_UPDATE(CURSOR0_CONTROL, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 52e201e9b091..a142a00bc432 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -1179,10 +1179,12 @@ void hubp1_cursor_set_position( const struct dc_cursor_mi_param *param) { struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); - int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x; - int src_y_offset = pos->y - pos->y_hotspot - param->viewport.y; + int x_pos = pos->x - param->viewport.x; + int y_pos = pos->y - param->viewport.y; int x_hotspot = pos->x_hotspot; int y_hotspot = pos->y_hotspot; + int src_x_offset = x_pos - pos->x_hotspot; + int src_y_offset = y_pos - pos->y_hotspot; int cursor_height = (int)hubp->curs_attr.height; int cursor_width = (int)hubp->curs_attr.width; uint32_t dst_x_offset; @@ -1200,18 +1202,26 @@ void hubp1_cursor_set_position( if (hubp->curs_attr.address.quad_part == 0) return; - // Rotated cursor width/height and hotspots tweaks for offset calculation + // Transform cursor width / height and hotspots for offset calculations if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) { swap(cursor_height, cursor_width); + swap(x_hotspot, y_hotspot); + if (param->rotation == ROTATION_ANGLE_90) { - src_x_offset = pos->x - pos->y_hotspot - param->viewport.x; - src_y_offset = pos->y - pos->x_hotspot - param->viewport.y; + // hotspot = (-y, x) + src_x_offset = x_pos - (cursor_width - x_hotspot); + src_y_offset = y_pos - y_hotspot; + } else if (param->rotation == ROTATION_ANGLE_270) { + // hotspot = (y, -x) + src_x_offset = x_pos - x_hotspot; + src_y_offset = y_pos - (cursor_height - y_hotspot); } } else if (param->rotation == ROTATION_ANGLE_180) { + // hotspot = (-x, -y) if (!param->mirror) - src_x_offset = pos->x - param->viewport.x; + src_x_offset = x_pos - (cursor_width - x_hotspot); - src_y_offset = pos->y - param->viewport.y; + 
src_y_offset = y_pos - (cursor_height - y_hotspot); } dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0; @@ -1248,8 +1258,8 @@ void hubp1_cursor_set_position( CURSOR_Y_POSITION, pos->y); REG_SET_2(CURSOR_HOT_SPOT, 0, - CURSOR_HOT_SPOT_X, x_hotspot, - CURSOR_HOT_SPOT_Y, y_hotspot); + CURSOR_HOT_SPOT_X, pos->x_hotspot, + CURSOR_HOT_SPOT_Y, pos->y_hotspot); REG_SET(CURSOR_DST_OFFSET, 0, CURSOR_DST_X_OFFSET, dst_x_offset); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c index 4996d2810edb..4566bc7abf17 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c @@ -623,6 +623,10 @@ void hubp2_cursor_set_attributes( hubp->att.size.bits.width = attr->width; hubp->att.size.bits.height = attr->height; hubp->att.cur_ctl.bits.mode = attr->color_format; + + hubp->cur_rect.w = attr->width; + hubp->cur_rect.h = attr->height; + hubp->att.cur_ctl.bits.pitch = hw_pitch; hubp->att.cur_ctl.bits.line_per_chunk = lpc; hubp->att.cur_ctl.bits.cur_2x_magnify = attr->attribute_flags.bits.ENABLE_MAGNIFICATION; @@ -969,10 +973,12 @@ void hubp2_cursor_set_position( const struct dc_cursor_mi_param *param) { struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); - int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x; - int src_y_offset = pos->y - pos->y_hotspot - param->viewport.y; + int x_pos = pos->x - param->viewport.x; + int y_pos = pos->y - param->viewport.y; int x_hotspot = pos->x_hotspot; int y_hotspot = pos->y_hotspot; + int src_x_offset = x_pos - pos->x_hotspot; + int src_y_offset = y_pos - pos->y_hotspot; int cursor_height = (int)hubp->curs_attr.height; int cursor_width = (int)hubp->curs_attr.width; uint32_t dst_x_offset; @@ -990,18 +996,26 @@ void hubp2_cursor_set_position( if (hubp->curs_attr.address.quad_part == 0) return; - // Rotated cursor width/height and hotspots tweaks for offset calculation + // Transform cursor width / height and hotspots for offset calculations if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) { swap(cursor_height, cursor_width); + swap(x_hotspot, y_hotspot); + if (param->rotation == ROTATION_ANGLE_90) { - src_x_offset = pos->x - pos->y_hotspot - param->viewport.x; - src_y_offset = pos->y - pos->x_hotspot - param->viewport.y; + // hotspot = (-y, x) + src_x_offset = x_pos - (cursor_width - x_hotspot); + src_y_offset = y_pos - y_hotspot; + } else if (param->rotation == ROTATION_ANGLE_270) { + // hotspot = (y, -x) + src_x_offset = x_pos - x_hotspot; + src_y_offset = y_pos - (cursor_height - y_hotspot); } } else if (param->rotation == ROTATION_ANGLE_180) { + // hotspot = (-x, -y) if (!param->mirror) - src_x_offset = pos->x - param->viewport.x; + src_x_offset = x_pos - (cursor_width - x_hotspot); - src_y_offset = pos->y - param->viewport.y; + src_y_offset = y_pos - (cursor_height - y_hotspot); } dst_x_offset = (src_x_offset >= 0) ? 
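The cursor hunks above (dpp1, hubp1 and hubp2) all apply the same transform: for 90/180/270 degree rotation the hotspot is rotated together with the cursor rectangle before the source offsets are derived, i.e. (x, y) maps to (-y, x), (-x, -y) and (y, -x) respectively. A standalone sketch of that offset calculation, simplified to consider mirroring only in the 180 degree case:

#include <stdio.h>

enum rotation { ROT_0, ROT_90, ROT_180, ROT_270 };

static void cursor_src_offset(enum rotation rot, int mirror,
                              int x_pos, int y_pos,
                              int width, int height,
                              int x_hot, int y_hot,
                              int *src_x, int *src_y)
{
        int cursor_width = width, cursor_height = height;
        int x_hotspot = x_hot, y_hotspot = y_hot;
        int tmp;

        *src_x = x_pos - x_hotspot;
        *src_y = y_pos - y_hotspot;

        if (rot == ROT_90 || rot == ROT_270) {
                /* Rotate the rectangle and the hotspot together. */
                tmp = cursor_width;  cursor_width = cursor_height; cursor_height = tmp;
                tmp = x_hotspot;     x_hotspot = y_hotspot;        y_hotspot = tmp;

                if (rot == ROT_90) {            /* hotspot = (-y, x) */
                        *src_x = x_pos - (cursor_width - x_hotspot);
                        *src_y = y_pos - y_hotspot;
                } else {                        /* hotspot = (y, -x) */
                        *src_x = x_pos - x_hotspot;
                        *src_y = y_pos - (cursor_height - y_hotspot);
                }
        } else if (rot == ROT_180) {            /* hotspot = (-x, -y) */
                if (!mirror)
                        *src_x = x_pos - (cursor_width - x_hotspot);
                *src_y = y_pos - (cursor_height - y_hotspot);
        }
}

int main(void)
{
        int sx, sy;

        /* 64x64 cursor, hotspot (10, 20), at (100, 100), rotated 90 degrees:
         * expect (100 - (64 - 20), 100 - 10) = (56, 90). */
        cursor_src_offset(ROT_90, 0, 100, 100, 64, 64, 10, 20, &sx, &sy);
        printf("src offset = (%d, %d)\n", sx, sy);
        return 0;
}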
src_x_offset : 0; @@ -1038,8 +1052,8 @@ void hubp2_cursor_set_position( CURSOR_Y_POSITION, pos->y); REG_SET_2(CURSOR_HOT_SPOT, 0, - CURSOR_HOT_SPOT_X, x_hotspot, - CURSOR_HOT_SPOT_Y, y_hotspot); + CURSOR_HOT_SPOT_X, pos->x_hotspot, + CURSOR_HOT_SPOT_Y, pos->y_hotspot); REG_SET(CURSOR_DST_OFFSET, 0, CURSOR_DST_X_OFFSET, dst_x_offset); @@ -1048,8 +1062,8 @@ void hubp2_cursor_set_position( hubp->pos.cur_ctl.bits.cur_enable = cur_en; hubp->pos.position.bits.x_pos = pos->x; hubp->pos.position.bits.y_pos = pos->y; - hubp->pos.hot_spot.bits.x_hot = x_hotspot; - hubp->pos.hot_spot.bits.y_hot = y_hotspot; + hubp->pos.hot_spot.bits.x_hot = pos->x_hotspot; + hubp->pos.hot_spot.bits.y_hot = pos->y_hotspot; hubp->pos.dst_offset.bits.dst_x_offset = dst_x_offset; /* Cursor Rectangle Cache * Cursor bitmaps have different hotspot values diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c index 84e1486f3d51..39a57bcd7866 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c @@ -87,6 +87,7 @@ static struct hubp_funcs dcn31_hubp_funcs = { .hubp_init = hubp3_init, .set_unbounded_requesting = hubp31_set_unbounded_requesting, .hubp_soft_reset = hubp31_soft_reset, + .hubp_set_flip_int = hubp1_set_flip_int, .hubp_in_blank = hubp1_in_blank, .program_extended_blank = hubp31_program_extended_blank, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c index 1bd7e0f327d8..389a8938ee45 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c @@ -96,6 +96,13 @@ static void dccg314_set_pixel_rate_div( struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); enum pixel_rate_div cur_k1 = PIXEL_RATE_DIV_NA, cur_k2 = PIXEL_RATE_DIV_NA; + // Don't program 0xF into the register field. Not valid since + // K1 / K2 field is only 1 / 2 bits wide + if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA) { + BREAK_TO_DEBUGGER(); + return; + } + dccg314_get_pixel_rate_div(dccg, otg_inst, &cur_k1, &cur_k2); if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA || (k1 == cur_k1 && k2 == cur_k2)) return; diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c index 7e773bf7b895..38842f938bed 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c @@ -49,18 +49,30 @@ #define CTX \ enc1->base.ctx +static void enc314_reset_fifo(struct stream_encoder *enc, bool reset) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + uint32_t reset_val = reset ? 
1 : 0; + uint32_t is_symclk_on; + + REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, reset_val); + REG_GET(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, &is_symclk_on); + + if (is_symclk_on) + REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, reset_val, 10, 5000); + else + udelay(10); +} static void enc314_enable_fifo(struct stream_encoder *enc) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); - /* TODO: Confirm if we need to wait for DIG_SYMCLK_FE_ON */ - REG_WAIT(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, 1, 10, 5000); REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7); - REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1); - REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 10, 5000); - REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 0); - REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 0, 10, 5000); + + enc314_reset_fifo(enc, true); + enc314_reset_fifo(enc, false); + REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 1); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c index 588c1c71241f..a0741794db62 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c @@ -348,10 +348,8 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing); odm_combine_factor = get_odm_config(pipe_ctx, NULL); - if (pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL) - return odm_combine_factor; - if (is_dp_128b_132b_signal(pipe_ctx)) { + *k1_div = PIXEL_RATE_DIV_BY_1; *k2_div = PIXEL_RATE_DIV_BY_1; } else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) { *k1_div = PIXEL_RATE_DIV_BY_1; @@ -359,7 +357,7 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig *k2_div = PIXEL_RATE_DIV_BY_2; else *k2_div = PIXEL_RATE_DIV_BY_4; - } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) { + } else if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) { if (two_pix_per_container) { *k1_div = PIXEL_RATE_DIV_BY_1; *k2_div = PIXEL_RATE_DIV_BY_2; diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c index 47eb162f1a75..7dd36e402bac 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c @@ -237,7 +237,7 @@ static struct timing_generator_funcs dcn314_tg_funcs = { .clear_optc_underflow = optc1_clear_optc_underflow, .setup_global_swap_lock = NULL, .get_crc = optc1_get_crc, - .configure_crc = optc2_configure_crc, + .configure_crc = optc1_configure_crc, .set_dsc_config = optc3_set_dsc_config, .get_dsc_status = optc2_get_dsc_status, .set_dwb_source = NULL, diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c index d0ad72caead2..9066c511a052 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c @@ -847,7 +847,7 @@ static const struct resource_caps res_cap_dcn314 = { .num_ddc = 5, .num_vmid = 16, .num_mpc_3dlut = 2, - .num_dsc = 3, + .num_dsc = 4, }; static const struct dc_plane_cap plane_cap = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c index e4daed44ef5f..df4f25119142 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c +++ 
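enc314_reset_fifo() above only polls DIG_FIFO_RESET_DONE when DIG_SYMCLK_FE_ON reports that the feeding clock is running, presumably because the done bit cannot toggle while that clock is gated, and otherwise falls back to a fixed delay. A simplified model of that decision, with hypothetical stubs in place of the REG_* macros and udelay():

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for REG_UPDATE/REG_GET/REG_WAIT and udelay(). */
static void write_fifo_reset(bool reset)     { printf("DIG_FIFO_RESET <- %d\n", reset); }
static bool read_symclk_fe_on(void)          { return true; }
static void wait_fifo_reset_done(bool value) { printf("wait RESET_DONE == %d\n", value); }
static void delay_us(unsigned int us)        { printf("udelay(%u)\n", us); }

static void reset_fifo(bool reset)
{
        write_fifo_reset(reset);

        if (read_symclk_fe_on())
                /* Status bit is clocked, so polling for completion is safe. */
                wait_fifo_reset_done(reset);
        else
                /* Clock gated: give the logic a fixed settle time instead. */
                delay_us(10);
}

int main(void)
{
        reset_fifo(true);       /* assert the reset ... */
        reset_fifo(false);      /* ... then release it, as enc314_enable_fifo() does */
        return 0;
}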
b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c @@ -96,8 +96,10 @@ static void dccg32_set_pixel_rate_div( // Don't program 0xF into the register field. Not valid since // K1 / K2 field is only 1 / 2 bits wide - if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA) + if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA) { + BREAK_TO_DEBUGGER(); return; + } dccg32_get_pixel_rate_div(dccg, otg_inst, &cur_k1, &cur_k2); if (k1 == cur_k1 && k2 == cur_k2) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c index cf5bd9713f54..d0b46a3e0155 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c @@ -283,8 +283,7 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c using the max for calculation */ if (hubp->curs_attr.width > 0) { - // Round cursor width to next multiple of 64 - cursor_size = (((hubp->curs_attr.width + 63) / 64) * 64) * hubp->curs_attr.height; + cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height; switch (pipe->stream->cursor_attributes.color_format) { case CURSOR_MODE_MONO: @@ -309,9 +308,9 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c cursor_size > 16384) { /* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_width*cursor_Bpe/mblk_bytes, 1) */ - cache_lines_used += (((hubp->curs_attr.width * hubp->curs_attr.height * cursor_bpp + - DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / DCN3_2_MALL_MBLK_SIZE_BYTES) * - DCN3_2_MALL_MBLK_SIZE_BYTES) / dc->caps.cache_line_size + 2; + cache_lines_used += (((cursor_size + DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / + DCN3_2_MALL_MBLK_SIZE_BYTES) * DCN3_2_MALL_MBLK_SIZE_BYTES) / + dc->caps.cache_line_size + 2; } break; } @@ -727,10 +726,7 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context) struct hubp *hubp = pipe->plane_res.hubp; if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) { - //Round cursor width up to next multiple of 64 - int cursor_width = ((hubp->curs_attr.width + 63) / 64) * 64; - int cursor_height = hubp->curs_attr.height; - int cursor_size = cursor_width * cursor_height; + int cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height; switch (hubp->curs_attr.color_format) { case CURSOR_MODE_MONO: @@ -1175,10 +1171,8 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing); odm_combine_factor = get_odm_config(pipe_ctx, NULL); - if (pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL) - return odm_combine_factor; - if (is_dp_128b_132b_signal(pipe_ctx)) { + *k1_div = PIXEL_RATE_DIV_BY_1; *k2_div = PIXEL_RATE_DIV_BY_1; } else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) { *k1_div = PIXEL_RATE_DIV_BY_1; diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index a88dd7b3d1c1..d1598e3131f6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -724,6 +724,7 @@ static const struct dc_debug_options debug_defaults_drv = { .enable_dp_dig_pixel_rate_div_policy = 1, .allow_sw_cursor_fallback = false, .alloc_extra_way_for_cursor = true, + .min_prefetch_in_strobe_ns = 60000, // 60us }; static const struct dc_debug_options debug_defaults_diags = { diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c index b03a7814e96d..fa3778849db1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c @@ -111,7 +111,7 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_stat mall_alloc_width_blk_aligned = full_vp_width_blk_aligned; /* mall_alloc_height_blk_aligned_l/c = CEILING(sub_vp_height_l/c - 1, blk_height_l/c) + blk_height_l/c */ - mall_alloc_height_blk_aligned = (pipe->stream->timing.v_addressable - 1 + mblk_height - 1) / + mall_alloc_height_blk_aligned = (pipe->plane_res.scl_data.viewport.height - 1 + mblk_height - 1) / mblk_height * mblk_height + mblk_height; /* full_mblk_width_ub_l/c = mall_alloc_width_blk_aligned_l/c; diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c index 61087f2385a9..6292ac515d1a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c @@ -722,6 +722,7 @@ static const struct dc_debug_options debug_defaults_drv = { .enable_dp_dig_pixel_rate_div_policy = 1, .allow_sw_cursor_fallback = false, .alloc_extra_way_for_cursor = true, + .min_prefetch_in_strobe_ns = 60000, // 60us }; static const struct dc_debug_options debug_defaults_diags = { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index d680f1c5b69f..45db40c41882 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -1228,6 +1228,7 @@ int dcn20_populate_dml_pipes_from_context( pipes[pipe_cnt].pipe.src.dcc = false; pipes[pipe_cnt].pipe.src.dcc_rate = 1; pipes[pipe_cnt].pipe.dest.synchronized_vblank_all_planes = synchronized_vblank; + pipes[pipe_cnt].pipe.dest.synchronize_timings = synchronized_vblank; pipes[pipe_cnt].pipe.dest.hblank_start = timing->h_total - timing->h_front_porch; pipes[pipe_cnt].pipe.dest.hblank_end = pipes[pipe_cnt].pipe.dest.hblank_start - timing->h_addressable diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c index cf420ad2b8dc..34b6c763a455 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c @@ -146,8 +146,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = { }, }, .num_states = 5, - .sr_exit_time_us = 9.0, - .sr_enter_plus_exit_time_us = 11.0, + .sr_exit_time_us = 16.5, + .sr_enter_plus_exit_time_us = 18.5, .sr_exit_z8_time_us = 442.0, .sr_enter_plus_exit_z8_time_us = 560.0, .writeback_latency_us = 12.0, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 819de0f11012..2abe3967f7fb 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -157,7 +157,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = { .dispclk_dppclk_vco_speed_mhz = 4300.0, .do_urgent_latency_adjustment = true, .urgent_latency_adjustment_fabric_clock_component_us = 1.0, - .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000, + .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000, }; void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr) @@ -211,7 +211,7 @@ void 
dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr) /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */ if (clk_mgr->base.ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) { clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].valid = true; - clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 38; + clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 50; clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.fclk_change_latency_us = fclk_change_latency_us; clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us = sr_exit_time_us; clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us; @@ -221,7 +221,7 @@ void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr) clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_uclk = min_uclk_mhz; clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF; clk_mgr->base.bw_params->dummy_pstate_table[0].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[0].memclk_mhz * 16; - clk_mgr->base.bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 38; + clk_mgr->base.bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 50; clk_mgr->base.bw_params->dummy_pstate_table[1].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[1].memclk_mhz * 16; clk_mgr->base.bw_params->dummy_pstate_table[1].dummy_pstate_latency_us = 9; clk_mgr->base.bw_params->dummy_pstate_table[2].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[2].memclk_mhz * 16; @@ -1803,6 +1803,12 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, */ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; + /* For DCN32/321 need to validate with fclk pstate change latency equal to dummy so + * prefetch is scheduled correctly to account for dummy pstate. 
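The dummy p-state handling here validates with the fclk change latency temporarily raised to the dummy p-state latency (50 us) when dummy_latency_index is 0, then restores the real value once validation and the vblank-stretch setup are done. A rough sketch of that override-and-restore idea; the struct, its field names and the 7 us placeholder are hypothetical:

#include <stdio.h>

struct soc_params {
        double fclk_change_latency_us;
        double dummy_pstate_latency_us;
};

/* Hypothetical stand-in for the bandwidth validation pass. */
static void validate_bw(const struct soc_params *soc)
{
        printf("validating with fclk change latency = %.1f us\n",
               soc->fclk_change_latency_us);
}

static void validate_with_dummy_pstate(struct soc_params *soc, int dummy_latency_index)
{
        double saved = soc->fclk_change_latency_us;

        /* Schedule prefetch against the larger dummy p-state latency. */
        if (dummy_latency_index == 0)
                soc->fclk_change_latency_us = soc->dummy_pstate_latency_us;

        validate_bw(soc);

        /* Put the real latency back for the final programming. */
        soc->fclk_change_latency_us = saved;
}

int main(void)
{
        struct soc_params soc = {
                .fclk_change_latency_us = 7.0,          /* placeholder value */
                .dummy_pstate_latency_us = 50.0,
        };

        validate_with_dummy_pstate(&soc, 0);
        return 0;
}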
+ */ + if (dummy_latency_index == 0) + context->bw_ctx.dml.soc.fclk_change_latency_us = + dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false); maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; dcfclk_from_fw_based_mclk_switching = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; @@ -1904,7 +1910,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] == dm_dram_clock_change_unsupported) { - int min_dram_speed_mts_offset = dc->clk_mgr->bw_params->clk_table.num_entries - 1; + int min_dram_speed_mts_offset = dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1; min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[min_dram_speed_mts_offset].memclk_mhz * 16; @@ -1990,6 +1996,10 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod; + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && dummy_latency_index == 0) + context->bw_ctx.dml.soc.fclk_change_latency_us = + dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; + dcn32_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); if (!pstate_en) @@ -1997,8 +2007,12 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { dcn30_setup_mclk_switch_using_fw_based_vblank_stretch(dc, context); + if (dummy_latency_index == 0) + context->bw_ctx.dml.soc.fclk_change_latency_us = + dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us; + } } static void dcn32_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts, @@ -2359,9 +2373,13 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) dcn3_2_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; - } + /* DML DSC delay factor workaround */ + dcn3_2_ip.dsc_delay_factor_wa = dc->debug.dsc_delay_factor_wa_x1000 / 1000.0; + + dcn3_2_ip.min_prefetch_in_strobe_us = dc->debug.min_prefetch_in_strobe_ns / 1000.0; + /* Override dispclk_dppclk_vco_speed_mhz from Clk Mgr */ dcn3_2_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c index 5b91660a6496..9afd9ba23fb2 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c @@ -364,10 +364,11 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) { v->DSCDelay[k] = dml32_DSCDelayRequirement(mode_lib->vba.DSCEnabled[k], mode_lib->vba.ODMCombineEnabled[k], mode_lib->vba.DSCInputBitPerComponent[k], - mode_lib->vba.OutputBpp[k], mode_lib->vba.HActive[k], mode_lib->vba.HTotal[k], + 
mode_lib->vba.OutputBppPerState[mode_lib->vba.VoltageLevel][k], + mode_lib->vba.HActive[k], mode_lib->vba.HTotal[k], mode_lib->vba.NumberOfDSCSlices[k], mode_lib->vba.OutputFormat[k], mode_lib->vba.Output[k], mode_lib->vba.PixelClock[k], - mode_lib->vba.PixelClockBackEnd[k]); + mode_lib->vba.PixelClockBackEnd[k], mode_lib->vba.ip.dsc_delay_factor_wa); } for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) @@ -717,6 +718,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman do { MaxTotalRDBandwidth = 0; + DestinationLineTimesForPrefetchLessThan2 = false; + VRatioPrefetchMoreThanMax = false; #ifdef __DML_VBA_DEBUG__ dml_print("DML::%s: Start loop: VStartup = %d\n", __func__, mode_lib->vba.VStartupLines); #endif @@ -785,6 +788,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman v->SwathHeightY[k], v->SwathHeightC[k], TWait, + v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ? + mode_lib->vba.ip.min_prefetch_in_strobe_us : 0, /* Output */ &v->DSTXAfterScaler[k], &v->DSTYAfterScaler[k], @@ -1627,7 +1632,7 @@ static void mode_support_configuration(struct vba_vars_st *v, && !mode_lib->vba.MSOOrODMSplitWithNonDPLink && !mode_lib->vba.NotEnoughLanesForMSO && mode_lib->vba.LinkCapacitySupport[i] == true && !mode_lib->vba.P2IWith420 - && !mode_lib->vba.DSCOnlyIfNecessaryWithBPP + //&& !mode_lib->vba.DSCOnlyIfNecessaryWithBPP && !mode_lib->vba.DSC422NativeNotSupported && !mode_lib->vba.MPCCombineMethodIncompatible && mode_lib->vba.ODMCombine2To1SupportCheckOK[i] == true @@ -2475,7 +2480,8 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.OutputBppPerState[i][k], mode_lib->vba.HActive[k], mode_lib->vba.HTotal[k], mode_lib->vba.NumberOfDSCSlices[k], mode_lib->vba.OutputFormat[k], mode_lib->vba.Output[k], - mode_lib->vba.PixelClock[k], mode_lib->vba.PixelClockBackEnd[k]); + mode_lib->vba.PixelClock[k], mode_lib->vba.PixelClockBackEnd[k], + mode_lib->vba.ip.dsc_delay_factor_wa); } for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) { @@ -3190,6 +3196,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.FCLKChangeLatency, mode_lib->vba.UrgLatency[i], mode_lib->vba.SREnterPlusExitTime); + memset(&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull, 0, sizeof(DmlPipe)); v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.Dppclk = mode_lib->vba.RequiredDPPCLK[i][j][k]; v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.Dispclk = mode_lib->vba.RequiredDISPCLK[i][j]; v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.PixelClock = mode_lib->vba.PixelClock[k]; @@ -3242,6 +3249,8 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->swath_width_chroma_ub_this_state[k], v->SwathHeightYThisState[k], v->SwathHeightCThisState[k], v->TWait, + v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ ? 
+ mode_lib->vba.ip.min_prefetch_in_strobe_us : 0, /* Output */ &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler[k], diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h index c62e0991358b..c8b28c83ddf4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h @@ -46,9 +46,14 @@ // Prefetch schedule max vratio #define __DML_MAX_VRATIO_PRE__ 4.0 +#define __DML_VBA_MAX_DST_Y_PRE__ 63.75 + #define BPP_INVALID 0 #define BPP_BLENDED_PIPE 0xffffffff +#define MEM_STROBE_FREQ_MHZ 1600 +#define MEM_STROBE_MAX_DELIVERY_TIME_US 60.0 + struct display_mode_lib; void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c index ad66e241f9ae..debe46b24a3e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c @@ -1726,7 +1726,8 @@ unsigned int dml32_DSCDelayRequirement(bool DSCEnabled, enum output_format_class OutputFormat, enum output_encoder_class Output, double PixelClock, - double PixelClockBackEnd) + double PixelClockBackEnd, + double dsc_delay_factor_wa) { unsigned int DSCDelayRequirement_val; @@ -1746,7 +1747,7 @@ unsigned int dml32_DSCDelayRequirement(bool DSCEnabled, } DSCDelayRequirement_val = DSCDelayRequirement_val + (HTotal - HActive) * - dml_ceil(DSCDelayRequirement_val / HActive, 1); + dml_ceil((double)DSCDelayRequirement_val / HActive, 1); DSCDelayRequirement_val = DSCDelayRequirement_val * PixelClock / PixelClockBackEnd; @@ -1764,7 +1765,7 @@ unsigned int dml32_DSCDelayRequirement(bool DSCEnabled, dml_print("DML::%s: DSCDelayRequirement_val = %d\n", __func__, DSCDelayRequirement_val); #endif - return DSCDelayRequirement_val; + return dml_ceil(DSCDelayRequirement_val * dsc_delay_factor_wa, 1); } void dml32_CalculateSurfaceSizeInMall( @@ -3416,6 +3417,7 @@ bool dml32_CalculatePrefetchSchedule( unsigned int SwathHeightY, unsigned int SwathHeightC, double TWait, + double TPreReq, /* Output */ double *DSTXAfterScaler, double *DSTYAfterScaler, @@ -3666,6 +3668,7 @@ bool dml32_CalculatePrefetchSchedule( dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime - (*DSTYAfterScaler + (double) *DSTXAfterScaler / (double) myPipe->HTotal); + dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, __DML_VBA_MAX_DST_Y_PRE__); #ifdef __DML_VBA_DEBUG__ dml_print("DML::%s: HTotal = %d\n", __func__, myPipe->HTotal); dml_print("DML::%s: min_Lsw = %f\n", __func__, min_Lsw); @@ -3725,7 +3728,8 @@ bool dml32_CalculatePrefetchSchedule( *VRatioPrefetchY = 0; *VRatioPrefetchC = 0; *RequiredPrefetchPixDataBWLuma = 0; - if (dst_y_prefetch_equ > 1) { + if (dst_y_prefetch_equ > 1 && + (Tpre_rounded >= TPreReq || dst_y_prefetch_equ == __DML_VBA_MAX_DST_Y_PRE__)) { double PrefetchBandwidth1; double PrefetchBandwidth2; double PrefetchBandwidth3; @@ -3871,7 +3875,11 @@ bool dml32_CalculatePrefetchSchedule( } if (dst_y_prefetch_oto < dst_y_prefetch_equ) { - *DestinationLinesForPrefetch = dst_y_prefetch_oto; + if (dst_y_prefetch_oto * LineTime < TPreReq) { + *DestinationLinesForPrefetch = dst_y_prefetch_equ; + } else { + *DestinationLinesForPrefetch = dst_y_prefetch_oto; + } TimeForFetchingMetaPTE = Tvm_oto; TimeForFetchingRowInVBlank 
= Tr0_oto; *PrefetchBandwidth = prefetch_bw_oto; @@ -4396,7 +4404,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( if (v->NumberOfActiveSurfaces > 1) { ActiveClockChangeLatencyHidingY = ActiveClockChangeLatencyHidingY - - (1 - 1 / v->NumberOfActiveSurfaces) * SwathHeightY[k] * v->HTotal[k] + - (1.0 - 1.0 / v->NumberOfActiveSurfaces) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k]; } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h index 55cead0d4237..3989c2a28fae 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h @@ -30,7 +30,7 @@ #include "os_types.h" #include "../dc_features.h" #include "../display_mode_structs.h" -#include "dml/display_mode_vba.h" +#include "../display_mode_vba.h" unsigned int dml32_dscceComputeDelay( unsigned int bpc, @@ -327,7 +327,8 @@ unsigned int dml32_DSCDelayRequirement(bool DSCEnabled, enum output_format_class OutputFormat, enum output_encoder_class Output, double PixelClock, - double PixelClockBackEnd); + double PixelClockBackEnd, + double dsc_delay_factor_wa); void dml32_CalculateSurfaceSizeInMall( unsigned int NumberOfActiveSurfaces, @@ -742,6 +743,7 @@ bool dml32_CalculatePrefetchSchedule( unsigned int SwathHeightY, unsigned int SwathHeightC, double TWait, + double TPreReq, /* Output */ double *DSTXAfterScaler, double *DSTYAfterScaler, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c index a1276f6b9581..395ae8761980 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c @@ -291,8 +291,8 @@ void dml32_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, dml_print("DML_DLG: %s: vready_after_vcount0 = %d\n", __func__, dlg_regs->vready_after_vcount0); - dst_x_after_scaler = get_dst_x_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); - dst_y_after_scaler = get_dst_y_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); + dst_x_after_scaler = dml_ceil(get_dst_x_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx), 1); + dst_y_after_scaler = dml_ceil(get_dst_y_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx), 1); // do some adjustment on the dst_after scaler to account for odm combine mode dml_print("DML_DLG: %s: input dst_x_after_scaler = %d\n", __func__, dst_x_after_scaler); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c index dd90f241e906..f4b176599be7 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c @@ -29,6 +29,7 @@ #include "dcn321_fpu.h" #include "dcn32/dcn32_resource.h" #include "dcn321/dcn321_resource.h" +#include "dml/dcn32/display_mode_vba_util_32.h" #define DCN3_2_DEFAULT_DET_SIZE 256 @@ -119,15 +120,15 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = { }, }, .num_states = 1, - .sr_exit_time_us = 12.36, - .sr_enter_plus_exit_time_us = 16.72, + .sr_exit_time_us = 19.95, + .sr_enter_plus_exit_time_us = 24.36, .sr_exit_z8_time_us = 285.0, .sr_enter_plus_exit_z8_time_us = 320, .writeback_latency_us = 12.0, .round_trip_ping_latency_dcfclk_cycles = 263, - .urgent_latency_pixel_data_only_us = 4.0, - 
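Two of the arithmetic fixes above promote integer divisions to floating point before use: dml32_DSCDelayRequirement() now divides a (double) cast of DSCDelayRequirement_val by HActive before dml_ceil(), and the clock-change latency hiding term uses 1.0 - 1.0 / NumberOfActiveSurfaces. With integer operands, C truncates toward zero before the surrounding expression ever sees the result; the stand-alone example below uses made-up values to show the difference:

	#include <math.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int dsc_delay = 700, hactive = 1920;
		int surfaces = 3;

		/* Integer division truncates to 0 before ceil() is applied. */
		double wrong_ratio = ceil(dsc_delay / hactive);          /* 0.0 */
		double right_ratio = ceil((double)dsc_delay / hactive);  /* 1.0 */

		/* 1 / surfaces is 0 in integer math, so the factor collapses to 1. */
		double wrong_factor = 1 - 1 / surfaces;                  /* 1.0  */
		double right_factor = 1.0 - 1.0 / surfaces;              /* ~0.667 */

		printf("%f %f %f %f\n", wrong_ratio, right_ratio,
		       wrong_factor, right_factor);
		return 0;
	}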
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0, - .urgent_latency_vm_data_only_us = 4.0, + .urgent_latency_pixel_data_only_us = 4, + .urgent_latency_pixel_mixed_with_vm_data_us = 4, + .urgent_latency_vm_data_only_us = 4, .fclk_change_latency_us = 20, .usr_retraining_latency_us = 2, .smn_latency_us = 2, @@ -155,7 +156,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = { .dispclk_dppclk_vco_speed_mhz = 4300.0, .do_urgent_latency_adjustment = true, .urgent_latency_adjustment_fabric_clock_component_us = 1.0, - .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000, + .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000, }; static void get_optimal_ntuple(struct _vcs_dpi_voltage_scaling_st *entry) @@ -538,9 +539,13 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) dcn3_21_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; - } + /* DML DSC delay factor workaround */ + dcn3_21_ip.dsc_delay_factor_wa = dc->debug.dsc_delay_factor_wa_x1000 / 1000.0; + + dcn3_21_ip.min_prefetch_in_strobe_us = dc->debug.min_prefetch_in_strobe_ns / 1000.0; + /* Override dispclk_dppclk_vco_speed_mhz from Clk Mgr */ dcn3_21_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index f33a8879b05a..64d602e6412f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -364,6 +364,10 @@ struct _vcs_dpi_ip_params_st { unsigned int max_num_dp2p0_outputs; unsigned int max_num_dp2p0_streams; unsigned int VBlankNomDefaultUS; + + /* DM workarounds */ + double dsc_delay_factor_wa; // TODO: Remove after implementing root cause fix + double min_prefetch_in_strobe_us; }; struct _vcs_dpi_display_xfc_params_st { diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index 03924aed8d5c..8e6585dab20e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -625,7 +625,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) mode_lib->vba.skip_dio_check[mode_lib->vba.NumberOfActivePlanes] = dout->is_virtual; - if (!dout->dsc_enable) + if (dout->dsc_enable) mode_lib->vba.ForcedOutputLinkBPP[mode_lib->vba.NumberOfActivePlanes] = dout->output_bpp; else mode_lib->vba.ForcedOutputLinkBPP[mode_lib->vba.NumberOfActivePlanes] = 0.0; diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c index d635b73af46f..0ea52ba5ac82 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c @@ -108,6 +108,13 @@ static const struct ddc_registers ddc_data_regs_dcn[] = { ddc_data_regs_dcn2(4), ddc_data_regs_dcn2(5), { + // add a dummy entry for cases no such port + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,}, + .ddc_setup = 0, + .phy_aux_cntl = 0, + .dc_gpio_aux_ctrl_5 = 0 + }, + { DDC_GPIO_VGA_REG_LIST(DATA), .ddc_setup = 0, .phy_aux_cntl = 0, @@ -122,6 +129,13 @@ static const struct ddc_registers ddc_clk_regs_dcn[] = { ddc_clk_regs_dcn2(4), ddc_clk_regs_dcn2(5), { + // add a dummy entry for 
cases no such port + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,}, + .ddc_setup = 0, + .phy_aux_cntl = 0, + .dc_gpio_aux_ctrl_5 = 0 + }, + { DDC_GPIO_VGA_REG_LIST(CLK), .ddc_setup = 0, .phy_aux_cntl = 0, diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c index 6fd38cdd68c0..525bc8881950 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c @@ -94,11 +94,14 @@ static enum gpio_result set_config( * is required for detection of AUX mode */ if (hw_gpio->base.en != GPIO_DDC_LINE_VIP_PAD) { if (!ddc_data_pd_en || !ddc_clk_pd_en) { - - REG_SET_2(gpio.MASK_reg, regval, + if (hw_gpio->base.en == GPIO_DDC_LINE_DDC_VGA) { + // bit 4 of mask has different usage in some cases + REG_SET(gpio.MASK_reg, regval, DC_GPIO_DDC1DATA_PD_EN, 1); + } else { + REG_SET_2(gpio.MASK_reg, regval, DC_GPIO_DDC1DATA_PD_EN, 1, DC_GPIO_DDC1CLK_PD_EN, 1); - + } if (config_data->type == GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE) msleep(3); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c index dad3e3741a4e..190af79f3236 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c @@ -67,22 +67,21 @@ int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, int vega10_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr, uint32_t *speed) { - uint32_t current_rpm; - uint32_t percent = 0; - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - return 0; + struct amdgpu_device *adev = hwmgr->adev; + uint32_t duty100, duty; + uint64_t tmp64; - if (vega10_get_current_rpm(hwmgr, ¤t_rpm)) - return -1; + duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1), + CG_FDO_CTRL1, FMAX_DUTY100); + duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS), + CG_THERMAL_STATUS, FDO_PWM_DUTY); - if (hwmgr->thermal_controller. - advanceFanControlParameters.usMaxFanRPM != 0) - percent = current_rpm * 255 / - hwmgr->thermal_controller. 
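The dummy rows added to ddc_data_regs_dcn[] and ddc_clk_regs_dcn[] above appear to keep those tables in step with the DDC line numbering even where a port has no hardware, so a lookup by line index still lands on the intended entry. A condensed illustration of the idea with hypothetical names, not the DCN register layout:

	#include <stdio.h>

	enum ddc_line { DDC_LINE_1, DDC_LINE_2, DDC_LINE_3, DDC_LINE_VGA, DDC_LINE_COUNT };

	struct ddc_regs {
		unsigned int setup_reg;     /* 0 means "no such port" */
		unsigned int mask_reg;
	};

	/* One row per line, in enum order; the gap gets an all-zero placeholder. */
	static const struct ddc_regs ddc_regs[DDC_LINE_COUNT] = {
		{ .setup_reg = 0x100, .mask_reg = 0x104 },  /* DDC_LINE_1 */
		{ .setup_reg = 0x110, .mask_reg = 0x114 },  /* DDC_LINE_2 */
		{ 0 },                                      /* dummy row, port absent */
		{ .setup_reg = 0x1f0, .mask_reg = 0x1f4 },  /* DDC_LINE_VGA */
	};

	int main(void)
	{
		enum ddc_line line = DDC_LINE_VGA;

		if (!ddc_regs[line].setup_reg)
			printf("line %d has no DDC hardware\n", line);
		else
			printf("line %d setup reg at 0x%x\n", line, ddc_regs[line].setup_reg);
		return 0;
	}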
- advanceFanControlParameters.usMaxFanRPM; + if (!duty100) + return -EINVAL; - *speed = MIN(percent, 255); + tmp64 = (uint64_t)duty * 255; + do_div(tmp64, duty100); + *speed = MIN((uint32_t)tmp64, 255); return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 4fe75dd2b329..b880f4d7d67e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1156,22 +1156,21 @@ static int smu_smc_hw_setup(struct smu_context *smu) uint64_t features_supported; int ret = 0; - if (adev->in_suspend && smu_is_dpm_running(smu)) { - dev_info(adev->dev, "dpm has been enabled\n"); - /* this is needed specifically */ - switch (adev->ip_versions[MP1_HWIP][0]) { - case IP_VERSION(11, 0, 7): - case IP_VERSION(11, 0, 11): - case IP_VERSION(11, 5, 0): - case IP_VERSION(11, 0, 12): + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 11): + case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 0, 12): + if (adev->in_suspend && smu_is_dpm_running(smu)) { + dev_info(adev->dev, "dpm has been enabled\n"); ret = smu_system_features_control(smu, true); if (ret) dev_err(adev->dev, "Failed system features control!\n"); - break; - default: - break; + return ret; } - return ret; + break; + default: + break; } ret = smu_init_display_count(smu, 0); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index e2fa3b066b96..f816b1dd110e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -1388,6 +1388,14 @@ enum smu_cmn2asic_mapping_type { CMN2ASIC_MAPPING_WORKLOAD, }; +enum smu_baco_seq { + BACO_SEQ_BACO = 0, + BACO_SEQ_MSR, + BACO_SEQ_BAMACO, + BACO_SEQ_ULPS, + BACO_SEQ_COUNT, +}; + #define MSG_MAP(msg, index, valid_in_vf) \ [SMU_MSG_##msg] = {1, (index), (valid_in_vf)} diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h index 25c08f963f49..d6b13933a98f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h @@ -25,10 +25,10 @@ // *** IMPORTANT *** // PMFW TEAM: Always increment the interface version on any change to this file -#define SMU13_DRIVER_IF_VERSION 0x2C +#define SMU13_DRIVER_IF_VERSION 0x35 //Increment this version if SkuTable_t or BoardTable_t change -#define PPTABLE_VERSION 0x20 +#define PPTABLE_VERSION 0x27 #define NUM_GFXCLK_DPM_LEVELS 16 #define NUM_SOCCLK_DPM_LEVELS 8 @@ -96,7 +96,7 @@ #define FEATURE_MEM_TEMP_READ_BIT 47 #define FEATURE_ATHUB_MMHUB_PG_BIT 48 #define FEATURE_SOC_PCC_BIT 49 -#define FEATURE_SPARE_50_BIT 50 +#define FEATURE_EDC_PWRBRK_BIT 50 #define FEATURE_SPARE_51_BIT 51 #define FEATURE_SPARE_52_BIT 52 #define FEATURE_SPARE_53_BIT 53 @@ -282,15 +282,15 @@ typedef enum { } I2cControllerPort_e; typedef enum { - I2C_CONTROLLER_NAME_VR_GFX = 0, - I2C_CONTROLLER_NAME_VR_SOC, - I2C_CONTROLLER_NAME_VR_VMEMP, - I2C_CONTROLLER_NAME_VR_VDDIO, - I2C_CONTROLLER_NAME_LIQUID0, - I2C_CONTROLLER_NAME_LIQUID1, - I2C_CONTROLLER_NAME_PLX, - I2C_CONTROLLER_NAME_OTHER, - I2C_CONTROLLER_NAME_COUNT, + I2C_CONTROLLER_NAME_VR_GFX = 0, + I2C_CONTROLLER_NAME_VR_SOC, + I2C_CONTROLLER_NAME_VR_VMEMP, + I2C_CONTROLLER_NAME_VR_VDDIO, + I2C_CONTROLLER_NAME_LIQUID0, + I2C_CONTROLLER_NAME_LIQUID1, + I2C_CONTROLLER_NAME_PLX, + I2C_CONTROLLER_NAME_FAN_INTAKE, + I2C_CONTROLLER_NAME_COUNT, 
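The reworked vega10_fan_ctrl_get_fan_speed_pwm() earlier in this hunk derives the PWM value from the fan duty cycle rather than from RPM: it scales FDO_PWM_DUTY against FMAX_DUTY100 into the 0 to 255 range, widening to 64 bits before dividing, and returns -EINVAL when duty100 reads back as zero. The user-space style sketch below redoes the same arithmetic with plain division standing in for the kernel's do_div() and with invented register values:

	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MIN(a, b) ((a) < (b) ? (a) : (b))

	/* duty/duty100 come from CG_THERMAL_STATUS / CG_FDO_CTRL1 in the driver. */
	static int duty_to_pwm(uint32_t duty, uint32_t duty100, uint32_t *speed)
	{
		uint64_t tmp64;

		if (!duty100)
			return -EINVAL;

		tmp64 = (uint64_t)duty * 255;
		tmp64 /= duty100;          /* the driver uses do_div() here */
		*speed = MIN((uint32_t)tmp64, 255);
		return 0;
	}

	int main(void)
	{
		uint32_t pwm;

		if (!duty_to_pwm(60, 100, &pwm))   /* 60% duty -> 153 */
			printf("pwm = %u\n", pwm);
		return 0;
	}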
} I2cControllerName_e; typedef enum { @@ -302,6 +302,7 @@ typedef enum { I2C_CONTROLLER_THROTTLER_LIQUID0, I2C_CONTROLLER_THROTTLER_LIQUID1, I2C_CONTROLLER_THROTTLER_PLX, + I2C_CONTROLLER_THROTTLER_FAN_INTAKE, I2C_CONTROLLER_THROTTLER_INA3221, I2C_CONTROLLER_THROTTLER_COUNT, } I2cControllerThrottler_e; @@ -309,8 +310,9 @@ typedef enum { typedef enum { I2C_CONTROLLER_PROTOCOL_VR_XPDE132G5, I2C_CONTROLLER_PROTOCOL_VR_IR35217, - I2C_CONTROLLER_PROTOCOL_TMP_TMP102A, + I2C_CONTROLLER_PROTOCOL_TMP_MAX31875, I2C_CONTROLLER_PROTOCOL_INA3221, + I2C_CONTROLLER_PROTOCOL_TMP_MAX6604, I2C_CONTROLLER_PROTOCOL_COUNT, } I2cControllerProtocol_e; @@ -690,6 +692,9 @@ typedef struct { #define PP_OD_FEATURE_UCLK_BIT 8 #define PP_OD_FEATURE_ZERO_FAN_BIT 9 #define PP_OD_FEATURE_TEMPERATURE_BIT 10 +#define PP_OD_FEATURE_POWER_FEATURE_CTRL_BIT 11 +#define PP_OD_FEATURE_ASIC_TDC_BIT 12 +#define PP_OD_FEATURE_COUNT 13 typedef enum { PP_OD_POWER_FEATURE_ALWAYS_ENABLED, @@ -697,6 +702,11 @@ typedef enum { PP_OD_POWER_FEATURE_ALWAYS_DISABLED, } PP_OD_POWER_FEATURE_e; +typedef enum { + FAN_MODE_AUTO = 0, + FAN_MODE_MANUAL_LINEAR, +} FanMode_e; + typedef struct { uint32_t FeatureCtrlMask; @@ -708,8 +718,8 @@ typedef struct { uint8_t RuntimePwrSavingFeaturesCtrl; //Frequency changes - int16_t GfxclkFmin; // MHz - int16_t GfxclkFmax; // MHz + int16_t GfxclkFmin; // MHz + int16_t GfxclkFmax; // MHz uint16_t UclkFmin; // MHz uint16_t UclkFmax; // MHz @@ -730,7 +740,12 @@ typedef struct { uint8_t MaxOpTemp; uint8_t Padding[4]; - uint32_t Spare[12]; + uint16_t GfxVoltageFullCtrlMode; + uint16_t GfxclkFullCtrlMode; + uint16_t UclkFullCtrlMode; + int16_t AsicTdc; + + uint32_t Spare[10]; uint32_t MmHubPadding[8]; // SMU internal use. Adding here instead of external as a workaround } OverDriveTable_t; @@ -748,8 +763,8 @@ typedef struct { uint8_t IdlePwrSavingFeaturesCtrl; uint8_t RuntimePwrSavingFeaturesCtrl; - uint16_t GfxclkFmin; // MHz - uint16_t GfxclkFmax; // MHz + int16_t GfxclkFmin; // MHz + int16_t GfxclkFmax; // MHz uint16_t UclkFmin; // MHz uint16_t UclkFmax; // MHz @@ -769,7 +784,12 @@ typedef struct { uint8_t MaxOpTemp; uint8_t Padding[4]; - uint32_t Spare[12]; + uint16_t GfxVoltageFullCtrlMode; + uint16_t GfxclkFullCtrlMode; + uint16_t UclkFullCtrlMode; + int16_t AsicTdc; + + uint32_t Spare[10]; } OverDriveLimits_t; @@ -903,7 +923,8 @@ typedef struct { uint16_t FanStartTempMin; uint16_t FanStartTempMax; - uint32_t Spare[12]; + uint16_t PowerMinPpt0[POWER_SOURCE_COUNT]; + uint32_t Spare[11]; } MsgLimits_t; @@ -1086,11 +1107,13 @@ typedef struct { uint32_t GfxoffSpare[15]; // GFX GPO - float DfllBtcMasterScalerM; + uint32_t DfllBtcMasterScalerM; int32_t DfllBtcMasterScalerB; - float DfllBtcSlaveScalerM; + uint32_t DfllBtcSlaveScalerM; int32_t DfllBtcSlaveScalerB; - uint32_t GfxGpoSpare[12]; + uint32_t DfllPccAsWaitCtrl; //GDFLL_AS_WAIT_CTRL_PCC register value to be passed to RLC msg + uint32_t DfllPccAsStepCtrl; //GDFLL_AS_STEP_CTRL_PCC register value to be passed to RLC msg + uint32_t GfxGpoSpare[10]; // GFX DCS @@ -1106,7 +1129,10 @@ typedef struct { uint16_t DcsTimeout; //This is the amount of time SMU FW waits for RLC to put GFX into GFXOFF before reverting to the fallback mechanism of throttling GFXCLK to Fmin. 
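A recurring pattern in the PMFW table hunks above: new members are carved out of the trailing Spare arrays so the overall structure size the firmware expects stays constant. In OverDriveTable_t, for instance, four new 16-bit fields cost exactly two uint32_t entries, so Spare[12] shrinks to Spare[10]. A minimal, hypothetical illustration of that invariant (not the real table layout):

	#include <stdint.h>

	struct od_table_v1 {
		uint8_t  max_op_temp;
		uint8_t  padding[3];
		uint32_t spare[12];
	};

	struct od_table_v2 {
		uint8_t  max_op_temp;
		uint8_t  padding[3];
		uint16_t gfx_voltage_full_ctrl_mode;
		uint16_t gfxclk_full_ctrl_mode;
		uint16_t uclk_full_ctrl_mode;
		int16_t  asic_tdc;
		uint32_t spare[10];        /* shrunk by the size of the new fields */
	};

	/* The firmware-visible size must stay identical across the two layouts. */
	_Static_assert(sizeof(struct od_table_v1) == sizeof(struct od_table_v2),
		       "spare fields must absorb newly added members");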
- uint32_t DcsSpare[16]; + uint32_t DcsSpare[14]; + + // UCLK section + uint16_t ShadowFreqTableUclk[NUM_UCLK_DPM_LEVELS]; // In MHz // UCLK section uint8_t UseStrobeModeOptimizations; //Set to indicate that FW should use strobe mode optimizations @@ -1163,13 +1189,14 @@ typedef struct { uint16_t IntakeTempHighIntakeAcousticLimit; uint16_t IntakeTempAcouticLimitReleaseRate; - uint16_t FanStalledTempLimitOffset; + int16_t FanAbnormalTempLimitOffset; uint16_t FanStalledTriggerRpm; - uint16_t FanAbnormalTriggerRpm; - uint16_t FanPadding; - - uint32_t FanSpare[14]; + uint16_t FanAbnormalTriggerRpmCoeff; + uint16_t FanAbnormalDetectionEnable; + uint8_t FanIntakeSensorSupport; + uint8_t FanIntakePadding[3]; + uint32_t FanSpare[13]; // SECTION: VDD_GFX AVFS uint8_t OverrideGfxAvfsFuses; @@ -1193,7 +1220,6 @@ typedef struct { uint32_t dGbV_dT_vmin; uint32_t dGbV_dT_vmax; - //Unused: PMFW-9370 uint32_t V2F_vmin_range_low; uint32_t V2F_vmin_range_high; uint32_t V2F_vmax_range_low; @@ -1238,8 +1264,21 @@ typedef struct { // SECTION: Advanced Options uint32_t DebugOverrides; + // Section: Total Board Power idle vs active coefficients + uint8_t TotalBoardPowerSupport; + uint8_t TotalBoardPowerPadding[3]; + + int16_t TotalIdleBoardPowerM; + int16_t TotalIdleBoardPowerB; + int16_t TotalBoardPowerM; + int16_t TotalBoardPowerB; + + QuadraticInt_t qFeffCoeffGameClock[POWER_SOURCE_COUNT]; + QuadraticInt_t qFeffCoeffBaseClock[POWER_SOURCE_COUNT]; + QuadraticInt_t qFeffCoeffBoostClock[POWER_SOURCE_COUNT]; + // SECTION: Sku Reserved - uint32_t Spare[64]; + uint32_t Spare[43]; // Padding for MMHUB - do not modify this uint32_t MmHubPadding[8]; @@ -1304,7 +1343,8 @@ typedef struct { // SECTION: Clock Spread Spectrum // UCLK Spread Spectrum - uint16_t UclkSpreadPadding; + uint8_t UclkTrainingModeSpreadPercent; // Q4.4 + uint8_t UclkSpreadPadding; uint16_t UclkSpreadFreq; // kHz // UCLK Spread Spectrum @@ -1317,11 +1357,7 @@ typedef struct { // Section: Memory Config uint8_t DramWidth; // Width of interface to the channel for each DRAM module. See DRAM_BIT_WIDTH_TYPE_e - uint8_t PaddingMem1[3]; - - // Section: Total Board Power - uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power - uint16_t BoardPowerPadding; + uint8_t PaddingMem1[7]; // SECTION: UMC feature flags uint8_t HsrEnabled; @@ -1423,8 +1459,11 @@ typedef struct { uint16_t Vcn1ActivityPercentage ; uint32_t EnergyAccumulator; - uint16_t AverageSocketPower ; + uint16_t AverageSocketPower; + uint16_t AverageTotalBoardPower; + uint16_t AvgTemperature[TEMP_COUNT]; + uint16_t AvgTemperatureFanIntake; uint8_t PcieRate ; uint8_t PcieWidth ; @@ -1592,5 +1631,7 @@ typedef struct { #define IH_INTERRUPT_CONTEXT_ID_AUDIO_D0 0x5 #define IH_INTERRUPT_CONTEXT_ID_AUDIO_D3 0x6 #define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7 +#define IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8 +#define IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9 #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_4_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_4_ppsmc.h index d9b0cd752200..f4d6c07b56ea 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_4_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_4_ppsmc.h @@ -54,14 +54,14 @@ #define PPSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. 
Requirement specified by PMFW team #define PPSMC_MSG_GetPmfwVersion 0x02 ///< Get PMFW version #define PPSMC_MSG_GetDriverIfVersion 0x03 ///< Get PMFW_DRIVER_IF version -#define PPSMC_MSG_EnableGfxOff 0x04 ///< Enable GFXOFF -#define PPSMC_MSG_DisableGfxOff 0x05 ///< Disable GFXOFF +#define PPSMC_MSG_SPARE0 0x04 ///< SPARE +#define PPSMC_MSG_SPARE1 0x05 ///< SPARE #define PPSMC_MSG_PowerDownVcn 0x06 ///< Power down VCN #define PPSMC_MSG_PowerUpVcn 0x07 ///< Power up VCN; VCN is power gated by default #define PPSMC_MSG_SetHardMinVcn 0x08 ///< For wireless display #define PPSMC_MSG_SetSoftMinGfxclk 0x09 ///< Set SoftMin for GFXCLK, argument is frequency in MHz -#define PPSMC_MSG_ActiveProcessNotify 0x0A ///< Needs update -#define PPSMC_MSG_ForcePowerDownGfx 0x0B ///< Force power down GFX, i.e. enter GFXOFF +#define PPSMC_MSG_SPARE2 0x0A ///< SPARE +#define PPSMC_MSG_SPARE3 0x0B ///< SPARE #define PPSMC_MSG_PrepareMp1ForUnload 0x0C ///< Prepare PMFW for GFX driver unload #define PPSMC_MSG_SetDriverDramAddrHigh 0x0D ///< Set high 32 bits of DRAM address for Driver table transfer #define PPSMC_MSG_SetDriverDramAddrLow 0x0E ///< Set low 32 bits of DRAM address for Driver table transfer @@ -73,8 +73,7 @@ #define PPSMC_MSG_SetSoftMinFclk 0x14 ///< Set hard min for FCLK #define PPSMC_MSG_SetSoftMinVcn 0x15 ///< Set soft min for VCN clocks (VCLK and DCLK) - -#define PPSMC_MSG_EnableGfxImu 0x16 ///< Needs update +#define PPSMC_MSG_EnableGfxImu 0x16 ///< Enable GFX IMU #define PPSMC_MSG_GetGfxclkFrequency 0x17 ///< Get GFX clock frequency #define PPSMC_MSG_GetFclkFrequency 0x18 ///< Get FCLK frequency @@ -102,8 +101,8 @@ #define PPSMC_MSG_SetHardMinIspxclkByFreq 0x2C ///< Set HardMin by frequency for ISPXCLK #define PPSMC_MSG_PowerDownUmsch 0x2D ///< Power down VCN.UMSCH (aka VSCH) scheduler #define PPSMC_MSG_PowerUpUmsch 0x2E ///< Power up VCN.UMSCH (aka VSCH) scheduler -#define PPSMC_Message_IspStutterOn_MmhubPgDis 0x2F ///< ISP StutterOn mmHub PgDis -#define PPSMC_Message_IspStutterOff_MmhubPgEn 0x30 ///< ISP StufferOff mmHub PgEn +#define PPSMC_MSG_IspStutterOn_MmhubPgDis 0x2F ///< ISP StutterOn mmHub PgDis +#define PPSMC_MSG_IspStutterOff_MmhubPgEn 0x30 ///< ISP StufferOff mmHub PgEn #define PPSMC_Message_Count 0x31 ///< Total number of PPSMC messages /** @}*/ diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h index a9215494dcdd..d466db6f0ad4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h @@ -147,14 +147,6 @@ struct smu_11_5_power_context { uint32_t max_fast_ppt_limit; }; -enum smu_v11_0_baco_seq { - BACO_SEQ_BACO = 0, - BACO_SEQ_MSR, - BACO_SEQ_BAMACO, - BACO_SEQ_ULPS, - BACO_SEQ_COUNT, -}; - #if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) int smu_v11_0_init_microcode(struct smu_context *smu); @@ -257,7 +249,7 @@ int smu_v11_0_baco_enter(struct smu_context *smu); int smu_v11_0_baco_exit(struct smu_context *smu); int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, - enum smu_v11_0_baco_seq baco_seq); + enum smu_baco_seq baco_seq); int smu_v11_0_mode1_reset(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index 80fb583b18d9..865d6358918d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -31,7 +31,7 @@ #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04 #define 
SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32 -#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2C +#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x35 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_10 0x1D #define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms @@ -124,14 +124,6 @@ struct smu_13_0_power_context { enum smu_13_0_power_state power_state; }; -enum smu_v13_0_baco_seq { - BACO_SEQ_BACO = 0, - BACO_SEQ_MSR, - BACO_SEQ_BAMACO, - BACO_SEQ_ULPS, - BACO_SEQ_COUNT, -}; - #if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) int smu_v13_0_init_microcode(struct smu_context *smu); @@ -218,6 +210,9 @@ int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu); int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks); +int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu, + enum smu_baco_seq baco_seq); + bool smu_v13_0_baco_is_support(struct smu_context *smu); enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 74996a8fb671..697e98a0a20a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -377,7 +377,13 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu) if (((adev->pdev->device == 0x73A1) && (adev->pdev->revision == 0x00)) || ((adev->pdev->device == 0x73BF) && - (adev->pdev->revision == 0xCF))) + (adev->pdev->revision == 0xCF)) || + ((adev->pdev->device == 0x7422) && + (adev->pdev->revision == 0x00)) || + ((adev->pdev->device == 0x73A3) && + (adev->pdev->revision == 0x00)) || + ((adev->pdev->device == 0x73E3) && + (adev->pdev->revision == 0x00))) smu_baco->platform_support = false; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index dccbd9f70723..70b560737687 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1576,7 +1576,7 @@ int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu) } int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, - enum smu_v11_0_baco_seq baco_seq) + enum smu_baco_seq baco_seq) { return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 43fb102a65f5..89f0f6eb19f3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -2230,6 +2230,15 @@ int smu_v13_0_gfx_ulv_control(struct smu_context *smu, return ret; } +int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu, + enum smu_baco_seq baco_seq) +{ + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_ArmD3, + baco_seq, + NULL); +} + bool smu_v13_0_baco_is_support(struct smu_context *smu) { struct smu_baco_context *smu_baco = &smu->smu_baco; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 29529328152d..f0121d171630 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -120,6 +120,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0), MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0), MSG_MAP(DFCstateControl, 
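Both sides of the SMU interface move in lockstep above: smu13_driver_if_v13_0_7.h bumps SMU13_DRIVER_IF_VERSION to 0x35 and smu_v13_0.h bumps the matching SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 expectation, keeping the two definitions comparable. The sketch below shows the kind of mismatch check such a pair of constants enables; check_driver_if_version() and its use are hypothetical, not the driver's actual code path:

	#include <stdint.h>
	#include <stdio.h>

	#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7  0x35  /* driver-side expectation */

	/*
	 * if_version would normally be queried from the PMFW at init time;
	 * here it is simply passed in.
	 */
	static int check_driver_if_version(uint32_t if_version)
	{
		if (if_version != SMU13_DRIVER_IF_VERSION_SMU_V13_0_7) {
			fprintf(stderr, "SMU driver-if mismatch: fw 0x%x, driver 0x%x\n",
				if_version, SMU13_DRIVER_IF_VERSION_SMU_V13_0_7);
			return -1;   /* tables may not be interpreted safely */
		}
		return 0;
	}

	int main(void)
	{
		return check_driver_if_version(0x35) ? 1 : 0;
	}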
PPSMC_MSG_SetExternalClientDfCstateAllow, 0), + MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0), }; static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = { @@ -1566,6 +1567,31 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, NULL); } +static int smu_v13_0_0_baco_enter(struct smu_context *smu) +{ + struct smu_baco_context *smu_baco = &smu->smu_baco; + struct amdgpu_device *adev = smu->adev; + + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) + return smu_v13_0_baco_set_armd3_sequence(smu, + smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO); + else + return smu_v13_0_baco_enter(smu); +} + +static int smu_v13_0_0_baco_exit(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { + /* Wait for PMFW handling for the Dstate change */ + usleep_range(10000, 11000); + return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); + } else { + return smu_v13_0_baco_exit(smu); + } +} + static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; @@ -1827,8 +1853,8 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .baco_is_support = smu_v13_0_baco_is_support, .baco_get_state = smu_v13_0_baco_get_state, .baco_set_state = smu_v13_0_baco_set_state, - .baco_enter = smu_v13_0_baco_enter, - .baco_exit = smu_v13_0_baco_exit, + .baco_enter = smu_v13_0_0_baco_enter, + .baco_exit = smu_v13_0_0_baco_exit, .mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported, .mode1_reset = smu_v13_0_mode1_reset, .set_mp1_state = smu_v13_0_0_set_mp1_state, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index c4102cfb734c..d74debc584f8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -122,6 +122,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0), MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0), MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0), + MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0), }; static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = { @@ -1578,6 +1579,31 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu, return ret; } +static int smu_v13_0_7_baco_enter(struct smu_context *smu) +{ + struct smu_baco_context *smu_baco = &smu->smu_baco; + struct amdgpu_device *adev = smu->adev; + + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) + return smu_v13_0_baco_set_armd3_sequence(smu, + smu_baco->maco_support ? 
BACO_SEQ_BAMACO : BACO_SEQ_BACO); + else + return smu_v13_0_baco_enter(smu); +} + +static int smu_v13_0_7_baco_exit(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { + /* Wait for PMFW handling for the Dstate change */ + usleep_range(10000, 11000); + return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); + } else { + return smu_v13_0_baco_exit(smu); + } +} + static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; @@ -1655,8 +1681,8 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .baco_is_support = smu_v13_0_baco_is_support, .baco_get_state = smu_v13_0_baco_get_state, .baco_set_state = smu_v13_0_baco_set_state, - .baco_enter = smu_v13_0_baco_enter, - .baco_exit = smu_v13_0_baco_exit, + .baco_enter = smu_v13_0_7_baco_enter, + .baco_exit = smu_v13_0_7_baco_exit, .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported, .mode1_reset = smu_v13_0_mode1_reset, .set_mp1_state = smu_v13_0_7_set_mp1_state, diff --git a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c index 3ea53bb67d3b..bd61e20770a5 100644 --- a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c +++ b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c @@ -63,23 +63,45 @@ ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter, u8 offset, void *buffer, size_t size) { + u8 zero = 0; + char *tmpbuf = NULL; + /* + * As sub-addressing is not supported by all adaptors, + * always explicitly read from the start and discard + * any bytes that come before the requested offset. + * This way, no matter whether the adaptor supports it + * or not, we'll end up reading the proper data. + */ struct i2c_msg msgs[] = { { .addr = DP_DUAL_MODE_SLAVE_ADDRESS, .flags = 0, .len = 1, - .buf = &offset, + .buf = &zero, }, { .addr = DP_DUAL_MODE_SLAVE_ADDRESS, .flags = I2C_M_RD, - .len = size, + .len = size + offset, .buf = buffer, }, }; int ret; + if (offset) { + tmpbuf = kmalloc(size + offset, GFP_KERNEL); + if (!tmpbuf) + return -ENOMEM; + + msgs[1].buf = tmpbuf; + } + ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs)); + if (tmpbuf) + memcpy(buffer, tmpbuf + offset, size); + + kfree(tmpbuf); + if (ret < 0) return ret; if (ret != ARRAY_SIZE(msgs)) @@ -208,18 +230,6 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(const struct drm_device *dev, if (ret) return DRM_DP_DUAL_MODE_UNKNOWN; - /* - * Sigh. Some (maybe all?) type 1 adaptors are broken and ack - * the offset but ignore it, and instead they just always return - * data from the start of the HDMI ID buffer. So for a broken - * type 1 HDMI adaptor a single byte read will always give us - * 0x44, and for a type 1 DVI adaptor it should give 0x00 - * (assuming it implements any registers). Fortunately neither - * of those values will match the type 2 signature of the - * DP_DUAL_MODE_ADAPTOR_ID register so we can proceed with - * the type 2 adaptor detection safely even in the presence - * of broken type 1 adaptors. 
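The new smu_v13_0_0_baco_enter/exit() and smu_v13_0_7_baco_enter/exit() wrappers above share one decision: when the device is in runtime PM and the audio function is still enabled, they arm a D3 sequence through the ArmD3 message (BAMACO when MACO is supported, BACO otherwise, and ULPS on exit after a short settle delay) instead of taking the generic BACO enter/exit path. A compact sketch of that branch with stub callbacks, all names hypothetical stand-ins for the smu_v13_0_* helpers:

	#include <stdbool.h>
	#include <stdio.h>

	enum baco_seq { BACO_SEQ_BACO = 0, BACO_SEQ_MSR, BACO_SEQ_BAMACO, BACO_SEQ_ULPS };

	struct smu_ctx {
		bool in_runpm;
		bool audio_func_enabled;
		bool maco_support;
	};

	static int arm_d3_sequence(struct smu_ctx *smu, enum baco_seq seq)
	{
		printf("ArmD3 message, sequence %d\n", seq);  /* stands in for the SMC message */
		return 0;
	}

	static int generic_baco_enter(struct smu_ctx *smu)
	{
		printf("generic BACO enter\n");
		return 0;
	}

	static int baco_enter(struct smu_ctx *smu)
	{
		if (smu->in_runpm && smu->audio_func_enabled)
			return arm_d3_sequence(smu, smu->maco_support ?
					       BACO_SEQ_BAMACO : BACO_SEQ_BACO);

		return generic_baco_enter(smu);
	}

	int main(void)
	{
		struct smu_ctx smu = { .in_runpm = true, .audio_func_enabled = true,
				       .maco_support = true };
		return baco_enter(&smu);
	}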
- */ ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID, &adaptor_id, sizeof(adaptor_id)); drm_dbg_kms(dev, "DP dual mode adaptor ID: %02x (err %zd)\n", adaptor_id, ret); @@ -233,11 +243,10 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(const struct drm_device *dev, return DRM_DP_DUAL_MODE_TYPE2_DVI; } /* - * If neither a proper type 1 ID nor a broken type 1 adaptor - * as described above, assume type 1, but let the user know - * that we may have misdetected the type. + * If not a proper type 1 ID, still assume type 1, but let + * the user know that we may have misdetected the type. */ - if (!is_type1_adaptor(adaptor_id) && adaptor_id != hdmi_id[0]) + if (!is_type1_adaptor(adaptor_id)) drm_err(dev, "Unexpected DP dual mode adaptor ID %02x\n", adaptor_id); } @@ -343,10 +352,8 @@ EXPORT_SYMBOL(drm_dp_dual_mode_get_tmds_output); * @enable: enable (as opposed to disable) the TMDS output buffers * * Set the state of the TMDS output buffers in the adaptor. For - * type2 this is set via the DP_DUAL_MODE_TMDS_OEN register. As - * some type 1 adaptors have problems with registers (see comments - * in drm_dp_dual_mode_detect()) we avoid touching the register, - * making this function a no-op on type 1 adaptors. + * type2 this is set via the DP_DUAL_MODE_TMDS_OEN register. + * Type1 adaptors do not support any register writes. * * Returns: * 0 on success, negative error code on failure diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index ecd22c038c8c..51a46689cda7 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -5186,7 +5186,7 @@ int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm mst_state = drm_atomic_get_mst_topology_state(state, mgr); if (IS_ERR(mst_state)) - return -EINVAL; + return PTR_ERR(mst_state); list_for_each_entry(pos, &mst_state->payloads, next) { diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 8214a0b1ab7f..203bf8d6c34c 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -615,7 +615,7 @@ static int drm_dev_init(struct drm_device *dev, mutex_init(&dev->clientlist_mutex); mutex_init(&dev->master_mutex); - ret = drmm_add_action(dev, drm_dev_init_release, NULL); + ret = drmm_add_action_or_reset(dev, drm_dev_init_release, NULL); if (ret) return ret; diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c index e2f76621453c..3ee59bae9d2f 100644 --- a/drivers/gpu/drm/drm_format_helper.c +++ b/drivers/gpu/drm/drm_format_helper.c @@ -807,6 +807,38 @@ static bool is_listed_fourcc(const uint32_t *fourccs, size_t nfourccs, uint32_t return false; } +static const uint32_t conv_from_xrgb8888[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_RGB565, + DRM_FORMAT_RGB888, +}; + +static const uint32_t conv_from_rgb565_888[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, +}; + +static bool is_conversion_supported(uint32_t from, uint32_t to) +{ + switch (from) { + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: + return is_listed_fourcc(conv_from_xrgb8888, ARRAY_SIZE(conv_from_xrgb8888), to); + case DRM_FORMAT_RGB565: + case DRM_FORMAT_RGB888: + return is_listed_fourcc(conv_from_rgb565_888, ARRAY_SIZE(conv_from_rgb565_888), to); + case DRM_FORMAT_XRGB2101010: + return to == DRM_FORMAT_ARGB2101010; + case DRM_FORMAT_ARGB2101010: + return to == DRM_FORMAT_XRGB2101010; + default: + return 
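drm_dp_dual_mode_read() above works around adaptors that ignore I2C sub-addressing by always reading from offset 0 and throwing away the bytes in front of the requested offset: for a non-zero offset it reads size + offset bytes into a temporary buffer and copies only the tail to the caller. A stand-alone sketch of that discard-the-prefix idea, with a made-up byte array in place of the I2C transfer:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Pretend register file of an adaptor; a real driver would do an I2C read. */
	static const uint8_t adaptor_regs[16] = {
		'D', 'P', '-', 'H', 'D', 'M', 'I', ' ',
		'A', 'D', 'A', 'P', 'T', 'O', 'R', 0x04,
	};

	/* Always "read" from offset 0, then discard the first @offset bytes. */
	static int dual_mode_read(uint8_t offset, void *buffer, size_t size)
	{
		uint8_t *tmp;

		if ((size_t)offset + size > sizeof(adaptor_regs))
			return -1;

		tmp = malloc(size + offset);
		if (!tmp)
			return -1;

		memcpy(tmp, adaptor_regs, size + offset);   /* the full read from 0 */
		memcpy(buffer, tmp + offset, size);         /* keep only the tail */
		free(tmp);
		return 0;
	}

	int main(void)
	{
		uint8_t id;

		if (!dual_mode_read(15, &id, sizeof(id)))   /* read one late register */
			printf("value at offset 15: 0x%02x\n", id);
		return 0;
	}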
false; + } +} + /** * drm_fb_build_fourcc_list - Filters a list of supported color formats against * the device's native formats @@ -827,7 +859,9 @@ static bool is_listed_fourcc(const uint32_t *fourccs, size_t nfourccs, uint32_t * be handed over to drm_universal_plane_init() et al. Native formats * will go before emulated formats. Other heuristics might be applied * to optimize the order. Formats near the beginning of the list are - * usually preferred over formats near the end of the list. + * usually preferred over formats near the end of the list. Formats + * without conversion helpers will be skipped. New drivers should only + * pass in XRGB8888 and avoid exposing additional emulated formats. * * Returns: * The number of color-formats 4CC codes returned in @fourccs_out. @@ -839,7 +873,7 @@ size_t drm_fb_build_fourcc_list(struct drm_device *dev, { u32 *fourccs = fourccs_out; const u32 *fourccs_end = fourccs_out + nfourccs_out; - bool found_native = false; + uint32_t native_format = 0; size_t i; /* @@ -858,27 +892,19 @@ size_t drm_fb_build_fourcc_list(struct drm_device *dev, drm_dbg_kms(dev, "adding native format %p4cc\n", &fourcc); - if (!found_native) - found_native = is_listed_fourcc(driver_fourccs, driver_nfourccs, fourcc); + /* + * There should only be one native format with the current API. + * This API needs to be refactored to correctly support arbitrary + * sets of native formats, since it needs to report which native + * format to use for each emulated format. + */ + if (!native_format) + native_format = fourcc; *fourccs = fourcc; ++fourccs; } /* - * The plane's atomic_update helper converts the framebuffer's color format - * to a native format when copying to device memory. - * - * If there is not a single format supported by both, device and - * driver, the native formats are likely not supported by the conversion - * helpers. Therefore *only* support the native formats and add a - * conversion helper ASAP. - */ - if (!found_native) { - drm_warn(dev, "Format conversion helpers required to add extra formats.\n"); - goto out; - } - - /* * The extra formats, emulated by the driver, go second. 
*/ @@ -890,6 +916,9 @@ size_t drm_fb_build_fourcc_list(struct drm_device *dev, } else if (fourccs == fourccs_end) { drm_warn(dev, "Ignoring emulated format %p4cc\n", &fourcc); continue; /* end of available output buffer */ + } else if (!is_conversion_supported(fourcc, native_format)) { + drm_dbg_kms(dev, "Unsupported emulated format %p4cc\n", &fourcc); + continue; /* format is not supported for conversion */ } drm_dbg_kms(dev, "adding emulated format %p4cc\n", &fourcc); @@ -898,7 +927,6 @@ size_t drm_fb_build_fourcc_list(struct drm_device *dev, ++fourccs; } -out: return fourccs - fourccs_out; } EXPORT_SYMBOL(drm_fb_build_fourcc_list); diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 7bb98e6a446d..5ea5e260118c 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -104,7 +104,8 @@ static inline void drm_vblank_flush_worker(struct drm_vblank_crtc *vblank) static inline void drm_vblank_destroy_worker(struct drm_vblank_crtc *vblank) { - kthread_destroy_worker(vblank->worker); + if (vblank->worker) + kthread_destroy_worker(vblank->worker); } int drm_vblank_worker_init(struct drm_vblank_crtc *vblank); diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c index 939d621c9ad4..688c8afe0bf1 100644 --- a/drivers/gpu/drm/drm_mode_config.c +++ b/drivers/gpu/drm/drm_mode_config.c @@ -151,9 +151,6 @@ int drm_mode_getresources(struct drm_device *dev, void *data, count = 0; connector_id = u64_to_user_ptr(card_res->connector_id_ptr); drm_for_each_connector_iter(connector, &conn_iter) { - if (connector->registration_state != DRM_CONNECTOR_REGISTERED) - continue; - /* only expose writeback connectors if userspace understands them */ if (!file_priv->writeback_connectors && (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)) diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 8a0c0e0bb5bd..52d8800a8ab8 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -134,6 +134,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* Acer Switch V 10 (SW5-017) */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SW5-017"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* Anbernic Win600 */ .matches = { DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Anbernic"), @@ -319,6 +325,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"), }, .driver_data = (void *)&lcd1200x1920_rightside_up, + }, { /* Nanote UMPC-01 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "RWC CO.,LTD"), + DMI_MATCH(DMI_PRODUCT_NAME, "UMPC-01"), + }, + .driver_data = (void *)&lcd1200x1920_rightside_up, }, { /* OneGX1 Pro */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SYSTEM_MANUFACTURER"), diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index a26edcdadc21..cea00aaca04b 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -282,6 +282,7 @@ i915-y += \ display/intel_ddi.o \ display/intel_ddi_buf_trans.o \ display/intel_display_trace.o \ + display/intel_dkl_phy.o \ display/intel_dp.o \ display/intel_dp_aux.o \ display/intel_dp_aux_backlight.o \ diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index da8472cdc135..69ecf2a3d6c6 
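The conversion tables and is_conversion_supported() added above let drm_fb_build_fourcc_list() keep an emulated format only when a helper can convert it to the device's native format. With a native RGB565, for example, emulated XRGB8888 and ARGB8888 survive while RGB888 is dropped, because conv_from_rgb565_888 only lists the two 8888 variants. The stand-alone check below condenses those tables for experimentation; format names are spelled as strings for readability rather than the real DRM_FORMAT_* codes:

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	/* Condensed mirror of the conversion tables above, keyed by format name. */
	static bool conversion_supported(const char *from, const char *to)
	{
		if (!strcmp(from, "XRGB8888") || !strcmp(from, "ARGB8888"))
			return !strcmp(to, "XRGB8888") || !strcmp(to, "ARGB8888") ||
			       !strcmp(to, "XRGB2101010") || !strcmp(to, "ARGB2101010") ||
			       !strcmp(to, "RGB565") || !strcmp(to, "RGB888");
		if (!strcmp(from, "RGB565") || !strcmp(from, "RGB888"))
			return !strcmp(to, "XRGB8888") || !strcmp(to, "ARGB8888");
		if (!strcmp(from, "XRGB2101010"))
			return !strcmp(to, "ARGB2101010");
		if (!strcmp(from, "ARGB2101010"))
			return !strcmp(to, "XRGB2101010");
		return false;
	}

	int main(void)
	{
		const char *native = "RGB565";
		const char *emulated[] = { "XRGB8888", "ARGB8888", "RGB888" };

		for (unsigned int i = 0; i < 3; i++)
			printf("%s -> %s: %s\n", emulated[i], native,
			       conversion_supported(emulated[i], native) ? "kept" : "dropped");
		return 0;
	}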
100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -43,6 +43,7 @@ #include "intel_de.h" #include "intel_display_power.h" #include "intel_display_types.h" +#include "intel_dkl_phy.h" #include "intel_dp.h" #include "intel_dp_link_training.h" #include "intel_dp_mst.h" @@ -1262,33 +1263,30 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder, for (ln = 0; ln < 2; ln++) { int level; - intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), - HIP_INDEX_VAL(tc_port, ln)); - - intel_de_write(dev_priv, DKL_TX_PMD_LANE_SUS(tc_port), 0); + intel_dkl_phy_write(dev_priv, DKL_TX_PMD_LANE_SUS(tc_port), ln, 0); level = intel_ddi_level(encoder, crtc_state, 2*ln+0); - intel_de_rmw(dev_priv, DKL_TX_DPCNTL0(tc_port), - DKL_TX_PRESHOOT_COEFF_MASK | - DKL_TX_DE_EMPAHSIS_COEFF_MASK | - DKL_TX_VSWING_CONTROL_MASK, - DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) | - DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) | - DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing)); + intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL0(tc_port), ln, + DKL_TX_PRESHOOT_COEFF_MASK | + DKL_TX_DE_EMPAHSIS_COEFF_MASK | + DKL_TX_VSWING_CONTROL_MASK, + DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) | + DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) | + DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing)); level = intel_ddi_level(encoder, crtc_state, 2*ln+1); - intel_de_rmw(dev_priv, DKL_TX_DPCNTL1(tc_port), - DKL_TX_PRESHOOT_COEFF_MASK | - DKL_TX_DE_EMPAHSIS_COEFF_MASK | - DKL_TX_VSWING_CONTROL_MASK, - DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) | - DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) | - DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing)); + intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL1(tc_port), ln, + DKL_TX_PRESHOOT_COEFF_MASK | + DKL_TX_DE_EMPAHSIS_COEFF_MASK | + DKL_TX_VSWING_CONTROL_MASK, + DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) | + DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) | + DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing)); - intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port), - DKL_TX_DP20BITMODE, 0); + intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port), ln, + DKL_TX_DP20BITMODE, 0); if (IS_ALDERLAKE_P(dev_priv)) { u32 val; @@ -1306,10 +1304,10 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder, val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(0); } - intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port), - DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK | - DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK, - val); + intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port), ln, + DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK | + DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK, + val); } } } @@ -2019,12 +2017,8 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port, return; if (DISPLAY_VER(dev_priv) >= 12) { - intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), - HIP_INDEX_VAL(tc_port, 0x0)); - ln0 = intel_de_read(dev_priv, DKL_DP_MODE(tc_port)); - intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), - HIP_INDEX_VAL(tc_port, 0x1)); - ln1 = intel_de_read(dev_priv, DKL_DP_MODE(tc_port)); + ln0 = intel_dkl_phy_read(dev_priv, DKL_DP_MODE(tc_port), 0); + ln1 = intel_dkl_phy_read(dev_priv, DKL_DP_MODE(tc_port), 1); } else { ln0 = intel_de_read(dev_priv, MG_DP_MODE(0, tc_port)); ln1 = intel_de_read(dev_priv, MG_DP_MODE(1, tc_port)); @@ -2085,12 +2079,8 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port, } if 
(DISPLAY_VER(dev_priv) >= 12) { - intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), - HIP_INDEX_VAL(tc_port, 0x0)); - intel_de_write(dev_priv, DKL_DP_MODE(tc_port), ln0); - intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), - HIP_INDEX_VAL(tc_port, 0x1)); - intel_de_write(dev_priv, DKL_DP_MODE(tc_port), ln1); + intel_dkl_phy_write(dev_priv, DKL_DP_MODE(tc_port), 0, ln0); + intel_dkl_phy_write(dev_priv, DKL_DP_MODE(tc_port), 1, ln1); } else { intel_de_write(dev_priv, MG_DP_MODE(0, tc_port), ln0); intel_de_write(dev_priv, MG_DP_MODE(1, tc_port), ln1); @@ -3094,10 +3084,8 @@ static void adlp_tbt_to_dp_alt_switch_wa(struct intel_encoder *encoder) enum tc_port tc_port = intel_port_to_tc(i915, encoder->port); int ln; - for (ln = 0; ln < 2; ln++) { - intel_de_write(i915, HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln)); - intel_de_rmw(i915, DKL_PCS_DW5(tc_port), DKL_PCS_DW5_CORE_SOFTRESET, 0); - } + for (ln = 0; ln < 2; ln++) + intel_dkl_phy_rmw(i915, DKL_PCS_DW5(tc_port), ln, DKL_PCS_DW5_CORE_SOFTRESET, 0); } static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp, diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 461c62c88413..de77054195c6 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -3723,12 +3723,16 @@ out: static u8 bigjoiner_pipes(struct drm_i915_private *i915) { + u8 pipes; + if (DISPLAY_VER(i915) >= 12) - return BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D); + pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D); else if (DISPLAY_VER(i915) >= 11) - return BIT(PIPE_B) | BIT(PIPE_C); + pipes = BIT(PIPE_B) | BIT(PIPE_C); else - return 0; + pipes = 0; + + return pipes & RUNTIME_INFO(i915)->pipe_mask; } static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h index 96cf994b0ad1..9b51148e8ba5 100644 --- a/drivers/gpu/drm/i915/display/intel_display_core.h +++ b/drivers/gpu/drm/i915/display/intel_display_core.h @@ -316,6 +316,14 @@ struct intel_display { } dbuf; struct { + /* + * dkl.phy_lock protects against concurrent access of the + * Dekel TypeC PHYs. 
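The bigjoiner_pipes() change above stops reporting pipes that the platform generation could have but this particular part does not: the generation's candidate set is ANDed with the runtime pipe_mask, so fused-off pipes drop out. The arithmetic is a plain bitmask intersection, as the toy example below shows, with pipe bits and mask values invented for illustration:

	#include <stdio.h>

	#define BIT(n) (1u << (n))

	enum pipe { PIPE_A, PIPE_B, PIPE_C, PIPE_D };

	int main(void)
	{
		/* Candidate set for a newer generation: pipes A through D. */
		unsigned int candidates = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
		/* Hypothetical SKU with pipe D fused off. */
		unsigned int pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);

		unsigned int bigjoiner_pipes = candidates & pipe_mask;

		printf("bigjoiner-capable pipes: 0x%x\n", bigjoiner_pipes);  /* 0x7 */
		return 0;
	}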
+ */ + spinlock_t phy_lock; + } dkl; + + struct { /* VLV/CHV/BXT/GLK DSI MMIO register base address */ u32 mmio_base; } dsi; diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 1e608b9e5055..1a63da28f330 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -2434,7 +2434,7 @@ intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port) { const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port); - if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_io == POWER_DOMAIN_INVALID) + if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID)) return POWER_DOMAIN_PORT_DDI_IO_A; return domains->ddi_io + (int)(port - domains->port_start); @@ -2445,7 +2445,7 @@ intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port po { const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port); - if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_lanes == POWER_DOMAIN_INVALID) + if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID)) return POWER_DOMAIN_PORT_DDI_LANES_A; return domains->ddi_lanes + (int)(port - domains->port_start); @@ -2471,7 +2471,7 @@ intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch { const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch); - if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID) + if (drm_WARN_ON(&i915->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID)) return POWER_DOMAIN_AUX_A; return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start); @@ -2482,7 +2482,7 @@ intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch au { const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch); - if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_tbt == POWER_DOMAIN_INVALID) + if (drm_WARN_ON(&i915->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID)) return POWER_DOMAIN_AUX_TBT1; return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start); diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index df7ee4969ef1..1d18eee56253 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -12,6 +12,7 @@ #include "intel_de.h" #include "intel_display_power_well.h" #include "intel_display_types.h" +#include "intel_dkl_phy.h" #include "intel_dmc.h" #include "intel_dpio_phy.h" #include "intel_dpll.h" @@ -529,11 +530,9 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, enum tc_port tc_port; tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx); - intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), - HIP_INDEX_VAL(tc_port, 0x2)); - if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port), - DKL_CMN_UC_DW27_UC_HEALTH, 1)) + if (wait_for(intel_dkl_phy_read(dev_priv, DKL_CMN_UC_DW_27(tc_port), 2) & + DKL_CMN_UC_DW27_UC_HEALTH, 1)) drm_warn(&dev_priv->drm, "Timeout waiting TC uC health\n"); } diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy.c b/drivers/gpu/drm/i915/display/intel_dkl_phy.c new file mode 100644 index 000000000000..710b030c7ed5 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dkl_phy.c @@ -0,0 +1,109 @@ 
+// SPDX-License-Identifier: MIT +/* + * Copyright © 2022 Intel Corporation + */ + +#include "i915_drv.h" +#include "i915_reg.h" + +#include "intel_de.h" +#include "intel_display.h" +#include "intel_dkl_phy.h" + +static void +dkl_phy_set_hip_idx(struct drm_i915_private *i915, i915_reg_t reg, int idx) +{ + enum tc_port tc_port = DKL_REG_TC_PORT(reg); + + drm_WARN_ON(&i915->drm, tc_port < TC_PORT_1 || tc_port >= I915_MAX_TC_PORTS); + + intel_de_write(i915, + HIP_INDEX_REG(tc_port), + HIP_INDEX_VAL(tc_port, idx)); +} + +/** + * intel_dkl_phy_read - read a Dekel PHY register + * @i915: i915 device instance + * @reg: Dekel PHY register + * @ln: lane instance of @reg + * + * Read the @reg Dekel PHY register. + * + * Returns the read value. + */ +u32 +intel_dkl_phy_read(struct drm_i915_private *i915, i915_reg_t reg, int ln) +{ + u32 val; + + spin_lock(&i915->display.dkl.phy_lock); + + dkl_phy_set_hip_idx(i915, reg, ln); + val = intel_de_read(i915, reg); + + spin_unlock(&i915->display.dkl.phy_lock); + + return val; +} + +/** + * intel_dkl_phy_write - write a Dekel PHY register + * @i915: i915 device instance + * @reg: Dekel PHY register + * @ln: lane instance of @reg + * @val: value to write + * + * Write @val to the @reg Dekel PHY register. + */ +void +intel_dkl_phy_write(struct drm_i915_private *i915, i915_reg_t reg, int ln, u32 val) +{ + spin_lock(&i915->display.dkl.phy_lock); + + dkl_phy_set_hip_idx(i915, reg, ln); + intel_de_write(i915, reg, val); + + spin_unlock(&i915->display.dkl.phy_lock); +} + +/** + * intel_dkl_phy_rmw - read-modify-write a Dekel PHY register + * @i915: i915 device instance + * @reg: Dekel PHY register + * @ln: lane instance of @reg + * @clear: mask to clear + * @set: mask to set + * + * Read the @reg Dekel PHY register, clearing then setting the @clear/@set bits in it, and writing + * this value back to the register if the value differs from the read one. + */ +void +intel_dkl_phy_rmw(struct drm_i915_private *i915, i915_reg_t reg, int ln, u32 clear, u32 set) +{ + spin_lock(&i915->display.dkl.phy_lock); + + dkl_phy_set_hip_idx(i915, reg, ln); + intel_de_rmw(i915, reg, clear, set); + + spin_unlock(&i915->display.dkl.phy_lock); +} + +/** + * intel_dkl_phy_posting_read - do a posting read from a Dekel PHY register + * @i915: i915 device instance + * @reg: Dekel PHY register + * @ln: lane instance of @reg + * + * Read the @reg Dekel PHY register without returning the read value. 
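The new intel_dkl_phy.c above wraps every Dekel PHY access in the same sequence: take display.dkl.phy_lock, program HIP_INDEX_REG to select the PHY-internal bank for the target register, then perform the actual MMIO access. The lock matters because the index register is shared state per PHY; an unserialized reader could land in whatever bank another caller selected last. Below is a minimal userspace sketch of that banked-access pattern, using a pthread mutex and a fake register file in place of the i915 MMIO helpers; every name in it is illustrative rather than driver API.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Fake "hardware": one shared index register plus a few banks of data registers. */
static unsigned int hip_index;
static uint32_t bank_regs[4][8];
static pthread_mutex_t phy_lock = PTHREAD_MUTEX_INITIALIZER;

/* Unlocked primitives standing in for intel_de_read()/intel_de_write(). */
static uint32_t mmio_read(unsigned int reg)
{
	return bank_regs[hip_index][reg];
}

static void mmio_write(unsigned int reg, uint32_t val)
{
	bank_regs[hip_index][reg] = val;
}

/* Banked accessors: select the bank and touch the register without letting
 * another thread change the index in between. */
static uint32_t dkl_read(unsigned int reg, unsigned int bank)
{
	uint32_t val;

	pthread_mutex_lock(&phy_lock);
	hip_index = bank;		/* the HIP_INDEX_REG write */
	val = mmio_read(reg);
	pthread_mutex_unlock(&phy_lock);

	return val;
}

static void dkl_rmw(unsigned int reg, unsigned int bank, uint32_t clear, uint32_t set)
{
	pthread_mutex_lock(&phy_lock);
	hip_index = bank;
	mmio_write(reg, (mmio_read(reg) & ~clear) | set);
	pthread_mutex_unlock(&phy_lock);
}

int main(void)
{
	dkl_rmw(5, 2, 0xff, 0x2a);	/* register 5 in bank 2 */
	printf("bank 2, reg 5 = 0x%02x\n", (unsigned int)dkl_read(5, 2));
	return 0;
}

The kernel helpers keep the same shape with a spinlock, and each caller passes the bank explicitly, which is why the converted dkl_pll_get_hw_state()/dkl_pll_write() sites below read and write with a literal lane index of 2.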
+ */ +void +intel_dkl_phy_posting_read(struct drm_i915_private *i915, i915_reg_t reg, int ln) +{ + spin_lock(&i915->display.dkl.phy_lock); + + dkl_phy_set_hip_idx(i915, reg, ln); + intel_de_posting_read(i915, reg); + + spin_unlock(&i915->display.dkl.phy_lock); +} diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy.h b/drivers/gpu/drm/i915/display/intel_dkl_phy.h new file mode 100644 index 000000000000..260ad121a0b1 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dkl_phy.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_DKL_PHY_H__ +#define __INTEL_DKL_PHY_H__ + +#include <linux/types.h> + +#include "i915_reg_defs.h" + +struct drm_i915_private; + +u32 +intel_dkl_phy_read(struct drm_i915_private *i915, i915_reg_t reg, int ln); +void +intel_dkl_phy_write(struct drm_i915_private *i915, i915_reg_t reg, int ln, u32 val); +void +intel_dkl_phy_rmw(struct drm_i915_private *i915, i915_reg_t reg, int ln, u32 clear, u32 set); +void +intel_dkl_phy_posting_read(struct drm_i915_private *i915, i915_reg_t reg, int ln); + +#endif /* __INTEL_DKL_PHY_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 47419d162f30..2b5bc95a8b0d 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -5276,7 +5276,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, encoder->devdata, IS_ERR(edid) ? NULL : edid); intel_panel_add_edid_fixed_modes(intel_connector, - intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE, + intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE || intel_vrr_is_capable(intel_connector)); /* MSO requires information from the EDID */ diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index e5fb66a5dd02..64dd603dc69a 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -25,6 +25,7 @@ #include "intel_de.h" #include "intel_display_types.h" +#include "intel_dkl_phy.h" #include "intel_dpio_phy.h" #include "intel_dpll.h" #include "intel_dpll_mgr.h" @@ -3508,15 +3509,12 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv, * All registers read here have the same HIP_INDEX_REG even though * they are on different building blocks */ - intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), - HIP_INDEX_VAL(tc_port, 0x2)); - - hw_state->mg_refclkin_ctl = intel_de_read(dev_priv, - DKL_REFCLKIN_CTL(tc_port)); + hw_state->mg_refclkin_ctl = intel_dkl_phy_read(dev_priv, + DKL_REFCLKIN_CTL(tc_port), 2); hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK; hw_state->mg_clktop2_hsclkctl = - intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port)); + intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), 2); hw_state->mg_clktop2_hsclkctl &= MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | @@ -3524,32 +3522,32 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv, MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK; hw_state->mg_clktop2_coreclkctl1 = - intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port)); + intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), 2); hw_state->mg_clktop2_coreclkctl1 &= MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; - hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port)); + hw_state->mg_pll_div0 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV0(tc_port), 2); val = DKL_PLL_DIV0_MASK; if 
(dev_priv->display.vbt.override_afc_startup) val |= DKL_PLL_DIV0_AFC_STARTUP_MASK; hw_state->mg_pll_div0 &= val; - hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port)); + hw_state->mg_pll_div1 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port), 2); hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK | DKL_PLL_DIV1_TDC_TARGET_CNT_MASK); - hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port)); + hw_state->mg_pll_ssc = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port), 2); hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK | DKL_PLL_SSC_STEP_LEN_MASK | DKL_PLL_SSC_STEP_NUM_MASK | DKL_PLL_SSC_EN); - hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port)); + hw_state->mg_pll_bias = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port), 2); hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H | DKL_PLL_BIAS_FBDIV_FRAC_MASK); hw_state->mg_pll_tdc_coldst_bias = - intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port)); + intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), 2); hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK | DKL_PLL_TDC_FEED_FWD_GAIN_MASK); @@ -3737,61 +3735,58 @@ static void dkl_pll_write(struct drm_i915_private *dev_priv, * All registers programmed here have the same HIP_INDEX_REG even * though on different building block */ - intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), - HIP_INDEX_VAL(tc_port, 0x2)); - /* All the registers are RMW */ - val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port)); + val = intel_dkl_phy_read(dev_priv, DKL_REFCLKIN_CTL(tc_port), 2); val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK; val |= hw_state->mg_refclkin_ctl; - intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val); + intel_dkl_phy_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), 2, val); - val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port)); + val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), 2); val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; val |= hw_state->mg_clktop2_coreclkctl1; - intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val); + intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), 2, val); - val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port)); + val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), 2); val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK); val |= hw_state->mg_clktop2_hsclkctl; - intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val); + intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), 2, val); val = DKL_PLL_DIV0_MASK; if (dev_priv->display.vbt.override_afc_startup) val |= DKL_PLL_DIV0_AFC_STARTUP_MASK; - intel_de_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val, - hw_state->mg_pll_div0); + intel_dkl_phy_rmw(dev_priv, DKL_PLL_DIV0(tc_port), 2, val, + hw_state->mg_pll_div0); - val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port)); + val = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port), 2); val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK | DKL_PLL_DIV1_TDC_TARGET_CNT_MASK); val |= hw_state->mg_pll_div1; - intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val); + intel_dkl_phy_write(dev_priv, DKL_PLL_DIV1(tc_port), 2, val); - val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port)); + val = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port), 2); val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK | DKL_PLL_SSC_STEP_LEN_MASK | DKL_PLL_SSC_STEP_NUM_MASK | DKL_PLL_SSC_EN); val |= hw_state->mg_pll_ssc; - intel_de_write(dev_priv, 
DKL_PLL_SSC(tc_port), val); + intel_dkl_phy_write(dev_priv, DKL_PLL_SSC(tc_port), 2, val); - val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port)); + val = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port), 2); val &= ~(DKL_PLL_BIAS_FRAC_EN_H | DKL_PLL_BIAS_FBDIV_FRAC_MASK); val |= hw_state->mg_pll_bias; - intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val); + intel_dkl_phy_write(dev_priv, DKL_PLL_BIAS(tc_port), 2, val); - val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port)); + val = intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), 2); val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK | DKL_PLL_TDC_FEED_FWD_GAIN_MASK); val |= hw_state->mg_pll_tdc_coldst_bias; - intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val); + intel_dkl_phy_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), 2, val); - intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port)); + intel_dkl_phy_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), 2); } static void icl_pll_power_enable(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index 9aa38e8141b5..e5352239b2a2 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -972,8 +972,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) /* Try EDID first */ intel_panel_add_edid_fixed_modes(intel_connector, - intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE, - false); + intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE); /* Failed to get EDID, what about VBT? */ if (!intel_panel_preferred_fixed_mode(intel_connector)) diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index a3a3f9fe4342..41cec9dc4223 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -254,10 +254,10 @@ static void intel_panel_destroy_probed_modes(struct intel_connector *connector) } void intel_panel_add_edid_fixed_modes(struct intel_connector *connector, - bool has_drrs, bool has_vrr) + bool use_alt_fixed_modes) { intel_panel_add_edid_preferred_mode(connector); - if (intel_panel_preferred_fixed_mode(connector) && (has_drrs || has_vrr)) + if (intel_panel_preferred_fixed_mode(connector) && use_alt_fixed_modes) intel_panel_add_edid_alt_fixed_modes(connector); intel_panel_destroy_probed_modes(connector); } diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h index eff3ffd3d082..5c5b5b7f95b6 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.h +++ b/drivers/gpu/drm/i915/display/intel_panel.h @@ -44,7 +44,7 @@ int intel_panel_fitting(struct intel_crtc_state *crtc_state, int intel_panel_compute_config(struct intel_connector *connector, struct drm_display_mode *adjusted_mode); void intel_panel_add_edid_fixed_modes(struct intel_connector *connector, - bool has_drrs, bool has_vrr); + bool use_alt_fixed_modes); void intel_panel_add_vbt_lfp_fixed_mode(struct intel_connector *connector); void intel_panel_add_vbt_sdvo_fixed_mode(struct intel_connector *connector); void intel_panel_add_encoder_fixed_mode(struct intel_connector *connector, diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index d4cce627d7a8..15c3e448aa0e 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -2201,8 +2201,11 @@ static void _psr_invalidate_handle(struct intel_dp *intel_dp) if 
(intel_dp->psr.psr2_sel_fetch_enabled) { u32 val; - if (intel_dp->psr.psr2_sel_fetch_cff_enabled) + if (intel_dp->psr.psr2_sel_fetch_cff_enabled) { + /* Send one update otherwise lag is observed in screen */ + intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0); return; + } val = man_trk_ctl_enable_bit_get(dev_priv) | man_trk_ctl_partial_frame_bit_get(dev_priv) | diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index f5b744bef18f..774c1dc31a52 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -2747,13 +2747,10 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) if (!intel_sdvo_connector) return false; - if (device == 0) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0; + if (device == 0) intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0; - } else if (device == 1) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1; + else if (device == 1) intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1; - } intel_connector = &intel_sdvo_connector->base; connector = &intel_connector->base; @@ -2808,7 +2805,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; - intel_sdvo->controlled_output |= type; intel_sdvo_connector->output_flag = type; if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { @@ -2849,13 +2845,10 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device) encoder->encoder_type = DRM_MODE_ENCODER_DAC; connector->connector_type = DRM_MODE_CONNECTOR_VGA; - if (device == 0) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0; + if (device == 0) intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0; - } else if (device == 1) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1; + else if (device == 1) intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; - } if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { kfree(intel_sdvo_connector); @@ -2885,13 +2878,10 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) encoder->encoder_type = DRM_MODE_ENCODER_LVDS; connector->connector_type = DRM_MODE_CONNECTOR_LVDS; - if (device == 0) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0; + if (device == 0) intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0; - } else if (device == 1) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1; + else if (device == 1) intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; - } if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { kfree(intel_sdvo_connector); @@ -2910,8 +2900,12 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) intel_panel_add_vbt_sdvo_fixed_mode(intel_connector); if (!intel_panel_preferred_fixed_mode(intel_connector)) { + mutex_lock(&i915->drm.mode_config.mutex); + intel_ddc_get_modes(connector, &intel_sdvo->ddc); - intel_panel_add_edid_fixed_modes(intel_connector, false, false); + intel_panel_add_edid_fixed_modes(intel_connector, false); + + mutex_unlock(&i915->drm.mode_config.mutex); } intel_panel_init(intel_connector); @@ -2926,16 +2920,39 @@ err: return false; } +static u16 intel_sdvo_filter_output_flags(u16 flags) +{ + flags &= SDVO_OUTPUT_MASK; + + /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/ + if (!(flags & SDVO_OUTPUT_TMDS0)) + flags &= ~SDVO_OUTPUT_TMDS1; + + if (!(flags & SDVO_OUTPUT_RGB0)) + flags &= ~SDVO_OUTPUT_RGB1; + + if (!(flags & 
SDVO_OUTPUT_LVDS0)) + flags &= ~SDVO_OUTPUT_LVDS1; + + return flags; +} + static bool intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags) { - /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/ + struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev); + + flags = intel_sdvo_filter_output_flags(flags); + + intel_sdvo->controlled_output = flags; + + intel_sdvo_select_ddc_bus(i915, intel_sdvo); if (flags & SDVO_OUTPUT_TMDS0) if (!intel_sdvo_dvi_init(intel_sdvo, 0)) return false; - if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK) + if (flags & SDVO_OUTPUT_TMDS1) if (!intel_sdvo_dvi_init(intel_sdvo, 1)) return false; @@ -2956,7 +2973,7 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags) if (!intel_sdvo_analog_init(intel_sdvo, 0)) return false; - if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK) + if (flags & SDVO_OUTPUT_RGB1) if (!intel_sdvo_analog_init(intel_sdvo, 1)) return false; @@ -2964,14 +2981,13 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags) if (!intel_sdvo_lvds_init(intel_sdvo, 0)) return false; - if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK) + if (flags & SDVO_OUTPUT_LVDS1) if (!intel_sdvo_lvds_init(intel_sdvo, 1)) return false; - if ((flags & SDVO_OUTPUT_MASK) == 0) { + if (flags == 0) { unsigned char bytes[2]; - intel_sdvo->controlled_output = 0; memcpy(bytes, &intel_sdvo->caps.output_flags, 2); DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n", SDVO_NAME(intel_sdvo), @@ -3383,8 +3399,6 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv, */ intel_sdvo->base.cloneable = 0; - intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo); - /* Set the input timing to the screen. Assume always input 0. */ if (!intel_sdvo_set_target_input(intel_sdvo)) goto err_output; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index f5062d0c6333..824971a1ceec 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -40,13 +40,13 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme goto err; } - ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL); + ret = sg_alloc_table(st, obj->mm.pages->orig_nents, GFP_KERNEL); if (ret) goto err_free; src = obj->mm.pages->sgl; dst = st->sgl; - for (i = 0; i < obj->mm.pages->nents; i++) { + for (i = 0; i < obj->mm.pages->orig_nents; i++) { sg_set_page(dst, sg_page(src), src->length, 0); dst = sg_next(dst); src = sg_next(src); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c index c698f95af15f..629acb403a2c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c @@ -6,7 +6,6 @@ #include <linux/scatterlist.h> #include <linux/slab.h> -#include <linux/swiotlb.h> #include "i915_drv.h" #include "i915_gem.h" @@ -38,22 +37,12 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) struct scatterlist *sg; unsigned int sg_page_sizes; unsigned int npages; - int max_order; + int max_order = MAX_ORDER; + unsigned int max_segment; gfp_t gfp; - max_order = MAX_ORDER; -#ifdef CONFIG_SWIOTLB - if (is_swiotlb_active(obj->base.dev->dev)) { - unsigned int max_segment; - - max_segment = swiotlb_max_segment(); - if (max_segment) { - max_segment = max_t(unsigned int, max_segment, - PAGE_SIZE) >> PAGE_SHIFT; - max_order = min(max_order, ilog2(max_segment)); - } - } -#endif + max_segment = i915_sg_segment_size(i915->drm.dev) >> 
PAGE_SHIFT; + max_order = min(max_order, get_order(max_segment)); gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE; if (IS_I965GM(i915) || IS_I965G(i915)) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index f42ca1179f37..2f7804492cd5 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -194,7 +194,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj) struct intel_memory_region *mem = obj->mm.region; struct address_space *mapping = obj->base.filp->f_mapping; const unsigned long page_count = obj->base.size / PAGE_SIZE; - unsigned int max_segment = i915_sg_segment_size(); + unsigned int max_segment = i915_sg_segment_size(i915->drm.dev); struct sg_table *st; struct sgt_iter sgt_iter; struct page *page; @@ -369,14 +369,14 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, __start_cpu_write(obj); /* - * On non-LLC platforms, force the flush-on-acquire if this is ever + * On non-LLC igfx platforms, force the flush-on-acquire if this is ever * swapped-in. Our async flush path is not trust worthy enough yet(and * happens in the wrong order), and with some tricks it's conceivable * for userspace to change the cache-level to I915_CACHE_NONE after the * pages are swapped-in, and since execbuf binds the object before doing * the async flush, we have a race window. */ - if (!HAS_LLC(i915)) + if (!HAS_LLC(i915) && !IS_DGFX(i915)) obj->cache_dirty = true; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index 4f861782c3e8..0d6d640225fc 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -189,7 +189,7 @@ static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev, struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev); struct intel_memory_region *mr = i915->mm.regions[INTEL_MEMORY_SYSTEM]; struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); - const unsigned int max_segment = i915_sg_segment_size(); + const unsigned int max_segment = i915_sg_segment_size(i915->drm.dev); const size_t size = (size_t)ttm->num_pages << PAGE_SHIFT; struct file *filp = i915_tt->filp; struct sgt_iter sgt_iter; @@ -538,7 +538,7 @@ static struct i915_refct_sgt *i915_ttm_tt_get_st(struct ttm_tt *ttm) ret = sg_alloc_table_from_pages_segment(st, ttm->pages, ttm->num_pages, 0, (unsigned long)ttm->num_pages << PAGE_SHIFT, - i915_sg_segment_size(), GFP_KERNEL); + i915_sg_segment_size(i915_tt->dev), GFP_KERNEL); if (ret) { st->sgl = NULL; return ERR_PTR(ret); @@ -612,6 +612,10 @@ static int i915_ttm_truncate(struct drm_i915_gem_object *obj) WARN_ON_ONCE(obj->mm.madv == I915_MADV_WILLNEED); + err = ttm_bo_wait(bo, true, false); + if (err) + return err; + err = i915_ttm_move_notify(bo); if (err) return err; @@ -1013,9 +1017,6 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) return VM_FAULT_SIGBUS; } - if (i915_ttm_cpu_maps_iomem(bo->resource)) - wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm); - if (!i915_ttm_resource_mappable(bo->resource)) { int err = -ENODEV; int i; @@ -1042,6 +1043,9 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) } } + if (i915_ttm_cpu_maps_iomem(bo->resource)) + wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm); + if (drm_dev_enter(dev, &idx)) { ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, TTM_BO_VM_NUM_PREFAULT); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c 
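Back in the intel_sdvo.c hunk, the new intel_sdvo_filter_output_flags() replaces the old (flags & SDVO_*_MASK) == SDVO_*_MASK pairing checks: a secondary output bit (TMDS1, RGB1 or LVDS1) is kept only when the matching primary bit is present, and the filtered value becomes intel_sdvo->controlled_output before any connector is created. A standalone sketch of that filtering rule, with placeholder bit values since the real SDVO_OUTPUT_* definitions are not part of this hunk:

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit layout; the kernel's SDVO_OUTPUT_* values are different. */
#define OUT_TMDS0	(1u << 0)
#define OUT_TMDS1	(1u << 1)
#define OUT_RGB0	(1u << 2)
#define OUT_RGB1	(1u << 3)
#define OUT_LVDS0	(1u << 4)
#define OUT_LVDS1	(1u << 5)
#define OUT_MASK	0x3fu

/* An XXX1 output is only honoured when the matching XXX0 output exists. */
static uint16_t filter_output_flags(uint16_t flags)
{
	flags &= OUT_MASK;

	if (!(flags & OUT_TMDS0))
		flags &= ~OUT_TMDS1;
	if (!(flags & OUT_RGB0))
		flags &= ~OUT_RGB1;
	if (!(flags & OUT_LVDS0))
		flags &= ~OUT_LVDS1;

	return flags;
}

int main(void)
{
	/* TMDS1 without TMDS0 is dropped; LVDS0 + LVDS1 both survive. */
	uint16_t in = OUT_TMDS1 | OUT_LVDS0 | OUT_LVDS1;

	printf("0x%02x -> 0x%02x\n", (unsigned int)in, (unsigned int)filter_output_flags(in));
	return 0;
}

With the flags pre-filtered this way, each secondary *_init(intel_sdvo, 1) call can be gated on its individual bit, which is exactly what the reworked intel_sdvo_output_setup() does.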
b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index d4398948f016..ba14b18d65f3 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -129,7 +129,7 @@ static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj) static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) { const unsigned long num_pages = obj->base.size >> PAGE_SHIFT; - unsigned int max_segment = i915_sg_segment_size(); + unsigned int max_segment = i915_sg_segment_size(obj->base.dev->dev); struct sg_table *st; unsigned int sg_page_sizes; struct page **pvec; @@ -428,9 +428,10 @@ probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len) { VMA_ITERATOR(vmi, mm, addr); struct vm_area_struct *vma; + unsigned long end = addr + len; mmap_read_lock(mm); - for_each_vma_range(vmi, vma, addr + len) { + for_each_vma_range(vmi, vma, end) { /* Check for holes, note that we also update the addr below */ if (vma->vm_start > addr) break; @@ -442,7 +443,7 @@ probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len) } mmap_read_unlock(mm); - if (vma) + if (vma || addr < end) return -EFAULT; return 0; } diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index d0b03a928b9a..7caa3412a244 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -625,8 +625,13 @@ int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout) return -EINTR; } - return timeout ? timeout : intel_uc_wait_for_idle(&gt->uc, remaining_timeout); + if (timeout) + return timeout; + + if (remaining_timeout < 0) + remaining_timeout = 0; + + return intel_uc_wait_for_idle(&gt->uc, remaining_timeout); } int intel_gt_init(struct intel_gt *gt) @@ -1017,6 +1022,11 @@ static void mmio_invalidate_full(struct intel_gt *gt) if (!i915_mmio_reg_offset(rb.reg)) continue; + if (GRAPHICS_VER(i915) == 12 && (engine->class == VIDEO_DECODE_CLASS || + engine->class == VIDEO_ENHANCEMENT_CLASS || + engine->class == COMPUTE_CLASS)) + rb.bit = _MASKED_BIT_ENABLE(rb.bit); + intel_uncore_write_fw(uncore, rb.reg, rb.bit); awake |= engine->mask; } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c index edb881d75630..1dfd01668c79 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c @@ -199,7 +199,7 @@ out_active: spin_lock(&timelines->lock); if (remaining_timeout) *remaining_timeout = timeout; - return active_count ?
timeout ?: -ETIME : 0; } static void retire_work_handler(struct work_struct *work) diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 7a45e5360caf..714221f9a131 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -664,8 +664,6 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev) return -ESRCH; } - kvm_get_kvm(vgpu->vfio_device.kvm); - if (__kvmgt_vgpu_exist(vgpu)) return -EEXIST; @@ -676,6 +674,7 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev) vgpu->track_node.track_write = kvmgt_page_track_write; vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot; + kvm_get_kvm(vgpu->vfio_device.kvm); kvm_page_track_register_notifier(vgpu->vfio_device.kvm, &vgpu->track_node); @@ -715,15 +714,14 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev) kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm, &vgpu->track_node); + kvm_put_kvm(vgpu->vfio_device.kvm); + kvmgt_protect_table_destroy(vgpu); gvt_cache_destroy(vgpu); intel_vgpu_release_msi_eventfd_ctx(vgpu); vgpu->attached = false; - - if (vgpu->vfio_device.kvm) - kvm_put_kvm(vgpu->vfio_device.kvm); } static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar) diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index c459eb362c47..f2a15d8155f4 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -353,6 +353,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) mutex_init(&dev_priv->display.wm.wm_mutex); mutex_init(&dev_priv->display.pps.mutex); mutex_init(&dev_priv->display.hdcp.comp_mutex); + spin_lock_init(&dev_priv->display.dkl.phy_lock); i915_memcpy_init_early(dev_priv); intel_runtime_pm_init_early(&dev_priv->runtime_pm); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 0b287a59dc2f..da35bb2db26b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7420,6 +7420,9 @@ enum skl_power_gate { #define _DKL_PHY5_BASE 0x16C000 #define _DKL_PHY6_BASE 0x16D000 +#define DKL_REG_TC_PORT(__reg) \ + (TC_PORT_1 + ((__reg).reg - _DKL_PHY1_BASE) / (_DKL_PHY2_BASE - _DKL_PHY1_BASE)) + /* DEKEL PHY MMIO Address = Phy base + (internal address & ~index_mask) */ #define _DKL_PCS_DW5 0x14 #define DKL_PCS_DW5(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \ diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h index 9ddb3e743a3e..b0a1db44f895 100644 --- a/drivers/gpu/drm/i915/i915_scatterlist.h +++ b/drivers/gpu/drm/i915/i915_scatterlist.h @@ -9,7 +9,8 @@ #include <linux/pfn.h> #include <linux/scatterlist.h> -#include <linux/swiotlb.h> +#include <linux/dma-mapping.h> +#include <xen/xen.h> #include "i915_gem.h" @@ -127,19 +128,26 @@ static inline unsigned int i915_sg_dma_sizes(struct scatterlist *sg) return page_sizes; } -static inline unsigned int i915_sg_segment_size(void) +static inline unsigned int i915_sg_segment_size(struct device *dev) { - unsigned int size = swiotlb_max_segment(); - - if (size == 0) - size = UINT_MAX; - - size = rounddown(size, PAGE_SIZE); - /* swiotlb_max_segment_size can return 1 byte when it means one page. */ - if (size < PAGE_SIZE) - size = PAGE_SIZE; - - return size; + size_t max = min_t(size_t, UINT_MAX, dma_max_mapping_size(dev)); + + /* + * For Xen PV guests pages aren't contiguous in DMA (machine) address + * space. 
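The DKL_REG_TC_PORT() macro added to i915_reg.h above recovers the Type-C port from a Dekel register offset by dividing its distance from the first PHY base by the per-PHY stride. Only _DKL_PHY5_BASE and _DKL_PHY6_BASE are visible in that hunk, so the sketch below assumes a 0x168000 start and the same 0x1000 stride, and it returns a zero-based index where the kernel macro adds TC_PORT_1:

#include <stdint.h>
#include <stdio.h>

/* Only _DKL_PHY5_BASE/_DKL_PHY6_BASE appear in the hunk; the first two bases
 * below are assumed to follow the same 0x1000 stride. */
#define DKL_PHY1_BASE	0x168000u
#define DKL_PHY2_BASE	0x169000u

/* Mirrors DKL_REG_TC_PORT(): map a register offset back to a zero-based
 * Type-C port index (the kernel macro adds TC_PORT_1 on top of this). */
static unsigned int dkl_reg_tc_port(uint32_t reg_offset)
{
	return (reg_offset - DKL_PHY1_BASE) / (DKL_PHY2_BASE - DKL_PHY1_BASE);
}

int main(void)
{
	uint32_t base;

	/* The same per-PHY register (offset 0x14) in each of the six PHYs. */
	for (base = 0x168000u; base <= 0x16d000u; base += 0x1000u)
		printf("reg 0x%06x -> TC port %u\n",
		       (unsigned int)(base + 0x14), dkl_reg_tc_port(base + 0x14));
	return 0;
}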
The DMA API takes care of that both in dma_alloc_* (by + * calling into the hypervisor to make the pages contiguous) and in + * dma_map_* (by bounce buffering). But i915 abuses ignores the + * coherency aspects of the DMA API and thus can't cope with bounce + * buffering actually happening, so add a hack here to force small + * allocations and mappings when running in PV mode on Xen. + * + * Note this will still break if bounce buffering is required for other + * reasons, like confidential computing hypervisors or PCIe root ports + * with addressing limitations. + */ + if (xen_pv_domain()) + max = PAGE_SIZE; + return round_down(max, PAGE_SIZE); } bool i915_sg_trim(struct sg_table *orig_st); diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c index 2403ccd52c74..bba8cb6e8ae4 100644 --- a/drivers/gpu/drm/i915/intel_dram.c +++ b/drivers/gpu/drm/i915/intel_dram.c @@ -471,8 +471,7 @@ static int xelpdp_get_dram_info(struct drm_i915_private *i915) u32 val = intel_uncore_read(&i915->uncore, MTL_MEM_SS_INFO_GLOBAL); struct dram_info *dram_info = &i915->dram_info; - val = REG_FIELD_GET(MTL_DDR_TYPE_MASK, val); - switch (val) { + switch (REG_FIELD_GET(MTL_DDR_TYPE_MASK, val)) { case 0: dram_info->type = INTEL_DRAM_DDR4; break; diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig index 975de4ff7313..fd5b2471fdf0 100644 --- a/drivers/gpu/drm/imx/Kconfig +++ b/drivers/gpu/drm/imx/Kconfig @@ -4,7 +4,6 @@ config DRM_IMX select DRM_KMS_HELPER select VIDEOMODE_HELPERS select DRM_GEM_DMA_HELPER - select DRM_KMS_HELPER depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM || COMPILE_TEST) depends on IMX_IPUV3_CORE help diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index 6b34fac3f73a..ab4d1c878fda 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -218,8 +218,9 @@ static int imx_tve_connector_get_modes(struct drm_connector *connector) return ret; } -static int imx_tve_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) +static enum drm_mode_status +imx_tve_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) { struct imx_tve *tve = con_to_tve(connector); unsigned long rate; diff --git a/drivers/gpu/drm/lima/lima_devfreq.c b/drivers/gpu/drm/lima/lima_devfreq.c index 011be7ff51e1..bc8fb4e38d0a 100644 --- a/drivers/gpu/drm/lima/lima_devfreq.c +++ b/drivers/gpu/drm/lima/lima_devfreq.c @@ -112,11 +112,6 @@ int lima_devfreq_init(struct lima_device *ldev) unsigned long cur_freq; int ret; const char *regulator_names[] = { "mali", NULL }; - const char *clk_names[] = { "core", NULL }; - struct dev_pm_opp_config config = { - .regulator_names = regulator_names, - .clk_names = clk_names, - }; if (!device_property_present(dev, "operating-points-v2")) /* Optional, continue without devfreq */ @@ -124,7 +119,15 @@ int lima_devfreq_init(struct lima_device *ldev) spin_lock_init(&ldevfreq->lock); - ret = devm_pm_opp_set_config(dev, &config); + /* + * clkname is set separately so it is not affected by the optional + * regulator setting which may return error. 
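The reworked i915_sg_segment_size() above derives the scatterlist segment cap from dma_max_mapping_size() instead of the removed swiotlb_max_segment(): clamp the device limit to UINT_MAX, force a single page when running as a Xen PV guest, and round the result down to a page multiple. A standalone sketch of that arithmetic, with a stand-in for dma_max_mapping_size() and a hard-coded 4 KiB page size, both of which are assumptions rather than driver code:

#include <limits.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Stand-in for dma_max_mapping_size(dev); real devices often report SIZE_MAX. */
static size_t fake_dma_max_mapping_size(void)
{
	return 256u * 1024 * 1024;
}

static unsigned int sg_segment_size(bool xen_pv)
{
	size_t max = fake_dma_max_mapping_size();

	if (max > UINT_MAX)		/* min_t(size_t, UINT_MAX, ...) in the kernel */
		max = UINT_MAX;
	if (xen_pv)			/* bounce buffering would break, keep segments page sized */
		max = PAGE_SIZE;

	return (unsigned int)(max - max % PAGE_SIZE);	/* round_down(max, PAGE_SIZE) */
}

int main(void)
{
	printf("native: %u bytes per segment\n", sg_segment_size(false));
	printf("xen-pv: %u bytes per segment\n", sg_segment_size(true));
	return 0;
}

Callers such as shmem_get_pages() and i915_ttm_tt_get_st() feed this per-device value into their scatterlist allocation, and i915_gem_object_get_pages_internal() derives its max_order from it.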
+ */ + ret = devm_pm_opp_set_clkname(dev, "core"); + if (ret) + return ret; + + ret = devm_pm_opp_set_regulators(dev, regulator_names); if (ret) { /* Continue if the optional regulator is missing */ if (ret != -ENODEV) diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 2944228a8e2c..8a3b685c2fcc 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -2500,6 +2500,7 @@ static const struct display_timing logictechno_lt161010_2nh_timing = { static const struct panel_desc logictechno_lt161010_2nh = { .timings = &logictechno_lt161010_2nh_timing, .num_timings = 1, + .bpc = 6, .size = { .width = 154, .height = 86, @@ -2529,6 +2530,7 @@ static const struct display_timing logictechno_lt170410_2whc_timing = { static const struct panel_desc logictechno_lt170410_2whc = { .timings = &logictechno_lt170410_2whc_timing, .num_timings = 1, + .bpc = 8, .size = { .width = 217, .height = 136, diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index e246d914e7f6..4e83a1891f3e 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -250,13 +250,22 @@ void panfrost_mmu_reset(struct panfrost_device *pfdev) static size_t get_pgsize(u64 addr, size_t size, size_t *count) { + /* + * io-pgtable only operates on multiple pages within a single table + * entry, so we need to split at boundaries of the table size, i.e. + * the next block size up. The distance from address A to the next + * boundary of block size B is logically B - A % B, but in unsigned + * two's complement where B is a power of two we get the equivalence + * B - A % B == (B - A) % B == (n * B - A) % B, and choose n = 0 :) + */ size_t blk_offset = -addr % SZ_2M; if (blk_offset || size < SZ_2M) { *count = min_not_zero(blk_offset, size) / SZ_4K; return SZ_4K; } - *count = size / SZ_2M; + blk_offset = -addr % SZ_1G ?: SZ_1G; + *count = min(blk_offset, size) / SZ_2M; return SZ_2M; } diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig index c959e8c6be7d..fd2c2eaee26b 100644 --- a/drivers/gpu/drm/rcar-du/Kconfig +++ b/drivers/gpu/drm/rcar-du/Kconfig @@ -44,13 +44,18 @@ config DRM_RCAR_LVDS select OF_FLATTREE select OF_OVERLAY -config DRM_RCAR_MIPI_DSI - tristate "R-Car DU MIPI DSI Encoder Support" - depends on DRM && DRM_BRIDGE && OF - select DRM_MIPI_DSI +config DRM_RCAR_USE_MIPI_DSI + bool "R-Car DU MIPI DSI Encoder Support" + depends on DRM_BRIDGE && OF + default DRM_RCAR_DU help Enable support for the R-Car Display Unit embedded MIPI DSI encoders. +config DRM_RCAR_MIPI_DSI + def_tristate DRM_RCAR_DU + depends on DRM_RCAR_USE_MIPI_DSI + select DRM_MIPI_DSI + config DRM_RCAR_VSP bool "R-Car DU VSP Compositor Support" if ARM default y if ARM64 diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c index bf6948125b84..f4df9820b295 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c @@ -752,7 +752,7 @@ static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi) static void dw_mipi_dsi_rockchip_set_lcdsel(struct dw_mipi_dsi_rockchip *dsi, int mux) { - if (dsi->cdata->lcdsel_grf_reg < 0) + if (dsi->cdata->lcdsel_grf_reg) regmap_write(dsi->grf_regmap, dsi->cdata->lcdsel_grf_reg, mux ? 
dsi->cdata->lcdsel_lit : dsi->cdata->lcdsel_big); } @@ -1051,23 +1051,31 @@ static int dw_mipi_dsi_rockchip_host_attach(void *priv_data, if (ret) { DRM_DEV_ERROR(dsi->dev, "Failed to register component: %d\n", ret); - return ret; + goto out; } second = dw_mipi_dsi_rockchip_find_second(dsi); - if (IS_ERR(second)) - return PTR_ERR(second); + if (IS_ERR(second)) { + ret = PTR_ERR(second); + goto out; + } if (second) { ret = component_add(second, &dw_mipi_dsi_rockchip_ops); if (ret) { DRM_DEV_ERROR(second, "Failed to register component: %d\n", ret); - return ret; + goto out; } } return 0; + +out: + mutex_lock(&dsi->usage_mutex); + dsi->usage_mode = DW_DSI_USAGE_IDLE; + mutex_unlock(&dsi->usage_mutex); + return ret; } static int dw_mipi_dsi_rockchip_host_detach(void *priv_data, @@ -1635,7 +1643,6 @@ static const struct rockchip_dw_dsi_chip_data rk3399_chip_data[] = { static const struct rockchip_dw_dsi_chip_data rk3568_chip_data[] = { { .reg = 0xfe060000, - .lcdsel_grf_reg = -1, .lanecfg1_grf_reg = RK3568_GRF_VO_CON2, .lanecfg1 = HIWORD_UPDATE(0, RK3568_DSI0_SKEWCALHS | RK3568_DSI0_FORCETXSTOPMODE | @@ -1645,7 +1652,6 @@ static const struct rockchip_dw_dsi_chip_data rk3568_chip_data[] = { }, { .reg = 0xfe070000, - .lcdsel_grf_reg = -1, .lanecfg1_grf_reg = RK3568_GRF_VO_CON3, .lanecfg1 = HIWORD_UPDATE(0, RK3568_DSI1_SKEWCALHS | RK3568_DSI1_FORCETXSTOPMODE | @@ -1681,5 +1687,11 @@ struct platform_driver dw_mipi_dsi_rockchip_driver = { .of_match_table = dw_mipi_dsi_rockchip_dt_ids, .pm = &dw_mipi_dsi_rockchip_pm_ops, .name = "dw-mipi-dsi-rockchip", + /* + * For dual-DSI display, one DSI pokes at the other DSI's + * drvdata in dw_mipi_dsi_rockchip_find_second(). This is not + * safe for asynchronous probe. + */ + .probe_type = PROBE_FORCE_SYNCHRONOUS, }, }; diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index c14f88893868..2f4b8f64cbad 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -565,7 +565,8 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, ret = rockchip_hdmi_parse_dt(hdmi); if (ret) { - DRM_DEV_ERROR(hdmi->dev, "Unable to parse OF data\n"); + if (ret != -EPROBE_DEFER) + DRM_DEV_ERROR(hdmi->dev, "Unable to parse OF data\n"); return ret; } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index 614e97aaac80..da8a69953706 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -364,9 +364,12 @@ rockchip_gem_create_with_handle(struct drm_file *file_priv, { struct rockchip_gem_object *rk_obj; struct drm_gem_object *obj; + bool is_framebuffer; int ret; - rk_obj = rockchip_gem_create_object(drm, size, false); + is_framebuffer = drm->fb_helper && file_priv == drm->fb_helper->client.file; + + rk_obj = rockchip_gem_create_object(drm, size, is_framebuffer); if (IS_ERR(rk_obj)) return ERR_CAST(rk_obj); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index aac20be5ac08..105a548d0abe 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -877,10 +877,14 @@ static void vop2_crtc_atomic_disable(struct drm_crtc *crtc, { struct vop2_video_port *vp = to_vop2_video_port(crtc); struct vop2 *vop2 = vp->vop2; + struct drm_crtc_state *old_crtc_state; int ret; vop2_lock(vop2); + old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc); + 
drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false); + drm_crtc_vblank_off(crtc); /* @@ -996,13 +1000,15 @@ static int vop2_plane_atomic_check(struct drm_plane *plane, static void vop2_plane_atomic_disable(struct drm_plane *plane, struct drm_atomic_state *state) { - struct drm_plane_state *old_pstate = drm_atomic_get_old_plane_state(state, plane); + struct drm_plane_state *old_pstate = NULL; struct vop2_win *win = to_vop2_win(plane); struct vop2 *vop2 = win->vop2; drm_dbg(vop2->drm, "%s disable\n", win->data->name); - if (!old_pstate->crtc) + if (state) + old_pstate = drm_atomic_get_old_plane_state(state, plane); + if (old_pstate && !old_pstate->crtc) return; vop2_win_disable(win); diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 6748ec1e0005..a1f909dac89a 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -1093,6 +1093,10 @@ static bool host1x_drm_wants_iommu(struct host1x_device *dev) struct host1x *host1x = dev_get_drvdata(dev->dev.parent); struct iommu_domain *domain; + /* Our IOMMU usage policy doesn't currently play well with GART */ + if (of_machine_is_compatible("nvidia,tegra20")) + return false; + /* * If the Tegra DRM clients are backed by an IOMMU, push buffers are * likely to be allocated beyond the 32-bit boundary if sufficient diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 2027063fdc30..8c329c071c62 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -476,7 +476,12 @@ static int __init vc4_drm_register(void) if (ret) return ret; - return platform_driver_register(&vc4_platform_driver); + ret = platform_driver_register(&vc4_platform_driver); + if (ret) + platform_unregister_drivers(component_drivers, + ARRAY_SIZE(component_drivers)); + + return ret; } static void __exit vc4_drm_unregister(void) diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 596e311d6e58..470432c8fd70 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -349,27 +349,40 @@ static int vc4_hdmi_reset_link(struct drm_connector *connector, if (!crtc_state->active) return 0; - if (!vc4_hdmi_supports_scrambling(encoder)) + mutex_lock(&vc4_hdmi->mutex); + + if (!vc4_hdmi_supports_scrambling(encoder)) { + mutex_unlock(&vc4_hdmi->mutex); return 0; + } scrambling_needed = vc4_hdmi_mode_needs_scrambling(&vc4_hdmi->saved_adjusted_mode, vc4_hdmi->output_bpc, vc4_hdmi->output_format); - if (!scrambling_needed) + if (!scrambling_needed) { + mutex_unlock(&vc4_hdmi->mutex); return 0; + } if (conn_state->commit && - !try_wait_for_completion(&conn_state->commit->hw_done)) + !try_wait_for_completion(&conn_state->commit->hw_done)) { + mutex_unlock(&vc4_hdmi->mutex); return 0; + } ret = drm_scdc_readb(connector->ddc, SCDC_TMDS_CONFIG, &config); if (ret < 0) { drm_err(drm, "Failed to read TMDS config: %d\n", ret); + mutex_unlock(&vc4_hdmi->mutex); return 0; } - if (!!(config & SCDC_SCRAMBLING_ENABLE) == scrambling_needed) + if (!!(config & SCDC_SCRAMBLING_ENABLE) == scrambling_needed) { + mutex_unlock(&vc4_hdmi->mutex); return 0; + } + + mutex_unlock(&vc4_hdmi->mutex); /* * HDMI 2.0 says that one should not send scrambled data @@ -397,9 +410,8 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi, * .adap_enable, which leads to that funtion being called with * our mutex held. * - * A similar situation occurs with - * drm_atomic_helper_connector_hdmi_reset_link() that will call - * into our KMS hooks if the scrambling was enabled. 
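The panfrost get_pgsize() change earlier in this diff leans on the identity spelled out in its new comment: for a power-of-two block size B, the unsigned expression -addr % B is the distance from addr to the next B-aligned boundary, and 0 when addr is already aligned. A tiny worked example of that arithmetic, with arbitrary addresses and 2 MiB blocks:

#include <stdint.h>
#include <stdio.h>

#define SZ_2M	(2ull * 1024 * 1024)

/* For a power-of-two block size, unsigned -addr % blk is the distance from
 * addr up to the next blk-aligned boundary (0 when already aligned). */
static uint64_t dist_to_boundary(uint64_t addr, uint64_t blk)
{
	return -addr % blk;
}

int main(void)
{
	uint64_t addrs[] = { 0x200000, 0x234000, 0x3ff000 };
	unsigned int i;

	for (i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++)
		printf("addr 0x%llx: 0x%llx bytes to the next 2 MiB boundary\n",
		       (unsigned long long)addrs[i],
		       (unsigned long long)dist_to_boundary(addrs[i], SZ_2M));
	return 0;
}

The same trick, with the ?: SZ_1G fallback turning an exact 1 GiB alignment into a full-block distance, is what lets the new code cap a run of 2 MiB pages at the next 1 GiB table boundary.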
+ * A similar situation occurs with vc4_hdmi_reset_link() that + * will call into our KMS hooks if the scrambling was enabled. * * Concurrency isn't an issue at the moment since we don't share * any state with any of the other frameworks so we can ignore @@ -3160,9 +3172,16 @@ static int vc4_hdmi_init_resources(struct drm_device *drm, DRM_ERROR("Failed to get HDMI state machine clock\n"); return PTR_ERR(vc4_hdmi->hsm_clock); } + vc4_hdmi->audio_clock = vc4_hdmi->hsm_clock; vc4_hdmi->cec_clock = vc4_hdmi->hsm_clock; + vc4_hdmi->hsm_rpm_clock = devm_clk_get(dev, "hdmi"); + if (IS_ERR(vc4_hdmi->hsm_rpm_clock)) { + DRM_ERROR("Failed to get HDMI state machine clock\n"); + return PTR_ERR(vc4_hdmi->hsm_rpm_clock); + } + return 0; } @@ -3245,6 +3264,12 @@ static int vc5_hdmi_init_resources(struct drm_device *drm, return PTR_ERR(vc4_hdmi->hsm_clock); } + vc4_hdmi->hsm_rpm_clock = devm_clk_get(dev, "hdmi"); + if (IS_ERR(vc4_hdmi->hsm_rpm_clock)) { + DRM_ERROR("Failed to get HDMI state machine clock\n"); + return PTR_ERR(vc4_hdmi->hsm_rpm_clock); + } + vc4_hdmi->pixel_bvb_clock = devm_clk_get(dev, "bvb"); if (IS_ERR(vc4_hdmi->pixel_bvb_clock)) { DRM_ERROR("Failed to get pixel bvb clock\n"); @@ -3308,7 +3333,7 @@ static int vc4_hdmi_runtime_suspend(struct device *dev) { struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); - clk_disable_unprepare(vc4_hdmi->hsm_clock); + clk_disable_unprepare(vc4_hdmi->hsm_rpm_clock); return 0; } @@ -3326,11 +3351,11 @@ static int vc4_hdmi_runtime_resume(struct device *dev) * its frequency while the power domain is active so that it * keeps its rate. */ - ret = clk_set_min_rate(vc4_hdmi->hsm_clock, HSM_MIN_CLOCK_FREQ); + ret = clk_set_min_rate(vc4_hdmi->hsm_rpm_clock, HSM_MIN_CLOCK_FREQ); if (ret) return ret; - ret = clk_prepare_enable(vc4_hdmi->hsm_clock); + ret = clk_prepare_enable(vc4_hdmi->hsm_rpm_clock); if (ret) return ret; @@ -3343,7 +3368,7 @@ static int vc4_hdmi_runtime_resume(struct device *dev) * case, it will lead to a silent CPU stall. Let's make sure we * prevent such a case. 
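Two related tweaks in the gt/ hunks earlier in this diff change how an exhausted wait budget is reported: the retire path in intel_gt_requests.c now evaluates active_count ? timeout ?: -ETIME : 0, so "still active but out of time" becomes -ETIME instead of being folded into 0, and intel_gt_wait_for_idle() clamps a negative remaining_timeout to 0 before handing it to intel_uc_wait_for_idle(). A compact sketch of those two conventions in plain C; the kernel spelling uses the GNU ?: extension:

#include <stdio.h>

#define ETIME 62	/* matches the Linux errno value */

/* Retire path: 0 = nothing active, >0 = budget left, -ETIME = active but the
 * budget ran out (spelled "timeout ?: -ETIME" with the GNU extension). */
static long retire_result(int active_count, long timeout)
{
	if (!active_count)
		return 0;
	return timeout ? timeout : -ETIME;
}

/* Idle-wait path: never hand a negative leftover budget to the next waiter. */
static long clamp_remaining(long remaining_timeout)
{
	return remaining_timeout < 0 ? 0 : remaining_timeout;
}

int main(void)
{
	printf("%ld %ld %ld\n",
	       retire_result(0, 0),		/* idle */
	       retire_result(3, 10),		/* busy, 10 left */
	       retire_result(3, 0));		/* busy, out of time: -62 */
	printf("clamped: %ld\n", clamp_remaining(retire_result(3, 0)));
	return 0;
}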
*/ - rate = clk_get_rate(vc4_hdmi->hsm_clock); + rate = clk_get_rate(vc4_hdmi->hsm_rpm_clock); if (!rate) { ret = -EINVAL; goto err_disable_clk; diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h index db823efb2563..1ad8e8c377e2 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.h +++ b/drivers/gpu/drm/vc4/vc4_hdmi.h @@ -172,6 +172,7 @@ struct vc4_hdmi { struct clk *cec_clock; struct clk *pixel_clock; struct clk *hsm_clock; + struct clk *hsm_rpm_clock; struct clk *audio_clock; struct clk *pixel_bvb_clock; diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index 4419e810103d..0a6347c05df4 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -197,8 +197,8 @@ vc4_hvs_get_new_global_state(struct drm_atomic_state *state) struct drm_private_state *priv_state; priv_state = drm_atomic_get_new_private_obj_state(state, &vc4->hvs_channels); - if (IS_ERR(priv_state)) - return ERR_CAST(priv_state); + if (!priv_state) + return ERR_PTR(-EINVAL); return to_vc4_hvs_state(priv_state); } @@ -210,8 +210,8 @@ vc4_hvs_get_old_global_state(struct drm_atomic_state *state) struct drm_private_state *priv_state; priv_state = drm_atomic_get_old_private_obj_state(state, &vc4->hvs_channels); - if (IS_ERR(priv_state)) - return ERR_CAST(priv_state); + if (!priv_state) + return ERR_PTR(-EINVAL); return to_vc4_hvs_state(priv_state); } diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index 0cd3f97e7e49..f60ea24db0ec 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -292,6 +292,10 @@ static void host1x_setup_virtualization_tables(struct host1x *host) static bool host1x_wants_iommu(struct host1x *host1x) { + /* Our IOMMU usage policy doesn't currently play well with GART */ + if (of_machine_is_compatible("nvidia,tegra20")) + return false; + /* * If we support addressing a maximum of 32 bits of physical memory * and if the host1x firewall is enabled, there's no need to enable |
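Finally, the i915_gem_userptr probe_range() fix earlier in this diff tightens the hole check: the walk now fails not only when it stops on a VMA that starts beyond the cursor, but also when the cursor never reaches the end of the requested range, i.e. a hole at the tail. The sketch below reproduces that coverage test over a sorted array of [start, end) ranges instead of a VMA iterator; all names are illustrative.

#include <stdbool.h>
#include <stdio.h>

/* Sorted, non-overlapping [start, end) ranges standing in for VMAs. */
struct range {
	unsigned long start, end;
};

/* Return true only if [addr, addr + len) is fully covered by the ranges. */
static bool range_covered(const struct range *r, int n,
			  unsigned long addr, unsigned long len)
{
	unsigned long end = addr + len;
	int i;

	for (i = 0; i < n && r[i].start < end; i++) {
		if (r[i].end <= addr)
			continue;	/* entirely before the request */
		if (r[i].start > addr)
			return false;	/* hole in front of this range */
		if (r[i].end >= end)
			return true;	/* request fully covered */
		addr = r[i].end;	/* advance the cursor and keep scanning */
	}
	return false;			/* ran out of ranges: hole at the tail */
}

int main(void)
{
	const struct range vmas[] = {
		{ 0x1000, 0x3000 }, { 0x3000, 0x5000 }, { 0x8000, 0x9000 },
	};

	printf("%d %d %d\n",
	       range_covered(vmas, 3, 0x1000, 0x4000),	/* 1: contiguous coverage */
	       range_covered(vmas, 3, 0x4000, 0x5000),	/* 0: hole at 0x5000..0x8000 */
	       range_covered(vmas, 3, 0x8000, 0x2000));	/* 0: hole past 0x9000 */
	return 0;
}

The third case is the one the added addr < end test catches: the iterator runs out of VMAs with part of the range still uncovered.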