Diffstat (limited to 'drivers/gpu/drm/amd')
17 files changed, 231 insertions, 64 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index a414da22a359..bd5061fbe031 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -223,12 +223,16 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
 	*pos &= (1UL << 22) - 1;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	r = amdgpu_virt_enable_access_debugfs(adev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	if (use_bank) {
 		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
@@ -332,12 +336,16 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
 		return -EINVAL;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	r = amdgpu_virt_enable_access_debugfs(adev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	while (size) {
 		uint32_t value;
@@ -387,12 +395,16 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
 		return -EINVAL;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	r = amdgpu_virt_enable_access_debugfs(adev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	while (size) {
 		uint32_t value;
@@ -443,12 +455,16 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
 		return -EINVAL;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	r = amdgpu_virt_enable_access_debugfs(adev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	while (size) {
 		uint32_t value;
@@ -498,12 +514,16 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
 		return -EINVAL;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	r = amdgpu_virt_enable_access_debugfs(adev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	while (size) {
 		uint32_t value;
@@ -554,12 +574,16 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
 		return -EINVAL;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	r = amdgpu_virt_enable_access_debugfs(adev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	while (size) {
 		uint32_t value;
@@ -609,12 +633,16 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
 		return -EINVAL;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	r = amdgpu_virt_enable_access_debugfs(adev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	while (size) {
 		uint32_t value;
@@ -764,12 +792,16 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
 	valuesize = sizeof(values);
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	r = amdgpu_virt_enable_access_debugfs(adev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
@@ -842,12 +874,16 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
 	simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	r = amdgpu_virt_enable_access_debugfs(adev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	/* switch to the specific se/sh/cu */
 	mutex_lock(&adev->grbm_idx_mutex);
@@ -937,11 +973,11 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 	r = pm_runtime_get_sync(adev->ddev->dev);
 	if (r < 0)
-		return r;
+		goto err;
 
 	r = amdgpu_virt_enable_access_debugfs(adev);
 	if (r < 0)
-		return r;
+		goto err;
 
 	/* switch to the specific se/sh/cu */
 	mutex_lock(&adev->grbm_idx_mutex);
@@ -967,7 +1003,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 		value = data[result >> 2];
 		r = put_user(value, (uint32_t *)buf);
 		if (r) {
-			result = r;
+			amdgpu_virt_disable_access_debugfs(adev);
 			goto err;
 		}
@@ -976,10 +1012,14 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 		size -= 4;
 	}
 
-err:
 	kfree(data);
 	amdgpu_virt_disable_access_debugfs(adev);
 	return result;
+
+err:
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+	kfree(data);
+	return r;
 }
 
 /**
@@ -1003,8 +1043,10 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
 		return -EINVAL;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	while (size) {
 		uint32_t value;
@@ -1140,8 +1182,10 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
 	int r = 0, i;
 
 	r = pm_runtime_get_sync(dev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	/* Avoid accidently unparking the sched thread during GPU reset */
 	mutex_lock(&adev->lock_reset);
@@ -1197,8 +1241,10 @@ static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
 	int r;
 
 	r = pm_runtime_get_sync(dev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));
@@ -1216,8 +1262,10 @@ static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
 	int r;
 
 	r = pm_runtime_get_sync(dev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	seq_printf(m, "(%d)\n", ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_TT));
@@ -1417,16 +1465,16 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val)
 		return -EINVAL;
 
 	ret = pm_runtime_get_sync(adev->ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq, true);
 		if (ret || val > max_freq || val < min_freq)
 			return -EINVAL;
 		ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val, true);
-	} else {
-		return 0;
 	}
 
 	pm_runtime_mark_last_busy(adev->ddev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 43d8ed7dbd00..652c57a3b847 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -587,7 +587,7 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
 	attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
 					&amdgpu_dma_buf_attach_ops, obj);
 	if (IS_ERR(attach)) {
-		drm_gem_object_put(obj);
+		drm_gem_object_put_unlocked(obj);
 		return ERR_CAST(attach);
 	}
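The pattern repeated throughout amdgpu_debugfs.c exists because pm_runtime_get_sync() raises the device usage count even when it fails; returning without a put leaves the count elevated and the device can never autosuspend again. A minimal userspace sketch of the bookkeeping (fake_dev, fake_get_sync and friends are hypothetical stand-ins, not the kernel API):

#include <stdio.h>

struct fake_dev { int usage_count; int broken; };

static int fake_get_sync(struct fake_dev *d)
{
	d->usage_count++;              /* count rises before the resume attempt */
	return d->broken ? -5 /* -EIO */ : 0;
}

static void fake_put_autosuspend(struct fake_dev *d)
{
	d->usage_count--;
}

static int op_leaky(struct fake_dev *d)
{
	int r = fake_get_sync(d);
	if (r < 0)
		return r;              /* bug: reference leaked on failure */
	fake_put_autosuspend(d);
	return 0;
}

static int op_fixed(struct fake_dev *d)
{
	int r = fake_get_sync(d);
	if (r < 0) {
		fake_put_autosuspend(d);   /* mirrors the hunks above */
		return r;
	}
	fake_put_autosuspend(d);
	return 0;
}

int main(void)
{
	struct fake_dev d = { .usage_count = 0, .broken = 1 };
	op_leaky(&d);
	printf("after leaky op: usage_count=%d\n", d.usage_count);  /* 1: stuck */
	d.usage_count = 0;
	op_fixed(&d);
	printf("after fixed op: usage_count=%d\n", d.usage_count);  /* 0: balanced */
	return 0;
}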
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index d878fe7fee51..3414e119f0cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -416,7 +416,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
 	}
 	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
-	amdgpu_irq_get(adev, irq_src, irq_type);
+
+	if (irq_src)
+		amdgpu_irq_get(adev, irq_src, irq_type);
 
 	ring->fence_drv.irq_src = irq_src;
 	ring->fence_drv.irq_type = irq_type;
@@ -537,8 +539,9 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 			/* no need to trigger GPU reset as we are unloading */
 			amdgpu_fence_driver_force_completion(ring);
 		}
-		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
-			       ring->fence_drv.irq_type);
+		if (ring->fence_drv.irq_src)
+			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+				       ring->fence_drv.irq_type);
 		drm_sched_fini(&ring->sched);
 		del_timer_sync(&ring->fence_drv.fallback_timer);
 		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
@@ -574,8 +577,9 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
 		}
 
 		/* disable the interrupt */
-		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
-			       ring->fence_drv.irq_type);
+		if (ring->fence_drv.irq_src)
+			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+				       ring->fence_drv.irq_type);
 	}
 }
@@ -601,8 +605,9 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
 			continue;
 
 		/* enable the interrupt */
-		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
-			       ring->fence_drv.irq_type);
+		if (ring->fence_drv.irq_src)
+			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
+				       ring->fence_drv.irq_type);
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index d399e5893170..74459927f97f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -465,7 +465,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
 	unsigned int pages;
 	int i, r;
 
-	*sgt = kmalloc(sizeof(*sg), GFP_KERNEL);
+	*sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
 	if (!*sgt)
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index 713c32560445..25ebf8f19b85 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -462,7 +462,7 @@ static int jpeg_v2_5_wait_for_idle(void *handle)
 			return ret;
 	}
 
-	return ret;
+	return 0;
 }
 
 static int jpeg_v2_5_set_clockgating_state(void *handle,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 710edc70e37e..0a39a8558b29 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2184,6 +2184,7 @@ void amdgpu_dm_update_connector_after_detect(
 
 		drm_connector_update_edid_property(connector,
 						   aconnector->edid);
+		drm_add_edid_modes(connector, aconnector->edid);
 
 		if (aconnector->dc_link->aux_mode)
 			drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
@@ -8686,6 +8687,29 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 	if (ret)
 		goto fail;
 
+	/* Check connector changes */
+	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+		struct dm_connector_state *dm_old_con_state =
+			to_dm_connector_state(old_con_state);
+		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+
+		/* Skip connectors that are disabled or part of modeset already. */
+		if (!old_con_state->crtc && !new_con_state->crtc)
+			continue;
+
+		if (!new_con_state->crtc)
+			continue;
+
+		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
+		if (IS_ERR(new_crtc_state)) {
+			ret = PTR_ERR(new_crtc_state);
+			goto fail;
+		}
+
+		if (dm_old_con_state->abm_level !=
+		    dm_new_con_state->abm_level)
+			new_crtc_state->connectors_changed = true;
+	}
+
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 	if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
 		goto fail;
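The amdgpu_vram_mgr hunk above swaps sizeof(*sg) for sizeof(**sgt): the allocation was sized from an unrelated variable's type, so the sg_table got a different object's size entirely. Deriving the size from the pointer being assigned keeps the two tied together. A small sketch of the idiom (struct elem and struct table are illustrative stand-ins, not the kernel types):

#include <stdio.h>
#include <stdlib.h>

struct elem  { unsigned long addr; unsigned len; };   /* stand-in, think scatterlist */
struct table { struct elem *list; unsigned nents; };  /* stand-in, think sg_table */

static int alloc_table(struct table **sgt)
{
	/* sizeof(**sgt) is the size of the object *sgt will point at.
	 * Sizing the allocation from some other variable (the old
	 * sizeof(*sg)) silently allocates the wrong object's size and
	 * keeps "working" until the two types diverge. */
	*sgt = calloc(1, sizeof(**sgt));
	return *sgt ? 0 : -1;
}

int main(void)
{
	struct table *t;
	if (alloc_table(&t))
		return 1;
	printf("allocated %zu bytes for struct table, elem is %zu bytes\n",
	       sizeof(*t), sizeof(struct elem));
	free(t);
	return 0;
}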
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index a2e1a73f66b8..5c6a6ae48d39 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -106,7 +106,7 @@ bool dm_pp_apply_display_requirements(
 		adev->powerplay.pp_funcs->display_configuration_change(
 			adev->powerplay.pp_handle,
 			&adev->pm.pm_display_cfg);
-	else
+	else if (adev->smu.ppt_funcs)
 		smu_display_configuration_change(smu,
 						 &adev->pm.pm_display_cfg);
 
@@ -530,6 +530,8 @@ bool dm_pp_get_static_clocks(
 			&pp_clk_info);
 	else if (adev->smu.ppt_funcs)
 		ret = smu_get_current_clocks(&adev->smu, &pp_clk_info);
+	else
+		return false;
 
 	if (ret)
 		return false;
@@ -590,7 +592,7 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
 	if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
 		pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
 							   &wm_with_clock_ranges);
-	else
+	else if (adev->smu.ppt_funcs)
 		smu_set_watermarks_for_clock_ranges(&adev->smu,
 						    &wm_with_clock_ranges);
 }
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
index 3fab9296918a..e133edc587d3 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
@@ -85,12 +85,77 @@ static int rv1_determine_dppclk_threshold(struct clk_mgr_internal *clk_mgr, stru
 	return disp_clk_threshold;
 }
 
-static void ramp_up_dispclk_with_dpp(struct clk_mgr_internal *clk_mgr, struct dc *dc, struct dc_clocks *new_clocks)
+static void ramp_up_dispclk_with_dpp(
+		struct clk_mgr_internal *clk_mgr,
+		struct dc *dc,
+		struct dc_clocks *new_clocks,
+		bool safe_to_lower)
 {
 	int i;
 	int dispclk_to_dpp_threshold = rv1_determine_dppclk_threshold(clk_mgr, new_clocks);
 	bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
 
+	/* This function changes dispclk, dppclk and dprefclk according to the
+	 * bandwidth requirement. Its call stack is rv1_update_clocks -->
+	 * update_clocks --> dcn10_prepare_bandwidth / dcn10_optimize_bandwidth
+	 * --> prepare_bandwidth / optimize_bandwidth. Before a DCN hw change,
+	 * prepare_bandwidth is called first to raise clocks and watermarks
+	 * enough for the change; after the DCN hw change has finished,
+	 * optimize_bandwidth is executed to lower the clocks and save power
+	 * for the new DCN hw settings.
+	 *
+	 * Below is the sequence of commit_planes_for_stream:
+	 *
+	 * step 1: prepare_bandwidth - raise clocks to have enough bandwidth
+	 * step 2: lock_doublebuffer_enable
+	 * step 3: pipe_control_lock(true) - so dchubp register changes do not
+	 *         take effect right away
+	 * step 4: apply_ctx_for_surface - program dchubp
+	 * step 5: pipe_control_lock(false) - dchubp register changes take effect
+	 * step 6: optimize_bandwidth --> dc_post_update_surfaces_to_stream
+	 *         for a full update, optimize clocks to save power
+	 *
+	 * At the end of step 1 the DCN clocks (dprefclk, dispclk, dppclk) may
+	 * already be changed for the new dchubp configuration, but the real
+	 * DCN hub dchubps keep running with the old configuration until the
+	 * end of step 5. The clock settings programmed in step 1 therefore
+	 * must not be lower than those in effect before step 1. This is
+	 * ensured by two conditions:
+	 * 1. should_set_clock(safe_to_lower, new_clocks->dispclk_khz,
+	 *    clk_mgr_base->clks.dispclk_khz) ||
+	 *    new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz
+	 * 2. request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz
+	 *
+	 * The second condition is based on the new dchubp configuration, and
+	 * dppclk for the new configuration may differ from dppclk before
+	 * step 1. For example, before step 1 the dchubps are:
+	 * pipe 0: recout=(0,40,1920,980) viewport=(0,0,1920,979)
+	 * pipe 1: recout=(0,0,1920,1080) viewport=(0,0,1920,1080)
+	 * and pipe 0 needs dppclk = dispclk.
+	 *
+	 * With the new dchubp pipe-split configuration:
+	 * pipe 0: recout=(0,0,960,1080) viewport=(0,0,960,1080)
+	 * pipe 1: recout=(960,0,960,1080) viewport=(960,0,960,1080)
+	 * dppclk = dispclk / 2 is already enough.
+	 *
+	 * dispclk and dppclk are not protected by the OTG master lock; they
+	 * take effect right after step 1. During the transition between
+	 * step 1 and step 6 dispclk stays the same, but dppclk would drop to
+	 * half of its previous value while the old dchubp configuration is
+	 * still active, which can intermittently raise p-state warnings.
+	 *
+	 * For new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz we
+	 * must make sure dppclk is not lowered between steps 1 and 6. For
+	 * new_clocks->dispclk_khz > clk_mgr_base->clks.dispclk_khz the
+	 * display clock is raised, but the ratio between the new and old
+	 * dispclk is unknown, so new_clocks->dispclk_khz / 2 is not
+	 * guaranteed to be equal to or higher than the old dppclk. The power
+	 * saved by dppclk = dispclk / 2 instead of dppclk = dispclk between
+	 * steps 1 and 6 is small enough to ignore, so as long as
+	 * safe_to_lower is false, set dppclk = dispclk to simplify the
+	 * condition check.
+	 * TODO: review this change for other ASICs.
+	 */
+	if (!safe_to_lower)
+		request_dpp_div = false;
+
 	/* set disp clk to dpp clk threshold */
 	clk_mgr->funcs->set_dispclk(clk_mgr, dispclk_to_dpp_threshold);
@@ -209,7 +274,7 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
 	/* program dispclk on = as a w/a for sleep resume clock ramping issues */
 	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)
 			|| new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz) {
-		ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks);
+		ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks, safe_to_lower);
 		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
 		send_request_to_lower = true;
 	}
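The long comment above boils down to one rule: while pipes may still be running on the old configuration (safe_to_lower == false, the prepare_bandwidth phase), dppclk must never drop, so the divider request is suppressed and dppclk is pinned to dispclk. A toy model of that decision (names and the /2 divider are illustrative, not the driver's programming path):

#include <stdbool.h>
#include <stdio.h>

struct clocks { int dispclk_khz; int dppclk_khz; };

static int pick_dppclk(const struct clocks *new_clocks, bool safe_to_lower)
{
	bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;

	if (!safe_to_lower)          /* mirrors the patch: keep full rate */
		request_dpp_div = false;

	return request_dpp_div ? new_clocks->dispclk_khz / 2
			       : new_clocks->dispclk_khz;
}

int main(void)
{
	struct clocks c = { .dispclk_khz = 600000, .dppclk_khz = 300000 };
	printf("prepare (unsafe): dppclk=%d\n", pick_dppclk(&c, false)); /* 600000 */
	printf("optimize (safe):  dppclk=%d\n", pick_dppclk(&c, true));  /* 300000 */
	return 0;
}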
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 48ab51533d5d..31aa31c280ee 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -3265,12 +3265,11 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
 			core_link_set_avmute(pipe_ctx, true);
 	}
 
+	dc->hwss.blank_stream(pipe_ctx);
 #if defined(CONFIG_DRM_AMD_DC_HDCP)
 	update_psp_stream_config(pipe_ctx, true);
 #endif
 
-	dc->hwss.blank_stream(pipe_ctx);
-
 	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
 		deallocate_mst_payload(pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index aefd29a440b5..be8f265976b0 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -503,7 +503,7 @@ bool dal_ddc_service_query_ddc_data(
 	uint8_t *read_buf,
 	uint32_t read_size)
 {
-	bool ret = false;
+	bool success = true;
 	uint32_t payload_size =
 		dal_ddc_service_is_in_aux_transaction_mode(ddc) ?
 			DEFAULT_AUX_MAX_DATA_SIZE : EDID_SEGMENT_SIZE;
@@ -527,7 +527,6 @@ bool dal_ddc_service_query_ddc_data(
 	 * but we want to read 256 over i2c!!!!*/
 	if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
 		struct aux_payload payload;
-		bool read_available = true;
 
 		payload.i2c_over_aux = true;
 		payload.address = address;
@@ -536,21 +535,26 @@ bool dal_ddc_service_query_ddc_data(
 
 		if (write_size != 0) {
 			payload.write = true;
-			payload.mot = false;
+			/* should not set mot (middle of transaction) to 0
+			 * if there are pending read payloads
+			 */
+			payload.mot = read_size == 0 ? false : true;
 			payload.length = write_size;
 			payload.data = write_buf;
 
-			ret = dal_ddc_submit_aux_command(ddc, &payload);
-			read_available = ret;
+			success = dal_ddc_submit_aux_command(ddc, &payload);
 		}
 
-		if (read_size != 0 && read_available) {
+		if (read_size != 0 && success) {
 			payload.write = false;
+			/* should set mot (middle of transaction) to 0
+			 * since it is the last payload to send
+			 */
 			payload.mot = false;
 			payload.length = read_size;
 			payload.data = read_buf;
 
-			ret = dal_ddc_submit_aux_command(ddc, &payload);
+			success = dal_ddc_submit_aux_command(ddc, &payload);
 		}
 	} else {
 		struct i2c_command command = {0};
@@ -573,7 +577,7 @@ bool dal_ddc_service_query_ddc_data(
 		command.number_of_payloads =
 			dal_ddc_i2c_payloads_get_count(&payloads);
 
-		ret = dm_helpers_submit_i2c(
+		success = dm_helpers_submit_i2c(
 			ddc->ctx,
 			ddc->link,
 			&command);
@@ -581,7 +585,7 @@ bool dal_ddc_service_query_ddc_data(
 		dal_ddc_i2c_payloads_destroy(&payloads);
 	}
 
-	return ret;
+	return success;
 }
 
 bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
@@ -598,7 +602,7 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
 	do {
 		struct aux_payload current_payload;
-		bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >
+		bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >=
 			payload->length;
 
 		current_payload.address = payload->address;
@@ -607,7 +611,10 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
 		current_payload.i2c_over_aux = payload->i2c_over_aux;
 		current_payload.length = is_end_of_payload ?
 			payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE;
-		current_payload.mot = !is_end_of_payload;
+		/* set mot (middle of transaction) to false
+		 * if it is the last payload
+		 */
+		current_payload.mot = is_end_of_payload ? payload->mot : true;
 		current_payload.reply = payload->reply;
 		current_payload.write = payload->write;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
index 70ec691e14d2..99c68ca9c7e0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
@@ -49,7 +49,7 @@
 #define DCN_PANEL_CNTL_REG_LIST()\
 	DCN_PANEL_CNTL_SR(PWRSEQ_CNTL, LVTMA), \
 	DCN_PANEL_CNTL_SR(PWRSEQ_STATE, LVTMA), \
-	DCE_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
+	DCN_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
 	SR(BL_PWM_CNTL), \
 	SR(BL_PWM_CNTL2), \
 	SR(BL_PWM_PERIOD_CNTL), \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index da5333d165ac..ec63cb853360 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1386,8 +1386,8 @@ static void dcn20_update_dchubp_dpp(
 
 	/* Any updates are handled in dc interface, just need to apply existing for plane enable */
 	if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
-			pipe_ctx->update_flags.bits.scaler || pipe_ctx->update_flags.bits.viewport)
-			&& pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+			pipe_ctx->update_flags.bits.scaler || viewport_changed == true) &&
+			pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
 		dc->hwss.set_cursor_position(pipe_ctx);
 		dc->hwss.set_cursor_attribute(pipe_ctx);
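Two subtle fixes in dal_ddc_submit_aux_command above: the end-of-payload test needs >= so a request whose final chunk is exactly DEFAULT_AUX_MAX_DATA_SIZE bytes is recognized as the end, and the last chunk now inherits the caller's mot flag instead of always clearing it. A standalone sketch of the chunking loop under those rules (MAX_DATA stands in for the real constant; the print loop is illustrative only):

#include <stdbool.h>
#include <stdio.h>

#define MAX_DATA 16  /* stand-in for DEFAULT_AUX_MAX_DATA_SIZE */

/* With '>' instead of '>=', a request whose tail chunk is exactly
 * MAX_DATA bytes was never treated as the end, so its MOT (middle of
 * transaction) bit stayed set and the transaction was not terminated. */
static void chunk(unsigned length, bool caller_mot)
{
	unsigned retrieved = 0;
	do {
		bool is_end = (retrieved + MAX_DATA) >= length;   /* the fix */
		unsigned n = is_end ? length - retrieved : MAX_DATA;
		bool mot = is_end ? caller_mot : true;
		printf("chunk of %u bytes, mot=%d\n", n, mot);
		retrieved += n;
	} while (retrieved < length);
}

int main(void)
{
	chunk(32, false);  /* two 16-byte chunks; the second ends the transaction */
	return 0;
}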
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index cef1aa938ab5..2d9055eb3ce9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -3097,7 +3097,7 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
 	int vlevel = 0;
 	int pipe_split_from[MAX_PIPES];
 	int pipe_cnt = 0;
-	display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+	display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
 	DC_LOGGER_INIT(dc->ctx->logger);
 
 	BW_VAL_TRACE_COUNT();
diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
index 89ef9f6860e5..16df2a485dd0 100644
--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -431,6 +431,9 @@ struct fixed31_32 dc_fixpt_log(struct fixed31_32 arg);
 */
 static inline struct fixed31_32 dc_fixpt_pow(struct fixed31_32 arg1, struct fixed31_32 arg2)
 {
+	if (arg1.value == 0)
+		return arg2.value == 0 ? dc_fixpt_one : dc_fixpt_zero;
+
 	return dc_fixpt_exp(
 		dc_fixpt_mul(
 			dc_fixpt_log(arg1),
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 27c5fc9572b2..e4630a76d7bf 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -2042,8 +2042,6 @@ static void arcturus_fill_eeprom_i2c_req(SwI2cRequest_t *req, bool write,
 {
 	int i;
 
-	BUG_ON(numbytes > MAX_SW_I2C_COMMANDS);
-
 	req->I2CcontrollerPort = 0;
 	req->I2CSpeed = 2;
 	req->SlaveAddress = address;
@@ -2081,6 +2079,12 @@ static int arcturus_i2c_eeprom_read_data(struct i2c_adapter *control,
 	struct smu_table_context *smu_table = &adev->smu.smu_table;
 	struct smu_table *table = &smu_table->driver_table;
 
+	if (numbytes > MAX_SW_I2C_COMMANDS) {
+		dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
+			numbytes, MAX_SW_I2C_COMMANDS);
+		return -EINVAL;
+	}
+
 	memset(&req, 0, sizeof(req));
 	arcturus_fill_eeprom_i2c_req(&req, false, address, numbytes, data);
@@ -2117,6 +2121,12 @@ static int arcturus_i2c_eeprom_write_data(struct i2c_adapter *control,
 	SwI2cRequest_t req;
 	struct amdgpu_device *adev = to_amdgpu_device(control);
 
+	if (numbytes > MAX_SW_I2C_COMMANDS) {
+		dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
+			numbytes, MAX_SW_I2C_COMMANDS);
+		return -EINVAL;
+	}
+
 	memset(&req, 0, sizeof(req));
 	arcturus_fill_eeprom_i2c_req(&req, true, address, numbytes, data);
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index aa76c2cea747..7897be877b96 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -164,7 +164,8 @@ int smu_v11_0_init_microcode(struct smu_context *smu)
 		chip_name = "navi12";
 		break;
 	default:
-		BUG();
+		dev_err(adev->dev, "Unsupported ASIC type %d\n", adev->asic_type);
+		return -EINVAL;
 	}
 
 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
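dc_fixpt_pow above computes x^y as exp(y * log(x)), which is undefined for x == 0; the new guard short-circuits 0^0 to one and 0^y to zero before log() is ever reached. A double-precision analogue of the guarded computation (fixpt_pow_like is a hypothetical stand-in for the fixed-point routine; compile with -lm):

#include <math.h>
#include <stdio.h>

static double fixpt_pow_like(double x, double y)
{
	if (x == 0.0)
		return (y == 0.0) ? 1.0 : 0.0;   /* mirrors the dc_fixpt guard */
	return exp(y * log(x));                  /* log(0) would be -inf */
}

int main(void)
{
	printf("0^0   -> %g\n", fixpt_pow_like(0.0, 0.0));  /* 1 */
	printf("0^2.5 -> %g\n", fixpt_pow_like(0.0, 2.5));  /* 0 */
	printf("2^10  -> %g\n", fixpt_pow_like(2.0, 10.0)); /* 1024 */
	return 0;
}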
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 56923a96b450..ad54f4500af1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -2725,7 +2725,10 @@ static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
 
 static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
 {
-	return ci_is_smc_ram_running(hwmgr);
+	return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+					     CGS_IND_REG__SMC, FEATURE_STATUS,
+					     VOLTAGE_CONTROLLER_ON))
+			? true : false;
 }
 
 static int ci_smu_init(struct pp_hwmgr *hwmgr)
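The arcturus and smu_v11_0 hunks replace BUG()/BUG_ON() with a logged -EINVAL return: invalid input from a caller should fail the request, not panic the machine. A userspace sketch of the same defensive shape (validate_len, MAX_CMDS and the literal error value are illustrative, not the driver's symbols):

#include <stdio.h>

#define MAX_CMDS 24   /* stand-in for MAX_SW_I2C_COMMANDS */
#define EINVAL_VAL 22

/* Reject oversized requests up front and report them, instead of
 * asserting (the kernel's BUG_ON) deep inside the fill path. */
static int validate_len(unsigned numbytes)
{
	if (numbytes > MAX_CMDS) {
		fprintf(stderr, "numbytes requested %u is over max allowed %d\n",
			numbytes, MAX_CMDS);
		return -EINVAL_VAL;
	}
	return 0;
}

int main(void)
{
	printf("len 16 -> %d\n", validate_len(16));  /* 0 */
	printf("len 64 -> %d\n", validate_len(64));  /* -22 */
	return 0;
}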