Diffstat (limited to 'drivers/gpu')
53 files changed, 584 insertions, 416 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 2d991da2cead..d1ed4f8df2b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -26,6 +26,7 @@ #include <linux/sched/task.h> #include "amdgpu_object.h" +#include "amdgpu_gem.h" #include "amdgpu_vm.h" #include "amdgpu_amdkfd.h" #include "amdgpu_dma_buf.h" @@ -1152,7 +1153,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( struct sg_table *sg = NULL; uint64_t user_addr = 0; struct amdgpu_bo *bo; - struct amdgpu_bo_param bp; + struct drm_gem_object *gobj; u32 domain, alloc_domain; u64 alloc_flags; int ret; @@ -1220,19 +1221,14 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n", va, size, domain_string(alloc_domain)); - memset(&bp, 0, sizeof(bp)); - bp.size = size; - bp.byte_align = 1; - bp.domain = alloc_domain; - bp.flags = alloc_flags; - bp.type = bo_type; - bp.resv = NULL; - ret = amdgpu_bo_create(adev, &bp, &bo); + ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags, + bo_type, NULL, &gobj); if (ret) { pr_debug("Failed to create BO on domain %s. ret %d\n", - domain_string(alloc_domain), ret); + domain_string(alloc_domain), ret); goto err_bo_create; } + bo = gem_to_amdgpu_bo(gobj); if (bo_type == ttm_bo_type_sg) { bo->tbo.sg = sg; bo->tbo.ttm->sg = sg; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index f764803c53a4..48cb33e5b382 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -926,8 +926,10 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd) { - struct drm_gem_object *obj; struct amdgpu_framebuffer *amdgpu_fb; + struct drm_gem_object *obj; + struct amdgpu_bo *bo; + uint32_t domains; int ret; obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]); @@ -938,7 +940,9 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev, } /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */ - if (obj->import_attach) { + bo = gem_to_amdgpu_bo(obj); + domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags); + if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) { drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n"); return ERR_PTR(-EINVAL); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index d0a1fee1f5f6..174a73eb23f0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -269,8 +269,8 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, resv = vm->root.base.bo->tbo.base.resv; } -retry: initial_domain = (u32)(0xffffffff & args->in.domains); +retry: r = amdgpu_gem_object_create(adev, size, args->in.alignment, initial_domain, flags, ttm_bo_type_device, resv, &gobj); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 25ec4d57333f..b4c8e5d5c763 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -897,7 +897,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, return -EINVAL; /* A shared bo cannot be migrated to VRAM */ - if (bo->prime_shared_count) { + if (bo->prime_shared_count || bo->tbo.base.import_attach) { if (domain & AMDGPU_GEM_DOMAIN_GTT) 
domain = AMDGPU_GEM_DOMAIN_GTT; else diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 346963e3cf73..d86b42a36560 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -99,6 +99,10 @@ #define mmGCR_GENERAL_CNTL_Sienna_Cichlid 0x1580 #define mmGCR_GENERAL_CNTL_Sienna_Cichlid_BASE_IDX 0 +#define mmCGTS_TCC_DISABLE_Vangogh 0x5006 +#define mmCGTS_TCC_DISABLE_Vangogh_BASE_IDX 1 +#define mmCGTS_USER_TCC_DISABLE_Vangogh 0x5007 +#define mmCGTS_USER_TCC_DISABLE_Vangogh_BASE_IDX 1 #define mmGOLDEN_TSC_COUNT_UPPER_Vangogh 0x0025 #define mmGOLDEN_TSC_COUNT_UPPER_Vangogh_BASE_IDX 1 #define mmGOLDEN_TSC_COUNT_LOWER_Vangogh 0x0026 @@ -4936,8 +4940,18 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev) static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev) { /* TCCs are global (not instanced). */ - uint32_t tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) | - RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE); + uint32_t tcc_disable; + + switch (adev->asic_type) { + case CHIP_VANGOGH: + tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_Vangogh) | + RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_Vangogh); + break; + default: + tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) | + RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE); + break; + } adev->gfx.config.tcc_disabled_mask = REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) | diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index c6da89df055d..961abf1cf040 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1833,8 +1833,8 @@ static void emulated_link_detect(struct dc_link *link) link->type = dc_connection_none; prev_sink = link->local_sink; - if (prev_sink != NULL) - dc_sink_retain(prev_sink); + if (prev_sink) + dc_sink_release(prev_sink); switch (link->connector_signal) { case SIGNAL_TYPE_HDMI_TYPE_A: { @@ -1934,7 +1934,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state, dc_commit_updates_for_stream( dm->dc, bundle->surface_updates, dc_state->stream_status->plane_count, - dc_state->streams[k], &bundle->stream_update, dc_state); + dc_state->streams[k], &bundle->stream_update); } cleanup: @@ -1965,8 +1965,7 @@ static void dm_set_dpms_off(struct dc_link *link) stream_update.stream = stream_state; dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0, - stream_state, &stream_update, - stream_state->ctx->dc->current_state); + stream_state, &stream_update); mutex_unlock(&adev->dm.dc_lock); } @@ -2330,8 +2329,10 @@ void amdgpu_dm_update_connector_after_detect( * TODO: check if we still need the S3 mode update workaround. * If yes, put it here. 
*/ - if (aconnector->dc_sink) + if (aconnector->dc_sink) { amdgpu_dm_update_freesync_caps(connector, NULL); + dc_sink_release(aconnector->dc_sink); + } aconnector->dc_sink = sink; dc_sink_retain(aconnector->dc_sink); @@ -2347,8 +2348,6 @@ void amdgpu_dm_update_connector_after_detect( drm_connector_update_edid_property(connector, aconnector->edid); - drm_add_edid_modes(connector, aconnector->edid); - if (aconnector->dc_link->aux_mode) drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, aconnector->edid); @@ -7549,7 +7548,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, struct drm_crtc *pcrtc, bool wait_for_vblank) { - uint32_t i; + int i; uint64_t timestamp_ns; struct drm_plane *plane; struct drm_plane_state *old_plane_state, *new_plane_state; @@ -7590,7 +7589,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, amdgpu_dm_commit_cursors(state); /* update planes when needed */ - for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { + for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { struct drm_crtc *crtc = new_plane_state->crtc; struct drm_crtc_state *new_crtc_state; struct drm_framebuffer *fb = new_plane_state->fb; @@ -7813,8 +7812,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->surface_updates, planes_count, acrtc_state->stream, - &bundle->stream_update, - dc_state); + &bundle->stream_update); /** * Enable or disable the interrupts on the backend. @@ -8150,13 +8148,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); - struct dc_surface_update dummy_updates[MAX_SURFACES]; + struct dc_surface_update surface_updates[MAX_SURFACES]; struct dc_stream_update stream_update; struct dc_info_packet hdr_packet; struct dc_stream_status *status = NULL; bool abm_changed, hdr_changed, scaling_changed; - memset(&dummy_updates, 0, sizeof(dummy_updates)); + memset(&surface_updates, 0, sizeof(surface_updates)); memset(&stream_update, 0, sizeof(stream_update)); if (acrtc) { @@ -8213,16 +8211,15 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) * To fix this, DC should permit updating only stream properties. 
*/ for (j = 0; j < status->plane_count; j++) - dummy_updates[j].surface = status->plane_states[0]; + surface_updates[j].surface = status->plane_states[j]; mutex_lock(&dm->dc_lock); dc_commit_updates_for_stream(dm->dc, - dummy_updates, + surface_updates, status->plane_count, dm_new_crtc_state->stream, - &stream_update, - dc_state); + &stream_update); mutex_unlock(&dm->dc_lock); } @@ -8359,14 +8356,14 @@ static int dm_force_atomic_commit(struct drm_connector *connector) ret = PTR_ERR_OR_ZERO(conn_state); if (ret) - goto err; + goto out; /* Attach crtc to drm_atomic_state*/ crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base); ret = PTR_ERR_OR_ZERO(crtc_state); if (ret) - goto err; + goto out; /* force a restore */ crtc_state->mode_changed = true; @@ -8376,17 +8373,15 @@ static int dm_force_atomic_commit(struct drm_connector *connector) ret = PTR_ERR_OR_ZERO(plane_state); if (ret) - goto err; - + goto out; /* Call commit internally with the state we just constructed */ ret = drm_atomic_commit(state); - if (!ret) - return 0; -err: - DRM_ERROR("Restoring old state failed with %i\n", ret); +out: drm_atomic_state_put(state); + if (ret) + DRM_ERROR("Restoring old state failed with %i\n", ret); return ret; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 8ab0b9060d2b..f2d8cf34be46 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -833,6 +833,9 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, if (computed_streams[i]) continue; + if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK) + return false; + mutex_lock(&aconnector->mst_mgr.lock); if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) { mutex_unlock(&aconnector->mst_mgr.lock); @@ -850,7 +853,8 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, stream = dc_state->streams[i]; if (stream->timing.flags.DSC == 1) - dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream); + if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK) + return false; } return true; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 58eb0d69873a..6cf1a5a2a5ec 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2679,8 +2679,7 @@ void dc_commit_updates_for_stream(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, - struct dc_stream_update *stream_update, - struct dc_state *state) + struct dc_stream_update *stream_update) { const struct dc_stream_status *stream_status; enum surface_update_type update_type; @@ -2699,6 +2698,12 @@ void dc_commit_updates_for_stream(struct dc *dc, if (update_type >= UPDATE_TYPE_FULL) { + struct dc_plane_state *new_planes[MAX_SURFACES]; + + memset(new_planes, 0, sizeof(new_planes)); + + for (i = 0; i < surface_count; i++) + new_planes[i] = srf_updates[i].surface; /* initialize scratch memory for building context */ context = dc_create_state(dc); @@ -2707,15 +2712,21 @@ void dc_commit_updates_for_stream(struct dc *dc, return; } - dc_resource_state_copy_construct(state, context); + dc_resource_state_copy_construct( + dc->current_state, context); - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; - struct pipe_ctx 
*old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + /*remove old surfaces from context */ + if (!dc_rem_all_planes_for_stream(dc, stream, context)) { + DC_ERROR("Failed to remove streams for new validate context!\n"); + return; + } - if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state) - new_pipe->plane_state->force_full_update = true; + /* add surface to context */ + if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) { + DC_ERROR("Failed to add streams for new validate context!\n"); + return; } + } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index f95bade59624..1e4794e2825c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -892,13 +892,13 @@ static uint32_t translate_training_aux_read_interval(uint32_t dpcd_aux_read_inte switch (dpcd_aux_read_interval) { case 0x01: - aux_rd_interval_us = 400; + aux_rd_interval_us = 4000; break; case 0x02: - aux_rd_interval_us = 4000; + aux_rd_interval_us = 8000; break; case 0x03: - aux_rd_interval_us = 8000; + aux_rd_interval_us = 12000; break; case 0x04: aux_rd_interval_us = 16000; diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index b7910976b81a..e243c01b9672 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -283,8 +283,7 @@ void dc_commit_updates_for_stream(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, - struct dc_stream_update *stream_update, - struct dc_state *state); + struct dc_stream_update *stream_update); /* * Log the current stream state. */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index b000b43a820d..674376428916 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -906,6 +906,8 @@ enum dcn20_clk_src_array_id { DCN20_CLK_SRC_PLL0, DCN20_CLK_SRC_PLL1, DCN20_CLK_SRC_PLL2, + DCN20_CLK_SRC_PLL3, + DCN20_CLK_SRC_PLL4, DCN20_CLK_SRC_TOTAL_DCN21 }; @@ -2030,6 +2032,14 @@ static bool dcn21_resource_construct( dcn21_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL2, &clk_src_regs[2], false); + pool->base.clock_sources[DCN20_CLK_SRC_PLL3] = + dcn21_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL3, + &clk_src_regs[3], false); + pool->base.clock_sources[DCN20_CLK_SRC_PLL4] = + dcn21_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL4, + &clk_src_regs[4], false); pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile index c20331eb62e0..dfd77b3cc84d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile @@ -32,8 +32,8 @@ DCN30 = dcn30_init.o dcn30_hubbub.o dcn30_hubp.o dcn30_dpp.o dcn30_optc.o \ ifdef CONFIG_X86 -CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -msse -CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -msse +CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -msse +CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -msse endif ifdef CONFIG_PPC64 @@ -45,6 +45,8 @@ ifdef CONFIG_CC_IS_GCC ifeq ($(call cc-ifversion, -lt, 0701, y), y) IS_OLD_GCC = 1 endif 
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o += -mhard-float +CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o += -mhard-float endif ifdef CONFIG_X86 diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile index 3ca7d911d25c..09264716d1dc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile @@ -14,7 +14,7 @@ DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \ dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o ifdef CONFIG_X86 -CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mhard-float -msse +CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -msse endif ifdef CONFIG_PPC64 @@ -25,6 +25,7 @@ ifdef CONFIG_CC_IS_GCC ifeq ($(call cc-ifversion, -lt, 0701, y), y) IS_OLD_GCC = 1 endif +CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o += -mhard-float endif ifdef CONFIG_X86 diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile index 8d4924b7dc22..101620a8867a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile @@ -13,7 +13,7 @@ DCN3_02 = dcn302_init.o dcn302_hwseq.o dcn302_resource.o ifdef CONFIG_X86 -CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mhard-float -msse +CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -msse endif ifdef CONFIG_PPC64 @@ -24,6 +24,7 @@ ifdef CONFIG_CC_IS_GCC ifeq ($(call cc-ifversion, -lt, 0701, y), y) IS_OLD_GCC = 1 endif +CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o += -mhard-float endif ifdef CONFIG_X86 diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h index 4bdbcce7092d..0d797fa9f5cc 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h @@ -553,6 +553,7 @@ struct pptable_funcs { *clock_req); uint32_t (*get_fan_control_mode)(struct smu_context *smu); int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode); + int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed); int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed); int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate); int (*gfx_off_control)(struct smu_context *smu, bool enable); diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h index 13de692a4213..5d0b29653ffa 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h @@ -203,6 +203,9 @@ int smu_v11_0_set_fan_control_mode(struct smu_context *smu, uint32_t mode); +int +smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed); + int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed); diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 8b867a6d52b5..e84c737e3967 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -2151,19 +2151,14 @@ int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed) int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed) { int ret = 0; - uint32_t rpm; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; mutex_lock(&smu->mutex); - if (smu->ppt_funcs->set_fan_speed_rpm) { - if (speed > 100) - speed = 100; - rpm = speed * smu->fan_max_rpm / 100; - ret = smu->ppt_funcs->set_fan_speed_rpm(smu, rpm); - } + if (smu->ppt_funcs->set_fan_speed_percent) + ret = 
smu->ppt_funcs->set_fan_speed_percent(smu, speed); mutex_unlock(&smu->mutex); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index cd7b411457ff..16db0b506b0d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -2326,6 +2326,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, .get_fan_control_mode = smu_v11_0_get_fan_control_mode, .set_fan_control_mode = smu_v11_0_set_fan_control_mode, + .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm, .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate, .gfx_off_control = smu_v11_0_gfx_off_control, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 51e83123f72a..cd7efa923195 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -2456,6 +2456,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, .get_fan_control_mode = smu_v11_0_get_fan_control_mode, .set_fan_control_mode = smu_v11_0_set_fan_control_mode, + .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm, .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate, .gfx_off_control = smu_v11_0_gfx_off_control, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 12b36eb0ff6a..d68d3dfee51d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -2802,6 +2802,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, .get_fan_control_mode = smu_v11_0_get_fan_control_mode, .set_fan_control_mode = smu_v11_0_set_fan_control_mode, + .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm, .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate, .gfx_off_control = smu_v11_0_gfx_off_control, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index b279dbbbce6b..5aeb5f5a0447 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1174,6 +1174,35 @@ smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode) } int +smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t duty100, duty; + uint64_t tmp64; + + if (speed > 100) + speed = 100; + + if (smu_v11_0_auto_fan_control(smu, 0)) + return -EINVAL; + + duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1), + CG_FDO_CTRL1, FMAX_DUTY100); + if (!duty100) + return -EINVAL; + + tmp64 = (uint64_t)speed * duty100; + do_div(tmp64, 100); + duty = (uint32_t)tmp64; + + WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0, + REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0), + CG_FDO_CTRL0, FDO_STATIC_DUTY, duty)); + + return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC); +} + +int smu_v11_0_set_fan_control_mode(struct smu_context *smu, uint32_t mode) { @@ -1181,7 +1210,7 @@ smu_v11_0_set_fan_control_mode(struct smu_context *smu, switch (mode) { case AMD_FAN_CTRL_NONE: - ret = 
smu_v11_0_set_fan_speed_rpm(smu, smu->fan_max_rpm); + ret = smu_v11_0_set_fan_speed_percent(smu, 100); break; case AMD_FAN_CTRL_MANUAL: ret = smu_v11_0_auto_fan_control(smu, 0); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 5c1482d4ca43..92ad2cdbae10 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -591,14 +591,17 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu, gpu_metrics->average_socket_power = metrics.CurrentSocketPower; gpu_metrics->average_cpu_power = metrics.Power[0]; gpu_metrics->average_soc_power = metrics.Power[1]; + gpu_metrics->average_gfx_power = metrics.Power[2]; memcpy(&gpu_metrics->average_core_power[0], &metrics.CorePower[0], sizeof(uint16_t) * 8); gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency; gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency; + gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency; gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency; gpu_metrics->average_vclk_frequency = metrics.VclkFrequency; + gpu_metrics->average_dclk_frequency = metrics.DclkFrequency; memcpy(&gpu_metrics->current_coreclk[0], &metrics.CoreFrequency[0], diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c index 0c98d27f84ac..fee27952ec6d 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c @@ -14,6 +14,7 @@ #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/wait.h> +#include <linux/workqueue.h> #include <sound/hdmi-codec.h> @@ -36,6 +37,7 @@ struct lt9611uxc { struct mutex ocm_lock; struct wait_queue_head wq; + struct work_struct work; struct device_node *dsi0_node; struct device_node *dsi1_node; @@ -52,6 +54,8 @@ struct lt9611uxc { bool hpd_supported; bool edid_read; + /* can be accessed from different threads, so protect this with ocm_lock */ + bool hdmi_connected; uint8_t fw_version; }; @@ -143,21 +147,41 @@ static irqreturn_t lt9611uxc_irq_thread_handler(int irq, void *dev_id) if (irq_status) regmap_write(lt9611uxc->regmap, 0xb022, 0); - lt9611uxc_unlock(lt9611uxc); - - if (irq_status & BIT(0)) + if (irq_status & BIT(0)) { lt9611uxc->edid_read = !!(hpd_status & BIT(0)); + wake_up_all(<9611uxc->wq); + } if (irq_status & BIT(1)) { - if (lt9611uxc->connector.dev) - drm_kms_helper_hotplug_event(lt9611uxc->connector.dev); - else - drm_bridge_hpd_notify(<9611uxc->bridge, !!(hpd_status & BIT(1))); + lt9611uxc->hdmi_connected = hpd_status & BIT(1); + schedule_work(<9611uxc->work); } + lt9611uxc_unlock(lt9611uxc); + return IRQ_HANDLED; } +static void lt9611uxc_hpd_work(struct work_struct *work) +{ + struct lt9611uxc *lt9611uxc = container_of(work, struct lt9611uxc, work); + bool connected; + + if (lt9611uxc->connector.dev) + drm_kms_helper_hotplug_event(lt9611uxc->connector.dev); + else { + + mutex_lock(<9611uxc->ocm_lock); + connected = lt9611uxc->hdmi_connected; + mutex_unlock(<9611uxc->ocm_lock); + + drm_bridge_hpd_notify(<9611uxc->bridge, + connected ? 
+ connector_status_connected : + connector_status_disconnected); + } +} + static void lt9611uxc_reset(struct lt9611uxc *lt9611uxc) { gpiod_set_value_cansleep(lt9611uxc->reset_gpio, 1); @@ -445,18 +469,21 @@ static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *brid struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); unsigned int reg_val = 0; int ret; - int connected = 1; + bool connected = true; + + lt9611uxc_lock(lt9611uxc); if (lt9611uxc->hpd_supported) { - lt9611uxc_lock(lt9611uxc); ret = regmap_read(lt9611uxc->regmap, 0xb023, ®_val); - lt9611uxc_unlock(lt9611uxc); if (ret) dev_err(lt9611uxc->dev, "failed to read hpd status: %d\n", ret); else connected = reg_val & BIT(1); } + lt9611uxc->hdmi_connected = connected; + + lt9611uxc_unlock(lt9611uxc); return connected ? connector_status_connected : connector_status_disconnected; @@ -465,7 +492,7 @@ static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *brid static int lt9611uxc_wait_for_edid(struct lt9611uxc *lt9611uxc) { return wait_event_interruptible_timeout(lt9611uxc->wq, lt9611uxc->edid_read, - msecs_to_jiffies(100)); + msecs_to_jiffies(500)); } static int lt9611uxc_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len) @@ -503,7 +530,10 @@ static struct edid *lt9611uxc_bridge_get_edid(struct drm_bridge *bridge, ret = lt9611uxc_wait_for_edid(lt9611uxc); if (ret < 0) { dev_err(lt9611uxc->dev, "wait for EDID failed: %d\n", ret); - return ERR_PTR(ret); + return NULL; + } else if (ret == 0) { + dev_err(lt9611uxc->dev, "wait for EDID timeout\n"); + return NULL; } return drm_do_get_edid(connector, lt9611uxc_get_edid_block, lt9611uxc); @@ -926,6 +956,8 @@ retry: lt9611uxc->fw_version = ret; init_waitqueue_head(<9611uxc->wq); + INIT_WORK(<9611uxc->work, lt9611uxc_hpd_work); + ret = devm_request_threaded_irq(dev, client->irq, NULL, lt9611uxc_irq_thread_handler, IRQF_ONESHOT, "lt9611uxc", lt9611uxc); @@ -962,6 +994,7 @@ static int lt9611uxc_remove(struct i2c_client *client) struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client); disable_irq(client->irq); + flush_scheduled_work(); lt9611uxc_audio_exit(lt9611uxc); drm_bridge_remove(<9611uxc->bridge); diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 0401b2f47500..8781deefeae3 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -3629,14 +3629,26 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr, return 0; } -static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count) +/** + * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link + * @link_rate: link rate in 10kbits/s units + * @link_lane_count: lane count + * + * Calculate the total bandwidth of a MultiStream Transport link. The returned + * value is in units of PBNs/(timeslots/1 MTP). This value can be used to + * convert the number of PBNs required for a given stream to the number of + * timeslots this stream requires in each MTP. 
+ */ +int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count) { - if (dp_link_bw == 0 || dp_link_count == 0) - DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n", - dp_link_bw, dp_link_count); + if (link_rate == 0 || link_lane_count == 0) + DRM_DEBUG_KMS("invalid link rate/lane count: (%d / %d)\n", + link_rate, link_lane_count); - return dp_link_bw * dp_link_count / 2; + /* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */ + return link_rate * link_lane_count / 54000; } +EXPORT_SYMBOL(drm_dp_get_vc_payload_bw); /** * drm_dp_read_mst_cap() - check whether or not a sink supports MST @@ -3692,7 +3704,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms goto out_unlock; } - mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1], + mgr->pbn_div = drm_dp_get_vc_payload_bw(drm_dp_bw_code_to_link_rate(mgr->dpcd[1]), mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK); if (mgr->pbn_div == 0) { ret = -EINVAL; diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index d5ace48b1ace..dc13d1814d95 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -2754,13 +2754,15 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int n_entries, ln; u32 val; + if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT) + return; + ddi_translations = icl_get_mg_buf_trans(encoder, crtc_state, &n_entries); - /* The table does not have values for level 3 and level 9. */ - if (level >= n_entries || level == 3 || level == 9) { + if (level >= n_entries) { drm_dbg_kms(&dev_priv->drm, "DDI translation not found for level %d. Using %d instead.", - level, n_entries - 2); - level = n_entries - 2; + level, n_entries - 1); + level = n_entries - 1; } /* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */ @@ -2891,6 +2893,9 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, u32 val, dpcnt_mask, dpcnt_val; int n_entries, ln; + if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT) + return; + ddi_translations = tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries); if (level >= n_entries) @@ -3532,6 +3537,23 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); } +static void intel_ddi_power_up_lanes(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + enum phy phy = intel_port_to_phy(i915, encoder->port); + + if (intel_phy_is_combo(i915, phy)) { + bool lane_reversal = + dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; + + intel_combo_phy_power_up_lanes(i915, phy, false, + crtc_state->lane_count, + lane_reversal); + } +} + static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, @@ -3621,14 +3643,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, * 7.f Combo PHY: Configure PORT_CL_DW10 Static Power Down to power up * the used lanes of the DDI. 
*/ - if (intel_phy_is_combo(dev_priv, phy)) { - bool lane_reversal = - dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; - - intel_combo_phy_power_up_lanes(dev_priv, phy, false, - crtc_state->lane_count, - lane_reversal); - } + intel_ddi_power_up_lanes(encoder, crtc_state); /* * 7.g Configure and enable DDI_BUF_CTL @@ -3713,14 +3728,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, else intel_prepare_dp_ddi_buffers(encoder, crtc_state); - if (intel_phy_is_combo(dev_priv, phy)) { - bool lane_reversal = - dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; - - intel_combo_phy_power_up_lanes(dev_priv, phy, false, - crtc_state->lane_count, - lane_reversal); - } + intel_ddi_power_up_lanes(encoder, crtc_state); intel_ddi_init_dp_buf_reg(encoder, crtc_state); if (!is_mst) @@ -4206,6 +4214,8 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state, intel_de_write(dev_priv, reg, val); } + intel_ddi_power_up_lanes(encoder, crtc_state); + /* In HDMI/DVI mode, the port width, and swing/emphasis values * are ignored so nothing special needs to be done besides * enabling the port. diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 53a00cf3fa32..61be6bed9162 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -2309,7 +2309,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, */ ret = i915_vma_pin_fence(vma); if (ret != 0 && INTEL_GEN(dev_priv) < 4) { - i915_gem_object_unpin_from_display_plane(vma); + i915_vma_unpin(vma); vma = ERR_PTR(ret); goto err; } @@ -2327,12 +2327,9 @@ err: void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags) { - i915_gem_object_lock(vma->obj, NULL); if (flags & PLANE_HAS_FENCE) i915_vma_unpin_fence(vma); - i915_gem_object_unpin_from_display_plane(vma); - i915_gem_object_unlock(vma->obj); - + i915_vma_unpin(vma); i915_vma_put(vma); } @@ -4807,6 +4804,8 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; } else if (fb->format->is_yuv) { plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE; + if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) + plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; } return plane_color_ctl; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 09123e8625c4..8a26307c4896 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -4637,24 +4637,6 @@ ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp, intel_de_posting_read(dev_priv, intel_dp->output_reg); } -void intel_dp_set_signal_levels(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - u8 train_set = intel_dp->train_set[0]; - - drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n", - train_set & DP_TRAIN_VOLTAGE_SWING_MASK, - train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : ""); - drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n", - (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >> - DP_TRAIN_PRE_EMPHASIS_SHIFT, - train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? 
- " (max)" : ""); - - intel_dp->set_signal_levels(intel_dp, crtc_state); -} - void intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, @@ -5703,7 +5685,7 @@ static void intel_dp_process_phy_request(struct intel_dp *intel_dp, intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state); - intel_dp_set_signal_levels(intel_dp, crtc_state); + intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX); intel_dp_phy_pattern_update(intel_dp, crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index 05f7ddf7a795..6620f9efdcbb 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -96,9 +96,6 @@ void intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, u8 dp_train_pat); -void -intel_dp_set_signal_levels(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state); void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, u8 *link_bw, u8 *rate_select); bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 91d3979902d0..d8c6d7054d11 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -334,6 +334,27 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len; } +void intel_dp_set_signal_levels(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + enum drm_dp_phy dp_phy) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + u8 train_set = intel_dp->train_set[0]; + char phy_name[10]; + + drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s, pre-emphasis level %d%s, at %s\n", + train_set & DP_TRAIN_VOLTAGE_SWING_MASK, + train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "", + (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >> + DP_TRAIN_PRE_EMPHASIS_SHIFT, + train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? 
+ " (max)" : "", + intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name))); + + if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy)) + intel_dp->set_signal_levels(intel_dp, crtc_state); +} + static bool intel_dp_reset_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, @@ -341,7 +362,7 @@ intel_dp_reset_link_train(struct intel_dp *intel_dp, u8 dp_train_pat) { memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); - intel_dp_set_signal_levels(intel_dp, crtc_state); + intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy); return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat); } @@ -355,7 +376,7 @@ intel_dp_update_link_train(struct intel_dp *intel_dp, DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy); int ret; - intel_dp_set_signal_levels(intel_dp, crtc_state); + intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy); ret = drm_dp_dpcd_write(&intel_dp->aux, reg, intel_dp->train_set, crtc_state->lane_count); diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h index 86905aa24db7..6a1f76bd8c75 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h @@ -17,6 +17,9 @@ void intel_dp_get_adjust_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy, const u8 link_status[DP_LINK_STATUS_SIZE]); +void intel_dp_set_signal_levels(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + enum drm_dp_phy dp_phy); void intel_dp_start_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state); void intel_dp_stop_link_train(struct intel_dp *intel_dp, diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 27f04aed8764..3286b232be0b 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -69,7 +69,9 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr, connector->port, - crtc_state->pbn, 0); + crtc_state->pbn, + drm_dp_get_vc_payload_bw(crtc_state->port_clock, + crtc_state->lane_count)); if (slots == -EDEADLK) return slots; if (slots >= 0) diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 52b4f6193b4c..0095c8cac9b4 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -359,7 +359,7 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay) intel_frontbuffer_flip_complete(overlay->i915, INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe)); - i915_gem_object_unpin_from_display_plane(vma); + i915_vma_unpin(vma); i915_vma_put(vma); } @@ -860,7 +860,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, return 0; out_unpin: - i915_gem_object_unpin_from_display_plane(vma); + i915_vma_unpin(vma); out_pin_section: atomic_dec(&dev_priv->gpu_error.pending_fb_pin); diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 019a2d6d807a..3da2544fa1c0 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -618,13 +618,19 @@ skl_program_scaler(struct intel_plane *plane, /* Preoffset values for YUV to RGB Conversion */ #define PREOFF_YUV_TO_RGB_HI 0x1800 -#define PREOFF_YUV_TO_RGB_ME 0x1F00 +#define 
PREOFF_YUV_TO_RGB_ME 0x0000 #define PREOFF_YUV_TO_RGB_LO 0x1800 #define ROFF(x) (((x) & 0xffff) << 16) #define GOFF(x) (((x) & 0xffff) << 0) #define BOFF(x) (((x) & 0xffff) << 16) +/* + * Programs the input color space conversion stage for ICL HDR planes. + * Note that it is assumed that this stage always happens after YUV + * range correction. Thus, the input to this stage is assumed to be + * in full-range YCbCr. + */ static void icl_program_input_csc(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, @@ -672,52 +678,7 @@ icl_program_input_csc(struct intel_plane *plane, 0x0, 0x7800, 0x7F10, }, }; - - /* Matrix for Limited Range to Full Range Conversion */ - static const u16 input_csc_matrix_lr[][9] = { - /* - * BT.601 Limted range YCbCr -> full range RGB - * The matrix required is : - * [1.164384, 0.000, 1.596027, - * 1.164384, -0.39175, -0.812813, - * 1.164384, 2.017232, 0.0000] - */ - [DRM_COLOR_YCBCR_BT601] = { - 0x7CC8, 0x7950, 0x0, - 0x8D00, 0x7950, 0x9C88, - 0x0, 0x7950, 0x6810, - }, - /* - * BT.709 Limited range YCbCr -> full range RGB - * The matrix required is : - * [1.164384, 0.000, 1.792741, - * 1.164384, -0.213249, -0.532909, - * 1.164384, 2.112402, 0.0000] - */ - [DRM_COLOR_YCBCR_BT709] = { - 0x7E58, 0x7950, 0x0, - 0x8888, 0x7950, 0xADA8, - 0x0, 0x7950, 0x6870, - }, - /* - * BT.2020 Limited range YCbCr -> full range RGB - * The matrix required is : - * [1.164, 0.000, 1.678, - * 1.164, -0.1873, -0.6504, - * 1.164, 2.1417, 0.0000] - */ - [DRM_COLOR_YCBCR_BT2020] = { - 0x7D70, 0x7950, 0x0, - 0x8A68, 0x7950, 0xAC00, - 0x0, 0x7950, 0x6890, - }, - }; - const u16 *csc; - - if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) - csc = input_csc_matrix[plane_state->hw.color_encoding]; - else - csc = input_csc_matrix_lr[plane_state->hw.color_encoding]; + const u16 *csc = input_csc_matrix[plane_state->hw.color_encoding]; intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), ROFF(csc[0]) | GOFF(csc[1])); @@ -734,14 +695,8 @@ icl_program_input_csc(struct intel_plane *plane, intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0), PREOFF_YUV_TO_RGB_HI); - if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) - intel_de_write_fw(dev_priv, - PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), - 0); - else - intel_de_write_fw(dev_priv, - PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), - PREOFF_YUV_TO_RGB_ME); + intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), + PREOFF_YUV_TO_RGB_ME); intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2), PREOFF_YUV_TO_RGB_LO); intel_de_write_fw(dev_priv, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index fcce6909f201..3d435bfff764 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -387,48 +387,6 @@ err: return vma; } -static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) -{ - struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct i915_vma *vma; - - if (list_empty(&obj->vma.list)) - return; - - mutex_lock(&i915->ggtt.vm.mutex); - spin_lock(&obj->vma.lock); - for_each_ggtt_vma(vma, obj) { - if (!drm_mm_node_allocated(&vma->node)) - continue; - - GEM_BUG_ON(vma->vm != &i915->ggtt.vm); - list_move_tail(&vma->vm_link, &vma->vm->bound_list); - } - spin_unlock(&obj->vma.lock); - mutex_unlock(&i915->ggtt.vm.mutex); - - if (i915_gem_object_is_shrinkable(obj)) { - unsigned long flags; - - spin_lock_irqsave(&i915->mm.obj_lock, 
flags); - - if (obj->mm.madv == I915_MADV_WILLNEED && - !atomic_read(&obj->mm.shrink_pin)) - list_move_tail(&obj->mm.link, &i915->mm.shrink_list); - - spin_unlock_irqrestore(&i915->mm.obj_lock, flags); - } -} - -void -i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) -{ - /* Bump the LRU to try and avoid premature eviction whilst flipping */ - i915_gem_object_bump_inactive_ggtt(vma->obj); - - i915_vma_unpin(vma); -} - /** * Moves a single object to the CPU read, and possibly write domain. * @obj: object to act on @@ -569,9 +527,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, else err = i915_gem_object_set_to_cpu_domain(obj, write_domain); - /* And bump the LRU for this access */ - i915_gem_object_bump_inactive_ggtt(obj); - i915_gem_object_unlock(obj); if (write_domain) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index be14486f63a7..4556afe18f16 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -486,7 +486,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, u32 alignment, const struct i915_ggtt_view *view, unsigned int flags); -void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma); void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj); void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj); diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c index 94465374ca2f..e961ad6a3129 100644 --- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c +++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c @@ -390,6 +390,16 @@ static void emit_batch(struct i915_vma * const vma, &cb_kernel_ivb, desc_count); + /* Reset inherited context registers */ + gen7_emit_pipeline_invalidate(&cmds); + batch_add(&cmds, MI_LOAD_REGISTER_IMM(2)); + batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7)); + batch_add(&cmds, 0xffff0000); + batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1)); + batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE); + gen7_emit_pipeline_flush(&cmds); + + /* Switch to the media pipeline and our base address */ gen7_emit_pipeline_invalidate(&cmds); batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA); batch_add(&cmds, MI_NOOP); @@ -399,9 +409,11 @@ static void emit_batch(struct i915_vma * const vma, gen7_emit_state_base_address(&cmds, descriptors); gen7_emit_pipeline_invalidate(&cmds); + /* Set the clear-residual kernel state */ gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0); gen7_emit_interface_descriptor_load(&cmds, descriptors, desc_count); + /* Execute the kernel on all HW threads */ for (i = 0; i < num_primitives(bv); i++) gen7_emit_media_object(&cmds, i); diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c index 0625cbb3b431..1d1757584f49 100644 --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c @@ -187,18 +187,6 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl) intel_engine_add_retire(b->irq_engine, tl); } -static bool __signal_request(struct i915_request *rq) -{ - GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)); - - if (!__dma_fence_signal(&rq->fence)) { - i915_request_put(rq); - return false; - } - - return true; -} - static struct llist_node * slist_add(struct llist_node *node, struct llist_node *head) { @@ -269,9 +257,11 @@ static void signal_irq_work(struct irq_work 
*work) release = remove_signaling_context(b, ce); spin_unlock(&ce->signal_lock); - if (__signal_request(rq)) + if (__dma_fence_signal(&rq->fence)) /* We own signal_node now, xfer to local list */ signal = slist_add(&rq->signal_node, signal); + else + i915_request_put(rq); if (release) { add_retire(b, ce->timeline); @@ -358,6 +348,17 @@ void intel_breadcrumbs_free(struct intel_breadcrumbs *b) kfree(b); } +static void irq_signal_request(struct i915_request *rq, + struct intel_breadcrumbs *b) +{ + if (!__dma_fence_signal(&rq->fence)) + return; + + i915_request_get(rq); + if (llist_add(&rq->signal_node, &b->signaled_requests)) + irq_work_queue(&b->irq_work); +} + static void insert_breadcrumb(struct i915_request *rq) { struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs; @@ -367,17 +368,13 @@ static void insert_breadcrumb(struct i915_request *rq) if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) return; - i915_request_get(rq); - /* * If the request is already completed, we can transfer it * straight onto a signaled list, and queue the irq worker for * its signal completion. */ if (__i915_request_is_complete(rq)) { - if (__signal_request(rq) && - llist_add(&rq->signal_node, &b->signaled_requests)) - irq_work_queue(&b->irq_work); + irq_signal_request(rq, b); return; } @@ -408,6 +405,8 @@ static void insert_breadcrumb(struct i915_request *rq) break; } } + + i915_request_get(rq); list_add_rcu(&rq->signal_link, pos); GEM_BUG_ON(!check_signal_order(ce, rq)); GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)); @@ -448,19 +447,25 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq) void i915_request_cancel_breadcrumb(struct i915_request *rq) { + struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs; struct intel_context *ce = rq->context; bool release; - if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) + spin_lock(&ce->signal_lock); + if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) { + spin_unlock(&ce->signal_lock); return; + } - spin_lock(&ce->signal_lock); list_del_rcu(&rq->signal_link); - release = remove_signaling_context(rq->engine->breadcrumbs, ce); + release = remove_signaling_context(b, ce); spin_unlock(&ce->signal_lock); if (release) intel_context_put(ce); + if (__i915_request_is_complete(rq)) + irq_signal_request(rq, b); + i915_request_put(rq); } diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index cf94525be2c1..db8c66dde655 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -526,16 +526,39 @@ static int init_ggtt(struct i915_ggtt *ggtt) mutex_init(&ggtt->error_mutex); if (ggtt->mappable_end) { - /* Reserve a mappable slot for our lockless error capture */ - ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, - &ggtt->error_capture, - PAGE_SIZE, 0, - I915_COLOR_UNEVICTABLE, - 0, ggtt->mappable_end, - DRM_MM_INSERT_LOW); - if (ret) - return ret; + /* + * Reserve a mappable slot for our lockless error capture. + * + * We strongly prefer taking address 0x0 in order to protect + * other critical buffers against accidental overwrites, + * as writing to address 0 is a very common mistake. + * + * Since 0 may already be in use by the system (e.g. the BIOS + * framebuffer), we let the reservation fail quietly and hope + * 0 remains reserved always. + * + * If we fail to reserve 0, and then fail to find any space + * for an error-capture, remain silent. 
We can afford not + * to reserve an error_capture node as we have fallback + * paths, and we trust that 0 will remain reserved. However, + * the only likely reason for failure to insert is a driver + * bug, which we expect to cause other failures... + */ + ggtt->error_capture.size = I915_GTT_PAGE_SIZE; + ggtt->error_capture.color = I915_COLOR_UNEVICTABLE; + if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture)) + drm_mm_insert_node_in_range(&ggtt->vm.mm, + &ggtt->error_capture, + ggtt->error_capture.size, 0, + ggtt->error_capture.color, + 0, ggtt->mappable_end, + DRM_MM_INSERT_LOW); } + if (drm_mm_node_allocated(&ggtt->error_capture)) + drm_dbg(&ggtt->vm.i915->drm, + "Reserved GGTT:[%llx, %llx] for use by error capture\n", + ggtt->error_capture.start, + ggtt->error_capture.start + ggtt->error_capture.size); /* * The upper portion of the GuC address space has a sizeable hole @@ -548,9 +571,9 @@ static int init_ggtt(struct i915_ggtt *ggtt) /* Clear any non-preallocated blocks */ drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { - drm_dbg_kms(&ggtt->vm.i915->drm, - "clearing unused GTT space: [%lx, %lx]\n", - hole_start, hole_end); + drm_dbg(&ggtt->vm.i915->drm, + "clearing unused GTT space: [%lx, %lx]\n", + hole_start, hole_end); ggtt->vm.clear_range(&ggtt->vm, hole_start, hole_end - hole_start); } diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index 10a865f3dc09..9ed19b8bca60 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -631,24 +631,26 @@ static int flush_lazy_signals(struct i915_active *ref) int __i915_active_wait(struct i915_active *ref, int state) { - int err; - might_sleep(); - if (!i915_active_acquire_if_busy(ref)) - return 0; - /* Any fence added after the wait begins will not be auto-signaled */ - err = flush_lazy_signals(ref); - i915_active_release(ref); - if (err) - return err; + if (i915_active_acquire_if_busy(ref)) { + int err; - if (!i915_active_is_idle(ref) && - ___wait_var_event(ref, i915_active_is_idle(ref), - state, 0, 0, schedule())) - return -EINTR; + err = flush_lazy_signals(ref); + i915_active_release(ref); + if (err) + return err; + if (___wait_var_event(ref, i915_active_is_idle(ref), + state, 0, 0, schedule())) + return -EINTR; + } + + /* + * After the wait is complete, the caller may free the active. + * We have to flush any concurrent retirement before returning. 
+ */ flush_work(&ref->work); return 0; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 632c713227dc..c6964f82a1bb 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1346,7 +1346,7 @@ intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p) { const unsigned int pi = __platform_mask_index(info, p); - return info->platform_mask[pi] & INTEL_SUBPLATFORM_BITS; + return info->platform_mask[pi] & ((1 << INTEL_SUBPLATFORM_BITS) - 1); } static __always_inline bool diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index c53a222e3dec..713770fb2b92 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -1880,7 +1880,7 @@ static int igt_cs_tlb(void *arg) vma = i915_vma_instance(out, vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); - goto out_put_batch; + goto out_put_out; } err = i915_vma_pin(vma, 0, 0, diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c index 302d4e6fc52f..788db043a342 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c +++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c @@ -88,7 +88,11 @@ base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) NVVAL(NV507C, SET_CONVERSION, OFS, 0x64)); } else { PUSH_MTHD(push, NV507C, SET_PROCESSING, - NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE)); + NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE), + + SET_CONVERSION, + NVVAL(NV507C, SET_CONVERSION, GAIN, 0) | + NVVAL(NV507C, SET_CONVERSION, OFS, 0)); } PUSH_MTHD(push, NV507C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8); diff --git a/drivers/gpu/drm/nouveau/dispnv50/base827c.c b/drivers/gpu/drm/nouveau/dispnv50/base827c.c index 18d34096f125..093d4ba6910e 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/base827c.c +++ b/drivers/gpu/drm/nouveau/dispnv50/base827c.c @@ -49,7 +49,11 @@ base827c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) NVVAL(NV827C, SET_CONVERSION, OFS, 0x64)); } else { PUSH_MTHD(push, NV827C, SET_PROCESSING, - NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE)); + NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE), + + SET_CONVERSION, + NVVAL(NV827C, SET_CONVERSION, GAIN, 0) | + NVVAL(NV827C, SET_CONVERSION, OFS, 0)); } PUSH_MTHD(push, NV827C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8, diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index c6367035970e..5f4f09a601d4 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -2663,6 +2663,14 @@ nv50_display_create(struct drm_device *dev) else nouveau_display(dev)->format_modifiers = disp50xx_modifiers; + if (disp->disp->object.oclass >= GK104_DISP) { + dev->mode_config.cursor_width = 256; + dev->mode_config.cursor_height = 256; + } else { + dev->mode_config.cursor_width = 64; + dev->mode_config.cursor_height = 64; + } + /* create crtc objects to represent the hw heads */ if (disp->disp->object.oclass >= GV100_DISP) crtcs = nvif_rd32(&device->object, 0x610060) & 0xff; diff --git a/drivers/gpu/drm/nouveau/dispnv50/head917d.c b/drivers/gpu/drm/nouveau/dispnv50/head917d.c index a5d827403660..ea9f8667305e 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head917d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head917d.c @@ -22,6 +22,7 @@ #include "head.h" #include "core.h" +#include "nvif/push.h" #include <nvif/push507c.h> #include 
<nvhw/class/cl917d.h> @@ -73,6 +74,31 @@ head917d_base(struct nv50_head *head, struct nv50_head_atom *asyh) return 0; } +static int +head917d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh) +{ + struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push; + const int i = head->base.index; + int ret; + + ret = PUSH_WAIT(push, 5); + if (ret) + return ret; + + PUSH_MTHD(push, NV917D, HEAD_SET_CONTROL_CURSOR(i), + NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) | + NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) | + NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) | + NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) | + NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) | + NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, COMPOSITION, ALPHA_BLEND), + + HEAD_SET_OFFSET_CURSOR(i), asyh->curs.offset >> 8); + + PUSH_MTHD(push, NV917D, HEAD_SET_CONTEXT_DMA_CURSOR(i), asyh->curs.handle); + return 0; +} + int head917d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw, struct nv50_head_atom *asyh) @@ -101,7 +127,7 @@ head917d = { .core_clr = head907d_core_clr, .curs_layout = head917d_curs_layout, .curs_format = head507d_curs_format, - .curs_set = head907d_curs_set, + .curs_set = head917d_curs_set, .curs_clr = head907d_curs_clr, .base = head917d_base, .ovly = head907d_ovly, diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index ce451242f79e..271de3a63f21 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -702,6 +702,11 @@ nv50_wndw_init(struct nv50_wndw *wndw) nvif_notify_get(&wndw->notify); } +static const u64 nv50_cursor_format_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID, +}; + int nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev, enum drm_plane_type type, const char *name, int index, @@ -713,6 +718,7 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev, struct nvif_mmu *mmu = &drm->client.mmu; struct nv50_disp *disp = nv50_disp(dev); struct nv50_wndw *wndw; + const u64 *format_modifiers; int nformat; int ret; @@ -728,10 +734,13 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev, for (nformat = 0; format[nformat]; nformat++); - ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw, - format, nformat, - nouveau_display(dev)->format_modifiers, - type, "%s-%d", name, index); + if (type == DRM_PLANE_TYPE_CURSOR) + format_modifiers = nv50_cursor_format_modifiers; + else + format_modifiers = nouveau_display(dev)->format_modifiers; + + ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw, format, nformat, + format_modifiers, type, "%s-%d", name, index); if (ret) { kfree(*pwndw); *pwndw = NULL; diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h index 2a2612d6e1e0..fb223723a38a 100644 --- a/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h +++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h @@ -66,6 +66,10 @@ #define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000) #define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001) #define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002) +#define NV917D_HEAD_SET_OFFSET_CURSOR(a) (0x00000484 + (a)*0x00000300) +#define NV917D_HEAD_SET_OFFSET_CURSOR_ORIGIN 31:0 +#define NV917D_HEAD_SET_CONTEXT_DMA_CURSOR(a) (0x0000048C + (a)*0x00000300) +#define 
NV917D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE 31:0 #define NV917D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300) #define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0 #define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000) diff --git a/drivers/gpu/drm/nouveau/include/nvif/push.h b/drivers/gpu/drm/nouveau/include/nvif/push.h index 168d7694ede5..6d3a8a3d2087 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/push.h +++ b/drivers/gpu/drm/nouveau/include/nvif/push.h @@ -123,131 +123,131 @@ PUSH_KICK(struct nvif_push *push) } while(0) #endif -#define PUSH_1(X,f,ds,n,c,o,p,s,mA,dA) do { \ - PUSH_##o##_HDR((p), s, mA, (c)+(n)); \ - PUSH_##f(X, (p), X##mA, 1, o, (dA), ds, ""); \ +#define PUSH_1(X,f,ds,n,o,p,s,mA,dA) do { \ + PUSH_##o##_HDR((p), s, mA, (ds)+(n)); \ + PUSH_##f(X, (p), X##mA, 1, o, (dA), ds, ""); \ } while(0) -#define PUSH_2(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ - PUSH_ASSERT((mB) - (mA) == (1?PUSH_##o##_INC), "mthd1"); \ - PUSH_1(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ - PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ +#define PUSH_2(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ + PUSH_ASSERT((mB) - (mA) == (1?PUSH_##o##_INC), "mthd1"); \ + PUSH_1(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ + PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ } while(0) -#define PUSH_3(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ - PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd2"); \ - PUSH_2(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ - PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ +#define PUSH_3(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ + PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd2"); \ + PUSH_2(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ + PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ } while(0) -#define PUSH_4(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ - PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd3"); \ - PUSH_3(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ - PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ +#define PUSH_4(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ + PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd3"); \ + PUSH_3(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ + PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ } while(0) -#define PUSH_5(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ - PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd4"); \ - PUSH_4(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ - PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ +#define PUSH_5(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ + PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd4"); \ + PUSH_4(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ + PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ } while(0) -#define PUSH_6(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ - PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd5"); \ - PUSH_5(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ - PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ +#define PUSH_6(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ + PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd5"); \ + PUSH_5(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ + PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ } while(0) -#define PUSH_7(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ - PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd6"); \ - PUSH_6(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ - PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ +#define PUSH_7(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) 
do { \ + PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd6"); \ + PUSH_6(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ + PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ } while(0) -#define PUSH_8(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ - PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd7"); \ - PUSH_7(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ - PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ +#define PUSH_8(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ + PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd7"); \ + PUSH_7(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ + PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ } while(0) -#define PUSH_9(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ - PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd8"); \ - PUSH_8(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ - PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ +#define PUSH_9(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ + PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd8"); \ + PUSH_8(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ + PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ } while(0) -#define PUSH_10(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ - PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd9"); \ - PUSH_9(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ - PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ +#define PUSH_10(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ + PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd9"); \ + PUSH_9(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ + PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ } while(0) -#define PUSH_1D(X,o,p,s,mA,dA) \ - PUSH_1(X, DATA_, 1, 1, 0, o, (p), s, X##mA, (dA)) -#define PUSH_2D(X,o,p,s,mA,dA,mB,dB) \ - PUSH_2(X, DATA_, 1, 1, 0, o, (p), s, X##mB, (dB), \ - X##mA, (dA)) -#define PUSH_3D(X,o,p,s,mA,dA,mB,dB,mC,dC) \ - PUSH_3(X, DATA_, 1, 1, 0, o, (p), s, X##mC, (dC), \ - X##mB, (dB), \ - X##mA, (dA)) -#define PUSH_4D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD) \ - PUSH_4(X, DATA_, 1, 1, 0, o, (p), s, X##mD, (dD), \ - X##mC, (dC), \ - X##mB, (dB), \ - X##mA, (dA)) -#define PUSH_5D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE) \ - PUSH_5(X, DATA_, 1, 1, 0, o, (p), s, X##mE, (dE), \ - X##mD, (dD), \ - X##mC, (dC), \ - X##mB, (dB), \ - X##mA, (dA)) +#define PUSH_1D(X,o,p,s,mA,dA) \ + PUSH_1(X, DATA_, 1, 0, o, (p), s, X##mA, (dA)) +#define PUSH_2D(X,o,p,s,mA,dA,mB,dB) \ + PUSH_2(X, DATA_, 1, 0, o, (p), s, X##mB, (dB), \ + X##mA, (dA)) +#define PUSH_3D(X,o,p,s,mA,dA,mB,dB,mC,dC) \ + PUSH_3(X, DATA_, 1, 0, o, (p), s, X##mC, (dC), \ + X##mB, (dB), \ + X##mA, (dA)) +#define PUSH_4D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD) \ + PUSH_4(X, DATA_, 1, 0, o, (p), s, X##mD, (dD), \ + X##mC, (dC), \ + X##mB, (dB), \ + X##mA, (dA)) +#define PUSH_5D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE) \ + PUSH_5(X, DATA_, 1, 0, o, (p), s, X##mE, (dE), \ + X##mD, (dD), \ + X##mC, (dC), \ + X##mB, (dB), \ + X##mA, (dA)) #define PUSH_6D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF) \ - PUSH_6(X, DATA_, 1, 1, 0, o, (p), s, X##mF, (dF), \ - X##mE, (dE), \ - X##mD, (dD), \ - X##mC, (dC), \ - X##mB, (dB), \ - X##mA, (dA)) + PUSH_6(X, DATA_, 1, 0, o, (p), s, X##mF, (dF), \ + X##mE, (dE), \ + X##mD, (dD), \ + X##mC, (dC), \ + X##mB, (dB), \ + X##mA, (dA)) #define PUSH_7D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG) \ - PUSH_7(X, DATA_, 1, 1, 0, o, (p), s, X##mG, (dG), \ - X##mF, (dF), \ - X##mE, (dE), \ - X##mD, (dD), \ - X##mC, (dC), \ - X##mB, (dB), \ - X##mA, (dA)) + PUSH_7(X, DATA_, 1, 0, o, (p), s, X##mG, (dG), \ + X##mF, 
(dF), \ + X##mE, (dE), \ + X##mD, (dD), \ + X##mC, (dC), \ + X##mB, (dB), \ + X##mA, (dA)) #define PUSH_8D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH) \ - PUSH_8(X, DATA_, 1, 1, 0, o, (p), s, X##mH, (dH), \ - X##mG, (dG), \ - X##mF, (dF), \ - X##mE, (dE), \ - X##mD, (dD), \ - X##mC, (dC), \ - X##mB, (dB), \ - X##mA, (dA)) + PUSH_8(X, DATA_, 1, 0, o, (p), s, X##mH, (dH), \ + X##mG, (dG), \ + X##mF, (dF), \ + X##mE, (dE), \ + X##mD, (dD), \ + X##mC, (dC), \ + X##mB, (dB), \ + X##mA, (dA)) #define PUSH_9D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI) \ - PUSH_9(X, DATA_, 1, 1, 0, o, (p), s, X##mI, (dI), \ - X##mH, (dH), \ - X##mG, (dG), \ - X##mF, (dF), \ - X##mE, (dE), \ - X##mD, (dD), \ - X##mC, (dC), \ - X##mB, (dB), \ - X##mA, (dA)) + PUSH_9(X, DATA_, 1, 0, o, (p), s, X##mI, (dI), \ + X##mH, (dH), \ + X##mG, (dG), \ + X##mF, (dF), \ + X##mE, (dE), \ + X##mD, (dD), \ + X##mC, (dC), \ + X##mB, (dB), \ + X##mA, (dA)) #define PUSH_10D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI,mJ,dJ) \ - PUSH_10(X, DATA_, 1, 1, 0, o, (p), s, X##mJ, (dJ), \ - X##mI, (dI), \ - X##mH, (dH), \ - X##mG, (dG), \ - X##mF, (dF), \ - X##mE, (dE), \ - X##mD, (dD), \ - X##mC, (dC), \ - X##mB, (dB), \ - X##mA, (dA)) + PUSH_10(X, DATA_, 1, 0, o, (p), s, X##mJ, (dJ), \ + X##mI, (dI), \ + X##mH, (dH), \ + X##mG, (dG), \ + X##mF, (dF), \ + X##mE, (dE), \ + X##mD, (dD), \ + X##mC, (dC), \ + X##mB, (dB), \ + X##mA, (dA)) -#define PUSH_1P(X,o,p,s,mA,dp,ds) \ - PUSH_1(X, DATAp, ds, ds, 0, o, (p), s, X##mA, (dp)) -#define PUSH_2P(X,o,p,s,mA,dA,mB,dp,ds) \ - PUSH_2(X, DATAp, ds, ds, 0, o, (p), s, X##mB, (dp), \ - X##mA, (dA)) -#define PUSH_3P(X,o,p,s,mA,dA,mB,dB,mC,dp,ds) \ - PUSH_3(X, DATAp, ds, ds, 0, o, (p), s, X##mC, (dp), \ - X##mB, (dB), \ - X##mA, (dA)) +#define PUSH_1P(X,o,p,s,mA,dp,ds) \ + PUSH_1(X, DATAp, ds, 0, o, (p), s, X##mA, (dp)) +#define PUSH_2P(X,o,p,s,mA,dA,mB,dp,ds) \ + PUSH_2(X, DATAp, ds, 0, o, (p), s, X##mB, (dp), \ + X##mA, (dA)) +#define PUSH_3P(X,o,p,s,mA,dA,mB,dB,mC,dp,ds) \ + PUSH_3(X, DATAp, ds, 0, o, (p), s, X##mC, (dp), \ + X##mB, (dB), \ + X##mA, (dA)) #define PUSH_(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,IMPL,...) IMPL #define PUSH(A...) 
PUSH_(A, PUSH_10P, PUSH_10D, \ diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index c85b1af06b7b..7ea367a5444d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -547,7 +547,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm; - int i; + int i, j; if (!ttm_dma) return; @@ -556,10 +556,21 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo) if (nvbo->force_coherent) return; - for (i = 0; i < ttm_dma->num_pages; i++) + for (i = 0; i < ttm_dma->num_pages; ++i) { + struct page *p = ttm_dma->pages[i]; + size_t num_pages = 1; + + for (j = i + 1; j < ttm_dma->num_pages; ++j) { + if (++p != ttm_dma->pages[j]) + break; + + ++num_pages; + } dma_sync_single_for_device(drm->dev->dev, ttm_dma->dma_address[i], - PAGE_SIZE, DMA_TO_DEVICE); + num_pages * PAGE_SIZE, DMA_TO_DEVICE); + i += num_pages; + } } void @@ -567,7 +578,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm; - int i; + int i, j; if (!ttm_dma) return; @@ -576,9 +587,21 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo) if (nvbo->force_coherent) return; - for (i = 0; i < ttm_dma->num_pages; i++) + for (i = 0; i < ttm_dma->num_pages; ++i) { + struct page *p = ttm_dma->pages[i]; + size_t num_pages = 1; + + for (j = i + 1; j < ttm_dma->num_pages; ++j) { + if (++p != ttm_dma->pages[j]) + break; + + ++num_pages; + } + dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i], - PAGE_SIZE, DMA_FROM_DEVICE); + num_pages * PAGE_SIZE, DMA_FROM_DEVICE); + i += num_pages; + } } void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo) diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index 4f69e4c3dafd..1c3f890377d2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -315,6 +315,10 @@ nouveau_svmm_init(struct drm_device *dev, void *data, struct drm_nouveau_svm_init *args = data; int ret; + /* We need to fail if svm is disabled */ + if (!cli->drm->svm) + return -ENOSYS; + /* Allocate tracking for SVM-enabled VMM. */ if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL))) return -ENOMEM; diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c index 11e0313db0ea..74bf1c84b637 100644 --- a/drivers/gpu/drm/ttm/ttm_pool.c +++ b/drivers/gpu/drm/ttm/ttm_pool.c @@ -84,7 +84,7 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags, * put_page() on a TTM allocated page is illegal. */ if (order) - gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | + gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM; if (!pool->use_dma_alloc) { diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index cccd341e5d67..3b722252d1fb 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c @@ -620,11 +620,11 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) * for now we just allocate globally. */ if (!hvs->hvs5) - /* 96kB */ - drm_mm_init(&hvs->lbm_mm, 0, 96 * 1024); + /* 48k words of 2x12-bit pixels */ + drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024); else - /* 70k words */ - drm_mm_init(&hvs->lbm_mm, 0, 70 * 2 * 1024); + /* 60k words of 4x12-bit pixels */ + drm_mm_init(&hvs->lbm_mm, 0, 60 * 1024); /* Upload filter kernels. 
We only have the one for now, so we * keep it around for the lifetime of the driver. diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 6b39cc2ca18d..5612cab55227 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -437,6 +437,7 @@ static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst) static u32 vc4_lbm_size(struct drm_plane_state *state) { struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); + struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev); u32 pix_per_line; u32 lbm; @@ -472,7 +473,11 @@ static u32 vc4_lbm_size(struct drm_plane_state *state) lbm = pix_per_line * 16; } - lbm = roundup(lbm, 32); + /* Align it to 64 or 128 (hvs5) bytes */ + lbm = roundup(lbm, vc4->hvs->hvs5 ? 128 : 64); + + /* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */ + lbm /= vc4->hvs->hvs5 ? 4 : 2; return lbm; } @@ -912,9 +917,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane, if (!vc4_state->is_unity) { vc4_dlist_write(vc4_state, VC4_SET_FIELD(vc4_state->crtc_w, - SCALER_POS1_SCL_WIDTH) | + SCALER5_POS1_SCL_WIDTH) | VC4_SET_FIELD(vc4_state->crtc_h, - SCALER_POS1_SCL_HEIGHT)); + SCALER5_POS1_SCL_HEIGHT)); } /* Position Word 2: Source Image Size */ |
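
A note on the nouveau_bo.c sync change above: the rewritten loops appear aimed at flushing each run of physically contiguous pages with a single dma_sync call instead of one call per page. Below is a minimal, standalone sketch of that coalescing pattern under simplified assumptions — plain page-frame numbers stand in for ttm_dma->pages[] and a printf stand-in replaces dma_sync_single_for_device(); it is an illustration of the technique, not the driver's code.

/*
 * Illustrative sketch only: coalesce runs of adjacent pages so each
 * run is flushed with one sync call. sync_range() is a hypothetical
 * stand-in for dma_sync_single_for_device().
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096ul

static void sync_range(unsigned long addr, size_t len)
{
	printf("sync 0x%lx + %zu bytes\n", addr, len);
}

static void sync_pages(const unsigned long *pfn, size_t num_pages)
{
	size_t i = 0;

	while (i < num_pages) {
		size_t run = 1;

		/* extend the run while the next page is physically adjacent */
		while (i + run < num_pages && pfn[i + run] == pfn[i] + run)
			run++;

		/* one sync covers the whole contiguous run */
		sync_range(pfn[i] * PAGE_SIZE, run * PAGE_SIZE);
		i += run;
	}
}

int main(void)
{
	/* pages 10, 11, 12 are contiguous; 40 and 77 are isolated */
	unsigned long pfn[] = { 10, 11, 12, 40, 77 };

	sync_pages(pfn, sizeof(pfn) / sizeof(pfn[0]));
	return 0;
}

Batching the sync over each contiguous run keeps the synced region aligned with how the pages were originally mapped, which is what per-page PAGE_SIZE syncs did not guarantee.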