Diffstat (limited to 'drivers/gpu/drm/amd/display')
142 files changed, 9089 insertions, 1393 deletions
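The headline addition in this series is HDCP support in DC: a new CONFIG_DRM_AMD_DC_HDCP Kconfig symbol, a modules/hdcp library wired into the Makefiles, and the amdgpu_dm_hdcp.c glue shown below. As a quick orientation, a minimal .config fragment that would build all of the display options touched here might look like the following — a sketch only; the symbol names are taken from the Kconfig hunks below, but the =y values and the assumption that the DCN prerequisites are already satisfied are illustrative, not part of the patch:

# AMD display core plus the options touched by this series.
# Per the Kconfig hunks below, DRM_AMD_DC_HDCP depends only on
# DRM_AMD_DC, while the DCN 2.x options also require X86 and the
# preceding DCN generation(s).
CONFIG_DRM_AMD_DC=y
CONFIG_DRM_AMD_DC_DCN2_0=y
CONFIG_DRM_AMD_DC_DCN2_1=y
CONFIG_DRM_AMD_DC_HDCP=y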
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index 71991a28a775..313183b80032 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -23,16 +23,16 @@ config DRM_AMD_DC_DCN2_0 depends on DRM_AMD_DC && X86 depends on DRM_AMD_DC_DCN1_0 help - Choose this option if you want to have - Navi support for display engine + Choose this option if you want to have + Navi support for display engine config DRM_AMD_DC_DCN2_1 - bool "DCN 2.1 family" - depends on DRM_AMD_DC && X86 - depends on DRM_AMD_DC_DCN2_0 - help - Choose this option if you want to have - Renoir support for display engine + bool "DCN 2.1 family" + depends on DRM_AMD_DC && X86 + depends on DRM_AMD_DC_DCN2_0 + help + Choose this option if you want to have + Renoir support for display engine config DRM_AMD_DC_DSC_SUPPORT bool "DSC support" @@ -41,8 +41,16 @@ config DRM_AMD_DC_DSC_SUPPORT depends on DRM_AMD_DC_DCN1_0 depends on DRM_AMD_DC_DCN2_0 help - Choose this option if you want to have - Dynamic Stream Compression support + Choose this option if you want to have + Dynamic Stream Compression support + +config DRM_AMD_DC_HDCP + bool "Enable HDCP support in DC" + depends on DRM_AMD_DC + help + Choose this option + if you want to support + HDCP authentication config DEBUG_KERNEL_DC bool "Enable kgdb break in DC" diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile index 496cee000f10..36b3d6a5d04d 100644 --- a/drivers/gpu/drm/amd/display/Makefile +++ b/drivers/gpu/drm/amd/display/Makefile @@ -34,12 +34,19 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/info_packet subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/power +ifdef CONFIG_DRM_AMD_DC_HDCP +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/hdcp +endif #TODO: remove when Timing Sync feature is complete subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0 DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet modules/power +ifdef CONFIG_DRM_AMD_DC_HDCP +DAL_LIBS += modules/hdcp +endif + AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS))) include $(AMD_DAL) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile index 94911871eb9b..9a3b7bf8ab0b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile @@ -31,6 +31,10 @@ ifneq ($(CONFIG_DRM_AMD_DC),) AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o endif +ifdef CONFIG_DRM_AMD_DC_HDCP +AMDGPUDM += amdgpu_dm_hdcp.o +endif + ifneq ($(CONFIG_DEBUG_FS),) AMDGPUDM += amdgpu_dm_crc.o amdgpu_dm_debugfs.o endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 4139f129eafb..7aac9568d3be 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -37,6 +37,9 @@ #include "amdgpu_ucode.h" #include "atom.h" #include "amdgpu_dm.h" +#ifdef CONFIG_DRM_AMD_DC_HDCP +#include "amdgpu_dm_hdcp.h" +#endif #include "amdgpu_pm.h" #include "amd_shared.h" @@ -67,6 +70,7 @@ #include <drm/drm_edid.h> #include <drm/drm_vblank.h> #include <drm/drm_audio_component.h> +#include <drm/drm_hdcp.h> #if defined(CONFIG_DRM_AMD_DC_DCN1_0) #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" @@ -143,6 +147,12 @@ static 
int amdgpu_dm_atomic_check(struct drm_device *dev, static void handle_cursor_update(struct drm_plane *plane, struct drm_plane_state *old_plane_state); +static void amdgpu_dm_set_psr_caps(struct dc_link *link); +static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream); +static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream); +static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream); + + /* * dm_vblank_get_counter * @@ -263,6 +273,13 @@ static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state) dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; } +/** + * dm_pflip_high_irq() - Handle pageflip interrupt + * @interrupt_params: ignored + * + * Handles the pageflip interrupt by notifying all interested parties + * that the pageflip has been completed. + */ static void dm_pflip_high_irq(void *interrupt_params) { struct amdgpu_crtc *amdgpu_crtc; @@ -407,6 +424,13 @@ static void dm_vupdate_high_irq(void *interrupt_params) } } +/** + * dm_crtc_high_irq() - Handles CRTC interrupt + * @interrupt_params: ignored + * + * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK + * event handler. + */ static void dm_crtc_high_irq(void *interrupt_params) { struct common_irq_params *irq_params = interrupt_params; @@ -646,11 +670,18 @@ void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin) static int amdgpu_dm_init(struct amdgpu_device *adev) { struct dc_init_data init_data; +#ifdef CONFIG_DRM_AMD_DC_HDCP + struct dc_callback_init init_params; +#endif + adev->dm.ddev = adev->ddev; adev->dm.adev = adev; /* Zero all the fields */ memset(&init_data, 0, sizeof(init_data)); +#ifdef CONFIG_DRM_AMD_DC_HDCP + memset(&init_params, 0, sizeof(init_params)); +#endif mutex_init(&adev->dm.dc_lock); mutex_init(&adev->dm.audio_lock); @@ -697,6 +728,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK) init_data.flags.multi_mon_pp_mclk_switch = true; + if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK) + init_data.flags.disable_fractional_pwm = true; + init_data.flags.power_down_display_on_boot = true; #ifdef CONFIG_DRM_AMD_DC_DCN2_0 @@ -713,6 +747,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) goto error; } + dc_hardware_init(adev->dm.dc); + adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); if (!adev->dm.freesync_module) { DRM_ERROR( @@ -723,6 +759,18 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) amdgpu_dm_init_color_mod(); +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (adev->asic_type >= CHIP_RAVEN) { + adev->dm.hdcp_workqueue = hdcp_create_workqueue(&adev->psp, &init_params.cp_psp, adev->dm.dc); + + if (!adev->dm.hdcp_workqueue) + DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n"); + else + DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue); + + dc_init_callbacks(adev->dm.dc, &init_params); + } +#endif if (amdgpu_dm_initialize_drm_device(adev)) { DRM_ERROR( "amdgpu: failed to initialize sw for display support.\n"); @@ -764,6 +812,16 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) amdgpu_dm_destroy_drm_device(&adev->dm); +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (adev->dm.hdcp_workqueue) { + hdcp_destroy(adev->dm.hdcp_workqueue); + adev->dm.hdcp_workqueue = NULL; + } + + if (adev->dm.dc) + dc_deinit_callbacks(adev->dm.dc); +#endif + /* DC Destroy TODO: Replace destroy DAL */ if (adev->dm.dc) dc_destroy(&adev->dm.dc); @@ -897,27 +955,29 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev) { 
struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; + struct drm_connector_list_iter iter; int ret = 0; - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type == dc_connection_mst_branch && aconnector->mst_mgr.aux) { DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n", - aconnector, aconnector->base.base.id); + aconnector, + aconnector->base.base.id); ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); if (ret < 0) { DRM_ERROR("DM_MST: Failed to start MST\n"); - ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single; - return ret; - } + aconnector->dc_link->type = + dc_connection_single; + break; } + } } + drm_connector_list_iter_end(&iter); - drm_modeset_unlock(&dev->mode_config.connection_mutex); return ret; } @@ -940,6 +1000,11 @@ static int dm_late_init(void *handle) params.backlight_lut_array_size = 16; params.backlight_lut_array = linear_lut; + /* Min backlight level after ABM reduction, Don't allow below 1% + * 0xFFFF x 0.01 = 0x28F + */ + params.min_abm_backlight = 0x28F; + /* todo will enable for navi10 */ if (adev->asic_type <= CHIP_RAVEN) { ret = dmcu_load_iram(dmcu, params); @@ -955,14 +1020,13 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) { struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct drm_dp_mst_topology_mgr *mgr; int ret; bool need_hotplug = false; - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - - list_for_each_entry(connector, &dev->mode_config.connector_list, - head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type != dc_connection_mst_branch || aconnector->mst_port) @@ -973,15 +1037,14 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) if (suspend) { drm_dp_mst_topology_mgr_suspend(mgr); } else { - ret = drm_dp_mst_topology_mgr_resume(mgr); + ret = drm_dp_mst_topology_mgr_resume(mgr, true); if (ret < 0) { drm_dp_mst_topology_mgr_set_mst(mgr, false); need_hotplug = true; } } } - - drm_modeset_unlock(&dev->mode_config.connection_mutex); + drm_connector_list_iter_end(&iter); if (need_hotplug) drm_kms_helper_hotplug_event(dev); @@ -989,7 +1052,7 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) /** * dm_hw_init() - Initialize DC device - * @handle: The base driver device containing the amdpgu_dm device. + * @handle: The base driver device containing the amdgpu_dm device. * * Initialize the &struct amdgpu_display_manager device. This involves calling * the initializers of each DM component, then populating the struct with them. @@ -1019,7 +1082,7 @@ static int dm_hw_init(void *handle) /** * dm_hw_fini() - Teardown DC device - * @handle: The base driver device containing the amdpgu_dm device. + * @handle: The base driver device containing the amdgpu_dm device. * * Teardown components within &struct amdgpu_display_manager that require * cleanup. 
This involves cleaning up the DRM device, DC, and any modules that @@ -1163,6 +1226,7 @@ static int dm_resume(void *handle) struct amdgpu_display_manager *dm = &adev->dm; struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct drm_crtc *crtc; struct drm_crtc_state *new_crtc_state; struct dm_crtc_state *dm_new_crtc_state; @@ -1185,17 +1249,18 @@ static int dm_resume(void *handle) /* program HPD filter */ dc_resume(dm->dc); - /* On resume we need to rewrite the MSTM control bits to enamble MST*/ - s3_handle_mst(ddev, false); - /* * early enable HPD Rx IRQ, should be done before set mode as short * pulse interrupts are used for MST */ amdgpu_dm_irq_resume_early(adev); + /* On resume we need to rewrite the MSTM control bits to enable MST*/ + s3_handle_mst(ddev, false); + /* Do detection*/ - list_for_each_entry(connector, &ddev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(ddev, &iter); + drm_for_each_connector_iter(connector, &iter) { aconnector = to_amdgpu_dm_connector(connector); /* @@ -1223,6 +1288,7 @@ static int dm_resume(void *handle) amdgpu_dm_update_connector_after_detect(aconnector); mutex_unlock(&aconnector->hpd_lock); } + drm_connector_list_iter_end(&iter); /* Force mode set in atomic commit */ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) @@ -1438,6 +1504,11 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; aconnector->edid = NULL; +#ifdef CONFIG_DRM_AMD_DC_HDCP + /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */ + if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) + connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; +#endif } mutex_unlock(&dev->mode_config.mutex); @@ -1452,6 +1523,9 @@ static void handle_hpd_irq(void *param) struct drm_connector *connector = &aconnector->base; struct drm_device *dev = connector->dev; enum dc_connection_type new_connection_type = dc_connection_none; +#ifdef CONFIG_DRM_AMD_DC_HDCP + struct amdgpu_device *adev = dev->dev_private; +#endif /* * In case of failure or MST no need to update connector status or notify the OS @@ -1459,6 +1533,10 @@ static void handle_hpd_irq(void *param) */ mutex_lock(&aconnector->hpd_lock); +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (adev->asic_type >= CHIP_RAVEN) + hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); +#endif if (aconnector->fake_enable) aconnector->fake_enable = false; @@ -1577,6 +1655,12 @@ static void handle_hpd_rx_irq(void *param) struct dc_link *dc_link = aconnector->dc_link; bool is_mst_root_connector = aconnector->mst_mgr.mst_state; enum dc_connection_type new_connection_type = dc_connection_none; +#ifdef CONFIG_DRM_AMD_DC_HDCP + union hpd_irq_data hpd_irq_data; + struct amdgpu_device *adev = dev->dev_private; + + memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); +#endif /* * TODO:Temporary add mutex to protect hpd interrupt not have a gpio @@ -1586,7 +1670,12 @@ static void handle_hpd_rx_irq(void *param) if (dc_link->type != dc_connection_mst_branch) mutex_lock(&aconnector->hpd_lock); + +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) && +#else if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) && +#endif !is_mst_root_connector) { /* Downstream Port status changed. 
*/ if (!dc_link_detect_sink(dc_link, &new_connection_type)) @@ -1621,6 +1710,10 @@ static void handle_hpd_rx_irq(void *param) drm_kms_helper_hotplug_event(dev); } } +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) + hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index); +#endif if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) || (dc_link->type == dc_connection_mst_branch)) dm_handle_hpd_rx_irq(aconnector); @@ -2334,6 +2427,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } else if (dc_link_detect(link, DETECT_REASON_BOOT)) { amdgpu_dm_update_connector_after_detect(aconnector); register_backlight_device(dm, link); + if (amdgpu_dc_feature_mask & DC_PSR_MASK) + amdgpu_dm_set_psr_caps(link); } @@ -3311,8 +3406,12 @@ static void fill_stream_properties_from_drm_display_mode( { struct dc_crtc_timing *timing_out = &stream->timing; const struct drm_display_info *info = &connector->display_info; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct hdmi_vendor_infoframe hv_frame; + struct hdmi_avi_infoframe avi_frame; - memset(timing_out, 0, sizeof(struct dc_crtc_timing)); + memset(&hv_frame, 0, sizeof(hv_frame)); + memset(&avi_frame, 0, sizeof(avi_frame)); timing_out->h_border_left = 0; timing_out->h_border_right = 0; @@ -3322,6 +3421,9 @@ static void fill_stream_properties_from_drm_display_mode( if (drm_mode_is_420_only(info, mode_in) && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; + else if (drm_mode_is_420_also(info, mode_in) + && aconnector->force_yuv420_output) + timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444) && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; @@ -3346,6 +3448,13 @@ static void fill_stream_properties_from_drm_display_mode( timing_out->flags.VSYNC_POSITIVE_POLARITY = 1; } + if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { + drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in); + timing_out->vic = avi_frame.video_code; + drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in); + timing_out->hdmi_vic = hv_frame.vic; + } + timing_out->h_addressable = mode_in->crtc_hdisplay; timing_out->h_total = mode_in->crtc_htotal; timing_out->h_sync_width = @@ -3566,6 +3675,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, stream->dm_stream_context = aconnector; + stream->timing.flags.LTE_340MCSC_SCRAMBLE = + drm_connector->display_info.hdmi.scdc.scrambling.low_rates; + list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { /* Search for preferred mode */ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { @@ -3621,8 +3733,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, dc_link_get_link_cap(aconnector->dc_link)); if (dsc_caps.is_dsc_supported) - if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc, + if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], &dsc_caps, + aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override, link_bandwidth_kbps, &stream->timing, &stream->timing.dsc_cfg)) @@ -3639,6 +3752,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, update_stream_signal(stream, sink); + if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) + mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, 
false, false); + if (stream->link->psr_feature_enabled) { + struct dc *core_dc = stream->link->ctx->dc; + + if (dc_is_dmcu_initialized(core_dc)) { + struct dmcu *dmcu = core_dc->res_pool->dmcu; + + stream->psr_version = dmcu->dmcu_version.psr_version; + mod_build_vsc_infopacket(stream, &stream->vsc_infopacket); + } + } finish: dc_sink_release(sink); @@ -4114,8 +4239,8 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec result = MODE_OK; else DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n", - mode->vdisplay, mode->hdisplay, + mode->vdisplay, mode->clock, dc_result); @@ -4494,7 +4619,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, tv.num_shared = 1; list_add(&tv.head, &list); - r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL, true); + r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL); if (r) { dev_err(adev->dev, "fail to reserve bo (%d)\n", r); return r; @@ -4837,7 +4962,13 @@ static int to_drm_connector_type(enum signal_type st) static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector) { - return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]); + struct drm_encoder *encoder; + + /* There is only one encoder per connector */ + drm_connector_for_each_possible_encoder(connector, encoder) + return encoder; + + return NULL; } static void amdgpu_dm_get_native_mode(struct drm_connector *connector) @@ -5082,6 +5213,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, drm_connector_attach_vrr_capable_property( &aconnector->base); +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (adev->asic_type >= CHIP_RAVEN) + drm_connector_attach_content_protection_property(&aconnector->base, false); +#endif } } @@ -5324,6 +5459,53 @@ is_scaling_state_different(const struct dm_connector_state *dm_state, return false; } +#ifdef CONFIG_DRM_AMD_DC_HDCP +static bool is_content_protection_different(struct drm_connector_state *state, + const struct drm_connector_state *old_state, + const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + + /* CP is being re enabled, ignore this */ + if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && + state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { + state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; + return false; + } + + /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */ + if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && + state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) + state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + + /* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled + * hot-plug, headless s3, dpms + */ + if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON && + aconnector->dc_sink != NULL) + return true; + + if (old_state->content_protection == state->content_protection) + return false; + + if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) + return true; + + return false; +} + +static void update_content_protection(struct drm_connector_state *state, const struct drm_connector *connector, + struct hdcp_workqueue *hdcp_w) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + + if (state->content_protection 
== DRM_MODE_CONTENT_PROTECTION_DESIRED) + hdcp_add_display(hdcp_w, aconnector->dc_link->link_index, aconnector); + else if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) + hdcp_remove_display(hdcp_w, aconnector->dc_link->link_index, aconnector->base.index); + +} +#endif static void remove_stream(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc, struct dc_stream_state *stream) @@ -5665,6 +5847,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, uint32_t target_vblank, last_flip_vblank; bool vrr_active = amdgpu_dm_vrr_active(acrtc_state); bool pflip_present = false; + bool swizzle = true; struct { struct dc_surface_update surface_updates[MAX_SURFACES]; struct dc_plane_info plane_infos[MAX_SURFACES]; @@ -5710,6 +5893,9 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, dc_plane = dm_new_plane_state->dc_state; + if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle) + swizzle = false; + bundle->surface_updates[planes_count].surface = dc_plane; if (new_pcrtc_state->color_mgmt_changed) { bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction; @@ -5864,6 +6050,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, /* Update the planes if changed or disable if we don't have any. */ if ((planes_count || acrtc_state->active_planes == 0) && acrtc_state->stream) { + bundle->stream_update.stream = acrtc_state->stream; if (new_pcrtc_state->mode_changed) { bundle->stream_update.src = acrtc_state->stream->src; bundle->stream_update.dst = acrtc_state->stream->dst; @@ -5899,14 +6086,29 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, &acrtc_state->vrr_params.adjust); spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); } - mutex_lock(&dm->dc_lock); + if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && + acrtc_state->stream->link->psr_allow_active) + amdgpu_dm_psr_disable(acrtc_state->stream); + dc_commit_updates_for_stream(dm->dc, bundle->surface_updates, planes_count, acrtc_state->stream, &bundle->stream_update, dc_state); + + if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && + acrtc_state->stream->psr_version && + !acrtc_state->stream->link->psr_feature_enabled) + amdgpu_dm_link_setup_psr(acrtc_state->stream); + else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) && + acrtc_state->stream->link->psr_feature_enabled && + !acrtc_state->stream->link->psr_allow_active && + swizzle) { + amdgpu_dm_psr_enable(acrtc_state->stream); + } + mutex_unlock(&dm->dc_lock); } @@ -6215,10 +6417,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) crtc->hwmode = new_crtc_state->mode; } else if (modereset_required(new_crtc_state)) { DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc); - /* i.e. 
reset mode */ - if (dm_old_crtc_state->stream) + if (dm_old_crtc_state->stream) { + if (dm_old_crtc_state->stream->link->psr_allow_active) + amdgpu_dm_psr_disable(dm_old_crtc_state->stream); + remove_stream(adev, acrtc, dm_old_crtc_state->stream); + } } } /* for_each_crtc_in_state() */ @@ -6248,6 +6453,30 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) acrtc->otg_inst = status->primary_otg_inst; } } +#ifdef CONFIG_DRM_AMD_DC_HDCP + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { + struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + + new_crtc_state = NULL; + + if (acrtc) + new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); + + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + + if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL && + connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) { + hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); + new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + continue; + } + + if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue)) + update_content_protection(new_con_state, connector, adev->dm.hdcp_workqueue); + } +#endif /* Handle connector state changes */ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { @@ -6287,9 +6516,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) if (!scaling_changed && !abm_changed && !hdr_changed) continue; + stream_update.stream = dm_new_crtc_state->stream; if (scaling_changed) { update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode, - dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream); + dm_new_con_state, dm_new_crtc_state->stream); stream_update.src = dm_new_crtc_state->stream->src; stream_update.dst = dm_new_crtc_state->stream->dst; @@ -7158,7 +7388,7 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, status = dc_stream_get_status_from_state(old_dm_state->context, new_dm_crtc_state->stream); - + stream_update.stream = new_dm_crtc_state->stream; /* * TODO: DC modifies the surface during this call so we need * to lock here - find a way to do this without locking. @@ -7569,3 +7799,92 @@ update: freesync_capable); } +static void amdgpu_dm_set_psr_caps(struct dc_link *link) +{ + uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE]; + + if (!(link->connector_signal & SIGNAL_TYPE_EDP)) + return; + if (link->type == dc_connection_none) + return; + if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT, + dpcd_data, sizeof(dpcd_data))) { + link->psr_feature_enabled = dpcd_data[0] ? 
true:false; + DRM_INFO("PSR support:%d\n", link->psr_feature_enabled); + } +} + +/* + * amdgpu_dm_link_setup_psr() - configure psr link + * @stream: stream state + * + * Return: true if success + */ +static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) +{ + struct dc_link *link = NULL; + struct psr_config psr_config = {0}; + struct psr_context psr_context = {0}; + struct dc *dc = NULL; + bool ret = false; + + if (stream == NULL) + return false; + + link = stream->link; + dc = link->ctx->dc; + + psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version; + + if (psr_config.psr_version > 0) { + psr_config.psr_exit_link_training_required = 0x1; + psr_config.psr_frame_capture_indication_req = 0; + psr_config.psr_rfb_setup_time = 0x37; + psr_config.psr_sdp_transmit_line_num_deadline = 0x20; + psr_config.allow_smu_optimizations = 0x0; + + ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context); + + } + DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled); + + return ret; +} + +/* + * amdgpu_dm_psr_enable() - enable psr f/w + * @stream: stream state + * + * Return: true if success + */ +bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) +{ + struct dc_link *link = stream->link; + struct dc_static_screen_events triggers = {0}; + + DRM_DEBUG_DRIVER("Enabling psr...\n"); + + triggers.cursor_update = true; + triggers.overlay_update = true; + triggers.surface_update = true; + + dc_stream_set_static_screen_events(link->ctx->dc, + &stream, 1, + &triggers); + + return dc_link_set_psr_allow_active(link, true, false); +} + +/* + * amdgpu_dm_psr_disable() - disable psr f/w + * @stream: stream state + * + * Return: true if success + */ +static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream) +{ + + DRM_DEBUG_DRIVER("Disabling psr...\n"); + + return dc_link_set_psr_allow_active(stream->link, false, true); +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index c8c525a2b505..77c5166e6b08 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -108,6 +108,12 @@ struct amdgpu_dm_backlight_caps { * @display_indexes_num: Max number of display streams supported * @irq_handler_list_table_lock: Synchronizes access to IRQ tables * @backlight_dev: Backlight control device + * @backlight_link: Link on which to control backlight + * @backlight_caps: Capabilities of the backlight device + * @freesync_module: Module handling freesync calculations + * @fw_dmcu: Reference to DMCU firmware + * @dmcu_fw_version: Version of the DMCU firmware + * @soc_bounding_box: SOC bounding box values provided by gpu_info FW * @cached_state: Caches device atomic state for suspend/resume * @compressor: Frame buffer compression buffer. 
See &struct dm_comressor_info */ @@ -128,7 +134,7 @@ struct amdgpu_display_manager { u16 display_indexes_num; /** - * @atomic_obj + * @atomic_obj: * * In combination with &dm_atomic_state it helps manage * global atomic state that doesn't map cleanly into existing @@ -225,6 +231,9 @@ struct amdgpu_display_manager { struct amdgpu_dm_backlight_caps backlight_caps; struct mod_freesync *freesync_module; +#ifdef CONFIG_DRM_AMD_DC_HDCP + struct hdcp_workqueue *hdcp_workqueue; +#endif struct drm_atomic_state *cached_state; @@ -234,6 +243,8 @@ struct amdgpu_display_manager { uint32_t dmcu_fw_version; #ifdef CONFIG_DRM_AMD_DC_DCN2_0 /** + * @soc_bounding_box: + * * gpu_info FW provided soc bounding box struct or 0 if not * available in FW */ @@ -287,6 +298,7 @@ struct amdgpu_dm_connector { uint32_t debugfs_dpcd_address; uint32_t debugfs_dpcd_size; #endif + bool force_yuv420_output; }; #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index b43bb7f90e4e..2233d293a707 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -210,6 +210,8 @@ static int __set_legacy_tf(struct dc_transfer_func *func, res = mod_color_calculate_regamma_params(func, gamma, true, has_rom, NULL); + dc_gamma_release(&gamma); + return res ? 0 : -ENOMEM; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index a549c7c717dd..eaad9099bc0b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -122,11 +122,16 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc, } /* Configure dithering */ - if (!dm_need_crc_dither(source)) + if (!dm_need_crc_dither(source)) { dc_stream_set_dither_option(stream_state, DITHER_OPTION_TRUN8); - else + dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state, + DYN_EXPANSION_DISABLE); + } else { dc_stream_set_dither_option(stream_state, DITHER_OPTION_DEFAULT); + dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state, + DYN_EXPANSION_AUTO); + } unlock: mutex_unlock(&adev->dm.dc_lock); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index f3dfb2887ae0..bdb37e611015 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -942,6 +942,52 @@ static const struct { {"aux_dpcd_data", &dp_dpcd_data_debugfs_fops} }; +/* + * Force YUV420 output if available from the given mode + */ +static int force_yuv420_output_set(void *data, u64 val) +{ + struct amdgpu_dm_connector *connector = data; + + connector->force_yuv420_output = (bool)val; + + return 0; +} + +/* + * Check if YUV420 is forced when available from the given mode + */ +static int force_yuv420_output_get(void *data, u64 *val) +{ + struct amdgpu_dm_connector *connector = data; + + *val = connector->force_yuv420_output; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(force_yuv420_output_fops, force_yuv420_output_get, + force_yuv420_output_set, "%llu\n"); + +/* + * Read PSR state + */ +static int psr_get(void *data, u64 *val) +{ + struct amdgpu_dm_connector *connector = data; + struct dc_link *link = connector->dc_link; + uint32_t psr_state = 0; + + dc_link_get_psr_state(link, &psr_state); + + 
*val = psr_state; + + return 0; +} + + +DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n"); + void connector_debugfs_init(struct amdgpu_dm_connector *connector) { int i; @@ -955,6 +1001,12 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector) dp_debugfs_entries[i].fops); } } + if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) + debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops); + + debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector, + &force_yuv420_output_fops); + } /* diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c new file mode 100644 index 000000000000..77181ddf6c8e --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -0,0 +1,346 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "amdgpu_dm_hdcp.h" +#include "amdgpu.h" +#include "amdgpu_dm.h" +#include "dm_helpers.h" +#include <drm/drm_hdcp.h> + +static bool +lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size) +{ + + struct dc_link *link = handle; + struct i2c_payload i2c_payloads[] = {{true, address, size, (void *)data} }; + struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz}; + + return dm_helpers_submit_i2c(link->ctx, link, &cmd); +} + +static bool +lp_read_i2c(void *handle, uint32_t address, uint8_t offset, uint8_t *data, uint32_t size) +{ + struct dc_link *link = handle; + + struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset}, {false, address, size, data} }; + struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz}; + + return dm_helpers_submit_i2c(link->ctx, link, &cmd); +} + +static bool +lp_write_dpcd(void *handle, uint32_t address, const uint8_t *data, uint32_t size) +{ + struct dc_link *link = handle; + + return dm_helpers_dp_write_dpcd(link->ctx, link, address, data, size); +} + +static bool +lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size) +{ + struct dc_link *link = handle; + + return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size); +} + +static void process_output(struct hdcp_workqueue *hdcp_work) +{ + struct mod_hdcp_output output = hdcp_work->output; + + if (output.callback_stop) + cancel_delayed_work(&hdcp_work->callback_dwork); + + if (output.callback_needed) + schedule_delayed_work(&hdcp_work->callback_dwork, + msecs_to_jiffies(output.callback_delay)); + + if (output.watchdog_timer_stop) + cancel_delayed_work(&hdcp_work->watchdog_timer_dwork); + + if (output.watchdog_timer_needed) + schedule_delayed_work(&hdcp_work->watchdog_timer_dwork, + msecs_to_jiffies(output.watchdog_timer_delay)); + +} + +void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, struct amdgpu_dm_connector *aconnector) +{ + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + struct mod_hdcp_display *display = &hdcp_work[link_index].display; + struct mod_hdcp_link *link = &hdcp_work[link_index].link; + + mutex_lock(&hdcp_w->mutex); + hdcp_w->aconnector = aconnector; + + mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output); + + schedule_delayed_work(&hdcp_w->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); + + process_output(hdcp_w); + + mutex_unlock(&hdcp_w->mutex); + +} + +void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, unsigned int display_index) +{ + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + + mutex_lock(&hdcp_w->mutex); + + mod_hdcp_remove_display(&hdcp_w->hdcp, display_index, &hdcp_w->output); + + cancel_delayed_work(&hdcp_w->property_validate_dwork); + hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + + process_output(hdcp_w); + + mutex_unlock(&hdcp_w->mutex); + +} + +void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index) +{ + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + + mutex_lock(&hdcp_w->mutex); + + mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output); + + cancel_delayed_work(&hdcp_w->property_validate_dwork); + hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + + process_output(hdcp_w); + + mutex_unlock(&hdcp_w->mutex); +} + +void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index) +{ + 
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + + schedule_work(&hdcp_w->cpirq_work); +} + + + + +static void event_callback(struct work_struct *work) +{ + struct hdcp_workqueue *hdcp_work; + + hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue, + callback_dwork); + + mutex_lock(&hdcp_work->mutex); + + cancel_delayed_work(&hdcp_work->watchdog_timer_dwork); + + mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CALLBACK, + &hdcp_work->output); + + process_output(hdcp_work); + + mutex_unlock(&hdcp_work->mutex); + + +} +static void event_property_update(struct work_struct *work) +{ + + struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, property_update_work); + struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector; + struct drm_device *dev = hdcp_work->aconnector->base.dev; + long ret; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + mutex_lock(&hdcp_work->mutex); + + + if (aconnector->base.state->commit) { + ret = wait_for_completion_interruptible_timeout(&aconnector->base.state->commit->hw_done, 10 * HZ); + + if (ret == 0) { + DRM_ERROR("HDCP state unknown! Setting it to DESIRED"); + hdcp_work->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + } + } + + if (hdcp_work->encryption_status == MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON) + drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED); + else + drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_DESIRED); + + + mutex_unlock(&hdcp_work->mutex); + drm_modeset_unlock(&dev->mode_config.connection_mutex); +} + +static void event_property_validate(struct work_struct *work) +{ + struct hdcp_workqueue *hdcp_work = + container_of(to_delayed_work(work), struct hdcp_workqueue, property_validate_dwork); + struct mod_hdcp_display_query query; + struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector; + + mutex_lock(&hdcp_work->mutex); + + query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, &query); + + if (query.encryption_status != hdcp_work->encryption_status) { + hdcp_work->encryption_status = query.encryption_status; + schedule_work(&hdcp_work->property_update_work); + } + + schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); + + mutex_unlock(&hdcp_work->mutex); +} + +static void event_watchdog_timer(struct work_struct *work) +{ + struct hdcp_workqueue *hdcp_work; + + hdcp_work = container_of(to_delayed_work(work), + struct hdcp_workqueue, + watchdog_timer_dwork); + + mutex_lock(&hdcp_work->mutex); + + mod_hdcp_process_event(&hdcp_work->hdcp, + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT, + &hdcp_work->output); + + process_output(hdcp_work); + + mutex_unlock(&hdcp_work->mutex); + +} + +static void event_cpirq(struct work_struct *work) +{ + struct hdcp_workqueue *hdcp_work; + + hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work); + + mutex_lock(&hdcp_work->mutex); + + mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output); + + process_output(hdcp_work); + + mutex_unlock(&hdcp_work->mutex); + +} + + +void hdcp_destroy(struct hdcp_workqueue *hdcp_work) +{ + int i = 0; + + for (i = 0; i < hdcp_work->max_link; i++) { + cancel_delayed_work_sync(&hdcp_work[i].callback_dwork); + cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork); + } + + kfree(hdcp_work); + +} + +static void update_config(void *handle, struct cp_psp_stream_config 
*config) +{ + struct hdcp_workqueue *hdcp_work = handle; + struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx; + int link_index = aconnector->dc_link->link_index; + struct mod_hdcp_display *display = &hdcp_work[link_index].display; + struct mod_hdcp_link *link = &hdcp_work[link_index].link; + + memset(display, 0, sizeof(*display)); + memset(link, 0, sizeof(*link)); + + display->index = aconnector->base.index; + display->state = MOD_HDCP_DISPLAY_ACTIVE; + + if (aconnector->dc_sink != NULL) + link->mode = mod_hdcp_signal_type_to_operation_mode(aconnector->dc_sink->sink_signal); + + display->controller = CONTROLLER_ID_D0 + config->otg_inst; + display->dig_fe = config->stream_enc_inst; + link->dig_be = config->link_enc_inst; + link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1; + link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw; + link->adjust.hdcp2.disable = 1; + +} + +struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc) +{ + + int max_caps = dc->caps.max_links; + struct hdcp_workqueue *hdcp_work = kzalloc(max_caps*sizeof(*hdcp_work), GFP_KERNEL); + int i = 0; + + if (hdcp_work == NULL) + goto fail_alloc_context; + + hdcp_work->max_link = max_caps; + + for (i = 0; i < max_caps; i++) { + + mutex_init(&hdcp_work[i].mutex); + + INIT_WORK(&hdcp_work[i].cpirq_work, event_cpirq); + INIT_WORK(&hdcp_work[i].property_update_work, event_property_update); + INIT_DELAYED_WORK(&hdcp_work[i].callback_dwork, event_callback); + INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer); + INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate); + + hdcp_work[i].hdcp.config.psp.handle = psp_context; + hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i); + hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c; + hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c; + hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd; + hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd; + } + + cp_psp->funcs.update_stream_config = update_config; + cp_psp->handle = hdcp_work; + + return hdcp_work; + +fail_alloc_context: + kfree(hdcp_work); + + return NULL; + + + +} + + + diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h new file mode 100644 index 000000000000..d3ba505d0696 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h @@ -0,0 +1,66 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef AMDGPU_DM_AMDGPU_DM_HDCP_H_ +#define AMDGPU_DM_AMDGPU_DM_HDCP_H_ + +#include "mod_hdcp.h" +#include "hdcp.h" +#include "dc.h" +#include "dm_cp_psp.h" + +struct mod_hdcp; +struct mod_hdcp_link; +struct mod_hdcp_display; +struct cp_psp; + +struct hdcp_workqueue { + struct work_struct cpirq_work; + struct work_struct property_update_work; + struct delayed_work callback_dwork; + struct delayed_work watchdog_timer_dwork; + struct delayed_work property_validate_dwork; + struct amdgpu_dm_connector *aconnector; + struct mutex mutex; + + struct mod_hdcp hdcp; + struct mod_hdcp_output output; + struct mod_hdcp_display display; + struct mod_hdcp_link link; + + enum mod_hdcp_encryption_status encryption_status; + uint8_t max_link; +}; + +void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, + struct amdgpu_dm_connector *aconnector); +void hdcp_remove_display(struct hdcp_workqueue *work, unsigned int link_index, unsigned int display_index); +void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index); +void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index); +void hdcp_destroy(struct hdcp_workqueue *work); + +struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc); + +#endif /* AMDGPU_DM_AMDGPU_DM_HDCP_H_ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index ee1dc75f5ddc..11e5784aa62a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -97,11 +97,10 @@ enum dc_edid_status dm_helpers_parse_edid_caps( (struct edid *) edid->raw_edid); sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads); - if (sad_count <= 0) { - DRM_INFO("SADs count is: %d, don't need to read it\n", - sad_count); + if (sad_count < 0) + DRM_ERROR("Couldn't read SADs: %d\n", sad_count); + if (sad_count <= 0) return result; - } edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT; for (i = 0; i < edid_caps->audio_mode_count; ++i) { @@ -282,7 +281,7 @@ void dm_helpers_dp_mst_clear_payload_allocation_table( * Polls for ACT (allocation change trigger) handled and sends * ALLOCATE_PAYLOAD message. 
*/ -bool dm_helpers_dp_mst_poll_for_allocation_change_trigger( +enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger( struct dc_context *ctx, const struct dc_stream_state *stream) { @@ -293,19 +292,19 @@ bool dm_helpers_dp_mst_poll_for_allocation_change_trigger( aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; if (!aconnector || !aconnector->mst_port) - return false; + return ACT_FAILED; mst_mgr = &aconnector->mst_port->mst_mgr; if (!mst_mgr->mst_state) - return false; + return ACT_FAILED; ret = drm_dp_check_act_status(mst_mgr); if (ret) - return false; + return ACT_FAILED; - return true; + return ACT_SUCCESS; } bool dm_helpers_dp_mst_send_payload_allocation( diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index fa5d503d379c..64445c4cc4c2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -732,8 +732,10 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); @@ -751,6 +753,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) true); } } + drm_connector_list_iter_end(&iter); } /** @@ -765,8 +768,10 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); const struct dc_link *dc_link = amdgpu_dm_connector->dc_link; @@ -779,4 +784,5 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) false); } } + drm_connector_list_iter_end(&iter); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 16218a202b59..2bf8534c18fb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -36,7 +36,9 @@ #include "dc_link_ddc.h" #include "i2caux_interface.h" - +#if defined(CONFIG_DEBUG_FS) +#include "amdgpu_dm_debugfs.h" +#endif /* #define TRACE_DPCD */ #ifdef TRACE_DPCD @@ -113,6 +115,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, result = -EIO; break; case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY: + case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE: result = -EBUSY; break; case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT: @@ -123,31 +126,14 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, return result; } -static enum drm_connector_status -dm_dp_mst_detect(struct drm_connector *connector, bool force) -{ - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - struct amdgpu_dm_connector *master = aconnector->mst_port; - - enum drm_connector_status status = - drm_dp_mst_detect_port( - connector, - &master->mst_mgr, - aconnector->port); - - return status; -} - static void dm_dp_mst_connector_destroy(struct drm_connector *connector) { struct amdgpu_dm_connector *amdgpu_dm_connector = 
to_amdgpu_dm_connector(connector); struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder; - if (amdgpu_dm_connector->edid) { - kfree(amdgpu_dm_connector->edid); - amdgpu_dm_connector->edid = NULL; - } + kfree(amdgpu_dm_connector->edid); + amdgpu_dm_connector->edid = NULL; drm_encoder_cleanup(&amdgpu_encoder->base); kfree(amdgpu_encoder); @@ -163,6 +149,12 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector) to_amdgpu_dm_connector(connector); struct drm_dp_mst_port *port = amdgpu_dm_connector->port; +#if defined(CONFIG_DEBUG_FS) + connector_debugfs_init(amdgpu_dm_connector); + amdgpu_dm_connector->debugfs_dpcd_address = 0; + amdgpu_dm_connector->debugfs_dpcd_size = 0; +#endif + return drm_dp_mst_connector_late_register(connector, port); } @@ -177,7 +169,6 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector) } static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { - .detect = dm_dp_mst_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = dm_dp_mst_connector_destroy, .reset = amdgpu_dm_connector_funcs_reset, @@ -245,17 +236,29 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) return ret; } -static struct drm_encoder *dm_mst_best_encoder(struct drm_connector *connector) +static struct drm_encoder * +dm_mst_atomic_best_encoder(struct drm_connector *connector, + struct drm_connector_state *connector_state) { - struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); + return &to_amdgpu_dm_connector(connector)->mst_encoder->base; +} + +static int +dm_dp_mst_detect(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, bool force) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct amdgpu_dm_connector *master = aconnector->mst_port; - return &amdgpu_dm_connector->mst_encoder->base; + return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr, + aconnector->port); } static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = { .get_modes = dm_dp_mst_get_modes, .mode_valid = amdgpu_dm_connector_mode_valid, - .best_encoder = dm_mst_best_encoder, + .atomic_best_encoder = dm_mst_atomic_best_encoder, + .detect_ctx = dm_dp_mst_detect, }; static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) @@ -416,7 +419,11 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, drm_dp_aux_register(&aconnector->dm_dp_aux.aux); drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux, - aconnector->base.name, dm->adev->dev); + &aconnector->base); + + if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP) + return; + aconnector->mst_mgr.cbs = &dm_mst_cbs; drm_dp_mst_topology_mgr_init( &aconnector->mst_mgr, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index f4cfa0caeba8..55a520a63712 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -345,7 +345,7 @@ bool dm_pp_get_clock_levels_by_type( /* Error in pplib. Provide default values. 
*/ return true; } - } else if (adev->smu.funcs && adev->smu.funcs->get_clock_by_type) { + } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type) { if (smu_get_clock_by_type(&adev->smu, dc_to_pp_clock_type(clk_type), &pp_clks)) { @@ -365,7 +365,7 @@ bool dm_pp_get_clock_levels_by_type( validation_clks.memory_max_clock = 80000; validation_clks.level = 0; } - } else if (adev->smu.funcs && adev->smu.funcs->get_max_high_clocks) { + } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_max_high_clocks) { if (smu_get_max_high_clocks(&adev->smu, &validation_clks)) { DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n"); validation_clks.engine_max_clock = 72000; @@ -506,8 +506,8 @@ bool dm_pp_apply_clock_for_voltage_request( ret = adev->powerplay.pp_funcs->display_clock_voltage_request( adev->powerplay.pp_handle, &pp_clock_request); - else if (adev->smu.funcs && - adev->smu.funcs->display_clock_voltage_request) + else if (adev->smu.ppt_funcs && + adev->smu.ppt_funcs->display_clock_voltage_request) ret = smu_display_clock_voltage_request(&adev->smu, &pp_clock_request); if (ret) @@ -527,7 +527,7 @@ bool dm_pp_get_static_clocks( ret = adev->powerplay.pp_funcs->get_current_clocks( adev->powerplay.pp_handle, &pp_clk_info); - else if (adev->smu.funcs) + else if (adev->smu.ppt_funcs) ret = smu_get_current_clocks(&adev->smu, &pp_clk_info); if (ret) return false; @@ -589,10 +589,9 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp, if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges) pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, &wm_with_clock_ranges); - else if (adev->smu.funcs && - adev->smu.funcs->set_watermarks_for_clock_ranges) + else smu_set_watermarks_for_clock_ranges(&adev->smu, - &wm_with_clock_ranges); + &wm_with_clock_ranges); } void pp_rv_set_pme_wa_enable(struct pp_smu *pp) @@ -604,7 +603,7 @@ void pp_rv_set_pme_wa_enable(struct pp_smu *pp) if (pp_funcs && pp_funcs->notify_smu_enable_pwe) pp_funcs->notify_smu_enable_pwe(pp_handle); - else if (adev->smu.funcs) + else if (adev->smu.ppt_funcs) smu_notify_smu_enable_pwe(&adev->smu); } @@ -665,7 +664,6 @@ enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp, { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - struct smu_context *smu = &adev->smu; struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges; struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges; @@ -708,15 +706,7 @@ enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp, ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000; } - if (!smu->funcs) - return PP_SMU_RESULT_UNSUPPORTED; - - /* 0: successful or smu.funcs->set_watermarks_for_clock_ranges = NULL; - * 1: fail - */ - if (smu_set_watermarks_for_clock_ranges(&adev->smu, - &wm_with_clock_ranges)) - return PP_SMU_RESULT_UNSUPPORTED; + smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges); return PP_SMU_RESULT_OK; } @@ -727,10 +717,10 @@ enum pp_smu_status pp_nv_set_pme_wa_enable(struct pp_smu *pp) struct amdgpu_device *adev = ctx->driver_context; struct smu_context *smu = &adev->smu; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; - /* 0: successful or smu.funcs->set_azalia_d3_pme = NULL; 1: fail */ + /* 0: successful or smu.ppt_funcs->set_azalia_d3_pme = NULL; 1: fail */ if (smu_set_azalia_d3_pme(smu)) return PP_SMU_RESULT_FAIL; @@ -743,10 +733,10 @@ enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count) struct 
amdgpu_device *adev = ctx->driver_context; struct smu_context *smu = &adev->smu; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; - /* 0: successful or smu.funcs->set_display_count = NULL; 1: fail */ + /* 0: successful or smu.ppt_funcs->set_display_count = NULL; 1: fail */ if (smu_set_display_count(smu, count)) return PP_SMU_RESULT_FAIL; @@ -759,10 +749,10 @@ enum pp_smu_status pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz) struct amdgpu_device *adev = ctx->driver_context; struct smu_context *smu = &adev->smu; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; - /* 0: successful or smu.funcs->set_deep_sleep_dcefclk = NULL;1: fail */ + /* 0: successful or smu.ppt_funcs->set_deep_sleep_dcefclk = NULL;1: fail */ if (smu_set_deep_sleep_dcefclk(smu, mhz)) return PP_SMU_RESULT_FAIL; @@ -777,13 +767,13 @@ enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq( struct smu_context *smu = &adev->smu; struct pp_display_clock_request clock_req; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; clock_req.clock_type = amd_pp_dcef_clock; clock_req.clock_freq_in_khz = mhz * 1000; - /* 0: successful or smu.funcs->display_clock_voltage_request = NULL + /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL * 1: fail */ if (smu_display_clock_voltage_request(smu, &clock_req)) @@ -799,13 +789,13 @@ enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz) struct smu_context *smu = &adev->smu; struct pp_display_clock_request clock_req; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; clock_req.clock_type = amd_pp_mem_clock; clock_req.clock_freq_in_khz = mhz * 1000; - /* 0: successful or smu.funcs->display_clock_voltage_request = NULL + /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL * 1: fail */ if (smu_display_clock_voltage_request(smu, &clock_req)) @@ -835,7 +825,7 @@ enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp, struct smu_context *smu = &adev->smu; struct pp_display_clock_request clock_req; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; switch (clock_id) { @@ -853,7 +843,7 @@ enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp, } clock_req.clock_freq_in_khz = mhz * 1000; - /* 0: successful or smu.funcs->display_clock_voltage_request = NULL + /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL * 1: fail */ if (smu_display_clock_voltage_request(smu, &clock_req)) @@ -869,13 +859,13 @@ enum pp_smu_status pp_nv_get_maximum_sustainable_clocks( struct amdgpu_device *adev = ctx->driver_context; struct smu_context *smu = &adev->smu; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; - if (!smu->funcs->get_max_sustainable_clocks_by_dc) + if (!smu->ppt_funcs->get_max_sustainable_clocks_by_dc) return PP_SMU_RESULT_UNSUPPORTED; - if (!smu->funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks)) + if (!smu_get_max_sustainable_clocks_by_dc(smu, max_clocks)) return PP_SMU_RESULT_OK; return PP_SMU_RESULT_FAIL; @@ -894,13 +884,97 @@ enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp, if (!smu->ppt_funcs->get_uclk_dpm_states) return PP_SMU_RESULT_UNSUPPORTED; - if (!smu->ppt_funcs->get_uclk_dpm_states(smu, + if (!smu_get_uclk_dpm_states(smu, clock_values_in_khz, num_states)) return PP_SMU_RESULT_OK; return PP_SMU_RESULT_FAIL; } +#ifdef CONFIG_DRM_AMD_DC_DCN2_1 +enum pp_smu_status pp_rn_get_dpm_clock_table( + struct pp_smu *pp, struct 
dpm_clocks *clock_table) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + struct smu_context *smu = &adev->smu; + + if (!smu->ppt_funcs) + return PP_SMU_RESULT_UNSUPPORTED; + + if (!smu->ppt_funcs->get_dpm_clock_table) + return PP_SMU_RESULT_UNSUPPORTED; + + if (!smu_get_dpm_clock_table(smu, clock_table)) + return PP_SMU_RESULT_OK; + + return PP_SMU_RESULT_FAIL; +} + +enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp, + struct pp_smu_wm_range_sets *ranges) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + struct smu_context *smu = &adev->smu; + struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges; + struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = + wm_with_clock_ranges.wm_dmif_clocks_ranges; + struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = + wm_with_clock_ranges.wm_mcif_clocks_ranges; + int32_t i; + + if (!smu->ppt_funcs) + return PP_SMU_RESULT_UNSUPPORTED; + + wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets; + wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets; + + for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) { + if (ranges->reader_wm_sets[i].wm_inst > 3) + wm_dce_clocks[i].wm_set_id = WM_SET_A; + else + wm_dce_clocks[i].wm_set_id = + ranges->reader_wm_sets[i].wm_inst; + + wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz = + ranges->reader_wm_sets[i].min_drain_clk_mhz; + + wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz = + ranges->reader_wm_sets[i].max_drain_clk_mhz; + + wm_dce_clocks[i].wm_min_mem_clk_in_khz = + ranges->reader_wm_sets[i].min_fill_clk_mhz; + + wm_dce_clocks[i].wm_max_mem_clk_in_khz = + ranges->reader_wm_sets[i].max_fill_clk_mhz; + } + + for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) { + if (ranges->writer_wm_sets[i].wm_inst > 3) + wm_soc_clocks[i].wm_set_id = WM_SET_A; + else + wm_soc_clocks[i].wm_set_id = + ranges->writer_wm_sets[i].wm_inst; + wm_soc_clocks[i].wm_min_socclk_clk_in_khz = + ranges->writer_wm_sets[i].min_fill_clk_mhz; + + wm_soc_clocks[i].wm_max_socclk_clk_in_khz = + ranges->writer_wm_sets[i].max_fill_clk_mhz; + + wm_soc_clocks[i].wm_min_mem_clk_in_khz = + ranges->writer_wm_sets[i].min_drain_clk_mhz; + + wm_soc_clocks[i].wm_max_mem_clk_in_khz = + ranges->writer_wm_sets[i].max_drain_clk_mhz; + } + + smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges); + + return PP_SMU_RESULT_OK; +} +#endif + void dm_pp_get_funcs( struct dc_context *ctx, struct pp_smu_funcs *funcs) @@ -945,6 +1019,15 @@ void dm_pp_get_funcs( funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support; break; #endif + +#ifdef CONFIG_DRM_AMD_DC_DCN2_1 + case DCN_VERSION_2_1: + funcs->ctx.ver = PP_SMU_VER_RN; + funcs->rn_funcs.pp_smu.dm = ctx; + funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges; + funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table; + break; +#endif default: DRM_ERROR("smu version is not supported !\n"); break; diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index 627982cb15d2..a160512a2f04 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -48,6 +48,10 @@ DC_LIBS += dce110 DC_LIBS += dce100 DC_LIBS += dce80 +ifdef CONFIG_DRM_AMD_DC_HDCP +DC_LIBS += hdcp +endif + AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS))) include $(AMD_DC) diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c 
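The bios_parser hunks below make the same mechanical change in two files: an open-coded three-line exchange through a temporary is replaced by the kernel's typeof-based swap() macro inside a low-to-high bubble sort of the clock/voltage table. A compilable userspace sketch of the transformation, with swap() re-declared in the same shape the kernel's macro has, since this builds outside the tree:

#include <stdio.h>

/* same shape as the kernel macro: a typeof temporary, valid for any lvalue pair */
#define swap(a, b) \
        do { __typeof__(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

struct clock_voltage_caps { unsigned int max_supported_clk, voltage; };

/* low-to-high bubble sort, mirroring construct_integrated_info() */
static void sort_disp_clk_voltage(struct clock_voltage_caps *v, int n)
{
        for (int i = 1; i < n; i++)
                for (int j = i; j > 0; j--)
                        if (v[j].max_supported_clk < v[j - 1].max_supported_clk)
                                swap(v[j - 1], v[j]);
}

int main(void)
{
        struct clock_voltage_caps v[] = { { 600, 1 }, { 300, 0 }, { 1200, 2 } };

        sort_disp_clk_voltage(v, 3);
        for (int i = 0; i < 3; i++)
                printf("clk %u @ voltage %u\n", v[i].max_supported_clk, v[i].voltage);
        return 0;
}
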
b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 221e0f56389f..823843cd2613 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -2543,7 +2543,6 @@ static enum bp_result construct_integrated_info( /* Sort voltage table from low to high*/ if (result == BP_RESULT_OK) { - struct clock_voltage_caps temp = {0, 0}; uint32_t i; uint32_t j; @@ -2553,10 +2552,8 @@ static enum bp_result construct_integrated_info( info->disp_clk_voltage[j].max_supported_clk < info->disp_clk_voltage[j-1].max_supported_clk) { /* swap j and j - 1*/ - temp = info->disp_clk_voltage[j-1]; - info->disp_clk_voltage[j-1] = - info->disp_clk_voltage[j]; - info->disp_clk_voltage[j] = temp; + swap(info->disp_clk_voltage[j - 1], + info->disp_clk_voltage[j]); } } } diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index dff65c0fe82f..7873abea4112 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1613,8 +1613,6 @@ static enum bp_result construct_integrated_info( struct atom_common_table_header *header; struct atom_data_revision revision; - - struct clock_voltage_caps temp = {0, 0}; uint32_t i; uint32_t j; @@ -1644,10 +1642,8 @@ static enum bp_result construct_integrated_info( info->disp_clk_voltage[j-1].max_supported_clk ) { /* swap j and j - 1*/ - temp = info->disp_clk_voltage[j-1]; - info->disp_clk_voltage[j-1] = - info->disp_clk_voltage[j]; - info->disp_clk_voltage[j] = temp; + swap(info->disp_clk_voltage[j - 1], + info->disp_clk_voltage[j]); } } } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index c43797bea413..8828dd9c3783 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -65,6 +65,31 @@ int clk_mgr_helper_get_active_display_cnt( return display_count; } +void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr) +{ + struct dc_link *edp_link = get_edp_link(dc); + + if (dc->hwss.exit_optimized_pwr_state) + dc->hwss.exit_optimized_pwr_state(dc, dc->current_state); + + if (edp_link) { + clk_mgr->psr_allow_active_cache = edp_link->psr_allow_active; + dc_link_set_psr_allow_active(edp_link, false, false); + } + +} + +void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr) +{ + struct dc_link *edp_link = get_edp_link(dc); + + if (edp_link) + dc_link_set_psr_allow_active(edp_link, clk_mgr->psr_allow_active_cache, false); + + if (dc->hwss.optimize_pwr_state) + dc->hwss.optimize_pwr_state(dc, dc->current_state); + +} struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *pp_smu, struct dccg *dccg) { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c index c5c8c4901eed..26db1c5d4e4d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c @@ -147,7 +147,7 @@ int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base) /* Calculate the current DFS clock, in kHz.*/ dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->dentist_vco_freq_khz) / target_div; + * clk_mgr->base.dentist_vco_freq_khz) / target_div; return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz); } @@ -239,7 +239,7 @@ int dce_set_clock( /* Make sure requested clock isn't lower 
than minimum threshold*/ if (requested_clk_khz > 0) requested_clk_khz = max(requested_clk_khz, - clk_mgr_dce->dentist_vco_freq_khz / 64); + clk_mgr_dce->base.dentist_vco_freq_khz / 64); /* Prepare to program display clock*/ pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10; @@ -276,11 +276,11 @@ static void dce_clock_read_integrated_info(struct clk_mgr_internal *clk_mgr_dce) int i; if (bp->integrated_info) - clk_mgr_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq; - if (clk_mgr_dce->dentist_vco_freq_khz == 0) { - clk_mgr_dce->dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq; - if (clk_mgr_dce->dentist_vco_freq_khz == 0) - clk_mgr_dce->dentist_vco_freq_khz = 3600000; + clk_mgr_dce->base.dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq; + if (clk_mgr_dce->base.dentist_vco_freq_khz == 0) { + clk_mgr_dce->base.dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq; + if (clk_mgr_dce->base.dentist_vco_freq_khz == 0) + clk_mgr_dce->base.dentist_vco_freq_khz = 3600000; } /*update the maximum display clock for each power state*/ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c index 7c746ef1e32e..a6c46e903ff9 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c @@ -81,7 +81,7 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz) /* Make sure requested clock isn't lower than minimum threshold*/ if (requested_clk_khz > 0) requested_clk_khz = max(requested_clk_khz, - clk_mgr_dce->dentist_vco_freq_khz / 62); + clk_mgr_dce->base.dentist_vco_freq_khz / 62); dce_clk_params.target_clock_frequency = requested_clk_khz; dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; @@ -135,7 +135,7 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz) /* Make sure requested clock isn't lower than minimum threshold*/ if (requested_clk_khz > 0) requested_clk_khz = max(requested_clk_khz, - clk_mgr->dentist_vco_freq_khz / 62); + clk_mgr->base.dentist_vco_freq_khz / 62); dce_clk_params.target_clock_frequency = requested_clk_khz; dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c index 47f529ce280a..3fab9296918a 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c @@ -139,6 +139,9 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base, ASSERT(clk_mgr->pp_smu); + if (dc->work_arounds.skip_clock_update) + return; + pp_smu = &clk_mgr->pp_smu->rv_funcs; display_count = clk_mgr_helper_get_active_display_cnt(dc, context); @@ -266,11 +269,11 @@ void rv1_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_ clk_mgr->base.dprefclk_khz = 600000; if (bp->integrated_info) - clk_mgr->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq; - if (bp->fw_info_valid && clk_mgr->dentist_vco_freq_khz == 0) { - clk_mgr->dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq; - if (clk_mgr->dentist_vco_freq_khz == 0) - clk_mgr->dentist_vco_freq_khz = 3600000; + clk_mgr->base.dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq; + if (bp->fw_info_valid && clk_mgr->base.dentist_vco_freq_khz == 0) { + clk_mgr->base.dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq; + if (clk_mgr->base.dentist_vco_freq_khz == 
0) + clk_mgr->base.dentist_vco_freq_khz = 3600000; } if (!debug->disable_dfs_bypass && bp->integrated_info) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 3e8ac303bd52..25d7b7c6681c 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -104,84 +104,39 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, { int i; + clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz; for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { int dpp_inst, dppclk_khz; - if (!context->res_ctx.pipe_ctx[i].plane_state) - continue; - - dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst; + /* Loop index will match dpp->inst if resource exists, + * and we want to avoid dependency on dpp object + */ + dpp_inst = i; dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; + clk_mgr->dccg->funcs->update_dpp_dto( - clk_mgr->dccg, dpp_inst, dppclk_khz, false); + clk_mgr->dccg, dpp_inst, dppclk_khz); } } -static void update_global_dpp_clk(struct clk_mgr_internal *clk_mgr, unsigned int khz) +void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr) { int dpp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->dentist_vco_freq_khz / khz; - - uint32_t dppclk_wdivider = dentist_get_did_from_divider(dpp_divider); - - REG_UPDATE(DENTIST_DISPCLK_CNTL, - DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider); - REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100); -} - -static void update_display_clk(struct clk_mgr_internal *clk_mgr, unsigned int khz) -{ + * clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dppclk_khz; int disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->dentist_vco_freq_khz / khz; + * clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dispclk_khz; + uint32_t dppclk_wdivider = dentist_get_did_from_divider(dpp_divider); uint32_t dispclk_wdivider = dentist_get_did_from_divider(disp_divider); REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider); +// REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 5, 100); + REG_UPDATE(DENTIST_DISPCLK_CNTL, + DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider); + REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100); } -static void request_voltage_and_program_disp_clk(struct clk_mgr *clk_mgr_base, unsigned int khz) -{ - struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - struct dc *dc = clk_mgr_base->ctx->dc; - struct pp_smu_funcs_nv *pp_smu = NULL; - bool going_up = clk_mgr->base.clks.dispclk_khz < khz; - - if (dc->res_pool->pp_smu) - pp_smu = &dc->res_pool->pp_smu->nv_funcs; - - clk_mgr->base.clks.dispclk_khz = khz; - - if (going_up && pp_smu && pp_smu->set_voltage_by_freq) - pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000); - - update_display_clk(clk_mgr, khz); - - if (!going_up && pp_smu && pp_smu->set_voltage_by_freq) - pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000); -} - -static void request_voltage_and_program_global_dpp_clk(struct clk_mgr *clk_mgr_base, unsigned int khz) -{ - struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - struct dc *dc = clk_mgr_base->ctx->dc; - struct pp_smu_funcs_nv *pp_smu = NULL; - bool going_up = clk_mgr->base.clks.dppclk_khz < khz; - - if (dc->res_pool->pp_smu) - pp_smu = 
&dc->res_pool->pp_smu->nv_funcs; - - clk_mgr->base.clks.dppclk_khz = khz; - clk_mgr->dccg->ref_dppclk = khz; - - if (going_up && pp_smu && pp_smu->set_voltage_by_freq) - pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000); - - update_global_dpp_clk(clk_mgr, khz); - - if (!going_up && pp_smu && pp_smu->set_voltage_by_freq) - pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000); -} void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, struct dc_state *context, @@ -192,11 +147,12 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, struct dc *dc = clk_mgr_base->ctx->dc; struct pp_smu_funcs_nv *pp_smu = NULL; int display_count; + bool update_dppclk = false; bool update_dispclk = false; bool enter_display_off = false; + bool dpp_clock_lowered = false; struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu; bool force_reset = false; - int i; if (dc->work_arounds.skip_clock_update) return; @@ -251,12 +207,10 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) { clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support; - clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support; if (pp_smu && pp_smu->set_pstate_handshake_support) pp_smu->set_pstate_handshake_support(&pp_smu->pp_smu, clk_mgr_base->clks.p_state_change_support); } - clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support; if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) { clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz; @@ -264,50 +218,40 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, pp_smu->set_hard_min_uclk_by_freq(&pp_smu->pp_smu, clk_mgr_base->clks.dramclk_khz / 1000); } - if (dc->config.forced_clocks == false) { - // First update display clock - if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) - request_voltage_and_program_disp_clk(clk_mgr_base, new_clocks->dispclk_khz); - - // Updating DPP clock requires some more logic - if (!safe_to_lower) { - // For pre-programming, we need to make sure any DPP clock that will go up has to go up + if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { + if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) + dpp_clock_lowered = true; + clk_mgr->base.clks.dppclk_khz = new_clocks->dppclk_khz; - // First raise the global reference if needed - if (new_clocks->dppclk_khz > clk_mgr_base->clks.dppclk_khz) - request_voltage_and_program_global_dpp_clk(clk_mgr_base, new_clocks->dppclk_khz); + if (pp_smu && pp_smu->set_voltage_by_freq) + pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000); - // Then raise any dividers that need raising - for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { - int dpp_inst, dppclk_khz; + update_dppclk = true; + } - if (!context->res_ctx.pipe_ctx[i].plane_state) - continue; + if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { + clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; + if (pp_smu && pp_smu->set_voltage_by_freq) + pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000); - dpp_inst = 
context->res_ctx.pipe_ctx[i].plane_res.dpp->inst; - dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; + update_dispclk = true; + } - clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, dpp_inst, dppclk_khz, true); - } + if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) { + if (dpp_clock_lowered) { + // if clock is being lowered, increase DTO before lowering refclk + dcn20_update_clocks_update_dpp_dto(clk_mgr, context); + dcn20_update_clocks_update_dentist(clk_mgr); } else { - // For post-programming, we can lower ref clk if needed, and unconditionally set all the DTOs - - if (new_clocks->dppclk_khz < clk_mgr_base->clks.dppclk_khz) - request_voltage_and_program_global_dpp_clk(clk_mgr_base, new_clocks->dppclk_khz); - - for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { - int dpp_inst, dppclk_khz; - - if (!context->res_ctx.pipe_ctx[i].plane_state) - continue; - - dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst; - dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; - - clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, dpp_inst, dppclk_khz, false); - } + // if clock is being raised, increase refclk before lowering DTO + if (update_dppclk || update_dispclk) + dcn20_update_clocks_update_dentist(clk_mgr); + // always update dtos unless clock is lowered and not safe to lower + if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz) + dcn20_update_clocks_update_dpp_dto(clk_mgr, context); } } + if (update_dispclk && dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { /*update dmcu for wait_loop count*/ @@ -320,6 +264,8 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr, struct dc_state *context, bool safe_to_lower) { + struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr); + struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; /* Min fclk = 1.2GHz since all the extra scemi logic seems to run off of it */ int fclk_adj = new_clocks->fclk_khz > 1200000 ? new_clocks->fclk_khz : 1200000; @@ -357,14 +303,18 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr, clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz; } - /* Both fclk and dppclk ref are run on the same scemi clock so we - * need to keep the same value for both + /* Both fclk and ref_dppclk run on the same scemi clock. + * So take the higher value since the DPP DTO is typically programmed + * such that max dppclk is 1:1 with ref_dppclk. */ if (clk_mgr->clks.fclk_khz > clk_mgr->clks.dppclk_khz) clk_mgr->clks.dppclk_khz = clk_mgr->clks.fclk_khz; if (clk_mgr->clks.dppclk_khz > clk_mgr->clks.fclk_khz) clk_mgr->clks.fclk_khz = clk_mgr->clks.dppclk_khz; + // Both fclk and ref_dppclk run on the same scemi clock. 
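The rewritten dcn2_update_clocks above encodes one sequencing rule, stated in its comments: when lowering, raise the DTO divide before dropping the reference clock; when raising, bring the reference clock up before relaxing the DTOs. Either way the effective per-pipe DPP clock never overshoots what the context was validated against. A sketch of just that ordering, with helper names standing in for dcn20_update_clocks_update_dpp_dto() and dcn20_update_clocks_update_dentist():

#include <stdbool.h>

struct clk_state { int dppclk_khz; };

static void update_dpp_dto(struct clk_state *s) { (void)s; /* per-pipe DTO dividers */ }
static void update_dentist(struct clk_state *s) { (void)s; /* DENTIST refclk divider */ }

static void set_dppclk(struct clk_state *cur, int new_khz)
{
        bool lowering = new_khz < cur->dppclk_khz;

        cur->dppclk_khz = new_khz;
        if (lowering) {
                update_dpp_dto(cur);    /* raise the DTO divide first ... */
                update_dentist(cur);    /* ... then drop the reference clock */
        } else {
                update_dentist(cur);    /* raise the reference clock first ... */
                update_dpp_dto(cur);    /* ... then relax the DTO divide */
        }
}
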
+ clk_mgr_int->dccg->ref_dppclk = clk_mgr->clks.fclk_khz; + dm_set_dcn_clocks(clk_mgr->ctx, &clk_mgr->clks); } @@ -409,12 +359,36 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr, } } +static bool dcn2_are_clock_states_equal(struct dc_clocks *a, + struct dc_clocks *b) +{ + if (a->dispclk_khz != b->dispclk_khz) + return false; + else if (a->dppclk_khz != b->dppclk_khz) + return false; + else if (a->dcfclk_khz != b->dcfclk_khz) + return false; + else if (a->socclk_khz != b->socclk_khz) + return false; + else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz) + return false; + else if (a->phyclk_khz != b->phyclk_khz) + return false; + else if (a->dramclk_khz != b->dramclk_khz) + return false; + else if (a->p_state_change_support != b->p_state_change_support) + return false; + + return true; +} + static struct clk_mgr_funcs dcn2_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, .update_clocks = dcn2_update_clocks, .init_clocks = dcn2_init_clocks, .enable_pme_wa = dcn2_enable_pme_wa, .get_clock = dcn2_get_clock, + .are_clock_states_equal = dcn2_are_clock_states_equal, }; @@ -442,7 +416,7 @@ void dcn20_clk_mgr_construct( if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { dcn2_funcs.update_clocks = dcn2_update_clocks_fpga; - clk_mgr->dentist_vco_freq_khz = 3850000; + clk_mgr->base.dentist_vco_freq_khz = 3850000; } else { /* DFS Slice 2 should be used for DPREFCLK */ @@ -466,15 +440,15 @@ void dcn20_clk_mgr_construct( pll_req = dc_fixpt_mul_int(pll_req, 100000); /* integer part is now VCO frequency in kHz */ - clk_mgr->dentist_vco_freq_khz = dc_fixpt_floor(pll_req); + clk_mgr->base.dentist_vco_freq_khz = dc_fixpt_floor(pll_req); /* in case we don't get a value from the register, use default */ - if (clk_mgr->dentist_vco_freq_khz == 0) - clk_mgr->dentist_vco_freq_khz = 3850000; + if (clk_mgr->base.dentist_vco_freq_khz == 0) + clk_mgr->base.dentist_vco_freq_khz = 3850000; /* Calculate the DPREFCLK in kHz.*/ clk_mgr->base.dprefclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->dentist_vco_freq_khz) / target_div; + * clk_mgr->base.dentist_vco_freq_khz) / target_div; } //Integrated_info table does not exist on dGPU projects so should not be referenced //anywhere in code for dGPUs. 
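dcn2_are_clock_states_equal, added just below, is the structured alternative to the raw memcmp() comparison kept as a fallback in dc_check_update_surfaces_for_stream later in this series: comparing field by field skips struct padding and bookkeeping members that must not force reprogramming. A self-contained sketch (field list matching the ones the kernel hunk compares):

#include <stdbool.h>

struct dc_clocks {
        int dispclk_khz, dppclk_khz, dcfclk_khz, socclk_khz;
        int dcfclk_deep_sleep_khz, phyclk_khz, dramclk_khz;
        bool p_state_change_support;
        bool prev_p_state_change_support;       /* bookkeeping: never compared */
};

/* field-wise equality; unlike memcmp() this ignores padding bytes
 * and the trailing bookkeeping field */
static bool clocks_equal(const struct dc_clocks *a, const struct dc_clocks *b)
{
        return a->dispclk_khz == b->dispclk_khz &&
               a->dppclk_khz == b->dppclk_khz &&
               a->dcfclk_khz == b->dcfclk_khz &&
               a->socclk_khz == b->socclk_khz &&
               a->dcfclk_deep_sleep_khz == b->dcfclk_deep_sleep_khz &&
               a->phyclk_khz == b->phyclk_khz &&
               a->dramclk_khz == b->dramclk_khz &&
               a->p_state_change_support == b->p_state_change_support;
}
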
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h index ac31a9787305..c9fd824f3c23 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h @@ -50,4 +50,5 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); +void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr); #endif //__DCN20_CLK_MGR_H__ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index 787f94d815f4..790a2d211bd6 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -52,6 +52,45 @@ #define REG(reg_name) \ (CLK_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) + +/* TODO: evaluate how to lower or disable all dcn clocks in screen off case */ +int rn_get_active_display_cnt_wa( + struct dc *dc, + struct dc_state *context) +{ + int i, display_count; + bool hdmi_present = false; + + display_count = 0; + for (i = 0; i < context->stream_count; i++) { + const struct dc_stream_state *stream = context->streams[i]; + + if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) + hdmi_present = true; + } + + for (i = 0; i < dc->link_count; i++) { + const struct dc_link *link = dc->links[i]; + + /* + * Only notify active stream or virtual stream. + * Need to notify virtual stream to work around + * headless case. HPD does not fire when system is in + * S0i2. + */ + /* abusing the fact that the dig and phy are coupled to see if the phy is enabled */ + if (link->connector_signal == SIGNAL_TYPE_VIRTUAL || + link->link_enc->funcs->is_dig_enabled(link->link_enc)) + display_count++; + } + + /* WA for hang on HDMI after display off back back on*/ + if (display_count == 0 && hdmi_present) + display_count = 1; + + return display_count; +} + void rn_update_clocks(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool safe_to_lower) @@ -62,17 +101,36 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base, int display_count; bool update_dppclk = false; bool update_dispclk = false; - bool enter_display_off = false; bool dpp_clock_lowered = false; - struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu; - display_count = clk_mgr_helper_get_active_display_cnt(dc, context); + struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu; - if (display_count == 0) - enter_display_off = true; + if (dc->work_arounds.skip_clock_update) + return; - if (enter_display_off == safe_to_lower) { - rn_vbios_smu_set_display_count(clk_mgr, display_count); + /* + * if it is safe to lower, but we are already in the lower state, we don't have to do anything + * also if safe to lower is false, we just go in the higher state + */ + if (safe_to_lower) { + /* check that we're not already in lower */ + if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) { + + display_count = rn_get_active_display_cnt_wa(dc, context); + /* if we can go lower, go lower */ + if (display_count == 0) { + rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER); + /* update power state */ + clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; + } + } + } else { + /* check that we're not already in D0 */ + if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) { + rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_MISSION_MODE); + /* 
update power state */ + clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE; + } } if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz)) { @@ -113,7 +171,8 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base, // if clock is being raised, increase refclk before lowering DTO if (update_dppclk || update_dispclk) rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz); - if (update_dppclk) + // always update dtos unless clock is lowered and not safe to lower + if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz) dcn20_update_clocks_update_dpp_dto(clk_mgr, context); } @@ -319,7 +378,7 @@ void rn_get_clk_states(struct clk_mgr *clk_mgr_base, struct clk_states *s) rn_dump_clk_registers(&sb, clk_mgr_base, &log_info); - s->dprefclk_khz = sb.dprefclk; + s->dprefclk_khz = sb.dprefclk * 1000; } void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base) @@ -329,12 +388,96 @@ void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base) rn_vbios_smu_enable_pme_wa(clk_mgr); } +void rn_init_clocks(struct clk_mgr *clk_mgr) +{ + memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); + // Assumption is that boot state always supports pstate + clk_mgr->clks.p_state_change_support = true; + clk_mgr->clks.prev_p_state_change_support = true; + clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN; +} + +void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges) +{ + int i, num_valid_sets; + + num_valid_sets = 0; + + for (i = 0; i < WM_SET_COUNT; i++) { + /* skip empty entries, the smu array has no holes*/ + if (!bw_params->wm_table.entries[i].valid) + continue; + + ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst; + ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;; + /* We will not select WM based on dcfclk, so leave it as unconstrained */ + ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; + ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + /* fclk wil be used to select WM*/ + + if (ranges->reader_wm_sets[num_valid_sets].wm_type == WM_TYPE_PSTATE_CHG) { + if (i == 0) + ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = 0; + else { + /* add 1 to make it non-overlapping with next lvl */ + ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = bw_params->clk_table.entries[i - 1].fclk_mhz + 1; + } + ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = bw_params->clk_table.entries[i].fclk_mhz; + + } else { + /* unconstrained for memory retraining */ + ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; + ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + + /* Modify previous watermark range to cover up to max */ + ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + } + num_valid_sets++; + } + + ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */ + ranges->num_reader_wm_sets = num_valid_sets; + + /* modify the min and max to make sure we cover the whole range*/ + ranges->reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; + ranges->reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; + ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_drain_clk_mhz = 
PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + + /* This is for writeback only, does not matter currently as no writeback support*/ + ranges->num_writer_wm_sets = 1; + ranges->writer_wm_sets[0].wm_inst = WM_A; + ranges->writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; + ranges->writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + ranges->writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; + ranges->writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + +} + +static void rn_notify_wm_ranges(struct clk_mgr *clk_mgr_base) +{ + struct dc_debug_options *debug = &clk_mgr_base->ctx->dc->debug; + struct pp_smu_wm_range_sets ranges = {0}; + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + struct pp_smu_funcs *pp_smu = clk_mgr->pp_smu; + + if (!debug->disable_pplib_wm_range) { + build_watermark_ranges(clk_mgr_base->bw_params, &ranges); + + /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ + if (pp_smu && pp_smu->rn_funcs.set_wm_ranges) + pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &ranges); + } + +} + static struct clk_mgr_funcs dcn21_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, .update_clocks = rn_update_clocks, - .init_clocks = dcn2_init_clocks, + .init_clocks = rn_init_clocks, .enable_pme_wa = rn_enable_pme_wa, - /* .dump_clk_registers = rn_dump_clk_registers */ + /* .dump_clk_registers = rn_dump_clk_registers, */ + .notify_wm_ranges = rn_notify_wm_ranges }; struct clk_bw_params rn_bw_params = { @@ -405,80 +548,50 @@ struct clk_bw_params rn_bw_params = { } }; -void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges) +static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage) { - int i, num_valid_sets; - - num_valid_sets = 0; - - for (i = 0; i < WM_SET_COUNT; i++) { - /* skip empty entries, the smu array has no holes*/ - if (!bw_params->wm_table.entries[i].valid) - continue; - - ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst; - ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;; - /* We will not select WM based on dcfclk, so leave it as unconstrained */ - ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - /* fclk wil be used to select WM*/ - - if (ranges->reader_wm_sets[num_valid_sets].wm_type == WM_TYPE_PSTATE_CHG) { - if (i == 0) - ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = 0; - else { - /* add 1 to make it non-overlapping with next lvl */ - ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = bw_params->clk_table.entries[i - 1].fclk_mhz + 1; - } - ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = bw_params->clk_table.entries[i].fclk_mhz; - - } else { - /* unconstrained for memory retraining */ - ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + int i; - /* Modify previous watermark range to cover up to max */ - ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = 
PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - } - num_valid_sets++; + for (i = 0; i < PP_SMU_NUM_DCFCLK_DPM_LEVELS; i++) { + if (clock_table->DcfClocks[i].Vol == voltage) + return clock_table->DcfClocks[i].Freq; } - ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */ - ranges->num_reader_wm_sets = num_valid_sets; - - /* modify the min and max to make sure we cover the whole range*/ - ranges->reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges->reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - - /* This is for writeback only, does not matter currently as no writeback support*/ - ranges->num_writer_wm_sets = 1; - ranges->writer_wm_sets[0].wm_inst = WM_A; - ranges->writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges->writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - ranges->writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges->writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - + ASSERT(0); + return 0; } -void clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct hw_asic_id *asic_id) +static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct hw_asic_id *asic_id) { - int i; + int i, j = 0; + + j = -1; ASSERT(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL); - for (i = 0; i < PP_SMU_NUM_FCLK_DPM_LEVELS; i++) { - if (clock_table->FClocks[i].Freq == 0) + /* Find lowest DPM, FCLK is filled in reverse order*/ + + for (i = PP_SMU_NUM_FCLK_DPM_LEVELS - 1; i >= 0; i--) { + if (clock_table->FClocks[i].Freq != 0) { + j = i; break; + } + } + + if (j == -1) { + /* clock table is all 0s, just use our own hardcode */ + ASSERT(0); + return; + } + + bw_params->clk_table.num_entries = j + 1; - bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i].Freq; - bw_params->clk_table.entries[i].fclk_mhz = clock_table->FClocks[i].Freq; - bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[i].Freq; - bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i].Freq; - bw_params->clk_table.entries[i].voltage = clock_table->FClocks[i].Vol; + for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) { + bw_params->clk_table.entries[i].fclk_mhz = clock_table->FClocks[j].Freq; + bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[j].Freq; + bw_params->clk_table.entries[i].voltage = clock_table->FClocks[j].Vol; + bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->FClocks[j].Vol); } - bw_params->clk_table.num_entries = i; bw_params->vram_type = asic_id->vram_type; bw_params->num_channels = asic_id->vram_width / DDR4_DRAM_WIDTH; @@ -486,7 +599,7 @@ void clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct d for (i = 0; i < WM_SET_COUNT; i++) { bw_params->wm_table.entries[i].wm_inst = i; - if (clock_table->FClocks[i].Freq == 0) { + if (i >= bw_params->clk_table.num_entries) { bw_params->wm_table.entries[i].valid = false; continue; } @@ -534,57 +647,42 @@ void rn_clk_mgr_construct( if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { 
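rn_clk_mgr_helper_populate_bw_params above deals with an SMU table quirk: FClocks is filled from the top index down, so the code walks backwards to find the first non-zero level, bails out to the hardcoded defaults on an all-zero table, and then copies the valid levels out lowest-DPM-first. A compact model of that walk (table size and field names simplified):

#define NUM_FCLK_LEVELS 4       /* PP_SMU_NUM_FCLK_DPM_LEVELS in the kernel */

struct dpm_entry { unsigned int freq_mhz, voltage; };

static int populate_from_reversed(const struct dpm_entry *fclk,
                                  struct dpm_entry *out)
{
        int i, j = -1;

        /* FCLK is filled from the top index down; find the first real level */
        for (i = NUM_FCLK_LEVELS - 1; i >= 0; i--) {
                if (fclk[i].freq_mhz != 0) {
                        j = i;
                        break;
                }
        }
        if (j == -1)
                return 0;       /* all zeros: caller keeps its hardcoded table */

        /* copy the j+1 valid levels out, lowest DPM first */
        for (i = 0; i <= j; i++)
                out[i] = fclk[j - i];
        return i;               /* number of entries, j + 1 */
}
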
dcn21_funcs.update_clocks = dcn2_update_clocks_fpga; - clk_mgr->dentist_vco_freq_khz = 3600000; + clk_mgr->base.dentist_vco_freq_khz = 3600000; clk_mgr->base.dprefclk_khz = 600000; } else { struct clk_log_info log_info = {0}; /* TODO: Check we get what we expect during bringup */ - clk_mgr->dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr); + clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr); /* in case we don't get a value from the register, use default */ - if (clk_mgr->dentist_vco_freq_khz == 0) - clk_mgr->dentist_vco_freq_khz = 3600000; + if (clk_mgr->base.dentist_vco_freq_khz == 0) + clk_mgr->base.dentist_vco_freq_khz = 3600000; rn_dump_clk_registers(&s, &clk_mgr->base, &log_info); - clk_mgr->base.dprefclk_khz = s.dprefclk; - - if (clk_mgr->base.dprefclk_khz != 600000) { - clk_mgr->base.dprefclk_khz = 600000; - ASSERT(1); //TODO: Renoir follow up. - } + /* Convert dprefclk units from MHz to KHz */ + /* Value already divided by 10, some resolution lost */ + clk_mgr->base.dprefclk_khz = s.dprefclk * 1000; /* in case we don't get a value from the register, use default */ - if (clk_mgr->base.dprefclk_khz == 0) + if (clk_mgr->base.dprefclk_khz == 0) { + ASSERT(clk_mgr->base.dprefclk_khz == 600000); clk_mgr->base.dprefclk_khz = 600000; + } } dce_clock_read_ss_info(clk_mgr); clk_mgr->base.bw_params = &rn_bw_params; - if (pp_smu) { + if (pp_smu && pp_smu->rn_funcs.get_dpm_clock_table) { pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table); - clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, &ctx->asic_id); + rn_clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, &ctx->asic_id); } - /* - * Notify SMU which set of WM should be selected for different ranges of fclk - * On Renoir there is a maximumum of 4 DF pstates supported, could be less - * depending on DDR speed and fused maximum fclk. 
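The Renoir constructor below also changes how dprefclk is taken from the register snapshot: the readback is in MHz (already divided by 10 in hardware, so some resolution is lost) and is now scaled to kHz instead of being forced to 600000, with the hardcoded value kept only as the zero-readback fallback. The shape of that read-with-fallback, as a sketch:

static int read_dprefclk_khz(unsigned int reg_snapshot_mhz)
{
        int khz = (int)reg_snapshot_mhz * 1000;         /* MHz -> kHz */

        if (khz == 0)           /* unreadable or unprogrammed register */
                khz = 600000;   /* 600 MHz default, as in rn_clk_mgr_construct */
        return khz;
}
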
- */ - if (!debug->disable_pplib_wm_range) { - struct pp_smu_wm_range_sets ranges = {0}; - - build_watermark_ranges(clk_mgr->base.bw_params, &ranges); - - /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ - if (pp_smu && pp_smu->rn_funcs.set_wm_ranges) - pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &ranges); + if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->smu_ver >= 0x00371500) { + /* enable powerfeatures when displaycount goes to 0 */ + rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(clk_mgr, !debug->disable_48mhz_pwrdwn); } - - /* enable powerfeatures when displaycount goes to 0 */ - if (!debug->disable_48mhz_pwrdwn) - rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(clk_mgr); } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h index aadec06fde10..e4322fa5475b 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h @@ -26,11 +26,13 @@ #ifndef __RN_CLK_MGR_H__ #define __RN_CLK_MGR_H__ +#include "clk_mgr.h" +#include "dm_pp_smu.h" + struct rn_clk_registers { uint32_t CLK1_CLK0_CURRENT_CNT; /* DPREFCLK */ }; - void rn_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr, struct pp_smu_funcs *pp_smu, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c index 50984c1811bb..cb7c0e8b7e1b 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c @@ -33,7 +33,7 @@ #include "mp/mp_12_0_0_sh_mask.h" #define REG(reg_name) \ - (MP1_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) + (MP0_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) #define FN(reg_name, field) \ FD(reg_name##__##field) @@ -84,16 +84,12 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis int actual_dispclk_set_mhz = -1; struct dc *core_dc = clk_mgr->base.ctx->dc; struct dmcu *dmcu = core_dc->res_pool->dmcu; - uint32_t clk = requested_dispclk_khz / 1000; - - if (clk <= 100) - clk = 101; /* Unit of SMU msg parameter is Mhz */ actual_dispclk_set_mhz = rn_vbios_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_SetDispclkFreq, - clk); + requested_dispclk_khz / 1000); if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { @@ -124,7 +120,7 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque { int actual_dcfclk_set_mhz = -1; - if (clk_mgr->smu_ver < 0xFFFFFFFF) + if (clk_mgr->smu_ver < 0x370c00) return actual_dcfclk_set_mhz; actual_dcfclk_set_mhz = rn_vbios_smu_send_msg_with_param( @@ -139,7 +135,7 @@ int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int { int actual_min_ds_dcfclk_mhz = -1; - if (clk_mgr->smu_ver < 0xFFFFFFFF) + if (clk_mgr->smu_ver < 0x370c00) return actual_min_ds_dcfclk_mhz; actual_min_ds_dcfclk_mhz = rn_vbios_smu_send_msg_with_param( @@ -162,33 +158,35 @@ int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_ { int actual_dppclk_set_mhz = -1; - uint32_t clk = requested_dpp_khz / 1000; - - if (clk <= 100) - clk = 101; - actual_dppclk_set_mhz = rn_vbios_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_SetDppclkFreq, - clk); + requested_dpp_khz / 1000); return actual_dppclk_set_mhz * 1000; } -void 
rn_vbios_smu_set_display_count(struct clk_mgr_internal *clk_mgr, int display_count) +void rn_vbios_smu_set_dcn_low_power_state(struct clk_mgr_internal *clk_mgr, enum dcn_pwr_state state) { + int disp_count; + + if (state == DCN_PWR_STATE_LOW_POWER) + disp_count = 0; + else + disp_count = 1; + rn_vbios_smu_send_msg_with_param( - clk_mgr, - VBIOSSMC_MSG_SetDisplayCount, - display_count); + clk_mgr, + VBIOSSMC_MSG_SetDisplayCount, + disp_count); } -void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr) +void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable) { rn_vbios_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown, - 0); + enable); } void rn_vbios_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h index da3a49487c6d..ccc01879c9d4 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h @@ -33,8 +33,8 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz); void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz); int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz); -void rn_vbios_smu_set_display_count(struct clk_mgr_internal *clk_mgr, int display_count); -void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr); +void rn_vbios_smu_set_dcn_low_power_state(struct clk_mgr_internal *clk_mgr, int display_count); +void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable); void rn_vbios_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr); #endif /* DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 4b8819c27fcd..32f31bf91915 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -194,7 +194,7 @@ static bool create_links( } } - if (!should_destory_link) { + if (dc->config.force_enum_edp || !should_destory_link) { dc->links[dc->link_count] = link; link->dc = dc; ++dc->link_count; @@ -411,6 +411,27 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, return false; } +void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream, + enum dc_dynamic_expansion option) +{ + /* OPP FMT dyn expansion updates*/ + int i = 0; + struct pipe_ctx *pipe_ctx; + + for (i = 0; i < MAX_PIPES; i++) { + if (dc->current_state->res_ctx.pipe_ctx[i].stream + == stream) { + pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + pipe_ctx->stream_res.opp->dyn_expansion = option; + pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion( + pipe_ctx->stream_res.opp, + COLOR_SPACE_YCBCR601, + stream->timing.display_color_depth, + stream->signal); + } + } +} + void dc_stream_set_dither_option(struct dc_stream_state *stream, enum dc_dither_option option) { @@ -765,8 +786,13 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) #if defined(CONFIG_DRM_AMD_DC_DCN2_0) disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); #endif - dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context); + if 
(dc->hwss.apply_ctx_for_surface) + dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context); } +#if defined(CONFIG_DRM_AMD_DC_DCN2_0) + if (dc->hwss.program_front_end_for_ctx) + dc->hwss.program_front_end_for_ctx(dc, dangling_context); +#endif } current_ctx = dc->current_state; @@ -789,9 +815,6 @@ struct dc *dc_create(const struct dc_init_data *init_params) if (false == construct(dc, init_params)) goto construct_fail; - /*TODO: separate HW and SW initialization*/ - dc->hwss.init_hw(dc); - full_pipe_count = dc->res_pool->pipe_count; if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE) full_pipe_count--; @@ -824,9 +847,24 @@ alloc_fail: return NULL; } +void dc_hardware_init(struct dc *dc) +{ + dc->hwss.init_hw(dc); +} + void dc_init_callbacks(struct dc *dc, const struct dc_callback_init *init_params) { +#ifdef CONFIG_DRM_AMD_DC_HDCP + dc->ctx->cp_psp = init_params->cp_psp; +#endif +} + +void dc_deinit_callbacks(struct dc *dc) +{ +#ifdef CONFIG_DRM_AMD_DC_HDCP + memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp)); +#endif } void dc_destroy(struct dc **dc) @@ -905,15 +943,11 @@ static void program_timing_sync( /* set first pipe with plane as master */ for (j = 0; j < group_size; j++) { - struct pipe_ctx *temp; - if (pipe_set[j]->plane_state) { if (j == 0) break; - temp = pipe_set[0]; - pipe_set[0] = pipe_set[j]; - pipe_set[j] = temp; + swap(pipe_set[0], pipe_set[j]); break; } } @@ -970,40 +1004,87 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc, struct dc_crtc_timing *crtc_timing) { struct timing_generator *tg; + struct stream_encoder *se = NULL; + + struct dc_crtc_timing hw_crtc_timing = {0}; + struct dc_link *link = sink->link; - unsigned int enc_inst, tg_inst; + unsigned int i, enc_inst, tg_inst = 0; + + // Seamless port only support single DP and EDP so far + if (sink->sink_signal != SIGNAL_TYPE_DISPLAY_PORT && + sink->sink_signal != SIGNAL_TYPE_EDP) + return false; /* Check for enabled DIG to identify enabled display */ if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) return false; - /* Check for which front end is used by this encoder. - * Note the inst is 1 indexed, where 0 is undefined. - * Note that DIG_FE can source from different OTG but our - * current implementation always map 1-to-1, so this code makes - * the same assumption and doesn't check OTG source. 
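The dc_create()/dc_hardware_init() split earlier in this hunk is a two-phase initialization: construct() builds pure software state, and register programming is deferred until the driver explicitly calls dc_hardware_init(), which matters when VBIOS has already lit a display for seamless boot. A sketch of the pattern, with hypothetical names:

#include <stdlib.h>

struct device_sw {
        int pipe_count;         /* software state only */
        int hw_initialized;
};

static struct device_sw *device_create(int pipes)
{
        struct device_sw *d = calloc(1, sizeof(*d));

        if (!d)
                return NULL;
        d->pipe_count = pipes;  /* phase 1: no register access here */
        return d;
}

static void device_hardware_init(struct device_sw *d)
{
        d->hw_initialized = 1;  /* phase 2: the only place registers get touched */
}
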
- */ enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); - /* Instance should be within the range of the pool */ - if (enc_inst >= dc->res_pool->pipe_count) + if (enc_inst == ENGINE_ID_UNKNOWN) return false; - if (enc_inst >= dc->res_pool->stream_enc_count) - return false; + for (i = 0; i < dc->res_pool->stream_enc_count; i++) { + if (dc->res_pool->stream_enc[i]->id == enc_inst) { + + se = dc->res_pool->stream_enc[i]; + + tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg( + dc->res_pool->stream_enc[i]); + break; + } + } - tg_inst = dc->res_pool->stream_enc[enc_inst]->funcs->dig_source_otg( - dc->res_pool->stream_enc[enc_inst]); + // tg_inst not found + if (i == dc->res_pool->stream_enc_count) + return false; if (tg_inst >= dc->res_pool->timing_generator_count) return false; tg = dc->res_pool->timing_generators[tg_inst]; - if (!tg->funcs->is_matching_timing) + if (!tg->funcs->get_hw_timing) + return false; + + if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing)) + return false; + + if (crtc_timing->h_total != hw_crtc_timing.h_total) + return false; + + if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left) return false; - if (!tg->funcs->is_matching_timing(tg, crtc_timing)) + if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable) + return false; + + if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right) + return false; + + if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch) + return false; + + if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width) + return false; + + if (crtc_timing->v_total != hw_crtc_timing.v_total) + return false; + + if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top) + return false; + + if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable) + return false; + + if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom) + return false; + + if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch) + return false; + + if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width) return false; if (dc_is_dp_signal(link->connector_signal)) { @@ -1016,6 +1097,20 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc, if (crtc_timing->pix_clk_100hz != pix_clk_100hz) return false; + if (!se->funcs->dp_get_pixel_format) + return false; + + if (!se->funcs->dp_get_pixel_format( + se, + &hw_crtc_timing.pixel_encoding, + &hw_crtc_timing.display_color_depth)) + return false; + + if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth) + return false; + + if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding) + return false; } return true; @@ -1077,15 +1172,20 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c /* re-program planes for existing stream, in case we need to * free up plane resource for later use */ - for (i = 0; i < context->stream_count; i++) { - if (context->streams[i]->mode_changed) - continue; + if (dc->hwss.apply_ctx_for_surface) + for (i = 0; i < context->stream_count; i++) { + if (context->streams[i]->mode_changed) + continue; - dc->hwss.apply_ctx_for_surface( - dc, context->streams[i], - context->stream_status[i].plane_count, - context); /* use new pipe config in new context */ - } + dc->hwss.apply_ctx_for_surface( + dc, context->streams[i], + context->stream_status[i].plane_count, + context); /* use new pipe config in new context */ + } +#if defined(CONFIG_DRM_AMD_DC_DCN2_0) + if (dc->hwss.program_front_end_for_ctx) + dc->hwss.program_front_end_for_ctx(dc, context); +#endif /* Program 
hardware */ for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -1104,16 +1204,21 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c } /* Program all planes within new context*/ +#if defined(CONFIG_DRM_AMD_DC_DCN2_0) + if (dc->hwss.program_front_end_for_ctx) + dc->hwss.program_front_end_for_ctx(dc, context); +#endif for (i = 0; i < context->stream_count; i++) { const struct dc_link *link = context->streams[i]->link; if (!context->streams[i]->mode_changed) continue; - dc->hwss.apply_ctx_for_surface( - dc, context->streams[i], - context->stream_status[i].plane_count, - context); + if (dc->hwss.apply_ctx_for_surface) + dc->hwss.apply_ctx_for_surface( + dc, context->streams[i], + context->stream_status[i].plane_count, + context); /* * enable stereo @@ -1140,15 +1245,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c dc_enable_stereo(dc, context, dc_streams, context->stream_count); - if (!dc->optimize_seamless_boot) - /* pplib is notified if disp_num changed */ - dc->hwss.optimize_bandwidth(dc, context); - for (i = 0; i < context->stream_count; i++) context->streams[i]->mode_changed = false; - memset(&context->commit_hints, 0, sizeof(context->commit_hints)); - dc_release_state(dc->current_state); dc->current_state = context; @@ -1496,20 +1595,15 @@ static enum surface_update_type det_surface_update(const struct dc *dc, enum surface_update_type overall_type = UPDATE_TYPE_FAST; union surface_update_flags *update_flags = &u->surface->update_flags; - update_flags->raw = 0; // Reset all flags - if (u->flip_addr) update_flags->bits.addr_update = 1; - if (!is_surface_in_context(context, u->surface)) { - update_flags->bits.new_plane = 1; + if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) { + update_flags->raw = 0xFFFFFFFF; return UPDATE_TYPE_FULL; } - if (u->surface->force_full_update) { - update_flags->bits.full_update = 1; - return UPDATE_TYPE_FULL; - } + update_flags->raw = 0; // Reset all flags type = get_plane_info_update_type(u); elevate_update_type(&overall_type, type); @@ -1567,40 +1661,43 @@ static enum surface_update_type check_update_surfaces_for_stream( enum surface_update_type overall_type = UPDATE_TYPE_FAST; if (stream_status == NULL || stream_status->plane_count != surface_count) - return UPDATE_TYPE_FULL; + overall_type = UPDATE_TYPE_FULL; /* some stream updates require passive update */ if (stream_update) { - if ((stream_update->src.height != 0) && - (stream_update->src.width != 0)) - return UPDATE_TYPE_FULL; + union stream_update_flags *su_flags = &stream_update->stream->update_flags; - if ((stream_update->dst.height != 0) && - (stream_update->dst.width != 0)) - return UPDATE_TYPE_FULL; + if ((stream_update->src.height != 0 && stream_update->src.width != 0) || + (stream_update->dst.height != 0 && stream_update->dst.width != 0)) + su_flags->bits.scaling = 1; if (stream_update->out_transfer_func) - return UPDATE_TYPE_FULL; + su_flags->bits.out_tf = 1; if (stream_update->abm_level) - return UPDATE_TYPE_FULL; + su_flags->bits.abm_level = 1; if (stream_update->dpms_off) - return UPDATE_TYPE_FULL; + su_flags->bits.dpms_off = 1; + + if (stream_update->gamut_remap) + su_flags->bits.gamut_remap = 1; #if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (stream_update->wb_update) - return UPDATE_TYPE_FULL; + su_flags->bits.wb_update = 1; #endif + if (su_flags->raw != 0) + overall_type = UPDATE_TYPE_FULL; + + if (stream_update->output_csc_transform || stream_update->output_color_space) + 
su_flags->bits.out_csc = 1; } for (i = 0 ; i < surface_count; i++) { enum surface_update_type type = det_surface_update(dc, &updates[i]); - if (type == UPDATE_TYPE_FULL) - return type; - elevate_update_type(&overall_type, type); } @@ -1622,16 +1719,29 @@ enum surface_update_type dc_check_update_surfaces_for_stream( int i; enum surface_update_type type; + if (stream_update) + stream_update->stream->update_flags.raw = 0; for (i = 0; i < surface_count; i++) updates[i].surface->update_flags.raw = 0; type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status); - if (type == UPDATE_TYPE_FULL) + if (type == UPDATE_TYPE_FULL) { + if (stream_update) + stream_update->stream->update_flags.raw = 0xFFFFFFFF; for (i = 0; i < surface_count; i++) updates[i].surface->update_flags.raw = 0xFFFFFFFF; + } - if (type == UPDATE_TYPE_FAST && memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) - dc->optimized_required = true; + if (type == UPDATE_TYPE_FAST) { + // If there's an available clock comparator, we use that. + if (dc->clk_mgr->funcs->are_clock_states_equal) { + if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk)) + dc->optimized_required = true; + // Else we fallback to mem compare. + } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) { + dc->optimized_required = true; + } + } return type; } @@ -1872,6 +1982,7 @@ static void commit_planes_do_stream_update(struct dc *dc, struct dc_state *context) { int j; + bool should_program_abm; // Stream updates for (j = 0; j < dc->res_pool->pipe_count; j++) { @@ -1952,14 +2063,21 @@ static void commit_planes_do_stream_update(struct dc *dc, } if (stream_update->abm_level && pipe_ctx->stream_res.abm) { - if (pipe_ctx->stream_res.tg->funcs->is_blanked) { - // if otg funcs defined check if blanked before programming - if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) + should_program_abm = true; + + // if otg funcs defined check if blanked before programming + if (pipe_ctx->stream_res.tg->funcs->is_blanked) + if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) + should_program_abm = false; + + if (should_program_abm) { + if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) { + pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm); + } else { pipe_ctx->stream_res.abm->funcs->set_abm_level( pipe_ctx->stream_res.abm, stream->abm_level); - } else - pipe_ctx->stream_res.abm->funcs->set_abm_level( - pipe_ctx->stream_res.abm, stream->abm_level); + } + } } } } @@ -2004,7 +2122,13 @@ static void commit_planes_for_stream(struct dc *dc, * In case of turning off screen, no need to program front end a second time. * just return after program blank. 
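When a clock manager provides no are_clock_states_equal() hook, the fallback above memcmp()s only the leading members of struct dc_clocks, stopping at offsetof(..., prev_p_state_change_support) because everything from that member on is excluded from optimization decisions. The technique depends on member order; a small demonstration with an invented struct:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct clocks {
    int dispclk_khz;
    int dppclk_khz;
    bool p_state_change_support;
    /* members from here on are not compared */
    bool prev_p_state_change_support;
};

static bool clocks_equal_compared_part(const struct clocks *a,
                                       const struct clocks *b)
{
    /* compare only the byte range before the first excluded member */
    return memcmp(a, b, offsetof(struct clocks,
                                 prev_p_state_change_support)) == 0;
}

int main(void)
{
    struct clocks cur  = { 600000, 400000, true, false };
    struct clocks next = cur;

    next.prev_p_state_change_support = true;  /* ignored by the compare */
    printf("%d\n", clocks_equal_compared_part(&cur, &next)); /* 1 */

    next.dppclk_khz = 540000;                 /* compared */
    printf("%d\n", clocks_equal_compared_part(&cur, &next)); /* 0 */
    return 0;
}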
*/ - dc->hwss.apply_ctx_for_surface(dc, stream, 0, context); + if (dc->hwss.apply_ctx_for_surface) + dc->hwss.apply_ctx_for_surface(dc, stream, 0, context); +#if defined(CONFIG_DRM_AMD_DC_DCN2_0) + if (dc->hwss.program_front_end_for_ctx) + dc->hwss.program_front_end_for_ctx(dc, context); +#endif + return; } @@ -2064,10 +2188,15 @@ static void commit_planes_for_stream(struct dc *dc, stream_status = stream_get_status(context, pipe_ctx->stream); - dc->hwss.apply_ctx_for_surface( + if (dc->hwss.apply_ctx_for_surface) + dc->hwss.apply_ctx_for_surface( dc, pipe_ctx->stream, stream_status->plane_count, context); } } +#if defined(CONFIG_DRM_AMD_DC_DCN2_0) + if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) + dc->hwss.program_front_end_for_ctx(dc, context); +#endif // Update Type FAST, Surface updates if (update_type == UPDATE_TYPE_FAST) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index ca20b150afcc..12ba6fdf89b7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -79,7 +79,6 @@ static void destruct(struct dc_link *link) int i; if (link->hpd_gpio != NULL) { - dal_gpio_close(link->hpd_gpio); dal_gpio_destroy_irq(&link->hpd_gpio); link->hpd_gpio = NULL; } @@ -520,7 +519,7 @@ static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *lin } -static void read_edp_current_link_settings_on_detect(struct dc_link *link) +static void read_current_link_settings_on_detect(struct dc_link *link) { union lane_count_set lane_count_set = { {0} }; uint8_t link_bw_set; @@ -555,17 +554,23 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link) &link_bw_set, sizeof(link_bw_set)); if (link_bw_set == 0) { - /* If standard link rates are not being used, - * Read DPCD 00115h to find the link rate set used - */ - core_link_read_dpcd(link, DP_LINK_RATE_SET, - &link_rate_set, sizeof(link_rate_set)); - - if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { - link->cur_link_settings.link_rate = - link->dpcd_caps.edp_supported_link_rates[link_rate_set]; - link->cur_link_settings.link_rate_set = link_rate_set; - link->cur_link_settings.use_link_rate_set = true; + if (link->connector_signal == SIGNAL_TYPE_EDP) { + /* If standard link rates are not being used, + * Read DPCD 00115h to find the edp link rate set used + */ + core_link_read_dpcd(link, DP_LINK_RATE_SET, + &link_rate_set, sizeof(link_rate_set)); + + // edp_supported_link_rates_count = 0 for DP + if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { + link->cur_link_settings.link_rate = + link->dpcd_caps.edp_supported_link_rates[link_rate_set]; + link->cur_link_settings.link_rate_set = link_rate_set; + link->cur_link_settings.use_link_rate_set = true; + } + } else { + // Link Rate not found. Seamless boot may not work. + ASSERT(false); } } else { link->cur_link_settings.link_rate = link_bw_set; @@ -680,7 +685,7 @@ static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid) return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0); } -bool wait_for_alt_mode(struct dc_link *link) +static bool wait_for_alt_mode(struct dc_link *link) { /** @@ -738,7 +743,8 @@ bool wait_for_alt_mode(struct dc_link *link) * This does not create remote sinks but will trigger DM * to start MST detection if a branch is detected. 
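In the eDP branch above, DPCD 00115h (DP_LINK_RATE_SET) is an index into the sink's table of supported link rates, and the bounds check doubles as a DP guard because edp_supported_link_rates_count remains 0 for non-eDP sinks. A compact sketch of that guarded lookup; the rate values are illustrative, in the DPCD's 200 kHz units:

#include <stdint.h>
#include <stdio.h>

#define MAX_EDP_RATES 8

int main(void)
{
    /* rates as an eDP sink might report them (8100 * 200 kHz = 1.62 Gbps) */
    uint16_t edp_supported_link_rates[MAX_EDP_RATES] = { 8100, 10800, 13500 };
    uint8_t  edp_supported_link_rates_count = 3; /* stays 0 on external DP */
    uint8_t  link_rate_set = 1;                  /* read from DPCD 00115h */

    if (link_rate_set < edp_supported_link_rates_count)
        printf("using table rate %u\n",
               (unsigned)edp_supported_link_rates[link_rate_set]);
    else
        printf("index %u out of range, cannot reuse firmware link rate\n",
               (unsigned)link_rate_set);
    return 0;
}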
*/ -bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) +static bool dc_link_detect_helper(struct dc_link *link, + enum dc_detect_reason reason) { struct dc_sink_init_data sink_init_data = { 0 }; struct display_sink_capability sink_caps = { 0 }; @@ -753,6 +759,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) struct dpcd_caps prev_dpcd_caps; bool same_dpcd = true; enum dc_connection_type new_connection_type = dc_connection_none; + bool perform_dp_seamless_boot = false; + DC_LOGGER_INIT(link->ctx->logger); if (dc_is_virtual_signal(link->connector_signal)) @@ -809,15 +817,15 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) } case SIGNAL_TYPE_EDP: { - read_edp_current_link_settings_on_detect(link); + read_current_link_settings_on_detect(link); detect_edp_sink_caps(link); - sink_caps.transaction_type = - DDC_TRANSACTION_TYPE_I2C_OVER_AUX; + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; sink_caps.signal = SIGNAL_TYPE_EDP; break; } case SIGNAL_TYPE_DISPLAY_PORT: { + /* wa HPD high coming too early*/ if (link->link_enc->features.flags.bits.DP_IS_USB_C == 1) { @@ -865,12 +873,24 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) * empty which leads to allocate_mst_payload() has "0" * pbn_per_slot value leading to exception on dc_fixpt_div() */ - link->verified_link_cap = link->reported_link_cap; + dp_verify_mst_link_cap(link); + if (prev_sink != NULL) dc_sink_release(prev_sink); return false; } + // For seamless boot, to skip verify link cap, we read UEFI settings and set them as verified. + if (reason == DETECT_REASON_BOOT && + dc_ctx->dc->config.power_down_display_on_boot == false && + link->link_status.link_active == true) + perform_dp_seamless_boot = true; + + if (perform_dp_seamless_boot) { + read_current_link_settings_on_detect(link); + link->verified_link_cap = link->reported_link_cap; + } + break; } @@ -955,10 +975,11 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) * two link trainings */ - /* deal with non-mst cases */ - dp_verify_link_cap_with_retries(link, - &link->reported_link_cap, - LINK_TRAINING_MAX_VERIFY_RETRY); + // verify link cap for SST non-seamless boot + if (!perform_dp_seamless_boot) + dp_verify_link_cap_with_retries(link, + &link->reported_link_cap, + LINK_TRAINING_MAX_VERIFY_RETRY); } else { // If edid is the same, then discard new sink and revert back to original sink if (same_edid) { @@ -1047,6 +1068,23 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) dc_sink_release(prev_sink); return true; + +} + +bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) +{ + const struct dc *dc = link->dc; + bool ret; + + /* get out of low power state */ + clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr); + + ret = dc_link_detect_helper(link, reason); + + /* Go back to power optimized state */ + clk_mgr_optimize_pwr_state(dc, dc->clk_mgr); + + return ret; } bool dc_link_get_hpd_state(struct dc_link *dc_link) @@ -1492,7 +1530,7 @@ static enum dc_status enable_link_dp( pipe_ctx->stream_res.pix_clk_params.requested_sym_clk = link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; - if (!apply_seamless_boot_optimization) + if (state->clk_mgr && !apply_seamless_boot_optimization) state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false); dp_enable_link_phy( @@ -2169,8 +2207,10 @@ static void disable_link(struct dc_link *link, enum signal_type signal) dp_set_fec_ready(link, false); } #endif - } else - 
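dc_link_detect() above is reduced to a wrapper that brackets the real detection: leave the optimized low-power clock state, run the helper with its many early returns, then restore the optimized state on every path. The shape of that bracket, with stand-in bodies:

#include <stdbool.h>
#include <stdio.h>

static void exit_optimized_pwr_state(void) { puts("clocks: full power"); }
static void optimize_pwr_state(void)       { puts("clocks: low power");  }

static bool detect_helper(void)
{
    /* the many early returns live here; none of them needs to
     * remember to restore the power state */
    return true;
}

static bool link_detect(void)
{
    bool ret;

    exit_optimized_pwr_state();
    ret = detect_helper();
    optimize_pwr_state(); /* runs on every path out of the helper */
    return ret;
}

int main(void)
{
    printf("detected: %d\n", link_detect());
    return 0;
}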
link->link_enc->funcs->disable_output(link->link_enc, signal); + } else { + if (signal != SIGNAL_TYPE_VIRTUAL) + link->link_enc->funcs->disable_output(link->link_enc, signal); + } if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { /* MST disable link only when no stream use the link */ @@ -2217,7 +2257,7 @@ static bool dp_active_dongle_validate_timing( break; } - if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER || + if (dpcd_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER || dongle_caps->extendedCapValid == false) return true; @@ -2381,17 +2421,206 @@ bool dc_link_set_abm_disable(const struct dc_link *link) return true; } -bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait) +bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool wait) { struct dc *core_dc = link->ctx->dc; struct dmcu *dmcu = core_dc->res_pool->dmcu; - if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_enabled) - dmcu->funcs->set_psr_enable(dmcu, enable, wait); + + + if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled) + dmcu->funcs->set_psr_enable(dmcu, allow_active, wait); + + link->psr_allow_active = allow_active; return true; } +bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state) +{ + struct dc *core_dc = link->ctx->dc; + struct dmcu *dmcu = core_dc->res_pool->dmcu; + + if (dmcu != NULL && link->psr_feature_enabled) + dmcu->funcs->get_psr_state(dmcu, psr_state); + + return true; +} + +static inline enum physical_phy_id +transmitter_to_phy_id(enum transmitter transmitter_value) +{ + switch (transmitter_value) { + case TRANSMITTER_UNIPHY_A: + return PHYLD_0; + case TRANSMITTER_UNIPHY_B: + return PHYLD_1; + case TRANSMITTER_UNIPHY_C: + return PHYLD_2; + case TRANSMITTER_UNIPHY_D: + return PHYLD_3; + case TRANSMITTER_UNIPHY_E: + return PHYLD_4; + case TRANSMITTER_UNIPHY_F: + return PHYLD_5; + case TRANSMITTER_NUTMEG_CRT: + return PHYLD_6; + case TRANSMITTER_TRAVIS_CRT: + return PHYLD_7; + case TRANSMITTER_TRAVIS_LCD: + return PHYLD_8; + case TRANSMITTER_UNIPHY_G: + return PHYLD_9; + case TRANSMITTER_COUNT: + return PHYLD_COUNT; + case TRANSMITTER_UNKNOWN: + return PHYLD_UNKNOWN; + default: + WARN_ONCE(1, "Unknown transmitter value %d\n", + transmitter_value); + return PHYLD_UNKNOWN; + } +} + +bool dc_link_setup_psr(struct dc_link *link, + const struct dc_stream_state *stream, struct psr_config *psr_config, + struct psr_context *psr_context) +{ + struct dc *core_dc; + struct dmcu *dmcu; + int i; + /* updateSinkPsrDpcdConfig*/ + union dpcd_psr_configuration psr_configuration; + + psr_context->controllerId = CONTROLLER_ID_UNDEFINED; + + if (!link) + return false; + + core_dc = link->ctx->dc; + dmcu = core_dc->res_pool->dmcu; + + if (!dmcu) + return false; + + + memset(&psr_configuration, 0, sizeof(psr_configuration)); + + psr_configuration.bits.ENABLE = 1; + psr_configuration.bits.CRC_VERIFICATION = 1; + psr_configuration.bits.FRAME_CAPTURE_INDICATION = + psr_config->psr_frame_capture_indication_req; + + /* Check for PSR v2*/ + if (psr_config->psr_version == 0x2) { + /* For PSR v2 selective update. + * Indicates whether sink should start capturing + * immediately following active scan line, + * or starting with the 2nd active scan line. + */ + psr_configuration.bits.LINE_CAPTURE_INDICATION = 0; + /*For PSR v2, determines whether Sink should generate + * IRQ_HPD when CRC mismatch is detected. 
+ */ + psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR = 1; + } + + dm_helpers_dp_write_dpcd( + link->ctx, + link, + 368, + &psr_configuration.raw, + sizeof(psr_configuration.raw)); + + psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel; + psr_context->transmitterId = link->link_enc->transmitter; + psr_context->engineId = link->link_enc->preferred_engine; + + for (i = 0; i < MAX_PIPES; i++) { + if (core_dc->current_state->res_ctx.pipe_ctx[i].stream + == stream) { + /* dmcu -1 for all controller id values, + * therefore +1 here + */ + psr_context->controllerId = + core_dc->current_state->res_ctx. + pipe_ctx[i].stream_res.tg->inst + 1; + break; + } + } + + /* Hardcoded for now. Can be Pcie or Uniphy (or Unknown)*/ + psr_context->phyType = PHY_TYPE_UNIPHY; + /*PhyId is associated with the transmitter id*/ + psr_context->smuPhyId = + transmitter_to_phy_id(link->link_enc->transmitter); + + psr_context->crtcTimingVerticalTotal = stream->timing.v_total; + psr_context->vsyncRateHz = div64_u64(div64_u64((stream-> + timing.pix_clk_100hz * 100), + stream->timing.v_total), + stream->timing.h_total); + + psr_context->psrSupportedDisplayConfig = true; + psr_context->psrExitLinkTrainingRequired = + psr_config->psr_exit_link_training_required; + psr_context->sdpTransmitLineNumDeadline = + psr_config->psr_sdp_transmit_line_num_deadline; + psr_context->psrFrameCaptureIndicationReq = + psr_config->psr_frame_capture_indication_req; + + psr_context->skipPsrWaitForPllLock = 0; /* only = 1 in KV */ + + psr_context->numberOfControllers = + link->dc->res_pool->timing_generator_count; + + psr_context->rfb_update_auto_en = true; + + /* 2 frames before enter PSR. */ + psr_context->timehyst_frames = 2; + /* half a frame + * (units in 100 lines, i.e. a value of 1 represents 100 lines) + */ + psr_context->hyst_lines = stream->timing.v_total / 2 / 100; + psr_context->aux_repeats = 10; + + psr_context->psr_level.u32all = 0; + +#if defined(CONFIG_DRM_AMD_DC_DCN1_0) + /*skip power down the single pipe since it blocks the cstate*/ + if (ASICREV_IS_RAVEN(link->ctx->asic_id.hw_internal_rev)) + psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true; +#endif + + /* SMU will perform additional powerdown sequence. + * For unsupported ASICs, set psr_level flag to skip PSR + * static screen notification to SMU. 
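psr_context->vsyncRateHz above is derived directly from the timing: the pixel clock in Hz divided by the pixels per frame, split into two 64-bit divisions because pix_clk_100hz * 100 can overflow 32 bits. The same arithmetic on a 1080p60 example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* 148.5 MHz pixel clock expressed in 100 Hz units */
    uint64_t pix_clk_100hz = 1485000;
    uint32_t h_total = 2200, v_total = 1125;

    /* pix_clk_100hz * 100 is the clock in Hz; the two divides mirror
     * the driver's div64_u64(div64_u64(...)) chain */
    uint64_t vsync_rate_hz = ((pix_clk_100hz * 100) / v_total) / h_total;

    printf("vsync rate: %llu Hz\n",
           (unsigned long long)vsync_rate_hz); /* 60 */
    return 0;
}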
+ * (Always set for DAL2, did not check ASIC) + */ + psr_context->allow_smu_optimizations = psr_config->allow_smu_optimizations; + + /* Complete PSR entry before aborting to prevent intermittent + * freezes on certain eDPs + */ + psr_context->psr_level.bits.DISABLE_PSR_ENTRY_ABORT = 1; + + /* Controls additional delay after remote frame capture before + * continuing power down, default = 0 + */ + psr_context->frame_delay = 0; + + link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context); + + /* psr_enabled == 0 indicates setup_psr did not succeed, but this + * should not happen since firmware should be running at this point + */ + if (link->psr_feature_enabled == 0) + ASSERT(0); + + return true; + +} + const struct dc_link_status *dc_link_get_status(const struct dc_link *link) { return &link->link_status; @@ -2510,7 +2739,7 @@ static void update_mst_stream_alloc_table( /* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table * because stream_encoder is not exposed to dm */ -static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx) +enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; @@ -2521,6 +2750,7 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx) struct fixed31_32 pbn; struct fixed31_32 pbn_per_slot; uint8_t i; + enum act_return_status ret; DC_LOGGER_INIT(link->ctx->logger); /* enable_link_dp_mst already check link->enabled_stream_count @@ -2568,14 +2798,16 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx) &link->mst_stream_alloc_table); /* send down message */ - dm_helpers_dp_mst_poll_for_allocation_change_trigger( + ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger( stream->ctx, stream); - dm_helpers_dp_mst_send_payload_allocation( - stream->ctx, - stream, - true); + if (ret != ACT_LINK_LOST) { + dm_helpers_dp_mst_send_payload_allocation( + stream->ctx, + stream, + true); + } /* slot X.Y for only current stream */ pbn_per_slot = get_pbn_per_slot(stream); @@ -2667,6 +2899,24 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) return DC_OK; } +#if defined(CONFIG_DRM_AMD_DC_HDCP) +static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) +{ + struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp; + if (cp_psp && cp_psp->funcs.update_stream_config) { + struct cp_psp_stream_config config; + + memset(&config, 0, sizeof(config)); + + config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst; + config.stream_enc_inst = (uint8_t) pipe_ctx->stream_res.stream_enc->id; + config.link_enc_inst = pipe_ctx->stream->link->link_enc_hw_inst; + config.dpms_off = dpms_off; + config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context; + cp_psp->funcs.update_stream_config(cp_psp->handle, &config); + } +} +#endif void core_link_enable_stream( struct dc_state *state, @@ -2677,6 +2927,10 @@ void core_link_enable_stream( enum dc_status status; DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); + if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) && + dc_is_virtual_signal(pipe_ctx->stream->signal)) + return; + if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) { stream->link->link_enc->funcs->setup( stream->link->link_enc, @@ -2727,6 +2981,9 @@ void core_link_enable_stream( /* Do not touch link on seamless boot optimization. 
*/ if (pipe_ctx->stream->apply_seamless_boot_optimization) { pipe_ctx->stream->dpms_off = false; +#if defined(CONFIG_DRM_AMD_DC_HDCP) + update_psp_stream_config(pipe_ctx, false); +#endif return; } @@ -2734,6 +2991,9 @@ void core_link_enable_stream( if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && apply_edp_fast_boot_optimization) { pipe_ctx->stream->dpms_off = false; +#if defined(CONFIG_DRM_AMD_DC_HDCP) + update_psp_stream_config(pipe_ctx, false); +#endif return; } @@ -2786,13 +3046,16 @@ void core_link_enable_stream( #endif if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) - allocate_mst_payload(pipe_ctx); + dc_link_allocate_mst_payload(pipe_ctx); core_dc->hwss.unblank_stream(pipe_ctx, &pipe_ctx->stream->link->cur_link_settings); if (dc_is_dp_signal(pipe_ctx->stream->signal)) enable_stream_features(pipe_ctx); +#if defined(CONFIG_DRM_AMD_DC_HDCP) + update_psp_stream_config(pipe_ctx, false); +#endif } #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT else { // if (IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) @@ -2810,6 +3073,14 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->sink->link; + if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) && + dc_is_virtual_signal(pipe_ctx->stream->signal)) + return; + +#if defined(CONFIG_DRM_AMD_DC_HDCP) + update_psp_stream_config(pipe_ctx, true); +#endif + core_dc->hwss.blank_stream(pipe_ctx); if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index 51991bf26a93..7f904d55c1bc 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -508,7 +508,7 @@ bool dal_ddc_service_query_ddc_data( uint8_t *read_buf, uint32_t read_size) { - bool ret; + bool ret = false; uint32_t payload_size = dal_ddc_service_is_in_aux_transaction_mode(ddc) ? 
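Every HDCP call site above goes through update_psp_stream_config(), which does nothing unless the display manager registered a callback; a zeroed config struct plus a guarded function pointer is the usual shape for such optional integration points. A minimal sketch with simplified types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct stream_config {
    uint8_t otg_inst;
    bool dpms_off;
};

struct cp_psp {
    void *handle;
    void (*update_stream_config)(void *handle, const struct stream_config *cfg);
};

static void dm_update(void *handle, const struct stream_config *cfg)
{
    (void)handle;
    printf("psp notified: otg=%u dpms_off=%d\n",
           (unsigned)cfg->otg_inst, (int)cfg->dpms_off);
}

static void update_psp_stream_config(const struct cp_psp *cp_psp, bool dpms_off)
{
    struct stream_config config;

    if (!cp_psp || !cp_psp->update_stream_config)
        return; /* HDCP module not registered: silently skip */

    memset(&config, 0, sizeof(config)); /* unset fields stay zero */
    config.otg_inst = 1;
    config.dpms_off = dpms_off;
    cp_psp->update_stream_config(cp_psp->handle, &config);
}

int main(void)
{
    struct cp_psp psp = { NULL, dm_update };

    update_psp_stream_config(&psp, false);
    update_psp_stream_config(NULL, true); /* no-op */
    return 0;
}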
DEFAULT_AUX_MAX_DATA_SIZE : EDID_SEGMENT_SIZE; @@ -527,34 +527,32 @@ bool dal_ddc_service_query_ddc_data( /*TODO: len of payload data for i2c and aux is uint8!!!!, * but we want to read 256 over i2c!!!!*/ if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) { - struct aux_payload write_payload = { - .i2c_over_aux = true, - .write = true, - .mot = true, - .address = address, - .length = write_size, - .data = write_buf, - .reply = NULL, - .defer_delay = get_defer_delay(ddc), - }; - - struct aux_payload read_payload = { - .i2c_over_aux = true, - .write = false, - .mot = false, - .address = address, - .length = read_size, - .data = read_buf, - .reply = NULL, - .defer_delay = get_defer_delay(ddc), - }; - - ret = dc_link_aux_transfer_with_retries(ddc, &write_payload); + struct aux_payload payload; + bool read_available = true; + + payload.i2c_over_aux = true; + payload.address = address; + payload.reply = NULL; + payload.defer_delay = get_defer_delay(ddc); + + if (write_size != 0) { + payload.write = true; + payload.mot = false; + payload.length = write_size; + payload.data = write_buf; + + ret = dal_ddc_submit_aux_command(ddc, &payload); + read_available = ret; + } - if (!ret) - return false; + if (read_size != 0 && read_available) { + payload.write = false; + payload.mot = false; + payload.length = read_size; + payload.data = read_buf; - ret = dc_link_aux_transfer_with_retries(ddc, &read_payload); + ret = dal_ddc_submit_aux_command(ddc, &payload); + } } else { struct i2c_payloads *payloads = dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num); @@ -585,6 +583,41 @@ bool dal_ddc_service_query_ddc_data( return ret; } +bool dal_ddc_submit_aux_command(struct ddc_service *ddc, + struct aux_payload *payload) +{ + uint8_t retrieved = 0; + bool ret = 0; + + if (!ddc) + return false; + + if (!payload) + return false; + + do { + struct aux_payload current_payload; + bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) > + payload->length ? true : false; + + current_payload.address = payload->address; + current_payload.data = &payload->data[retrieved]; + current_payload.defer_delay = payload->defer_delay; + current_payload.i2c_over_aux = payload->i2c_over_aux; + current_payload.length = is_end_of_payload ? + payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE; + current_payload.mot = !is_end_of_payload; + current_payload.reply = payload->reply; + current_payload.write = payload->write; + + ret = dc_link_aux_transfer_with_retries(ddc, &current_payload); + + retrieved += current_payload.length; + } while (retrieved < payload->length && ret == true); + + return ret; +} + /* dc_link_aux_transfer_raw() - Attempt to transfer * the given aux payload. This function does not perform * retries or handle error states.
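dal_ddc_submit_aux_command() above splits a large payload into DEFAULT_AUX_MAX_DATA_SIZE pieces and keeps the I2C-over-AUX MOT (middle-of-transaction) bit set on every chunk except the last, so the sink sees one continuous transaction. The loop in standalone form, with a stub in place of dc_link_aux_transfer_with_retries():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define AUX_MAX_DATA 16 /* a DP AUX transaction carries at most 16 data bytes */

static bool transfer_chunk(uint8_t *data, uint8_t len, bool mot)
{
    (void)data;
    printf("chunk: %u bytes, mot=%d\n", (unsigned)len, (int)mot);
    return true; /* stand-in for the real retrying transfer */
}

static bool submit_aux_command(uint8_t *data, uint8_t length)
{
    uint8_t retrieved = 0;
    bool ret = false;

    do {
        bool last = (retrieved + AUX_MAX_DATA) > length;
        uint8_t len = last ? length - retrieved : AUX_MAX_DATA;

        /* MOT stays set on all but the final chunk */
        ret = transfer_chunk(&data[retrieved], len, !last);
        retrieved += len;
    } while (retrieved < length && ret);

    return ret;
}

int main(void)
{
    uint8_t edid[40] = { 0 };

    return submit_aux_command(edid, sizeof(edid)) ? 0 : 1;
}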
The reply is returned @@ -613,6 +646,20 @@ bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc, return dce_aux_transfer_with_retries(ddc, payload); } + +enum dc_status dc_link_aux_configure_timeout(struct ddc_service *ddc, + uint32_t timeout) +{ + enum dc_status status = DC_OK; + struct ddc *ddc_pin = ddc->ddc_pin; + + if (ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout == NULL) + return DC_ERROR_UNEXPECTED; + if (!ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout)) + status = DC_ERROR_UNEXPECTED; + return status; +} + /*test only function*/ void dal_ddc_service_set_ddc_pin( struct ddc_service *ddc_service, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index f5742719b5d9..0f59b68aa4c2 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1409,6 +1409,9 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link) if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE) max_link_cap.link_rate = LINK_RATE_HIGH3; + if (link->link_enc->funcs->get_max_link_cap) + link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap); + /* Lower link settings based on sink's link cap */ if (link->reported_link_cap.lane_count < max_link_cap.lane_count) max_link_cap.lane_count = @@ -1653,11 +1656,14 @@ bool dp_verify_link_cap_with_retries( for (i = 0; i < attempts; i++) { int fail_count = 0; - enum dc_connection_type type; + enum dc_connection_type type = dc_connection_none; memset(&link->verified_link_cap, 0, sizeof(struct dc_link_settings)); - if (!dc_link_detect_sink(link, &type)) { + if (!dc_link_detect_sink(link, &type) || type == dc_connection_none) { + link->verified_link_cap.lane_count = LANE_COUNT_ONE; + link->verified_link_cap.link_rate = LINK_RATE_LOW; + link->verified_link_cap.link_spread = LINK_SPREAD_DISABLED; break; } else if (dp_verify_link_cap(link, &link->reported_link_cap, @@ -1670,6 +1676,19 @@ bool dp_verify_link_cap_with_retries( return success; } +bool dp_verify_mst_link_cap( + struct dc_link *link) +{ + struct dc_link_settings max_link_cap = {0}; + + max_link_cap = get_max_link_cap(link); + link->verified_link_cap = get_common_supported_link_settings( + link->reported_link_cap, + max_link_cap); + + return true; +} + static struct dc_link_settings get_common_supported_link_settings( struct dc_link_settings link_setting_a, struct dc_link_settings link_setting_b) @@ -2057,11 +2076,11 @@ static bool allow_hpd_rx_irq(const struct dc_link *link) return false; } -static bool handle_hpd_irq_psr_sink(const struct dc_link *link) +static bool handle_hpd_irq_psr_sink(struct dc_link *link) { union dpcd_psr_configuration psr_configuration; - if (!link->psr_enabled) + if (!link->psr_feature_enabled) return false; dm_helpers_dp_read_dpcd( @@ -2100,8 +2119,8 @@ static bool handle_hpd_irq_psr_sink(const struct dc_link *link) sizeof(psr_error_status.raw)); /* PSR error, disable and re-enable PSR */ - dc_link_set_psr_enable(link, false, true); - dc_link_set_psr_enable(link, true, true); + dc_link_set_psr_allow_active(link, false, true); + dc_link_set_psr_allow_active(link, true, true); return true; } else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS == @@ -2364,6 +2383,8 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd enum dc_status result; bool status = false; + struct pipe_ctx *pipe_ctx; + int i; if (out_link_loss) 
*out_link_loss = false; @@ -2440,6 +2461,15 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd &link->cur_link_settings, true, LINK_TRAINING_ATTEMPTS); + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link && + pipe_ctx->stream->dpms_off == false && + pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + dc_link_allocate_mst_payload(pipe_ctx); + } + } + status = false; if (out_link_loss) *out_link_loss = true; @@ -2545,6 +2575,7 @@ static void get_active_converter_info( uint8_t data, struct dc_link *link) { union dp_downstream_port_present ds_port = { .byte = data }; + memset(&link->dpcd_caps.dongle_caps, 0, sizeof(link->dpcd_caps.dongle_caps)); /* decode converter info*/ if (!ds_port.fields.PORT_PRESENT) { @@ -2691,6 +2722,7 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, * keep receiver powered all the time.*/ case DP_BRANCH_DEVICE_ID_0010FA: case DP_BRANCH_DEVICE_ID_0080E1: + case DP_BRANCH_DEVICE_ID_00E04C: link->wa_flags.dp_keep_receiver_powered = true; break; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index 79438c4f1e20..a519dbc5ecb6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -277,7 +277,8 @@ void dp_retrain_link_dp_test(struct dc_link *link, if (pipes[i].stream != NULL && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe && pipes[i].stream->link != NULL && - pipes[i].stream_res.stream_enc != NULL) { + pipes[i].stream_res.stream_enc != NULL && + pipes[i].stream->link == link) { udelay(100); pipes[i].stream_res.stream_enc->funcs->dp_blank( diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index f25ac17f47fa..37698305a2dc 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -951,7 +951,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx) data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c); } -static bool are_rect_integer_multiples(struct rect src, struct rect dest) +static bool are_rects_integer_multiples(struct rect src, struct rect dest) { if (dest.width >= src.width && dest.width % src.width == 0 && dest.height >= src.height && dest.height % src.height == 0) @@ -959,6 +959,38 @@ static bool are_rect_integer_multiples(struct rect src, struct rect dest) return false; } + +static void calculate_integer_scaling(struct pipe_ctx *pipe_ctx) +{ + if (!pipe_ctx->plane_state->scaling_quality.integer_scaling) + return; + + //for Centered Mode + if (pipe_ctx->stream->dst.width == pipe_ctx->stream->src.width && + pipe_ctx->stream->dst.height == pipe_ctx->stream->src.height) { + // calculate maximum # of replication of src onto addressable + unsigned int integer_multiple = min( + pipe_ctx->stream->timing.h_addressable / pipe_ctx->stream->src.width, + pipe_ctx->stream->timing.v_addressable / pipe_ctx->stream->src.height); + + //scale dst + pipe_ctx->stream->dst.width = integer_multiple * pipe_ctx->stream->src.width; + pipe_ctx->stream->dst.height = integer_multiple * pipe_ctx->stream->src.height; + + //center dst onto addressable + pipe_ctx->stream->dst.x = (pipe_ctx->stream->timing.h_addressable - pipe_ctx->stream->dst.width)/2; + pipe_ctx->stream->dst.y = (pipe_ctx->stream->timing.v_addressable - 
pipe_ctx->stream->dst.height)/2; + } + + //disable taps if src & dst are integer ratio + if (are_rects_integer_multiples(pipe_ctx->stream->src, pipe_ctx->stream->dst)) { + pipe_ctx->plane_state->scaling_quality.v_taps = 1; + pipe_ctx->plane_state->scaling_quality.h_taps = 1; + pipe_ctx->plane_state->scaling_quality.v_taps_c = 1; + pipe_ctx->plane_state->scaling_quality.h_taps_c = 1; + } +} + bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) { const struct dc_plane_state *plane_state = pipe_ctx->plane_state; @@ -972,6 +1004,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface( pipe_ctx->plane_state->format); + calculate_integer_scaling(pipe_ctx); + calculate_scaling_ratios(pipe_ctx); calculate_viewport(pipe_ctx); @@ -1002,13 +1036,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps( pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality); - if (res && - plane_state->scaling_quality.integer_scaling && - are_rect_integer_multiples(pipe_ctx->plane_res.scl_data.viewport, - pipe_ctx->plane_res.scl_data.recout)) { - pipe_ctx->plane_res.scl_data.taps.v_taps = 1; - pipe_ctx->plane_res.scl_data.taps.h_taps = 1; - } if (!res) { /* Try 24 bpp linebuffer */ @@ -1635,7 +1662,8 @@ static int acquire_first_free_pipe( static struct audio *find_first_free_audio( struct resource_context *res_ctx, const struct resource_pool *pool, - enum engine_id id) + enum engine_id id, + enum dce_version dc_version) { int i, available_audio_count; @@ -1854,28 +1882,28 @@ static int acquire_resource_from_hw_enabled_state( struct dc_stream_state *stream) { struct dc_link *link = stream->link; - unsigned int inst, tg_inst; + unsigned int i, inst, tg_inst = 0; /* Check for enabled DIG to identify enabled display */ if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) return -1; - /* Check for which front end is used by this encoder. - * Note the inst is 1 indexed, where 0 is undefined. - * Note that DIG_FE can source from different OTG but our - * current implementation always map 1-to-1, so this code makes - * the same assumption and doesn't check OTG source. - */ inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); - /* Instance should be within the range of the pool */ - if (inst >= pool->pipe_count) - return -1; + if (inst == ENGINE_ID_UNKNOWN) + return false; - if (inst >= pool->stream_enc_count) - return -1; + for (i = 0; i < pool->stream_enc_count; i++) { + if (pool->stream_enc[i]->id == inst) { + tg_inst = pool->stream_enc[i]->funcs->dig_source_otg( + pool->stream_enc[i]); + break; + } + } - tg_inst = pool->stream_enc[inst]->funcs->dig_source_otg(pool->stream_enc[inst]); + // tg_inst not found + if (i == pool->stream_enc_count) + return false; if (tg_inst >= pool->timing_generator_count) return false; @@ -1971,7 +1999,7 @@ enum dc_status resource_map_pool_resources( dc_is_audio_capable_signal(pipe_ctx->stream->signal) && stream->audio_info.mode_count && stream->audio_info.flags.all) { pipe_ctx->stream_res.audio = find_first_free_audio( - &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id); + &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id, dc_ctx->dce_version); /* * Audio assigned in order first come first get. 
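For the centered case, calculate_integer_scaling() above picks the largest whole-number replication of the source that still fits the addressable area, scales the destination by it, and re-centers; taps are then forced to 1 so no filtering blurs the replicated pixels. The core arithmetic:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
    return a < b ? a : b;
}

int main(void)
{
    /* 640x480 source on a 1920x1200 addressable timing */
    unsigned int src_w = 640, src_h = 480;
    unsigned int h_addr = 1920, v_addr = 1200;

    /* largest integer replication that fits both axes */
    unsigned int m = min_u(h_addr / src_w, v_addr / src_h); /* min(3, 2) = 2 */

    unsigned int dst_w = m * src_w, dst_h = m * src_h;      /* 1280x960 */
    unsigned int dst_x = (h_addr - dst_w) / 2;              /* 320 */
    unsigned int dst_y = (v_addr - dst_h) / 2;              /* 120 */

    printf("dst: %ux%u at (%u,%u)\n", dst_w, dst_h, dst_x, dst_y);
    return 0;
}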
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index bf1d7bb90e0f..bb09243758fe 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -423,10 +423,10 @@ bool dc_stream_add_writeback(struct dc *dc, if (dwb->funcs->is_enabled(dwb)) { /* writeback pipe already enabled, only need to update */ - dc->hwss.update_writeback(dc, stream_status, wb_info); + dc->hwss.update_writeback(dc, stream_status, wb_info, dc->current_state); } else { /* Enable writeback pipe from scratch*/ - dc->hwss.enable_writeback(dc, stream_status, wb_info); + dc->hwss.enable_writeback(dc, stream_status, wb_info, dc->current_state); } } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index a82352a87808..0416a17b0897 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.48" +#define DC_VER "3.2.56" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -111,19 +111,20 @@ struct dc_caps { bool force_dp_tps4_for_cp2520; bool disable_dp_clk_share; bool psp_setup_panel_mode; + bool extended_aux_timeout_support; #ifdef CONFIG_DRM_AMD_DC_DCN2_0 bool hw_3d_lut; #endif struct dc_plane_cap planes[MAX_PLANES]; }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dc_bug_wa { +#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool no_connect_phy_config; bool dedcn20_305_wa; +#endif bool skip_clock_update; }; -#endif struct dc_dcc_surface_param { struct dc_size surface_size; @@ -219,7 +220,9 @@ struct dc_config { bool allow_seamless_boot_optimization; bool power_down_display_on_boot; bool edp_not_connected; + bool force_enum_edp; bool forced_clocks; + bool disable_extended_timeout_support; // Used to disable extended timeout and lttpr feature as well bool multi_mon_pp_mclk_switch; }; @@ -227,6 +230,7 @@ enum visual_confirm { VISUAL_CONFIRM_DISABLE = 0, VISUAL_CONFIRM_SURFACE = 1, VISUAL_CONFIRM_HDR = 2, + VISUAL_CONFIRM_MPCTREE = 4, }; enum dcc_option { @@ -245,6 +249,19 @@ enum wm_report_mode { WM_REPORT_DEFAULT = 0, WM_REPORT_OVERRIDE = 1, }; +enum dtm_pstate{ + dtm_level_p0 = 0,/*highest voltage*/ + dtm_level_p1, + dtm_level_p2, + dtm_level_p3, + dtm_level_p4,/*when active_display_count = 0*/ +}; + +enum dcn_pwr_state { + DCN_PWR_STATE_UNKNOWN = -1, + DCN_PWR_STATE_MISSION_MODE = 0, + DCN_PWR_STATE_LOW_POWER = 3, +}; /* * For any clocks that may differ per pipe @@ -252,11 +269,7 @@ enum wm_report_mode { */ struct dc_clocks { int dispclk_khz; - int max_supported_dppclk_khz; - int max_supported_dispclk_khz; int dppclk_khz; - int bw_dppclk_khz; /*a copy of dppclk_khz*/ - int bw_dispclk_khz; int dcfclk_khz; int socclk_khz; int dcfclk_deep_sleep_khz; @@ -264,12 +277,17 @@ struct dc_clocks { int phyclk_khz; int dramclk_khz; bool p_state_change_support; - + enum dcn_pwr_state pwr_state; /* * Elements below are not compared for the purposes of * optimization required */ bool prev_p_state_change_support; + enum dtm_pstate dtm_level; + int max_supported_dppclk_khz; + int max_supported_dispclk_khz; + int bw_dppclk_khz; /*a copy of dppclk_khz*/ + int bw_dispclk_khz; }; struct dc_bw_validation_profile { @@ -347,6 +365,7 @@ struct dc_debug_options { bool disable_hubp_power_gate; #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool disable_dsc_power_gate; + int dsc_min_slice_height_override; #endif bool disable_pplib_wm_range; enum wm_report_mode pplib_wm_report_mode; @@ -462,9 +481,7 @@ 
struct dc { struct dc_config config; struct dc_debug_options debug; struct dc_bounding_box_overrides bb_overrides; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dc_bug_wa work_arounds; -#endif struct dc_context *ctx; #ifdef CONFIG_DRM_AMD_DC_DCN2_0 struct dc_phy_addr_space_config vm_pa_config; @@ -553,10 +570,16 @@ struct dc_init_data { }; struct dc_callback_init { +#ifdef CONFIG_DRM_AMD_DC_HDCP + struct cp_psp cp_psp; +#else uint8_t reserved; +#endif }; struct dc *dc_create(const struct dc_init_data *init_params); +void dc_hardware_init(struct dc *dc); + int dc_get_vmid_use_vector(struct dc *dc); #ifdef CONFIG_DRM_AMD_DC_DCN2_0 void dc_setup_vm_context(struct dc *dc, struct dc_virtual_addr_space_config *va_config, int vmid); @@ -565,6 +588,7 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c #endif void dc_init_callbacks(struct dc *dc, const struct dc_callback_init *init_params); +void dc_deinit_callbacks(struct dc *dc); void dc_destroy(struct dc **dc); /******************************************************************************* diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h index 4ef97f65e55d..4f8f576d5fcf 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h @@ -49,7 +49,8 @@ enum aux_channel_operation_result { AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN, AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY, AUX_CHANNEL_OPERATION_FAILED_TIMEOUT, - AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON + AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON, + AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE }; diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h index 6e42209f0e20..0ed2962add5a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h @@ -30,6 +30,7 @@ #define DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 0x0a0 /* DP 1.4a SCR */ #define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 0x0a1 #define DP_DSC_BRANCH_MAX_LINE_WIDTH 0x0a2 +#include "dc_types.h" struct dc_dsc_bw_range { uint32_t min_kbps; /* Bandwidth if min_target_bpp_x16 is used */ @@ -39,13 +40,21 @@ struct dc_dsc_bw_range { uint32_t stream_kbps; /* Uncompressed stream bandwidth */ }; +struct display_stream_compressor { + const struct dsc_funcs *funcs; +#ifndef AMD_EDID_UTILITY + struct dc_context *ctx; + int inst; +#endif +}; bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_ext_data, struct dsc_dec_dpcd_caps *dsc_sink_caps); bool dc_dsc_compute_bandwidth_range( - const struct dc *dc, + const struct display_stream_compressor *dsc, + const uint32_t dsc_min_slice_height_override, const uint32_t min_kbps, const uint32_t max_kbps, const struct dsc_dec_dpcd_caps *dsc_sink_caps, @@ -53,8 +62,9 @@ bool dc_dsc_compute_bandwidth_range( struct dc_dsc_bw_range *range); bool dc_dsc_compute_config( - const struct dc *dc, + const struct display_stream_compressor *dsc, const struct dsc_dec_dpcd_caps *dsc_sink_caps, + const uint32_t dsc_min_slice_height_override, uint32_t target_bandwidth_kbps, const struct dc_crtc_timing *timing, struct dc_dsc_config *dsc_cfg); diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index 0b8700a8a94a..e0856bb8511f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -26,6 +26,8 @@ #ifndef DC_HW_TYPES_H #define DC_HW_TYPES_H +#ifndef AMD_EDID_UTILITY + #include "os_types.h" 
#include "fixed31_32.h" #include "signal_types.h" @@ -124,20 +126,6 @@ struct plane_size { int chroma_pitch; struct rect surface_size; struct rect chroma_size; - - union { - struct { - struct rect surface_size; - int surface_pitch; - } grph; - - struct { - struct rect luma_size; - int luma_pitch; - struct rect chroma_size; - int chroma_pitch; - } video; - }; }; struct dc_plane_dcc_param { @@ -148,21 +136,6 @@ struct dc_plane_dcc_param { int meta_pitch_c; bool independent_64b_blks_c; - - union { - struct { - int meta_pitch; - bool independent_64b_blks; - } grph; - - struct { - int meta_pitch_l; - bool independent_64b_blks_l; - - int meta_pitch_c; - bool independent_64b_blks_c; - } video; - }; }; /*Displayable pixel format in fb*/ @@ -605,6 +578,11 @@ enum dc_quantization_range { QUANTIZATION_RANGE_LIMITED }; +enum dc_dynamic_expansion { + DYN_EXPANSION_AUTO, + DYN_EXPANSION_DISABLE +}; + /* XFM */ /* used in struct dc_plane_state */ @@ -616,6 +594,8 @@ struct scaling_taps { bool integer_scaling; }; +#endif /* AMD_EDID_UTILITY */ + enum dc_timing_standard { DC_TIMING_STANDARD_UNDEFINED, DC_TIMING_STANDARD_DMT, @@ -737,30 +717,6 @@ enum dc_timing_3d_format { TIMING_3D_FORMAT_MAX, }; -enum trigger_delay { - TRIGGER_DELAY_NEXT_PIXEL = 0, - TRIGGER_DELAY_NEXT_LINE, -}; - -enum crtc_event { - CRTC_EVENT_VSYNC_RISING = 0, - CRTC_EVENT_VSYNC_FALLING -}; - -struct crtc_trigger_info { - bool enabled; - struct dc_stream_state *event_source; - enum crtc_event event; - enum trigger_delay delay; -}; - -struct dc_crtc_timing_adjust { - uint32_t v_total_min; - uint32_t v_total_max; - uint32_t v_total_mid; - uint32_t v_total_mid_frame_num; -}; - #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct dc_dsc_config { uint32_t num_slices_h; /* Number of DSC slices - horizontal */ @@ -804,6 +760,33 @@ struct dc_crtc_timing { #endif }; +#ifndef AMD_EDID_UTILITY + +enum trigger_delay { + TRIGGER_DELAY_NEXT_PIXEL = 0, + TRIGGER_DELAY_NEXT_LINE, +}; + +enum crtc_event { + CRTC_EVENT_VSYNC_RISING = 0, + CRTC_EVENT_VSYNC_FALLING +}; + +struct crtc_trigger_info { + bool enabled; + struct dc_stream_state *event_source; + enum crtc_event event; + enum trigger_delay delay; +}; + +struct dc_crtc_timing_adjust { + uint32_t v_total_min; + uint32_t v_total_max; + uint32_t v_total_mid; + uint32_t v_total_mid_frame_num; +}; + + /* Passed on init */ enum vram_type { VIDEO_MEMORY_TYPE_GDDR5 = 2, @@ -874,5 +857,7 @@ struct tg_color { uint16_t color_b_cb; }; +#endif /* AMD_EDID_UTILITY */ + #endif /* DC_HW_TYPES_H */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 9ea75db3484e..f24fd19ed93d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -126,7 +126,8 @@ struct dc_link { unsigned short chip_caps; unsigned int dpcd_sink_count; enum edp_revision edp_revision; - bool psr_enabled; + bool psr_feature_enabled; + bool psr_allow_active; /* MST record stream using this link */ struct link_flags { @@ -158,6 +159,18 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_ return dc->links[link_index]; } +static inline struct dc_link *get_edp_link(const struct dc *dc) +{ + int i; + + // report any eDP links, even unconnected DDI's + for (i = 0; i < dc->link_count; i++) { + if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) + return dc->links[i]; + } + return NULL; +} + /* Set backlight level of an embedded panel (eDP, LVDS). 
* backlight_pwm_u16_16 is unsigned 32 bit with 16 bit integer * and 16 bit fractional, where 1.0 is max backlight value. @@ -170,7 +183,7 @@ int dc_link_get_backlight_level(const struct dc_link *dc_link); bool dc_link_set_abm_disable(const struct dc_link *dc_link); -bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait); +bool dc_link_set_psr_allow_active(struct dc_link *dc_link, bool enable, bool wait); bool dc_link_get_psr_state(const struct dc_link *dc_link, uint32_t *psr_state); @@ -192,6 +205,7 @@ enum dc_detect_reason { bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason); bool dc_link_get_hpd_state(struct dc_link *dc_link); +enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx); /* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt). * Return: diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 0fa1c26bc20d..fdb6adc37857 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -113,6 +113,21 @@ struct periodic_interrupt_config { int lines_offset; }; +union stream_update_flags { + struct { + uint32_t scaling:1; + uint32_t out_tf:1; + uint32_t out_csc:1; + uint32_t abm_level:1; + uint32_t dpms_off:1; + uint32_t gamut_remap:1; +#if defined(CONFIG_DRM_AMD_DC_DCN2_0) + uint32_t wb_update:1; +#endif + } bits; + + uint32_t raw; +}; struct dc_stream_state { // sink is deprecated, new code should not reference @@ -214,9 +229,14 @@ struct dc_stream_state { #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool is_dsc_enabled; #endif + union stream_update_flags update_flags; }; +#define ABM_LEVEL_IMMEDIATE_DISABLE 0xFFFFFFFF + struct dc_stream_update { + struct dc_stream_state *stream; + struct rect src; struct rect dst; struct dc_transfer_func *out_transfer_func; @@ -431,6 +451,9 @@ void dc_stream_set_static_screen_events(struct dc *dc, int num_streams, const struct dc_static_screen_events *events); +void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream, + enum dc_dynamic_expansion option); + void dc_stream_set_dither_option(struct dc_stream_state *stream, enum dc_dither_option option); diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index b273735b6a3e..d9be8fc3889f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -25,6 +25,11 @@ #ifndef DC_TYPES_H_ #define DC_TYPES_H_ +#ifndef AMD_EDID_UTILITY +/* AND EdidUtility only needs a portion + * of this file, including the rest only + * causes additional issues. 
+ */ #include "os_types.h" #include "fixed31_32.h" #include "irq_types.h" @@ -33,6 +38,10 @@ #include "dal_types.h" #include "grph_object_defs.h" +#ifdef CONFIG_DRM_AMD_DC_HDCP +#include "dm_cp_psp.h" +#endif + /* forward declarations */ struct dc_plane_state; struct dc_stream_state; @@ -100,6 +109,9 @@ struct dc_context { uint32_t dc_sink_id_count; uint32_t dc_stream_id_count; uint64_t fbc_gpu_addr; +#ifdef CONFIG_DRM_AMD_DC_HDCP + struct cp_psp cp_psp; +#endif }; @@ -159,6 +171,12 @@ enum dc_edid_status { EDID_THE_SAME, }; +enum act_return_status { + ACT_SUCCESS, + ACT_LINK_LOST, + ACT_FAILED +}; + /* audio capability from EDID*/ struct dc_cea_audio_mode { uint8_t format_code; /* ucData[0] [6:3]*/ @@ -739,6 +757,9 @@ struct dc_clock_config { uint32_t current_clock_khz;/*current clock in use*/ }; +#endif /*AMD_EDID_UTILITY*/ +//AMD EDID UTILITY does not need any of the above structures + #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* DSC DPCD capabilities */ union dsc_slice_caps1 { @@ -810,4 +831,5 @@ struct dsc_dec_dpcd_caps { uint32_t branch_max_line_width; }; #endif + #endif /* DC_TYPES_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c index 58bd131d5b48..b8a3fc505c9b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c @@ -77,6 +77,9 @@ static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id) /* notifyDMCUMsg */ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); + REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, + 1, 80000); + return true; } @@ -401,6 +404,10 @@ static bool dce_abm_init_backlight(struct abm *abm) /* Enable the backlight output */ REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1); + /* Disable fractional pwm if configured */ + REG_UPDATE(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, + abm->ctx->dc->config.disable_fractional_pwm ? 
0 : 1); + /* Unlock group 2 backlight registers */ REG_UPDATE(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, 0); @@ -489,9 +496,6 @@ void dce_abm_destroy(struct abm **abm) { struct dce_abm *abm_dce = TO_DCE_ABM(*abm); - if (abm_dce->base.dmcu_is_running == true) - abm_dce->base.funcs->set_abm_immediate_disable(*abm); - kfree(abm_dce); *abm = NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index c3f9f4185ce8..e472608faf33 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -42,6 +42,10 @@ #include "reg_helper.h" +#undef FN +#define FN(reg_name, field_name) \ + aux110->shift->field_name, aux110->mask->field_name + #define FROM_AUX_ENGINE(ptr) \ container_of((ptr), struct aux_engine_dce110, base) @@ -55,6 +59,14 @@ enum { AUX_TIMED_OUT_RETRY_COUNTER = 2, AUX_DEFER_RETRY_COUNTER = 6 }; + +#define TIME_OUT_INCREMENT 1016 +#define TIME_OUT_MULTIPLIER_8 8 +#define TIME_OUT_MULTIPLIER_16 16 +#define TIME_OUT_MULTIPLIER_32 32 +#define TIME_OUT_MULTIPLIER_64 64 +#define MAX_TIMEOUT_LENGTH 127 + static void release_engine( struct dce_aux *engine) { @@ -198,7 +210,7 @@ static void submit_channel_request( REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1); REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0, - 10, aux110->timeout_period/10); + 10, aux110->polling_timeout_period/10); /* set the delay and the number of bytes to write */ @@ -327,7 +339,7 @@ static enum aux_channel_operation_result get_channel_status( /* poll to make sure that SW_DONE is asserted */ REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1, - 10, aux110->timeout_period/10); + 10, aux110->polling_timeout_period/10); value = REG_READ(AUX_SW_STATUS); /* in case HPD is LOW, exit AUX transaction */ @@ -414,20 +426,77 @@ void dce110_engine_destroy(struct dce_aux **engine) *engine = NULL; } + +static bool dce_aux_configure_timeout(struct ddc_service *ddc, + uint32_t timeout_in_us) +{ + uint32_t multiplier = 0; + uint32_t length = 0; + struct ddc *ddc_pin = ddc->ddc_pin; + struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]; + struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(aux_engine); + + /* 1-Update polling timeout period */ + aux110->polling_timeout_period = timeout_in_us * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER; + + /* 2-Update aux timeout period length and multiplier */ + if (timeout_in_us <= TIME_OUT_INCREMENT) { + multiplier = 0; + length = timeout_in_us/TIME_OUT_MULTIPLIER_8; + if (timeout_in_us % TIME_OUT_MULTIPLIER_8 != 0) + length++; + } else if (timeout_in_us <= 2 * TIME_OUT_INCREMENT) { + multiplier = 1; + length = timeout_in_us/TIME_OUT_MULTIPLIER_16; + if (timeout_in_us % TIME_OUT_MULTIPLIER_16 != 0) + length++; + } else if (timeout_in_us <= 4 * TIME_OUT_INCREMENT) { + multiplier = 2; + length = timeout_in_us/TIME_OUT_MULTIPLIER_32; + if (timeout_in_us % TIME_OUT_MULTIPLIER_32 != 0) + length++; + } else if (timeout_in_us > 4 * TIME_OUT_INCREMENT) { + multiplier = 3; + length = timeout_in_us/TIME_OUT_MULTIPLIER_64; + if (timeout_in_us % TIME_OUT_MULTIPLIER_64 != 0) + length++; + } + + length = (length < MAX_TIMEOUT_LENGTH) ? 
length : MAX_TIMEOUT_LENGTH; + + REG_UPDATE_SEQ_2(AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, length, AUX_RX_TIMEOUT_LEN_MUL, multiplier); + + return true; +} + +static struct dce_aux_funcs aux_functions = { + .configure_timeout = NULL, + .destroy = NULL, +}; + struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110, struct dc_context *ctx, uint32_t inst, uint32_t timeout_period, - const struct dce110_aux_registers *regs) + const struct dce110_aux_registers *regs, + const struct dce110_aux_registers_mask *mask, + const struct dce110_aux_registers_shift *shift, + bool is_ext_aux_timeout_configurable) { aux_engine110->base.ddc = NULL; aux_engine110->base.ctx = ctx; aux_engine110->base.delay = 0; aux_engine110->base.max_defer_write_retry = 0; aux_engine110->base.inst = inst; - aux_engine110->timeout_period = timeout_period; + aux_engine110->polling_timeout_period = timeout_period; aux_engine110->regs = regs; + aux_engine110->mask = mask; + aux_engine110->shift = shift; + aux_engine110->base.funcs = &aux_functions; + if (is_ext_aux_timeout_configurable) + aux_engine110->base.funcs->configure_timeout = &dce_aux_configure_timeout; + return &aux_engine110->base; } @@ -464,8 +533,10 @@ int dce_aux_transfer_raw(struct ddc_service *ddc, memset(&aux_rep, 0, sizeof(aux_rep)); aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]; - if (!acquire(aux_engine, ddc_pin)) + if (!acquire(aux_engine, ddc_pin)) { + *operation_result = AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE; return -1; + } if (payload->i2c_over_aux) aux_req.type = AUX_TRANSACTION_TYPE_I2C; @@ -475,7 +546,7 @@ int dce_aux_transfer_raw(struct ddc_service *ddc, aux_req.action = i2caux_action_from_payload(payload); aux_req.address = payload->address; - aux_req.delay = payload->defer_delay * 10; + aux_req.delay = 0; aux_req.length = payload->length; aux_req.data = payload->data; @@ -544,8 +615,15 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, case AUX_TRANSACTION_REPLY_AUX_DEFER: case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK: case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER: - if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) + if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) { goto fail; + } else { + if ((*payload->reply == AUX_TRANSACTION_REPLY_AUX_DEFER) || + (*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)) { + if (payload->defer_delay > 0) + msleep(payload->defer_delay); + } + } break; case AUX_TRANSACTION_REPLY_I2C_DEFER: @@ -582,6 +660,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, break; case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON: + case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE: case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN: default: goto fail; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h index ed7fec8fe253..b4b2c79a8073 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h @@ -29,6 +29,7 @@ #include "i2caux_interface.h" #include "inc/hw/aux_engine.h" + #ifdef CONFIG_DRM_AMD_DC_DCN2_0 #define AUX_COMMON_REG_LIST0(id)\ SRI(AUX_CONTROL, DP_AUX, id), \ @@ -36,6 +37,7 @@ SRI(AUX_SW_DATA, DP_AUX, id), \ SRI(AUX_SW_CONTROL, DP_AUX, id), \ SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \ + SRI(AUX_DPHY_RX_CONTROL1, DP_AUX, id), \ SRI(AUX_SW_STATUS, DP_AUX, id) #endif @@ -55,6 +57,7 @@ struct dce110_aux_registers { uint32_t AUX_SW_DATA; uint32_t AUX_SW_CONTROL; uint32_t AUX_INTERRUPT_CONTROL; + uint32_t AUX_DPHY_RX_CONTROL1; uint32_t AUX_SW_STATUS; 
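dce_aux_configure_timeout() above encodes a microsecond timeout into the hardware's two fields: a multiplier selecting an 8, 16, 32 or 64 us tick and a tick count capped at MAX_TIMEOUT_LENGTH (127), with the division rounded up so the programmed timeout never undershoots the request. The same encoding as a standalone function:

#include <stdint.h>
#include <stdio.h>

#define TIME_OUT_INCREMENT 1016 /* 127 ticks * 8 us, the multiplier-0 maximum */
#define MAX_TIMEOUT_LENGTH 127

static void encode_timeout(uint32_t timeout_in_us,
                           uint32_t *multiplier, uint32_t *length)
{
    uint32_t tick;

    if (timeout_in_us <= TIME_OUT_INCREMENT)
        *multiplier = 0;
    else if (timeout_in_us <= 2 * TIME_OUT_INCREMENT)
        *multiplier = 1;
    else if (timeout_in_us <= 4 * TIME_OUT_INCREMENT)
        *multiplier = 2;
    else
        *multiplier = 3;

    tick = 8u << *multiplier;                    /* 8/16/32/64 us per tick */
    *length = (timeout_in_us + tick - 1) / tick; /* ceil, like the length++ */
    if (*length > MAX_TIMEOUT_LENGTH)
        *length = MAX_TIMEOUT_LENGTH;
}

int main(void)
{
    uint32_t mul, len;

    encode_timeout(3200, &mul, &len);
    printf("3200us -> mul=%u len=%u (%u us)\n",
           mul, len, len * (8u << mul)); /* mul=2 len=100 (3200 us) */
    return 0;
}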
uint32_t AUXN_IMPCAL; uint32_t AUXP_IMPCAL; @@ -62,6 +65,156 @@ struct dce110_aux_registers { uint32_t AUX_RESET_MASK; }; +#define DCE_AUX_REG_FIELD_LIST(type)\ + type AUX_EN;\ + type AUX_RESET;\ + type AUX_RESET_DONE;\ + type AUX_REG_RW_CNTL_STATUS;\ + type AUX_SW_USE_AUX_REG_REQ;\ + type AUX_SW_DONE_USING_AUX_REG;\ + type AUX_SW_AUTOINCREMENT_DISABLE;\ + type AUX_SW_DATA_RW;\ + type AUX_SW_INDEX;\ + type AUX_SW_GO;\ + type AUX_SW_DATA;\ + type AUX_SW_REPLY_BYTE_COUNT;\ + type AUX_SW_DONE;\ + type AUX_SW_DONE_ACK;\ + type AUXN_IMPCAL_ENABLE;\ + type AUXP_IMPCAL_ENABLE;\ + type AUXN_IMPCAL_OVERRIDE_ENABLE;\ + type AUXP_IMPCAL_OVERRIDE_ENABLE;\ + type AUX_RX_TIMEOUT_LEN;\ + type AUX_RX_TIMEOUT_LEN_MUL;\ + type AUXN_CALOUT_ERROR_AK;\ + type AUXP_CALOUT_ERROR_AK;\ + type AUX_SW_START_DELAY;\ + type AUX_SW_WR_BYTES + +#define DCE10_AUX_MASK_SH_LIST(mask_sh)\ + AUX_SF(AUX_CONTROL, AUX_EN, mask_sh),\ + AUX_SF(AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\ + AUX_SF(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\ + AUX_SF(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\ + AUX_SF(AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\ + AUX_SF(AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\ + AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\ + AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\ + AUX_SF(AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\ + AUX_SF(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh) + +#define DCE_AUX_MASK_SH_LIST(mask_sh)\ + AUX_SF(AUX_CONTROL, AUX_EN, mask_sh),\ + AUX_SF(AUX_CONTROL, AUX_RESET, mask_sh),\ + AUX_SF(AUX_CONTROL, AUX_RESET_DONE, mask_sh),\ + AUX_SF(AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\ + AUX_SF(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\ + AUX_SF(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\ + AUX_SF(AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\ + AUX_SF(AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\ + AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\ + AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\ + AUX_SF(AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\ + AUX_SF(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh) + +#define DCE12_AUX_MASK_SH_LIST(mask_sh)\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, 
AUX_REG_RW_CNTL_STATUS, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh) + +/* DCN10 MASK */ +#define DCN10_AUX_MASK_SH_LIST(mask_sh)\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh) + +/* for all other DCN */ +#define DCN_AUX_MASK_SH_LIST(mask_sh)\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, 
mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\ + AUX_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, mask_sh),\ + AUX_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN_MUL, mask_sh) + +#define AUX_SF(reg_name, field_name, post_fix)\ + .field_name = reg_name ## __ ## field_name ## post_fix + enum { /* This is the timeout as defined in DP 1.2a, * 2.3.4 "Detailed uPacket TX AUX CH State Description". */ @@ -97,20 +250,34 @@ struct dce_aux { uint32_t max_defer_write_retry; bool acquire_reset; + struct dce_aux_funcs *funcs; +}; + +struct dce110_aux_registers_mask { + DCE_AUX_REG_FIELD_LIST(uint32_t); +}; + +struct dce110_aux_registers_shift { + DCE_AUX_REG_FIELD_LIST(uint8_t); }; + struct aux_engine_dce110 { struct dce_aux base; const struct dce110_aux_registers *regs; + const struct dce110_aux_registers_mask *mask; + const struct dce110_aux_registers_shift *shift; struct { uint32_t aux_control; uint32_t aux_arb_control; uint32_t aux_sw_data; uint32_t aux_sw_control; uint32_t aux_interrupt_control; + uint32_t aux_dphy_rx_control1; + uint32_t aux_dphy_rx_control0; uint32_t aux_sw_status; } addr; - uint32_t timeout_period; + uint32_t polling_timeout_period; }; struct aux_engine_dce110_init_data { @@ -120,12 +287,15 @@ struct aux_engine_dce110_init_data { const struct dce110_aux_registers *regs; }; -struct dce_aux *dce110_aux_engine_construct( - struct aux_engine_dce110 *aux_engine110, +struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110, struct dc_context *ctx, uint32_t inst, uint32_t timeout_period, - const struct dce110_aux_registers *regs); + const struct dce110_aux_registers *regs, + + const struct dce110_aux_registers_mask *mask, + const struct dce110_aux_registers_shift *shift, + bool is_ext_aux_timeout_configurable); void dce110_engine_destroy(struct dce_aux **engine); @@ -139,4 +309,13 @@ int dce_aux_transfer_raw(struct ddc_service *ddc, bool dce_aux_transfer_with_retries(struct ddc_service *ddc, struct aux_payload *cmd); + +struct dce_aux_funcs { + bool (*configure_timeout) + (struct ddc_service *ddc, + uint32_t timeout); + void (*destroy) + (struct aux_engine **ptr); +}; + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index 0b86cee4876f..ba995d3f2318 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -907,9 +907,6 @@ void dce_dmcu_destroy(struct dmcu **dmcu) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(*dmcu); - if (dmcu_dce->base.dmcu_state == DMCU_RUNNING) - dmcu_dce->base.funcs->set_psr_enable(*dmcu, false, true); - kfree(dmcu_dce); *dmcu = NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index ac04d77058f0..32d145a0d6fc 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h @@ -679,6 +679,7 @@ struct dce_hwseq_registers { HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN17_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN18_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ + HWSEQ_LVTMA_MASK_SH_LIST(mask_sh), \ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh) #endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c 
b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c index 31b698bf9cfc..8aa937f496c4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c @@ -606,11 +606,11 @@ static void dce_mi_allocate_dmif( } if (dce_mi->wa.single_head_rdreq_dmif_limit) { - uint32_t eanble = (total_stream_num > 1) ? 0 : + uint32_t enable = (total_stream_num > 1) ? 0 : dce_mi->wa.single_head_rdreq_dmif_limit; REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT, - ENABLE, eanble); + ENABLE, enable); } } @@ -636,11 +636,11 @@ static void dce_mi_free_dmif( 10, 3500); if (dce_mi->wa.single_head_rdreq_dmif_limit) { - uint32_t eanble = (total_stream_num > 1) ? 0 : + uint32_t enable = (total_stream_num > 1) ? 0 : dce_mi->wa.single_head_rdreq_dmif_limit; REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT, - ENABLE, eanble); + ENABLE, enable); } } diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index 76d54885374a..a5e122c721ec 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -399,6 +399,37 @@ static const struct dc_plane_cap plane_cap = { #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 #endif +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + case TRANSMITTER_UNIPHY_F: + return 5; + break; + case TRANSMITTER_UNIPHY_G: + return 6; + break; + default: + ASSERT(0); + return 0; + } +} + static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) @@ -506,6 +537,14 @@ static const struct dce_mem_input_mask mi_masks = { .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK }; +static const struct dce110_aux_registers_shift aux_shift = { + DCE10_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCE10_AUX_MASK_SH_LIST(_MASK) +}; + static struct mem_input *dce100_mem_input_create( struct dc_context *ctx, uint32_t inst) @@ -571,14 +610,18 @@ struct link_encoder *dce100_link_encoder_create( { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc110) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; @@ -611,7 +654,10 @@ struct dce_aux *dce100_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -997,6 +1043,8 @@ static bool construct( dc->caps.max_cursor_size = 128; dc->caps.dual_link_dvi = true; dc->caps.disable_dp_clk_share = true; + dc->caps.extended_aux_timeout_support = false; + for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = dce100_timing_generator_create( diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 01a924bf477a..f0e837d14000 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -944,7 +944,6 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) { /* notify audio driver for audio modes of monitor */ struct dc *core_dc; - struct pp_smu_funcs *pp_smu = NULL; struct clk_mgr *clk_mgr; unsigned int i, num_audio = 1; @@ -957,9 +956,6 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true) return; - if (core_dc->res_pool->pp_smu) - pp_smu = core_dc->res_pool->pp_smu; - if (pipe_ctx->stream_res.audio) { for (i = 0; i < MAX_PIPES; i++) { /*current_state not updated yet*/ @@ -984,7 +980,6 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx) { struct dc *dc; - struct pp_smu_funcs *pp_smu = NULL; struct clk_mgr *clk_mgr; if (!pipe_ctx || !pipe_ctx->stream) @@ -1001,9 +996,6 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx) if (pipe_ctx->stream_res.audio) { pipe_ctx->stream_res.audio->enabled = false; - if (dc->res_pool->pp_smu) - pp_smu = dc->res_pool->pp_smu; - if (dc_is_dp_signal(pipe_ctx->stream->signal)) pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable( pipe_ctx->stream_res.stream_enc); @@ -1169,8 +1161,9 @@ static void build_audio_output( } } - if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT || - pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + if (state->clk_mgr && + (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT || + pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)) { audio_output->pll_info.dp_dto_source_clock_in_khz = state->clk_mgr->funcs->get_dp_ref_clk_frequency( state->clk_mgr); @@ -1418,7 +1411,7 @@ static enum dc_status apply_single_controller_ctx_to_hw( pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0; - pipe_ctx->stream->link->psr_enabled = false; + pipe_ctx->stream->link->psr_feature_enabled = false; return DC_OK; } @@ -1428,8 +1421,6 @@ static enum dc_status apply_single_controller_ctx_to_hw( static void power_down_encoders(struct dc *dc) { int i; - enum connector_id connector_id; - enum signal_type signal = SIGNAL_TYPE_NONE; /* do not know BIOS back-front mapping, simply blank all. 
It will not * hurt for non-DP @@ -1440,15 +1431,12 @@ static void power_down_encoders(struct dc *dc) } for (i = 0; i < dc->link_count; i++) { - connector_id = dal_graphics_object_id_get_connector_id(dc->links[i]->link_id); - if ((connector_id == CONNECTOR_ID_DISPLAY_PORT) || - (connector_id == CONNECTOR_ID_EDP)) { + enum signal_type signal = dc->links[i]->connector_signal; + if ((signal == SIGNAL_TYPE_EDP) || + (signal == SIGNAL_TYPE_DISPLAY_PORT)) if (!dc->links[i]->wa_flags.dp_keep_receiver_powered) dp_receiver_power_ctrl(dc->links[i], false); - if (connector_id == CONNECTOR_ID_EDP) - signal = SIGNAL_TYPE_EDP; - } dc->links[i]->link_enc->funcs->disable_output( dc->links[i]->link_enc, signal); @@ -1529,18 +1517,6 @@ static struct dc_stream_state *get_edp_stream(struct dc_state *context) return NULL; } -static struct dc_link *get_edp_link(struct dc *dc) -{ - int i; - - // report any eDP links, even unconnected DDI's - for (i = 0; i < dc->link_count; i++) { - if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) - return dc->links[i]; - } - return NULL; -} - static struct dc_link *get_edp_link_with_sink( struct dc *dc, struct dc_state *context) @@ -1834,7 +1810,7 @@ static bool should_enable_fbc(struct dc *dc, return false; /* PSR should not be enabled */ - if (pipe_ctx->stream->link->psr_enabled) + if (pipe_ctx->stream->link->psr_feature_enabled) return false; /* Nothing to compress */ @@ -2464,7 +2440,6 @@ static void dce110_program_front_end_for_pipe( struct dc *dc, struct pipe_ctx *pipe_ctx) { struct mem_input *mi = pipe_ctx->plane_res.mi; - struct pipe_ctx *old_pipe = NULL; struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct xfm_grph_csc_adjustment adjust; struct out_csc_color_matrix tbl_entry; @@ -2472,9 +2447,6 @@ static void dce110_program_front_end_for_pipe( DC_LOGGER_INIT(); memset(&tbl_entry, 0, sizeof(tbl_entry)); - if (dc->current_state) - old_pipe = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx]; - memset(&adjust, 0, sizeof(adjust)); adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index 89620adc81d8..83a4dbf6d76e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -275,6 +275,14 @@ static const struct dce_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCE110(_MASK) }; +static const struct dce110_aux_registers_shift aux_shift = { + DCE_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCE_AUX_MASK_SH_LIST(_MASK) +}; + #define opp_regs(id)\ [id] = {\ OPP_DCE_110_REG_LIST(id),\ @@ -440,6 +448,37 @@ static const struct dc_plane_cap underlay_plane_cap = { #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 #endif +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + case TRANSMITTER_UNIPHY_F: + return 5; + break; + case TRANSMITTER_UNIPHY_G: + return 6; + break; + default: + ASSERT(0); + return 0; + } +} + static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) @@ -617,14 +656,18 @@ static struct link_encoder *dce110_link_encoder_create( { struct 
dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc110) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; @@ -657,7 +700,10 @@ struct dce_aux *dce110_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -1293,6 +1339,7 @@ static bool construct( dc->caps.i2c_speed_in_khz = 100; dc->caps.max_cursor_size = 128; dc->caps.is_apu = true; + dc->caps.extended_aux_timeout_support = false; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 21a657e79306..97dcc5d0862b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -172,6 +172,14 @@ static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCE110(_MASK) }; +static const struct dce110_aux_registers_shift aux_shift = { + DCE_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCE_AUX_MASK_SH_LIST(_MASK) +}; + #define ipp_regs(id)\ [id] = {\ IPP_DCE110_REG_LIST_DCE_BASE(id)\ @@ -417,6 +425,37 @@ static const struct dc_plane_cap plane_cap = { #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 #endif +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + case TRANSMITTER_UNIPHY_F: + return 5; + break; + case TRANSMITTER_UNIPHY_G: + return 6; + break; + default: + ASSERT(0); + return 0; + } +} + static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) @@ -575,14 +614,18 @@ struct link_encoder *dce112_link_encoder_create( { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc110) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; @@ -630,7 +673,10 @@ struct dce_aux *dce112_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -1163,7 +1209,7 @@ static bool construct( dc->caps.i2c_speed_in_khz = 100; dc->caps.max_cursor_size = 128; dc->caps.dual_link_dvi = true; - + dc->caps.extended_aux_timeout_support = false; 
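/*
 * Each DCE/DCN resource file in this patch gains an identical
 * map_transmitter_id_to_phy_instance(), so enc_init_data->transmitter no
 * longer indexes link_enc_regs[] directly: the switch pins down the PHY
 * register-bank index (0..6 here, 0..3 on DCN10's four PHYs) and ASSERTs
 * on anything else. The break statements after each return are unreachable
 * and purely stylistic. An equivalent table-driven sketch, assuming the
 * UNIPHY enumerators are small enough to serve as array indices (an
 * assumption, not something this patch relies on):
 *
 *	static const int phy_for_transmitter[] = {
 *		[TRANSMITTER_UNIPHY_A] = 0,
 *		[TRANSMITTER_UNIPHY_B] = 1,
 *		[TRANSMITTER_UNIPHY_C] = 2,
 *		[TRANSMITTER_UNIPHY_D] = 3,
 *		[TRANSMITTER_UNIPHY_E] = 4,
 *		[TRANSMITTER_UNIPHY_F] = 5,
 *		[TRANSMITTER_UNIPHY_G] = 6,
 *	};
 *
 * The switch form was presumably preferred so an out-of-range transmitter
 * trips the ASSERT instead of silently mapping to instance 0.
 */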
/************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 7c52f7f9196c..63543f6918ff 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -293,6 +293,14 @@ static const struct dce_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCE120(_MASK) }; +static const struct dce110_aux_registers_shift aux_shift = { + DCE12_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCE12_AUX_MASK_SH_LIST(_MASK) +}; + #define opp_regs(id)\ [id] = {\ OPP_DCE_120_REG_LIST(id),\ @@ -356,6 +364,37 @@ static const struct dce_audio_mask audio_mask = { DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) }; +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + case TRANSMITTER_UNIPHY_F: + return 5; + break; + case TRANSMITTER_UNIPHY_G: + return 6; + break; + default: + ASSERT(0); + return 0; + } +} + #define clk_src_regs(index, id)\ [index] = {\ CS_COMMON_REG_LIST_DCE_112(id),\ @@ -404,7 +443,10 @@ struct dce_aux *dce120_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -655,14 +697,18 @@ static struct link_encoder *dce120_link_encoder_create( { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc110) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); @@ -1006,7 +1052,7 @@ static bool construct( dc->caps.max_cursor_size = 128; dc->caps.dual_link_dvi = true; dc->caps.psp_setup_panel_mode = true; - + dc->caps.extended_aux_timeout_support = false; dc->debug = debug_defaults; /************************************************* diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index 643ccb0ade00..3e8d4b49f279 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -288,6 +288,14 @@ static const struct dce_opp_mask opp_mask = { OPP_COMMON_MASK_SH_LIST_DCE_80(_MASK) }; +static const struct dce110_aux_registers_shift aux_shift = { + DCE10_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCE10_AUX_MASK_SH_LIST(_MASK) +}; + #define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST(id), \ @@ -431,6 +439,37 @@ static const struct dce_abm_mask abm_mask = { #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 #endif +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case 
TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + case TRANSMITTER_UNIPHY_F: + return 5; + break; + case TRANSMITTER_UNIPHY_G: + return 6; + break; + default: + ASSERT(0); + return 0; + } +} + static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) @@ -491,7 +530,10 @@ struct dce_aux *dce80_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -669,14 +711,18 @@ struct link_encoder *dce80_link_encoder_create( { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc110) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; @@ -895,6 +941,7 @@ static bool dce80_construct( dc->caps.i2c_speed_in_khz = 40; dc->caps.max_cursor_size = 128; dc->caps.dual_link_dvi = true; + dc->caps.extended_aux_timeout_support = false; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index d8b2da18db39..997e9582edc7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -129,7 +129,7 @@ void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp) #define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19)) -static bool dpp_get_optimal_number_of_taps( +bool dpp1_get_optimal_number_of_taps( struct dpp *dpp, struct scaler_data *scl_data, const struct scaling_taps *in_taps) @@ -521,7 +521,7 @@ static const struct dpp_funcs dcn10_dpp_funcs = { .dpp_read_state = dpp_read_state, .dpp_reset = dpp_reset, .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, - .dpp_get_optimal_number_of_taps = dpp_get_optimal_number_of_taps, + .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps, .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap, .dpp_set_csc_adjustment = dpp1_cm_set_output_csc_adjustment, .dpp_set_csc_default = dpp1_cm_set_output_csc_default, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h index e2c613611ac9..1d4a7d640334 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h @@ -1504,6 +1504,11 @@ void dpp1_set_hdr_multiplier( struct dpp *dpp_base, uint32_t multiplier); +bool dpp1_get_optimal_number_of_taps( + struct dpp *dpp, + struct scaler_data *scl_data, + const struct scaling_taps *in_taps); + void dpp1_construct(struct dcn10_dpp *dpp1, struct dc_context *ctx, uint32_t inst, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 001db49e4bb2..14d1be6c66e6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -841,6 +841,14 @@ void min_set_viewport( REG_SET_2(DCSURF_PRI_VIEWPORT_START_C, 
0, PRI_VIEWPORT_X_START_C, viewport_c->x, PRI_VIEWPORT_Y_START_C, viewport_c->y); + + REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION_C, 0, + SEC_VIEWPORT_WIDTH_C, viewport_c->width, + SEC_VIEWPORT_HEIGHT_C, viewport_c->height); + + REG_SET_2(DCSURF_SEC_VIEWPORT_START_C, 0, + SEC_VIEWPORT_X_START_C, viewport_c->x, + SEC_VIEWPORT_Y_START_C, viewport_c->y); } void hubp1_read_state_common(struct hubp *hubp) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index cb20d10288c0..ae70d9c0aa1d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -47,6 +47,8 @@ SRI(DCSURF_SEC_VIEWPORT_START, HUBP, id), \ SRI(DCSURF_PRI_VIEWPORT_DIMENSION_C, HUBP, id), \ SRI(DCSURF_PRI_VIEWPORT_START_C, HUBP, id), \ + SRI(DCSURF_SEC_VIEWPORT_DIMENSION_C, HUBP, id), \ + SRI(DCSURF_SEC_VIEWPORT_START_C, HUBP, id), \ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS, HUBPREQ, id),\ SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\ @@ -57,8 +59,12 @@ SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS, HUBPREQ, id),\ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_C, HUBPREQ, id),\ + SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\ + SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_C, HUBPREQ, id),\ SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\ SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, HUBPREQ, id),\ + SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\ + SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, HUBPREQ, id),\ SRI(DCSURF_SURFACE_INUSE, HUBPREQ, id),\ SRI(DCSURF_SURFACE_INUSE_HIGH, HUBPREQ, id),\ SRI(DCSURF_SURFACE_INUSE_C, HUBPREQ, id),\ @@ -150,6 +156,8 @@ uint32_t DCSURF_SEC_VIEWPORT_START; \ uint32_t DCSURF_PRI_VIEWPORT_DIMENSION_C; \ uint32_t DCSURF_PRI_VIEWPORT_START_C; \ + uint32_t DCSURF_SEC_VIEWPORT_DIMENSION_C; \ + uint32_t DCSURF_SEC_VIEWPORT_START_C; \ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH; \ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS; \ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH; \ @@ -160,8 +168,12 @@ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS; \ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; \ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C; \ + uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C; \ + uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_C; \ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C; \ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_C; \ + uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C; \ + uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_C; \ uint32_t DCSURF_SURFACE_INUSE; \ uint32_t DCSURF_SURFACE_INUSE_HIGH; \ uint32_t DCSURF_SURFACE_INUSE_C; \ @@ -279,6 +291,10 @@ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C, PRI_VIEWPORT_HEIGHT_C, mask_sh),\ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_X_START_C, mask_sh),\ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_Y_START_C, mask_sh),\ + HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C, SEC_VIEWPORT_WIDTH_C, mask_sh),\ + HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C, SEC_VIEWPORT_HEIGHT_C, mask_sh),\ + HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START_C, SEC_VIEWPORT_X_START_C, mask_sh),\ + HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START_C, SEC_VIEWPORT_Y_START_C, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, PRIMARY_SURFACE_ADDRESS_HIGH, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS, PRIMARY_SURFACE_ADDRESS, mask_sh),\ 
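/*
 * The SEC_VIEWPORT/SECONDARY_*_C additions show the plumbing every new HUBP
 * register needs in this header, in four places: an SRI() entry in the
 * REG_LIST so the address is resolved per instance, a uint32_t slot in the
 * register-struct macro, HUBP_SF() entries in this mask_sh list, and a
 * `type FIELD;` entry in the field list further down. HUBP_SF token-pastes
 * the same way as the AUX_SF macro seen earlier in this series, so
 *
 *	HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START_C,
 *		SEC_VIEWPORT_X_START_C, __SHIFT)
 *
 * expands to roughly
 *
 *	.SEC_VIEWPORT_X_START_C =
 *	    HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT
 *
 * which is what lets min_set_viewport() above program the secondary chroma
 * viewport with plain REG_SET_2() calls.
 */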
HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, SECONDARY_SURFACE_ADDRESS_HIGH, mask_sh),\ @@ -289,8 +305,12 @@ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS, SECONDARY_META_SURFACE_ADDRESS, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, PRIMARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C, PRIMARY_SURFACE_ADDRESS_C, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, SECONDARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C, SECONDARY_SURFACE_ADDRESS_C, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, PRIMARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, PRIMARY_META_SURFACE_ADDRESS_C, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, SECONDARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, SECONDARY_META_SURFACE_ADDRESS_C, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE, SURFACE_INUSE_ADDRESS, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH, SURFACE_INUSE_ADDRESS_HIGH, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_C, SURFACE_INUSE_ADDRESS_C, mask_sh),\ @@ -469,6 +489,10 @@ type PRI_VIEWPORT_HEIGHT_C; \ type PRI_VIEWPORT_X_START_C; \ type PRI_VIEWPORT_Y_START_C; \ + type SEC_VIEWPORT_WIDTH_C; \ + type SEC_VIEWPORT_HEIGHT_C; \ + type SEC_VIEWPORT_X_START_C; \ + type SEC_VIEWPORT_Y_START_C; \ type PRIMARY_SURFACE_ADDRESS_HIGH;\ type PRIMARY_SURFACE_ADDRESS;\ type SECONDARY_SURFACE_ADDRESS_HIGH;\ @@ -479,8 +503,12 @@ type SECONDARY_META_SURFACE_ADDRESS;\ type PRIMARY_SURFACE_ADDRESS_HIGH_C;\ type PRIMARY_SURFACE_ADDRESS_C;\ + type SECONDARY_SURFACE_ADDRESS_HIGH_C;\ + type SECONDARY_SURFACE_ADDRESS_C;\ type PRIMARY_META_SURFACE_ADDRESS_HIGH_C;\ type PRIMARY_META_SURFACE_ADDRESS_C;\ + type SECONDARY_META_SURFACE_ADDRESS_HIGH_C;\ + type SECONDARY_META_SURFACE_ADDRESS_C;\ type SURFACE_INUSE_ADDRESS;\ type SURFACE_INUSE_ADDRESS_HIGH;\ type SURFACE_INUSE_ADDRESS_C;\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 60123db7ba02..eb91432621ab 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -670,6 +670,10 @@ static void dcn10_bios_golden_init(struct dc *dc) int i; bool allow_self_fresh_force_enable = true; +#if defined(CONFIG_DRM_AMD_DC_DCN2_1) + if (dc->hwss.s0i3_golden_init_wa && dc->hwss.s0i3_golden_init_wa(dc)) + return; +#endif if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled) allow_self_fresh_force_enable = dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub); @@ -1300,6 +1304,10 @@ static void dcn10_init_hw(struct dc *dc) } dc->hwss.enable_power_gating_plane(dc->hwseq, true); + + if (dc->clk_mgr->funcs->notify_wm_ranges) + dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr); + } static void dcn10_reset_hw_ctx_wrap( @@ -1452,15 +1460,15 @@ static void log_tf(struct dc_context *ctx, DC_LOG_ALL_TF_CHANNELS("Logging all channels..."); for (i = 0; i < hw_points_num; i++) { - DC_LOG_GAMMA("R\t%d\t%llu\n", i, tf->tf_pts.red[i].value); - DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu\n", i, tf->tf_pts.green[i].value); - DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu\n", i, tf->tf_pts.blue[i].value); + DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value); + DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, 
tf->tf_pts.green[i].value); + DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value); } for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) { - DC_LOG_ALL_GAMMA("R\t%d\t%llu\n", i, tf->tf_pts.red[i].value); - DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu\n", i, tf->tf_pts.green[i].value); - DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu\n", i, tf->tf_pts.blue[i].value); + DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value); + DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value); + DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value); } } @@ -2304,8 +2312,7 @@ void update_dchubp_dpp( dc->res_pool->dccg->funcs->update_dpp_dto( dc->res_pool->dccg, dpp->inst, - pipe_ctx->plane_res.bw.dppclk_khz, - false); + pipe_ctx->plane_res.bw.dppclk_khz); else dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ? dc->clk_mgr->clks.dispclk_khz / 2 : @@ -2512,8 +2519,10 @@ static void program_all_pipe_in_tree( pipe_ctx->stream_res.tg->funcs->set_vtg_params( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); - dc->hwss.blank_pixel_data(dc, pipe_ctx, blank); + if (dc->hwss.setup_vupdate_interrupt) + dc->hwss.setup_vupdate_interrupt(pipe_ctx); + dc->hwss.blank_pixel_data(dc, pipe_ctx, blank); } if (pipe_ctx->plane_state != NULL) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h index 8bf5f0f2301d..88fcc395adf5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h @@ -113,6 +113,20 @@ struct dcn10_link_enc_registers { uint32_t DIG_LANE_ENABLE; /* UNIPHY */ uint32_t CHANNEL_XBAR_CNTL; + /* DPCS */ + uint32_t RDPCSTX_PHY_CNTL3; + uint32_t RDPCSTX_PHY_CNTL4; + uint32_t RDPCSTX_PHY_CNTL5; + uint32_t RDPCSTX_PHY_CNTL6; + uint32_t RDPCSTX_PHY_CNTL7; + uint32_t RDPCSTX_PHY_CNTL8; + uint32_t RDPCSTX_PHY_CNTL9; + uint32_t RDPCSTX_PHY_CNTL10; + uint32_t RDPCSTX_PHY_CNTL11; + uint32_t RDPCSTX_PHY_CNTL12; + uint32_t RDPCSTX_PHY_CNTL13; + uint32_t RDPCSTX_PHY_CNTL14; + uint32_t RDPCSTX_PHY_CNTL15; /* indirect registers */ uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2; uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3; @@ -250,6 +264,10 @@ struct dcn10_link_enc_registers { type RDPCS_EXT_REFCLK_EN;\ type RDPCS_TX_FIFO_EN;\ type UNIPHY_LINK_ENABLE;\ + type UNIPHY_CHANNEL0_XBAR_SOURCE;\ + type UNIPHY_CHANNEL1_XBAR_SOURCE;\ + type UNIPHY_CHANNEL2_XBAR_SOURCE;\ + type UNIPHY_CHANNEL3_XBAR_SOURCE;\ type UNIPHY_CHANNEL0_INVERT;\ type UNIPHY_CHANNEL1_INVERT;\ type UNIPHY_CHANNEL2_INVERT;\ @@ -337,16 +355,46 @@ struct dcn10_link_enc_registers { type RDPCS_TX_FIFO_ERROR_MASK;\ type RDPCS_DPALT_DISABLE_TOGGLE_MASK;\ type RDPCS_DPALT_4LANE_TOGGLE_MASK;\ + type RDPCS_PHY_DPALT_DP4;\ type RDPCS_PHY_DPALT_DISABLE;\ type RDPCS_PHY_DPALT_DISABLE_ACK;\ type RDPCS_PHY_DP_MPLLB_V2I;\ type RDPCS_PHY_DP_MPLLB_FREQ_VCO;\ + type RDPCS_PHY_DP_MPLLB_CP_INT_GS;\ + type RDPCS_PHY_RX_VREF_CTRL;\ type RDPCS_PHY_DP_MPLLB_CP_INT;\ type RDPCS_PHY_DP_MPLLB_CP_PROP;\ type RDPCS_PHY_RX_REF_LD_VAL;\ type RDPCS_PHY_RX_VCO_LD_VAL;\ type DPCSTX_DEBUG_CONFIG; \ - type RDPCSTX_DEBUG_CONFIG + type RDPCSTX_DEBUG_CONFIG; \ + type RDPCS_PHY_DP_TX0_EQ_MAIN;\ + type RDPCS_PHY_DP_TX0_EQ_PRE;\ + type RDPCS_PHY_DP_TX0_EQ_POST;\ + type RDPCS_PHY_DP_TX1_EQ_MAIN;\ + type RDPCS_PHY_DP_TX1_EQ_PRE;\ + type RDPCS_PHY_DP_TX1_EQ_POST;\ + type RDPCS_PHY_DP_TX2_EQ_MAIN;\ + type RDPCS_PHY_DP_MPLLB_CP_PROP_GS;\ + type RDPCS_PHY_DP_TX2_EQ_PRE;\ + type RDPCS_PHY_DP_TX2_EQ_POST;\ + type RDPCS_PHY_DP_TX3_EQ_MAIN;\ + 
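/*
 * As with the HUBP lists, every RDPCSTX_PHY_CNTL* register added to
 * dcn10_link_enc_registers above needs a matching `type FIELD;` entry in
 * this field list: the same macro is instantiated twice to generate the
 * per-ASIC shift and mask tables, along the lines of
 *
 *	struct dcn10_link_enc_shift {
 *		DCN_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
 *	};
 *	struct dcn10_link_enc_mask {
 *		DCN_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
 *	};
 *
 * (a sketch; the real structs in this header may also pull in
 * family-specific lists such as DCN20_LINK_ENCODER_REG_FIELD_LIST), so a
 * field added to only one of the two lists would leave the shift/mask pair
 * out of sync. The new TX0..TX3 EQ_MAIN/EQ_PRE/EQ_POST fields presumably
 * carry per-lane drive settings used by the DCN2 PHY for DP link training.
 */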
type RDPCS_PHY_DCO_RANGE;\ + type RDPCS_PHY_DCO_FINETUNE;\ + type RDPCS_PHY_DP_TX3_EQ_PRE;\ + type RDPCS_PHY_DP_TX3_EQ_POST;\ + type RDPCS_PHY_SUP_PRE_HP;\ + type RDPCS_PHY_DP_TX0_VREGDRV_BYP;\ + type RDPCS_PHY_DP_TX1_VREGDRV_BYP;\ + type RDPCS_PHY_DP_TX2_VREGDRV_BYP;\ + type RDPCS_PHY_DP_TX3_VREGDRV_BYP;\ + type RDPCS_DMCU_DPALT_DIS_BLOCK_REG;\ + type UNIPHYA_SOFT_RESET;\ + type UNIPHYB_SOFT_RESET;\ + type UNIPHYC_SOFT_RESET;\ + type UNIPHYD_SOFT_RESET;\ + type UNIPHYE_SOFT_RESET;\ + type UNIPHYF_SOFT_RESET #define DCN20_LINK_ENCODER_REG_FIELD_LIST(type) \ type DIG_LANE0EN;\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c index e9ebbbe256b4..0a9ad692f541 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c @@ -168,7 +168,10 @@ static void opp1_set_pixel_encoding( REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 0); break; case PIXEL_ENCODING_YCBCR422: - REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 1); + REG_UPDATE_3(FMT_CONTROL, + FMT_PIXEL_ENCODING, 1, + FMT_SUBSAMPLING_MODE, 2, + FMT_CBCR_BIT_REDUCTION_BYPASS, 0); break; case PIXEL_ENCODING_YCBCR420: REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 2); @@ -237,6 +240,9 @@ void opp1_set_dyn_expansion( FMT_DYNAMIC_EXP_EN, 0, FMT_DYNAMIC_EXP_MODE, 0); + if (opp->dyn_expansion == DYN_EXPANSION_DISABLE) + return; + /*00 - 10-bit -> 12-bit dynamic expansion*/ /*01 - 8-bit -> 12-bit dynamic expansion*/ if (signal == SIGNAL_TYPE_HDMI_TYPE_A || diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h index 0f10adea000c..2c0ecfa5a643 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h @@ -116,6 +116,8 @@ type FMT_RAND_G_SEED; \ type FMT_RAND_B_SEED; \ type FMT_PIXEL_ENCODING; \ + type FMT_SUBSAMPLING_MODE; \ + type FMT_CBCR_BIT_REDUCTION_BYPASS; \ type FMT_CLAMP_DATA_EN; \ type FMT_CLAMP_COLOR_FORMAT; \ type FMT_DYNAMIC_EXP_EN; \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index e74a07d03fde..dabccbd49ad4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -1230,59 +1230,25 @@ bool optc1_is_stereo_left_eye(struct timing_generator *optc) return ret; } -bool optc1_is_matching_timing(struct timing_generator *tg, - const struct dc_crtc_timing *otg_timing) +bool optc1_get_hw_timing(struct timing_generator *tg, + struct dc_crtc_timing *hw_crtc_timing) { - struct dc_crtc_timing hw_crtc_timing = {0}; struct dcn_otg_state s = {0}; - if (tg == NULL || otg_timing == NULL) + if (tg == NULL || hw_crtc_timing == NULL) return false; optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s); - hw_crtc_timing.h_total = s.h_total + 1; - hw_crtc_timing.h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end); - hw_crtc_timing.h_front_porch = s.h_total + 1 - s.h_blank_start; - hw_crtc_timing.h_sync_width = s.h_sync_a_end - s.h_sync_a_start; + hw_crtc_timing->h_total = s.h_total + 1; + hw_crtc_timing->h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end); + hw_crtc_timing->h_front_porch = s.h_total + 1 - s.h_blank_start; + hw_crtc_timing->h_sync_width = s.h_sync_a_end - s.h_sync_a_start; - hw_crtc_timing.v_total = s.v_total + 1; - hw_crtc_timing.v_addressable = s.v_total - ((s.v_total - s.v_blank_start) + s.v_blank_end); - hw_crtc_timing.v_front_porch = s.v_total + 
1 - s.v_blank_start; - hw_crtc_timing.v_sync_width = s.v_sync_a_end - s.v_sync_a_start; - - if (otg_timing->h_total != hw_crtc_timing.h_total) - return false; - - if (otg_timing->h_border_left != hw_crtc_timing.h_border_left) - return false; - - if (otg_timing->h_addressable != hw_crtc_timing.h_addressable) - return false; - - if (otg_timing->h_border_right != hw_crtc_timing.h_border_right) - return false; - - if (otg_timing->h_front_porch != hw_crtc_timing.h_front_porch) - return false; - - if (otg_timing->h_sync_width != hw_crtc_timing.h_sync_width) - return false; - - if (otg_timing->v_total != hw_crtc_timing.v_total) - return false; - - if (otg_timing->v_border_top != hw_crtc_timing.v_border_top) - return false; - - if (otg_timing->v_addressable != hw_crtc_timing.v_addressable) - return false; - - if (otg_timing->v_border_bottom != hw_crtc_timing.v_border_bottom) - return false; - - if (otg_timing->v_sync_width != hw_crtc_timing.v_sync_width) - return false; + hw_crtc_timing->v_total = s.v_total + 1; + hw_crtc_timing->v_addressable = s.v_total - ((s.v_total - s.v_blank_start) + s.v_blank_end); + hw_crtc_timing->v_front_porch = s.v_total + 1 - s.v_blank_start; + hw_crtc_timing->v_sync_width = s.v_sync_a_end - s.v_sync_a_start; return true; } @@ -1486,7 +1452,6 @@ static const struct timing_generator_funcs dcn10_tg_funcs = { .get_frame_count = optc1_get_vblank_counter, .get_scanoutpos = optc1_get_crtc_scanoutpos, .get_otg_active_size = optc1_get_otg_active_size, - .is_matching_timing = optc1_is_matching_timing, .set_early_control = optc1_set_early_control, /* used by enable_timing_synchronization. Not need for FPGA */ .wait_for_state = optc1_wait_for_state, @@ -1514,7 +1479,8 @@ static const struct timing_generator_funcs dcn10_tg_funcs = { .configure_crc = optc1_configure_crc, .set_vtg_params = optc1_set_vtg_params, .program_manual_trigger = optc1_program_manual_trigger, - .setup_manual_trigger = optc1_setup_manual_trigger + .setup_manual_trigger = optc1_setup_manual_trigger, + .get_hw_timing = optc1_get_hw_timing, }; void dcn10_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h index 83575599672e..c8d795b335ba 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h @@ -547,9 +547,8 @@ struct dcn_otg_state { void optc1_read_otg_state(struct optc *optc1, struct dcn_otg_state *s); -bool optc1_is_matching_timing( - struct timing_generator *tg, - const struct dc_crtc_timing *otg_timing); +bool optc1_get_hw_timing(struct timing_generator *tg, + struct dc_crtc_timing *hw_crtc_timing); bool optc1_validate_timing( struct timing_generator *optc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 1599bb971111..15640aedd664 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -319,6 +319,14 @@ static const struct dcn10_link_enc_mask le_mask = { LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK) }; +static const struct dce110_aux_registers_shift aux_shift = { + DCN10_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN10_AUX_MASK_SH_LIST(_MASK) +}; + #define ipp_regs(id)\ [id] = {\ IPP_REG_LIST_DCN10(id),\ @@ -471,6 +479,28 @@ static const struct dcn_hubbub_mask hubbub_mask = { HUBBUB_MASK_SH_LIST_DCN10(_MASK) }; +static int 
map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + default: + ASSERT(0); + return 0; + } +} + #define clk_src_regs(index, pllid)\ [index] = {\ CS_COMMON_REG_LIST_DCN1_0(index, pllid),\ @@ -642,7 +672,10 @@ struct dce_aux *dcn10_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -751,14 +784,18 @@ struct link_encoder *dcn10_link_encoder_create( { struct dcn10_link_encoder *enc10 = kzalloc(sizeof(struct dcn10_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc10) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + dcn10_link_encoder_construct(enc10, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source], &le_shift, @@ -1308,6 +1345,8 @@ static bool construct( dc->caps.max_slave_planes = 1; dc->caps.is_apu = true; dc->caps.post_blend_color_processing = false; + dc->caps.extended_aux_timeout_support = false; + /* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */ dc->caps.force_dp_tps4_for_cp2520 = true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index 9aa258f3550b..06e5bbb4545c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -1553,6 +1553,66 @@ unsigned int enc1_dig_source_otg( return tg_inst; } +bool enc1_stream_encoder_dp_get_pixel_format( + struct stream_encoder *enc, + enum dc_pixel_encoding *encoding, + enum dc_color_depth *depth) +{ + uint32_t hw_encoding = 0; + uint32_t hw_depth = 0; + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + if (enc == NULL || + encoding == NULL || + depth == NULL) + return false; + + REG_GET_2(DP_PIXEL_FORMAT, + DP_PIXEL_ENCODING, &hw_encoding, + DP_COMPONENT_DEPTH, &hw_depth); + + switch (hw_depth) { + case DP_COMPONENT_PIXEL_DEPTH_6BPC: + *depth = COLOR_DEPTH_666; + break; + case DP_COMPONENT_PIXEL_DEPTH_8BPC: + *depth = COLOR_DEPTH_888; + break; + case DP_COMPONENT_PIXEL_DEPTH_10BPC: + *depth = COLOR_DEPTH_101010; + break; + case DP_COMPONENT_PIXEL_DEPTH_12BPC: + *depth = COLOR_DEPTH_121212; + break; + case DP_COMPONENT_PIXEL_DEPTH_16BPC: + *depth = COLOR_DEPTH_161616; + break; + default: + *depth = COLOR_DEPTH_UNDEFINED; + break; + } + + switch (hw_encoding) { + case DP_PIXEL_ENCODING_TYPE_RGB444: + *encoding = PIXEL_ENCODING_RGB; + break; + case DP_PIXEL_ENCODING_TYPE_YCBCR422: + *encoding = PIXEL_ENCODING_YCBCR422; + break; + case DP_PIXEL_ENCODING_TYPE_YCBCR444: + case DP_PIXEL_ENCODING_TYPE_Y_ONLY: + *encoding = PIXEL_ENCODING_YCBCR444; + break; + case DP_PIXEL_ENCODING_TYPE_YCBCR420: + *encoding = PIXEL_ENCODING_YCBCR420; + break; + default: + *encoding = PIXEL_ENCODING_UNDEFINED; + break; + } + return true; +} + static const struct stream_encoder_funcs dcn10_str_enc_funcs = { .dp_set_stream_attribute = enc1_stream_encoder_dp_set_stream_attribute, @@ -1589,6 +1649,8 
@@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = { .dig_connect_to_otg = enc1_dig_connect_to_otg, .hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute, .dig_source_otg = enc1_dig_source_otg, + + .dp_get_pixel_format = enc1_stream_encoder_dp_get_pixel_format, }; void dcn10_stream_encoder_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index a512cbea00d1..c9cbc21d121e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -621,4 +621,9 @@ void get_audio_clock_info( void enc1_reset_hdmi_stream_attribute( struct stream_encoder *enc); +bool enc1_stream_encoder_dp_get_pixel_format( + struct stream_encoder *enc, + enum dc_pixel_encoding *encoding, + enum dc_color_depth *depth); + #endif /* __DC_STREAM_ENCODER_DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c index 16476ed25536..1e1151356e60 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c @@ -44,16 +44,12 @@ #define DC_LOGGER \ dccg->ctx->logger -void dccg2_update_dpp_dto(struct dccg *dccg, - int dpp_inst, - int req_dppclk, - bool reduce_divider_only) +void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); if (dccg->ref_dppclk && req_dppclk) { int ref_dppclk = dccg->ref_dppclk; - int current_phase, current_modulo; ASSERT(req_dppclk <= ref_dppclk); /* need to clamp to 8 bits */ @@ -65,28 +61,9 @@ void dccg2_update_dpp_dto(struct dccg *dccg, if (req_dppclk > ref_dppclk) req_dppclk = ref_dppclk; } - - REG_GET_2(DPPCLK_DTO_PARAM[dpp_inst], - DPPCLK0_DTO_PHASE, ¤t_phase, - DPPCLK0_DTO_MODULO, ¤t_modulo); - - if (reduce_divider_only) { - // requested phase/modulo greater than current - if (req_dppclk * current_modulo >= current_phase * ref_dppclk) { - REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, - DPPCLK0_DTO_PHASE, req_dppclk, - DPPCLK0_DTO_MODULO, ref_dppclk); - } else { - REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, - DPPCLK0_DTO_PHASE, current_phase, - DPPCLK0_DTO_MODULO, current_modulo); - } - } else { - REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, - DPPCLK0_DTO_PHASE, req_dppclk, - DPPCLK0_DTO_MODULO, ref_dppclk); - } - + REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, + DPPCLK0_DTO_PHASE, req_dppclk, + DPPCLK0_DTO_MODULO, ref_dppclk); REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_ENABLE[dpp_inst], 1); } else { @@ -119,32 +96,6 @@ void dccg2_get_dccg_ref_freq(struct dccg *dccg, void dccg2_init(struct dccg *dccg) { - struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); - - // Fallthrough intentional to program all available dpp_dto's - switch (dccg_dcn->base.ctx->dc->res_pool->pipe_count) { - case 6: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[5], 1); - /* Fall through */ - case 5: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[4], 1); - /* Fall through */ - case 4: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[3], 1); - /* Fall through */ - case 3: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[2], 1); - /* Fall through */ - case 2: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[1], 1); - /* Fall through */ - case 1: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[0], 1); - break; - default: - ASSERT(false); - break; - } } static const struct dccg_funcs dccg2_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h index 74a074a873cd..2205cb0204e7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h @@ -97,7 +97,7 @@ struct dcn_dccg { const struct dccg_mask *dccg_mask; }; -void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk, bool raise_divider_only); +void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk); void dccg2_get_dccg_ref_freq(struct dccg *dccg, unsigned int xtalin_freq_inKhz, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c index 2f5aade1e882..4d7e45892f08 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c @@ -376,13 +376,6 @@ bool dpp2_get_optimal_number_of_taps( struct scaler_data *scl_data, const struct scaling_taps *in_taps) { - uint32_t pixel_width; - - if (scl_data->viewport.width > scl_data->recout.width) - pixel_width = scl_data->recout.width; - else - pixel_width = scl_data->viewport.width; - /* Some ASICs does not support FP16 scaling, so we reject modes require this*/ if (scl_data->viewport.width != scl_data->h_active && scl_data->viewport.height != scl_data->v_active && @@ -464,7 +457,7 @@ static struct dpp_funcs dcn20_dpp_funcs = { .dpp_read_state = dpp20_read_state, .dpp_reset = dpp_reset, .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, - .dpp_get_optimal_number_of_taps = dpp2_get_optimal_number_of_taps, + .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps, .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap, .dpp_set_csc_adjustment = NULL, .dpp_set_csc_default = NULL, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h index 290b2854bd2c..5b03b737b1d6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h @@ -30,16 +30,20 @@ #define TO_DCN20_DPP(dpp)\ container_of(dpp, struct dcn20_dpp, base) -#define TF_REG_LIST_DCN20(id) \ - TF_REG_LIST_DCN(id), \ +#define TF_REG_LIST_DCN20_COMMON_UPDATED(id) \ SRI(CM_BLNDGAM_LUT_WRITE_EN_MASK, CM, id), \ + SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM, id), \ + SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM, id), \ + SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM, id), \ + SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM, id), \ + SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM, id), \ + SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM, id) + +#define TF_REG_LIST_DCN20_COMMON(id) \ SRI(CM_BLNDGAM_CONTROL, CM, id), \ SRI(CM_BLNDGAM_RAMB_START_CNTL_B, CM, id), \ SRI(CM_BLNDGAM_RAMB_START_CNTL_G, CM, id), \ SRI(CM_BLNDGAM_RAMB_START_CNTL_R, CM, id), \ - SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM, id), \ - SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM, id), \ - SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM, id), \ SRI(CM_BLNDGAM_RAMB_END_CNTL1_B, CM, id), \ SRI(CM_BLNDGAM_RAMB_END_CNTL2_B, CM, id), \ SRI(CM_BLNDGAM_RAMB_END_CNTL1_G, CM, id), \ @@ -66,9 +70,6 @@ SRI(CM_BLNDGAM_RAMA_START_CNTL_B, CM, id), \ SRI(CM_BLNDGAM_RAMA_START_CNTL_G, CM, id), \ SRI(CM_BLNDGAM_RAMA_START_CNTL_R, CM, id), \ - SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM, id), \ - SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM, id), \ - SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM, id), \ SRI(CM_BLNDGAM_RAMA_END_CNTL1_B, CM, id), \ SRI(CM_BLNDGAM_RAMA_END_CNTL2_B, CM, id), \ SRI(CM_BLNDGAM_RAMA_END_CNTL1_G, CM, id), \ @@ -147,7 +148,12 @@ SRI(CM_SHAPER_RAMA_REGION_28_29, CM, id), \ SRI(CM_SHAPER_RAMA_REGION_30_31, CM, id), \ SRI(CM_SHAPER_RAMA_REGION_32_33, CM, id), \ 
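/*
 * The reshuffle in this header splits the old monolithic TF_REG_LIST_DCN20
 * into TF_REG_LIST_DCN20_COMMON plus a small TF_REG_LIST_DCN20_COMMON_UPDATED
 * holding the registers whose layout is expected to differ on a later DCN
 * variant (chiefly the BLNDGAM slope/end-control group and the LUT
 * write-enable fields), with the same split mirrored in the _SH_MASK lists
 * below. TF_REG_LIST_DCN20 itself is recomposed from the pieces, so existing
 * users are unaffected, while a new family could reuse the common part and
 * supply only its own updated chunk, e.g.:
 *
 *	#define TF_REG_LIST_DCNXX(id) \
 *		TF_REG_LIST_DCN(id), \
 *		TF_REG_LIST_DCN20_COMMON(id), \
 *		TF_REG_LIST_DCNXX_UPDATED(id)
 *
 * (DCNXX and its _UPDATED list are placeholders for illustration, not part
 * of this patch.)
 */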
- SRI(CM_SHAPER_LUT_INDEX, CM, id), \ + SRI(CM_SHAPER_LUT_INDEX, CM, id) + +#define TF_REG_LIST_DCN20(id) \ + TF_REG_LIST_DCN(id), \ + TF_REG_LIST_DCN20_COMMON(id), \ + TF_REG_LIST_DCN20_COMMON_UPDATED(id), \ SRI(CURSOR_CONTROL, CURSOR0_, id), \ SRI(ALPHA_2BIT_LUT, CNVC_CFG, id), \ SRI(FCNV_FP_BIAS_R, CNVC_CFG, id), \ @@ -166,27 +172,41 @@ SRI(OBUF_MEM_PWR_CTRL, DSCL, id),\ SRI(DSCL_MEM_PWR_CTRL, DSCL, id) -#define TF_REG_LIST_SH_MASK_DCN20(mask_sh)\ - TF_REG_LIST_SH_MASK_DCN(mask_sh), \ + +#define TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh)\ + TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, CM_BLNDGAM_RAMB_EXP_REGION_END_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_LUT_MODE, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_EN_MASK, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_SEL, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh) + + +#define TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh)\ + TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_MODE, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_B, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_G, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, CM_BLNDGAM_RAMB_EXP_REGION_END_B, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_B, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_B, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \ - 
TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_G, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_G, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_G, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_R, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_R, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \ @@ -261,18 +281,9 @@ TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \ @@ -341,9 +352,6 @@ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_EN_MASK, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_SEL, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_LUT_INDEX, CM_BLNDGAM_LUT_INDEX, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_LUT_DATA, CM_BLNDGAM_LUT_DATA, mask_sh), \ TF_SF(CM0_CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, mask_sh), \ @@ -356,7 +364,6 @@ TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK, mask_sh), \ TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_RAM_SEL, mask_sh), \ TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_30BIT_EN, mask_sh), \ - TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_CONFIG_STATUS, mask_sh), \ 
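
The BLNDGAM churn above and below is the mechanical half of a macro split: entries whose field encodings changed on newer silicon move into the TF_REG_LIST_DCN20_COMMON_UPDATED / TF_REG_LIST_SH_MASK_DCN20_UPDATED tails, the stable entries stay in the _COMMON lists, and the original TF_REG_LIST_DCN20 macros are recomposed from the pieces. A standalone model of the composition pattern; SRI and the register names here are invented placeholders, not the real CM_* encodings:

#include <stdio.h>

/* Stand-in for the kernel's SRI() register-entry macro. */
#define SRI(reg) #reg

/* Core entries shared across derivatives. */
#define REG_LIST_COMMON \
	SRI(REG_A), SRI(REG_B)

/* Entries expected to differ on newer ASICs. */
#define REG_LIST_UPDATED \
	SRI(REG_C)

/* The full list is recomposed from the halves, as TF_REG_LIST_DCN20 does. */
static const char *reg_list[] = { REG_LIST_COMMON, REG_LIST_UPDATED };

int main(void)
{
	for (unsigned int i = 0; i < sizeof(reg_list) / sizeof(reg_list[0]); i++)
		printf("%s\n", reg_list[i]);
	return 0;
}

A derivative part can then build its own list from REG_LIST_COMMON plus a different tail without duplicating the common block.
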
TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_READ_SEL, mask_sh), \ TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh), \ TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_START_B, mask_sh), \ @@ -521,9 +528,14 @@ TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_EN_MASK, mask_sh), \ TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_SEL, mask_sh), \ - TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_CONFIG_STATUS, mask_sh), \ TF_SF(CM0_CM_SHAPER_LUT_INDEX, CM_SHAPER_LUT_INDEX, mask_sh), \ - TF_SF(CM0_CM_SHAPER_LUT_DATA, CM_SHAPER_LUT_DATA, mask_sh), \ + TF_SF(CM0_CM_SHAPER_LUT_DATA, CM_SHAPER_LUT_DATA, mask_sh) + + +#define TF_REG_LIST_SH_MASK_DCN20(mask_sh)\ + TF_REG_LIST_SH_MASK_DCN(mask_sh), \ + TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh), \ + TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh), \ TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_CONFIG_STATUS, mask_sh), \ TF_SF(CM0_CM_CONTROL, CM_BYPASS, mask_sh), \ TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \ @@ -560,6 +572,7 @@ TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\ TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh) + #define TF_REG_FIELD_LIST_DCN2_0(type) \ TF_REG_FIELD_LIST(type) \ type CM_BLNDGAM_LUT_DATA; \ @@ -593,6 +606,7 @@ type OBUF_MEM_PWR_FORCE;\ type LUT_MEM_PWR_FORCE + struct dcn2_dpp_shift { TF_REG_FIELD_LIST_DCN2_0(uint8_t); }; @@ -691,11 +705,6 @@ void dpp2_set_hdr_multiplier( struct dpp *dpp_base, uint32_t multiplier); -bool dpp2_get_optimal_number_of_taps( - struct dpp *dpp, - struct scaler_data *scl_data, - const struct scaling_taps *in_taps); - bool dpp2_construct(struct dcn20_dpp *dpp2, struct dc_context *ctx, uint32_t inst, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c index 1b419407af94..63eb377ed9c0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c @@ -118,7 +118,7 @@ static void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock dsc_enc_caps->color_formats.bits.RGB = 1; dsc_enc_caps->color_formats.bits.YCBCR_444 = 1; - dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 0; + dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1; dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0; dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c index cd8bc92ce3ba..880954ac0b02 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c @@ -722,7 +722,6 @@ bool dwb_program_horz_scalar(struct dcn20_dwbc *dwbc20, struct scaling_taps num_taps) { uint32_t h_ratio_luma = 1; - uint32_t h_ratio_chroma = 1; uint32_t h_taps_luma = num_taps.h_taps; uint32_t h_taps_chroma = num_taps.h_taps_c; int32_t h_init_phase_luma = 0; @@ -747,7 +746,6 @@ bool dwb_program_horz_scalar(struct dcn20_dwbc *dwbc20, h_ratio_luma = -1; else h_ratio_luma = dc_fixpt_u3d19(tmp_h_ratio_luma) << 5; - h_ratio_chroma = h_ratio_luma * 2; /*Program ratio*/ REG_UPDATE(WBSCL_HORZ_FILTER_SCALE_RATIO, WBSCL_H_SCALE_RATIO, h_ratio_luma); @@ -803,7 +801,6 @@ bool dwb_program_vert_scalar(struct dcn20_dwbc *dwbc20, enum dwb_subsample_position subsample_position) { uint32_t v_ratio_luma = 1; - uint32_t v_ratio_chroma = 1; uint32_t v_taps_luma = num_taps.v_taps; uint32_t 
v_taps_chroma = num_taps.v_taps_c; int32_t v_init_phase_luma = 0; @@ -827,7 +824,6 @@ bool dwb_program_vert_scalar(struct dcn20_dwbc *dwbc20, v_ratio_luma = -1; else v_ratio_luma = dc_fixpt_u3d19(tmp_v_ratio_luma) << 5; - v_ratio_chroma = v_ratio_luma * 2; /*Program ratio*/ REG_UPDATE(WBSCL_VERT_FILTER_SCALE_RATIO, WBSCL_V_SCALE_RATIO, v_ratio_luma); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c index b83c022e2c6f..8b8438566101 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c @@ -186,14 +186,13 @@ static void hubbub2_get_blk256_size(unsigned int *blk256_width, unsigned int *bl } static void hubbub2_det_request_size( + unsigned int detile_buf_size, unsigned int height, unsigned int width, unsigned int bpe, bool *req128_horz_wc, bool *req128_vert_wc) { - unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */ - unsigned int blk256_height = 0; unsigned int blk256_width = 0; unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc; @@ -236,7 +235,8 @@ bool hubbub2_get_dcc_compression_cap(struct hubbub *hubbub, &segment_order_horz, &segment_order_vert)) return false; - hubbub2_det_request_size(input->surface_size.height, input->surface_size.width, + hubbub2_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size, + input->surface_size.height, input->surface_size.width, bpe, &req128_horz_wc, &req128_vert_wc); if (!req128_horz_wc && !req128_vert_wc) { @@ -588,7 +588,7 @@ static void hubbub2_program_watermarks( DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 180); - hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); + hubbub->funcs->allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); } static const struct hubbub_funcs hubbub2_funcs = { @@ -600,7 +600,8 @@ static const struct hubbub_funcs hubbub2_funcs = { .get_dcc_compression_cap = hubbub2_get_dcc_compression_cap, .wm_read_state = hubbub2_wm_read_state, .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq, - .program_watermarks = hubbub2_program_watermarks + .program_watermarks = hubbub2_program_watermarks, + .allow_self_refresh_control = hubbub1_allow_self_refresh_control }; void hubbub2_construct(struct dcn20_hubbub *hubbub, @@ -618,4 +619,5 @@ void hubbub2_construct(struct dcn20_hubbub *hubbub, hubbub->masks = hubbub_mask; hubbub->debug_test_index_pstate = 0xB; + hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */ } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h index 626117d3b4e9..501532dd523a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h @@ -81,6 +81,7 @@ struct dcn20_hubbub { unsigned int debug_test_index_pstate; struct dcn_watermark_set watermarks; struct dcn20_vmid vmid[16]; + unsigned int detile_buf_size; }; void hubbub2_construct(struct dcn20_hubbub *hubbub, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 1212da12c414..921a36668ced 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -688,7 +688,7 @@ bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx, return true; } -static bool dcn20_set_blend_lut( +bool dcn20_set_blend_lut( struct pipe_ctx *pipe_ctx, 
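
The hubbub hunk above stops hardcoding the 164KB detile buffer inside hubbub2_det_request_size() and instead threads the size through as a parameter, backed by a new dcn20_hubbub::detile_buf_size field set in hubbub2_construct(); a derivative part can then construct with a different buffer size without touching the request-size math. A minimal user-space sketch of the same parameter plumbing, with the actual sizing math elided and all names simplified:

#include <stdio.h>

struct dcn20_hubbub {
	unsigned int detile_buf_size;	/* was a buried local constant */
};

/* The buffer size is now an explicit parameter of the calculation. */
static void det_request_size(unsigned int detile_buf_size,
			     unsigned int height, unsigned int width,
			     unsigned int bpe)
{
	unsigned long swath_bytes = (unsigned long)width * bpe;

	printf("buf=%u bytes, swath=%lu bytes, height=%u\n",
	       detile_buf_size, swath_bytes, height);
}

static void hubbub2_construct(struct dcn20_hubbub *hubbub)
{
	hubbub->detile_buf_size = 164 * 1024;	/* 164KB for DCN2.0 */
}

int main(void)
{
	struct dcn20_hubbub hubbub;

	hubbub2_construct(&hubbub);
	det_request_size(hubbub.detile_buf_size, 2160, 3840, 4);
	return 0;
}
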
const struct dc_plane_state *plane_state) { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; @@ -710,7 +710,7 @@ static bool dcn20_set_blend_lut( return result; } -static bool dcn20_set_shaper_3dlut( +bool dcn20_set_shaper_3dlut( struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state) { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; @@ -999,72 +999,6 @@ void dcn20_enable_plane( } -static void dcn20_program_pipe( - struct dc *dc, - struct pipe_ctx *pipe_ctx, - struct dc_state *context) -{ - pipe_ctx->plane_state->update_flags.bits.full_update = - context->commit_hints.full_update_needed ? 1 : pipe_ctx->plane_state->update_flags.bits.full_update; - - if (pipe_ctx->plane_state->update_flags.bits.full_update) - dcn20_enable_plane(dc, pipe_ctx, context); - - update_dchubp_dpp(dc, pipe_ctx, context); - - set_hdr_multiplier(pipe_ctx); - - if (pipe_ctx->plane_state->update_flags.bits.full_update || - pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || - pipe_ctx->plane_state->update_flags.bits.gamma_change) - dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state); - - /* dcn10_translate_regamma_to_hw_format takes 750us to finish - * only do gamma programming for full update. - * TODO: This can be further optimized/cleaned up - * Always call this for now since it does memcmp inside before - * doing heavy calculation and programming - */ - if (pipe_ctx->plane_state->update_flags.bits.full_update) - dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream); -} - -static void dcn20_program_all_pipe_in_tree( - struct dc *dc, - struct pipe_ctx *pipe_ctx, - struct dc_state *context) -{ - if (pipe_ctx->top_pipe == NULL && !pipe_ctx->prev_odm_pipe) { - bool blank = !is_pipe_tree_visible(pipe_ctx); - - pipe_ctx->stream_res.tg->funcs->program_global_sync( - pipe_ctx->stream_res.tg, - pipe_ctx->pipe_dlg_param.vready_offset, - pipe_ctx->pipe_dlg_param.vstartup_start, - pipe_ctx->pipe_dlg_param.vupdate_offset, - pipe_ctx->pipe_dlg_param.vupdate_width); - - pipe_ctx->stream_res.tg->funcs->set_vtg_params( - pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); - - dc->hwss.blank_pixel_data(dc, pipe_ctx, blank); - - if (dc->hwss.update_odm) - dc->hwss.update_odm(dc, context, pipe_ctx); - } - - if (pipe_ctx->plane_state != NULL) - dcn20_program_pipe(dc, pipe_ctx, context); - - if (pipe_ctx->bottom_pipe != NULL) { - ASSERT(pipe_ctx->bottom_pipe != pipe_ctx); - dcn20_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context); - } else if (pipe_ctx->next_odm_pipe != NULL) { - ASSERT(pipe_ctx->next_odm_pipe != pipe_ctx); - dcn20_program_all_pipe_in_tree(dc, pipe_ctx->next_odm_pipe, context); - } -} - void dcn20_pipe_control_lock_global( struct dc *dc, struct pipe_ctx *pipe, @@ -1124,114 +1058,456 @@ void dcn20_pipe_control_lock( } } -static void dcn20_apply_ctx_for_surface( - struct dc *dc, - const struct dc_stream_state *stream, - int num_planes, - struct dc_state *context) +static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx *new_pipe) { - const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100; - int i; - struct timing_generator *tg; - bool removed_pipe[6] = { false }; - bool interdependent_update = false; - struct pipe_ctx *top_pipe_to_program = - find_top_pipe_for_stream(dc, context, stream); - struct pipe_ctx *prev_top_pipe_to_program = - find_top_pipe_for_stream(dc, dc->current_state, stream); - DC_LOGGER_INIT(dc->ctx->logger); + new_pipe->update_flags.raw = 0; - if (!top_pipe_to_program) + /* Exit on unchanged, unused pipe */ + if 
(!old_pipe->plane_state && !new_pipe->plane_state) return; + /* Detect pipe enable/disable */ + if (!old_pipe->plane_state && new_pipe->plane_state) { + new_pipe->update_flags.bits.enable = 1; + new_pipe->update_flags.bits.mpcc = 1; + new_pipe->update_flags.bits.dppclk = 1; + new_pipe->update_flags.bits.hubp_interdependent = 1; + new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; + new_pipe->update_flags.bits.gamut_remap = 1; + new_pipe->update_flags.bits.scaler = 1; + new_pipe->update_flags.bits.viewport = 1; + if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) { + new_pipe->update_flags.bits.odm = 1; + new_pipe->update_flags.bits.global_sync = 1; + } + return; + } + if (old_pipe->plane_state && !new_pipe->plane_state) { + new_pipe->update_flags.bits.disable = 1; + return; + } - /* Carry over GSL groups in case the context is changing. */ - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - struct pipe_ctx *old_pipe_ctx = - &dc->current_state->res_ctx.pipe_ctx[i]; - - if (pipe_ctx->stream == stream && - pipe_ctx->stream == old_pipe_ctx->stream) - pipe_ctx->stream_res.gsl_group = - old_pipe_ctx->stream_res.gsl_group; + /* Detect top pipe only changes */ + if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) { + /* Detect odm changes */ + if ((old_pipe->next_odm_pipe && new_pipe->next_odm_pipe + && old_pipe->next_odm_pipe->pipe_idx != new_pipe->next_odm_pipe->pipe_idx) + || (!old_pipe->next_odm_pipe && new_pipe->next_odm_pipe) + || (old_pipe->next_odm_pipe && !new_pipe->next_odm_pipe) + || old_pipe->stream_res.opp != new_pipe->stream_res.opp) + new_pipe->update_flags.bits.odm = 1; + + /* Detect global sync changes */ + if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset + || old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start + || old_pipe->pipe_dlg_param.vupdate_offset != new_pipe->pipe_dlg_param.vupdate_offset + || old_pipe->pipe_dlg_param.vupdate_width != new_pipe->pipe_dlg_param.vupdate_width) + new_pipe->update_flags.bits.global_sync = 1; } - tg = top_pipe_to_program->stream_res.tg; + /* + * Detect opp / tg change, only set on change, not on enable + * Assume mpcc inst = pipe index, if not this code needs to be updated + * since mpcc is what is affected by these. In fact all of our sequence + * makes this assumption at the moment with how hubp reset is matched to + * same index mpcc reset. 
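
The detection logic above pivots the whole sequence onto a per-pipe update_flags union: named bits (enable, disable, mpcc, scaler, viewport, ...) that later programming stages test individually, overlaid on a raw word so "did anything change at all?" is a single compare. A reduced standalone model of the idiom; only a few of the real bits are kept and the pipe struct is a stand-in:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Named bits overlay a raw word, so "any change?" is one compare. */
union pipe_update_flags {
	struct {
		uint32_t enable : 1;
		uint32_t disable : 1;
		uint32_t scaler : 1;
		uint32_t viewport : 1;
	} bits;
	uint32_t raw;
};

struct pipe {
	bool has_plane;
	union pipe_update_flags update_flags;
};

static void detect_pipe_changes(const struct pipe *old_pipe, struct pipe *new_pipe)
{
	new_pipe->update_flags.raw = 0;

	if (!old_pipe->has_plane && new_pipe->has_plane) {
		/* Pipe coming up: everything must be programmed. */
		new_pipe->update_flags.bits.enable = 1;
		new_pipe->update_flags.bits.scaler = 1;
		new_pipe->update_flags.bits.viewport = 1;
	} else if (old_pipe->has_plane && !new_pipe->has_plane) {
		new_pipe->update_flags.bits.disable = 1;
	}
}

int main(void)
{
	struct pipe old_pipe = { .has_plane = false };
	struct pipe new_pipe = { .has_plane = true };

	detect_pipe_changes(&old_pipe, &new_pipe);
	if (new_pipe.update_flags.raw)
		printf("pipe needs work, flags=0x%x\n",
		       (unsigned int)new_pipe.update_flags.raw);
	return 0;
}

The union layout is what lets dcn20_program_pipe() later gate cheap paths on individual bits while does_pipe_need_lock() tests the raw word once.
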
+ */ + if (old_pipe->stream_res.opp != new_pipe->stream_res.opp) + new_pipe->update_flags.bits.opp_changed = 1; + if (old_pipe->stream_res.tg != new_pipe->stream_res.tg) + new_pipe->update_flags.bits.tg_changed = 1; + + /* Detect mpcc blending changes, only dpp inst and bot matter here */ + if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp + || old_pipe->stream_res.opp != new_pipe->stream_res.opp + || (!old_pipe->bottom_pipe && new_pipe->bottom_pipe) + || (old_pipe->bottom_pipe && !new_pipe->bottom_pipe) + || (old_pipe->bottom_pipe && new_pipe->bottom_pipe + && old_pipe->bottom_pipe->plane_res.mpcc_inst + != new_pipe->bottom_pipe->plane_res.mpcc_inst)) + new_pipe->update_flags.bits.mpcc = 1; + + /* Detect dppclk change */ + if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz) + new_pipe->update_flags.bits.dppclk = 1; + + /* Check for scl update */ + if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data))) + new_pipe->update_flags.bits.scaler = 1; + /* Check for vp update */ + if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect)) + || memcmp(&old_pipe->plane_res.scl_data.viewport_c, + &new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect))) + new_pipe->update_flags.bits.viewport = 1; + + /* Detect dlg/ttu/rq updates */ + { + struct _vcs_dpi_display_dlg_regs_st old_dlg_attr = old_pipe->dlg_regs; + struct _vcs_dpi_display_ttu_regs_st old_ttu_attr = old_pipe->ttu_regs; + struct _vcs_dpi_display_dlg_regs_st *new_dlg_attr = &new_pipe->dlg_regs; + struct _vcs_dpi_display_ttu_regs_st *new_ttu_attr = &new_pipe->ttu_regs; + + /* Detect pipe interdependent updates */ + if (old_dlg_attr.dst_y_prefetch != new_dlg_attr->dst_y_prefetch || + old_dlg_attr.vratio_prefetch != new_dlg_attr->vratio_prefetch || + old_dlg_attr.vratio_prefetch_c != new_dlg_attr->vratio_prefetch_c || + old_dlg_attr.dst_y_per_vm_vblank != new_dlg_attr->dst_y_per_vm_vblank || + old_dlg_attr.dst_y_per_row_vblank != new_dlg_attr->dst_y_per_row_vblank || + old_dlg_attr.dst_y_per_vm_flip != new_dlg_attr->dst_y_per_vm_flip || + old_dlg_attr.dst_y_per_row_flip != new_dlg_attr->dst_y_per_row_flip || + old_dlg_attr.refcyc_per_meta_chunk_vblank_l != new_dlg_attr->refcyc_per_meta_chunk_vblank_l || + old_dlg_attr.refcyc_per_meta_chunk_vblank_c != new_dlg_attr->refcyc_per_meta_chunk_vblank_c || + old_dlg_attr.refcyc_per_meta_chunk_flip_l != new_dlg_attr->refcyc_per_meta_chunk_flip_l || + old_dlg_attr.refcyc_per_line_delivery_pre_l != new_dlg_attr->refcyc_per_line_delivery_pre_l || + old_dlg_attr.refcyc_per_line_delivery_pre_c != new_dlg_attr->refcyc_per_line_delivery_pre_c || + old_ttu_attr.refcyc_per_req_delivery_pre_l != new_ttu_attr->refcyc_per_req_delivery_pre_l || + old_ttu_attr.refcyc_per_req_delivery_pre_c != new_ttu_attr->refcyc_per_req_delivery_pre_c || + old_ttu_attr.refcyc_per_req_delivery_pre_cur0 != new_ttu_attr->refcyc_per_req_delivery_pre_cur0 || + old_ttu_attr.refcyc_per_req_delivery_pre_cur1 != new_ttu_attr->refcyc_per_req_delivery_pre_cur1 || + old_ttu_attr.min_ttu_vblank != new_ttu_attr->min_ttu_vblank || + old_ttu_attr.qos_level_flip != new_ttu_attr->qos_level_flip) { + old_dlg_attr.dst_y_prefetch = new_dlg_attr->dst_y_prefetch; + old_dlg_attr.vratio_prefetch = new_dlg_attr->vratio_prefetch; + old_dlg_attr.vratio_prefetch_c = new_dlg_attr->vratio_prefetch_c; + old_dlg_attr.dst_y_per_vm_vblank = new_dlg_attr->dst_y_per_vm_vblank; + old_dlg_attr.dst_y_per_row_vblank = 
new_dlg_attr->dst_y_per_row_vblank;
+			old_dlg_attr.dst_y_per_vm_flip = new_dlg_attr->dst_y_per_vm_flip;
+			old_dlg_attr.dst_y_per_row_flip = new_dlg_attr->dst_y_per_row_flip;
+			old_dlg_attr.refcyc_per_meta_chunk_vblank_l = new_dlg_attr->refcyc_per_meta_chunk_vblank_l;
+			old_dlg_attr.refcyc_per_meta_chunk_vblank_c = new_dlg_attr->refcyc_per_meta_chunk_vblank_c;
+			old_dlg_attr.refcyc_per_meta_chunk_flip_l = new_dlg_attr->refcyc_per_meta_chunk_flip_l;
+			old_dlg_attr.refcyc_per_line_delivery_pre_l = new_dlg_attr->refcyc_per_line_delivery_pre_l;
+			old_dlg_attr.refcyc_per_line_delivery_pre_c = new_dlg_attr->refcyc_per_line_delivery_pre_c;
+			old_ttu_attr.refcyc_per_req_delivery_pre_l = new_ttu_attr->refcyc_per_req_delivery_pre_l;
+			old_ttu_attr.refcyc_per_req_delivery_pre_c = new_ttu_attr->refcyc_per_req_delivery_pre_c;
+			old_ttu_attr.refcyc_per_req_delivery_pre_cur0 = new_ttu_attr->refcyc_per_req_delivery_pre_cur0;
+			old_ttu_attr.refcyc_per_req_delivery_pre_cur1 = new_ttu_attr->refcyc_per_req_delivery_pre_cur1;
+			old_ttu_attr.min_ttu_vblank = new_ttu_attr->min_ttu_vblank;
+			old_ttu_attr.qos_level_flip = new_ttu_attr->qos_level_flip;
+			new_pipe->update_flags.bits.hubp_interdependent = 1;
+		}
+		/* Detect any other updates to ttu/rq/dlg */
+		if (memcmp(&old_dlg_attr, &new_pipe->dlg_regs, sizeof(old_dlg_attr)) ||
+				memcmp(&old_ttu_attr, &new_pipe->ttu_regs, sizeof(old_ttu_attr)) ||
+				memcmp(&old_pipe->rq_regs, &new_pipe->rq_regs, sizeof(old_pipe->rq_regs)))
+			new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+	}
+}
-	interdependent_update = top_pipe_to_program->plane_state &&
-		top_pipe_to_program->plane_state->update_flags.bits.full_update;
+static void dcn20_update_dchubp_dpp(
+	struct dc *dc,
+	struct pipe_ctx *pipe_ctx,
+	struct dc_state *context)
+{
+	struct hubp *hubp = pipe_ctx->plane_res.hubp;
+	struct dpp *dpp = pipe_ctx->plane_res.dpp;
+	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
-	if (interdependent_update)
-		lock_all_pipes(dc, context, true);
-	else
-		dcn20_pipe_control_lock(dc, top_pipe_to_program, true);
+	if (pipe_ctx->update_flags.bits.dppclk)
+		dpp->funcs->dpp_dppclk_control(dpp, false, true);
-	if (num_planes == 0) {
-		/* OTG blank before remove all front end */
-		dc->hwss.blank_pixel_data(dc, top_pipe_to_program, true);
+	/* TODO: Need input parameter to tell which OTG the current DCHUB pipe ties to.
+	 * VTG is within DCHUBBUB, which is a common block shared by each pipe HUBP.
+	 * VTG is 1:1 mapping with OTG.
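
The dlg/ttu comparison above uses a two-tier trick that is easy to misread: the fields relevant to interdependent programming are compared explicitly and, on mismatch, copied from the new registers into the old snapshot, so the trailing memcmp() of the whole structs only fires on the remaining fields and sets hubp_rq_dlg_ttu independently of hubp_interdependent. A standalone reduction of the pattern; the struct and field names are placeholders:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct dlg_regs {
	int prefetch;	/* stands in for the interdependent-update fields */
	int other_a;	/* stands in for everything else */
	int other_b;
};

int main(void)
{
	struct dlg_regs old_regs = { .prefetch = 1, .other_a = 2, .other_b = 3 };
	struct dlg_regs new_regs = { .prefetch = 9, .other_a = 2, .other_b = 3 };
	bool interdependent = false;
	bool rq_dlg_ttu = false;

	/* Tier 1: explicit compare, then neutralize the field in the snapshot. */
	if (old_regs.prefetch != new_regs.prefetch) {
		old_regs.prefetch = new_regs.prefetch;
		interdependent = true;
	}

	/* Tier 2: whatever still differs needs the full rq/dlg/ttu reprogram. */
	if (memcmp(&old_regs, &new_regs, sizeof(old_regs)))
		rq_dlg_ttu = true;

	printf("interdependent=%d full_rq_dlg_ttu=%d\n", interdependent, rq_dlg_ttu);
	return 0;
}

Here only the tier-1 field changed, so the program prints interdependent=1 full_rq_dlg_ttu=0, which is exactly the split the kernel code is after.
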
Each pipe HUBP will select which VTG + */ + if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) { + hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst); + + hubp->funcs->hubp_setup( + hubp, + &pipe_ctx->dlg_regs, + &pipe_ctx->ttu_regs, + &pipe_ctx->rq_regs, + &pipe_ctx->pipe_dlg_param); + } + if (pipe_ctx->update_flags.bits.hubp_interdependent) + hubp->funcs->hubp_setup_interdependent( + hubp, + &pipe_ctx->dlg_regs, + &pipe_ctx->ttu_regs); + + if (pipe_ctx->update_flags.bits.enable || + plane_state->update_flags.bits.bpp_change || + plane_state->update_flags.bits.input_csc_change || + plane_state->update_flags.bits.color_space_change || + plane_state->update_flags.bits.coeff_reduction_change) { + struct dc_bias_and_scale bns_params = {0}; + + // program the input csc + dpp->funcs->dpp_setup(dpp, + plane_state->format, + EXPANSION_MODE_ZERO, + plane_state->input_csc_color_matrix, + plane_state->color_space, + NULL); + + if (dpp->funcs->dpp_program_bias_and_scale) { + //TODO :for CNVC set scale and bias registers if necessary + dcn10_build_prescale_params(&bns_params, plane_state); + dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); + } } - /* Disconnect unused mpcc */ - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - struct pipe_ctx *old_pipe_ctx = - &dc->current_state->res_ctx.pipe_ctx[i]; - /* - * Powergate reused pipes that are not powergated - * fairly hacky right now, using opp_id as indicator - * TODO: After move dc_post to dc_update, this will - * be removed. - */ - if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) { - if (old_pipe_ctx->stream_res.tg == tg && - old_pipe_ctx->plane_res.hubp && - old_pipe_ctx->plane_res.hubp->opp_id != OPP_ID_INVALID) - dc->hwss.disable_plane(dc, old_pipe_ctx); + if (pipe_ctx->update_flags.bits.mpcc + || plane_state->update_flags.bits.global_alpha_change + || plane_state->update_flags.bits.per_pixel_alpha_change) { + /* Need mpcc to be idle if changing opp */ + if (pipe_ctx->update_flags.bits.opp_changed) { + struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx]; + int mpcc_inst; + + for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) { + if (!old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) + continue; + dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst); + old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false; + } } + dc->hwss.update_mpcc(dc, pipe_ctx); + } - if ((!pipe_ctx->plane_state || - pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) && - old_pipe_ctx->plane_state && - old_pipe_ctx->stream_res.tg == tg) { + if (pipe_ctx->update_flags.bits.scaler || + plane_state->update_flags.bits.scaling_change || + plane_state->update_flags.bits.position_change || + plane_state->update_flags.bits.per_pixel_alpha_change || + pipe_ctx->stream->update_flags.bits.scaling) { + pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha; + ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_30BPP); + /* scaler configuration */ + pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler( + pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data); + } - dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx); - removed_pipe[i] = true; + if (pipe_ctx->update_flags.bits.viewport || + (context == dc->current_state && plane_state->update_flags.bits.scaling_change) || + (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) + 
hubp->funcs->mem_program_viewport( + hubp, + &pipe_ctx->plane_res.scl_data.viewport, + &pipe_ctx->plane_res.scl_data.viewport_c); + + /* Any updates are handled in dc interface, just need to apply existing for plane enable */ + if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed) + && pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { + dc->hwss.set_cursor_position(pipe_ctx); + dc->hwss.set_cursor_attribute(pipe_ctx); + + if (dc->hwss.set_cursor_sdr_white_level) + dc->hwss.set_cursor_sdr_white_level(pipe_ctx); + } - DC_LOG_DC("Reset mpcc for pipe %d\n", - old_pipe_ctx->pipe_idx); - } + /* Any updates are handled in dc interface, just need + * to apply existing for plane enable / opp change */ + if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed + || pipe_ctx->stream->update_flags.bits.gamut_remap + || pipe_ctx->stream->update_flags.bits.out_csc) { + /* dpp/cm gamut remap*/ + dc->hwss.program_gamut_remap(pipe_ctx); + + /*call the dcn2 method which uses mpc csc*/ + dc->hwss.program_output_csc(dc, + pipe_ctx, + pipe_ctx->stream->output_color_space, + pipe_ctx->stream->csc_color_matrix.matrix, + hubp->opp_id); } - if (num_planes > 0) - dcn20_program_all_pipe_in_tree(dc, top_pipe_to_program, context); + if (pipe_ctx->update_flags.bits.enable || + pipe_ctx->update_flags.bits.opp_changed || + plane_state->update_flags.bits.pixel_format_change || + plane_state->update_flags.bits.horizontal_mirror_change || + plane_state->update_flags.bits.rotation_change || + plane_state->update_flags.bits.swizzle_change || + plane_state->update_flags.bits.dcc_change || + plane_state->update_flags.bits.bpp_change || + plane_state->update_flags.bits.scaling_change || + plane_state->update_flags.bits.plane_size_change) { + struct plane_size size = plane_state->plane_size; + + size.surface_size = pipe_ctx->plane_res.scl_data.viewport; + hubp->funcs->hubp_program_surface_config( + hubp, + plane_state->format, + &plane_state->tiling_info, + &size, + plane_state->rotation, + &plane_state->dcc, + plane_state->horizontal_mirror, + 0); + hubp->power_gated = false; + } - /* Program secondary blending tree and writeback pipes */ - if ((stream->num_wb_info > 0) && (dc->hwss.program_all_writeback_pipes_in_tree)) - dc->hwss.program_all_writeback_pipes_in_tree(dc, stream, context); + if (pipe_ctx->update_flags.bits.enable || plane_state->update_flags.bits.addr_update) + dc->hwss.update_plane_addr(dc, pipe_ctx); - if (interdependent_update) - for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (pipe_ctx->update_flags.bits.enable) + hubp->funcs->set_blank(hubp, false); +} + + +static void dcn20_program_pipe( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context) +{ + /* Only need to unblank on top pipe */ + if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.abm_level) + && !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe) + dc->hwss.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible); + + if (pipe_ctx->update_flags.bits.global_sync) { + pipe_ctx->stream_res.tg->funcs->program_global_sync( + pipe_ctx->stream_res.tg, + pipe_ctx->pipe_dlg_param.vready_offset, + pipe_ctx->pipe_dlg_param.vstartup_start, + pipe_ctx->pipe_dlg_param.vupdate_offset, + pipe_ctx->pipe_dlg_param.vupdate_width); + + pipe_ctx->stream_res.tg->funcs->set_vtg_params( + pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); + + if (dc->hwss.setup_vupdate_interrupt) + dc->hwss.setup_vupdate_interrupt(pipe_ctx); + } + + if 
(pipe_ctx->update_flags.bits.odm)
+		dc->hwss.update_odm(dc, context, pipe_ctx);
+
+	if (pipe_ctx->update_flags.bits.enable)
+		dcn20_enable_plane(dc, pipe_ctx, context);
+
+	if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw)
+		dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
+
+	if (pipe_ctx->update_flags.bits.enable
+			|| pipe_ctx->plane_state->update_flags.bits.sdr_white_level)
+		set_hdr_multiplier(pipe_ctx);
+
+	if (pipe_ctx->update_flags.bits.enable ||
+	    pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+	    pipe_ctx->plane_state->update_flags.bits.gamma_change)
+		dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
+
+	/* dcn10_translate_regamma_to_hw_format takes 750us to finish;
+	 * only do gamma programming for powering on. The internal memcmp
+	 * avoids updating on slave planes.
+	 */
+	if (pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.out_tf)
+		dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
+
+	/* If the pipe has been enabled or has a different opp, we
+	 * should reprogram the fmt. This deals with cases where
+	 * interaction between mpc and odm combine on different streams
+	 * causes a different pipe to be chosen to odm combine with.
+	 */
+	if (pipe_ctx->update_flags.bits.enable
+	    || pipe_ctx->update_flags.bits.opp_changed) {
+
+		pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+			pipe_ctx->stream_res.opp,
+			COLOR_SPACE_YCBCR601,
+			pipe_ctx->stream->timing.display_color_depth,
+			pipe_ctx->stream->signal);
+
+		pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
+			pipe_ctx->stream_res.opp,
+			&pipe_ctx->stream->bit_depth_params,
+			&pipe_ctx->stream->clamping);
+	}
+}
+
+static bool does_pipe_need_lock(struct pipe_ctx *pipe)
+{
+	if ((pipe->plane_state && pipe->plane_state->update_flags.raw)
+			|| pipe->update_flags.raw)
+		return true;
+	if (pipe->bottom_pipe)
+		return does_pipe_need_lock(pipe->bottom_pipe);
+
+	return false;
+}
+
+static void dcn20_program_front_end_for_ctx(
+	struct dc *dc,
+	struct dc_state *context)
+{
+	const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
+	int i;
+	bool pipe_locked[MAX_PIPES] = {false};
+	DC_LOGGER_INIT(dc->ctx->logger);
+
+	/* Carry over GSL groups in case the context is changing.
*/ + for (i = 0; i < dc->res_pool->pipe_count; i++) + if (context->res_ctx.pipe_ctx[i].stream == dc->current_state->res_ctx.pipe_ctx[i].stream) + context->res_ctx.pipe_ctx[i].stream_res.gsl_group = + dc->current_state->res_ctx.pipe_ctx[i].stream_res.gsl_group; + + /* Set pipe update flags and lock pipes */ + for (i = 0; i < dc->res_pool->pipe_count; i++) + dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i], + &context->res_ctx.pipe_ctx[i]); + for (i = 0; i < dc->res_pool->pipe_count; i++) + if (!context->res_ctx.pipe_ctx[i].top_pipe && + does_pipe_need_lock(&context->res_ctx.pipe_ctx[i])) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - /* Skip inactive pipes and ones already updated */ - if (!pipe_ctx->stream || pipe_ctx->stream == stream || - !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg)) - continue; + if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable) + dc->hwss.pipe_control_lock(dc, pipe_ctx, true); + if (!pipe_ctx->update_flags.bits.enable) + dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], true); + pipe_locked[i] = true; + } - pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent( - pipe_ctx->plane_res.hubp, - &pipe_ctx->dlg_regs, - &pipe_ctx->ttu_regs); + /* OTG blank before disabling all front ends */ + for (i = 0; i < dc->res_pool->pipe_count; i++) + if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable + && !context->res_ctx.pipe_ctx[i].top_pipe + && !context->res_ctx.pipe_ctx[i].prev_odm_pipe + && context->res_ctx.pipe_ctx[i].stream) + dc->hwss.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true); + + /* Disconnect mpcc */ + for (i = 0; i < dc->res_pool->pipe_count; i++) + if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable + || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) { + dc->hwss.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]); + DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx); } - if (interdependent_update) - lock_all_pipes(dc, context, false); - else - dcn20_pipe_control_lock(dc, top_pipe_to_program, false); + /* + * Program all updated pipes, order matters for mpcc setup. 
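
The locking loop above keys off does_pipe_need_lock(), which recurses down the MPC blending chain, and the programming loop that follows walks each tree from the top pipe down its bottom_pipe links because MPCC setup is order-dependent. A standalone sketch of both walks; pipe_ctx is reduced to the two fields that matter here and program_pipe() is a fake stand-in:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pipe_ctx {
	uint32_t update_flags_raw;
	struct pipe_ctx *bottom_pipe;	/* next pipe in the blending chain */
};

/* A tree needs locking if any pipe in its chain has pending work. */
static bool does_pipe_need_lock(const struct pipe_ctx *pipe)
{
	if (pipe->update_flags_raw)
		return true;
	if (pipe->bottom_pipe)
		return does_pipe_need_lock(pipe->bottom_pipe);
	return false;
}

static void program_pipe(const struct pipe_ctx *pipe, int idx)
{
	printf("programming pipe %d (flags=0x%x)\n", idx,
	       (unsigned int)pipe->update_flags_raw);
}

int main(void)
{
	struct pipe_ctx bottom = { 0x4, NULL };
	struct pipe_ctx top = { 0x0, &bottom };
	int idx = 0;

	if (does_pipe_need_lock(&top))
		/* Top-down walk: order matters for MPCC setup. */
		for (const struct pipe_ctx *p = &top; p; p = p->bottom_pipe)
			program_pipe(p, idx++);
	return 0;
}
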
Start with + * top pipe and program all pipes that follow in order + */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->plane_state && !pipe->top_pipe) { + while (pipe) { + dcn20_program_pipe(dc, pipe, context); + pipe = pipe->bottom_pipe; + } + /* Program secondary blending tree and writeback pipes */ + pipe = &context->res_ctx.pipe_ctx[i]; + if (!pipe->prev_odm_pipe && pipe->stream->num_wb_info > 0 + && (pipe->update_flags.raw || pipe->plane_state->update_flags.raw || pipe->stream->update_flags.raw) + && dc->hwss.program_all_writeback_pipes_in_tree) + dc->hwss.program_all_writeback_pipes_in_tree(dc, pipe->stream, context); + } + } + /* Unlock all locked pipes */ for (i = 0; i < dc->res_pool->pipe_count; i++) - if (removed_pipe[i]) - dcn20_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); + if (pipe_locked[i]) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable) + dc->hwss.pipe_control_lock(dc, pipe_ctx, false); + if (!pipe_ctx->update_flags.bits.enable) + dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], false); + } + + for (i = 0; i < dc->res_pool->pipe_count; i++) + if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) + dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); /* * If we are enabling a pipe, we need to wait for pending clear as this is a critical @@ -1239,15 +1515,22 @@ static void dcn20_apply_ctx_for_surface( * will cause HW to perform an "immediate enable" (as opposed to "vsync enable") which * is unsupported on DCN. */ - i = 0; - if (num_planes > 0 && top_pipe_to_program && - (prev_top_pipe_to_program == NULL || prev_top_pipe_to_program->plane_state == NULL)) { - while (i < TIMEOUT_FOR_PIPE_ENABLE_MS && - top_pipe_to_program->plane_res.hubp->funcs->hubp_is_flip_pending(top_pipe_to_program->plane_res.hubp)) { - i += 1; - msleep(1); + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable) { + struct hubp *hubp = pipe->plane_res.hubp; + int j = 0; + + for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS + && hubp->funcs->hubp_is_flip_pending(hubp); j++) + msleep(1); } } + + /* WA to apply WM setting*/ + if (dc->hwseq->wa.DEGVIDCN21) + dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub); } @@ -1319,8 +1602,12 @@ bool dcn20_update_bandwidth( pipe_ctx->stream_res.tg->funcs->set_vtg_params( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); + if (pipe_ctx->prev_odm_pipe == NULL) dc->hwss.blank_pixel_data(dc, pipe_ctx, blank); + + if (dc->hwss.setup_vupdate_interrupt) + dc->hwss.setup_vupdate_interrupt(pipe_ctx); } pipe_ctx->plane_res.hubp->funcs->hubp_setup( @@ -1337,7 +1624,8 @@ bool dcn20_update_bandwidth( static void dcn20_enable_writeback( struct dc *dc, const struct dc_stream_status *stream_status, - struct dc_writeback_info *wb_info) + struct dc_writeback_info *wb_info, + struct dc_state *context) { struct dwbc *dwb; struct mcif_wb *mcif_wb; @@ -1354,7 +1642,7 @@ static void dcn20_enable_writeback( optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst); /* set MCIF_WB buffer and arbitration configuration */ mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height); - mcif_wb->funcs->config_mcif_arb(mcif_wb, 
&dc->current_state->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]); + mcif_wb->funcs->config_mcif_arb(mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]); /* Enable MCIF_WB */ mcif_wb->funcs->enable_mcif(mcif_wb); /* Enable DWB */ @@ -1702,6 +1990,28 @@ static void dcn20_reset_hw_ctx_wrap( } } +void dcn20_get_mpctree_visual_confirm_color( + struct pipe_ctx *pipe_ctx, + struct tg_color *color) +{ + const struct tg_color pipe_colors[6] = { + {MAX_TG_COLOR_VALUE, 0, 0}, // red + {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, // yellow + {0, MAX_TG_COLOR_VALUE, 0}, // blue + {MAX_TG_COLOR_VALUE / 2, 0, MAX_TG_COLOR_VALUE / 2}, // purple + {0, 0, MAX_TG_COLOR_VALUE}, // green + {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE * 2 / 3, 0}, // orange + }; + + struct pipe_ctx *top_pipe = pipe_ctx; + + while (top_pipe->top_pipe) { + top_pipe = top_pipe->top_pipe; + } + + *color = pipe_colors[top_pipe->pipe_idx]; +} + static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct hubp *hubp = pipe_ctx->plane_res.hubp; @@ -1719,6 +2029,9 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) { dcn10_get_surface_visual_confirm_color( pipe_ctx, &blnd_cfg.black_color); + } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) { + dcn20_get_mpctree_visual_confirm_color( + pipe_ctx, &blnd_cfg.black_color); } if (per_pixel_alpha) @@ -1919,8 +2232,10 @@ static void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc, pipe_ctx->stream_res.stream_enc->id, true); - if (link->dc->hwss.program_dmdata_engine) - link->dc->hwss.program_dmdata_engine(pipe_ctx); + if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) { + if (link->dc->hwss.program_dmdata_engine) + link->dc->hwss.program_dmdata_engine(pipe_ctx); + } link->dc->hwss.update_info_frame(pipe_ctx); @@ -2095,7 +2410,8 @@ void dcn20_hw_sequencer_construct(struct dc *dc) dc->hwss.program_triplebuffer = dcn20_program_tripleBuffer; dc->hwss.set_input_transfer_func = dcn20_set_input_transfer_func; dc->hwss.set_output_transfer_func = dcn20_set_output_transfer_func; - dc->hwss.apply_ctx_for_surface = dcn20_apply_ctx_for_surface; + dc->hwss.apply_ctx_for_surface = NULL; + dc->hwss.program_front_end_for_ctx = dcn20_program_front_end_for_ctx; dc->hwss.pipe_control_lock = dcn20_pipe_control_lock; dc->hwss.pipe_control_lock_global = dcn20_pipe_control_lock_global; dc->hwss.optimize_bandwidth = dcn20_optimize_bandwidth; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h index 92ab3dd91814..3098f1049ed7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h @@ -96,4 +96,20 @@ void dcn20_init_blank( struct dc *dc, struct timing_generator *tg); void dcn20_display_init(struct dc *dc); +void dcn20_pipe_control_lock( + struct dc *dc, + struct pipe_ctx *pipe, + bool lock); +void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_enable_plane( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context); +bool dcn20_set_blend_lut( + struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); +bool dcn20_set_shaper_3dlut( + struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); +void dcn20_get_mpctree_visual_confirm_color( + struct pipe_ctx *pipe_ctx, + struct tg_color 
*color); #endif /* __DC_HWSS_DCN20_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h index 3736b5548a25..0c98a0bbbd14 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h @@ -91,6 +91,13 @@ struct mpll_cfg { uint32_t ref_range; uint32_t ref_clk; bool hdmimode_enable; + bool sup_pre_hp; + bool dp_tx0_vergdrv_byp; + bool dp_tx1_vergdrv_byp; + bool dp_tx2_vergdrv_byp; + bool dp_tx3_vergdrv_byp; + + }; struct dpcssys_phy_seq_cfg { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index 2137e2be2140..3b613fb93ef8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -287,6 +287,10 @@ void optc2_get_optc_source(struct timing_generator *optc, *num_of_src_opp = 2; else *num_of_src_opp = 1; + + /* Work around VBIOS not updating OPTC_NUM_OF_INPUT_SEGMENT */ + if (*src_opp_id_1 == 0xf) + *num_of_src_opp = 1; } void optc2_set_dwb_source(struct timing_generator *optc, @@ -456,7 +460,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = { .set_vtg_params = optc1_set_vtg_params, .program_manual_trigger = optc2_program_manual_trigger, .setup_manual_trigger = optc2_setup_manual_trigger, - .is_matching_timing = optc1_is_matching_timing + .get_hw_timing = optc1_get_hw_timing, }; void dcn20_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 6b2f2f1a1c9c..bbd1c98564be 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -581,11 +581,13 @@ static const struct dcn2_dpp_registers tf_regs[] = { }; static const struct dcn2_dpp_shift tf_shift = { - TF_REG_LIST_SH_MASK_DCN20(__SHIFT) + TF_REG_LIST_SH_MASK_DCN20(__SHIFT), + TF_DEBUG_REG_LIST_SH_DCN10 }; static const struct dcn2_dpp_mask tf_mask = { - TF_REG_LIST_SH_MASK_DCN20(_MASK) + TF_REG_LIST_SH_MASK_DCN20(_MASK), + TF_DEBUG_REG_LIST_MASK_DCN10 }; #define dwbc_regs_dcn2(id)\ @@ -732,6 +734,42 @@ static const struct dcn20_vmid_mask vmid_masks = { DCN20_VMID_MASK_SH_LIST(_MASK) }; +static const struct dce110_aux_registers_shift aux_shift = { + DCN_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN_AUX_MASK_SH_LIST(_MASK) +}; + +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + case TRANSMITTER_UNIPHY_F: + return 5; + break; + default: + ASSERT(0); + return 0; + } +} + #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #define dsc_regsDCN20(id)\ [id] = {\ @@ -825,7 +863,7 @@ static const struct dc_debug_options debug_defaults_drv = { .clock_trace = true, .disable_pplib_clock_request = true, .pipe_split_policy = MPC_SPLIT_DYNAMIC, - .force_single_disp_pipe_split = true, + .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, .performance_trace = false, @@ -922,7 +960,10 @@ struct dce_aux *dcn20_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * 
AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -1042,14 +1083,18 @@ struct link_encoder *dcn20_link_encoder_create( { struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc20) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + dcn20_link_encoder_construct(enc20, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source], &le_shift, @@ -1159,6 +1204,8 @@ static const struct resource_create_funcs res_create_maximus_funcs = { .create_hwseq = dcn20_hwseq_create, }; +static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu); + void dcn20_clock_source_destroy(struct clock_source **clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); @@ -1601,7 +1648,7 @@ static void swizzle_to_dml_params( } } -static bool dcn20_split_stream_for_odm( +bool dcn20_split_stream_for_odm( struct resource_context *res_ctx, const struct resource_pool *pool, struct pipe_ctx *prev_odm_pipe, @@ -1622,7 +1669,6 @@ static bool dcn20_split_stream_for_odm( next_odm_pipe->stream_res.dsc = NULL; #endif if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != next_odm_pipe) { - ASSERT(!next_odm_pipe->next_odm_pipe); next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe; next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe; } @@ -1679,7 +1725,7 @@ static bool dcn20_split_stream_for_odm( return true; } -static void dcn20_split_stream_for_mpc( +void dcn20_split_stream_for_mpc( struct resource_context *res_ctx, const struct resource_pool *pool, struct pipe_ctx *primary_pipe, @@ -1765,7 +1811,7 @@ int dcn20_populate_dml_pipes_from_context( pipe_cnt = i; continue; } - if (!resource_are_streams_timing_synchronizable( + if (dc->debug.disable_timing_sync || !resource_are_streams_timing_synchronizable( res_ctx->pipe_ctx[pipe_cnt].stream, res_ctx->pipe_ctx[i].stream)) { synchronized_vblank = false; @@ -1897,7 +1943,7 @@ int dcn20_populate_dml_pipes_from_context( break; case PIXEL_ENCODING_YCBCR420: pipes[pipe_cnt].dout.output_format = dm_420; - pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3) / 2; + pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3.0) / 2; break; case PIXEL_ENCODING_YCBCR422: if (true) /* todo */ @@ -1911,6 +1957,11 @@ int dcn20_populate_dml_pipes_from_context( pipes[pipe_cnt].dout.output_bpp = output_bpc * 3; } +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT + if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC) + pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0; +#endif + /* todo: default max for now, until there is logic reflecting this in dc*/ pipes[pipe_cnt].dout.output_bpc = 12; /* @@ -2132,7 +2183,7 @@ void dcn20_set_mcif_arb_params( } #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT -static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) +bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) { int i; @@ -2167,7 +2218,7 @@ static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) } #endif -static struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, +struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, struct resource_context *res_ctx, const struct resource_pool *pool, const struct pipe_ctx *primary_pipe) @@ -2207,7 +2258,8 @@ static 
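
One defensive refactor above deserves a note: link encoder registers are no longer indexed directly by the transmitter enum; the new map_transmitter_id_to_phy_instance() translates the enum to a PHY index first, so enum reordering or gaps cannot silently address the wrong register block. The sketch below compiles the same idea stand-alone, with only a subset of the real enum values, and drops the unreachable break statements that follow each return in the patch:

#include <assert.h>
#include <stdio.h>

enum transmitter {
	TRANSMITTER_UNIPHY_A,
	TRANSMITTER_UNIPHY_B,
	TRANSMITTER_UNIPHY_C,
};

/* Decouple the register-array index from the raw enum value. */
static int map_transmitter_id_to_phy_instance(enum transmitter t)
{
	switch (t) {
	case TRANSMITTER_UNIPHY_A:
		return 0;
	case TRANSMITTER_UNIPHY_B:
		return 1;
	case TRANSMITTER_UNIPHY_C:
		return 2;
	default:
		assert(0);
		return 0;
	}
}

int main(void)
{
	printf("UNIPHY_B -> phy instance %d\n",
	       map_transmitter_id_to_phy_instance(TRANSMITTER_UNIPHY_B));
	return 0;
}

The explicit mapping is cheap insurance: the caller in dcn20_link_encoder_create() now indexes link_enc_regs[] by the returned instance rather than by enc_init_data->transmitter itself.
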
struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, */ if (secondary_pipe == NULL) { for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) { - if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL) { + if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL + && dc->current_state->res_ctx.pipe_ctx[j].prev_odm_pipe == NULL) { preferred_pipe_idx = j; if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) { @@ -2243,29 +2295,11 @@ static struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, return secondary_pipe; } -bool dcn20_fast_validate_bw( +void dcn20_merge_pipes_for_validate( struct dc *dc, - struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int *pipe_cnt_out, - int *pipe_split_from, - int *vlevel_out) + struct dc_state *context) { - bool out = false; - - int pipe_cnt, i, pipe_idx, vlevel, vlevel_unsplit; - bool odm_capable = context->bw_ctx.dml.ip.odm_capable; - bool force_split = false; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT - bool failed_non_odm_dsc = false; -#endif - int split_threshold = dc->res_pool->pipe_count / 2; - bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC; - - - ASSERT(pipes); - if (!pipes) - return false; + int i; /* merge previously split odm pipes since mode support needs to make the decision */ for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -2320,51 +2354,19 @@ bool dcn20_fast_validate_bw( if (pipe->plane_state) resource_build_scaling_params(pipe); } +} - if (dc->res_pool->funcs->populate_dml_pipes) - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, - &context->res_ctx, pipes); - else - pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, - &context->res_ctx, pipes); - - *pipe_cnt_out = pipe_cnt; - - if (!pipe_cnt) { - out = true; - goto validate_out; - } - - context->bw_ctx.dml.ip.odm_capable = 0; - - vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); - - context->bw_ctx.dml.ip.odm_capable = odm_capable; - -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT - /* 1 dsc per stream dsc validation */ - if (vlevel <= context->bw_ctx.dml.soc.num_states) - if (!dcn20_validate_dsc(dc, context)) { - failed_non_odm_dsc = true; - vlevel = context->bw_ctx.dml.soc.num_states + 1; - } -#endif - - if (vlevel > context->bw_ctx.dml.soc.num_states && odm_capable) - vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); - - if (vlevel > context->bw_ctx.dml.soc.num_states) - goto validate_fail; - - if ((context->stream_count > split_threshold && dc->current_state->stream_count <= split_threshold) - || (context->stream_count <= split_threshold && dc->current_state->stream_count > split_threshold)) - context->commit_hints.full_update_needed = true; - - /*initialize pipe_just_split_from to invalid idx*/ - for (i = 0; i < MAX_PIPES; i++) - pipe_split_from[i] = -1; +int dcn20_validate_apply_pipe_split_flags( + struct dc *dc, + struct dc_state *context, + int vlevel, + bool *split) +{ + int i, pipe_idx, vlevel_split; + bool force_split = false; + bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC; - /* Single display only conditionals get set here */ + /* Single display loop, exits if there is more than one display */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; bool exit_loop = false; @@ -2391,38 +2393,107 @@ bool dcn20_fast_validate_bw( if (exit_loop) break; } - - if (context->stream_count > split_threshold) + /* TODO: fix dc bugs and remove this split threshold thing */ + if (context->stream_count > dc->res_pool->pipe_count / 
2) avoid_split = true; - vlevel_unsplit = vlevel; + /* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */ + if (avoid_split) { + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++) + if (context->bw_ctx.dml.vba.NoOfDPP[vlevel][0][pipe_idx] == 1) + break; + /* Impossible to not split this pipe */ + if (vlevel > context->bw_ctx.dml.soc.num_states) + vlevel = vlevel_split; + pipe_idx++; + } + context->bw_ctx.dml.vba.maxMpcComb = 0; + } + + /* Split loop sets which pipe should be split based on dml outputs and dc flags */ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + if (!context->res_ctx.pipe_ctx[i].stream) continue; - for (; vlevel_unsplit <= context->bw_ctx.dml.soc.num_states; vlevel_unsplit++) - if (context->bw_ctx.dml.vba.NoOfDPP[vlevel_unsplit][0][pipe_idx] == 1) - break; + + if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] > 1) + split[i] = true; + if ((pipe->stream->view_format == + VIEW_3D_FORMAT_SIDE_BY_SIDE || + pipe->stream->view_format == + VIEW_3D_FORMAT_TOP_AND_BOTTOM) && + (pipe->stream->timing.timing_3d_format == + TIMING_3D_FORMAT_TOP_AND_BOTTOM || + pipe->stream->timing.timing_3d_format == + TIMING_3D_FORMAT_SIDE_BY_SIDE)) + split[i] = true; + if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) { + split[i] = true; + context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true; + } + context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] = + context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx]; + /* Adjust dppclk when split is forced, do not bother with dispclk */ + if (split[i] && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1) + context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2; pipe_idx++; } + return vlevel; +} + +bool dcn20_fast_validate_bw( + struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *pipe_cnt_out, + int *pipe_split_from, + int *vlevel_out) +{ + bool out = false; + bool split[MAX_PIPES] = { false }; + int pipe_cnt, i, pipe_idx, vlevel; + + ASSERT(pipes); + if (!pipes) + return false; + + dcn20_merge_pipes_for_validate(dc, context); + + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, &context->res_ctx, pipes); + + *pipe_cnt_out = pipe_cnt; + + if (!pipe_cnt) { + out = true; + goto validate_out; + } + + vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); + + if (vlevel > context->bw_ctx.dml.soc.num_states) + goto validate_fail; + + vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split); + + /*initialize pipe_just_split_from to invalid idx*/ + for (i = 0; i < MAX_PIPES; i++) + pipe_split_from[i] = -1; + for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe; - bool need_split = true; - bool need_split3d; if (!pipe->stream || pipe_split_from[i] >= 0) continue; pipe_idx++; - if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) { - force_split = true; - context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] = true; - context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true; - } - if (force_split && 
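
The avoid-split search above is the crux of dcn20_validate_apply_pipe_split_flags(): for each pipe it scans upward from the validated voltage level until DML reports that a single DPP suffices, and falls back to the split decision when no such level exists; the split loop afterwards also halves RequiredDPPCLK when a split is forced without DML asking for one. A standalone reduction of the voltage search, where the no_of_dpp table is a made-up stand-in for DML's NoOfDPP output:

#include <stdbool.h>
#include <stdio.h>

#define NUM_STATES 4

/* Stand-in for DML's NoOfDPP[vlevel][maxMpcComb][pipe]. */
static const int no_of_dpp[NUM_STATES] = { 2, 2, 1, 1 };

int main(void)
{
	int vlevel = 0;				/* lowest level that passed validation */
	const int vlevel_split = vlevel;	/* fallback if splitting is unavoidable */
	bool split = false;

	/* Climb voltage states until this pipe fits in one DPP. */
	for (; vlevel < NUM_STATES; vlevel++)
		if (no_of_dpp[vlevel] == 1)
			break;

	if (vlevel >= NUM_STATES) {	/* impossible not to split this pipe */
		vlevel = vlevel_split;
		split = true;
	}

	printf("vlevel=%d split=%s\n", vlevel, split ? "yes" : "no");
	return 0;
}

With the table above the search settles on vlevel=2 unsplit: avoiding a split is paid for with a higher voltage level, which is exactly the trade the kernel loop encodes.
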
context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1) - context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2; if (!pipe->top_pipe && !pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) { hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe); ASSERT(hsplit_pipe); @@ -2440,40 +2511,26 @@ bool dcn20_fast_validate_bw( if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state) continue; - need_split3d = ((pipe->stream->view_format == - VIEW_3D_FORMAT_SIDE_BY_SIDE || - pipe->stream->view_format == - VIEW_3D_FORMAT_TOP_AND_BOTTOM) && - (pipe->stream->timing.timing_3d_format == - TIMING_3D_FORMAT_TOP_AND_BOTTOM || - pipe->stream->timing.timing_3d_format == - TIMING_3D_FORMAT_SIDE_BY_SIDE)); - - if (avoid_split && vlevel_unsplit <= context->bw_ctx.dml.soc.num_states && !force_split && !need_split3d) { - need_split = false; - vlevel = vlevel_unsplit; - context->bw_ctx.dml.vba.maxMpcComb = 0; - } else - need_split = context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 2; - /* We do not support mpo + odm at the moment */ if (hsplit_pipe && hsplit_pipe->plane_state != pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) goto validate_fail; - if (need_split3d || need_split || force_split) { + if (split[i]) { if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) { /* pipe not split previously needs split */ hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe); - ASSERT(hsplit_pipe || force_split); - if (!hsplit_pipe) + ASSERT(hsplit_pipe); + if (!hsplit_pipe) { + context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] *= 2; continue; - + } if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) { if (!dcn20_split_stream_for_odm( &context->res_ctx, dc->res_pool, pipe, hsplit_pipe)) goto validate_fail; + dcn20_build_mapped_resource(dc, context, pipe->stream); } else dcn20_split_stream_for_mpc( &context->res_ctx, dc->res_pool, @@ -2487,7 +2544,7 @@ bool dcn20_fast_validate_bw( } #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* Actual dsc count per stream dsc validation*/ - if (failed_non_odm_dsc && !dcn20_validate_dsc(dc, context)) { + if (!dcn20_validate_dsc(dc, context)) { context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE; goto validate_fail; @@ -2506,7 +2563,7 @@ validate_out: return out; } -void dcn20_calculate_wm( +static void dcn20_calculate_wm( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int *out_pipe_cnt, @@ -2527,7 +2584,7 @@ void dcn20_calculate_wm( context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx]; if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_idx] == pipe_idx) pipes[pipe_cnt].pipe.dest.odm_combine = - context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx]; + context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]; else pipes[pipe_cnt].pipe.dest.odm_combine = 0; pipe_idx++; @@ -2536,7 +2593,7 @@ void dcn20_calculate_wm( context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_split_from[i]]; if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_split_from[i]] == pipe_split_from[i]) pipes[pipe_cnt].pipe.dest.odm_combine = - context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_split_from[i]]; + 
context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_split_from[i]]; else pipes[pipe_cnt].pipe.dest.odm_combine = 0; } @@ -2579,6 +2636,11 @@ void dcn20_calculate_wm( context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +#if defined(CONFIG_DRM_AMD_DC_DCN2_1) + context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +#endif if (vlevel < 2) { pipes[0].clks_cfg.voltage = 2; @@ -2590,6 +2652,10 @@ void dcn20_calculate_wm( context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +#if defined(CONFIG_DRM_AMD_DC_DCN2_1) + context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +#endif if (vlevel < 3) { pipes[0].clks_cfg.voltage = 3; @@ -2601,6 +2667,10 @@ void dcn20_calculate_wm( context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +#if defined(CONFIG_DRM_AMD_DC_DCN2_1) + context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +#endif pipes[0].clks_cfg.voltage = vlevel; pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz; @@ -2610,6 +2680,10 @@ void dcn20_calculate_wm( context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +#if defined(CONFIG_DRM_AMD_DC_DCN2_1) + context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 
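/*
 * Editor's note — annotation, not part of the patch: the watermark-set
 * programming above repeats one pattern for clock states A through D. The
 * DML helpers (get_wm_stutter_exit(), get_wm_dram_clock_change(),
 * get_wm_memory_trip(), get_urgent_latency(), ...) return microseconds,
 * while the dcn watermark fields store nanoseconds, hence the "* 1000" on
 * every assignment. A minimal sketch of the per-state step, assuming a
 * hypothetical helper name (fill_wm_set_from_dml) and using only the DML
 * getters visible in this patch:
 *
 *	static void fill_wm_set_from_dml(struct dcn_watermarks *wm,
 *			struct display_mode_lib *dml,
 *			display_e2e_pipe_params_st *pipes, int pipe_cnt)
 *	{
 *		// DML reports times in us; DC watermark fields are in ns.
 *		wm->cstate_pstate.cstate_exit_ns =
 *			get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
 *		wm->cstate_pstate.pstate_change_ns =
 *			get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
 *		wm->pte_meta_urgent_ns =
 *			get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
 *		wm->urgent_latency_ns =
 *			get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
 *	}
 *
 * Each state is evaluated after forcing pipes[0].clks_cfg.voltage to the
 * level of interest, exactly as the surrounding code does before each block.
 */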
+#endif } void dcn20_calculate_dlg_params( @@ -2629,7 +2703,7 @@ void dcn20_calculate_dlg_params( context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000; context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16; context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000; - context->bw_ctx.bw.dcn.clk.fclk_khz = 0; + context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000; context->bw_ctx.bw.dcn.clk.p_state_change_support = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != dm_dram_clock_change_unsupported; @@ -2645,8 +2719,8 @@ void dcn20_calculate_dlg_params( continue; if (!visited[pipe_idx]) { - display_pipe_source_params_st *src = &pipes[pipe_idx_unsplit].pipe.src; - display_pipe_dest_params_st *dst = &pipes[pipe_idx_unsplit].pipe.dest; + display_pipe_source_params_st *src = &pipes[pipe_idx].pipe.src; + display_pipe_dest_params_st *dst = &pipes[pipe_idx].pipe.dest; dst->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit]; dst->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit]; @@ -2806,7 +2880,6 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, ASSERT(false); restore_dml_state: - memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us; return voltage_supported; @@ -2892,6 +2965,7 @@ static struct resource_funcs dcn20_res_pool_funcs = { .populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context, .get_default_swizzle_mode = dcn20_get_default_swizzle_mode, .set_mcif_arb_params = dcn20_set_mcif_arb_params, + .populate_dml_pipes = dcn20_populate_dml_pipes_from_context, .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link }; @@ -2900,8 +2974,6 @@ bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) int i; uint32_t pipe_count = pool->res_cap->num_dwb; - ASSERT(pipe_count > 0); - for (i = 0; i < pipe_count; i++) { struct dcn20_dwbc *dwbc20 = kzalloc(sizeof(struct dcn20_dwbc), GFP_KERNEL); @@ -2947,7 +3019,7 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) return true; } -struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx) +static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx) { struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL); @@ -2962,7 +3034,7 @@ struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx) return pp_smu; } -void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu) +static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu) { if (pp_smu && *pp_smu) { kfree(*pp_smu); @@ -2970,7 +3042,7 @@ void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu) } } -static void cap_soc_clocks( +void dcn20_cap_soc_clocks( struct _vcs_dpi_soc_bounding_box_st *bb, struct pp_smu_nv_clock_table max_clocks) { @@ -3037,10 +3109,10 @@ static void cap_soc_clocks( } } -static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb, +void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb, struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states) { - struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES] = {0}; + struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES]; int i; int 
num_calculated_states = 0; int min_dcfclk = 0; @@ -3048,12 +3120,18 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_ if (num_states == 0) return; + memset(calculated_states, 0, sizeof(calculated_states)); + if (dc->bb_overrides.min_dcfclk_mhz > 0) min_dcfclk = dc->bb_overrides.min_dcfclk_mhz; - else - // Accounting for SOC/DCF relationship, we can go as high as - // 506Mhz in Vmin. We need to code 507 since SMU will round down to 506. - min_dcfclk = 507; + else { + if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) + min_dcfclk = 310; + else + // Accounting for SOC/DCF relationship, we can go as high as + // 506Mhz in Vmin. + min_dcfclk = 506; + } for (i = 0; i < num_states; i++) { int min_fclk_required_by_uclk; @@ -3093,7 +3171,7 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_ bb->clock_limits[num_calculated_states].state = bb->num_states; } -static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb) +void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb) { kernel_fpu_begin(); if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns @@ -3292,14 +3370,14 @@ static bool init_soc_bounding_box(struct dc *dc, } if (clock_limits_available && uclk_states_available && num_states) - update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states); + dcn20_update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states); else if (clock_limits_available) - cap_soc_clocks(loaded_bb, max_clocks); + dcn20_cap_soc_clocks(loaded_bb, max_clocks); } loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator; loaded_ip->max_num_dpp = pool->base.pipe_count; - patch_bounding_box(dc, loaded_bb); + dcn20_patch_bounding_box(dc, loaded_bb); return true; } @@ -3345,6 +3423,7 @@ static bool construct( dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.hw_3d_lut = true; + dc->caps.extended_aux_timeout_support = true; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) { dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h index 44f95aa0d61e..fef473d68a4a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h @@ -95,9 +95,12 @@ struct display_stream_compressor *dcn20_dsc_create( struct dc_context *ctx, uint32_t inst); void dcn20_dsc_destroy(struct display_stream_compressor **dsc); -struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx); -void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu); - +void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb); +void dcn20_cap_soc_clocks( + struct _vcs_dpi_soc_bounding_box_st *bb, + struct pp_smu_nv_clock_table max_clocks); +void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb, + struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states); struct hubp *dcn20_hubp_create( struct dc_context *ctx, uint32_t inst); @@ -116,6 +119,31 @@ void dcn20_set_mcif_arb_params( display_e2e_pipe_params_st *pipes, int pipe_cnt); bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate); +void dcn20_merge_pipes_for_validate( + struct dc *dc, + struct dc_state *context); +int dcn20_validate_apply_pipe_split_flags( + struct dc *dc, + struct dc_state *context, + int 
vlevel, + bool *split); +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT +bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx); +#endif +void dcn20_split_stream_for_mpc( + struct resource_context *res_ctx, + const struct resource_pool *pool, + struct pipe_ctx *primary_pipe, + struct pipe_ctx *secondary_pipe); +bool dcn20_split_stream_for_odm( + struct resource_context *res_ctx, + const struct resource_pool *pool, + struct pipe_ctx *prev_odm_pipe, + struct pipe_ctx *next_odm_pipe); +struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, + struct resource_context *res_ctx, + const struct resource_pool *pool, + const struct pipe_ctx *primary_pipe); bool dcn20_fast_validate_bw( struct dc *dc, struct dc_state *context, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c index 5ab9d6240498..4b3401616434 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c @@ -578,6 +578,10 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = { .set_avmute = enc1_stream_encoder_set_avmute, .dig_connect_to_otg = enc1_dig_connect_to_otg, .dig_source_otg = enc1_dig_source_otg, + + .dp_get_pixel_format = + enc1_stream_encoder_dp_get_pixel_format, + #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .enc_read_state = enc2_read_state, .dp_set_dsc_config = enc2_dp_set_dsc_config, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile index ff50ae71fe27..14113ccf498d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile @@ -1,7 +1,7 @@ # # Makefile for DCN21. -DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o +DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o dcn21_hwseq.o dcn21_link_encoder.o CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c index d1266741763b..f546260c15b7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c @@ -22,6 +22,7 @@ * Authors: AMD * */ +#include <linux/delay.h> #include "dm_services.h" #include "dcn20/dcn20_hubbub.h" #include "dcn21_hubbub.h" @@ -51,7 +52,7 @@ #ifdef NUM_VMID #undef NUM_VMID #endif -#define NUM_VMID 1 +#define NUM_VMID 16 static uint32_t convert_and_clamp( uint32_t wm_ns, @@ -71,56 +72,76 @@ static uint32_t convert_and_clamp( void dcn21_dchvm_init(struct hubbub *hubbub) { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + uint32_t riommu_active; + int i; //Init DCHVM block REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1); //Poll until RIOMMU_ACTIVE = 1 - //TODO: Figure out interval us and retry count - REG_WAIT(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, 1, 5, 100); + for (i = 0; i < 100; i++) { + REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active); - //Reflect the power status of DCHUBBUB - REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1); + if (riommu_active) + break; + else + udelay(5); + } + + if (riommu_active) { + //Reflect the power status of DCHUBBUB + REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1); - //Start rIOMMU prefetching - REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1); + //Start rIOMMU prefetching + REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1); - // Enable dynamic clock gating - REG_UPDATE_4(DCHVM_CLK_CTRL, - HVM_DISPCLK_R_GATE_DIS, 0, - 
HVM_DISPCLK_G_GATE_DIS, 0, - HVM_DCFCLK_R_GATE_DIS, 0, - HVM_DCFCLK_G_GATE_DIS, 0); + // Enable dynamic clock gating + REG_UPDATE_4(DCHVM_CLK_CTRL, + HVM_DISPCLK_R_GATE_DIS, 0, + HVM_DISPCLK_G_GATE_DIS, 0, + HVM_DCFCLK_R_GATE_DIS, 0, + HVM_DCFCLK_G_GATE_DIS, 0); - //Poll until HOSTVM_PREFETCH_DONE = 1 - //TODO: Figure out interval us and retry count - REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100); + //Poll until HOSTVM_PREFETCH_DONE = 1 + REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100); + } } -static int hubbub21_init_dchub(struct hubbub *hubbub, +int hubbub21_init_dchub(struct hubbub *hubbub, struct dcn_hubbub_phys_addr_config *pa_config) { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + struct dcn_vmid_page_table_config phys_config; REG_SET(DCN_VM_FB_LOCATION_BASE, 0, - FB_BASE, pa_config->system_aperture.fb_base); + FB_BASE, pa_config->system_aperture.fb_base >> 24); REG_SET(DCN_VM_FB_LOCATION_TOP, 0, - FB_TOP, pa_config->system_aperture.fb_top); + FB_TOP, pa_config->system_aperture.fb_top >> 24); REG_SET(DCN_VM_FB_OFFSET, 0, - FB_OFFSET, pa_config->system_aperture.fb_offset); + FB_OFFSET, pa_config->system_aperture.fb_offset >> 24); REG_SET(DCN_VM_AGP_BOT, 0, - AGP_BOT, pa_config->system_aperture.agp_bot); + AGP_BOT, pa_config->system_aperture.agp_bot >> 24); REG_SET(DCN_VM_AGP_TOP, 0, - AGP_TOP, pa_config->system_aperture.agp_top); + AGP_TOP, pa_config->system_aperture.agp_top >> 24); REG_SET(DCN_VM_AGP_BASE, 0, - AGP_BASE, pa_config->system_aperture.agp_base); + AGP_BASE, pa_config->system_aperture.agp_base >> 24); + + if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) { + phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12; + phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12; + phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr | 1; //Note: hack + phys_config.depth = 0; + phys_config.block_size = 0; + // Init VMID 0 based on PA config + dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config); + } dcn21_dchvm_init(hubbub); return NUM_VMID; } -static void hubbub21_program_urgent_watermarks( +void hubbub21_program_urgent_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, @@ -160,6 +181,13 @@ static void hubbub21_program_urgent_watermarks( REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0, DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom); } + if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub1->watermarks.a.urgent_latency_ns) { + hubbub1->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0, + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value); + } /* clock state B */ if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) { @@ -192,6 +220,14 @@ static void hubbub21_program_urgent_watermarks( DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->a.frac_urg_bw_nom); } + if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub1->watermarks.b.urgent_latency_ns) { + hubbub1->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0, + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value); + } + /* clock state C */ if 
(safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) { hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns; @@ -223,6 +259,14 @@ static void hubbub21_program_urgent_watermarks( DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->a.frac_urg_bw_nom); } + if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub1->watermarks.c.urgent_latency_ns) { + hubbub1->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0, + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value); + } + /* clock state D */ if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) { hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns; @@ -253,9 +297,17 @@ static void hubbub21_program_urgent_watermarks( REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0, DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->a.frac_urg_bw_nom); } + + if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub1->watermarks.d.urgent_latency_ns) { + hubbub1->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0, + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value); + } } -static void hubbub21_program_stutter_watermarks( +void hubbub21_program_stutter_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, @@ -389,7 +441,7 @@ static void hubbub21_program_stutter_watermarks( } } -static void hubbub21_program_pstate_watermarks( +void hubbub21_program_pstate_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, @@ -564,17 +616,26 @@ void hubbub21_wm_read_state(struct hubbub *hubbub, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_chanage); } +void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + uint32_t prog_wm_value; + + prog_wm_value = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A); + REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); +} static const struct hubbub_funcs hubbub21_funcs = { .update_dchub = hubbub2_update_dchub, .init_dchub_sys_ctx = hubbub21_init_dchub, - .init_vm_ctx = NULL, + .init_vm_ctx = hubbub2_init_vm_ctx, .dcc_support_swizzle = hubbub2_dcc_support_swizzle, .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format, .get_dcc_compression_cap = hubbub2_get_dcc_compression_cap, .wm_read_state = hubbub21_wm_read_state, .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq, .program_watermarks = hubbub21_program_watermarks, + .apply_DEDCN21_147_wa = hubbub21_apply_DEDCN21_147_wa, }; void hubbub21_construct(struct dcn20_hubbub *hubbub, @@ -592,4 +653,5 @@ void hubbub21_construct(struct dcn20_hubbub *hubbub, hubbub->masks = hubbub_mask; hubbub->debug_test_index_pstate = 0xB; + hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */ } diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h index 6ff3cdb89178..c4840dfb1fa5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h @@ -36,6 +36,10 @@ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B),\ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C),\ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D),\ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A),\ + 
SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B),\ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C),\ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D),\ SR(DCHUBBUB_ARB_HOSTVM_CNTL), \ SR(DCHVM_CTRL0), \ SR(DCHVM_MEM_CTRL), \ @@ -44,16 +48,9 @@ SR(DCHVM_RIOMMU_STAT0) #define HUBBUB_REG_LIST_DCN21()\ - HUBBUB_REG_LIST_DCN_COMMON(), \ + HUBBUB_REG_LIST_DCN20_COMMON(), \ HUBBUB_SR_WATERMARK_REG_LIST(), \ - HUBBUB_HVM_REG_LIST(), \ - SR(DCHUBBUB_CRC_CTRL), \ - SR(DCN_VM_FB_LOCATION_BASE),\ - SR(DCN_VM_FB_LOCATION_TOP),\ - SR(DCN_VM_FB_OFFSET),\ - SR(DCN_VM_AGP_BOT),\ - SR(DCN_VM_AGP_TOP),\ - SR(DCN_VM_AGP_BASE) + HUBBUB_HVM_REG_LIST() #define HUBBUB_MASK_SH_LIST_HVM(mask_sh) \ HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, mask_sh), \ @@ -102,7 +99,7 @@ HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, mask_sh) #define HUBBUB_MASK_SH_LIST_DCN21(mask_sh)\ - HUBBUB_MASK_SH_LIST_HVM(mask_sh),\ + HUBBUB_MASK_SH_LIST_HVM(mask_sh), \ HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \ HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \ HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ @@ -114,11 +111,28 @@ HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh) void dcn21_dchvm_init(struct hubbub *hubbub); +int hubbub21_init_dchub(struct hubbub *hubbub, + struct dcn_hubbub_phys_addr_config *pa_config); void hubbub21_program_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); +void hubbub21_program_urgent_watermarks( + struct hubbub *hubbub, + struct dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower); +void hubbub21_program_stutter_watermarks( + struct hubbub *hubbub, + struct dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower); +void hubbub21_program_pstate_watermarks( + struct hubbub *hubbub, + struct dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower); void hubbub21_wm_read_state(struct hubbub *hubbub, struct dcn_hubbub_wm *wm); diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index a00af513aa2b..2f5a5867e674 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -22,6 +22,8 @@ * Authors: AMD * */ + +#include "dcn10/dcn10_hubp.h" #include "dcn21_hubp.h" #include "dm_services.h" @@ -202,7 +204,7 @@ static struct hubp_funcs dcn21_hubp_funcs = { .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer, .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, .hubp_program_surface_flip_and_addr = hubp2_program_surface_flip_and_addr, - .hubp_program_surface_config = hubp2_program_surface_config, + .hubp_program_surface_config = hubp1_program_surface_config, .hubp_is_flip_pending = hubp1_is_flip_pending, .hubp_setup = hubp21_setup, .hubp_setup_interdependent = hubp2_setup_interdependent, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c new file mode 100644 index 000000000000..b25215cadf85 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c @@ -0,0 +1,122 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dm_services.h" +#include "dm_helpers.h" +#include "core_types.h" +#include "resource.h" +#include "dce/dce_hwseq.h" +#include "dcn20/dcn20_hwseq.h" +#include "vmid.h" +#include "reg_helper.h" +#include "hw/clk_mgr.h" + + +#define DC_LOGGER_INIT(logger) + +#define CTX \ + hws->ctx +#define REG(reg)\ + hws->regs->reg + +#undef FN +#define FN(reg_name, field_name) \ + hws->shifts->field_name, hws->masks->field_name + +/* Temporary read settings, future will get values from kmd directly */ +static void mmhub_update_page_table_config(struct dcn_hubbub_phys_addr_config *config, + struct dce_hwseq *hws) +{ + uint32_t page_table_base_hi; + uint32_t page_table_base_lo; + + REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, + PAGE_DIRECTORY_ENTRY_HI32, &page_table_base_hi); + REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, + PAGE_DIRECTORY_ENTRY_LO32, &page_table_base_lo); + + config->gart_config.page_table_base_addr = ((uint64_t)page_table_base_hi << 32) | page_table_base_lo; + +} + +static int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) +{ + struct dcn_hubbub_phys_addr_config config; + + config.system_aperture.fb_top = pa_config->system_aperture.fb_top; + config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset; + config.system_aperture.fb_base = pa_config->system_aperture.fb_base; + config.system_aperture.agp_top = pa_config->system_aperture.agp_top; + config.system_aperture.agp_bot = pa_config->system_aperture.agp_bot; + config.system_aperture.agp_base = pa_config->system_aperture.agp_base; + config.gart_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr; + config.gart_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr; + config.gart_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr; + + mmhub_update_page_table_config(&config, hws); + + return dc->res_pool->hubbub->funcs->init_dchub_sys_ctx(dc->res_pool->hubbub, &config); +} + +// work around for Renoir s0i3, if register is programmed, bypass golden init. 
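/*
 * Editor's note — annotation, not part of the patch: the helper below
 * implements the workaround named in the comment above with a single
 * register compare. It returns true whenever MICROSECOND_TIME_BASE_DIV
 * reads anything other than the hard-coded 0x00120464, relying on the
 * register retaining its contents across s0i3. What the caller of the
 * s0i3_golden_init_wa hook does with that result is not visible in this
 * hunk; only the comparison itself is, and it reduces to:
 *
 *	// Sketch of the check in dcn21_s0i3_golden_init_wa().
 *	bool differs = (REG_READ(MICROSECOND_TIME_BASE_DIV) != 0x00120464);
 */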
+ +static bool dcn21_s0i3_golden_init_wa(struct dc *dc) +{ + struct dce_hwseq *hws = dc->hwseq; + uint32_t value = 0; + + value = REG_READ(MICROSECOND_TIME_BASE_DIV); + + return value != 0x00120464; +} + +void dcn21_exit_optimized_pwr_state( + const struct dc *dc, + struct dc_state *context) +{ + dc->clk_mgr->funcs->update_clocks( + dc->clk_mgr, + context, + false); +} + +void dcn21_optimize_pwr_state( + const struct dc *dc, + struct dc_state *context) +{ + dc->clk_mgr->funcs->update_clocks( + dc->clk_mgr, + context, + true); +} + +void dcn21_hw_sequencer_construct(struct dc *dc) +{ + dcn20_hw_sequencer_construct(dc); + dc->hwss.init_sys_ctx = dcn21_init_sys_ctx; + dc->hwss.s0i3_golden_init_wa = dcn21_s0i3_golden_init_wa; + dc->hwss.optimize_pwr_state = dcn21_optimize_pwr_state; + dc->hwss.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h new file mode 100644 index 000000000000..be67b62e6fb1 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h @@ -0,0 +1,33 @@ +/* +* Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_HWSS_DCN21_H__ +#define __DC_HWSS_DCN21_H__ + +struct dc; + +void dcn21_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_HWSS_DCN21_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c new file mode 100644 index 000000000000..e8a504ca5890 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c @@ -0,0 +1,470 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "reg_helper.h" + +#include <linux/delay.h> +#include "core_types.h" +#include "link_encoder.h" +#include "dcn21_link_encoder.h" +#include "stream_encoder.h" + +#include "i2caux_interface.h" +#include "dc_bios_types.h" + +#include "gpio_service_interface.h" + +#define CTX \ + enc10->base.ctx +#define DC_LOGGER \ + enc10->base.ctx->logger + +#define REG(reg)\ + (enc10->link_regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + enc10->link_shift->field_name, enc10->link_mask->field_name + +#define IND_REG(index) \ + (enc10->link_regs->index) + +static struct mpll_cfg dcn21_mpll_cfg_ref[] = { + // RBR + { + .hdmimode_enable = 0, + .ref_range = 1, + .ref_clk_mpllb_div = 1, + .mpllb_ssc_en = 1, + .mpllb_div5_clk_en = 1, + .mpllb_multiplier = 238, + .mpllb_fracn_en = 0, + .mpllb_fracn_quot = 0, + .mpllb_fracn_rem = 0, + .mpllb_fracn_den = 1, + .mpllb_ssc_up_spread = 0, + .mpllb_ssc_peak = 44237, + .mpllb_ssc_stepsize = 59454, + .mpllb_div_clk_en = 0, + .mpllb_div_multiplier = 0, + .mpllb_hdmi_div = 0, + .mpllb_tx_clk_div = 2, + .tx_vboost_lvl = 5, + .mpllb_pmix_en = 1, + .mpllb_word_div2_en = 0, + .mpllb_ana_v2i = 2, + .mpllb_ana_freq_vco = 2, + .mpllb_ana_cp_int = 9, + .mpllb_ana_cp_prop = 15, + .hdmi_pixel_clk_div = 0, + }, + // HBR + { + .hdmimode_enable = 0, + .ref_range = 1, + .ref_clk_mpllb_div = 1, + .mpllb_ssc_en = 1, + .mpllb_div5_clk_en = 1, + .mpllb_multiplier = 192, + .mpllb_fracn_en = 1, + .mpllb_fracn_quot = 32768, + .mpllb_fracn_rem = 0, + .mpllb_fracn_den = 1, + .mpllb_ssc_up_spread = 0, + .mpllb_ssc_peak = 36864, + .mpllb_ssc_stepsize = 49545, + .mpllb_div_clk_en = 0, + .mpllb_div_multiplier = 0, + .mpllb_hdmi_div = 0, + .mpllb_tx_clk_div = 1, + .tx_vboost_lvl = 5, + .mpllb_pmix_en = 1, + .mpllb_word_div2_en = 0, + .mpllb_ana_v2i = 2, + .mpllb_ana_freq_vco = 3, + .mpllb_ana_cp_int = 9, + .mpllb_ana_cp_prop = 15, + .hdmi_pixel_clk_div = 0, + }, + //HBR2 + { + .hdmimode_enable = 0, + .ref_range = 1, + .ref_clk_mpllb_div = 1, + .mpllb_ssc_en = 1, + .mpllb_div5_clk_en = 1, + .mpllb_multiplier = 192, + .mpllb_fracn_en = 1, + .mpllb_fracn_quot = 32768, + .mpllb_fracn_rem = 0, + .mpllb_fracn_den = 1, + .mpllb_ssc_up_spread = 0, + .mpllb_ssc_peak = 36864, + .mpllb_ssc_stepsize = 49545, + .mpllb_div_clk_en = 0, + .mpllb_div_multiplier = 0, + .mpllb_hdmi_div = 0, + .mpllb_tx_clk_div = 0, + .tx_vboost_lvl = 5, + .mpllb_pmix_en = 1, + .mpllb_word_div2_en = 0, + .mpllb_ana_v2i = 2, + .mpllb_ana_freq_vco = 3, + .mpllb_ana_cp_int = 9, + .mpllb_ana_cp_prop = 15, + .hdmi_pixel_clk_div = 0, + }, + //HBR3 + { + .hdmimode_enable = 0, + .ref_range = 1, + .ref_clk_mpllb_div = 1, + .mpllb_ssc_en = 1, + .mpllb_div5_clk_en = 1, + .mpllb_multiplier = 304, + .mpllb_fracn_en = 1, + .mpllb_fracn_quot = 49152, + .mpllb_fracn_rem = 0, + .mpllb_fracn_den = 1, + .mpllb_ssc_up_spread = 0, + .mpllb_ssc_peak = 55296, + .mpllb_ssc_stepsize = 74318, + .mpllb_div_clk_en = 0, + .mpllb_div_multiplier = 0, + .mpllb_hdmi_div = 0, + .mpllb_tx_clk_div = 0, + .tx_vboost_lvl = 5, + 
.mpllb_pmix_en = 1, + .mpllb_word_div2_en = 0, + .mpllb_ana_v2i = 2, + .mpllb_ana_freq_vco = 1, + .mpllb_ana_cp_int = 7, + .mpllb_ana_cp_prop = 16, + .hdmi_pixel_clk_div = 0, + }, +}; + + +static bool update_cfg_data( + struct dcn10_link_encoder *enc10, + const struct dc_link_settings *link_settings, + struct dpcssys_phy_seq_cfg *cfg) +{ + int i; + + cfg->load_sram_fw = false; + cfg->use_calibration_setting = true; + + //TODO: need to implement a proper lane mapping for Renoir. + for (i = 0; i < 4; i++) + cfg->lane_en[i] = true; + + switch (link_settings->link_rate) { + case LINK_RATE_LOW: + cfg->mpll_cfg = dcn21_mpll_cfg_ref[0]; + break; + case LINK_RATE_HIGH: + cfg->mpll_cfg = dcn21_mpll_cfg_ref[1]; + break; + case LINK_RATE_HIGH2: + cfg->mpll_cfg = dcn21_mpll_cfg_ref[2]; + break; + case LINK_RATE_HIGH3: + cfg->mpll_cfg = dcn21_mpll_cfg_ref[3]; + break; + default: + DC_LOG_ERROR("%s: No supported link rate found %X!\n", + __func__, link_settings->link_rate); + return false; + } + + return true; +} + +void dcn21_link_encoder_get_max_link_cap(struct link_encoder *enc, + struct dc_link_settings *link_settings) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + uint32_t value; + + REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &value); + + if (!value && link_settings->lane_count > LANE_COUNT_TWO) + link_settings->lane_count = LANE_COUNT_TWO; +} + +bool dcn21_link_encoder_is_in_alt_mode(struct link_encoder *enc) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + uint32_t value; + + REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &value); + + // if value == 1 alt mode is disabled, otherwise it is enabled + return !value; +} + +bool dcn21_link_encoder_acquire_phy(struct link_encoder *enc) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + int value; + + if (enc->features.flags.bits.DP_IS_USB_C) { + REG_GET(RDPCSTX_PHY_CNTL6, + RDPCS_PHY_DPALT_DISABLE, &value); + + if (value == 1) { + ASSERT(0); + return false; + } + REG_UPDATE(RDPCSTX_PHY_CNTL6, + RDPCS_PHY_DPALT_DISABLE_ACK, 0); + + udelay(40); + + REG_GET(RDPCSTX_PHY_CNTL6, + RDPCS_PHY_DPALT_DISABLE, &value); + if (value == 1) { + ASSERT(0); + REG_UPDATE(RDPCSTX_PHY_CNTL6, + RDPCS_PHY_DPALT_DISABLE_ACK, 1); + return false; + } + } + + REG_UPDATE(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, 1); + + return true; +} + + + +static void dcn21_link_encoder_release_phy(struct link_encoder *enc) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + + if (enc->features.flags.bits.DP_IS_USB_C) { + REG_UPDATE(RDPCSTX_PHY_CNTL6, + RDPCS_PHY_DPALT_DISABLE_ACK, 1); + } + + REG_UPDATE(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, 0); + +} + +void dcn21_link_encoder_enable_dp_output( + struct link_encoder *enc, + const struct dc_link_settings *link_settings, + enum clock_source_id clock_source) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + struct dcn21_link_encoder *enc21 = (struct dcn21_link_encoder *) enc10; + struct dpcssys_phy_seq_cfg *cfg = &enc21->phy_seq_cfg; + + if (!dcn21_link_encoder_acquire_phy(enc)) + return; + + if (!enc->ctx->dc->debug.avoid_vbios_exec_table) { + dcn10_link_encoder_enable_dp_output(enc, link_settings, clock_source); + return; + } + + if (!update_cfg_data(enc10, link_settings, cfg)) + return; + + enc1_configure_encoder(enc10, link_settings); + + dcn10_link_encoder_setup(enc, SIGNAL_TYPE_DISPLAY_PORT); + +} + +void dcn21_link_encoder_enable_dp_mst_output( + struct link_encoder *enc, + const struct dc_link_settings *link_settings, + enum clock_source_id 
clock_source) +{ + if (!dcn21_link_encoder_acquire_phy(enc)) + return; + + dcn10_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source); +} + +void dcn21_link_encoder_disable_output( + struct link_encoder *enc, + enum signal_type signal) +{ + dcn10_link_encoder_disable_output(enc, signal); + + if (dc_is_dp_signal(signal)) + dcn21_link_encoder_release_phy(enc); +} + + +static const struct link_encoder_funcs dcn21_link_enc_funcs = { +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT + .read_state = link_enc2_read_state, +#endif + .validate_output_with_stream = + dcn10_link_encoder_validate_output_with_stream, + .hw_init = enc2_hw_init, + .setup = dcn10_link_encoder_setup, + .enable_tmds_output = dcn10_link_encoder_enable_tmds_output, + .enable_dp_output = dcn21_link_encoder_enable_dp_output, + .enable_dp_mst_output = dcn21_link_encoder_enable_dp_mst_output, + .disable_output = dcn21_link_encoder_disable_output, + .dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings, + .dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern, + .update_mst_stream_allocation_table = + dcn10_link_encoder_update_mst_stream_allocation_table, + .psr_program_dp_dphy_fast_training = + dcn10_psr_program_dp_dphy_fast_training, + .psr_program_secondary_packet = dcn10_psr_program_secondary_packet, + .connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe, + .enable_hpd = dcn10_link_encoder_enable_hpd, + .disable_hpd = dcn10_link_encoder_disable_hpd, + .is_dig_enabled = dcn10_is_dig_enabled, + .destroy = dcn10_link_encoder_destroy, + .fec_set_enable = enc2_fec_set_enable, + .fec_set_ready = enc2_fec_set_ready, + .fec_is_active = enc2_fec_is_active, + .get_dig_frontend = dcn10_get_dig_frontend, + .is_in_alt_mode = dcn21_link_encoder_is_in_alt_mode, + .get_max_link_cap = dcn21_link_encoder_get_max_link_cap, +}; + +void dcn21_link_encoder_construct( + struct dcn21_link_encoder *enc21, + const struct encoder_init_data *init_data, + const struct encoder_feature_support *enc_features, + const struct dcn10_link_enc_registers *link_regs, + const struct dcn10_link_enc_aux_registers *aux_regs, + const struct dcn10_link_enc_hpd_registers *hpd_regs, + const struct dcn10_link_enc_shift *link_shift, + const struct dcn10_link_enc_mask *link_mask) +{ + struct bp_encoder_cap_info bp_cap_info = {0}; + const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs; + enum bp_result result = BP_RESULT_OK; + struct dcn10_link_encoder *enc10 = &enc21->enc10; + + enc10->base.funcs = &dcn21_link_enc_funcs; + enc10->base.ctx = init_data->ctx; + enc10->base.id = init_data->encoder; + + enc10->base.hpd_source = init_data->hpd_source; + enc10->base.connector = init_data->connector; + + enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; + + enc10->base.features = *enc_features; + + enc10->base.transmitter = init_data->transmitter; + + /* set the flag to indicate whether driver poll the I2C data pin + * while doing the DP sink detect + */ + +/* if (dal_adapter_service_is_feature_supported(as, + FEATURE_DP_SINK_DETECT_POLL_DATA_PIN)) + enc10->base.features.flags.bits. + DP_SINK_DETECT_POLL_DATA_PIN = true;*/ + + enc10->base.output_signals = + SIGNAL_TYPE_DVI_SINGLE_LINK | + SIGNAL_TYPE_DVI_DUAL_LINK | + SIGNAL_TYPE_LVDS | + SIGNAL_TYPE_DISPLAY_PORT | + SIGNAL_TYPE_DISPLAY_PORT_MST | + SIGNAL_TYPE_EDP | + SIGNAL_TYPE_HDMI_TYPE_A; + + /* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE. + * SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY. + * SW assign DIG_FE to non-MST UNIPHY first and MST last. 
So prefer + * DIG is per UNIPHY and used by SST DP, eDP, HDMI, DVI and LVDS. + * Prefer DIG assignment is decided by board design. + * For DCE 8.0, there are only max 6 UNIPHYs, we assume board design + * and VBIOS will filter out 7 UNIPHY for DCE 8.0. + * By this, adding DIGG should not hurt DCE 8.0. + * This will let DCE 8.1 share DCE 8.0 as much as possible + */ + + enc10->link_regs = link_regs; + enc10->aux_regs = aux_regs; + enc10->hpd_regs = hpd_regs; + enc10->link_shift = link_shift; + enc10->link_mask = link_mask; + + switch (enc10->base.transmitter) { + case TRANSMITTER_UNIPHY_A: + enc10->base.preferred_engine = ENGINE_ID_DIGA; + break; + case TRANSMITTER_UNIPHY_B: + enc10->base.preferred_engine = ENGINE_ID_DIGB; + break; + case TRANSMITTER_UNIPHY_C: + enc10->base.preferred_engine = ENGINE_ID_DIGC; + break; + case TRANSMITTER_UNIPHY_D: + enc10->base.preferred_engine = ENGINE_ID_DIGD; + break; + case TRANSMITTER_UNIPHY_E: + enc10->base.preferred_engine = ENGINE_ID_DIGE; + break; + case TRANSMITTER_UNIPHY_F: + enc10->base.preferred_engine = ENGINE_ID_DIGF; + break; + case TRANSMITTER_UNIPHY_G: + enc10->base.preferred_engine = ENGINE_ID_DIGG; + break; + default: + ASSERT_CRITICAL(false); + enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; + } + + /* default to one to mirror Windows behavior */ + enc10->base.features.flags.bits.HDMI_6GB_EN = 1; + + result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios, + enc10->base.id, &bp_cap_info); + + /* Override features with DCE-specific values */ + if (result == BP_RESULT_OK) { + enc10->base.features.flags.bits.IS_HBR2_CAPABLE = + bp_cap_info.DP_HBR2_EN; + enc10->base.features.flags.bits.IS_HBR3_CAPABLE = + bp_cap_info.DP_HBR3_EN; + enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; + enc10->base.features.flags.bits.DP_IS_USB_C = + bp_cap_info.DP_IS_USB_C; + } else { + DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n", + __func__, + result); + } + if (enc10->base.ctx->dc->debug.hdmi20_disable) { + enc10->base.features.flags.bits.HDMI_6GB_EN = 0; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h new file mode 100644 index 000000000000..1d7a1a51f13d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h @@ -0,0 +1,61 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_ENCODER__DCN21_H__ +#define __DC_LINK_ENCODER__DCN21_H__ + +#include "dcn20/dcn20_link_encoder.h" + +struct dcn21_link_encoder { + struct dcn10_link_encoder enc10; + struct dpcssys_phy_seq_cfg phy_seq_cfg; +}; + +#define LINK_ENCODER_MASK_SH_LIST_DCN21(mask_sh)\ + LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh),\ + LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL0_XBAR_SOURCE, mask_sh),\ + LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL1_XBAR_SOURCE, mask_sh),\ + LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL2_XBAR_SOURCE, mask_sh),\ + LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL3_XBAR_SOURCE, mask_sh), \ + SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \ + SR(RDPCSTX0_RDPCSTX_SCRATCH) + +void dcn21_link_encoder_enable_dp_output( + struct link_encoder *enc, + const struct dc_link_settings *link_settings, + enum clock_source_id clock_source); + +void dcn21_link_encoder_construct( + struct dcn21_link_encoder *enc21, + const struct encoder_init_data *init_data, + const struct encoder_feature_support *enc_features, + const struct dcn10_link_enc_registers *link_regs, + const struct dcn10_link_enc_aux_registers *aux_regs, + const struct dcn10_link_enc_hpd_registers *hpd_regs, + const struct dcn10_link_enc_shift *link_shift, + const struct dcn10_link_enc_mask *link_mask); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index de182185fe1f..459bd9a5caed 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -23,8 +23,6 @@ * */ -#include <linux/slab.h> - #include "dm_services.h" #include "dc.h" @@ -42,11 +40,11 @@ #include "irq/dcn21/irq_service_dcn21.h" #include "dcn20/dcn20_dpp.h" #include "dcn20/dcn20_optc.h" -#include "dcn20/dcn20_hwseq.h" +#include "dcn21/dcn21_hwseq.h" #include "dce110/dce110_hw_sequencer.h" #include "dcn20/dcn20_opp.h" #include "dcn20/dcn20_dsc.h" -#include "dcn20/dcn20_link_encoder.h" +#include "dcn21/dcn21_link_encoder.h" #include "dcn20/dcn20_stream_encoder.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" @@ -84,8 +82,9 @@ struct _vcs_dpi_ip_params_st dcn2_1_ip = { - .gpuvm_enable = 0, - .hostvm_enable = 0, + .odm_capable = 1, + .gpuvm_enable = 1, + .hostvm_enable = 1, .gpuvm_max_page_table_levels = 1, .hostvm_max_page_table_levels = 4, .hostvm_cached_page_table_levels = 2, @@ -205,11 +204,11 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { .state = 4, .dcfclk_mhz = 810.0, .fabricclk_mhz = 1600.0, - .dispclk_mhz = 1015.0, - .dppclk_mhz = 1015.0, - .phyclk_mhz = 810.0, + .dispclk_mhz = 1395.0, + .dppclk_mhz = 1285.0, + .phyclk_mhz = 1325.0, .socclk_mhz = 953.0, - .dscclk_mhz = 318.334, + .dscclk_mhz = 489.0, .dram_speed_mts = 4266.0, }, /*Extra state, no dispclk ramping*/ @@ -217,18 +216,18 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { .state = 5, .dcfclk_mhz = 810.0, .fabricclk_mhz = 1600.0, - .dispclk_mhz = 1015.0, - .dppclk_mhz = 1015.0, - .phyclk_mhz = 810.0, + .dispclk_mhz = 1395.0, + .dppclk_mhz = 1285.0, + .phyclk_mhz = 1325.0, .socclk_mhz = 953.0, - .dscclk_mhz = 318.334, + .dscclk_mhz = 489.0, .dram_speed_mts = 4266.0, }, }, - .sr_exit_time_us = 9.0, - .sr_enter_plus_exit_time_us = 11.0, + .sr_exit_time_us = 12.5, + .sr_enter_plus_exit_time_us = 17.0, .urgent_latency_us = 4.0, .urgent_latency_pixel_data_only_us = 4.0, .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, @@ -350,6 +349,30 @@ static const 
struct bios_registers bios_regs = { NBIO_SR(BIOS_SCRATCH_6) }; +static const struct dce_dmcu_registers dmcu_regs = { + DMCU_DCN10_REG_LIST() +}; + +static const struct dce_dmcu_shift dmcu_shift = { + DMCU_MASK_SH_LIST_DCN10(__SHIFT) +}; + +static const struct dce_dmcu_mask dmcu_mask = { + DMCU_MASK_SH_LIST_DCN10(_MASK) +}; + +static const struct dce_abm_registers abm_regs = { + ABM_DCN20_REG_LIST() +}; + +static const struct dce_abm_shift abm_shift = { + ABM_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dce_abm_mask abm_mask = { + ABM_MASK_SH_LIST_DCN20(_MASK) +}; + #ifdef CONFIG_DRM_AMD_DC_DMUB static const struct dcn21_dmcub_registers dmcub_regs = { DMCUB_REG_LIST_DCN() @@ -628,6 +651,14 @@ static const struct dcn10_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(4), }; +static const struct dce110_aux_registers_shift aux_shift = { + DCN_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN_AUX_MASK_SH_LIST(_MASK) +}; + static const struct dcn10_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT) }; @@ -636,6 +667,11 @@ static const struct dcn10_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCN20(_MASK) }; +static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu); + +static int dcn21_populate_dml_pipes_from_context( + struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes); + static struct input_pixel_processor *dcn21_ipp_create( struct dc_context *ctx, uint32_t inst) { @@ -683,7 +719,10 @@ static struct dce_aux *dcn21_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -726,11 +765,12 @@ static const struct resource_caps res_cap_rn = { .num_timing_generator = 4, .num_opp = 4, .num_video_plane = 4, - .num_audio = 6, // 6 audio endpoints. 4 audio streams + .num_audio = 4, // 4 audio endpoints. 
4 audio streams .num_stream_encoder = 5, .num_pll = 5, // maybe 3 because the last two are used for USB-c .num_dwb = 1, .num_ddc = 5, + .num_vmid = 1, #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .num_dsc = 3, #endif @@ -796,15 +836,15 @@ static const struct dc_debug_options debug_defaults_drv = { .clock_trace = true, .disable_pplib_clock_request = true, .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, - .force_single_disp_pipe_split = true, + .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, .performance_trace = false, - .max_downscale_src_width = 5120,/*upto 5K*/ + .max_downscale_src_width = 3840, .disable_pplib_wm_range = false, .scl_reset_length10 = true, .sanity_checks = true, - .disable_48mhz_pwrdwn = true, + .disable_48mhz_pwrdwn = false, }; static const struct dc_debug_options debug_defaults_diags = { @@ -939,7 +979,7 @@ static void destruct(struct dcn21_resource_pool *pool) dcn_dccg_destroy(&pool->base.dccg); if (pool->base.pp_smu != NULL) - dcn20_pp_smu_destroy(&pool->base.pp_smu); + dcn21_pp_smu_destroy(&pool->base.pp_smu); } @@ -969,11 +1009,35 @@ static void calculate_wm_set_for_vlevel( #if defined(CONFIG_DRM_AMD_DC_DCN2_1) wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000; wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000; + wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000; #endif dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached; } +static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb) +{ + kernel_fpu_begin(); + if (dc->bb_overrides.sr_exit_time_ns) { + bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; + } + + if (dc->bb_overrides.sr_enter_plus_exit_time_ns) { + bb->sr_enter_plus_exit_time_us = + dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; + } + + if (dc->bb_overrides.urgent_latency_ns) { + bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0; + } + + if (dc->bb_overrides.dram_clock_change_latency_ns) { + bb->dram_clock_change_latency_us = + dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; + } + kernel_fpu_end(); +} + void dcn21_calculate_wm( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, @@ -988,6 +1052,8 @@ void dcn21_calculate_wm( ASSERT(bw_params); + patch_bounding_box(dc, &context->bw_ctx.dml.soc); + for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; @@ -1021,7 +1087,7 @@ void dcn21_calculate_wm( pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, &context->res_ctx, pipes); else - pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, + pipe_cnt = dcn21_populate_dml_pipes_from_context(dc, &context->res_ctx, pipes); } @@ -1271,6 +1337,12 @@ struct display_stream_compressor *dcn21_dsc_create( static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { + /* + TODO: Fix this function to calculate correct values. + There are known issues with this function currently + that will need to be investigated. Use hardcoded known good values for now.
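   [Editor's annotation, not part of the original TODO: the disabled code
   below copies the SMU-provided clk_table into dcn2_1_soc.clock_limits[]
   and then fills one extra entry past num_states, matching the static
   table's "extra state, no dispclk ramping" convention. When this path is
   revived, the line `dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - i];`
   looks like a slip for `clock_limits[i - 1]` — duplicating the last
   populated entry — since `[i - i]` always aliases entry 0.]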
+ + struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool); struct clk_limit_table *clk_table = &bw_params->clk_table; int i; @@ -1278,7 +1350,6 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator; dcn2_1_ip.max_num_dpp = pool->base.pipe_count; dcn2_1_soc.num_chans = bw_params->num_channels; - dcn2_1_soc.num_states = 0; for (i = 0; i < clk_table->num_entries; i++) { @@ -1286,10 +1357,11 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param dcn2_1_soc.clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; dcn2_1_soc.clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; dcn2_1_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz; - /* This is probably wrong, TODO: find correct calculation */ dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 16 / 1000; - dcn2_1_soc.num_states++; } + dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - i]; + dcn2_1_soc.num_states = i; + */ } /* Temporary Place holder until we can get them from fuse */ @@ -1317,32 +1389,42 @@ static struct dpm_clocks dummy_clocks = { }; -enum pp_smu_status dummy_set_wm_ranges(struct pp_smu *pp, +static enum pp_smu_status dummy_set_wm_ranges(struct pp_smu *pp, struct pp_smu_wm_range_sets *ranges) { return PP_SMU_RESULT_OK; } -enum pp_smu_status dummy_get_dpm_clock_table(struct pp_smu *pp, +static enum pp_smu_status dummy_get_dpm_clock_table(struct pp_smu *pp, struct dpm_clocks *clock_table) { *clock_table = dummy_clocks; return PP_SMU_RESULT_OK; } -struct pp_smu_funcs *dcn21_pp_smu_create(struct dc_context *ctx) +static struct pp_smu_funcs *dcn21_pp_smu_create(struct dc_context *ctx) { struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL); - pp_smu->ctx.ver = PP_SMU_VER_RN; + if (!pp_smu) + return pp_smu; + + if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment) || IS_DIAG_DC(ctx->dce_environment)) { + pp_smu->ctx.ver = PP_SMU_VER_RN; + pp_smu->rn_funcs.get_dpm_clock_table = dummy_get_dpm_clock_table; + pp_smu->rn_funcs.set_wm_ranges = dummy_set_wm_ranges; + } else { - pp_smu->rn_funcs.get_dpm_clock_table = dummy_get_dpm_clock_table; - pp_smu->rn_funcs.set_wm_ranges = dummy_set_wm_ranges; + dm_pp_get_funcs(ctx, pp_smu); + + if (pp_smu->ctx.ver != PP_SMU_VER_RN) + pp_smu = memset(pp_smu, 0, sizeof(struct pp_smu_funcs)); + } return pp_smu; } -void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu) +static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu) { if (pp_smu && *pp_smu) { kfree(*pp_smu); @@ -1400,6 +1482,7 @@ static struct dce_hwseq *dcn21_hwseq_create( hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; + hws->wa.DEGVIDCN21 = true; } return hws; } @@ -1418,10 +1501,152 @@ static const struct resource_create_funcs res_create_maximus_funcs = { .create_hwseq = dcn21_hwseq_create, }; +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 600000, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = true, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_HBR3_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true, + .flags.bits.IS_TPS4_CAPABLE = true +}; + + +#define link_regs(id, phyid)\ +[id] = {\ + LE_DCN10_REG_LIST(id), \ + UNIPHY_DCN2_REG_LIST(phyid), \ + SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ +} + +static const struct dcn10_link_enc_registers link_enc_regs[] = { + link_regs(0, A), + link_regs(1, 
B), + link_regs(2, C), + link_regs(3, D), + link_regs(4, E), +}; + +#define aux_regs(id)\ +[id] = {\ + DCN2_AUX_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { + aux_regs(0), + aux_regs(1), + aux_regs(2), + aux_regs(3), + aux_regs(4) +}; + +#define hpd_regs(id)\ +[id] = {\ + HPD_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { + hpd_regs(0), + hpd_regs(1), + hpd_regs(2), + hpd_regs(3), + hpd_regs(4) +}; + +static const struct dcn10_link_enc_shift le_shift = { + LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dcn10_link_enc_mask le_mask = { + LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK) +}; + +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + default: + ASSERT(0); + return 0; + } +} + +static struct link_encoder *dcn21_link_encoder_create( + const struct encoder_init_data *enc_init_data) +{ + struct dcn21_link_encoder *enc21 = + kzalloc(sizeof(struct dcn21_link_encoder), GFP_KERNEL); + int link_regs_id; + + if (!enc21) + return NULL; + + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + + dcn21_link_encoder_construct(enc21, + enc_init_data, + &link_enc_feature, + &link_enc_regs[link_regs_id], + &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source], + &le_shift, + &le_mask); + + return &enc21->enc10.base; +} +#define CTX ctx + +#define REG(reg_name) \ + (DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) + +static uint32_t read_pipe_fuses(struct dc_context *ctx) +{ + uint32_t value = REG_READ(CC_DC_PIPE_DIS); + /* RV1 support max 4 pipes */ + value = value & 0xf; + return value; +} + +static int dcn21_populate_dml_pipes_from_context( + struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes) +{ + uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, res_ctx, pipes); + int i; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + + if (!res_ctx->pipe_ctx[i].stream) + continue; + + pipes[i].pipe.src.hostvm = 1; + pipes[i].pipe.src.gpuvm = 1; + } + + return pipe_cnt; +} + static struct resource_funcs dcn21_res_pool_funcs = { .destroy = dcn21_destroy_resource_pool, - .link_enc_create = dcn20_link_encoder_create, + .link_enc_create = dcn21_link_encoder_create, .validate_bandwidth = dcn21_validate_bandwidth, + .populate_dml_pipes = dcn21_populate_dml_pipes_from_context, .add_stream_to_ctx = dcn20_add_stream_to_ctx, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, @@ -1437,9 +1662,11 @@ static bool construct( struct dc *dc, struct dcn21_resource_pool *pool) { - int i; + int i, j; struct dc_context *ctx = dc->ctx; struct irq_service_init_data init_data; + uint32_t pipe_fuses = read_pipe_fuses(ctx); + uint32_t num_pipes; ctx->dc_bios->regs = &bios_regs; @@ -1457,7 +1684,9 @@ static bool construct( *************************************************/ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; - pool->base.pipe_count = 4; + /* max pipe num for ASIC before check pipe fuses */ + pool->base.pipe_count = pool->base.res_cap->num_timing_generator; + dc->caps.max_downscale_ratio = 200; 
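Note: map_transmitter_id_to_phy_instance() above is an identity mapping for Renoir's UNIPHY A-E. A table-free sketch (not part of the patch; it assumes TRANSMITTER_UNIPHY_A..E are consecutive enum values) would be:

	static int map_transmitter_id_to_phy_instance(enum transmitter transmitter)
	{
		if (transmitter >= TRANSMITTER_UNIPHY_A &&
		    transmitter <= TRANSMITTER_UNIPHY_E)
			return transmitter - TRANSMITTER_UNIPHY_A;
		ASSERT(0);	/* unexpected transmitter for this ASIC */
		return 0;
	}

The explicit switch in the patch avoids relying on that enum layout, which is presumably why each case is written out.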
dc->caps.i2c_speed_in_khz = 100; dc->caps.max_cursor_size = 256; @@ -1467,6 +1696,7 @@ static bool construct( dc->caps.max_slave_planes = 1; dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; + dc->caps.extended_aux_timeout_support = true; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; @@ -1516,6 +1746,26 @@ static bool construct( goto create_fail; } + pool->base.dmcu = dcn20_dmcu_create(ctx, + &dmcu_regs, + &dmcu_shift, + &dmcu_mask); + if (pool->base.dmcu == NULL) { + dm_error("DC: failed to create dmcu!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + pool->base.abm = dce_abm_create(ctx, + &abm_regs, + &abm_shift, + &abm_mask); + if (pool->base.abm == NULL) { + dm_error("DC: failed to create abm!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + #ifdef CONFIG_DRM_AMD_DC_DMUB pool->base.dmcub = dcn21_dmcub_create(ctx, &dmcub_regs, @@ -1530,6 +1780,14 @@ static bool construct( pool->base.pp_smu = dcn21_pp_smu_create(ctx); + num_pipes = dcn2_1_ip.max_num_dpp; + + for (i = 0; i < dcn2_1_ip.max_num_dpp; i++) + if (pipe_fuses & 1 << i) + num_pipes--; + dcn2_1_ip.max_num_dpp = num_pipes; + dcn2_1_ip.max_num_otg = num_pipes; + dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21); init_data.ctx = dc->ctx; @@ -1537,8 +1795,15 @@ static bool construct( if (!pool->base.irqs) goto create_fail; + j = 0; /* mem input -> ipp -> dpp -> opp -> TG */ for (i = 0; i < pool->base.pipe_count; i++) { + /* if pipe is disabled, skip instance of HW pipe, + * i.e, skip ASIC register instance + */ + if ((pipe_fuses & (1 << i)) != 0) + continue; + pool->base.hubps[i] = dcn21_hubp_create(ctx, i); if (pool->base.hubps[i] == NULL) { BREAK_TO_DEBUGGER(); @@ -1562,6 +1827,23 @@ static bool construct( "DC: failed to create dpps!\n"); goto create_fail; } + + pool->base.opps[i] = dcn21_opp_create(ctx, i); + if (pool->base.opps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create output pixel processor!\n"); + goto create_fail; + } + + pool->base.timing_generators[i] = dcn21_timing_generator_create( + ctx, i); + if (pool->base.timing_generators[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto create_fail; + } + j++; } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { @@ -1582,27 +1864,9 @@ static bool construct( pool->base.sw_i2cs[i] = NULL; } - for (i = 0; i < pool->base.res_cap->num_opp; i++) { - pool->base.opps[i] = dcn21_opp_create(ctx, i); - if (pool->base.opps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create output pixel processor!\n"); - goto create_fail; - } - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - pool->base.timing_generators[i] = dcn21_timing_generator_create( - ctx, i); - if (pool->base.timing_generators[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create tg!\n"); - goto create_fail; - } - } - - pool->base.timing_generator_count = i; + pool->base.timing_generator_count = j; + pool->base.pipe_count = j; + pool->base.mpcc_count = j; pool->base.mpc = dcn21_mpc_create(ctx); if (pool->base.mpc == NULL) { @@ -1645,7 +1909,7 @@ static bool construct( &res_create_funcs : &res_create_maximus_funcs))) goto create_fail; - dcn20_hw_sequencer_construct(dc); + dcn21_hw_sequencer_construct(dc); dc->caps.max_planes = pool->base.pipe_count; diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h new file mode 100644 index 000000000000..626d22d437f4 
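Note: pipe harvesting is applied twice above: dcn2_1_ip.max_num_dpp/max_num_otg are reduced by the number of set bits in pipe_fuses, and the resource-creation loop skips the fused-off ASIC register instances while counting usable pipes in j. A self-contained illustration of the counting (example fuse mask assumed, not taken from the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned int pipe_fuses = 0x2;	/* example: HW pipe 1 fused off */
		int i, usable = 0;

		for (i = 0; i < 4; i++) {	/* read_pipe_fuses() masks to 4 pipes */
			if (pipe_fuses & (1u << i))
				continue;	/* skip this ASIC register instance */
			printf("create hubp/dpp/opp/tg for HW instance %d\n", i);
			usable++;
		}
		printf("pipe_count = %d\n", usable);	/* prints 3 */
		return 0;
	}

pipe_count, timing_generator_count and mpcc_count all end up as the compacted count j, while the created resources keep their hardware instance indices.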
--- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h @@ -0,0 +1,49 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef DM_CP_PSP_IF__H +#define DM_CP_PSP_IF__H + +struct dc_link; + +struct cp_psp_stream_config { + uint8_t otg_inst; + uint8_t link_enc_inst; + uint8_t stream_enc_inst; + void *dm_stream_ctx; + bool dpms_off; +}; + +struct cp_psp_funcs { + void (*update_stream_config)(void *handle, struct cp_psp_stream_config *config); +}; + +struct cp_psp { + void *handle; + struct cp_psp_funcs funcs; +}; + + +#endif /* DM_CP_PSP_IF__H */ diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index b6b4333737f2..94b75e942607 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -74,7 +74,7 @@ void dm_helpers_dp_mst_clear_payload_allocation_table( /* * Polls for ACT (allocation change trigger) handled and */ -bool dm_helpers_dp_mst_poll_for_allocation_change_trigger( +enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger( struct dc_context *ctx, const struct dc_stream_state *stream); /* diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h index c03a441ee638..ef7df9ef6d7e 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h +++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h @@ -249,10 +249,8 @@ struct pp_smu_funcs_nv { }; #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) - #define PP_SMU_NUM_SOCCLK_DPM_LEVELS 8 -#define PP_SMU_NUM_DCFCLK_DPM_LEVELS 4 +#define PP_SMU_NUM_DCFCLK_DPM_LEVELS 8 #define PP_SMU_NUM_FCLK_DPM_LEVELS 4 #define PP_SMU_NUM_MEMCLK_DPM_LEVELS 4 @@ -288,7 +286,6 @@ struct pp_smu_funcs_rn { enum pp_smu_status (*get_dpm_clock_table) (struct pp_smu *pp, struct dpm_clocks *clock_table); }; -#endif struct pp_smu_funcs { struct pp_smu ctx; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c index 0fafd693ffb4..3c70dd577292 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c @@ -38,6 +38,7 @@ #define BPP_INVALID 0 #define BPP_BLENDED_PIPE 0xffffffff +#define DCN20_MAX_DSC_IMAGE_WIDTH 5184 static double adjust_ReturnBW( struct display_mode_lib *mode_lib, @@ -2610,7 +2611,8 @@ static void 
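Note: the new dm_cp_psp.h above only defines the interface; the display manager supplies the callback. A hypothetical registration sketch (identifiers invented for illustration; the real wiring belongs in amdgpu_dm_hdcp.c, which is not shown in this hunk):

	static void dm_update_stream_config(void *handle,
			struct cp_psp_stream_config *config)
	{
		/* forward otg_inst/link_enc_inst/stream_enc_inst and dpms_off
		 * to the PSP-backed HDCP state machine for this display
		 */
	}

	/* during DM init, assuming the dc_context embeds a struct cp_psp
	 * (the core_types.h change later in this diff includes the header)
	 */
	dc->ctx->cp_psp.handle = my_hdcp_workqueue;	/* DM-private context */
	dc->ctx->cp_psp.funcs.update_stream_config = dm_update_stream_config;

With that in place, DC can report encoder-instance changes so HDCP authentication follows the stream wherever it gets mapped.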
dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP mode_lib->vba.MinActiveDRAMClockChangeMargin + mode_lib->vba.DRAMClockChangeLatency; - if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) { + if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) { + mode_lib->vba.DRAMClockChangeWatermark += 25; mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; } else { if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) { @@ -3901,6 +3903,10 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode mode_lib->vba.MaximumSwathWidthInLineBuffer); } for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { + double MaxMaxDispclkRoundedDown = RoundToDFSGranularityDown( + mode_lib->vba.MaxDispclk[mode_lib->vba.soc.num_states], + mode_lib->vba.DISPCLKDPPCLKVCOSpeed); + for (j = 0; j < 2; j++) { mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown( mode_lib->vba.MaxDispclk[i], @@ -3925,7 +3931,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode && i == mode_lib->vba.soc.num_states) mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - if (mode_lib->vba.ODMCapability == false || mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine <= mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) { + if (mode_lib->vba.ODMCapability == false || + (locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown + && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN20_MAX_DSC_IMAGE_WIDTH))) { locals->ODMCombineEnablePerState[i][k] = false; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; } else { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c index 878bf4782ce6..2c7455e22a65 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c @@ -207,7 +207,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height); rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height); - // FIXME: take the max between luma, chroma chunk size? + // TODO: take the max between luma, chroma chunk size? // okay for now, as we are setting chunk_bytes to 8kb anyways if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb rq_regs->drq_expansion_mode = 0; @@ -677,7 +677,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, unsigned int meta_pitch = 0; unsigned int ppe = mode_422 ? 
2 : 1; - // FIXME check if ppe apply for both luma and chroma in 422 case + // TODO check if ppe apply for both luma and chroma in 422 case if (is_chroma) { vp_width = pipe_src_param.viewport_width_c / ppe; vp_height = pipe_src_param.viewport_height_c; @@ -959,7 +959,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, // Source // dcc_en = src.dcc; dual_plane = is_dual_plane((enum source_format_class)(src->source_format)); - mode_422 = 0; // FIXME + mode_422 = 0; // TODO access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed // bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0); // bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1); @@ -1655,7 +1655,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib, cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1) * (double) cur_req_width; cur_req_per_width = cur_width_ub / (double) cur_req_width; - hactive_cur = (double) cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor + hactive_cur = (double) cur_src_width / hscl_ratio; // TODO: oswin to think about what to do for cursor if (vratio_pre_l <= 1.0) { *refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c index ed8bf5f723c9..1e6aeb1bd2bf 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c @@ -207,7 +207,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height); rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height); - // FIXME: take the max between luma, chroma chunk size? + // TODO: take the max between luma, chroma chunk size? // okay for now, as we are setting chunk_bytes to 8kb anyways if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb rq_regs->drq_expansion_mode = 0; @@ -677,7 +677,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, unsigned int meta_pitch = 0; unsigned int ppe = mode_422 ? 
2 : 1; - // FIXME check if ppe apply for both luma and chroma in 422 case + // TODO check if ppe apply for both luma and chroma in 422 case if (is_chroma) { vp_width = pipe_src_param.viewport_width_c / ppe; vp_height = pipe_src_param.viewport_height_c; @@ -959,7 +959,7 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, // Source // dcc_en = src.dcc; dual_plane = is_dual_plane((enum source_format_class)(src->source_format)); - mode_422 = 0; // FIXME + mode_422 = 0; // TODO access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed // bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0); // bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1); @@ -1655,7 +1655,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib, cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1) * (double) cur_req_width; cur_req_per_width = cur_width_ub / (double) cur_req_width; - hactive_cur = (double) cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor + hactive_cur = (double) cur_src_width / hscl_ratio; // TODO: oswin to think about what to do for cursor if (vratio_pre_l <= 1.0) { *refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c index 3b6ed60dcd35..ba77957aefe3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c @@ -65,6 +65,7 @@ typedef struct { #define BPP_INVALID 0 #define BPP_BLENDED_PIPE 0xffffffff +#define DCN21_MAX_DSC_IMAGE_WIDTH 5184 static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib); static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation( @@ -3379,6 +3380,8 @@ static unsigned int TruncToValidBPP( return 30; else if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24)) return 24; + else if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18)) + return 18; else return BPP_INVALID; } @@ -3936,6 +3939,10 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.MaximumSwathWidthInLineBuffer); } for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { + double MaxMaxDispclkRoundedDown = RoundToDFSGranularityDown( + mode_lib->vba.MaxDispclk[mode_lib->vba.soc.num_states], + mode_lib->vba.DISPCLKDPPCLKVCOSpeed); + for (j = 0; j < 2; j++) { mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown( mode_lib->vba.MaxDispclk[i], @@ -3965,7 +3972,9 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l && i == mode_lib->vba.soc.num_states) mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - if (mode_lib->vba.ODMCapability == false || mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine <= mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) { + if (mode_lib->vba.ODMCapability == false || + (locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown + && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN21_MAX_DSC_IMAGE_WIDTH))) { locals->ODMCombineEnablePerState[i][k] = false; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; } else { diff --git 
a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index f4c1ef9046bf..cfacd6027467 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -269,7 +269,7 @@ struct writeback_st { struct _vcs_dpi_display_output_params_st { int dp_lanes; - int output_bpp; + double output_bpp; int dsc_enable; int wb_enable; int num_active_wb; @@ -318,6 +318,7 @@ struct _vcs_dpi_display_pipe_dest_params_st { unsigned int vupdate_width; unsigned int vready_offset; unsigned char interlaced; + unsigned char embedded; double pixel_rate_mhz; unsigned char synchronized_vblank_all_planes; unsigned char otg_inst; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index 65cf4edddaff..7f9a5621922f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -375,6 +375,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) mode_lib->vba.pipe_plane[j] = mode_lib->vba.NumberOfActivePlanes; + mode_lib->vba.EmbeddedPanel[mode_lib->vba.NumberOfActivePlanes] = dst->embedded; mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes] = 1; mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] = (enum scan_direction_class) (src->source_scan); @@ -432,8 +433,12 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) dst->recout_width; // TODO: or should this be full_recout_width???...maybe only when in hsplit mode? mode_lib->vba.ODMCombineEnabled[mode_lib->vba.NumberOfActivePlanes] = dst->odm_combine; + mode_lib->vba.ODMCombineTypeEnabled[mode_lib->vba.NumberOfActivePlanes] = + dst->odm_combine; mode_lib->vba.OutputFormat[mode_lib->vba.NumberOfActivePlanes] = (enum output_format_class) (dout->output_format); + mode_lib->vba.OutputBpp[mode_lib->vba.NumberOfActivePlanes] = + dout->output_bpp; mode_lib->vba.Output[mode_lib->vba.NumberOfActivePlanes] = (enum output_encoder_class) (dout->output_type); diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h index 91decac50557..1540ffbe3979 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h @@ -387,6 +387,7 @@ struct vba_vars_st { /* vba mode support */ /*inputs*/ + bool EmbeddedPanel[DC__NUM_DPP__MAX]; bool SupportGFX7CompatibleTilingIn32bppAnd64bpp; double MaxHSCLRatio; double MaxVSCLRatio; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c index ad8571f5a142..4c3e9cc30167 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c @@ -243,7 +243,7 @@ void dml1_extract_rq_regs( rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height); rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height); - /* FIXME: take the max between luma, chroma chunk size? + /* TODO: take the max between luma, chroma chunk size? 
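Note: widening output_bpp from int to double matters for DSC, where target rates are fractional. Elsewhere in DC the DSC target is carried in 1/16-bpp units, so under that assumption a conversion like the following only survives intact in a floating-point field:

	/* dsc_cfg.bits_per_pixel is in units of 1/16 bpp in dc_dsc_config */
	double output_bpp = (double)timing->dsc_cfg.bits_per_pixel / 16.0;
	/* e.g. 136/16 = 8.5 bpp, which an int field would truncate to 8 */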
* okay for now, as we are setting chunk_bytes to 8kb anyways */ if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { /*32kb */ @@ -602,7 +602,7 @@ static void get_surf_rq_param( unsigned int log2_dpte_group_length; unsigned int func_meta_row_height, func_dpte_row_height; - /* FIXME check if ppe apply for both luma and chroma in 422 case */ + /* TODO check if ppe apply for both luma and chroma in 422 case */ if (is_chroma) { vp_width = pipe_src_param.viewport_width_c / ppe; vp_height = pipe_src_param.viewport_height_c; @@ -1141,7 +1141,7 @@ void dml1_rq_dlg_get_dlg_params( ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13)); disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; /* 15 bits */ - prefetch_xy_calc_in_dcfclk = 24.0; /* FIXME: ip_param */ + prefetch_xy_calc_in_dcfclk = 24.0; /* TODO: ip_param */ min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz; t_calc_us = prefetch_xy_calc_in_dcfclk / min_dcfclk_mhz; min_ttu_vblank = dlg_sys_param.t_urg_wm_us; @@ -1182,7 +1182,7 @@ void dml1_rq_dlg_get_dlg_params( dcc_en = e2e_pipe_param.pipe.src.dcc; dual_plane = is_dual_plane( (enum source_format_class) e2e_pipe_param.pipe.src.source_format); - mode_422 = 0; /* FIXME */ + mode_422 = 0; /* TODO */ access_dir = (e2e_pipe_param.pipe.src.source_scan == dm_vert); /* vp access direction: horizontal or vertical accessed */ bytes_per_element_l = get_bytes_per_element( (enum source_format_class) e2e_pipe_param.pipe.src.source_format, @@ -1837,7 +1837,7 @@ void dml1_rq_dlg_get_dlg_params( cur0_width_ub = dml_ceil((double) cur0_src_width / (double) cur0_req_width, 1) * (double) cur0_req_width; cur0_req_per_width = cur0_width_ub / (double) cur0_req_width; - hactive_cur0 = (double) cur0_src_width / hratios_cur0; /* FIXME: oswin to think about what to do for cursor */ + hactive_cur0 = (double) cur0_src_width / hratios_cur0; /* TODO: oswin to think about what to do for cursor */ if (vratio_pre_l <= 1.0) { refcyc_per_req_delivery_pre_cur0 = hactive_cur0 * ref_freq_to_pix_freq diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 5995bcdfed54..e60f760585e4 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -23,8 +23,7 @@ */ #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT -#include "dc.h" -#include "core_types.h" +#include "dc_hw_types.h" #include "dsc.h" #include <drm/drm_dp_helper.h> @@ -47,6 +46,59 @@ const struct dc_dsc_policy dsc_policy = { /* This module's internal functions */ +static uint32_t dc_dsc_bandwidth_in_kbps_from_timing( + const struct dc_crtc_timing *timing) +{ + uint32_t bits_per_channel = 0; + uint32_t kbps; + + if (timing->flags.DSC) { + kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel); + kbps = kbps / 160 + ((kbps % 160) ? 
1 : 0); + return kbps; + } + + switch (timing->display_color_depth) { + case COLOR_DEPTH_666: + bits_per_channel = 6; + break; + case COLOR_DEPTH_888: + bits_per_channel = 8; + break; + case COLOR_DEPTH_101010: + bits_per_channel = 10; + break; + case COLOR_DEPTH_121212: + bits_per_channel = 12; + break; + case COLOR_DEPTH_141414: + bits_per_channel = 14; + break; + case COLOR_DEPTH_161616: + bits_per_channel = 16; + break; + default: + break; + } + + ASSERT(bits_per_channel != 0); + + kbps = timing->pix_clk_100hz / 10; + kbps *= bits_per_channel; + + if (timing->flags.Y_ONLY != 1) { + /*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/ + kbps *= 3; + if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) + kbps /= 2; + else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) + kbps = kbps * 2 / 3; + } + + return kbps; + +} + static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size) { @@ -178,12 +230,11 @@ static bool dsc_bpp_increment_div_from_dpcd(int bpp_increment_dpcd, uint32_t *bp } static void get_dsc_enc_caps( - const struct dc *dc, + const struct display_stream_compressor *dsc, struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz) { // This is a static HW query, so we can use any DSC - struct display_stream_compressor *dsc = dc->res_pool->dscs[0]; memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps)); if (dsc) @@ -290,7 +341,7 @@ static void get_dsc_bandwidth_range( struct dc_dsc_bw_range *range) { /* native stream bandwidth */ - range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing); + range->stream_kbps = dc_dsc_bandwidth_in_kbps_from_timing(timing); /* max dsc target bpp */ range->max_kbps = dsc_div_by_10_round_up(max_bpp * timing->pix_clk_100hz); @@ -512,6 +563,7 @@ static bool setup_dsc_config( const struct dsc_enc_caps *dsc_enc_caps, int target_bandwidth_kbps, const struct dc_crtc_timing *timing, + int min_slice_height_override, struct dc_dsc_config *dsc_cfg) { struct dsc_enc_caps dsc_common_caps; @@ -680,7 +732,10 @@ static bool setup_dsc_config( // Slice height (i.e. number of slices per column): start with policy and pick the first one that height is divisible by. // For 4:2:0 make sure the slice height is divisible by 2 as well. - slice_height = min(dsc_policy.min_sice_height, pic_height); + if (min_slice_height_override == 0) + slice_height = min(dsc_policy.min_sice_height, pic_height); + else + slice_height = min(min_slice_height_override, pic_height); while (slice_height < pic_height && (pic_height % slice_height != 0 || (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420 && slice_height % 2 != 0))) @@ -802,7 +857,8 @@ bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dp * If DSC is not possible, leave '*range' untouched. 
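Note: the uncompressed branch of dc_dsc_bandwidth_in_kbps_from_timing() above reduces to pixel clock in kHz times bits per component times a component factor (3 for RGB/4:4:4, 2 for 4:2:2, 3/2 for 4:2:0, 1 for Y-only). A standalone check with assumed example numbers:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t pix_clk_100hz = 6000000;	/* 600 MHz pixel clock */
		uint32_t bits_per_channel = 8;		/* COLOR_DEPTH_888 */
		uint32_t kbps = pix_clk_100hz / 10;	/* 100 Hz units -> kHz */

		kbps *= bits_per_channel;
		kbps *= 3;				/* RGB, not Y-only */
		printf("%u kbps\n", kbps);		/* 14400000, i.e. 14.4 Gb/s */
		return 0;
	}

The DSC branch instead uses the 1/16-bpp target directly: kbps = pix_clk_100hz * bits_per_pixel / 160, rounded up when there is a remainder.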
*/ bool dc_dsc_compute_bandwidth_range( - const struct dc *dc, + const struct display_stream_compressor *dsc, + const uint32_t dsc_min_slice_height_override, const uint32_t min_bpp, const uint32_t max_bpp, const struct dsc_dec_dpcd_caps *dsc_sink_caps, @@ -814,16 +870,14 @@ bool dc_dsc_compute_bandwidth_range( struct dsc_enc_caps dsc_common_caps; struct dc_dsc_config config; - get_dsc_enc_caps(dc, &dsc_enc_caps, timing->pix_clk_100hz); + get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz); is_dsc_possible = intersect_dsc_caps(dsc_sink_caps, &dsc_enc_caps, timing->pixel_encoding, &dsc_common_caps); if (is_dsc_possible) - is_dsc_possible = setup_dsc_config(dsc_sink_caps, - &dsc_enc_caps, - 0, - timing, &config); + is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing, + dsc_min_slice_height_override, &config); if (is_dsc_possible) get_dsc_bandwidth_range(min_bpp, max_bpp, &dsc_common_caps, timing, range); @@ -832,8 +886,9 @@ bool dc_dsc_compute_bandwidth_range( } bool dc_dsc_compute_config( - const struct dc *dc, + const struct display_stream_compressor *dsc, const struct dsc_dec_dpcd_caps *dsc_sink_caps, + const uint32_t dsc_min_slice_height_override, uint32_t target_bandwidth_kbps, const struct dc_crtc_timing *timing, struct dc_dsc_config *dsc_cfg) @@ -841,11 +896,11 @@ bool dc_dsc_compute_config( bool is_dsc_possible = false; struct dsc_enc_caps dsc_enc_caps; - get_dsc_enc_caps(dc, &dsc_enc_caps, timing->pix_clk_100hz); + get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz); is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, target_bandwidth_kbps, - timing, dsc_cfg); + timing, dsc_min_slice_height_override, dsc_cfg); return is_dsc_possible; } #endif /* CONFIG_DRM_AMD_DC_DSC_SUPPORT */ diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c index ca51e83f8764..76c4b12d6824 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c @@ -177,7 +177,6 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com { float bpp_group; float initial_xmit_delay_factor; - int source_bpp; int padding_pixels; int i; @@ -217,8 +216,6 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com rc->initial_xmit_delay++; } - source_bpp = MODE_SELECT(bpc * 3, bpc * 2, bpc * 1.5); - rc->flatness_min_qp = ((bpc == BPC_8) ? (3) : ((bpc == BPC_10) ? (7) : (11))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); rc->flatness_max_qp = ((bpc == BPC_8) ? (12) : ((bpc == BPC_10) ? (16) : (20))) - ((minor_version == 1 && cm == CM_444) ? 
1 : 0); rc->flatness_det_thresh = 2 << (bpc - 8); diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c index f8f85490e77e..f67c18375bfd 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c @@ -321,8 +321,6 @@ void dal_gpio_destroy( return; } - dal_gpio_close(*gpio); - switch ((*gpio)->id) { case GPIO_ID_DDC_DATA: kfree((*gpio)->hw_container.ddc); diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c index d03165e71dc6..92280cc05e2d 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c @@ -169,7 +169,6 @@ void dal_gpio_destroy_generic_mux( return; } - dal_gpio_close(*mux); dal_gpio_destroy(mux); kfree(*mux); @@ -460,7 +459,6 @@ void dal_gpio_destroy_irq( return; } - dal_gpio_close(*irq); dal_gpio_destroy(irq); kfree(*irq); diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/Makefile b/drivers/gpu/drm/amd/display/dc/hdcp/Makefile new file mode 100644 index 000000000000..4170b6eb9ec0 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hdcp/Makefile @@ -0,0 +1,28 @@ +# Copyright 2019 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# Makefile for the 'hdcp' sub-component of DAL. +# + +HDCP_MSG = hdcp_msg.o + +AMD_DAL_HDCP_MSG = $(addprefix $(AMDDALPATH)/dc/hdcp/,$(HDCP_MSG)) + +AMD_DISPLAY_FILES += $(AMD_DAL_HDCP_MSG) diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c new file mode 100644 index 000000000000..6f730b5bfe42 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c @@ -0,0 +1,324 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include <linux/slab.h> + +#include "dm_services.h" +#include "dm_helpers.h" +#include "include/hdcp_types.h" +#include "include/i2caux_interface.h" +#include "include/signal_types.h" +#include "core_types.h" +#include "dc_link_ddc.h" +#include "link_hwss.h" + +#define DC_LOGGER \ + link->ctx->logger +#define HDCP14_KSV_SIZE 5 +#define HDCP14_MAX_KSV_FIFO_SIZE 127*HDCP14_KSV_SIZE + +static const bool hdcp_cmd_is_read[] = { + [HDCP_MESSAGE_ID_READ_BKSV] = true, + [HDCP_MESSAGE_ID_READ_RI_R0] = true, + [HDCP_MESSAGE_ID_READ_PJ] = true, + [HDCP_MESSAGE_ID_WRITE_AKSV] = false, + [HDCP_MESSAGE_ID_WRITE_AINFO] = false, + [HDCP_MESSAGE_ID_WRITE_AN] = false, + [HDCP_MESSAGE_ID_READ_VH_X] = true, + [HDCP_MESSAGE_ID_READ_VH_0] = true, + [HDCP_MESSAGE_ID_READ_VH_1] = true, + [HDCP_MESSAGE_ID_READ_VH_2] = true, + [HDCP_MESSAGE_ID_READ_VH_3] = true, + [HDCP_MESSAGE_ID_READ_VH_4] = true, + [HDCP_MESSAGE_ID_READ_BCAPS] = true, + [HDCP_MESSAGE_ID_READ_BSTATUS] = true, + [HDCP_MESSAGE_ID_READ_KSV_FIFO] = true, + [HDCP_MESSAGE_ID_READ_BINFO] = true, + [HDCP_MESSAGE_ID_HDCP2VERSION] = true, + [HDCP_MESSAGE_ID_RX_CAPS] = true, + [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = false, + [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = true, + [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = false, + [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = false, + [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = true, + [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = true, + [HDCP_MESSAGE_ID_WRITE_LC_INIT] = false, + [HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = true, + [HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = false, + [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = true, + [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = false, + [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = false, + [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = true, + [HDCP_MESSAGE_ID_READ_RXSTATUS] = true, + [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = false +}; + +static const uint8_t hdcp_i2c_offsets[] = { + [HDCP_MESSAGE_ID_READ_BKSV] = 0x0, + [HDCP_MESSAGE_ID_READ_RI_R0] = 0x8, + [HDCP_MESSAGE_ID_READ_PJ] = 0xA, + [HDCP_MESSAGE_ID_WRITE_AKSV] = 0x10, + [HDCP_MESSAGE_ID_WRITE_AINFO] = 0x15, + [HDCP_MESSAGE_ID_WRITE_AN] = 0x18, + [HDCP_MESSAGE_ID_READ_VH_X] = 0x20, + [HDCP_MESSAGE_ID_READ_VH_0] = 0x20, + [HDCP_MESSAGE_ID_READ_VH_1] = 0x24, + [HDCP_MESSAGE_ID_READ_VH_2] = 0x28, + [HDCP_MESSAGE_ID_READ_VH_3] = 0x2C, + [HDCP_MESSAGE_ID_READ_VH_4] = 0x30, + [HDCP_MESSAGE_ID_READ_BCAPS] = 0x40, + [HDCP_MESSAGE_ID_READ_BSTATUS] = 0x41, + [HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x43, + [HDCP_MESSAGE_ID_READ_BINFO] = 0xFF, + [HDCP_MESSAGE_ID_HDCP2VERSION] = 0x50, + [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x60, + [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x80, + [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x60, + [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x60, + [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x80, + [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x80, + [HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x60, + [HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x80, + 
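Note: both arrays are designated-initializer tables indexed by enum hdcp_message_id, so a single message id resolves the transfer direction and the DDC offset in O(1); HDCP14_MAX_KSV_FIFO_SIZE (127 * 5 bytes) mirrors the 7-bit DEVICE_COUNT field of Bstatus. Illustrative lookup (values follow from the tables above):

	enum hdcp_message_id msg = HDCP_MESSAGE_ID_READ_BKSV;
	bool is_read = hdcp_cmd_is_read[msg];	/* true */
	uint8_t offset = hdcp_i2c_offsets[msg];	/* 0x00: Bksv sits at DDC offset 0 */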
[HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x60, + [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x80, + [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60, + [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60, + [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80, + [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70 +}; + +struct protection_properties { + bool supported; + bool (*process_transaction)( + struct dc_link *link, + struct hdcp_protection_message *message_info); +}; + +static const struct protection_properties non_supported_protection = { + .supported = false +}; + +static bool hdmi_14_process_transaction( + struct dc_link *link, + struct hdcp_protection_message *message_info) +{ + uint8_t *buff = NULL; + bool result; + const uint8_t hdcp_i2c_addr_link_primary = 0x3a; /* 0x74 >> 1*/ + const uint8_t hdcp_i2c_addr_link_secondary = 0x3b; /* 0x76 >> 1*/ + struct i2c_command i2c_command; + uint8_t offset = hdcp_i2c_offsets[message_info->msg_id]; + struct i2c_payload i2c_payloads[] = { + { true, 0, 1, &offset }, + /* actual hdcp payload, will be filled later, zeroed for now*/ + { 0 } + }; + + switch (message_info->link) { + case HDCP_LINK_SECONDARY: + i2c_payloads[0].address = hdcp_i2c_addr_link_secondary; + i2c_payloads[1].address = hdcp_i2c_addr_link_secondary; + break; + case HDCP_LINK_PRIMARY: + default: + i2c_payloads[0].address = hdcp_i2c_addr_link_primary; + i2c_payloads[1].address = hdcp_i2c_addr_link_primary; + break; + } + + if (hdcp_cmd_is_read[message_info->msg_id]) { + i2c_payloads[1].write = false; + i2c_command.number_of_payloads = ARRAY_SIZE(i2c_payloads); + i2c_payloads[1].length = message_info->length; + i2c_payloads[1].data = message_info->data; + } else { + i2c_command.number_of_payloads = 1; + buff = kzalloc(message_info->length + 1, GFP_KERNEL); + + if (!buff) + return false; + + buff[0] = offset; + memmove(&buff[1], message_info->data, message_info->length); + i2c_payloads[0].length = message_info->length + 1; + i2c_payloads[0].data = buff; + } + + i2c_command.payloads = i2c_payloads; + i2c_command.engine = I2C_COMMAND_ENGINE_HW;//only HW + i2c_command.speed = link->ddc->ctx->dc->caps.i2c_speed_in_khz; + + result = dm_helpers_submit_i2c( + link->ctx, + link, + &i2c_command); + kfree(buff); + + return result; +} + +static const struct protection_properties hdmi_14_protection = { + .supported = true, + .process_transaction = hdmi_14_process_transaction +}; + +static const uint32_t hdcp_dpcd_addrs[] = { + [HDCP_MESSAGE_ID_READ_BKSV] = 0x68000, + [HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005, + [HDCP_MESSAGE_ID_READ_PJ] = 0xFFFFFFFF, + [HDCP_MESSAGE_ID_WRITE_AKSV] = 0x68007, + [HDCP_MESSAGE_ID_WRITE_AINFO] = 0x6803B, + [HDCP_MESSAGE_ID_WRITE_AN] = 0x6800c, + [HDCP_MESSAGE_ID_READ_VH_X] = 0x68014, + [HDCP_MESSAGE_ID_READ_VH_0] = 0x68014, + [HDCP_MESSAGE_ID_READ_VH_1] = 0x68018, + [HDCP_MESSAGE_ID_READ_VH_2] = 0x6801c, + [HDCP_MESSAGE_ID_READ_VH_3] = 0x68020, + [HDCP_MESSAGE_ID_READ_VH_4] = 0x68024, + [HDCP_MESSAGE_ID_READ_BCAPS] = 0x68028, + [HDCP_MESSAGE_ID_READ_BSTATUS] = 0x68029, + [HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x6802c, + [HDCP_MESSAGE_ID_READ_BINFO] = 0x6802a, + [HDCP_MESSAGE_ID_RX_CAPS] = 0x6921d, + [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x69000, + [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x6900b, + [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x69220, + [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x692a0, + [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x692c0, + [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x692e0, + [HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x692f0, + 
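Note: hdmi_14_process_transaction() above follows the DDC access pattern HDCP 1.4 uses on HDMI: a read is a two-payload command (a 1-byte offset write, then the data read), while a write prepends the offset byte to the message in a single payload. The read case, shaped to struct i2c_payload's { write, address, length, data } layout (addresses from the function, length illustrative):

	struct i2c_payload payloads[2] = {
		{ true,  0x3a, 1,   &offset },	/* select the HDCP register */
		{ false, 0x3a, len, buf },	/* then read the reply */
	};

0x3a is the primary-link address (0x74 >> 1); 0x3b (0x76 >> 1) is used when message_info->link selects HDCP_LINK_SECONDARY.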
[HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x692f8, + [HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x69318, + [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x69330, + [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x693e0, + [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x693f0, + [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x69473, + [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x69493, + [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x69494 +}; + +static bool dpcd_access_helper( + struct dc_link *link, + uint32_t length, + uint8_t *data, + uint32_t dpcd_addr, + bool is_read) +{ + enum dc_status status; + uint32_t cur_length = 0; + uint32_t offset = 0; + uint32_t ksv_read_size = 0x6803b - 0x6802c; + + /* Read KSV, need repeatedly handle */ + if (dpcd_addr == 0x6802c) { + if (length % HDCP14_KSV_SIZE) { + DC_LOG_ERROR("%s: KsvFifo Size(%d) is not a multiple of HDCP14_KSV_SIZE(%d)\n", + __func__, + length, + HDCP14_KSV_SIZE); + } + if (length > HDCP14_MAX_KSV_FIFO_SIZE) { + DC_LOG_ERROR("%s: KsvFifo Size(%d) is greater than HDCP14_MAX_KSV_FIFO_SIZE(%d)\n", + __func__, + length, + HDCP14_MAX_KSV_FIFO_SIZE); + } + + DC_LOG_ERROR("%s: Reading %d Ksv(s) from KsvFifo\n", + __func__, + length / HDCP14_KSV_SIZE); + + while (length > 0) { + if (length > ksv_read_size) { + status = core_link_read_dpcd( + link, + dpcd_addr + offset, + data + offset, + ksv_read_size); + + data += ksv_read_size; + length -= ksv_read_size; + } else { + status = core_link_read_dpcd( + link, + dpcd_addr + offset, + data + offset, + length); + + data += length; + length = 0; + } + + if (status != DC_OK) + return false; + } + } else { + while (length > 0) { + if (length > DEFAULT_AUX_MAX_DATA_SIZE) + cur_length = DEFAULT_AUX_MAX_DATA_SIZE; + else + cur_length = length; + + if (is_read) { + status = core_link_read_dpcd( + link, + dpcd_addr + offset, + data + offset, + cur_length); + } else { + status = core_link_write_dpcd( + link, + dpcd_addr + offset, + data + offset, + cur_length); + } + + if (status != DC_OK) + return false; + + length -= cur_length; + offset += cur_length; + } + } + return true; +} + +static bool dp_11_process_transaction( + struct dc_link *link, + struct hdcp_protection_message *message_info) +{ + return dpcd_access_helper( + link, + message_info->length, + message_info->data, + hdcp_dpcd_addrs[message_info->msg_id], + hdcp_cmd_is_read[message_info->msg_id]); +} + +static const struct protection_properties dp_11_protection = { + .supported = true, + .process_transaction = dp_11_process_transaction +}; + diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index f189307750ab..a831079607cd 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -52,7 +52,9 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state, #include "clock_source.h" #include "audio.h" #include "dm_pp_smu.h" - +#ifdef CONFIG_DRM_AMD_DC_HDCP +#include "dm_cp_psp.h" +#endif /************ link *****************/ struct link_init_data { @@ -231,6 +233,7 @@ struct resource_pool { struct dcn_fe_bandwidth { int dppclk_khz; + }; struct stream_resource { @@ -395,10 +398,6 @@ struct dc_state { struct clk_mgr *clk_mgr; - struct { - bool full_update_needed : 1; - } commit_hints; - struct kref refcount; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h index b1fab251c09b..14716ba35662 100644 --- 
a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h @@ -95,6 +95,9 @@ bool dal_ddc_service_query_ddc_data( uint8_t *read_buf, uint32_t read_size); +bool dal_ddc_submit_aux_command(struct ddc_service *ddc, + struct aux_payload *payload); + int dc_link_aux_transfer_raw(struct ddc_service *ddc, struct aux_payload *payload, enum aux_channel_operation_result *operation_result); @@ -102,6 +105,9 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc, bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc, struct aux_payload *payload); +enum dc_status dc_link_aux_configure_timeout(struct ddc_service *ddc, + uint32_t timeout); + void dal_ddc_service_write_scdc_data( struct ddc_service *ddc_service, uint32_t pix_clk, diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index 08a4df2c61a8..045138dbdccb 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -28,6 +28,8 @@ #define LINK_TRAINING_ATTEMPTS 4 #define LINK_TRAINING_RETRY_DELAY 50 /* ms */ +#define LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD 32000 /*us*/ +#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 400 /*us*/ struct dc_link; struct dc_stream_state; @@ -43,6 +45,9 @@ bool dp_verify_link_cap_with_retries( struct dc_link_settings *known_limit_link_setting, int attempts); +bool dp_verify_mst_link_cap( + struct dc_link *link); + bool dp_validate_mode_timing( struct dc_link *link, const struct dc_crtc_timing *timing); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h index e79cd4e92919..e77b3a76766d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h @@ -140,6 +140,9 @@ struct write_command_context { struct aux_engine_funcs { + bool (*configure_timeout)( + struct ddc_service *ddc, + uint32_t timeout); void (*destroy)( struct aux_engine **ptr); bool (*acquire_engine)( diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index 76f9ad1b23df..4e18e77dcf42 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -47,7 +47,7 @@ #ifdef CONFIG_DRM_AMD_DC_DCN2_1 /* Will these bw structures be ASIC specific? 
*/ -#define MAX_NUM_DPM_LVL 4 +#define MAX_NUM_DPM_LVL 8 #define WM_SET_COUNT 4 @@ -180,13 +180,19 @@ struct clk_mgr_funcs { struct dc_state *context, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); + + bool (*are_clock_states_equal) (struct dc_clocks *a, + struct dc_clocks *b); + void (*notify_wm_ranges)(struct clk_mgr *clk_mgr); }; struct clk_mgr { struct dc_context *ctx; struct clk_mgr_funcs *funcs; struct dc_clocks clks; + bool psr_allow_active_cache; int dprefclk_khz; // Used by program pixel clock in clock source funcs, need to figureout where this goes + int dentist_vco_freq_khz; #ifdef CONFIG_DRM_AMD_DC_DCN2_1 struct clk_bw_params *bw_params; #endif @@ -199,4 +205,8 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr); +void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr); + +void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr); + #endif /* __DAL_CLK_MGR_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h index 7dd46eb96d67..a17a77192690 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h @@ -184,6 +184,21 @@ struct clk_mgr_registers { uint32_t MP1_SMN_C2PMSG_91; }; +enum clock_type { + clock_type_dispclk = 1, + clock_type_dcfclk, + clock_type_socclk, + clock_type_pixelclk, + clock_type_phyclk, + clock_type_dppclk, + clock_type_fclk, + clock_type_dcfdsclk, + clock_type_dscclk, + clock_type_uclk, + clock_type_dramclk, +}; + + struct state_dependent_clocks { int display_clk_khz; int pixel_clk_khz; @@ -210,8 +225,6 @@ struct clk_mgr_internal { struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES]; /*TODO: figure out which of the below fields should be here vs in asic specific portion */ - int dentist_vco_freq_khz; - /* Cache the status of DFS-bypass feature*/ bool dfs_bypass_enabled; /* True if the DFS-bypass feature is enabled and active. 
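Note: MAX_NUM_DPM_LVL going from 4 to 8 tracks the PP_SMU_NUM_DCFCLK_DPM_LEVELS bump in dm_pp_smu.h earlier in this diff, so the Renoir SMU can report up to eight DCFCLK states into clk_bw_params. If the two constants are meant to stay in lockstep, a compile-time check (a suggestion, not part of the patch) could make that explicit:

	_Static_assert(MAX_NUM_DPM_LVL >= PP_SMU_NUM_DCFCLK_DPM_LEVELS,
		       "clk_bw_params table smaller than the SMU DPM table");

Relatedly, dentist_vco_freq_khz moves from clk_mgr_internal up to struct clk_mgr, making the VCO frequency visible to code that only sees the public clock manager.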
*/ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index d8e744f366e5..05ee5295d2c1 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -38,8 +38,7 @@ struct dccg { struct dccg_funcs { void (*update_dpp_dto)(struct dccg *dccg, int dpp_inst, - int req_dppclk, - bool reduce_divider_only); + int req_dppclk); void (*get_dccg_ref_freq)(struct dccg *dccg, unsigned int xtalin_freq_inKhz, unsigned int *dccg_ref_freq_inKhz); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index a6297219d7fc..c81a17aeaa25 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -147,6 +147,7 @@ struct hubbub_funcs { bool (*is_allow_self_refresh_enabled)(struct hubbub *hubbub); void (*allow_self_refresh_control)(struct hubbub *hubbub, bool allow); + void (*apply_DEDCN21_147_wa)(struct hubbub *hubbub); }; struct hubbub { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h index 1ddb1c6fa149..c6ff3d78b435 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h @@ -28,7 +28,11 @@ #include "dc_dsc.h" #include "dc_hw_types.h" -#include "dc_dp_types.h" +#include "dc_types.h" +/* do not include any other headers + * or else it might break Edid Utility functionality. + */ + /* Input parameters for configuring DSC from the outside of DSC */ struct dsc_config { @@ -81,12 +85,6 @@ struct dsc_enc_caps { uint32_t bpp_increment_div; /* bpp increment divisor, e.g. if 16, it's 1/16th of a bit */ }; -struct display_stream_compressor { - const struct dsc_funcs *funcs; - struct dc_context *ctx; - int inst; -}; - struct dsc_funcs { void (*dsc_get_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz); void (*dsc_read_state)(struct display_stream_compressor *dsc, struct dcn_dsc_state *s); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index abb4e4237fb6..b21909216fb6 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -184,6 +184,10 @@ struct link_encoder_funcs { bool (*fec_is_active)(struct link_encoder *enc); #endif bool (*is_in_alt_mode) (struct link_encoder *enc); + + void (*get_max_link_cap)(struct link_encoder *enc, + struct dc_link_settings *link_settings); + enum signal_type (*get_dig_mode)( struct link_encoder *enc); }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h index e8668388581b..67b610d6d91f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h @@ -43,6 +43,7 @@ struct dcn_watermarks { #if defined(CONFIG_DRM_AMD_DC_DCN2_1) uint32_t frac_urg_bw_nom; uint32_t frac_urg_bw_flip; + int32_t urgent_latency_ns; #endif struct cstate_pstate_watermarks_st cstate_pstate; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h index 957e9047381a..18def2b6fafe 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h @@ -208,6 +208,7 @@ struct output_pixel_processor { struct mpc_tree mpc_tree_params; bool mpcc_disconnect_pending[MAX_PIPES]; const struct opp_funcs *funcs; + uint32_t dyn_expansion; }; enum 
fmt_stereo_action { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index fe9b7a10a1c3..6305e388612a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -214,6 +214,11 @@ struct stream_encoder_funcs { unsigned int (*dig_source_otg)( struct stream_encoder *enc); + bool (*dp_get_pixel_format)( + struct stream_encoder *enc, + enum dc_pixel_encoding *encoding, + enum dc_color_depth *depth); + #if defined(CONFIG_DRM_AMD_DC_DCN2_0) #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT void (*enc_read_state)(struct stream_encoder *enc, struct enc_state *s); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index 6196cc32356e..27c73caf74ee 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -261,6 +261,8 @@ struct timing_generator_funcs { void (*program_manual_trigger)(struct timing_generator *optc); void (*setup_manual_trigger)(struct timing_generator *optc); + bool (*get_hw_timing)(struct timing_generator *optc, + struct dc_crtc_timing *hw_crtc_timing); void (*set_vtg_params)(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 3a938cd414ea..d39c1e11def5 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -114,6 +114,9 @@ struct hw_sequencer_funcs { int opp_id); #if defined(CONFIG_DRM_AMD_DC_DCN2_0) + void (*program_front_end_for_ctx)( + struct dc *dc, + struct dc_state *context); void (*program_triplebuffer)( const struct dc *dc, struct pipe_ctx *pipe_ctx, @@ -229,6 +232,13 @@ struct hw_sequencer_funcs { struct dc *dc, struct dc_state *context); + void (*exit_optimized_pwr_state)( + const struct dc *dc, + struct dc_state *context); + void (*optimize_pwr_state)( + const struct dc *dc, + struct dc_state *context); + #if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool (*update_bandwidth)( struct dc *dc, @@ -321,10 +331,12 @@ struct hw_sequencer_funcs { struct dc_state *context); void (*update_writeback)(struct dc *dc, const struct dc_stream_status *stream_status, - struct dc_writeback_info *wb_info); + struct dc_writeback_info *wb_info, + struct dc_state *context); void (*enable_writeback)(struct dc *dc, const struct dc_stream_status *stream_status, - struct dc_writeback_info *wb_info); + struct dc_writeback_info *wb_info, + struct dc_state *context); void (*disable_writeback)(struct dc *dc, unsigned int dwb_pipe_inst); #endif @@ -337,6 +349,9 @@ struct hw_sequencer_funcs { enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); +#if defined(CONFIG_DRM_AMD_DC_DCN2_1) + bool (*s0i3_golden_init_wa)(struct dc *dc); +#endif }; void color_space_to_black_color( diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h index 18961707db23..9ad49da50a17 100644 --- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h +++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h @@ -31,6 +31,8 @@ #define DP_BRANCH_DEVICE_ID_0022B9 0x0022B9 #define DP_BRANCH_DEVICE_ID_00001A 0x00001A #define DP_BRANCH_DEVICE_ID_0080E1 0x0080e1 +#define DP_BRANCH_DEVICE_ID_90CC24 0x90CC24 +#define DP_BRANCH_DEVICE_ID_00E04C 0x00E04C enum 
ddc_result { DDC_RESULT_UNKNOWN = 0, diff --git a/drivers/gpu/drm/amd/display/include/hdcp_types.h b/drivers/gpu/drm/amd/display/include/hdcp_types.h new file mode 100644 index 000000000000..f31e6befc8d6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/include/hdcp_types.h @@ -0,0 +1,96 @@ +/* +* Copyright 2019 Advanced Micro Devices, Inc. +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +* OTHER DEALINGS IN THE SOFTWARE. +* +* Authors: AMD +* +*/ + +#ifndef __DC_HDCP_TYPES_H__ +#define __DC_HDCP_TYPES_H__ + +enum hdcp_message_id { + HDCP_MESSAGE_ID_INVALID = -1, + + /* HDCP 1.4 */ + + HDCP_MESSAGE_ID_READ_BKSV = 0, + /* HDMI is called Ri', DP is called R0' */ + HDCP_MESSAGE_ID_READ_RI_R0, + HDCP_MESSAGE_ID_READ_PJ, + HDCP_MESSAGE_ID_WRITE_AKSV, + HDCP_MESSAGE_ID_WRITE_AINFO, + HDCP_MESSAGE_ID_WRITE_AN, + HDCP_MESSAGE_ID_READ_VH_X, + HDCP_MESSAGE_ID_READ_VH_0, + HDCP_MESSAGE_ID_READ_VH_1, + HDCP_MESSAGE_ID_READ_VH_2, + HDCP_MESSAGE_ID_READ_VH_3, + HDCP_MESSAGE_ID_READ_VH_4, + HDCP_MESSAGE_ID_READ_BCAPS, + HDCP_MESSAGE_ID_READ_BSTATUS, + HDCP_MESSAGE_ID_READ_KSV_FIFO, + HDCP_MESSAGE_ID_READ_BINFO, + + /* HDCP 2.2 */ + + HDCP_MESSAGE_ID_HDCP2VERSION, + HDCP_MESSAGE_ID_RX_CAPS, + HDCP_MESSAGE_ID_WRITE_AKE_INIT, + HDCP_MESSAGE_ID_READ_AKE_SEND_CERT, + HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM, + HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM, + HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME, + HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO, + HDCP_MESSAGE_ID_WRITE_LC_INIT, + HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME, + HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS, + HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST, + HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK, + HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE, + HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY, + HDCP_MESSAGE_ID_READ_RXSTATUS, + HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE, + + HDCP_MESSAGE_ID_MAX +}; + +enum hdcp_version { + HDCP_Unknown = 0, + HDCP_VERSION_14, + HDCP_VERSION_22, +}; + +enum hdcp_link { + HDCP_LINK_PRIMARY, + HDCP_LINK_SECONDARY +}; + +struct hdcp_protection_message { + enum hdcp_version version; + /* relevant only for DVI */ + enum hdcp_link link; + enum hdcp_message_id msg_id; + uint32_t length; + uint8_t max_retries; + uint8_t *data; +}; + +#endif diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 2d8f14b69117..1de4805cb8c7 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -373,7 +373,42 @@ static 
struct fixed31_32 translate_from_linear_space( return dc_fixpt_mul(args->arg, args->a1); } -static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg) + +static struct fixed31_32 translate_from_linear_space_long( + struct translate_from_linear_space_args *args) +{ + const struct fixed31_32 one = dc_fixpt_from_int(1); + + if (dc_fixpt_lt(one, args->arg)) + return one; + + if (dc_fixpt_le(args->arg, dc_fixpt_neg(args->a0))) + return dc_fixpt_sub( + args->a2, + dc_fixpt_mul( + dc_fixpt_add( + one, + args->a3), + dc_fixpt_pow( + dc_fixpt_neg(args->arg), + dc_fixpt_recip(args->gamma)))); + else if (dc_fixpt_le(args->a0, args->arg)) + return dc_fixpt_sub( + dc_fixpt_mul( + dc_fixpt_add( + one, + args->a3), + dc_fixpt_pow( + args->arg, + dc_fixpt_recip(args->gamma))), + args->a2); + else + return dc_fixpt_mul( + args->arg, + args->a1); +} + +static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg, bool use_eetf) { struct fixed31_32 gamma = dc_fixpt_from_fraction(22, 10); @@ -384,9 +419,13 @@ static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg) scratch_gamma_args.a3 = dc_fixpt_zero; scratch_gamma_args.gamma = gamma; + if (use_eetf) + return translate_from_linear_space_long(&scratch_gamma_args); + return translate_from_linear_space(&scratch_gamma_args); } + static struct fixed31_32 translate_to_linear_space( struct fixed31_32 arg, struct fixed31_32 a0, @@ -920,11 +959,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma, if (fs_params->max_display < 100) // cap at 100 at the top max_display = dc_fixpt_from_int(100); - if (fs_params->min_content < fs_params->min_display) - use_eetf = true; - else - min_content = min_display; - + // only max used, we don't adjust min luminance if (fs_params->max_content > fs_params->max_display) use_eetf = true; else @@ -950,7 +985,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma, if (dc_fixpt_lt(scaledX, dc_fixpt_zero)) output = dc_fixpt_zero; else - output = calculate_gamma22(scaledX); + output = calculate_gamma22(scaledX, use_eetf); rgb->r = output; rgb->g = output; @@ -2173,5 +2208,3 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, rgb_degamma_alloc_fail: return ret; } - - diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index ec70c9b12e1a..16e69bbc69aa 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -37,8 +37,8 @@ #define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65) /* Number of elements in the render times cache array */ #define RENDER_TIMES_MAX_COUNT 10 -/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */ -#define BTR_EXIT_MARGIN 2000 +/* Threshold to enter/exit BTR (to avoid frequent enter-exits at the lower limit) */ +#define BTR_MAX_MARGIN 2500 /* Threshold to change BTR multiplier (to avoid frequent changes) */ #define BTR_DRIFT_MARGIN 2000 /*Threshold to exit fixed refresh rate*/ @@ -234,6 +234,10 @@ static void update_v_total_for_static_ramp( current_duration_in_us) * (stream->timing.pix_clk_100hz / 10)), stream->timing.h_total), 1000); + /* v_total cannot be less than nominal */ + if (v_total < stream->timing.v_total) + v_total = stream->timing.v_total; + in_out_vrr->adjust.v_total_min = v_total; in_out_vrr->adjust.v_total_max = v_total; } @@ -250,24 +254,22 @@ static void apply_below_the_range(struct core_freesync *core_freesync, unsigned
int delta_from_mid_point_in_us_1 = 0xFFFFFFFF; unsigned int delta_from_mid_point_in_us_2 = 0xFFFFFFFF; unsigned int frames_to_insert = 0; - unsigned int min_frame_duration_in_ns = 0; - unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us; unsigned int delta_from_mid_point_delta_in_us; - - min_frame_duration_in_ns = ((unsigned int) (div64_u64( - (1000000000ULL * 1000000), - in_out_vrr->max_refresh_in_uhz))); + unsigned int max_render_time_in_us = + in_out_vrr->max_duration_in_us - in_out_vrr->btr.margin_in_us; /* Program BTR */ - if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) { + if ((last_render_time_in_us + in_out_vrr->btr.margin_in_us / 2) < max_render_time_in_us) { /* Exit Below the Range */ if (in_out_vrr->btr.btr_active) { in_out_vrr->btr.frame_counter = 0; in_out_vrr->btr.btr_active = false; } - } else if (last_render_time_in_us > max_render_time_in_us) { + } else if (last_render_time_in_us > (max_render_time_in_us + in_out_vrr->btr.margin_in_us / 2)) { /* Enter Below the Range */ - in_out_vrr->btr.btr_active = true; + if (!in_out_vrr->btr.btr_active) { + in_out_vrr->btr.btr_active = true; + } } /* BTR set to "not active" so disengage */ @@ -323,7 +325,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync, /* Choose number of frames to insert based on how close it * can get to the mid point of the variable range. */ - if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) { + if ((frame_time_in_us / mid_point_frames_ceil) > in_out_vrr->min_duration_in_us && + (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2 || + mid_point_frames_floor < 2)) { frames_to_insert = mid_point_frames_ceil; delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 - delta_from_mid_point_in_us_1; @@ -339,7 +343,7 @@ static void apply_below_the_range(struct core_freesync *core_freesync, if (in_out_vrr->btr.frames_to_insert != 0 && delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) { if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) < - in_out_vrr->max_duration_in_us) && + max_render_time_in_us) && ((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) > in_out_vrr->min_duration_in_us)) frames_to_insert = in_out_vrr->btr.frames_to_insert; @@ -743,6 +747,10 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, nominal_field_rate_in_uhz = mod_freesync_calc_nominal_field_rate(stream); + /* Rounded to the nearest Hz */ + nominal_field_rate_in_uhz = 1000000ULL * + div_u64(nominal_field_rate_in_uhz + 500000, 1000000); + min_refresh_in_uhz = in_config->min_refresh_in_uhz; max_refresh_in_uhz = in_config->max_refresh_in_uhz; @@ -788,6 +796,11 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, refresh_range = in_out_vrr->max_refresh_in_uhz - in_out_vrr->min_refresh_in_uhz; + in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us - + 2 * in_out_vrr->min_duration_in_us; + if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN) + in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN; + in_out_vrr->supported = true; } @@ -803,6 +816,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, in_out_vrr->btr.inserted_duration_in_us = 0; in_out_vrr->btr.frames_to_insert = 0; in_out_vrr->btr.frame_counter = 0; + in_out_vrr->btr.mid_point_in_us = (in_out_vrr->min_duration_in_us + in_out_vrr->max_duration_in_us) / 2; @@ -975,13 +989,9 @@ void mod_freesync_get_settings(struct mod_freesync *mod_freesync, unsigned int *inserted_frames, unsigned int *inserted_duration_in_us) 
{ - struct core_freesync *core_freesync = NULL; - if (mod_freesync == NULL) return; - core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync); - if (vrr->supported) { *v_total_min = vrr->adjust.v_total_min; *v_total_max = vrr->adjust.v_total_max; @@ -996,14 +1006,13 @@ unsigned long long mod_freesync_calc_nominal_field_rate( const struct dc_stream_state *stream) { unsigned long long nominal_field_rate_in_uhz = 0; + unsigned int total = stream->timing.h_total * stream->timing.v_total; - /* Calculate nominal field rate for stream */ + /* Calculate nominal field rate for stream, rounded up to nearest integer */ nominal_field_rate_in_uhz = stream->timing.pix_clk_100hz / 10; nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL; - nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, - stream->timing.h_total); - nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, - stream->timing.v_total); + + nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, total); return nominal_field_rate_in_uhz; } diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/Makefile b/drivers/gpu/drm/amd/display/modules/hdcp/Makefile new file mode 100644 index 000000000000..1c3c6d47973a --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/Makefile @@ -0,0 +1,32 @@ +# +# Copyright 2019 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# +# Makefile for the 'hdcp' sub-module of DAL. +# + +HDCP = hdcp_ddc.o hdcp_log.o hdcp_psp.o hdcp.o \ + hdcp1_execution.o hdcp1_transition.o + +AMD_DAL_HDCP = $(addprefix $(AMDDALPATH)/modules/hdcp/,$(HDCP)) +#$(info ************ DAL-HDCP_MAKEFILE ************) + +AMD_DISPLAY_FILES += $(AMD_DAL_HDCP) diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c new file mode 100644 index 000000000000..d7ac445dec6f --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c @@ -0,0 +1,426 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "hdcp.h" + +static void push_error_status(struct mod_hdcp *hdcp, + enum mod_hdcp_status status) +{ + struct mod_hdcp_trace *trace = &hdcp->connection.trace; + + if (trace->error_count < MAX_NUM_OF_ERROR_TRACE) { + trace->errors[trace->error_count].status = status; + trace->errors[trace->error_count].state_id = hdcp->state.id; + trace->error_count++; + HDCP_ERROR_TRACE(hdcp, status); + } + + hdcp->connection.hdcp1_retry_count++; +} + +static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp) +{ + int i, display_enabled = 0; + + /* if all displays on the link are disabled, hdcp is not desired */ + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { + if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE && + !hdcp->connection.displays[i].adjust.disable) { + display_enabled = 1; + break; + } + } + + return (hdcp->connection.hdcp1_retry_count < MAX_NUM_OF_ATTEMPTS) && + display_enabled && !hdcp->connection.link.adjust.hdcp1.disable; +} + +static enum mod_hdcp_status execution(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + union mod_hdcp_transition_input *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (is_in_initialized_state(hdcp)) { + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + /* initialize transition input */ + memset(input, 0, sizeof(union mod_hdcp_transition_input)); + } else if (is_in_cp_not_desired_state(hdcp)) { + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + /* update topology event if hdcp is not desired */ + status = mod_hdcp_add_display_topology(hdcp); + } else if (is_in_hdcp1_states(hdcp)) { + status = mod_hdcp_hdcp1_execution(hdcp, event_ctx, &input->hdcp1); + } else if (is_in_hdcp1_dp_states(hdcp)) { + status = mod_hdcp_hdcp1_dp_execution(hdcp, + event_ctx, &input->hdcp1); + } +out: + return status; +} + +static enum mod_hdcp_status transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + union mod_hdcp_transition_input *input, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->unexpected_event) + goto out; + + if (is_in_initialized_state(hdcp)) { + if (is_dp_hdcp(hdcp)) + if (is_cp_desired_hdcp1(hdcp)) { + callback_in_ms(0, output); + set_state_id(hdcp, output, D1_A0_DETERMINE_RX_HDCP_CAPABLE); + } else { + 
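/* content protection is not desired here: every display on the link is inactive or disabled, HDCP 1.x is disabled through the link adjustment, or the retry budget is exhausted; park in HDCP_CP_NOT_DESIRED and let the immediate (0 ms) callback push the topology update through execution() */ +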
callback_in_ms(0, output); + set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED); + } + else if (is_hdmi_dvi_sl_hdcp(hdcp)) + if (is_cp_desired_hdcp1(hdcp)) { + callback_in_ms(0, output); + set_state_id(hdcp, output, H1_A0_WAIT_FOR_ACTIVE_RX); + } else { + callback_in_ms(0, output); + set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED); + } + else { + callback_in_ms(0, output); + set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED); + } + } else if (is_in_cp_not_desired_state(hdcp)) { + increment_stay_counter(hdcp); + } else if (is_in_hdcp1_states(hdcp)) { + status = mod_hdcp_hdcp1_transition(hdcp, + event_ctx, &input->hdcp1, output); + } else if (is_in_hdcp1_dp_states(hdcp)) { + status = mod_hdcp_hdcp1_dp_transition(hdcp, + event_ctx, &input->hdcp1, output); + } else { + status = MOD_HDCP_STATUS_INVALID_STATE; + } +out: + return status; +} + +static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (is_hdcp1(hdcp)) { + if (hdcp->auth.trans_input.hdcp1.create_session != UNKNOWN) + mod_hdcp_hdcp1_destroy_session(hdcp); + + if (hdcp->auth.trans_input.hdcp1.add_topology == PASS) { + status = mod_hdcp_remove_display_topology(hdcp); + if (status != MOD_HDCP_STATUS_SUCCESS) { + output->callback_needed = 0; + output->watchdog_timer_needed = 0; + goto out; + } + } + HDCP_TOP_RESET_AUTH_TRACE(hdcp); + memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication)); + memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state)); + set_state_id(hdcp, output, HDCP_INITIALIZED); + } else if (is_in_cp_not_desired_state(hdcp)) { + status = mod_hdcp_remove_display_topology(hdcp); + if (status != MOD_HDCP_STATUS_SUCCESS) { + output->callback_needed = 0; + output->watchdog_timer_needed = 0; + goto out; + } + HDCP_TOP_RESET_AUTH_TRACE(hdcp); + memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication)); + memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state)); + set_state_id(hdcp, output, HDCP_INITIALIZED); + } + +out: + /* stop callback and watchdog requests from previous authentication*/ + output->watchdog_timer_stop = 1; + output->callback_stop = 1; + return status; +} + +static enum mod_hdcp_status reset_connection(struct mod_hdcp *hdcp, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + memset(output, 0, sizeof(struct mod_hdcp_output)); + + status = reset_authentication(hdcp, output); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + if (current_state(hdcp) != HDCP_UNINITIALIZED) { + HDCP_TOP_RESET_CONN_TRACE(hdcp); + set_state_id(hdcp, output, HDCP_UNINITIALIZED); + } + memset(&hdcp->connection, 0, sizeof(hdcp->connection)); +out: + return status; +} + +/* + * Implementation of functions in mod_hdcp.h + */ +size_t mod_hdcp_get_memory_size(void) +{ + return sizeof(struct mod_hdcp); +} + +enum mod_hdcp_status mod_hdcp_setup(struct mod_hdcp *hdcp, + struct mod_hdcp_config *config) +{ + struct mod_hdcp_output output; + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + memset(hdcp, 0, sizeof(struct mod_hdcp)); + memset(&output, 0, sizeof(output)); + hdcp->config = *config; + HDCP_TOP_INTERFACE_TRACE(hdcp); + status = reset_connection(hdcp, &output); + if (status != MOD_HDCP_STATUS_SUCCESS) + push_error_status(hdcp, status); + return status; +} + +enum mod_hdcp_status mod_hdcp_teardown(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_output output; + + HDCP_TOP_INTERFACE_TRACE(hdcp); + 
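/* teardown resets the connection with a local, throwaway output, so no callbacks are scheduled for the caller; on success the whole module memory is zeroed and the buffer sized by mod_hdcp_get_memory_size() can be reused or freed. An illustrative caller sequence (not part of this patch): hdcp = kzalloc(mod_hdcp_get_memory_size(), GFP_KERNEL); mod_hdcp_setup(hdcp, &config); ...; mod_hdcp_teardown(hdcp); kfree(hdcp); */ +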
memset(&output, 0, sizeof(output)); + status = reset_connection(hdcp, &output); + if (status == MOD_HDCP_STATUS_SUCCESS) + memset(hdcp, 0, sizeof(struct mod_hdcp)); + else + push_error_status(hdcp, status); + return status; +} + +enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp, + struct mod_hdcp_link *link, struct mod_hdcp_display *display, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_display *display_container = NULL; + + HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, display->index); + memset(output, 0, sizeof(struct mod_hdcp_output)); + + /* skip inactive display */ + if (display->state != MOD_HDCP_DISPLAY_ACTIVE) { + status = MOD_HDCP_STATUS_SUCCESS; + goto out; + } + + /* check existing display container */ + if (get_active_display_at_index(hdcp, display->index)) { + status = MOD_HDCP_STATUS_SUCCESS; + goto out; + } + + /* find an empty display container */ + display_container = get_empty_display_container(hdcp); + if (!display_container) { + status = MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND; + goto out; + } + + /* reset existing authentication status */ + status = reset_authentication(hdcp, output); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + /* add display to connection */ + hdcp->connection.link = *link; + *display_container = *display; + + /* reset retry counters */ + reset_retry_counts(hdcp); + + /* reset error trace */ + memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace)); + + /* request authentication */ + if (current_state(hdcp) != HDCP_INITIALIZED) + set_state_id(hdcp, output, HDCP_INITIALIZED); + callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000, output); +out: + if (status != MOD_HDCP_STATUS_SUCCESS) + push_error_status(hdcp, status); + + return status; +} + +enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp, + uint8_t index, struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_display *display = NULL; + + HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, index); + memset(output, 0, sizeof(struct mod_hdcp_output)); + + /* find display in connection */ + display = get_active_display_at_index(hdcp, index); + if (!display) { + status = MOD_HDCP_STATUS_SUCCESS; + goto out; + } + + /* stop current authentication */ + status = reset_authentication(hdcp, output); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + /* remove display */ + display->state = MOD_HDCP_DISPLAY_INACTIVE; + + /* clear retry counters */ + reset_retry_counts(hdcp); + + /* reset error trace */ + memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace)); + + /* request authentication for remaining displays*/ + if (get_active_display_count(hdcp) > 0) + callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000, + output); +out: + if (status != MOD_HDCP_STATUS_SUCCESS) + push_error_status(hdcp, status); + return status; +} + +enum mod_hdcp_status mod_hdcp_query_display(struct mod_hdcp *hdcp, + uint8_t index, struct mod_hdcp_display_query *query) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_display *display = NULL; + + /* find display in connection */ + display = get_active_display_at_index(hdcp, index); + if (!display) { + status = MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; + goto out; + } + + /* populate query */ + query->link = &hdcp->connection.link; + query->display = display; + query->trace = &hdcp->connection.trace; + query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + + 
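/* the query defaults to HDCP_OFF; the call below, when successful, overwrites it with the live link status, and its return code is ignored, presumably so a failed query degrades to the safe "off" answer */ +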
mod_hdcp_hdcp1_get_link_encryption_status(hdcp, &query->encryption_status); + +out: + return status; +} + +enum mod_hdcp_status mod_hdcp_reset_connection(struct mod_hdcp *hdcp, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + HDCP_TOP_INTERFACE_TRACE(hdcp); + status = reset_connection(hdcp, output); + if (status != MOD_HDCP_STATUS_SUCCESS) + push_error_status(hdcp, status); + + return status; +} + +enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp, + enum mod_hdcp_event event, struct mod_hdcp_output *output) +{ + enum mod_hdcp_status exec_status, trans_status, reset_status, status; + struct mod_hdcp_event_context event_ctx; + + HDCP_EVENT_TRACE(hdcp, event); + memset(output, 0, sizeof(struct mod_hdcp_output)); + memset(&event_ctx, 0, sizeof(struct mod_hdcp_event_context)); + event_ctx.event = event; + + /* execute and transition */ + exec_status = execution(hdcp, &event_ctx, &hdcp->auth.trans_input); + trans_status = transition( + hdcp, &event_ctx, &hdcp->auth.trans_input, output); + if (trans_status == MOD_HDCP_STATUS_SUCCESS) { + status = MOD_HDCP_STATUS_SUCCESS; + } else if (exec_status == MOD_HDCP_STATUS_SUCCESS) { + status = MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE; + push_error_status(hdcp, status); + } else { + status = exec_status; + push_error_status(hdcp, status); + } + + /* reset authentication if needed */ + if (trans_status == MOD_HDCP_STATUS_RESET_NEEDED) { + HDCP_FULL_DDC_TRACE(hdcp); + reset_status = reset_authentication(hdcp, output); + if (reset_status != MOD_HDCP_STATUS_SUCCESS) + push_error_status(hdcp, reset_status); + } + return status; +} + +enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode( + enum signal_type signal) +{ + enum mod_hdcp_operation_mode mode = MOD_HDCP_MODE_OFF; + + switch (signal) { + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_HDMI_TYPE_A: + mode = MOD_HDCP_MODE_DEFAULT; + break; + case SIGNAL_TYPE_EDP: + case SIGNAL_TYPE_DISPLAY_PORT: + mode = MOD_HDCP_MODE_DP; + break; + case SIGNAL_TYPE_DISPLAY_PORT_MST: + mode = MOD_HDCP_MODE_DP_MST; + break; + default: + break; + } + + return mode; +} diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h new file mode 100644 index 000000000000..5664bc0b5bd0 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h @@ -0,0 +1,442 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef HDCP_H_ +#define HDCP_H_ + +#include "mod_hdcp.h" +#include "hdcp_log.h" + +#define BCAPS_READY_MASK 0x20 +#define BCAPS_REPEATER_MASK 0x40 +#define BSTATUS_DEVICE_COUNT_MASK 0X007F +#define BSTATUS_MAX_DEVS_EXCEEDED_MASK 0x0080 +#define BSTATUS_MAX_CASCADE_EXCEEDED_MASK 0x0800 +#define BCAPS_HDCP_CAPABLE_MASK_DP 0x01 +#define BCAPS_REPEATER_MASK_DP 0x02 +#define BSTATUS_READY_MASK_DP 0x01 +#define BSTATUS_R0_P_AVAILABLE_MASK_DP 0x02 +#define BSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP 0x04 +#define BSTATUS_REAUTH_REQUEST_MASK_DP 0x08 +#define BINFO_DEVICE_COUNT_MASK_DP 0X007F +#define BINFO_MAX_DEVS_EXCEEDED_MASK_DP 0x0080 +#define BINFO_MAX_CASCADE_EXCEEDED_MASK_DP 0x0800 + +#define RXSTATUS_MSG_SIZE_MASK 0x03FF +#define RXSTATUS_READY_MASK 0x0400 +#define RXSTATUS_REAUTH_REQUEST_MASK 0x0800 +#define RXIDLIST_DEVICE_COUNT_LOWER_MASK 0xf0 +#define RXIDLIST_DEVICE_COUNT_UPPER_MASK 0x01 +#define RXCAPS_BYTE0_HDCP_CAPABLE_MASK_DP 0x02 +#define RXSTATUS_READY_MASK_DP 0x0001 +#define RXSTATUS_H_P_AVAILABLE_MASK_DP 0x0002 +#define RXSTATUS_PAIRING_AVAILABLE_MASK_DP 0x0004 +#define RXSTATUS_REAUTH_REQUEST_MASK_DP 0x0008 +#define RXSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP 0x0010 + +enum mod_hdcp_trans_input_result { + UNKNOWN = 0, + PASS, + FAIL +}; + +struct mod_hdcp_transition_input_hdcp1 { + uint8_t bksv_read; + uint8_t bksv_validation; + uint8_t add_topology; + uint8_t create_session; + uint8_t an_write; + uint8_t aksv_write; + uint8_t ainfo_write; + uint8_t bcaps_read; + uint8_t r0p_read; + uint8_t rx_validation; + uint8_t encryption; + uint8_t link_maintenance; + uint8_t ready_check; + uint8_t bstatus_read; + uint8_t max_cascade_check; + uint8_t max_devs_check; + uint8_t device_count_check; + uint8_t ksvlist_read; + uint8_t vp_read; + uint8_t ksvlist_vp_validation; + + uint8_t hdcp_capable_dp; + uint8_t binfo_read_dp; + uint8_t r0p_available_dp; + uint8_t link_integiry_check; + uint8_t reauth_request_check; + uint8_t stream_encryption_dp; +}; + +union mod_hdcp_transition_input { + struct mod_hdcp_transition_input_hdcp1 hdcp1; +}; + +struct mod_hdcp_message_hdcp1 { + uint8_t an[8]; + uint8_t aksv[5]; + uint8_t ainfo; + uint8_t bksv[5]; + uint16_t r0p; + uint8_t bcaps; + uint16_t bstatus; + uint8_t ksvlist[635]; + uint16_t ksvlist_size; + uint8_t vp[20]; + + uint16_t binfo_dp; +}; + +union mod_hdcp_message { + struct mod_hdcp_message_hdcp1 hdcp1; +}; + +struct mod_hdcp_auth_counters { + uint8_t stream_management_retry_count; +}; + +/* contains values per connection */ +struct mod_hdcp_connection { + struct mod_hdcp_link link; + struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS]; + uint8_t is_repeater; + uint8_t is_km_stored; + struct mod_hdcp_trace trace; + uint8_t hdcp1_retry_count; +}; + +/* contains values per authentication cycle */ +struct mod_hdcp_authentication { + uint32_t id; + union mod_hdcp_message msg; + union mod_hdcp_transition_input trans_input; + struct mod_hdcp_auth_counters count; +}; + +/* contains values per state change */ +struct mod_hdcp_state { + uint8_t id; + uint32_t stay_count; +}; + +/* per event in a state */ +struct mod_hdcp_event_context { + enum mod_hdcp_event event; + uint8_t rx_id_list_ready; + uint8_t unexpected_event; +}; + +struct mod_hdcp { + /* 
per link */ + struct mod_hdcp_config config; + /* per connection */ + struct mod_hdcp_connection connection; + /* per authentication attempt */ + struct mod_hdcp_authentication auth; + /* per state in an authentication */ + struct mod_hdcp_state state; + /* reserved memory buffer */ + uint8_t buf[2025]; +}; + +enum mod_hdcp_initial_state_id { + HDCP_UNINITIALIZED = 0x0, + HDCP_INITIAL_STATE_START = HDCP_UNINITIALIZED, + HDCP_INITIALIZED, + HDCP_CP_NOT_DESIRED, + HDCP_INITIAL_STATE_END = HDCP_CP_NOT_DESIRED +}; + +enum mod_hdcp_hdcp1_state_id { + HDCP1_STATE_START = HDCP_INITIAL_STATE_END, + H1_A0_WAIT_FOR_ACTIVE_RX, + H1_A1_EXCHANGE_KSVS, + H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER, + H1_A45_AUTHENTICATED, + H1_A8_WAIT_FOR_READY, + H1_A9_READ_KSV_LIST, + HDCP1_STATE_END = H1_A9_READ_KSV_LIST +}; + +enum mod_hdcp_hdcp1_dp_state_id { + HDCP1_DP_STATE_START = HDCP1_STATE_END, + D1_A0_DETERMINE_RX_HDCP_CAPABLE, + D1_A1_EXCHANGE_KSVS, + D1_A23_WAIT_FOR_R0_PRIME, + D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER, + D1_A4_AUTHENTICATED, + D1_A6_WAIT_FOR_READY, + D1_A7_READ_KSV_LIST, + HDCP1_DP_STATE_END = D1_A7_READ_KSV_LIST, +}; + +/* hdcp1 executions and transitions */ +typedef enum mod_hdcp_status (*mod_hdcp_action)(struct mod_hdcp *hdcp); +uint8_t mod_hdcp_execute_and_set( + mod_hdcp_action func, uint8_t *flag, + enum mod_hdcp_status *status, struct mod_hdcp *hdcp, char *str); +enum mod_hdcp_status mod_hdcp_hdcp1_execution(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input); +enum mod_hdcp_status mod_hdcp_hdcp1_dp_execution(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input); +enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input, + struct mod_hdcp_output *output); +enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input, + struct mod_hdcp_output *output); + +/* log functions */ +void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size, + uint8_t *buf, uint32_t buf_size); +/* TODO: add adjustment log */ + +/* psp functions */ +enum mod_hdcp_status mod_hdcp_add_display_topology( + struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_remove_display_topology( + struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption( + struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp, + enum mod_hdcp_encryption_status *encryption_status); +/* ddc functions */ +enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_bcaps(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_bstatus(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_r0p(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_ksvlist(struct mod_hdcp *hdcp); +enum 
mod_hdcp_status mod_hdcp_read_vp(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_binfo(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_aksv(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_ainfo(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_an(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_rxcaps(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_rxstatus(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_h_prime(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_pairing_info(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_rx_id_list(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_stream_ready(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_ake_init(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_no_stored_km(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_stored_km(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_lc_init(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_eks(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_repeater_auth_ack(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_stream_manage(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp); + +/* hdcp version helpers */ +static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp) +{ + return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP || + hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST); +} + +static inline uint8_t is_dp_mst_hdcp(struct mod_hdcp *hdcp) +{ + return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST); +} + +static inline uint8_t is_hdmi_dvi_sl_hdcp(struct mod_hdcp *hdcp) +{ + return (hdcp->connection.link.mode == MOD_HDCP_MODE_DEFAULT); +} + +/* hdcp state helpers */ +static inline uint8_t current_state(struct mod_hdcp *hdcp) +{ + return hdcp->state.id; +} + +static inline void set_state_id(struct mod_hdcp *hdcp, + struct mod_hdcp_output *output, uint8_t id) +{ + memset(&hdcp->state, 0, sizeof(hdcp->state)); + hdcp->state.id = id; + /* callback timer should be reset per state */ + output->callback_stop = 1; + output->watchdog_timer_stop = 1; + HDCP_NEXT_STATE_TRACE(hdcp, id, output); +} + +static inline uint8_t is_in_hdcp1_states(struct mod_hdcp *hdcp) +{ + return (current_state(hdcp) > HDCP1_STATE_START && + current_state(hdcp) <= HDCP1_STATE_END); +} + +static inline uint8_t is_in_hdcp1_dp_states(struct mod_hdcp *hdcp) +{ + return (current_state(hdcp) > HDCP1_DP_STATE_START && + current_state(hdcp) <= HDCP1_DP_STATE_END); +} + +static inline uint8_t is_hdcp1(struct mod_hdcp *hdcp) +{ + return (is_in_hdcp1_states(hdcp) || is_in_hdcp1_dp_states(hdcp)); +} + +static inline uint8_t is_in_cp_not_desired_state(struct mod_hdcp *hdcp) +{ + return current_state(hdcp) == HDCP_CP_NOT_DESIRED; +} + +static inline uint8_t is_in_initialized_state(struct mod_hdcp *hdcp) +{ + return current_state(hdcp) == HDCP_INITIALIZED; +} + +/* transition operation helpers */ +static inline void increment_stay_counter(struct mod_hdcp *hdcp) +{ + hdcp->state.stay_count++; +} + +static inline void fail_and_restart_in_ms(uint16_t time, + enum mod_hdcp_status *status, + struct mod_hdcp_output *output) +{ + output->callback_needed = 1; + output->callback_delay = time; + output->watchdog_timer_needed = 0; + output->watchdog_timer_delay = 0; + *status = 
MOD_HDCP_STATUS_RESET_NEEDED; +} + +static inline void callback_in_ms(uint16_t time, struct mod_hdcp_output *output) +{ + output->callback_needed = 1; + output->callback_delay = time; +} + +static inline void set_watchdog_in_ms(struct mod_hdcp *hdcp, uint16_t time, + struct mod_hdcp_output *output) +{ + output->watchdog_timer_needed = 1; + output->watchdog_timer_delay = time; +} + +/* connection topology helpers */ +static inline uint8_t is_display_active(struct mod_hdcp_display *display) +{ + return display->state >= MOD_HDCP_DISPLAY_ACTIVE; +} + +static inline uint8_t is_display_added(struct mod_hdcp_display *display) +{ + return display->state >= MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED; +} + +static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *display) +{ + return display->state >= MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; +} + +static inline uint8_t get_active_display_count(struct mod_hdcp *hdcp) +{ + uint8_t added_count = 0; + uint8_t i; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) + if (is_display_active(&hdcp->connection.displays[i])) + added_count++; + return added_count; +} + +static inline uint8_t get_added_display_count(struct mod_hdcp *hdcp) +{ + uint8_t added_count = 0; + uint8_t i; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) + if (is_display_added(&hdcp->connection.displays[i])) + added_count++; + return added_count; +} + +static inline struct mod_hdcp_display *get_first_added_display( + struct mod_hdcp *hdcp) +{ + uint8_t i; + struct mod_hdcp_display *display = NULL; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) + if (is_display_added(&hdcp->connection.displays[i])) { + display = &hdcp->connection.displays[i]; + break; + } + return display; +} + +static inline struct mod_hdcp_display *get_active_display_at_index( + struct mod_hdcp *hdcp, uint8_t index) +{ + uint8_t i; + struct mod_hdcp_display *display = NULL; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) + if (hdcp->connection.displays[i].index == index && + is_display_active(&hdcp->connection.displays[i])) { + display = &hdcp->connection.displays[i]; + break; + } + return display; +} + +static inline struct mod_hdcp_display *get_empty_display_container( + struct mod_hdcp *hdcp) +{ + uint8_t i; + struct mod_hdcp_display *display = NULL; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) + if (!is_display_active(&hdcp->connection.displays[i])) { + display = &hdcp->connection.displays[i]; + break; + } + return display; +} + +static inline void reset_retry_counts(struct mod_hdcp *hdcp) +{ + hdcp->connection.hdcp1_retry_count = 0; +} + +#endif /* HDCP_H_ */ diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c new file mode 100644 index 000000000000..3db4a7da414f --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c @@ -0,0 +1,531 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "hdcp.h" + +static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp) +{ + uint64_t n = *(uint64_t *)hdcp->auth.msg.hdcp1.bksv; + uint8_t count = 0; + + while (n) { + count++; + n &= (n - 1); + } + return (count == 20) ? MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP1_INVALID_BKSV; +} + +static inline enum mod_hdcp_status check_ksv_ready(struct mod_hdcp *hdcp) +{ + if (is_dp_hdcp(hdcp)) + return (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_READY_MASK_DP) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY; + return (hdcp->auth.msg.hdcp1.bcaps & BCAPS_READY_MASK) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY; +} + +static inline enum mod_hdcp_status check_hdcp_capable_dp(struct mod_hdcp *hdcp) +{ + return (hdcp->auth.msg.hdcp1.bcaps & BCAPS_HDCP_CAPABLE_MASK_DP) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE; +} + +static inline enum mod_hdcp_status check_r0p_available_dp(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + if (is_dp_hdcp(hdcp)) { + status = (hdcp->auth.msg.hdcp1.bstatus & + BSTATUS_R0_P_AVAILABLE_MASK_DP) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING; + } else { + status = MOD_HDCP_STATUS_INVALID_OPERATION; + } + return status; +} + +static inline enum mod_hdcp_status check_link_integrity_dp( + struct mod_hdcp *hdcp) +{ + return (hdcp->auth.msg.hdcp1.bstatus & + BSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP) ? + MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE : + MOD_HDCP_STATUS_SUCCESS; +} + +static inline enum mod_hdcp_status check_no_reauthentication_request_dp( + struct mod_hdcp *hdcp) +{ + return (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_REAUTH_REQUEST_MASK_DP) ? + MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED : + MOD_HDCP_STATUS_SUCCESS; +} + +static inline enum mod_hdcp_status check_no_max_cascade(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = (hdcp->auth.msg.hdcp1.binfo_dp & + BINFO_MAX_CASCADE_EXCEEDED_MASK_DP) ? + MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE : + MOD_HDCP_STATUS_SUCCESS; + else + status = (hdcp->auth.msg.hdcp1.bstatus & + BSTATUS_MAX_CASCADE_EXCEEDED_MASK) ? + MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE : + MOD_HDCP_STATUS_SUCCESS; + return status; +} + +static inline enum mod_hdcp_status check_no_max_devs(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = (hdcp->auth.msg.hdcp1.binfo_dp & + BINFO_MAX_DEVS_EXCEEDED_MASK_DP) ? + MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE : + MOD_HDCP_STATUS_SUCCESS; + else + status = (hdcp->auth.msg.hdcp1.bstatus & + BSTATUS_MAX_DEVS_EXCEEDED_MASK) ? + MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE : + MOD_HDCP_STATUS_SUCCESS; + return status; +} + +static inline uint8_t get_device_count(struct mod_hdcp *hdcp) +{ + return is_dp_hdcp(hdcp) ? 
+ (hdcp->auth.msg.hdcp1.binfo_dp & BINFO_DEVICE_COUNT_MASK_DP) : + (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_DEVICE_COUNT_MASK); +} + +static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) +{ + /* device count must be greater than or equal to tracked hdcp displays */ + return (get_device_count(hdcp) < get_added_display_count(hdcp)) ? + MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE : + MOD_HDCP_STATUS_SUCCESS; +} + +static enum mod_hdcp_status wait_for_active_rx(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bksv, + &input->bksv_read, &status, + hdcp, "bksv_read")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps, + &input->bcaps_read, &status, + hdcp, "bcaps_read")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status exchange_ksvs(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_add_display_topology, + &input->add_topology, &status, + hdcp, "add_topology")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_create_session, + &input->create_session, &status, + hdcp, "create_session")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_write_an, + &input->an_write, &status, + hdcp, "an_write")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_write_aksv, + &input->aksv_write, &status, + hdcp, "aksv_write")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bksv, + &input->bksv_read, &status, + hdcp, "bksv_read")) + goto out; + if (!mod_hdcp_execute_and_set(validate_bksv, + &input->bksv_validation, &status, + hdcp, "bksv_validation")) + goto out; + if (hdcp->auth.msg.hdcp1.ainfo) { + if (!mod_hdcp_execute_and_set(mod_hdcp_write_ainfo, + &input->ainfo_write, &status, + hdcp, "ainfo_write")) + goto out; + } +out: + return status; +} + +static enum mod_hdcp_status computations_validate_rx_test_for_repeater( + struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_read_r0p, + &input->r0p_read, &status, + hdcp, "r0p_read")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_validate_rx, + &input->rx_validation, &status, + hdcp, "rx_validation")) + goto out; + if (hdcp->connection.is_repeater) { + if (!hdcp->connection.link.adjust.hdcp1.postpone_encryption) + if (!mod_hdcp_execute_and_set( + mod_hdcp_hdcp1_enable_encryption, + &input->encryption, &status, + hdcp, "encryption")) + goto out; + } else { + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_enable_encryption, + &input->encryption, &status, + hdcp, "encryption")) + goto out; + if (is_dp_mst_hdcp(hdcp)) + if (!mod_hdcp_execute_and_set( + mod_hdcp_hdcp1_enable_dp_stream_encryption, + &input->stream_encryption_dp, &status, + hdcp, "stream_encryption_dp")) + goto out; + } +out: + return status; +} + +static enum mod_hdcp_status 
authenticated(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_link_maintenance, + &input->link_maintenance, &status, + hdcp, "link_maintenance")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status wait_for_ready(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && + event_ctx->event != MOD_HDCP_EVENT_CPIRQ && + event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (is_dp_hdcp(hdcp)) { + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus, + &input->bstatus_read, &status, + hdcp, "bstatus_read")) + goto out; + if (!mod_hdcp_execute_and_set(check_link_integrity_dp, + &input->link_integiry_check, &status, + hdcp, "link_integiry_check")) + goto out; + if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp, + &input->reauth_request_check, &status, + hdcp, "reauth_request_check")) + goto out; + } else { + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps, + &input->bcaps_read, &status, + hdcp, "bcaps_read")) + goto out; + } + if (!mod_hdcp_execute_and_set(check_ksv_ready, + &input->ready_check, &status, + hdcp, "ready_check")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status read_ksv_list(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + uint8_t device_count; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (is_dp_hdcp(hdcp)) { + if (!mod_hdcp_execute_and_set(mod_hdcp_read_binfo, + &input->binfo_read_dp, &status, + hdcp, "binfo_read_dp")) + goto out; + } else { + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus, + &input->bstatus_read, &status, + hdcp, "bstatus_read")) + goto out; + } + if (!mod_hdcp_execute_and_set(check_no_max_cascade, + &input->max_cascade_check, &status, + hdcp, "max_cascade_check")) + goto out; + if (!mod_hdcp_execute_and_set(check_no_max_devs, + &input->max_devs_check, &status, + hdcp, "max_devs_check")) + goto out; + if (!mod_hdcp_execute_and_set(check_device_count, + &input->device_count_check, &status, + hdcp, "device_count_check")) + goto out; + device_count = get_device_count(hdcp); + hdcp->auth.msg.hdcp1.ksvlist_size = device_count*5; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_ksvlist, + &input->ksvlist_read, &status, + hdcp, "ksvlist_read")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_vp, + &input->vp_read, &status, + hdcp, "vp_read")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_validate_ksvlist_vp, + &input->ksvlist_vp_validation, &status, + hdcp, "ksvlist_vp_validation")) + goto out; + if (input->encryption != PASS) + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_enable_encryption, + &input->encryption, &status, + hdcp, "encryption")) + goto out; + if (is_dp_mst_hdcp(hdcp)) + if (!mod_hdcp_execute_and_set( + mod_hdcp_hdcp1_enable_dp_stream_encryption, + &input->stream_encryption_dp, &status, + hdcp, "stream_encryption_dp")) + goto out; +out: + return status; +} + +static enum 
mod_hdcp_status determine_rx_hdcp_capable_dp(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps, + &input->bcaps_read, &status, + hdcp, "bcaps_read")) + goto out; + if (!mod_hdcp_execute_and_set(check_hdcp_capable_dp, + &input->hdcp_capable_dp, &status, + hdcp, "hdcp_capable_dp")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status wait_for_r0_prime_dp(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CPIRQ && + event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus, + &input->bstatus_read, &status, + hdcp, "bstatus_read")) + goto out; + if (!mod_hdcp_execute_and_set(check_r0p_available_dp, + &input->r0p_available_dp, &status, + hdcp, "r0p_available_dp")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CPIRQ) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus, + &input->bstatus_read, &status, + hdcp, "bstatus_read")) + goto out; + if (!mod_hdcp_execute_and_set(check_link_integrity_dp, + &input->link_integiry_check, &status, + hdcp, "link_integiry_check")) + goto out; + if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp, + &input->reauth_request_check, &status, + hdcp, "reauth_request_check")) + goto out; +out: + return status; +} + +uint8_t mod_hdcp_execute_and_set( + mod_hdcp_action func, uint8_t *flag, + enum mod_hdcp_status *status, struct mod_hdcp *hdcp, char *str) +{ + *status = func(hdcp); + if (*status == MOD_HDCP_STATUS_SUCCESS && *flag != PASS) { + HDCP_INPUT_PASS_TRACE(hdcp, str); + *flag = PASS; + } else if (*status != MOD_HDCP_STATUS_SUCCESS && *flag != FAIL) { + HDCP_INPUT_FAIL_TRACE(hdcp, str); + *flag = FAIL; + } + return (*status == MOD_HDCP_STATUS_SUCCESS); +} + +enum mod_hdcp_status mod_hdcp_hdcp1_execution(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + switch (current_state(hdcp)) { + case H1_A0_WAIT_FOR_ACTIVE_RX: + status = wait_for_active_rx(hdcp, event_ctx, input); + break; + case H1_A1_EXCHANGE_KSVS: + status = exchange_ksvs(hdcp, event_ctx, input); + break; + case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER: + status = computations_validate_rx_test_for_repeater(hdcp, + event_ctx, input); + break; + case H1_A45_AUTHENTICATED: + status = authenticated(hdcp, event_ctx, input); + break; + case H1_A8_WAIT_FOR_READY: + status = wait_for_ready(hdcp, event_ctx, input); + break; + case H1_A9_READ_KSV_LIST: + status = read_ksv_list(hdcp, event_ctx, input); + break; + default: + status = MOD_HDCP_STATUS_INVALID_STATE; + break; + } + + return status; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_dp_execution(struct mod_hdcp *hdcp, + struct
mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + switch (current_state(hdcp)) { + case D1_A0_DETERMINE_RX_HDCP_CAPABLE: + status = determine_rx_hdcp_capable_dp(hdcp, event_ctx, input); + break; + case D1_A1_EXCHANGE_KSVS: + status = exchange_ksvs(hdcp, event_ctx, input); + break; + case D1_A23_WAIT_FOR_R0_PRIME: + status = wait_for_r0_prime_dp(hdcp, event_ctx, input); + break; + case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER: + status = computations_validate_rx_test_for_repeater( + hdcp, event_ctx, input); + break; + case D1_A4_AUTHENTICATED: + status = authenticated_dp(hdcp, event_ctx, input); + break; + case D1_A6_WAIT_FOR_READY: + status = wait_for_ready(hdcp, event_ctx, input); + break; + case D1_A7_READ_KSV_LIST: + status = read_ksv_list(hdcp, event_ctx, input); + break; + default: + status = MOD_HDCP_STATUS_INVALID_STATE; + break; + } + + return status; +} diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c new file mode 100644 index 000000000000..136b8011ff3f --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c @@ -0,0 +1,307 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "hdcp.h" + +enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_connection *conn = &hdcp->connection; + struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust; + + switch (current_state(hdcp)) { + case H1_A0_WAIT_FOR_ACTIVE_RX: + if (input->bksv_read != PASS || input->bcaps_read != PASS) { + /* 1A-04: repeatedly attempts on port access failure */ + callback_in_ms(500, output); + increment_stay_counter(hdcp); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, H1_A1_EXCHANGE_KSVS); + break; + case H1_A1_EXCHANGE_KSVS: + if (input->add_topology != PASS || + input->create_session != PASS) { + /* out of sync with psp state */ + adjust->hdcp1.disable = 1; + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->an_write != PASS || + input->aksv_write != PASS || + input->bksv_read != PASS || + input->bksv_validation != PASS || + input->ainfo_write == FAIL) { + /* 1A-05: consider invalid bksv a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(300, output); + set_state_id(hdcp, output, + H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER); + break; + case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER: + if (input->bcaps_read != PASS || + input->r0p_read != PASS || + input->rx_validation != PASS || + (!conn->is_repeater && input->encryption != PASS)) { + /* 1A-06: consider invalid r0' a failure */ + /* 1A-08: consider bksv listed in SRM a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + if (conn->is_repeater) { + callback_in_ms(0, output); + set_watchdog_in_ms(hdcp, 5000, output); + set_state_id(hdcp, output, H1_A8_WAIT_FOR_READY); + } else { + callback_in_ms(0, output); + set_state_id(hdcp, output, H1_A45_AUTHENTICATED); + HDCP_FULL_DDC_TRACE(hdcp); + } + break; + case H1_A45_AUTHENTICATED: + if (input->link_maintenance != PASS) { + /* 1A-07: consider invalid ri' a failure */ + /* 1A-07a: consider read ri' not returned a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(500, output); + increment_stay_counter(hdcp); + break; + case H1_A8_WAIT_FOR_READY: + if (input->ready_check != PASS) { + if (event_ctx->event == + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + /* 1B-03: fail hdcp on ksv list READY timeout */ + /* prevent black screen in next attempt */ + adjust->hdcp1.postpone_encryption = 1; + fail_and_restart_in_ms(0, &status, output); + } else { + /* continue ksv list READY polling*/ + callback_in_ms(500, output); + increment_stay_counter(hdcp); + } + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, H1_A9_READ_KSV_LIST); + break; + case H1_A9_READ_KSV_LIST: + if (input->bstatus_read != PASS || + input->max_cascade_check != PASS || + input->max_devs_check != PASS || + input->device_count_check != PASS || + input->ksvlist_read != PASS || + input->vp_read != PASS || + input->ksvlist_vp_validation != PASS || + input->encryption != PASS) { + /* 1B-06: consider MAX_CASCADE_EXCEEDED a failure */ + /* 1B-05: consider MAX_DEVS_EXCEEDED a failure */ + /* 1B-04: consider invalid v' a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, H1_A45_AUTHENTICATED); + HDCP_FULL_DDC_TRACE(hdcp); + break; + 
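/* defensive default: an unrecognized state id restarts authentication immediately via fail_and_restart_in_ms() */ +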
default: + status = MOD_HDCP_STATUS_INVALID_STATE; + fail_and_restart_in_ms(0, &status, output); + break; + } + + return status; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_connection *conn = &hdcp->connection; + struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust; + + switch (current_state(hdcp)) { + case D1_A0_DETERMINE_RX_HDCP_CAPABLE: + if (input->bcaps_read != PASS) { + /* 1A-04: no authentication on bcaps read failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->hdcp_capable_dp != PASS) { + adjust->hdcp1.disable = 1; + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, D1_A1_EXCHANGE_KSVS); + break; + case D1_A1_EXCHANGE_KSVS: + if (input->add_topology != PASS || + input->create_session != PASS) { + /* out of sync with psp state */ + adjust->hdcp1.disable = 1; + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->an_write != PASS || + input->aksv_write != PASS || + input->bksv_read != PASS || + input->bksv_validation != PASS || + input->ainfo_write == FAIL) { + /* 1A-05: consider invalid bksv a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + set_watchdog_in_ms(hdcp, 100, output); + set_state_id(hdcp, output, D1_A23_WAIT_FOR_R0_PRIME); + break; + case D1_A23_WAIT_FOR_R0_PRIME: + if (input->bstatus_read != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->r0p_available_dp != PASS) { + if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) + fail_and_restart_in_ms(0, &status, output); + else + increment_stay_counter(hdcp); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER); + break; + case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER: + if (input->r0p_read != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->rx_validation != PASS) { + if (hdcp->state.stay_count < 2) { + /* allow 2 additional retries */ + callback_in_ms(0, output); + increment_stay_counter(hdcp); + } else { + /* + * 1A-06: consider invalid r0' a failure + * after 3 attempts. 
+ * 1A-08: consider bksv listed in SRM a failure + */ + fail_and_restart_in_ms(0, &status, output); + } + break; + } else if ((!conn->is_repeater && input->encryption != PASS) || + (!conn->is_repeater && is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) { + fail_and_restart_in_ms(0, &status, output); + break; + } + if (conn->is_repeater) { + set_watchdog_in_ms(hdcp, 5000, output); + set_state_id(hdcp, output, D1_A6_WAIT_FOR_READY); + } else { + set_state_id(hdcp, output, D1_A4_AUTHENTICATED); + HDCP_FULL_DDC_TRACE(hdcp); + } + break; + case D1_A4_AUTHENTICATED: + if (input->link_integiry_check != PASS || + input->reauth_request_check != PASS) { + /* 1A-07: restart hdcp on a link integrity failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + break; + case D1_A6_WAIT_FOR_READY: + if (input->link_integiry_check == FAIL || + input->reauth_request_check == FAIL) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->ready_check != PASS) { + if (event_ctx->event == + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + /* 1B-04: fail hdcp on ksv list READY timeout */ + /* prevent black screen in next attempt */ + adjust->hdcp1.postpone_encryption = 1; + fail_and_restart_in_ms(0, &status, output); + } else { + increment_stay_counter(hdcp); + } + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, D1_A7_READ_KSV_LIST); + break; + case D1_A7_READ_KSV_LIST: + if (input->binfo_read_dp != PASS || + input->max_cascade_check != PASS || + input->max_devs_check != PASS) { + /* 1B-06: consider MAX_DEVS_EXCEEDED a failure */ + /* 1B-07: consider MAX_CASCADE_EXCEEDED a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->device_count_check != PASS) { + /* + * some slow dongles don't update the + * device count as soon as downstream is connected; + * give them more time to react. + */ + adjust->hdcp1.postpone_encryption = 1; + fail_and_restart_in_ms(1000, &status, output); + break; + } else if (input->ksvlist_read != PASS || + input->vp_read != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->ksvlist_vp_validation != PASS) { + if (hdcp->state.stay_count < 2) { + /* allow 2 additional retries */ + callback_in_ms(0, output); + increment_stay_counter(hdcp); + } else { + /* + * 1B-05: consider invalid v' a failure + * after 3 attempts. + */ + fail_and_restart_in_ms(0, &status, output); + } + break; + } else if (input->encryption != PASS || + (is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) { + fail_and_restart_in_ms(0, &status, output); + break; + } + set_state_id(hdcp, output, D1_A4_AUTHENTICATED); + HDCP_FULL_DDC_TRACE(hdcp); + break; + default: + fail_and_restart_in_ms(0, &status, output); + break; + } + + return status; +} diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c new file mode 100644 index 000000000000..e7baae059b85 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c @@ -0,0 +1,305 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "hdcp.h" + +#define MIN(a, b) ((a) < (b) ? (a) : (b)) +#define HDCP_I2C_ADDR 0x3a /* 0x74 >> 1*/ +#define KSV_READ_SIZE 0xf /* 0x6803b - 0x6802c */ +#define HDCP_MAX_AUX_TRANSACTION_SIZE 16 + +enum mod_hdcp_ddc_message_id { + MOD_HDCP_MESSAGE_ID_INVALID = -1, + + /* HDCP 1.4 */ + + MOD_HDCP_MESSAGE_ID_READ_BKSV = 0, + MOD_HDCP_MESSAGE_ID_READ_RI_R0, + MOD_HDCP_MESSAGE_ID_WRITE_AKSV, + MOD_HDCP_MESSAGE_ID_WRITE_AINFO, + MOD_HDCP_MESSAGE_ID_WRITE_AN, + MOD_HDCP_MESSAGE_ID_READ_VH_X, + MOD_HDCP_MESSAGE_ID_READ_VH_0, + MOD_HDCP_MESSAGE_ID_READ_VH_1, + MOD_HDCP_MESSAGE_ID_READ_VH_2, + MOD_HDCP_MESSAGE_ID_READ_VH_3, + MOD_HDCP_MESSAGE_ID_READ_VH_4, + MOD_HDCP_MESSAGE_ID_READ_BCAPS, + MOD_HDCP_MESSAGE_ID_READ_BSTATUS, + MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO, + MOD_HDCP_MESSAGE_ID_READ_BINFO, + + MOD_HDCP_MESSAGE_ID_MAX +}; + +static const uint8_t hdcp_i2c_offsets[] = { + [MOD_HDCP_MESSAGE_ID_READ_BKSV] = 0x0, + [MOD_HDCP_MESSAGE_ID_READ_RI_R0] = 0x8, + [MOD_HDCP_MESSAGE_ID_WRITE_AKSV] = 0x10, + [MOD_HDCP_MESSAGE_ID_WRITE_AINFO] = 0x15, + [MOD_HDCP_MESSAGE_ID_WRITE_AN] = 0x18, + [MOD_HDCP_MESSAGE_ID_READ_VH_X] = 0x20, + [MOD_HDCP_MESSAGE_ID_READ_VH_0] = 0x20, + [MOD_HDCP_MESSAGE_ID_READ_VH_1] = 0x24, + [MOD_HDCP_MESSAGE_ID_READ_VH_2] = 0x28, + [MOD_HDCP_MESSAGE_ID_READ_VH_3] = 0x2C, + [MOD_HDCP_MESSAGE_ID_READ_VH_4] = 0x30, + [MOD_HDCP_MESSAGE_ID_READ_BCAPS] = 0x40, + [MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x41, + [MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x43, + [MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0xFF, +}; + +static const uint32_t hdcp_dpcd_addrs[] = { + [MOD_HDCP_MESSAGE_ID_READ_BKSV] = 0x68000, + [MOD_HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005, + [MOD_HDCP_MESSAGE_ID_WRITE_AKSV] = 0x68007, + [MOD_HDCP_MESSAGE_ID_WRITE_AINFO] = 0x6803B, + [MOD_HDCP_MESSAGE_ID_WRITE_AN] = 0x6800c, + [MOD_HDCP_MESSAGE_ID_READ_VH_X] = 0x68014, + [MOD_HDCP_MESSAGE_ID_READ_VH_0] = 0x68014, + [MOD_HDCP_MESSAGE_ID_READ_VH_1] = 0x68018, + [MOD_HDCP_MESSAGE_ID_READ_VH_2] = 0x6801c, + [MOD_HDCP_MESSAGE_ID_READ_VH_3] = 0x68020, + [MOD_HDCP_MESSAGE_ID_READ_VH_4] = 0x68024, + [MOD_HDCP_MESSAGE_ID_READ_BCAPS] = 0x68028, + [MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x68029, + [MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x6802c, + [MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0x6802a, +}; + +static enum mod_hdcp_status read(struct mod_hdcp *hdcp, + enum mod_hdcp_ddc_message_id msg_id, + uint8_t *buf, + uint32_t buf_len) +{ + bool success = true; + 
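/* + * DP AUX transactions carry at most HDCP_MAX_AUX_TRANSACTION_SIZE + * (16) bytes per transfer, so the DPCD path below reads in chunks; + * the I2C-over-DDC path issues a single read. + */ +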
uint32_t cur_size = 0; + uint32_t data_offset = 0; + + if (is_dp_hdcp(hdcp)) { + while (buf_len > 0) { + cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE); + success = hdcp->config.ddc.funcs.read_dpcd(hdcp->config.ddc.handle, + hdcp_dpcd_addrs[msg_id] + data_offset, + buf + data_offset, + cur_size); + + if (!success) + break; + + buf_len -= cur_size; + data_offset += cur_size; + } + } else { + success = hdcp->config.ddc.funcs.read_i2c( + hdcp->config.ddc.handle, + HDCP_I2C_ADDR, + hdcp_i2c_offsets[msg_id], + buf, + (uint32_t)buf_len); + } + + return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE; +} + +static enum mod_hdcp_status read_repeatedly(struct mod_hdcp *hdcp, + enum mod_hdcp_ddc_message_id msg_id, + uint8_t *buf, + uint32_t buf_len, + uint8_t read_size) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_DDC_FAILURE; + uint32_t cur_size = 0; + uint32_t data_offset = 0; + + while (buf_len > 0) { + cur_size = MIN(buf_len, read_size); + status = read(hdcp, msg_id, buf + data_offset, cur_size); + + if (status != MOD_HDCP_STATUS_SUCCESS) + break; + + buf_len -= cur_size; + data_offset += cur_size; + } + + return status; +} + +static enum mod_hdcp_status write(struct mod_hdcp *hdcp, + enum mod_hdcp_ddc_message_id msg_id, + uint8_t *buf, + uint32_t buf_len) +{ + bool success = true; + uint32_t cur_size = 0; + uint32_t data_offset = 0; + + if (is_dp_hdcp(hdcp)) { + while (buf_len > 0) { + cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE); + success = hdcp->config.ddc.funcs.write_dpcd( + hdcp->config.ddc.handle, + hdcp_dpcd_addrs[msg_id] + data_offset, + buf + data_offset, + cur_size); + + if (!success) + break; + + buf_len -= cur_size; + data_offset += cur_size; + } + } else { + hdcp->buf[0] = hdcp_i2c_offsets[msg_id]; + memmove(&hdcp->buf[1], buf, buf_len); + success = hdcp->config.ddc.funcs.write_i2c( + hdcp->config.ddc.handle, + HDCP_I2C_ADDR, + hdcp->buf, + (uint32_t)(buf_len+1)); + } + + return success ? 
MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE; +} + +enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp) +{ + return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BKSV, + hdcp->auth.msg.hdcp1.bksv, + sizeof(hdcp->auth.msg.hdcp1.bksv)); +} + +enum mod_hdcp_status mod_hdcp_read_bcaps(struct mod_hdcp *hdcp) +{ + return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BCAPS, + &hdcp->auth.msg.hdcp1.bcaps, + sizeof(hdcp->auth.msg.hdcp1.bcaps)); +} + +enum mod_hdcp_status mod_hdcp_read_bstatus(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BSTATUS, + (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus, + 1); + else + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BSTATUS, + (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus, + sizeof(hdcp->auth.msg.hdcp1.bstatus)); + return status; +} + +enum mod_hdcp_status mod_hdcp_read_r0p(struct mod_hdcp *hdcp) +{ + return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_RI_R0, + (uint8_t *)&hdcp->auth.msg.hdcp1.r0p, + sizeof(hdcp->auth.msg.hdcp1.r0p)); +} + +/* special case, reading repeatedly at the same address, don't use read() */ +enum mod_hdcp_status mod_hdcp_read_ksvlist(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = read_repeatedly(hdcp, MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO, + hdcp->auth.msg.hdcp1.ksvlist, + hdcp->auth.msg.hdcp1.ksvlist_size, + KSV_READ_SIZE); + else + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO, + (uint8_t *)&hdcp->auth.msg.hdcp1.ksvlist, + hdcp->auth.msg.hdcp1.ksvlist_size); + return status; +} + +enum mod_hdcp_status mod_hdcp_read_vp(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_0, + &hdcp->auth.msg.hdcp1.vp[0], 4); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_1, + &hdcp->auth.msg.hdcp1.vp[4], 4); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_2, + &hdcp->auth.msg.hdcp1.vp[8], 4); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_3, + &hdcp->auth.msg.hdcp1.vp[12], 4); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_4, + &hdcp->auth.msg.hdcp1.vp[16], 4); +out: + return status; +} + +enum mod_hdcp_status mod_hdcp_read_binfo(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BINFO, + (uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp, + sizeof(hdcp->auth.msg.hdcp1.binfo_dp)); + else + status = MOD_HDCP_STATUS_INVALID_OPERATION; + + return status; +} + +enum mod_hdcp_status mod_hdcp_write_aksv(struct mod_hdcp *hdcp) +{ + return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKSV, + hdcp->auth.msg.hdcp1.aksv, + sizeof(hdcp->auth.msg.hdcp1.aksv)); +} + +enum mod_hdcp_status mod_hdcp_write_ainfo(struct mod_hdcp *hdcp) +{ + return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AINFO, + &hdcp->auth.msg.hdcp1.ainfo, + sizeof(hdcp->auth.msg.hdcp1.ainfo)); +} + +enum mod_hdcp_status mod_hdcp_write_an(struct mod_hdcp *hdcp) +{ + return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AN, + hdcp->auth.msg.hdcp1.an, + sizeof(hdcp->auth.msg.hdcp1.an)); +} diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c new file mode 100644 index 000000000000..3982ced5f969 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c @@ -0,0 +1,163 @@ +/* + * 
Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#include "hdcp.h" + +void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size, + uint8_t *buf, uint32_t buf_size) +{ + const uint8_t bytes_per_line = 16, + byte_size = 3, + newline_size = 1, + terminator_size = 1; + uint32_t line_count = msg_size / bytes_per_line, + trailing_bytes = msg_size % bytes_per_line; + uint32_t target_size = (byte_size * bytes_per_line + newline_size) * line_count + + byte_size * trailing_bytes + newline_size + terminator_size; + uint32_t buf_pos = 0; + uint32_t i = 0; + + if (buf_size >= target_size) { + for (i = 0; i < msg_size; i++) { + if (i % bytes_per_line == 0) + buf[buf_pos++] = '\n'; + sprintf(&buf[buf_pos], "%02X ", msg[i]); + buf_pos += byte_size; + } + buf[buf_pos++] = '\0'; + } +} + +char *mod_hdcp_status_to_str(int32_t status) +{ + switch (status) { + case MOD_HDCP_STATUS_SUCCESS: + return "MOD_HDCP_STATUS_SUCCESS"; + case MOD_HDCP_STATUS_FAILURE: + return "MOD_HDCP_STATUS_FAILURE"; + case MOD_HDCP_STATUS_RESET_NEEDED: + return "MOD_HDCP_STATUS_RESET_NEEDED"; + case MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND: + return "MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND"; + case MOD_HDCP_STATUS_DISPLAY_NOT_FOUND: + return "MOD_HDCP_STATUS_DISPLAY_NOT_FOUND"; + case MOD_HDCP_STATUS_INVALID_STATE: + return "MOD_HDCP_STATUS_INVALID_STATE"; + case MOD_HDCP_STATUS_NOT_IMPLEMENTED: + return "MOD_HDCP_STATUS_NOT_IMPLEMENTED"; + case MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE: + return "MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE"; + case MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE: + return "MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE"; + case MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE: + return "MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE"; + case MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE: + return "MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER: + return "MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER"; + case MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE: + return "MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE"; + case MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING: + return 
"MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING"; + case MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY: + return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY"; + case MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION: + return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION"; + case MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED: + return "MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED"; + case MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_INVALID_BKSV: + return "MOD_HDCP_STATUS_HDCP1_INVALID_BKSV"; + case MOD_HDCP_STATUS_DDC_FAILURE: + return "MOD_HDCP_STATUS_DDC_FAILURE"; + case MOD_HDCP_STATUS_INVALID_OPERATION: + return "MOD_HDCP_STATUS_INVALID_OPERATION"; + default: + return "MOD_HDCP_STATUS_UNKNOWN"; + } +} + +char *mod_hdcp_state_id_to_str(int32_t id) +{ + switch (id) { + case HDCP_UNINITIALIZED: + return "HDCP_UNINITIALIZED"; + case HDCP_INITIALIZED: + return "HDCP_INITIALIZED"; + case HDCP_CP_NOT_DESIRED: + return "HDCP_CP_NOT_DESIRED"; + case H1_A0_WAIT_FOR_ACTIVE_RX: + return "H1_A0_WAIT_FOR_ACTIVE_RX"; + case H1_A1_EXCHANGE_KSVS: + return "H1_A1_EXCHANGE_KSVS"; + case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER: + return "H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER"; + case H1_A45_AUTHENTICATED: + return "H1_A45_AUTHENTICATED"; + case H1_A8_WAIT_FOR_READY: + return "H1_A8_WAIT_FOR_READY"; + case H1_A9_READ_KSV_LIST: + return "H1_A9_READ_KSV_LIST"; + case D1_A0_DETERMINE_RX_HDCP_CAPABLE: + return "D1_A0_DETERMINE_RX_HDCP_CAPABLE"; + case D1_A1_EXCHANGE_KSVS: + return "D1_A1_EXCHANGE_KSVS"; + case D1_A23_WAIT_FOR_R0_PRIME: + return "D1_A23_WAIT_FOR_R0_PRIME"; + case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER: + return "D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER"; + case D1_A4_AUTHENTICATED: + return "D1_A4_AUTHENTICATED"; + case D1_A6_WAIT_FOR_READY: + return "D1_A6_WAIT_FOR_READY"; + case D1_A7_READ_KSV_LIST: + return "D1_A7_READ_KSV_LIST"; + default: + return "UNKNOWN_STATE_ID"; + }; +} + diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h new file mode 100644 index 000000000000..2fd0e0a893ef --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h @@ -0,0 +1,139 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef MOD_HDCP_LOG_H_ +#define MOD_HDCP_LOG_H_ + +#ifdef CONFIG_DRM_AMD_DC_HDCP +#define HDCP_LOG_ERR(hdcp, ...) DRM_ERROR(__VA_ARGS__) +#define HDCP_LOG_VER(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__) +#define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__) +#define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__) +#define HDCP_LOG_DDC(hdcp, ...) pr_debug("[HDCP_DDC]:"__VA_ARGS__) +#endif + +/* default logs */ +#define HDCP_ERROR_TRACE(hdcp, status) \ + HDCP_LOG_ERR(hdcp, \ + "[Link %d] ERROR %s IN STATE %s", \ + hdcp->config.index, \ + mod_hdcp_status_to_str(status), \ + mod_hdcp_state_id_to_str(hdcp->state.id)) +#define HDCP_HDCP1_ENABLED_TRACE(hdcp, displayIndex) \ + HDCP_LOG_VER(hdcp, \ + "[Link %d] HDCP 1.4 enabled on display %d", \ + hdcp->config.index, displayIndex) +/* state machine logs */ +#define HDCP_REMOVE_DISPLAY_TRACE(hdcp, displayIndex) \ + HDCP_LOG_FSM(hdcp, \ + "[Link %d] HDCP_REMOVE_DISPLAY index %d", \ + hdcp->config.index, displayIndex) +#define HDCP_INPUT_PASS_TRACE(hdcp, str) \ + HDCP_LOG_FSM(hdcp, \ + "[Link %d]\tPASS %s", \ + hdcp->config.index, str) +#define HDCP_INPUT_FAIL_TRACE(hdcp, str) \ + HDCP_LOG_FSM(hdcp, \ + "[Link %d]\tFAIL %s", \ + hdcp->config.index, str) +#define HDCP_NEXT_STATE_TRACE(hdcp, id, output) do { \ + if (output->watchdog_timer_needed) \ + HDCP_LOG_FSM(hdcp, \ + "[Link %d] > %s with %d ms watchdog", \ + hdcp->config.index, \ + mod_hdcp_state_id_to_str(id), output->watchdog_timer_delay); \ + else \ + HDCP_LOG_FSM(hdcp, \ + "[Link %d] > %s", hdcp->config.index, \ + mod_hdcp_state_id_to_str(id)); \ +} while (0) +#define HDCP_TIMEOUT_TRACE(hdcp) \ + HDCP_LOG_FSM(hdcp, "[Link %d] --> TIMEOUT", hdcp->config.index) +#define HDCP_CPIRQ_TRACE(hdcp) \ + HDCP_LOG_FSM(hdcp, "[Link %d] --> CPIRQ", hdcp->config.index) +#define HDCP_EVENT_TRACE(hdcp, event) do { \ + if (event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) \ + HDCP_TIMEOUT_TRACE(hdcp); \ + else if (event == MOD_HDCP_EVENT_CPIRQ) \ + HDCP_CPIRQ_TRACE(hdcp); \ +} while (0) +/* TODO: find some way to tell if logging is off to save time */ +#define HDCP_DDC_READ_TRACE(hdcp, msg_name, msg, msg_size) do { \ + mod_hdcp_dump_binary_message(msg, msg_size, hdcp->buf, \ + sizeof(hdcp->buf)); \ + HDCP_LOG_DDC(hdcp, "[Link %d] Read %s%s", hdcp->config.index, \ + msg_name, hdcp->buf); \ +} while (0) +#define HDCP_DDC_WRITE_TRACE(hdcp, msg_name, msg, msg_size) do { \ + mod_hdcp_dump_binary_message(msg, msg_size, 
hdcp->buf, \ + sizeof(hdcp->buf)); \ + HDCP_LOG_DDC(hdcp, "[Link %d] Write %s%s", \ + hdcp->config.index, msg_name,\ + hdcp->buf); \ +} while (0) +#define HDCP_FULL_DDC_TRACE(hdcp) do { \ + HDCP_DDC_READ_TRACE(hdcp, "BKSV", hdcp->auth.msg.hdcp1.bksv, \ + sizeof(hdcp->auth.msg.hdcp1.bksv)); \ + HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \ + sizeof(hdcp->auth.msg.hdcp1.bcaps)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \ + sizeof(hdcp->auth.msg.hdcp1.an)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \ + sizeof(hdcp->auth.msg.hdcp1.aksv)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "AINFO", &hdcp->auth.msg.hdcp1.ainfo, \ + sizeof(hdcp->auth.msg.hdcp1.ainfo)); \ + HDCP_DDC_READ_TRACE(hdcp, "RI' / R0'", \ + (uint8_t *)&hdcp->auth.msg.hdcp1.r0p, \ + sizeof(hdcp->auth.msg.hdcp1.r0p)); \ + HDCP_DDC_READ_TRACE(hdcp, "BINFO", \ + (uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp, \ + sizeof(hdcp->auth.msg.hdcp1.binfo_dp)); \ + HDCP_DDC_READ_TRACE(hdcp, "KSVLIST", hdcp->auth.msg.hdcp1.ksvlist, \ + hdcp->auth.msg.hdcp1.ksvlist_size); \ + HDCP_DDC_READ_TRACE(hdcp, "V'", hdcp->auth.msg.hdcp1.vp, \ + sizeof(hdcp->auth.msg.hdcp1.vp)); \ +} while (0) +#define HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, i) \ + HDCP_LOG_TOP(hdcp, "[Link %d]\tadd display %d", \ + hdcp->config.index, i) +#define HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, i) \ + HDCP_LOG_TOP(hdcp, "[Link %d]\tremove display %d", \ + hdcp->config.index, i) +#define HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp) \ + HDCP_LOG_TOP(hdcp, "[Link %d]\tdestroy hdcp1 session", \ + hdcp->config.index) +#define HDCP_TOP_RESET_AUTH_TRACE(hdcp) \ + HDCP_LOG_TOP(hdcp, "[Link %d]\treset authentication", hdcp->config.index) +#define HDCP_TOP_RESET_CONN_TRACE(hdcp) \ + HDCP_LOG_TOP(hdcp, "[Link %d]\treset connection", hdcp->config.index) +#define HDCP_TOP_INTERFACE_TRACE(hdcp) do { \ + HDCP_LOG_TOP(hdcp, "\n"); \ + HDCP_LOG_TOP(hdcp, "[Link %d] %s", hdcp->config.index, __func__); \ +} while (0) +#define HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, i) do { \ + HDCP_LOG_TOP(hdcp, "\n"); \ + HDCP_LOG_TOP(hdcp, "[Link %d] %s display %d", hdcp->config.index, __func__, i); \ +} while (0) + +#endif // MOD_HDCP_LOG_H_ diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c new file mode 100644 index 000000000000..646d909bbc37 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -0,0 +1,328 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#define MAX_NUM_DISPLAYS 24 + + +#include "hdcp.h" + +#include "amdgpu.h" +#include "hdcp_psp.h" + +enum mod_hdcp_status mod_hdcp_remove_display_topology(struct mod_hdcp *hdcp) +{ + + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_dtm_shared_memory *dtm_cmd; + struct mod_hdcp_display *display = NULL; + uint8_t i; + + dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { + if (hdcp->connection.displays[i].state == MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED) { + + memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); + + display = &hdcp->connection.displays[i]; + + dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2; + dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index; + dtm_cmd->dtm_in_message.topology_update_v2.is_active = 0; + dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE; + + psp_dtm_invoke(psp, dtm_cmd->cmd_id); + + if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) + return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE; + + display->state = MOD_HDCP_DISPLAY_ACTIVE; + HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index); + } + } + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_add_display_topology(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_dtm_shared_memory *dtm_cmd; + struct mod_hdcp_display *display = NULL; + struct mod_hdcp_link *link = &hdcp->connection.link; + uint8_t i; + + if (!psp->dtm_context.dtm_initialized) { + DRM_ERROR("Failed to add display topology, DTM TA is not initialized."); + return MOD_HDCP_STATUS_FAILURE; + } + + dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { + if (hdcp->connection.displays[i].state == MOD_HDCP_DISPLAY_ACTIVE) { + display = &hdcp->connection.displays[i]; + + memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); + + dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2; + dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index; + dtm_cmd->dtm_in_message.topology_update_v2.is_active = 1; + dtm_cmd->dtm_in_message.topology_update_v2.controller = display->controller; + dtm_cmd->dtm_in_message.topology_update_v2.ddc_line = link->ddc_line; + dtm_cmd->dtm_in_message.topology_update_v2.dig_be = link->dig_be; + dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe; + dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id; + dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version = + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__1_x; + dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE; + + psp_dtm_invoke(psp, dtm_cmd->cmd_id); + + if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) + return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE; + + display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED; + HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index); + } + } + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp) +{ + + struct psp_context *psp = hdcp->config.psp.handle; + struct mod_hdcp_display *display = get_first_added_display(hdcp); + struct ta_hdcp_shared_memory *hdcp_cmd; + + if 
(!psp->hdcp_context.hdcp_initialized) { + DRM_ERROR("Failed to create hdcp session. HDCP TA is not initialized."); + return MOD_HDCP_STATUS_FAILURE; + } + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_create_session.display_handle = display->index; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_CREATE_SESSION; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE; + + hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle; + hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary; + memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary, + sizeof(hdcp->auth.msg.hdcp1.aksv)); + memcpy(hdcp->auth.msg.hdcp1.an, hdcp_cmd->out_msg.hdcp1_create_session.an_primary, + sizeof(hdcp->auth.msg.hdcp1.an)); + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp) +{ + + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_destroy_session.session_handle = hdcp->auth.id; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_DESTROY_SESSION; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE; + + HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp); + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_first_part_authentication.session_handle = hdcp->auth.id; + + memcpy(hdcp_cmd->in_msg.hdcp1_first_part_authentication.bksv_primary, hdcp->auth.msg.hdcp1.bksv, + TA_HDCP__HDCP1_KSV_SIZE); + + hdcp_cmd->in_msg.hdcp1_first_part_authentication.r0_prime_primary = hdcp->auth.msg.hdcp1.r0p; + hdcp_cmd->in_msg.hdcp1_first_part_authentication.bcaps = hdcp->auth.msg.hdcp1.bcaps; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_FIRST_PART_AUTHENTICATION; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE; + + if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status == + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE) { + /* needs second part of authentication */ + hdcp->connection.is_repeater = 1; + } else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status == + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED) { + hdcp->connection.is_repeater = 0; + } else + return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE; + + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + struct mod_hdcp_display *display = get_first_added_display(hdcp); + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct 
ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_enable_encryption.session_handle = hdcp->auth.id; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_ENCRYPTION; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION; + + if (!is_dp_mst_hdcp(hdcp)) { + display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; + HDCP_HDCP1_ENABLED_TRACE(hdcp, display->index); + } + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_second_part_authentication.session_handle = hdcp->auth.id; + + hdcp_cmd->in_msg.hdcp1_second_part_authentication.ksv_list_size = hdcp->auth.msg.hdcp1.ksvlist_size; + memcpy(hdcp_cmd->in_msg.hdcp1_second_part_authentication.ksv_list, hdcp->auth.msg.hdcp1.ksvlist, + hdcp->auth.msg.hdcp1.ksvlist_size); + + memcpy(hdcp_cmd->in_msg.hdcp1_second_part_authentication.v_prime, hdcp->auth.msg.hdcp1.vp, + sizeof(hdcp->auth.msg.hdcp1.vp)); + + hdcp_cmd->in_msg.hdcp1_second_part_authentication.bstatus_binfo = + is_dp_hdcp(hdcp) ? hdcp->auth.msg.hdcp1.binfo_dp : hdcp->auth.msg.hdcp1.bstatus; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_SECOND_PART_AUTHENTICATION; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE; + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp *hdcp) +{ + + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + int i = 0; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { + + if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED || + hdcp->connection.displays[i].adjust.disable) + continue; + + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.session_handle = hdcp->auth.id; + hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE; + + hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; + HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index); + } + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_get_encryption_status.session_handle = hdcp->auth.id; + + hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level = 0; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_GET_ENCRYPTION_STATUS; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return 
MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE; + + return (hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level == 1) + ? MOD_HDCP_STATUS_SUCCESS + : MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp, + enum mod_hdcp_encryption_status *encryption_status) +{ + *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + + if (mod_hdcp_hdcp1_link_maintenance(hdcp) != MOD_HDCP_STATUS_SUCCESS) + return MOD_HDCP_STATUS_FAILURE; + + *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON; + + return MOD_HDCP_STATUS_SUCCESS; +} + diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h new file mode 100644 index 000000000000..986fc07ea9ea --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h @@ -0,0 +1,272 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef MODULES_HDCP_HDCP_PSP_H_ +#define MODULES_HDCP_HDCP_PSP_H_ + +/* + * NOTE: These parameters are a one-to-one copy of the + * parameters required by PSP + */ +enum bgd_security_hdcp_encryption_level { + HDCP_ENCRYPTION_LEVEL__INVALID = 0, + HDCP_ENCRYPTION_LEVEL__OFF, + HDCP_ENCRYPTION_LEVEL__ON +}; + +enum ta_dtm_command { + TA_DTM_COMMAND__UNUSED_1 = 1, + TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2, + TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE +}; + +/* DTM related enumerations */ +/**********************************************************/ + +enum ta_dtm_status { + TA_DTM_STATUS__SUCCESS = 0x00, + TA_DTM_STATUS__GENERIC_FAILURE = 0x01, + TA_DTM_STATUS__INVALID_PARAMETER = 0x02, + TA_DTM_STATUS__NULL_POINTER = 0x3 +}; + +/* input/output structures for DTM commands */ +/**********************************************************/ +/** + * Input structures + */ +enum ta_dtm_hdcp_version_max_supported { + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__NONE = 0, + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__1_x = 10, + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_0 = 20, + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_1 = 21, + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2 = 22, + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_3 = 23 +}; + +struct ta_dtm_topology_update_input_v2 { + /* display handle is unique across the driver and is used to identify a display */ + /* for all security interfaces which reference displays such as HDCP */ + uint32_t display_handle; + uint32_t is_active; + uint32_t is_miracast; + uint32_t controller; + uint32_t ddc_line; + uint32_t dig_be; + uint32_t dig_fe; + uint32_t dp_mst_vcid; + uint32_t is_assr; + uint32_t max_hdcp_supported_version; +}; + +struct ta_dtm_topology_assr_enable { + uint32_t display_topology_dig_be_index; +}; + +/** + * Output structures + */ + +/* No output structures yet */ + +union ta_dtm_cmd_input { + struct ta_dtm_topology_update_input_v2 topology_update_v2; + struct ta_dtm_topology_assr_enable topology_assr_enable; +}; + +union ta_dtm_cmd_output { + uint32_t reserved; +}; + +struct ta_dtm_shared_memory { + uint32_t cmd_id; + uint32_t resp_id; + enum ta_dtm_status dtm_status; + uint32_t reserved; + union ta_dtm_cmd_input dtm_in_message; + union ta_dtm_cmd_output dtm_out_message; +}; + +int psp_cmd_submit_buf(struct psp_context *psp, struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd, + uint64_t fence_mc_addr); + +enum ta_hdcp_command { + TA_HDCP_COMMAND__INITIALIZE, + TA_HDCP_COMMAND__HDCP1_CREATE_SESSION, + TA_HDCP_COMMAND__HDCP1_DESTROY_SESSION, + TA_HDCP_COMMAND__HDCP1_FIRST_PART_AUTHENTICATION, + TA_HDCP_COMMAND__HDCP1_SECOND_PART_AUTHENTICATION, + TA_HDCP_COMMAND__HDCP1_ENABLE_ENCRYPTION, + TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION, + TA_HDCP_COMMAND__HDCP1_GET_ENCRYPTION_STATUS, +}; + + +/* HDCP related enumerations */ +/**********************************************************/ +#define TA_HDCP__INVALID_SESSION 0xFFFF +#define TA_HDCP__HDCP1_AN_SIZE 8 +#define TA_HDCP__HDCP1_KSV_SIZE 5 +#define TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES 127 +#define TA_HDCP__HDCP1_V_PRIME_SIZE 20 + +enum ta_hdcp_status { + TA_HDCP_STATUS__SUCCESS = 0x00, + TA_HDCP_STATUS__GENERIC_FAILURE = 0x01, + TA_HDCP_STATUS__NULL_POINTER = 0x02, + TA_HDCP_STATUS__FAILED_ALLOCATING_SESSION = 0x03, + TA_HDCP_STATUS__FAILED_SETUP_TX = 0x04, + TA_HDCP_STATUS__INVALID_PARAMETER = 0x05, + TA_HDCP_STATUS__VHX_ERROR = 0x06, + TA_HDCP_STATUS__SESSION_NOT_CLOSED_PROPERLY = 0x07, + TA_HDCP_STATUS__SRM_FAILURE = 0x08, + TA_HDCP_STATUS__MST_AUTHENTICATED_ALREADY_STARTED = 0x09, + 
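/* 0x0A - 0x10: HDCP 2.x authentication message failures */ +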
TA_HDCP_STATUS__AKE_SEND_CERT_FAILURE = 0x0A, + TA_HDCP_STATUS__AKE_NO_STORED_KM_FAILURE = 0x0B, + TA_HDCP_STATUS__AKE_SEND_HPRIME_FAILURE = 0x0C, + TA_HDCP_STATUS__LC_SEND_LPRIME_FAILURE = 0x0D, + TA_HDCP_STATUS__SKE_SEND_EKS_FAILURE = 0x0E, + TA_HDCP_STATUS__REPAUTH_SEND_RXIDLIST_FAILURE = 0x0F, + TA_HDCP_STATUS__REPAUTH_STREAM_READY_FAILURE = 0x10, + TA_HDCP_STATUS__ASD_GENERIC_FAILURE = 0x11, + TA_HDCP_STATUS__UNWRAP_SECRET_FAILURE = 0x12, + TA_HDCP_STATUS__ENABLE_ENCR_FAILURE = 0x13, + TA_HDCP_STATUS__DISABLE_ENCR_FAILURE = 0x14, + TA_HDCP_STATUS__NOT_ENOUGH_MEMORY_FAILURE = 0x15, + TA_HDCP_STATUS__UNKNOWN_MESSAGE = 0x16, + TA_HDCP_STATUS__TOO_MANY_STREAM = 0x17 +}; + +enum ta_hdcp_authentication_status { + TA_HDCP_AUTHENTICATION_STATUS__NOT_STARTED = 0x00, + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_FAILED = 0x01, + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE = 0x02, + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_SECOND_PART_FAILED = 0x03, + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED = 0x04, + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_VALIDATION_FAILED = 0x09 +}; + + +/* input/output structures for HDCP commands */ +/**********************************************************/ +struct ta_hdcp_cmd_hdcp1_create_session_input { + uint8_t display_handle; +}; + +struct ta_hdcp_cmd_hdcp1_create_session_output { + uint32_t session_handle; + uint8_t an_primary[TA_HDCP__HDCP1_AN_SIZE]; + uint8_t aksv_primary[TA_HDCP__HDCP1_KSV_SIZE]; + uint8_t ainfo_primary; + uint8_t an_secondary[TA_HDCP__HDCP1_AN_SIZE]; + uint8_t aksv_secondary[TA_HDCP__HDCP1_KSV_SIZE]; + uint8_t ainfo_secondary; +}; + +struct ta_hdcp_cmd_hdcp1_destroy_session_input { + uint32_t session_handle; +}; + +struct ta_hdcp_cmd_hdcp1_first_part_authentication_input { + uint32_t session_handle; + uint8_t bksv_primary[TA_HDCP__HDCP1_KSV_SIZE]; + uint8_t bksv_secondary[TA_HDCP__HDCP1_KSV_SIZE]; + uint8_t bcaps; + uint16_t r0_prime_primary; + uint16_t r0_prime_secondary; +}; + +struct ta_hdcp_cmd_hdcp1_first_part_authentication_output { + enum ta_hdcp_authentication_status authentication_status; +}; + +struct ta_hdcp_cmd_hdcp1_second_part_authentication_input { + uint32_t session_handle; + uint16_t bstatus_binfo; + uint8_t ksv_list[TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES][TA_HDCP__HDCP1_KSV_SIZE]; + uint32_t ksv_list_size; + uint8_t pj_prime; + uint8_t v_prime[TA_HDCP__HDCP1_V_PRIME_SIZE]; +}; + +struct ta_hdcp_cmd_hdcp1_second_part_authentication_output { + enum ta_hdcp_authentication_status authentication_status; +}; + +struct ta_hdcp_cmd_hdcp1_enable_encryption_input { + uint32_t session_handle; +}; + +struct ta_hdcp_cmd_hdcp1_enable_dp_stream_encryption_input { + uint32_t session_handle; + uint32_t display_handle; +}; + +struct ta_hdcp_cmd_hdcp1_get_encryption_status_input { + uint32_t session_handle; +}; + +struct ta_hdcp_cmd_hdcp1_get_encryption_status_output { + uint32_t protection_level; +}; + +/**********************************************************/ +/* Common input structure for HDCP callbacks */ +union ta_hdcp_cmd_input { + struct ta_hdcp_cmd_hdcp1_create_session_input hdcp1_create_session; + struct ta_hdcp_cmd_hdcp1_destroy_session_input hdcp1_destroy_session; + struct ta_hdcp_cmd_hdcp1_first_part_authentication_input hdcp1_first_part_authentication; + struct ta_hdcp_cmd_hdcp1_second_part_authentication_input hdcp1_second_part_authentication; + struct ta_hdcp_cmd_hdcp1_enable_encryption_input hdcp1_enable_encryption; + struct ta_hdcp_cmd_hdcp1_enable_dp_stream_encryption_input 
hdcp1_enable_dp_stream_encryption; + struct ta_hdcp_cmd_hdcp1_get_encryption_status_input hdcp1_get_encryption_status; +}; + +/* Common output structure for HDCP callbacks */ +union ta_hdcp_cmd_output { + struct ta_hdcp_cmd_hdcp1_create_session_output hdcp1_create_session; + struct ta_hdcp_cmd_hdcp1_first_part_authentication_output hdcp1_first_part_authentication; + struct ta_hdcp_cmd_hdcp1_second_part_authentication_output hdcp1_second_part_authentication; + struct ta_hdcp_cmd_hdcp1_get_encryption_status_output hdcp1_get_encryption_status; +}; +/**********************************************************/ + +struct ta_hdcp_shared_memory { + uint32_t cmd_id; + enum ta_hdcp_status hdcp_status; + uint32_t reserved; + union ta_hdcp_cmd_input in_msg; + union ta_hdcp_cmd_output out_msg; +}; + +enum psp_status { + PSP_STATUS__SUCCESS = 0, + PSP_STATUS__ERROR_INVALID_PARAMS, + PSP_STATUS__ERROR_GENERIC, + PSP_STATUS__ERROR_OUT_OF_MEMORY, + PSP_STATUS__ERROR_UNSUPPORTED_FEATURE +}; + +#endif /* MODULES_HDCP_HDCP_PSP_H_ */ diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h index dc187844d10b..dbe7835aabcf 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h @@ -92,6 +92,7 @@ struct mod_vrr_params_btr { uint32_t inserted_duration_in_us; uint32_t frames_to_insert; uint32_t frame_counter; + uint32_t margin_in_us; }; struct mod_vrr_params_fixed_refresh { diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h new file mode 100644 index 000000000000..dea21702edff --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h @@ -0,0 +1,289 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef MOD_HDCP_H_ +#define MOD_HDCP_H_ + +#include "os_types.h" +#include "signal_types.h" + +/* Forward Declarations */ +struct mod_hdcp; + +#define MAX_NUM_OF_DISPLAYS 6 +#define MAX_NUM_OF_ATTEMPTS 4 +#define MAX_NUM_OF_ERROR_TRACE 10 + +/* detailed return status */ +enum mod_hdcp_status { + MOD_HDCP_STATUS_SUCCESS = 0, + MOD_HDCP_STATUS_FAILURE, + MOD_HDCP_STATUS_RESET_NEEDED, + MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND, + MOD_HDCP_STATUS_DISPLAY_NOT_FOUND, + MOD_HDCP_STATUS_INVALID_STATE, + MOD_HDCP_STATUS_NOT_IMPLEMENTED, + MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE, + MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE, + MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE, + MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE, + MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE, + MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE, + MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE, + MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER, + MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE, + MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING, + MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE, + MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY, + MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE, + MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION, + MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE, + MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE, + MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE, + MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE, + MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE, + MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED, + MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE, + MOD_HDCP_STATUS_HDCP1_INVALID_BKSV, + MOD_HDCP_STATUS_DDC_FAILURE, /* TODO: specific errors */ + MOD_HDCP_STATUS_INVALID_OPERATION, + MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE, + MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE, + MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE, + MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE, + MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING, + MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING, + MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING, + MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE, + MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE, + MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE, + MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE, + MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING, + MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE, + MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE, + MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE, + MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY, + MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE, + MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION, + MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING, + MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE, + MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE, + MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST, + MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE, + MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE, +}; + +struct mod_hdcp_displayport { + uint8_t rev; + uint8_t assr_supported; +}; + +struct mod_hdcp_hdmi { + uint8_t reserved; +}; +enum mod_hdcp_operation_mode { + MOD_HDCP_MODE_OFF, + MOD_HDCP_MODE_DEFAULT, + MOD_HDCP_MODE_DP, + MOD_HDCP_MODE_DP_MST +}; + +enum mod_hdcp_display_state { + MOD_HDCP_DISPLAY_INACTIVE = 0, + MOD_HDCP_DISPLAY_ACTIVE, + MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED, + MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED +}; + +struct mod_hdcp_ddc { + void *handle; + struct { + bool (*read_i2c)(void *handle, + uint32_t address, + uint8_t offset, + uint8_t *data, + uint32_t size); + bool (*write_i2c)(void *handle, + uint32_t address, + const uint8_t *data, + uint32_t size); + bool 
(*read_dpcd)(void *handle, + uint32_t address, + uint8_t *data, + uint32_t size); + bool (*write_dpcd)(void *handle, + uint32_t address, + const uint8_t *data, + uint32_t size); + } funcs; +}; + +struct mod_hdcp_psp { + void *handle; + void *funcs; +}; + +struct mod_hdcp_display_adjustment { + uint8_t disable : 1; + uint8_t reserved : 7; +}; + +struct mod_hdcp_link_adjustment_hdcp1 { + uint8_t disable : 1; + uint8_t postpone_encryption : 1; + uint8_t reserved : 6; +}; + +struct mod_hdcp_link_adjustment_hdcp2 { + uint8_t disable : 1; + uint8_t disable_type1 : 1; + uint8_t force_no_stored_km : 1; + uint8_t increase_h_prime_timeout: 1; + uint8_t reserved : 4; +}; + +struct mod_hdcp_link_adjustment { + uint8_t auth_delay; + struct mod_hdcp_link_adjustment_hdcp1 hdcp1; + struct mod_hdcp_link_adjustment_hdcp2 hdcp2; +}; + +struct mod_hdcp_error { + enum mod_hdcp_status status; + uint8_t state_id; +}; + +struct mod_hdcp_trace { + struct mod_hdcp_error errors[MAX_NUM_OF_ERROR_TRACE]; + uint8_t error_count; +}; + +enum mod_hdcp_encryption_status { + MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF = 0, + MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON, + MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON, + MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON +}; + +/* per link events dm has to notify to hdcp module */ +enum mod_hdcp_event { + MOD_HDCP_EVENT_CALLBACK = 0, + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT, + MOD_HDCP_EVENT_CPIRQ +}; + +/* output flags from module requesting timer operations */ +struct mod_hdcp_output { + uint8_t callback_needed; + uint8_t callback_stop; + uint8_t watchdog_timer_needed; + uint8_t watchdog_timer_stop; + uint16_t callback_delay; + uint16_t watchdog_timer_delay; +}; + +/* used to represent per display info */ +struct mod_hdcp_display { + enum mod_hdcp_display_state state; + uint8_t index; + uint8_t controller; + uint8_t dig_fe; + union { + uint8_t vc_id; + }; + struct mod_hdcp_display_adjustment adjust; +}; + +/* used to represent per link info */ +/* in case a link has multiple displays, they share the same link info */ +struct mod_hdcp_link { + enum mod_hdcp_operation_mode mode; + uint8_t dig_be; + uint8_t ddc_line; + union { + struct mod_hdcp_displayport dp; + struct mod_hdcp_hdmi hdmi; + }; + struct mod_hdcp_link_adjustment adjust; +}; + +/* a query structure for a display's hdcp information */ +struct mod_hdcp_display_query { + const struct mod_hdcp_display *display; + const struct mod_hdcp_link *link; + const struct mod_hdcp_trace *trace; + enum mod_hdcp_encryption_status encryption_status; +}; + +/* contains values per on external display configuration change */ +struct mod_hdcp_config { + struct mod_hdcp_psp psp; + struct mod_hdcp_ddc ddc; + uint8_t index; +}; + +struct mod_hdcp; + +/* dm allocates memory of mod_hdcp per dc_link on dm init based on memory size*/ +size_t mod_hdcp_get_memory_size(void); + +/* called per link on link creation */ +enum mod_hdcp_status mod_hdcp_setup(struct mod_hdcp *hdcp, + struct mod_hdcp_config *config); + +/* called per link on link destroy */ +enum mod_hdcp_status mod_hdcp_teardown(struct mod_hdcp *hdcp); + +/* called per display on cp_desired set to true */ +enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp, + struct mod_hdcp_link *link, struct mod_hdcp_display *display, + struct mod_hdcp_output *output); + +/* called per display on cp_desired set to false */ +enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp, + uint8_t index, struct mod_hdcp_output *output); + +/* called to query hdcp information on a specific index */ +enum 
+
+/* used to represent per display info */
+struct mod_hdcp_display {
+	enum mod_hdcp_display_state state;
+	uint8_t index;
+	uint8_t controller;
+	uint8_t dig_fe;
+	union {
+		uint8_t vc_id;
+	};
+	struct mod_hdcp_display_adjustment adjust;
+};
+
+/* used to represent per link info */
+/* in case a link has multiple displays, they share the same link info */
+struct mod_hdcp_link {
+	enum mod_hdcp_operation_mode mode;
+	uint8_t dig_be;
+	uint8_t ddc_line;
+	union {
+		struct mod_hdcp_displayport dp;
+		struct mod_hdcp_hdmi hdmi;
+	};
+	struct mod_hdcp_link_adjustment adjust;
+};
+
+/* a query structure for a display's hdcp information */
+struct mod_hdcp_display_query {
+	const struct mod_hdcp_display *display;
+	const struct mod_hdcp_link *link;
+	const struct mod_hdcp_trace *trace;
+	enum mod_hdcp_encryption_status encryption_status;
+};
+
+/* contains values that change on external display configuration change */
+struct mod_hdcp_config {
+	struct mod_hdcp_psp psp;
+	struct mod_hdcp_ddc ddc;
+	uint8_t index;
+};
+
+struct mod_hdcp;
+
+/* dm allocates memory of mod_hdcp per dc_link on dm init based on this memory size */
+size_t mod_hdcp_get_memory_size(void);
+
+/* called per link on link creation */
+enum mod_hdcp_status mod_hdcp_setup(struct mod_hdcp *hdcp,
+		struct mod_hdcp_config *config);
+
+/* called per link on link destroy */
+enum mod_hdcp_status mod_hdcp_teardown(struct mod_hdcp *hdcp);
+
+/* called per display on cp_desired set to true */
+enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
+		struct mod_hdcp_link *link, struct mod_hdcp_display *display,
+		struct mod_hdcp_output *output);
+
+/* called per display on cp_desired set to false */
+enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
+		uint8_t index, struct mod_hdcp_output *output);
+
+/* called to query hdcp information on a specific index */
+enum mod_hdcp_status mod_hdcp_query_display(struct mod_hdcp *hdcp,
+		uint8_t index, struct mod_hdcp_display_query *query);
+
+/* called per link on connectivity change */
+enum mod_hdcp_status mod_hdcp_reset_connection(struct mod_hdcp *hdcp,
+		struct mod_hdcp_output *output);
+
+/* called per link on events (i.e. callback, watchdog, CP_IRQ) */
+enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp,
+		enum mod_hdcp_event event, struct mod_hdcp_output *output);
+
+/* called to convert enum mod_hdcp_status to c string */
+char *mod_hdcp_status_to_str(int32_t status);
+
+/* called to convert state id to c string */
+char *mod_hdcp_state_id_to_str(int32_t id);
+
+/* called to convert signal type to operation mode */
+enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode(
+		enum signal_type signal);
+#endif /* MOD_HDCP_H_ */
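
[Editor's note] The comments on the prototypes above spell out a per-link lifecycle: one allocation per dc_link at DM init, setup on link creation, add/remove per display as cp_desired toggles, and teardown on link destroy. A condensed sketch of that call order, with status checks trimmed; it assumes kernel context (kzalloc/kfree from <linux/slab.h>) plus this header, and is illustrative rather than the driver's actual code.

	static void sketch_hdcp_lifecycle(struct mod_hdcp_config *config,
			struct mod_hdcp_link *link,
			struct mod_hdcp_display *display)
	{
		struct mod_hdcp_output output = {0};
		struct mod_hdcp_display_query query = {0};
		struct mod_hdcp *hdcp;

		/* dm init: one opaque allocation per dc_link */
		hdcp = kzalloc(mod_hdcp_get_memory_size(), GFP_KERNEL);
		if (!hdcp)
			return;

		mod_hdcp_setup(hdcp, config);		/* link creation */

		/* cp_desired became true for one display on this link */
		mod_hdcp_add_display(hdcp, link, display, &output);
		/* output now carries the timer requests handled in the
		 * earlier sketch */

		/* encryption state can be inspected at any time */
		mod_hdcp_query_display(hdcp, display->index, &query);

		mod_hdcp_reset_connection(hdcp, &output); /* connectivity change */
		mod_hdcp_teardown(hdcp);		  /* link destroy */
		kfree(hdcp);
	}
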
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
index d930bdecb117..ca8ce3c55337 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
@@ -35,4 +35,7 @@ struct mod_vrr_params;
 void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
 		struct dc_info_packet *info_packet);
 
+void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
+		struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue);
+
 #endif
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index d885d642ed7f..db6b08f6d093 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -31,6 +31,7 @@
 #include "dc.h"
 
 #define HDMI_INFOFRAME_TYPE_VENDOR 0x81
+#define HF_VSIF_VERSION 1
 
 // VTEM Byte Offset
 #define VTEM_PB0	0
@@ -395,3 +396,100 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
 
 }
 
+/**
+ *****************************************************************************
+ *  Function: mod_build_hf_vsif_infopacket
+ *
+ *  @brief
+ *	Prepare the HDMI Vendor Specific info frame.
+ *	Follows the HDMI spec to build up the Vendor Specific info frame.
+ *
+ *  @param [in] stream: contains data we may need to construct VSIF (i.e. timing_3d_format, etc.)
+ *  @param [out] info_packet: output structure where to store VSIF
+ *****************************************************************************
+ */
+void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
+		struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue)
+{
+	unsigned int length = 5;
+	bool hdmi_vic_mode = false;
+	uint8_t checksum = 0;
+	uint32_t i = 0;
+	enum dc_timing_3d_format format;
+	bool bALLM = (bool)ALLMEnabled;
+	bool bALLMVal = (bool)ALLMValue;
+
+	info_packet->valid = false;
+	format = stream->timing.timing_3d_format;
+	if (stream->view_format == VIEW_3D_FORMAT_NONE)
+		format = TIMING_3D_FORMAT_NONE;
+
+	if (stream->timing.hdmi_vic != 0
+			&& stream->timing.h_total >= 3840
+			&& stream->timing.v_total >= 2160
+			&& format == TIMING_3D_FORMAT_NONE)
+		hdmi_vic_mode = true;
+
+	if ((format == TIMING_3D_FORMAT_NONE) && !hdmi_vic_mode && !bALLM)
+		return;
+
+	info_packet->sb[1] = 0x03;
+	info_packet->sb[2] = 0x0C;
+	info_packet->sb[3] = 0x00;
+
+	if (bALLM) {
+		info_packet->sb[1] = 0xD8;
+		info_packet->sb[2] = 0x5D;
+		info_packet->sb[3] = 0xC4;
+		info_packet->sb[4] = HF_VSIF_VERSION;
+	}
+
+	if (format != TIMING_3D_FORMAT_NONE)
+		info_packet->sb[4] = (2 << 5);
+
+	else if (hdmi_vic_mode)
+		info_packet->sb[4] = (1 << 5);
+
+	switch (format) {
+	case TIMING_3D_FORMAT_HW_FRAME_PACKING:
+	case TIMING_3D_FORMAT_SW_FRAME_PACKING:
+		info_packet->sb[5] = (0x0 << 4);
+		break;
+
+	case TIMING_3D_FORMAT_SIDE_BY_SIDE:
+	case TIMING_3D_FORMAT_SBS_SW_PACKED:
+		info_packet->sb[5] = (0x8 << 4);
+		length = 6;
+		break;
+
+	case TIMING_3D_FORMAT_TOP_AND_BOTTOM:
+	case TIMING_3D_FORMAT_TB_SW_PACKED:
+		info_packet->sb[5] = (0x6 << 4);
+		break;
+
+	default:
+		break;
+	}
+
+	if (hdmi_vic_mode)
+		info_packet->sb[5] = stream->timing.hdmi_vic;
+
+	info_packet->hb0 = HDMI_INFOFRAME_TYPE_VENDOR;
+	info_packet->hb1 = 0x01;
+	info_packet->hb2 = (uint8_t) (length);
+
+	if (bALLM)
+		info_packet->sb[5] = (info_packet->sb[5] & ~0x02) | (bALLMVal << 1);
+
+	checksum += info_packet->hb0;
+	checksum += info_packet->hb1;
+	checksum += info_packet->hb2;
+
+	for (i = 1; i <= length; i++)
+		checksum += info_packet->sb[i];
+
+	info_packet->sb[0] = (uint8_t) (0x100 - checksum);
+
+	info_packet->valid = true;
+}
+
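
[Editor's note] The checksum lines at the end of the function implement the usual HDMI InfoFrame rule: the header bytes, the payload bytes, and the checksum byte must sum to 0 modulo 256, which is why sb[0] is set to 0x100 minus the running sum. Below is a standalone arithmetic check of that property using the 3D frame-packing values assembled above (hb = {0x81, 0x01, 0x05}, HDMI OUI 0x000C03, sb[4] = 2 << 5); this is illustrative user-space code, not driver code.

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint8_t hb[3] = { 0x81, 0x01, 0x05 };	/* type, version, length */
		uint8_t sb[6] = { 0, 0x03, 0x0C, 0x00, 2 << 5, 0x0 << 4 };
		uint8_t sum = hb[0] + hb[1] + hb[2];

		for (int i = 1; i <= 5; i++)
			sum += sb[i];
		sb[0] = (uint8_t)(0x100 - sum);		/* here: 0x2A */

		/* header + payload + checksum wraps to zero in 8 bits */
		assert((uint8_t)(sum + sb[0]) == 0);
		return 0;
	}
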
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 05e2be856037..4e2f615c3566 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -80,18 +80,18 @@ struct abm_parameters {
 static const struct abm_parameters abm_settings_config0[abm_defines_max_level] = {
 //  min_red  max_red  bright_pos  dark_pos  brightness_gain  contrast  deviation  min_knee  max_knee
-	{0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xE0},
-	{0xff, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xE0},
-	{0xff, 0x40, 0x20, 0x00, 0xff, 0x90, 0x68, 0x40, 0xE0},
-	{0x82, 0x4d, 0x20, 0x00, 0x00, 0x90, 0xb3, 0x70, 0x70},
+	{0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xe0},
+	{0xde, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xdf},
+	{0xb0, 0x50, 0x20, 0x00, 0xc0, 0x88, 0x78, 0x70, 0xa0},
+	{0x82, 0x40, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
 };
 
 static const struct abm_parameters abm_settings_config1[abm_defines_max_level] = {
 //  min_red  max_red  bright_pos  dark_pos  brightness_gain  contrast  deviation  min_knee  max_knee
-	{0xf0, 0xd9, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70},
-	{0xcd, 0xa5, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70},
-	{0x99, 0x65, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70},
-	{0x82, 0x4d, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70},
+	{0xf0, 0xd9, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
+	{0xcd, 0xa5, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
+	{0x99, 0x65, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
+	{0x82, 0x4d, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
 };
 
 static const struct abm_parameters * const abm_settings[] = {
@@ -115,7 +115,7 @@ static const struct abm_parameters * const abm_settings[] = {
 
 /* NOTE: iRAM is 256B in size */
 struct iram_table_v_2 {
 	/* flags */
-	uint16_t flags;							/* 0x00 U16  */
+	uint16_t min_abm_backlight;					/* 0x00 U16  */
 
 	/* parameters for ABM2.0 algorithm */
 	uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL];		/* 0x02 U0.8 */
@@ -140,10 +140,10 @@ struct iram_table_v_2 {
 
 	/* For reading PSR State directly from IRAM */
 	uint8_t psr_state;						/* 0xf0       */
-	uint8_t dmcu_mcp_interface_version;				/* 0xf1       */
-	uint8_t dmcu_abm_feature_version;				/* 0xf2       */
-	uint8_t dmcu_psr_feature_version;				/* 0xf3       */
-	uint16_t dmcu_version;						/* 0xf4       */
+	uint8_t dmcu_mcp_interface_version;			/* 0xf1       */
+	uint8_t dmcu_abm_feature_version;			/* 0xf2       */
+	uint8_t dmcu_psr_feature_version;			/* 0xf3       */
+	uint16_t dmcu_version;					/* 0xf4       */
 	uint8_t dmcu_state;						/* 0xf6       */
 
 	uint16_t blRampReduction;					/* 0xf7       */
@@ -164,42 +164,43 @@ struct iram_table_v_2_2 {
 	uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL];		/* 0x16 U0.8 */
 	uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL];	/* 0x2a U2.6 */
 	uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL];		/* 0x3e U2.6 */
-	uint8_t hybrid_factor[NUM_AGGR_LEVEL];				/* 0x52 U0.8 */
-	uint8_t contrast_factor[NUM_AGGR_LEVEL];			/* 0x56 U0.8 */
-	uint8_t deviation_gain[NUM_AGGR_LEVEL];				/* 0x5a U0.8 */
-	uint8_t iir_curve[NUM_AMBI_LEVEL];				/* 0x5e U0.8 */
-	uint8_t min_knee[NUM_AGGR_LEVEL];				/* 0x63 U0.8 */
-	uint8_t max_knee[NUM_AGGR_LEVEL];				/* 0x67 U0.8 */
-	uint8_t pad[21];						/* 0x6b U0.8 */
+	uint8_t hybrid_factor[NUM_AGGR_LEVEL];			/* 0x52 U0.8 */
+	uint8_t contrast_factor[NUM_AGGR_LEVEL];		/* 0x56 U0.8 */
+	uint8_t deviation_gain[NUM_AGGR_LEVEL];			/* 0x5a U0.8 */
+	uint8_t iir_curve[NUM_AMBI_LEVEL];			/* 0x5e U0.8 */
+	uint8_t min_knee[NUM_AGGR_LEVEL];			/* 0x63 U0.8 */
+	uint8_t max_knee[NUM_AGGR_LEVEL];			/* 0x67 U0.8 */
+	uint16_t min_abm_backlight;				/* 0x6b U16  */
+	uint8_t pad[19];					/* 0x6d U0.8 */
 
 	/* parameters for crgb conversion */
-	uint16_t crgb_thresh[NUM_POWER_FN_SEGS];			/* 0x80 U3.13 */
-	uint16_t crgb_offset[NUM_POWER_FN_SEGS];			/* 0x90 U1.15 */
-	uint16_t crgb_slope[NUM_POWER_FN_SEGS];				/* 0xa0 U4.12 */
+	uint16_t crgb_thresh[NUM_POWER_FN_SEGS];		/* 0x80 U3.13 */
+	uint16_t crgb_offset[NUM_POWER_FN_SEGS];		/* 0x90 U1.15 */
+	uint16_t crgb_slope[NUM_POWER_FN_SEGS];			/* 0xa0 U4.12 */
 
 	/* parameters for custom curve */
 	/* thresholds for brightness --> backlight */
-	uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS];		/* 0xb0 U16.0 */
+	uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS];	/* 0xb0 U16.0 */
 	/* offsets for brightness --> backlight */
-	uint16_t backlight_offsets[NUM_BL_CURVE_SEGS];			/* 0xd0 U16.0 */
+	uint16_t backlight_offsets[NUM_BL_CURVE_SEGS];		/* 0xd0 U16.0 */
 
 	/* For reading PSR State directly from IRAM */
-	uint8_t psr_state;						/* 0xf0       */
-	uint8_t dmcu_mcp_interface_version;				/* 0xf1       */
-	uint8_t dmcu_abm_feature_version;				/* 0xf2       */
-	uint8_t dmcu_psr_feature_version;				/* 0xf3       */
-	uint16_t dmcu_version;						/* 0xf4       */
-	uint8_t dmcu_state;						/* 0xf6       */
-
-	uint8_t dummy1;							/* 0xf7       */
-	uint8_t dummy2;							/* 0xf8       */
-	uint8_t dummy3;							/* 0xf9       */
-	uint8_t dummy4;							/* 0xfa       */
-	uint8_t dummy5;							/* 0xfb       */
-	uint8_t dummy6;							/* 0xfc       */
-	uint8_t dummy7;							/* 0xfd       */
-	uint8_t dummy8;							/* 0xfe       */
-	uint8_t dummy9;							/* 0xff       */
+	uint8_t psr_state;					/* 0xf0       */
+	uint8_t dmcu_mcp_interface_version;			/* 0xf1       */
+	uint8_t dmcu_abm_feature_version;			/* 0xf2       */
+	uint8_t dmcu_psr_feature_version;			/* 0xf3       */
+	uint16_t dmcu_version;					/* 0xf4       */
+	uint8_t dmcu_state;					/* 0xf6       */
+
+	uint8_t dummy1;						/* 0xf7       */
+	uint8_t dummy2;						/* 0xf8       */
+	uint8_t dummy3;						/* 0xf9       */
+	uint8_t dummy4;						/* 0xfa       */
+	uint8_t dummy5;						/* 0xfb       */
+	uint8_t dummy6;						/* 0xfc       */
+	uint8_t dummy7;						/* 0xfd       */
+	uint8_t dummy8;						/* 0xfe       */
+	uint8_t dummy9;						/* 0xff       */
 };
 
 #pragma pack(pop)
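
[Editor's note] The offset comments in this struct are load-bearing: the DMCU consumes it as a fixed 256-byte iRAM image, so the new two-byte min_abm_backlight at 0x6b only fits because pad[] shrinks from 21 to 19 bytes, leaving crgb_thresh still at 0x80. A hypothetical compile-time guard in that spirit is sketched below; the kernel itself would more likely phrase this with BUILD_BUG_ON, and none of it is part of the patch. It assumes the struct is byte-packed (the file wraps it in #pragma pack).

	#include <stddef.h>

	_Static_assert(offsetof(struct iram_table_v_2_2, min_abm_backlight) == 0x6b,
			"min_abm_backlight must sit at iRAM offset 0x6b");
	_Static_assert(offsetof(struct iram_table_v_2_2, crgb_thresh) == 0x80,
			"crgb conversion tables must stay at iRAM offset 0x80");
	_Static_assert(sizeof(struct iram_table_v_2_2) == 256,
			"iRAM image is exactly 256 bytes");
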
@@ -271,7 +272,8 @@ void fill_iram_v_2(struct iram_table_v_2 *ram_table, struct dmcu_iram_parameters
 {
 	unsigned int set = params.set;
 
-	ram_table->flags = 0x0;
+	ram_table->min_abm_backlight =
+		cpu_to_be16(params.min_abm_backlight);
 
 	ram_table->deviation_gain = 0xb3;
 
 	ram_table->blRampReduction =
@@ -445,6 +447,9 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
 
 	ram_table->flags = 0x0;
 
+	ram_table->min_abm_backlight =
+		cpu_to_be16(params.min_abm_backlight);
+
 	ram_table->deviation_gain[0] = 0xb3;
 	ram_table->deviation_gain[1] = 0xa8;
 	ram_table->deviation_gain[2] = 0x98;
@@ -588,6 +593,10 @@ void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
 	unsigned int set = params.set;
 
 	ram_table->flags = 0x0;
+
+	ram_table->min_abm_backlight =
+		cpu_to_be16(params.min_abm_backlight);
+
 	for (i = 0; i < NUM_AGGR_LEVEL; i++) {
 		ram_table->hybrid_factor[i] = abm_settings[set][i].brightness_gain;
 		ram_table->contrast_factor[i] = abm_settings[set][i].contrast_factor;
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index da5df00fedce..e54157026330 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -38,6 +38,7 @@ struct dmcu_iram_parameters {
 	unsigned int backlight_lut_array_size;
 	unsigned int backlight_ramping_reduction;
 	unsigned int backlight_ramping_start;
+	unsigned int min_abm_backlight;
 	unsigned int set;
 };
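
[Editor's note] On the consuming side, the DM fills struct dmcu_iram_parameters before the fill_iram_v_2*() helpers above serialize it, with min_abm_backlight converted by cpu_to_be16() on the way into iRAM. A hypothetical caller sketch follows; all field values are illustrative, memset() comes from the kernel string helpers, and calling fill_iram_v_2_3() directly stands in for whatever entry point the module actually exposes to load iRAM.

	static void sketch_program_abm(struct iram_table_v_2_2 *ram_table)
	{
		struct dmcu_iram_parameters params;

		memset(&params, 0, sizeof(params));
		params.set = 0;			/* selects abm_settings_config0 */
		params.backlight_ramping_start = 0xCCCC;
		params.backlight_ramping_reduction = 0xCCCCCCCC;
		params.min_abm_backlight = 0x28F; /* lowest level ABM may drive */

		/* stores the floor big-endian at iRAM offset 0x6b (v2.2/v2.3) */
		fill_iram_v_2_3(ram_table, params);
	}
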