Diffstat (limited to 'drivers/gpu'): 87 files changed, 1075 insertions, 395 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index a59c07590cee..7dcbac8af9a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -190,6 +190,7 @@ struct amdgpu_job; struct amdgpu_irq_src; struct amdgpu_fpriv; struct amdgpu_bo_va_mapping; +struct amdgpu_atif; enum amdgpu_cp_irq { AMDGPU_CP_IRQ_GFX_EOP = 0, @@ -1269,43 +1270,6 @@ struct amdgpu_vram_scratch { /* * ACPI */ -struct amdgpu_atif_notification_cfg { - bool enabled; - int command_code; -}; - -struct amdgpu_atif_notifications { - bool display_switch; - bool expansion_mode_change; - bool thermal_state; - bool forced_power_state; - bool system_power_state; - bool display_conf_change; - bool px_gfx_switch; - bool brightness_change; - bool dgpu_display_event; -}; - -struct amdgpu_atif_functions { - bool system_params; - bool sbios_requests; - bool select_active_disp; - bool lid_state; - bool get_tv_standard; - bool set_tv_standard; - bool get_panel_expansion_mode; - bool set_panel_expansion_mode; - bool temperature_change; - bool graphics_device_types; -}; - -struct amdgpu_atif { - struct amdgpu_atif_notifications notifications; - struct amdgpu_atif_functions functions; - struct amdgpu_atif_notification_cfg notification_cfg; - struct amdgpu_encoder *encoder_for_bl; -}; - struct amdgpu_atcs_functions { bool get_ext_state; bool pcie_perf_req; @@ -1466,7 +1430,7 @@ struct amdgpu_device { #if defined(CONFIG_DEBUG_FS) struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS]; #endif - struct amdgpu_atif atif; + struct amdgpu_atif *atif; struct amdgpu_atcs atcs; struct mutex srbm_mutex; /* GRBM index mutex. Protects concurrent access to GRBM index */ @@ -1894,6 +1858,12 @@ static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; static inline bool amdgpu_has_atpx(void) { return false; } #endif +#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI) +void *amdgpu_atpx_get_dhandle(void); +#else +static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; } +#endif + /* * KMS */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index f4c474a95875..71efcf38f11b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -57,6 +57,10 @@ #define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8 #define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c #define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68 +#define ACP_BT_PLAY_REGS_START 0x14970 +#define ACP_BT_PLAY_REGS_END 0x14a24 +#define ACP_BT_COMP1_REG_OFFSET 0xac +#define ACP_BT_COMP2_REG_OFFSET 0xa8 #define mmACP_PGFSM_RETAIN_REG 0x51c9 #define mmACP_PGFSM_CONFIG_REG 0x51ca @@ -77,7 +81,7 @@ #define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF #define ACP_TIMEOUT_LOOP 0x000000FF -#define ACP_DEVS 3 +#define ACP_DEVS 4 #define ACP_SRC_ID 162 enum { @@ -316,14 +320,13 @@ static int acp_hw_init(void *handle) if (adev->acp.acp_cell == NULL) return -ENOMEM; - adev->acp.acp_res = kcalloc(4, sizeof(struct resource), GFP_KERNEL); - + adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL); if (adev->acp.acp_res == NULL) { kfree(adev->acp.acp_cell); return -ENOMEM; } - i2s_pdata = kcalloc(2, sizeof(struct i2s_platform_data), GFP_KERNEL); + i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL); if (i2s_pdata == NULL) { kfree(adev->acp.acp_res); kfree(adev->acp.acp_cell); @@ -358,6 +361,20 @@ static int acp_hw_init(void *handle) i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET; i2s_pdata[1].i2s_reg_comp2 = 
ACP_I2S_COMP2_CAP_REG_OFFSET; + i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET; + switch (adev->asic_type) { + case CHIP_STONEY: + i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE; + break; + default: + break; + } + + i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD; + i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000; + i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET; + i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET; + adev->acp.acp_res[0].name = "acp2x_dma"; adev->acp.acp_res[0].flags = IORESOURCE_MEM; adev->acp.acp_res[0].start = acp_base; @@ -373,13 +390,18 @@ static int acp_hw_init(void *handle) adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START; adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END; - adev->acp.acp_res[3].name = "acp2x_dma_irq"; - adev->acp.acp_res[3].flags = IORESOURCE_IRQ; - adev->acp.acp_res[3].start = amdgpu_irq_create_mapping(adev, 162); - adev->acp.acp_res[3].end = adev->acp.acp_res[3].start; + adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap"; + adev->acp.acp_res[3].flags = IORESOURCE_MEM; + adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START; + adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END; + + adev->acp.acp_res[4].name = "acp2x_dma_irq"; + adev->acp.acp_res[4].flags = IORESOURCE_IRQ; + adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, 162); + adev->acp.acp_res[4].end = adev->acp.acp_res[4].start; adev->acp.acp_cell[0].name = "acp_audio_dma"; - adev->acp.acp_cell[0].num_resources = 4; + adev->acp.acp_cell[0].num_resources = 5; adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0]; adev->acp.acp_cell[0].platform_data = &adev->asic_type; adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type); @@ -396,6 +418,12 @@ static int acp_hw_init(void *handle) adev->acp.acp_cell[2].platform_data = &i2s_pdata[1]; adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data); + adev->acp.acp_cell[3].name = "designware-i2s"; + adev->acp.acp_cell[3].num_resources = 1; + adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3]; + adev->acp.acp_cell[3].platform_data = &i2s_pdata[2]; + adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data); + r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, ACP_DEVS); if (r) @@ -451,7 +479,6 @@ static int acp_hw_init(void *handle) val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET); val &= ~ACP_SOFT_RESET__SoftResetAud_MASK; cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val); - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 8fa850a070e0..0d8c3fc6eace 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -34,6 +34,45 @@ #include "amd_acpi.h" #include "atom.h" +struct amdgpu_atif_notification_cfg { + bool enabled; + int command_code; +}; + +struct amdgpu_atif_notifications { + bool display_switch; + bool expansion_mode_change; + bool thermal_state; + bool forced_power_state; + bool system_power_state; + bool display_conf_change; + bool px_gfx_switch; + bool brightness_change; + bool dgpu_display_event; +}; + +struct amdgpu_atif_functions { + bool system_params; + bool sbios_requests; + bool select_active_disp; + bool lid_state; + bool get_tv_standard; + bool set_tv_standard; + bool get_panel_expansion_mode; + bool set_panel_expansion_mode; + bool temperature_change; + bool graphics_device_types; +}; + +struct amdgpu_atif { + acpi_handle handle; + + struct amdgpu_atif_notifications 
notifications; + struct amdgpu_atif_functions functions; + struct amdgpu_atif_notification_cfg notification_cfg; + struct amdgpu_encoder *encoder_for_bl; +}; + /* Call the ATIF method */ /** @@ -46,8 +85,9 @@ * Executes the requested ATIF function (all asics). * Returns a pointer to the acpi output buffer. */ -static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function, - struct acpi_buffer *params) +static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif, + int function, + struct acpi_buffer *params) { acpi_status status; union acpi_object atif_arg_elements[2]; @@ -70,7 +110,8 @@ static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function, atif_arg_elements[1].integer.value = 0; } - status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer); + status = acpi_evaluate_object(atif->handle, NULL, &atif_arg, + &buffer); /* Fail only if calling the method fails and ATIF is supported */ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { @@ -141,15 +182,14 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas * (all asics). * returns 0 on success, error on failure. */ -static int amdgpu_atif_verify_interface(acpi_handle handle, - struct amdgpu_atif *atif) +static int amdgpu_atif_verify_interface(struct amdgpu_atif *atif) { union acpi_object *info; struct atif_verify_interface output; size_t size; int err = 0; - info = amdgpu_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL); + info = amdgpu_atif_call(atif, ATIF_FUNCTION_VERIFY_INTERFACE, NULL); if (!info) return -EIO; @@ -176,6 +216,35 @@ out: return err; } +static acpi_handle amdgpu_atif_probe_handle(acpi_handle dhandle) +{ + acpi_handle handle = NULL; + char acpi_method_name[255] = { 0 }; + struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name }; + acpi_status status; + + /* For PX/HG systems, ATIF and ATPX are in the iGPU's namespace, on dGPU only + * systems, ATIF is in the dGPU's namespace. + */ + status = acpi_get_handle(dhandle, "ATIF", &handle); + if (ACPI_SUCCESS(status)) + goto out; + + if (amdgpu_has_atpx()) { + status = acpi_get_handle(amdgpu_atpx_get_dhandle(), "ATIF", + &handle); + if (ACPI_SUCCESS(status)) + goto out; + } + + DRM_DEBUG_DRIVER("No ATIF handle found\n"); + return NULL; +out: + acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); + DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name); + return handle; +} + /** * amdgpu_atif_get_notification_params - determine notify configuration * @@ -188,15 +257,16 @@ out: * where n is specified in the result if a notifier is used. * Returns 0 on success, error on failure. */ -static int amdgpu_atif_get_notification_params(acpi_handle handle, - struct amdgpu_atif_notification_cfg *n) +static int amdgpu_atif_get_notification_params(struct amdgpu_atif *atif) { union acpi_object *info; + struct amdgpu_atif_notification_cfg *n = &atif->notification_cfg; struct atif_system_params params; size_t size; int err = 0; - info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL); + info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, + NULL); if (!info) { err = -EIO; goto out; @@ -250,14 +320,15 @@ out: * (all asics). * Returns 0 on success, error on failure. 
*/ -static int amdgpu_atif_get_sbios_requests(acpi_handle handle, - struct atif_sbios_requests *req) +static int amdgpu_atif_get_sbios_requests(struct amdgpu_atif *atif, + struct atif_sbios_requests *req) { union acpi_object *info; size_t size; int count = 0; - info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL); + info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, + NULL); if (!info) return -EIO; @@ -290,11 +361,10 @@ out: * Returns NOTIFY code */ static int amdgpu_atif_handler(struct amdgpu_device *adev, - struct acpi_bus_event *event) + struct acpi_bus_event *event) { - struct amdgpu_atif *atif = &adev->atif; + struct amdgpu_atif *atif = adev->atif; struct atif_sbios_requests req; - acpi_handle handle; int count; DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n", @@ -303,14 +373,14 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev, if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0) return NOTIFY_DONE; - if (!atif->notification_cfg.enabled || + if (!atif || + !atif->notification_cfg.enabled || event->type != atif->notification_cfg.command_code) /* Not our event */ return NOTIFY_DONE; /* Check pending SBIOS requests */ - handle = ACPI_HANDLE(&adev->pdev->dev); - count = amdgpu_atif_get_sbios_requests(handle, &req); + count = amdgpu_atif_get_sbios_requests(atif, &req); if (count <= 0) return NOTIFY_DONE; @@ -641,8 +711,8 @@ static int amdgpu_acpi_event(struct notifier_block *nb, */ int amdgpu_acpi_init(struct amdgpu_device *adev) { - acpi_handle handle; - struct amdgpu_atif *atif = &adev->atif; + acpi_handle handle, atif_handle; + struct amdgpu_atif *atif; struct amdgpu_atcs *atcs = &adev->atcs; int ret; @@ -658,12 +728,26 @@ int amdgpu_acpi_init(struct amdgpu_device *adev) DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret); } + /* Probe for ATIF, and initialize it if found */ + atif_handle = amdgpu_atif_probe_handle(handle); + if (!atif_handle) + goto out; + + atif = kzalloc(sizeof(*atif), GFP_KERNEL); + if (!atif) { + DRM_WARN("Not enough memory to initialize ATIF\n"); + goto out; + } + atif->handle = atif_handle; + /* Call the ATIF method */ - ret = amdgpu_atif_verify_interface(handle, atif); + ret = amdgpu_atif_verify_interface(atif); if (ret) { DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret); + kfree(atif); goto out; } + adev->atif = atif; if (atif->notifications.brightness_change) { struct drm_encoder *tmp; @@ -693,8 +777,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev) } if (atif->functions.system_params) { - ret = amdgpu_atif_get_notification_params(handle, - &atif->notification_cfg); + ret = amdgpu_atif_get_notification_params(atif); if (ret) { DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n", ret); @@ -720,4 +803,6 @@ out: void amdgpu_acpi_fini(struct amdgpu_device *adev) { unregister_acpi_notifier(&adev->acpi_nb); + if (adev->atif) + kfree(adev->atif); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index daa06e7c5bb7..ca8bf1c9a98e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -90,6 +90,12 @@ bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays; } +#if defined(CONFIG_ACPI) +void *amdgpu_atpx_get_dhandle(void) { + return amdgpu_atpx_priv.dhandle; +} +#endif + /** * amdgpu_atpx_call - call an ATPX method * @@ -569,6 +575,7 @@ static const struct amdgpu_px_quirk 
amdgpu_px_quirk_list[] = { { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX }, + { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0, 0, 0, 0, 0 }, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 82312a7bc6ad..9c85a90be293 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -927,6 +927,10 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, r = amdgpu_bo_vm_update_pte(p); if (r) return r; + + r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv); + if (r) + return r; } return amdgpu_cs_sync_rings(p); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 6e5284e6c028..2c5f093e79e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2747,6 +2747,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) if (r) return r; + /* Make sure IB tests flushed */ + flush_delayed_work(&adev->late_init_work); + /* blat the mode back in */ if (fbcon) { if (!amdgpu_device_has_dc_support(adev)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index f70eeed9ed76..7aaa263ad8c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -231,6 +231,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE) fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY; + /* wrap the last IB with fence */ + if (job && job->uf_addr) { + amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence, + fence_flags | AMDGPU_FENCE_FLAG_64BIT); + } + r = amdgpu_fence_emit(ring, f, fence_flags); if (r) { dev_err(adev->dev, "failed to emit fence (%d)\n", r); @@ -243,12 +249,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, if (ring->funcs->insert_end) ring->funcs->insert_end(ring); - /* wrap the last IB with fence */ - if (job && job->uf_addr) { - amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence, - fence_flags | AMDGPU_FENCE_FLAG_64BIT); - } - if (patch_offset != ~0 && ring->funcs->patch_cond_exec) amdgpu_ring_patch_cond_exec(ring, patch_offset); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index b455da487782..fc818b4d849c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -1882,7 +1882,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) if (!amdgpu_device_has_dc_support(adev)) { mutex_lock(&adev->pm.mutex); amdgpu_dpm_get_active_displays(adev); - adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtcs; + adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count; adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev); adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev); /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. 
*/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index edf16b2b957a..fdcb498f6d19 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -107,6 +107,9 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, return; list_add_tail(&base->bo_list, &bo->va); + if (bo->tbo.type == ttm_bo_type_kernel) + list_move(&base->vm_status, &vm->relocated); + if (bo->tbo.resv != vm->root.base.bo->tbo.resv) return; @@ -468,7 +471,6 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, pt->parent = amdgpu_bo_ref(parent->base.bo); amdgpu_vm_bo_base_init(&entry->base, vm, pt); - list_move(&entry->base.vm_status, &vm->relocated); } if (level < AMDGPU_VM_PTB) { diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 0999c843f623..a71b97519cc0 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -900,7 +900,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = { .emit_frame_size = 4 + /* vce_v3_0_emit_pipeline_sync */ 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */ - .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */ + .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */ .emit_ib = amdgpu_vce_ring_emit_ib, .emit_fence = amdgpu_vce_ring_emit_fence, .test_ring = amdgpu_vce_ring_test_ring, @@ -924,7 +924,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = { 6 + /* vce_v3_0_emit_vm_flush */ 4 + /* vce_v3_0_emit_pipeline_sync */ 6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */ - .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */ + .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */ .emit_ib = vce_v3_0_ring_emit_ib, .emit_vm_flush = vce_v3_0_emit_vm_flush, .emit_pipeline_sync = vce_v3_0_emit_pipeline_sync, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 3a8d6356afc2..770c6b24be0b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2175,6 +2175,46 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing) return color_space; } +static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out) +{ + if (timing_out->display_color_depth <= COLOR_DEPTH_888) + return; + + timing_out->display_color_depth--; +} + +static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out, + const struct drm_display_info *info) +{ + int normalized_clk; + if (timing_out->display_color_depth <= COLOR_DEPTH_888) + return; + do { + normalized_clk = timing_out->pix_clk_khz; + /* YCbCr 4:2:0 requires additional adjustment of 1/2 */ + if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420) + normalized_clk /= 2; + /* Adjusting pix clock following on HDMI spec based on colour depth */ + switch (timing_out->display_color_depth) { + case COLOR_DEPTH_101010: + normalized_clk = (normalized_clk * 30) / 24; + break; + case COLOR_DEPTH_121212: + normalized_clk = (normalized_clk * 36) / 24; + break; + case COLOR_DEPTH_161616: + normalized_clk = (normalized_clk * 48) / 24; + break; + default: + return; + } + if (normalized_clk <= info->max_tmds_clock) + return; + reduce_mode_colour_depth(timing_out); + + } while (timing_out->display_color_depth > COLOR_DEPTH_888); + +} /*****************************************************************************/ static void @@ -2183,6 +2223,7 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state 
*stream, const struct drm_connector *connector) { struct dc_crtc_timing *timing_out = &stream->timing; + const struct drm_display_info *info = &connector->display_info; memset(timing_out, 0, sizeof(struct dc_crtc_timing)); @@ -2191,8 +2232,10 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, timing_out->v_border_top = 0; timing_out->v_border_bottom = 0; /* TODO: un-hardcode */ - - if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444) + if (drm_mode_is_420_only(info, mode_in) + && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) + timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; + else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444) && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; else @@ -2228,6 +2271,8 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, stream->out_transfer_func->type = TF_TYPE_PREDEFINED; stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; + if (stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) + adjust_colour_depth_from_display_info(timing_out, info); } static void fill_audio_info(struct audio_info *audio_info, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 4304d9e408b8..ace9ad578ca0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -83,22 +83,21 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ? I2C_MOT_TRUE : I2C_MOT_FALSE; enum ddc_result res; - uint32_t read_bytes = msg->size; + ssize_t read_bytes; if (WARN_ON(msg->size > 16)) return -E2BIG; switch (msg->request & ~DP_AUX_I2C_MOT) { case DP_AUX_NATIVE_READ: - res = dal_ddc_service_read_dpcd_data( + read_bytes = dal_ddc_service_read_dpcd_data( TO_DM_AUX(aux)->ddc_service, false, I2C_MOT_UNDEF, msg->address, msg->buffer, - msg->size, - &read_bytes); - break; + msg->size); + return read_bytes; case DP_AUX_NATIVE_WRITE: res = dal_ddc_service_write_dpcd_data( TO_DM_AUX(aux)->ddc_service, @@ -109,15 +108,14 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, msg->size); break; case DP_AUX_I2C_READ: - res = dal_ddc_service_read_dpcd_data( + read_bytes = dal_ddc_service_read_dpcd_data( TO_DM_AUX(aux)->ddc_service, true, mot, msg->address, msg->buffer, - msg->size, - &read_bytes); - break; + msg->size); + return read_bytes; case DP_AUX_I2C_WRITE: res = dal_ddc_service_write_dpcd_data( TO_DM_AUX(aux)->ddc_service, @@ -139,9 +137,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, r == DDC_RESULT_SUCESSFULL); #endif - if (res != DDC_RESULT_SUCESSFULL) - return -EIO; - return read_bytes; + return msg->size; } static enum drm_connector_status diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c index 5a3346124a01..5a2e952c5bea 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c @@ -255,8 +255,9 @@ static void pp_to_dc_clock_levels_with_latency( DC_DECODE_PP_CLOCK_TYPE(dc_clk_type)); for (i = 0; i < clk_level_info->num_levels; i++) { - DRM_DEBUG("DM_PPLIB:\t %d\n", pp_clks->data[i].clocks_in_khz); - clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz; + DRM_DEBUG("DM_PPLIB:\t %d in 10kHz\n", 
pp_clks->data[i].clocks_in_khz); + /* translate 10kHz to kHz */ + clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz * 10; clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us; } } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index ae48d603ebd6..49c2face1e7a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -629,14 +629,13 @@ bool dal_ddc_service_query_ddc_data( return ret; } -enum ddc_result dal_ddc_service_read_dpcd_data( +ssize_t dal_ddc_service_read_dpcd_data( struct ddc_service *ddc, bool i2c, enum i2c_mot_mode mot, uint32_t address, uint8_t *data, - uint32_t len, - uint32_t *read) + uint32_t len) { struct aux_payload read_payload = { .i2c_over_aux = i2c, @@ -653,8 +652,6 @@ enum ddc_result dal_ddc_service_read_dpcd_data( .mot = mot }; - *read = 0; - if (len > DEFAULT_AUX_MAX_DATA_SIZE) { BREAK_TO_DEBUGGER(); return DDC_RESULT_FAILED_INVALID_OPERATION; @@ -664,8 +661,7 @@ enum ddc_result dal_ddc_service_read_dpcd_data( ddc->ctx->i2caux, ddc->ddc_pin, &command)) { - *read = command.payloads->length; - return DDC_RESULT_SUCESSFULL; + return (ssize_t)command.payloads->length; } return DDC_RESULT_FAILED_OPERATION; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 7857cb42b3e6..bdd121485cbc 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1767,12 +1767,10 @@ static void dp_test_send_link_training(struct dc_link *link) dp_retrain_link_dp_test(link, &link_settings, false); } -/* TODO hbr2 compliance eye output is unstable +/* TODO Raven hbr2 compliance eye output is unstable * (toggling on and off) with debugger break * This caueses intermittent PHY automation failure * Need to look into the root cause */ -static uint8_t force_tps4_for_cp2520 = 1; - static void dp_test_send_phy_test_pattern(struct dc_link *link) { union phy_test_pattern dpcd_test_pattern; @@ -1832,13 +1830,13 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) break; case PHY_TEST_PATTERN_CP2520_1: /* CP2520 pattern is unstable, temporarily use TPS4 instead */ - test_pattern = (force_tps4_for_cp2520 == 1) ? + test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ? DP_TEST_PATTERN_TRAINING_PATTERN4 : DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE; break; case PHY_TEST_PATTERN_CP2520_2: /* CP2520 pattern is unstable, temporarily use TPS4 instead */ - test_pattern = (force_tps4_for_cp2520 == 1) ? + test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ? 
DP_TEST_PATTERN_TRAINING_PATTERN4 : DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE; break; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 9cfde0ccf4e9..53c71296f3dd 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -76,6 +76,7 @@ struct dc_caps { bool is_apu; bool dual_link_dvi; bool post_blend_color_processing; + bool force_dp_tps4_for_cp2520; }; struct dc_dcc_surface_param { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c index b235a75355b8..bae752332a9f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c @@ -741,6 +741,29 @@ static struct mem_input_funcs dce_mi_funcs = { .mem_input_is_flip_pending = dce_mi_is_flip_pending }; +static struct mem_input_funcs dce112_mi_funcs = { + .mem_input_program_display_marks = dce112_mi_program_display_marks, + .allocate_mem_input = dce_mi_allocate_dmif, + .free_mem_input = dce_mi_free_dmif, + .mem_input_program_surface_flip_and_addr = + dce_mi_program_surface_flip_and_addr, + .mem_input_program_pte_vm = dce_mi_program_pte_vm, + .mem_input_program_surface_config = + dce_mi_program_surface_config, + .mem_input_is_flip_pending = dce_mi_is_flip_pending +}; + +static struct mem_input_funcs dce120_mi_funcs = { + .mem_input_program_display_marks = dce120_mi_program_display_marks, + .allocate_mem_input = dce_mi_allocate_dmif, + .free_mem_input = dce_mi_free_dmif, + .mem_input_program_surface_flip_and_addr = + dce_mi_program_surface_flip_and_addr, + .mem_input_program_pte_vm = dce_mi_program_pte_vm, + .mem_input_program_surface_config = + dce_mi_program_surface_config, + .mem_input_is_flip_pending = dce_mi_is_flip_pending +}; void dce_mem_input_construct( struct dce_mem_input *dce_mi, @@ -769,7 +792,7 @@ void dce112_mem_input_construct( const struct dce_mem_input_mask *mi_mask) { dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask); - dce_mi->base.funcs->mem_input_program_display_marks = dce112_mi_program_display_marks; + dce_mi->base.funcs = &dce112_mi_funcs; } void dce120_mem_input_construct( @@ -781,5 +804,5 @@ void dce120_mem_input_construct( const struct dce_mem_input_mask *mi_mask) { dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask); - dce_mi->base.funcs->mem_input_program_display_marks = dce120_mi_program_display_marks; + dce_mi->base.funcs = &dce120_mi_funcs; } diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index 38ec0d609297..344dd2e69e7c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -678,9 +678,22 @@ bool dce100_validate_bandwidth( struct dc *dc, struct dc_state *context) { - /* TODO implement when needed but for now hardcode max value*/ - context->bw.dce.dispclk_khz = 681000; - context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER; + int i; + bool at_least_one_pipe = false; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (context->res_ctx.pipe_ctx[i].stream) + at_least_one_pipe = true; + } + + if (at_least_one_pipe) { + /* TODO implement when needed but for now hardcode max value*/ + context->bw.dce.dispclk_khz = 681000; + context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER; + } else { + context->bw.dce.dispclk_khz = 0; + context->bw.dce.yclk_khz = 0; + } return true; } diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index df5cb2d1d164..34dac84066a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -1027,6 +1027,8 @@ static bool construct( dc->caps.max_slave_planes = 1; dc->caps.is_apu = true; dc->caps.post_blend_color_processing = false; + /* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */ + dc->caps.force_dp_tps4_for_cp2520 = true; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h index 30b3a08b91be..090b7a8dd67b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h @@ -102,14 +102,13 @@ bool dal_ddc_service_query_ddc_data( uint8_t *read_buf, uint32_t read_size); -enum ddc_result dal_ddc_service_read_dpcd_data( +ssize_t dal_ddc_service_read_dpcd_data( struct ddc_service *ddc, bool i2c, enum i2c_mot_mode mot, uint32_t address, uint8_t *data, - uint32_t len, - uint32_t *read); + uint32_t len); enum ddc_result dal_ddc_service_write_dpcd_data( struct ddc_service *ddc, diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index 092d800b703a..33b4de4ad66e 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -1433,7 +1433,10 @@ struct atom_smc_dpm_info_v4_1 uint8_t acggfxclkspreadpercent; uint16_t acggfxclkspreadfreq; - uint32_t boardreserved[10]; + uint8_t Vr2_I2C_address; + uint8_t padding_vr2[3]; + + uint32_t boardreserved[9]; }; /* diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c index 5325661fedff..d27c1c9df286 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c @@ -512,14 +512,82 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKI return 0; } +static void pp_atomfwctrl_copy_vbios_bootup_values_3_2(struct pp_hwmgr *hwmgr, + struct pp_atomfwctrl_bios_boot_up_values *boot_values, + struct atom_firmware_info_v3_2 *fw_info) +{ + uint32_t frequency = 0; + + boot_values->ulRevision = fw_info->firmware_revision; + boot_values->ulGfxClk = fw_info->bootup_sclk_in10khz; + boot_values->ulUClk = fw_info->bootup_mclk_in10khz; + boot_values->usVddc = fw_info->bootup_vddc_mv; + boot_values->usVddci = fw_info->bootup_vddci_mv; + boot_values->usMvddc = fw_info->bootup_mvddc_mv; + boot_values->usVddGfx = fw_info->bootup_vddgfx_mv; + boot_values->ucCoolingID = fw_info->coolingsolution_id; + boot_values->ulSocClk = 0; + boot_values->ulDCEFClk = 0; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, &frequency)) + boot_values->ulSocClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, &frequency)) + boot_values->ulDCEFClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, &frequency)) + boot_values->ulEClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, &frequency)) + boot_values->ulVClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, &frequency)) + boot_values->ulDClk = frequency; +} + +static void 
pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr, + struct pp_atomfwctrl_bios_boot_up_values *boot_values, + struct atom_firmware_info_v3_1 *fw_info) +{ + uint32_t frequency = 0; + + boot_values->ulRevision = fw_info->firmware_revision; + boot_values->ulGfxClk = fw_info->bootup_sclk_in10khz; + boot_values->ulUClk = fw_info->bootup_mclk_in10khz; + boot_values->usVddc = fw_info->bootup_vddc_mv; + boot_values->usVddci = fw_info->bootup_vddci_mv; + boot_values->usMvddc = fw_info->bootup_mvddc_mv; + boot_values->usVddGfx = fw_info->bootup_vddgfx_mv; + boot_values->ucCoolingID = fw_info->coolingsolution_id; + boot_values->ulSocClk = 0; + boot_values->ulDCEFClk = 0; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency)) + boot_values->ulSocClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency)) + boot_values->ulDCEFClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, &frequency)) + boot_values->ulEClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, &frequency)) + boot_values->ulVClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, &frequency)) + boot_values->ulDClk = frequency; +} + int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr, struct pp_atomfwctrl_bios_boot_up_values *boot_values) { - struct atom_firmware_info_v3_1 *info = NULL; + struct atom_firmware_info_v3_2 *fwinfo_3_2; + struct atom_firmware_info_v3_1 *fwinfo_3_1; + struct atom_common_table_header *info = NULL; uint16_t ix; ix = GetIndexIntoMasterDataTable(firmwareinfo); - info = (struct atom_firmware_info_v3_1 *) + info = (struct atom_common_table_header *) smu_atom_get_data_table(hwmgr->adev, ix, NULL, NULL, NULL); @@ -528,16 +596,18 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr, return -EINVAL; } - boot_values->ulRevision = info->firmware_revision; - boot_values->ulGfxClk = info->bootup_sclk_in10khz; - boot_values->ulUClk = info->bootup_mclk_in10khz; - boot_values->usVddc = info->bootup_vddc_mv; - boot_values->usVddci = info->bootup_vddci_mv; - boot_values->usMvddc = info->bootup_mvddc_mv; - boot_values->usVddGfx = info->bootup_vddgfx_mv; - boot_values->ucCoolingID = info->coolingsolution_id; - boot_values->ulSocClk = 0; - boot_values->ulDCEFClk = 0; + if ((info->format_revision == 3) && (info->content_revision == 2)) { + fwinfo_3_2 = (struct atom_firmware_info_v3_2 *)info; + pp_atomfwctrl_copy_vbios_bootup_values_3_2(hwmgr, + boot_values, fwinfo_3_2); + } else if ((info->format_revision == 3) && (info->content_revision == 1)) { + fwinfo_3_1 = (struct atom_firmware_info_v3_1 *)info; + pp_atomfwctrl_copy_vbios_bootup_values_3_1(hwmgr, + boot_values, fwinfo_3_1); + } else { + pr_info("Fw info table revision does not match!"); + return -EINVAL; + } return 0; } @@ -629,5 +699,7 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr, param->acggfxclkspreadpercent = info->acggfxclkspreadpercent; param->acggfxclkspreadfreq = info->acggfxclkspreadfreq; + param->Vr2_I2C_address = info->Vr2_I2C_address; + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h index fe10aa4db5e6..22e21668c93a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h @@ -136,6 +136,9 @@ struct pp_atomfwctrl_bios_boot_up_values { 
uint32_t ulUClk; uint32_t ulSocClk; uint32_t ulDCEFClk; + uint32_t ulEClk; + uint32_t ulVClk; + uint32_t ulDClk; uint16_t usVddc; uint16_t usVddci; uint16_t usMvddc; @@ -207,6 +210,8 @@ struct pp_atomfwctrl_smc_dpm_parameters uint8_t acggfxclkspreadenabled; uint8_t acggfxclkspreadpercent; uint16_t acggfxclkspreadfreq; + + uint8_t Vr2_I2C_address; }; int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index 782e2098824d..c98e5de777cd 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c @@ -81,6 +81,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr) data->registry_data.disallowed_features = 0x0; data->registry_data.od_state_in_dc_support = 0; + data->registry_data.thermal_support = 1; data->registry_data.skip_baco_hardware = 0; data->registry_data.log_avfs_param = 0; @@ -803,6 +804,9 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr) data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk; data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk; data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID; + data->vbios_boot_state.eclock = boot_up_values.ulEClk; + data->vbios_boot_state.dclock = boot_up_values.ulDClk; + data->vbios_boot_state.vclock = boot_up_values.ulVClk; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, (uint32_t)(data->vbios_boot_state.dcef_clock / 100)); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h index e81ded1ec198..49b38df8c7f2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h @@ -167,6 +167,9 @@ struct vega12_vbios_boot_state { uint32_t mem_clock; uint32_t soc_clock; uint32_t dcef_clock; + uint32_t eclock; + uint32_t dclock; + uint32_t vclock; }; #define DPMTABLE_OD_UPDATE_SCLK 0x00000001 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c index 888ddca902d8..29914700ee82 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c @@ -230,6 +230,8 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF; } + ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address; + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h index 2f8a3b983cce..b08526fd1619 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h @@ -499,7 +499,10 @@ typedef struct { uint8_t AcgGfxclkSpreadPercent; uint16_t AcgGfxclkSpreadFreq; - uint32_t BoardReserved[10]; + uint8_t Vr2_I2C_address; + uint8_t padding_vr2[3]; + + uint32_t BoardReserved[9]; uint32_t MmHubPadding[7]; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index d644a9bb9078..9f407c48d4f0 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -381,6 +381,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr) uint32_t fw_to_load; int result = 0; struct SMU_DRAMData_TOC *toc; + 
uint32_t num_entries = 0; if (!hwmgr->reload_fw) { pr_info("skip reloading...\n"); @@ -422,41 +423,41 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr) } toc = (struct SMU_DRAMData_TOC *)smu_data->header; - toc->num_entries = 0; toc->structure_version = 1; PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]), + UCODE_ID_RLC_G, &toc->entry[num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]), + UCODE_ID_CP_CE, &toc->entry[num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), + UCODE_ID_CP_PFP, &toc->entry[num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), + UCODE_ID_CP_ME, &toc->entry[num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), + UCODE_ID_CP_MEC, &toc->entry[num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), + UCODE_ID_CP_MEC_JT1, &toc->entry[num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), + UCODE_ID_CP_MEC_JT2, &toc->entry[num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), + UCODE_ID_SDMA0, &toc->entry[num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), + UCODE_ID_SDMA1, &toc->entry[num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); if (!hwmgr->not_vf) PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]), + UCODE_ID_MEC_STORAGE, &toc->entry[num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); + toc->num_entries = num_entries; smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr)); smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr)); diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 03eeee11dd5b..42a40daff132 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -519,8 +519,9 @@ static irqreturn_t armada_drm_irq(int irq, void *arg) u32 v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR); /* - * This is rediculous - rather than writing bits to clear, we - * have to set the actual status register value. This is racy. + * Reading the ISR appears to clear bits provided CLEAN_SPU_IRQ_ISR + * is set. Writing has some other effect to acknowledge the IRQ - + * without this, we only get a single IRQ. 
*/ writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR); @@ -1116,16 +1117,22 @@ armada_drm_crtc_set_property(struct drm_crtc *crtc, static int armada_drm_crtc_enable_vblank(struct drm_crtc *crtc) { struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); + unsigned long flags; + spin_lock_irqsave(&dcrtc->irq_lock, flags); armada_drm_crtc_enable_irq(dcrtc, VSYNC_IRQ_ENA); + spin_unlock_irqrestore(&dcrtc->irq_lock, flags); return 0; } static void armada_drm_crtc_disable_vblank(struct drm_crtc *crtc) { struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); + unsigned long flags; + spin_lock_irqsave(&dcrtc->irq_lock, flags); armada_drm_crtc_disable_irq(dcrtc, VSYNC_IRQ_ENA); + spin_unlock_irqrestore(&dcrtc->irq_lock, flags); } static const struct drm_crtc_funcs armada_crtc_funcs = { @@ -1415,6 +1422,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev, CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1); writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1); writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA); + readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR); writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR); ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc", diff --git a/drivers/gpu/drm/armada/armada_hw.h b/drivers/gpu/drm/armada/armada_hw.h index 27319a8335e2..345dc4d0851e 100644 --- a/drivers/gpu/drm/armada/armada_hw.h +++ b/drivers/gpu/drm/armada/armada_hw.h @@ -160,6 +160,7 @@ enum { CFG_ALPHAM_GRA = 0x1 << 16, CFG_ALPHAM_CFG = 0x2 << 16, CFG_ALPHA_MASK = 0xff << 8, +#define CFG_ALPHA(x) ((x) << 8) CFG_PIXCMD_MASK = 0xff, }; diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c index c391955009d6..afa7ded3ae31 100644 --- a/drivers/gpu/drm/armada/armada_overlay.c +++ b/drivers/gpu/drm/armada/armada_overlay.c @@ -28,6 +28,7 @@ struct armada_ovl_plane_properties { uint16_t contrast; uint16_t saturation; uint32_t colorkey_mode; + uint32_t colorkey_enable; }; struct armada_ovl_plane { @@ -54,11 +55,13 @@ armada_ovl_update_attr(struct armada_ovl_plane_properties *prop, writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE); spin_lock_irq(&dcrtc->irq_lock); - armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA, - CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK, - dcrtc->base + LCD_SPU_DMA_CTRL1); - - armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG); + armada_updatel(prop->colorkey_mode, + CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK, + dcrtc->base + LCD_SPU_DMA_CTRL1); + if (dcrtc->variant->has_spu_adv_reg) + armada_updatel(prop->colorkey_enable, + ADV_GRACOLORKEY | ADV_VIDCOLORKEY, + dcrtc->base + LCD_SPU_ADV_REG); spin_unlock_irq(&dcrtc->irq_lock); } @@ -321,8 +324,17 @@ static int armada_ovl_plane_set_property(struct drm_plane *plane, dplane->prop.colorkey_vb |= K2B(val); update_attr = true; } else if (property == priv->colorkey_mode_prop) { - dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK; - dplane->prop.colorkey_mode |= CFG_CKMODE(val); + if (val == CKMODE_DISABLE) { + dplane->prop.colorkey_mode = + CFG_CKMODE(CKMODE_DISABLE) | + CFG_ALPHAM_CFG | CFG_ALPHA(255); + dplane->prop.colorkey_enable = 0; + } else { + dplane->prop.colorkey_mode = + CFG_CKMODE(val) | + CFG_ALPHAM_GRA | CFG_ALPHA(0); + dplane->prop.colorkey_enable = ADV_GRACOLORKEY; + } update_attr = true; } else if (property == priv->brightness_prop) { dplane->prop.brightness = val - 256; @@ -453,7 +465,9 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs) dplane->prop.colorkey_yr = 
0xfefefe00; dplane->prop.colorkey_ug = 0x01010100; dplane->prop.colorkey_vb = 0x01010100; - dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB); + dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB) | + CFG_ALPHAM_GRA | CFG_ALPHA(0); + dplane->prop.colorkey_enable = ADV_GRACOLORKEY; dplane->prop.brightness = 0; dplane->prop.contrast = 0x4000; dplane->prop.saturation = 0x4000; diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 73021b388e12..dd3ff2f2cdce 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -429,6 +429,18 @@ static void adv7511_hpd_work(struct work_struct *work) else status = connector_status_disconnected; + /* + * The bridge resets its registers on unplug. So when we get a plug + * event and we're already supposed to be powered, cycle the bridge to + * restore its state. + */ + if (status == connector_status_connected && + adv7511->connector.status == connector_status_disconnected && + adv7511->powered) { + regcache_mark_dirty(adv7511->regmap); + adv7511_power_on(adv7511); + } + if (adv7511->connector.status != status) { adv7511->connector.status = status; if (status == connector_status_disconnected) diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c index 250effa0e6b8..a6e8f4591e63 100644 --- a/drivers/gpu/drm/bridge/sil-sii8620.c +++ b/drivers/gpu/drm/bridge/sil-sii8620.c @@ -14,6 +14,7 @@ #include <drm/bridge/mhl.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> +#include <drm/drm_encoder.h> #include <linux/clk.h> #include <linux/delay.h> @@ -72,9 +73,7 @@ struct sii8620 { struct regulator_bulk_data supplies[2]; struct mutex lock; /* context lock, protects fields below */ int error; - int pixel_clock; unsigned int use_packed_pixel:1; - int video_code; enum sii8620_mode mode; enum sii8620_sink_type sink_type; u8 cbus_status; @@ -82,7 +81,6 @@ struct sii8620 { u8 xstat[MHL_XDS_SIZE]; u8 devcap[MHL_DCAP_SIZE]; u8 xdevcap[MHL_XDC_SIZE]; - u8 avif[HDMI_INFOFRAME_SIZE(AVI)]; bool feature_complete; bool devcap_read; bool sink_detected; @@ -1017,21 +1015,36 @@ static void sii8620_stop_video(struct sii8620 *ctx) static void sii8620_set_format(struct sii8620 *ctx) { + u8 out_fmt; + if (sii8620_is_mhl3(ctx)) { sii8620_setbits(ctx, REG_M3_P0CTRL, BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED, ctx->use_packed_pixel ? 
~0 : 0); } else { + if (ctx->use_packed_pixel) { + sii8620_write_seq_static(ctx, + REG_VID_MODE, BIT_VID_MODE_M1080P, + REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1, + REG_MHLTX_CTL6, 0x60 + ); + } else { sii8620_write_seq_static(ctx, REG_VID_MODE, 0, REG_MHL_TOP_CTL, 1, REG_MHLTX_CTL6, 0xa0 ); + } } + if (ctx->use_packed_pixel) + out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL); + else + out_fmt = VAL_TPI_FORMAT(RGB, FULL); + sii8620_write_seq(ctx, REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL), - REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL), + REG_TPI_OUTPUT, out_fmt, ); } @@ -1082,18 +1095,28 @@ static ssize_t mhl3_infoframe_pack(struct mhl3_infoframe *frame, return frm_len; } -static void sii8620_set_infoframes(struct sii8620 *ctx) +static void sii8620_set_infoframes(struct sii8620 *ctx, + struct drm_display_mode *mode) { struct mhl3_infoframe mhl_frm; union hdmi_infoframe frm; u8 buf[31]; int ret; + ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi, + mode, + true); + if (ctx->use_packed_pixel) + frm.avi.colorspace = HDMI_COLORSPACE_YUV422; + + if (!ret) + ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf)); + if (ret > 0) + sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3); + if (!sii8620_is_mhl3(ctx) || !ctx->use_packed_pixel) { sii8620_write(ctx, REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI); - sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif + 3, - ARRAY_SIZE(ctx->avif) - 3); sii8620_write(ctx, REG_PKT_FILTER_0, BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT | BIT_PKT_FILTER_0_DROP_MPEG_PKT | @@ -1102,16 +1125,6 @@ static void sii8620_set_infoframes(struct sii8620 *ctx) return; } - ret = hdmi_avi_infoframe_init(&frm.avi); - frm.avi.colorspace = HDMI_COLORSPACE_YUV422; - frm.avi.active_aspect = HDMI_ACTIVE_ASPECT_PICTURE; - frm.avi.picture_aspect = HDMI_PICTURE_ASPECT_16_9; - frm.avi.colorimetry = HDMI_COLORIMETRY_ITU_709; - frm.avi.video_code = ctx->video_code; - if (!ret) - ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf)); - if (ret > 0) - sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3); sii8620_write(ctx, REG_PKT_FILTER_0, BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT | BIT_PKT_FILTER_0_DROP_MPEG_PKT | @@ -1131,6 +1144,9 @@ static void sii8620_set_infoframes(struct sii8620 *ctx) static void sii8620_start_video(struct sii8620 *ctx) { + struct drm_display_mode *mode = + &ctx->bridge.encoder->crtc->state->adjusted_mode; + if (!sii8620_is_mhl3(ctx)) sii8620_stop_video(ctx); @@ -1149,8 +1165,14 @@ static void sii8620_start_video(struct sii8620 *ctx) sii8620_set_format(ctx); if (!sii8620_is_mhl3(ctx)) { - sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), - MHL_DST_LM_CLK_MODE_NORMAL | MHL_DST_LM_PATH_ENABLED); + u8 link_mode = MHL_DST_LM_PATH_ENABLED; + + if (ctx->use_packed_pixel) + link_mode |= MHL_DST_LM_CLK_MODE_PACKED_PIXEL; + else + link_mode |= MHL_DST_LM_CLK_MODE_NORMAL; + + sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), link_mode); sii8620_set_auto_zone(ctx); } else { static const struct { @@ -1167,7 +1189,7 @@ static void sii8620_start_video(struct sii8620 *ctx) MHL_XDS_LINK_RATE_6_0_GBPS, 0x40 }, }; u8 p0_ctrl = BIT_M3_P0CTRL_MHL3_P0_PORT_EN; - int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 2 : 3); + int clk = mode->clock * (ctx->use_packed_pixel ? 
2 : 3); int i; for (i = 0; i < ARRAY_SIZE(clk_spec) - 1; ++i) @@ -1196,7 +1218,7 @@ static void sii8620_start_video(struct sii8620 *ctx) clk_spec[i].link_rate); } - sii8620_set_infoframes(ctx); + sii8620_set_infoframes(ctx, mode); } static void sii8620_disable_hpd(struct sii8620 *ctx) @@ -1661,14 +1683,18 @@ static void sii8620_status_dcap_ready(struct sii8620 *ctx) static void sii8620_status_changed_path(struct sii8620 *ctx) { - if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) { - sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), - MHL_DST_LM_CLK_MODE_NORMAL - | MHL_DST_LM_PATH_ENABLED); - } else { - sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), - MHL_DST_LM_CLK_MODE_NORMAL); - } + u8 link_mode; + + if (ctx->use_packed_pixel) + link_mode = MHL_DST_LM_CLK_MODE_PACKED_PIXEL; + else + link_mode = MHL_DST_LM_CLK_MODE_NORMAL; + + if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) + link_mode |= MHL_DST_LM_PATH_ENABLED; + + sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), + link_mode); } static void sii8620_msc_mr_write_stat(struct sii8620 *ctx) @@ -2242,8 +2268,6 @@ static bool sii8620_mode_fixup(struct drm_bridge *bridge, mutex_lock(&ctx->lock); ctx->use_packed_pixel = sii8620_is_packing_required(ctx, adjusted_mode); - ctx->video_code = drm_match_cea_mode(adjusted_mode); - ctx->pixel_clock = adjusted_mode->clock; mutex_unlock(&ctx->lock); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 130da5195f3b..81e32199d3ef 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1510,8 +1510,9 @@ int drm_atomic_helper_async_check(struct drm_device *dev, { struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; - struct drm_plane *plane; - struct drm_plane_state *old_plane_state, *new_plane_state; + struct drm_plane *plane = NULL; + struct drm_plane_state *old_plane_state = NULL; + struct drm_plane_state *new_plane_state = NULL; const struct drm_plane_helper_funcs *funcs; int i, n_planes = 0; @@ -1527,7 +1528,8 @@ int drm_atomic_helper_async_check(struct drm_device *dev, if (n_planes != 1) return -EINVAL; - if (!new_plane_state->crtc) + if (!new_plane_state->crtc || + old_plane_state->crtc != new_plane_state->crtc) return -EINVAL; funcs = plane->helper_private; diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c index 3c4000facb36..f973d287696a 100644 --- a/drivers/gpu/drm/drm_context.c +++ b/drivers/gpu/drm/drm_context.c @@ -372,7 +372,7 @@ int drm_legacy_addctx(struct drm_device *dev, void *data, ctx->handle = drm_legacy_ctxbitmap_next(dev); } DRM_DEBUG("%d\n", ctx->handle); - if (ctx->handle == -1) { + if (ctx->handle < 0) { DRM_DEBUG("Not enough free contexts.\n"); /* Should this return -EBUSY instead? 
*/ return -ENOMEM; diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c index 50c73c0a20b9..d638c0fb3418 100644 --- a/drivers/gpu/drm/drm_lease.c +++ b/drivers/gpu/drm/drm_lease.c @@ -553,24 +553,13 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, /* Clone the lessor file to create a new file for us */ DRM_DEBUG_LEASE("Allocating lease file\n"); - path_get(&lessor_file->f_path); - lessee_file = alloc_file(&lessor_file->f_path, - lessor_file->f_mode, - fops_get(lessor_file->f_inode->i_fop)); - + lessee_file = filp_clone_open(lessor_file); if (IS_ERR(lessee_file)) { ret = PTR_ERR(lessee_file); goto out_lessee; } - /* Initialize the new file for DRM */ - DRM_DEBUG_LEASE("Initializing the file with %p\n", lessee_file->f_op->open); - ret = lessee_file->f_op->open(lessee_file->f_inode, lessee_file); - if (ret) - goto out_lessee_file; - lessee_priv = lessee_file->private_data; - /* Change the file to a master one */ drm_master_put(&lessee_priv->master); lessee_priv->master = lessee; @@ -588,9 +577,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n"); return 0; -out_lessee_file: - fput(lessee_file); - out_lessee: drm_master_put(&lessee); diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c index 1f8031e30f53..cdb10f885a4f 100644 --- a/drivers/gpu/drm/drm_property.c +++ b/drivers/gpu/drm/drm_property.c @@ -532,7 +532,7 @@ static void drm_property_free_blob(struct kref *kref) drm_mode_object_unregister(blob->dev, &blob->base); - kfree(blob); + kvfree(blob); } /** @@ -559,7 +559,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length, if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob)) return ERR_PTR(-EINVAL); - blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); + blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); if (!blob) return ERR_PTR(-ENOMEM); @@ -576,7 +576,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length, ret = __drm_mode_object_add(dev, &blob->base, DRM_MODE_OBJECT_BLOB, true, drm_property_free_blob); if (ret) { - kfree(blob); + kvfree(blob); return ERR_PTR(-EINVAL); } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index e5013a999147..540b59fb4103 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -631,8 +631,11 @@ static struct platform_driver etnaviv_platform_driver = { }, }; +static struct platform_device *etnaviv_drm; + static int __init etnaviv_init(void) { + struct platform_device *pdev; int ret; struct device_node *np; @@ -644,7 +647,7 @@ static int __init etnaviv_init(void) ret = platform_driver_register(&etnaviv_platform_driver); if (ret != 0) - platform_driver_unregister(&etnaviv_gpu_driver); + goto unregister_gpu_driver; /* * If the DT contains at least one available GPU device, instantiate @@ -653,20 +656,33 @@ static int __init etnaviv_init(void) for_each_compatible_node(np, NULL, "vivante,gc") { if (!of_device_is_available(np)) continue; - - platform_device_register_simple("etnaviv", -1, NULL, 0); + pdev = platform_device_register_simple("etnaviv", -1, + NULL, 0); + if (IS_ERR(pdev)) { + ret = PTR_ERR(pdev); + of_node_put(np); + goto unregister_platform_driver; + } + etnaviv_drm = pdev; of_node_put(np); break; } + return 0; + +unregister_platform_driver: + platform_driver_unregister(&etnaviv_platform_driver); +unregister_gpu_driver: + platform_driver_unregister(&etnaviv_gpu_driver); 
return ret; } module_init(etnaviv_init); static void __exit etnaviv_exit(void) { - platform_driver_unregister(&etnaviv_gpu_driver); + platform_device_unregister(etnaviv_drm); platform_driver_unregister(&etnaviv_platform_driver); + platform_driver_unregister(&etnaviv_gpu_driver); } module_exit(etnaviv_exit); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index dd430f0f8ff5..90f17ff7888e 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -131,6 +131,9 @@ struct etnaviv_gpu { struct work_struct sync_point_work; int sync_point_event; + /* hang detection */ + u32 hangcheck_dma_addr; + void __iomem *mmio; int irq; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index a74eb57af15b..50d6b88cb7aa 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -10,6 +10,7 @@ #include "etnaviv_gem.h" #include "etnaviv_gpu.h" #include "etnaviv_sched.h" +#include "state.xml.h" static int etnaviv_job_hang_limit = 0; module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444); @@ -85,6 +86,29 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job) { struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); struct etnaviv_gpu *gpu = submit->gpu; + u32 dma_addr; + int change; + + /* + * If the GPU managed to complete this jobs fence, the timout is + * spurious. Bail out. + */ + if (fence_completed(gpu, submit->out_fence->seqno)) + return; + + /* + * If the GPU is still making forward progress on the front-end (which + * should never loop) we shift out the timeout to give it a chance to + * finish the job. + */ + dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS); + change = dma_addr - gpu->hangcheck_dma_addr; + if (change < 0 || change > 16) { + gpu->hangcheck_dma_addr = dma_addr; + schedule_delayed_work(&sched_job->work_tdr, + sched_job->sched->timeout); + return; + } /* block scheduler */ kthread_park(gpu->sched.thread); diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 82c95c34447f..e868773ea509 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -265,7 +265,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win, unsigned long val; val = readl(ctx->addr + DECON_WINCONx(win)); - val &= ~WINCONx_BPPMODE_MASK; + val &= WINCONx_ENWIN_F; switch (fb->format->format) { case DRM_FORMAT_XRGB1555: @@ -356,8 +356,8 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc, writel(val, ctx->addr + DECON_VIDOSDxB(win)); } - val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) | - VIDOSD_Wx_ALPHA_B_F(0x0); + val = VIDOSD_Wx_ALPHA_R_F(0xff) | VIDOSD_Wx_ALPHA_G_F(0xff) | + VIDOSD_Wx_ALPHA_B_F(0xff); writel(val, ctx->addr + DECON_VIDOSDxC(win)); val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) | diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index a81b4a5e24a7..ed3cc2989f93 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -420,7 +420,7 @@ err_mode_config_cleanup: err_free_private: kfree(private); err_free_drm: - drm_dev_unref(drm); + drm_dev_put(drm); return ret; } @@ -444,7 +444,7 @@ static void exynos_drm_unbind(struct device *dev) drm->dev_private = NULL; dev_set_drvdata(dev, NULL); - drm_dev_unref(drm); + drm_dev_put(drm); } static const struct 
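The etnaviv_sched_timedout_job() change above only escalates to a full recovery when the front-end DMA address has stopped moving; otherwise it re-arms the timeout. A condensed sketch of that forward-progress heuristic with invented helper names (the real check also tolerates small in-place movement of up to 16 bytes, omitted here):

#include <linux/types.h>

/* true if the job deserves more time, false if it looks genuinely hung */
static bool fe_still_progressing(u32 cur_dma_addr, u32 *last_dma_addr)
{
	/*
	 * The front-end walks the command stream linearly, so a changed
	 * DMA address since the previous check means work is still flowing.
	 */
	if (cur_dma_addr != *last_dma_addr) {
		*last_dma_addr = cur_dma_addr;
		return true;	/* caller re-arms the timeout */
	}

	return false;		/* no movement: treat as a hang */
}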
component_master_ops exynos_drm_ops = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index 7fcc1a7ab1a0..27b7d34d776c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -138,7 +138,7 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, err: while (i--) - drm_gem_object_unreference_unlocked(&exynos_gem[i]->base); + drm_gem_object_put_unlocked(&exynos_gem[i]->base); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 6127ef25acd6..e8d0670bb5f8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -470,17 +470,18 @@ static void fimc_src_set_transf(struct fimc_context *ctx, unsigned int rotation) static void fimc_set_window(struct fimc_context *ctx, struct exynos_drm_ipp_buffer *buf) { + unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0]; u32 cfg, h1, h2, v1, v2; /* cropped image */ h1 = buf->rect.x; - h2 = buf->buf.width - buf->rect.w - buf->rect.x; + h2 = real_width - buf->rect.w - buf->rect.x; v1 = buf->rect.y; v2 = buf->buf.height - buf->rect.h - buf->rect.y; DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n", buf->rect.x, buf->rect.y, buf->rect.w, buf->rect.h, - buf->buf.width, buf->buf.height); + real_width, buf->buf.height); DRM_DEBUG_KMS("h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1, v2); /* @@ -503,12 +504,13 @@ static void fimc_set_window(struct fimc_context *ctx, static void fimc_src_set_size(struct fimc_context *ctx, struct exynos_drm_ipp_buffer *buf) { + unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0]; u32 cfg; - DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height); + DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height); /* original size */ - cfg = (EXYNOS_ORGISIZE_HORIZONTAL(buf->buf.width) | + cfg = (EXYNOS_ORGISIZE_HORIZONTAL(real_width) | EXYNOS_ORGISIZE_VERTICAL(buf->buf.height)); fimc_write(ctx, cfg, EXYNOS_ORGISIZE); @@ -529,7 +531,7 @@ static void fimc_src_set_size(struct fimc_context *ctx, * for now, we support only ITU601 8 bit mode */ cfg = (EXYNOS_CISRCFMT_ITU601_8BIT | - EXYNOS_CISRCFMT_SOURCEHSIZE(buf->buf.width) | + EXYNOS_CISRCFMT_SOURCEHSIZE(real_width) | EXYNOS_CISRCFMT_SOURCEVSIZE(buf->buf.height)); fimc_write(ctx, cfg, EXYNOS_CISRCFMT); @@ -842,12 +844,13 @@ static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc) static void fimc_dst_set_size(struct fimc_context *ctx, struct exynos_drm_ipp_buffer *buf) { + unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0]; u32 cfg, cfg_ext; - DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height); + DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height); /* original size */ - cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(buf->buf.width) | + cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(real_width) | EXYNOS_ORGOSIZE_VERTICAL(buf->buf.height)); fimc_write(ctx, cfg, EXYNOS_ORGOSIZE); diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 6e1494fa71b4..bdf5a7655228 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -143,7 +143,7 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle); /* drop reference from allocate - handle holds it now. 
*/ - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return 0; } @@ -186,7 +186,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev, exynos_gem = to_exynos_gem(obj); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return exynos_gem->size; } @@ -329,13 +329,13 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev, return; } - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); /* * decrease obj->refcount one more time because we has already * increased it at exynos_drm_gem_get_dma_addr(). */ - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); } static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem, @@ -383,7 +383,7 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, args->flags = exynos_gem->flags; args->size = exynos_gem->size; - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 35ac66730563..7ba414b52faa 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -492,21 +492,25 @@ static void gsc_src_set_fmt(struct gsc_context *ctx, u32 fmt) GSC_IN_CHROMA_ORDER_CRCB); break; case DRM_FORMAT_NV21: + cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_2P); + break; case DRM_FORMAT_NV61: - cfg |= (GSC_IN_CHROMA_ORDER_CRCB | - GSC_IN_YUV420_2P); + cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV422_2P); break; case DRM_FORMAT_YUV422: cfg |= GSC_IN_YUV422_3P; break; case DRM_FORMAT_YUV420: + cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_3P); + break; case DRM_FORMAT_YVU420: - cfg |= GSC_IN_YUV420_3P; + cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_3P); break; case DRM_FORMAT_NV12: + cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_2P); + break; case DRM_FORMAT_NV16: - cfg |= (GSC_IN_CHROMA_ORDER_CBCR | - GSC_IN_YUV420_2P); + cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV422_2P); break; } @@ -523,30 +527,30 @@ static void gsc_src_set_transf(struct gsc_context *ctx, unsigned int rotation) switch (degree) { case DRM_MODE_ROTATE_0: - if (rotation & DRM_MODE_REFLECT_Y) - cfg |= GSC_IN_ROT_XFLIP; if (rotation & DRM_MODE_REFLECT_X) + cfg |= GSC_IN_ROT_XFLIP; + if (rotation & DRM_MODE_REFLECT_Y) cfg |= GSC_IN_ROT_YFLIP; break; case DRM_MODE_ROTATE_90: cfg |= GSC_IN_ROT_90; - if (rotation & DRM_MODE_REFLECT_Y) - cfg |= GSC_IN_ROT_XFLIP; if (rotation & DRM_MODE_REFLECT_X) + cfg |= GSC_IN_ROT_XFLIP; + if (rotation & DRM_MODE_REFLECT_Y) cfg |= GSC_IN_ROT_YFLIP; break; case DRM_MODE_ROTATE_180: cfg |= GSC_IN_ROT_180; - if (rotation & DRM_MODE_REFLECT_Y) - cfg &= ~GSC_IN_ROT_XFLIP; if (rotation & DRM_MODE_REFLECT_X) + cfg &= ~GSC_IN_ROT_XFLIP; + if (rotation & DRM_MODE_REFLECT_Y) cfg &= ~GSC_IN_ROT_YFLIP; break; case DRM_MODE_ROTATE_270: cfg |= GSC_IN_ROT_270; - if (rotation & DRM_MODE_REFLECT_Y) - cfg &= ~GSC_IN_ROT_XFLIP; if (rotation & DRM_MODE_REFLECT_X) + cfg &= ~GSC_IN_ROT_XFLIP; + if (rotation & DRM_MODE_REFLECT_Y) cfg &= ~GSC_IN_ROT_YFLIP; break; } @@ -577,7 +581,7 @@ static void gsc_src_set_size(struct gsc_context *ctx, cfg &= ~(GSC_SRCIMG_HEIGHT_MASK | GSC_SRCIMG_WIDTH_MASK); - cfg |= (GSC_SRCIMG_WIDTH(buf->buf.width) | + cfg |= (GSC_SRCIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) | GSC_SRCIMG_HEIGHT(buf->buf.height)); gsc_write(cfg, GSC_SRCIMG_SIZE); @@ -672,18 +676,25 @@ static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt) 
GSC_OUT_CHROMA_ORDER_CRCB); break; case DRM_FORMAT_NV21: - case DRM_FORMAT_NV61: cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P); break; + case DRM_FORMAT_NV61: + cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV422_2P); + break; case DRM_FORMAT_YUV422: + cfg |= GSC_OUT_YUV422_3P; + break; case DRM_FORMAT_YUV420: + cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_3P); + break; case DRM_FORMAT_YVU420: - cfg |= GSC_OUT_YUV420_3P; + cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_3P); break; case DRM_FORMAT_NV12: + cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_2P); + break; case DRM_FORMAT_NV16: - cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | - GSC_OUT_YUV420_2P); + cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV422_2P); break; } @@ -868,7 +879,7 @@ static void gsc_dst_set_size(struct gsc_context *ctx, /* original size */ cfg = gsc_read(GSC_DSTIMG_SIZE); cfg &= ~(GSC_DSTIMG_HEIGHT_MASK | GSC_DSTIMG_WIDTH_MASK); - cfg |= GSC_DSTIMG_WIDTH(buf->buf.width) | + cfg |= GSC_DSTIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) | GSC_DSTIMG_HEIGHT(buf->buf.height); gsc_write(cfg, GSC_DSTIMG_SIZE); @@ -1341,7 +1352,7 @@ static const struct drm_exynos_ipp_limit gsc_5420_limits[] = { }; static const struct drm_exynos_ipp_limit gsc_5433_limits[] = { - { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 2 }, .v = { 16, 8191, 2 }) }, + { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 16 }, .v = { 16, 8191, 2 }) }, { IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 1 }, .v = { 8, 3344, 1 }) }, { IPP_SIZE_LIMIT(ROTATED, .h = { 32, 2047 }, .v = { 8, 8191 }) }, { IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 }, diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c index 26374e58c557..b435db8fc916 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c +++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c @@ -345,27 +345,6 @@ static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf, int ret = 0; int i; - /* basic checks */ - if (buf->buf.width == 0 || buf->buf.height == 0) - return -EINVAL; - buf->format = drm_format_info(buf->buf.fourcc); - for (i = 0; i < buf->format->num_planes; i++) { - unsigned int width = (i == 0) ? buf->buf.width : - DIV_ROUND_UP(buf->buf.width, buf->format->hsub); - - if (buf->buf.pitch[i] == 0) - buf->buf.pitch[i] = width * buf->format->cpp[i]; - if (buf->buf.pitch[i] < width * buf->format->cpp[i]) - return -EINVAL; - if (!buf->buf.gem_id[i]) - return -ENOENT; - } - - /* pitch for additional planes must match */ - if (buf->format->num_planes > 2 && - buf->buf.pitch[1] != buf->buf.pitch[2]) - return -EINVAL; - /* get GEM buffers and check their size */ for (i = 0; i < buf->format->num_planes; i++) { unsigned int height = (i == 0) ? buf->buf.height : @@ -428,7 +407,7 @@ enum drm_ipp_size_id { IPP_LIMIT_BUFFER, IPP_LIMIT_AREA, IPP_LIMIT_ROTATED, IPP_LIMIT_MAX }; -static const enum drm_ipp_size_id limit_id_fallback[IPP_LIMIT_MAX][4] = { +static const enum drm_exynos_ipp_limit_type limit_id_fallback[IPP_LIMIT_MAX][4] = { [IPP_LIMIT_BUFFER] = { DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER }, [IPP_LIMIT_AREA] = { DRM_EXYNOS_IPP_LIMIT_SIZE_AREA, DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER }, @@ -495,12 +474,13 @@ static int exynos_drm_ipp_check_size_limits(struct exynos_drm_ipp_buffer *buf, enum drm_ipp_size_id id = rotate ? 
IPP_LIMIT_ROTATED : IPP_LIMIT_AREA; struct drm_ipp_limit l; struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v; + int real_width = buf->buf.pitch[0] / buf->format->cpp[0]; if (!limits) return 0; __get_size_limit(limits, num_limits, IPP_LIMIT_BUFFER, &l); - if (!__size_limit_check(buf->buf.width, &l.h) || + if (!__size_limit_check(real_width, &l.h) || !__size_limit_check(buf->buf.height, &l.v)) return -EINVAL; @@ -560,10 +540,62 @@ static int exynos_drm_ipp_check_scale_limits( return 0; } +static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task *task, + struct exynos_drm_ipp_buffer *buf, + struct exynos_drm_ipp_buffer *src, + struct exynos_drm_ipp_buffer *dst, + bool rotate, bool swap) +{ + const struct exynos_drm_ipp_formats *fmt; + int ret, i; + + fmt = __ipp_format_get(task->ipp, buf->buf.fourcc, buf->buf.modifier, + buf == src ? DRM_EXYNOS_IPP_FORMAT_SOURCE : + DRM_EXYNOS_IPP_FORMAT_DESTINATION); + if (!fmt) { + DRM_DEBUG_DRIVER("Task %pK: %s format not supported\n", task, + buf == src ? "src" : "dst"); + return -EINVAL; + } + + /* basic checks */ + if (buf->buf.width == 0 || buf->buf.height == 0) + return -EINVAL; + + buf->format = drm_format_info(buf->buf.fourcc); + for (i = 0; i < buf->format->num_planes; i++) { + unsigned int width = (i == 0) ? buf->buf.width : + DIV_ROUND_UP(buf->buf.width, buf->format->hsub); + + if (buf->buf.pitch[i] == 0) + buf->buf.pitch[i] = width * buf->format->cpp[i]; + if (buf->buf.pitch[i] < width * buf->format->cpp[i]) + return -EINVAL; + if (!buf->buf.gem_id[i]) + return -ENOENT; + } + + /* pitch for additional planes must match */ + if (buf->format->num_planes > 2 && + buf->buf.pitch[1] != buf->buf.pitch[2]) + return -EINVAL; + + /* check driver limits */ + ret = exynos_drm_ipp_check_size_limits(buf, fmt->limits, + fmt->num_limits, + rotate, + buf == dst ? 
swap : false); + if (ret) + return ret; + ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect, + fmt->limits, + fmt->num_limits, swap); + return ret; +} + static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task) { struct exynos_drm_ipp *ipp = task->ipp; - const struct exynos_drm_ipp_formats *src_fmt, *dst_fmt; struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst; unsigned int rotation = task->transform.rotation; int ret = 0; @@ -607,37 +639,11 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task) return -EINVAL; } - src_fmt = __ipp_format_get(ipp, src->buf.fourcc, src->buf.modifier, - DRM_EXYNOS_IPP_FORMAT_SOURCE); - if (!src_fmt) { - DRM_DEBUG_DRIVER("Task %pK: src format not supported\n", task); - return -EINVAL; - } - ret = exynos_drm_ipp_check_size_limits(src, src_fmt->limits, - src_fmt->num_limits, - rotate, false); - if (ret) - return ret; - ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect, - src_fmt->limits, - src_fmt->num_limits, swap); + ret = exynos_drm_ipp_check_format(task, src, src, dst, rotate, swap); if (ret) return ret; - dst_fmt = __ipp_format_get(ipp, dst->buf.fourcc, dst->buf.modifier, - DRM_EXYNOS_IPP_FORMAT_DESTINATION); - if (!dst_fmt) { - DRM_DEBUG_DRIVER("Task %pK: dst format not supported\n", task); - return -EINVAL; - } - ret = exynos_drm_ipp_check_size_limits(dst, dst_fmt->limits, - dst_fmt->num_limits, - false, swap); - if (ret) - return ret; - ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect, - dst_fmt->limits, - dst_fmt->num_limits, swap); + ret = exynos_drm_ipp_check_format(task, dst, src, dst, false, swap); if (ret) return ret; diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index 38a2a7f1204b..7098c6d35266 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c @@ -132,7 +132,7 @@ static void exynos_drm_plane_reset(struct drm_plane *plane) if (plane->state) { exynos_state = to_exynos_plane_state(plane->state); if (exynos_state->base.fb) - drm_framebuffer_unreference(exynos_state->base.fb); + drm_framebuffer_put(exynos_state->base.fb); kfree(exynos_state); plane->state = NULL; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index 1a76dd3d52e1..a820a68429b9 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -168,9 +168,9 @@ static void rotator_dst_set_transf(struct rot_context *rot, val &= ~ROT_CONTROL_FLIP_MASK; if (rotation & DRM_MODE_REFLECT_X) - val |= ROT_CONTROL_FLIP_HORIZONTAL; - if (rotation & DRM_MODE_REFLECT_Y) val |= ROT_CONTROL_FLIP_VERTICAL; + if (rotation & DRM_MODE_REFLECT_Y) + val |= ROT_CONTROL_FLIP_HORIZONTAL; val &= ~ROT_CONTROL_ROT_MASK; diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c index 91d4382343d0..0ddb6eec7b11 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c +++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c @@ -30,6 +30,7 @@ #define scaler_write(cfg, offset) writel(cfg, scaler->regs + (offset)) #define SCALER_MAX_CLK 4 #define SCALER_AUTOSUSPEND_DELAY 2000 +#define SCALER_RESET_WAIT_RETRIES 100 struct scaler_data { const char *clk_name[SCALER_MAX_CLK]; @@ -51,9 +52,9 @@ struct scaler_context { static u32 scaler_get_format(u32 drm_fmt) { switch (drm_fmt) { - case DRM_FORMAT_NV21: - return SCALER_YUV420_2P_UV; case DRM_FORMAT_NV12: + return SCALER_YUV420_2P_UV; + case 
DRM_FORMAT_NV21: return SCALER_YUV420_2P_VU; case DRM_FORMAT_YUV420: return SCALER_YUV420_3P; @@ -63,15 +64,15 @@ static u32 scaler_get_format(u32 drm_fmt) return SCALER_YUV422_1P_UYVY; case DRM_FORMAT_YVYU: return SCALER_YUV422_1P_YVYU; - case DRM_FORMAT_NV61: - return SCALER_YUV422_2P_UV; case DRM_FORMAT_NV16: + return SCALER_YUV422_2P_UV; + case DRM_FORMAT_NV61: return SCALER_YUV422_2P_VU; case DRM_FORMAT_YUV422: return SCALER_YUV422_3P; - case DRM_FORMAT_NV42: - return SCALER_YUV444_2P_UV; case DRM_FORMAT_NV24: + return SCALER_YUV444_2P_UV; + case DRM_FORMAT_NV42: return SCALER_YUV444_2P_VU; case DRM_FORMAT_YUV444: return SCALER_YUV444_3P; @@ -100,6 +101,23 @@ static u32 scaler_get_format(u32 drm_fmt) return 0; } +static inline int scaler_reset(struct scaler_context *scaler) +{ + int retry = SCALER_RESET_WAIT_RETRIES; + + scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG); + do { + cpu_relax(); + } while (retry > 1 && + scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET); + do { + cpu_relax(); + scaler_write(1, SCALER_INT_EN); + } while (retry > 0 && scaler_read(SCALER_INT_EN) != 1); + + return retry ? 0 : -EIO; +} + static inline void scaler_enable_int(struct scaler_context *scaler) { u32 val; @@ -354,9 +372,13 @@ static int scaler_commit(struct exynos_drm_ipp *ipp, u32 dst_fmt = scaler_get_format(task->dst.buf.fourcc); struct drm_exynos_ipp_task_rect *dst_pos = &task->dst.rect; - scaler->task = task; - pm_runtime_get_sync(scaler->dev); + if (scaler_reset(scaler)) { + pm_runtime_put(scaler->dev); + return -EIO; + } + + scaler->task = task; scaler_set_src_fmt(scaler, src_fmt); scaler_set_src_base(scaler, &task->src); @@ -394,7 +416,11 @@ static inline void scaler_disable_int(struct scaler_context *scaler) static inline u32 scaler_get_int_status(struct scaler_context *scaler) { - return scaler_read(SCALER_INT_STATUS); + u32 val = scaler_read(SCALER_INT_STATUS); + + scaler_write(val, SCALER_INT_STATUS); + + return val; } static inline int scaler_task_done(u32 val) diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h index 4704a993cbb7..16b39734115c 100644 --- a/drivers/gpu/drm/exynos/regs-gsc.h +++ b/drivers/gpu/drm/exynos/regs-gsc.h @@ -138,6 +138,7 @@ #define GSC_OUT_YUV420_3P (3 << 4) #define GSC_OUT_YUV422_1P (4 << 4) #define GSC_OUT_YUV422_2P (5 << 4) +#define GSC_OUT_YUV422_3P (6 << 4) #define GSC_OUT_YUV444 (7 << 4) #define GSC_OUT_TILE_TYPE_MASK (1 << 2) #define GSC_OUT_TILE_C_16x8 (0 << 2) diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index b51c05d03f14..7f562410f9cf 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -862,6 +862,7 @@ static int cmd_reg_handler(struct parser_exec_state *s, { struct intel_vgpu *vgpu = s->vgpu; struct intel_gvt *gvt = vgpu->gvt; + u32 ctx_sr_ctl; if (offset + 4 > gvt->device_info.mmio_size) { gvt_vgpu_err("%s access to (%x) outside of MMIO range\n", @@ -894,6 +895,28 @@ static int cmd_reg_handler(struct parser_exec_state *s, patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE); } + /* TODO + * Right now only scan LRI command on KBL and in inhibit context. + * It's good enough to support initializing mmio by lri command in + * vgpu inhibit context on KBL. 
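The scaler_reset() helper above busy-waits, with a bounded retry count, for the self-clearing SCALER_CFG_SOFT_RESET bit to drop. Assuming the caller may sleep, the same wait could be expressed with the generic iopoll helper; a hypothetical sketch that reuses only the register names from the hunk:

#include <linux/io.h>
#include <linux/iopoll.h>

static int example_scaler_reset(void __iomem *regs)
{
	u32 val;

	writel(SCALER_CFG_SOFT_RESET, regs + SCALER_CFG);

	/* poll every 10 us, give up after 1 ms; returns 0 or -ETIMEDOUT */
	return readl_poll_timeout(regs + SCALER_CFG, val,
				  !(val & SCALER_CFG_SOFT_RESET), 10, 1000);
}

The driver's open-coded cpu_relax() loop never sleeps, whereas readl_poll_timeout() with a non-zero sleep interval may, so this sketch only fits process context.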
+ */ + if (IS_KABYLAKE(s->vgpu->gvt->dev_priv) && + intel_gvt_mmio_is_in_ctx(gvt, offset) && + !strncmp(cmd, "lri", 3)) { + intel_gvt_hypervisor_read_gpa(s->vgpu, + s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4); + /* check inhibit context */ + if (ctx_sr_ctl & 1) { + u32 data = cmd_val(s, index + 1); + + if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset)) + intel_vgpu_mask_mmio_write(vgpu, + offset, &data, 4); + else + vgpu_vreg(vgpu, offset) = data; + } + } + /* TODO: Update the global mask if this MMIO is a masked-MMIO */ intel_gvt_mmio_set_cmd_accessed(gvt, offset); return 0; diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 6d8180e8d1e2..4b072ade8c38 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -196,7 +196,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= - (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI | (PORT_B << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); if (IS_BROADWELL(dev_priv)) { @@ -216,7 +216,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= - (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI | (PORT_C << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); if (IS_BROADWELL(dev_priv)) { @@ -236,7 +236,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= - (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI | (PORT_D << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); if (IS_BROADWELL(dev_priv)) { diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 23296547da95..4efec8fa6c1d 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -1592,6 +1592,7 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu) vgpu_free_mm(mm); return ERR_PTR(-ENOMEM); } + mm->ggtt_mm.last_partial_off = -1UL; return mm; } @@ -1616,6 +1617,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref) invalidate_ppgtt_mm(mm); } else { vfree(mm->ggtt_mm.virtual_ggtt); + mm->ggtt_mm.last_partial_off = -1UL; } vgpu_free_mm(mm); @@ -1868,6 +1870,62 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, bytes); + /* If ggtt entry size is 8 bytes, and it's split into two 4 bytes + * write, we assume the two 4 bytes writes are consecutive. 
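The GGTT write emulation above has to reassemble one 64-bit PTE from two consecutive 32-bit guest writes before it can validate and shadow the entry. A stripped-down illustration of that bookkeeping, with invented names and none of the out-of-sequence error handling from the hunk:

#include <linux/types.h>

struct pending_half {
	unsigned long off;	/* byte offset of the buffered half, -1UL if none */
	u64 data;		/* value accumulated so far */
};

/* returns true and fills *entry once both halves of one PTE have arrived */
static bool combine_partial_write(struct pending_half *p, unsigned long off,
				  u32 val, u64 *entry)
{
	u64 shifted = (u64)val << ((off & 4) * 8);	/* low or high 32 bits */

	if (p->off == -1UL) {			/* first half: just buffer it */
		p->off = off;
		p->data = shifted;
		return false;
	}

	if ((off ^ p->off) == 4) {		/* other half of the same PTE */
		*entry = p->data | shifted;
		p->off = -1UL;
		return true;
	}

	return false;	/* unexpected sequence; the real code resets state here */
}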
+ * Otherwise, we abort and report error + */ + if (bytes < info->gtt_entry_size) { + if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) { + /* the first partial part*/ + ggtt_mm->ggtt_mm.last_partial_off = off; + ggtt_mm->ggtt_mm.last_partial_data = e.val64; + return 0; + } else if ((g_gtt_index == + (ggtt_mm->ggtt_mm.last_partial_off >> + info->gtt_entry_size_shift)) && + (off != ggtt_mm->ggtt_mm.last_partial_off)) { + /* the second partial part */ + + int last_off = ggtt_mm->ggtt_mm.last_partial_off & + (info->gtt_entry_size - 1); + + memcpy((void *)&e.val64 + last_off, + (void *)&ggtt_mm->ggtt_mm.last_partial_data + + last_off, bytes); + + ggtt_mm->ggtt_mm.last_partial_off = -1UL; + } else { + int last_offset; + + gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n", + ggtt_mm->ggtt_mm.last_partial_off, off, + bytes, info->gtt_entry_size); + + /* set host ggtt entry to scratch page and clear + * virtual ggtt entry as not present for last + * partially write offset + */ + last_offset = ggtt_mm->ggtt_mm.last_partial_off & + (~(info->gtt_entry_size - 1)); + + ggtt_get_host_entry(ggtt_mm, &m, last_offset); + ggtt_invalidate_pte(vgpu, &m); + ops->set_pfn(&m, gvt->gtt.scratch_mfn); + ops->clear_present(&m); + ggtt_set_host_entry(ggtt_mm, &m, last_offset); + ggtt_invalidate(gvt->dev_priv); + + ggtt_get_guest_entry(ggtt_mm, &e, last_offset); + ops->clear_present(&e); + ggtt_set_guest_entry(ggtt_mm, &e, last_offset); + + ggtt_mm->ggtt_mm.last_partial_off = off; + ggtt_mm->ggtt_mm.last_partial_data = e.val64; + + return 0; + } + } + if (ops->test_present(&e)) { gfn = ops->get_pfn(&e); m = e; diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index 3792f2b7f4ff..97e62647418a 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -150,6 +150,8 @@ struct intel_vgpu_mm { } ppgtt_mm; struct { void *virtual_ggtt; + unsigned long last_partial_off; + u64 last_partial_data; } ggtt_mm; }; }; diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 05d15a095310..858967daf04b 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -268,6 +268,8 @@ struct intel_gvt_mmio { #define F_CMD_ACCESSED (1 << 5) /* This reg could be accessed by unaligned address */ #define F_UNALIGN (1 << 6) +/* This reg is saved/restored in context */ +#define F_IN_CTX (1 << 7) struct gvt_mmio_block *mmio_block; unsigned int num_mmio_block; @@ -639,6 +641,33 @@ static inline bool intel_gvt_mmio_has_mode_mask( return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK; } +/** + * intel_gvt_mmio_is_in_ctx - check if a MMIO has in-ctx mask + * @gvt: a GVT device + * @offset: register offset + * + * Returns: + * True if a MMIO has a in-context mask, false if it isn't. 
+ * + */ +static inline bool intel_gvt_mmio_is_in_ctx( + struct intel_gvt *gvt, unsigned int offset) +{ + return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX; +} + +/** + * intel_gvt_mmio_set_in_ctx - mask a MMIO in logical context + * @gvt: a GVT device + * @offset: register offset + * + */ +static inline void intel_gvt_mmio_set_in_ctx( + struct intel_gvt *gvt, unsigned int offset) +{ + gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX; +} + int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu); void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu); int intel_gvt_debugfs_init(struct intel_gvt *gvt); diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index bcbc47a88a70..8f1caacdc78a 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -3046,6 +3046,30 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, } /** + * intel_vgpu_mask_mmio_write - write mask register + * @vgpu: a vGPU + * @offset: access offset + * @p_data: write data buffer + * @bytes: access data length + * + * Returns: + * Zero on success, negative error code if failed. + */ +int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + u32 mask, old_vreg; + + old_vreg = vgpu_vreg(vgpu, offset); + write_vreg(vgpu, offset, p_data, bytes); + mask = vgpu_vreg(vgpu, offset) >> 16; + vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) | + (vgpu_vreg(vgpu, offset) & mask); + + return 0; +} + +/** * intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be * force-nopriv register * diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index 71b620875943..dac8c6401e26 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -98,4 +98,6 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt, int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset, void *pdata, unsigned int bytes, bool is_read); +int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes); #endif diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 0f949554d118..5ca9caf7552a 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -581,7 +581,9 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt) for (mmio = gvt->engine_mmio_list.mmio; i915_mmio_reg_valid(mmio->reg); mmio++) { - if (mmio->in_context) + if (mmio->in_context) { gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++; + intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg); + } } } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 52f3b91d14fd..71e1aa54f774 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -652,6 +652,7 @@ enum intel_sbi_destination { #define QUIRK_BACKLIGHT_PRESENT (1<<3) #define QUIRK_PIN_SWIZZLED_PAGES (1<<5) #define QUIRK_INCREASE_T12_DELAY (1<<6) +#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7) struct intel_fbdev; struct intel_fbc_work; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d44ad7bc1e94..17c5097721e8 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2002,7 +2002,6 @@ int i915_gem_fault(struct vm_fault *vmf) bool write = !!(vmf->flags & FAULT_FLAG_WRITE); struct i915_vma *vma; pgoff_t page_offset; - unsigned int flags; int ret; 
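The new intel_vgpu_mask_mmio_write() above follows the usual i915 masked-register convention: bits 31:16 of the value written are a per-bit write-enable mask for bits 15:0. The core update reduces to a one-liner; an illustrative helper, not a GVT function:

#include <linux/types.h>

/* bits 31:16 of 'wr' select which of bits 15:0 are allowed to change */
static u32 apply_masked_write(u32 old, u32 wr)
{
	u32 mask = wr >> 16;

	return (old & ~mask) | (wr & mask);
}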
/* We don't use vmf->pgoff since that has the fake offset */ @@ -2038,27 +2037,34 @@ int i915_gem_fault(struct vm_fault *vmf) goto err_unlock; } - /* If the object is smaller than a couple of partial vma, it is - * not worth only creating a single partial vma - we may as well - * clear enough space for the full object. - */ - flags = PIN_MAPPABLE; - if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT) - flags |= PIN_NONBLOCK | PIN_NONFAULT; /* Now pin it into the GTT as needed */ - vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags); + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, + PIN_MAPPABLE | + PIN_NONBLOCK | + PIN_NONFAULT); if (IS_ERR(vma)) { /* Use a partial view if it is bigger than available space */ struct i915_ggtt_view view = compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES); + unsigned int flags; - /* Userspace is now writing through an untracked VMA, abandon + flags = PIN_MAPPABLE; + if (view.type == I915_GGTT_VIEW_NORMAL) + flags |= PIN_NONBLOCK; /* avoid warnings for pinned */ + + /* + * Userspace is now writing through an untracked VMA, abandon * all hope that the hardware is able to track future writes. */ obj->frontbuffer_ggtt_origin = ORIGIN_CPU; - vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE); + vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags); + if (IS_ERR(vma) && !view.type) { + flags = PIN_MAPPABLE; + view.type = I915_GGTT_VIEW_PARTIAL; + vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags); + } } if (IS_ERR(vma)) { ret = PTR_ERR(vma); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 4a02747ac658..c16cb025755e 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1998,10 +1998,38 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) { - u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); + u32 hotplug_status = 0, hotplug_status_mask; + int i; + + if (IS_G4X(dev_priv) || + IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + hotplug_status_mask = HOTPLUG_INT_STATUS_G4X | + DP_AUX_CHANNEL_MASK_INT_STATUS_G4X; + else + hotplug_status_mask = HOTPLUG_INT_STATUS_I915; - if (hotplug_status) + /* + * We absolutely have to clear all the pending interrupt + * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port + * interrupt bit won't have an edge, and the i965/g4x + * edge triggered IIR will not notice that an interrupt + * is still pending. 
We can't use PORT_HOTPLUG_EN to + * guarantee the edge as the act of toggling the enable + * bits can itself generate a new hotplug interrupt :( + */ + for (i = 0; i < 10; i++) { + u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask; + + if (tmp == 0) + return hotplug_status; + + hotplug_status |= tmp; I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); + } + + WARN_ONCE(1, + "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", + I915_READ(PORT_HOTPLUG_STAT)); return hotplug_status; } diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 9324d476e0a7..0531c01c3604 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -109,7 +109,7 @@ vma_create(struct drm_i915_gem_object *obj, obj->base.size >> PAGE_SHIFT)); vma->size = view->partial.size; vma->size <<= PAGE_SHIFT; - GEM_BUG_ON(vma->size >= obj->base.size); + GEM_BUG_ON(vma->size > obj->base.size); } else if (view->type == I915_GGTT_VIEW_ROTATED) { vma->size = intel_rotation_info_size(&view->rotated); vma->size <<= PAGE_SHIFT; diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index f4a8598a2d39..fed26d6e4e27 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1782,15 +1782,24 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state) I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); } -void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, - enum transcoder cpu_transcoder) +void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); uint32_t val = I915_READ(reg); val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC); val |= TRANS_DDI_PORT_NONE; I915_WRITE(reg, val); + + if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME && + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { + DRM_DEBUG_KMS("Quirk Increase DDI disabled time\n"); + /* Quirk time at 100ms for reliable operation */ + msleep(100); + } } int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 2cc6faa1daa8..dec0d60921bf 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5809,7 +5809,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state, intel_ddi_set_vc_payload_alloc(intel_crtc->config, false); if (!transcoder_is_dsi(cpu_transcoder)) - intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); + intel_ddi_disable_transcoder_func(old_crtc_state); if (INTEL_GEN(dev_priv) >= 9) skylake_scaler_disable(intel_crtc); @@ -14646,6 +14646,18 @@ static void quirk_increase_t12_delay(struct drm_device *dev) DRM_INFO("Applying T12 delay quirk\n"); } +/* + * GeminiLake NUC HDMI outputs require additional off time + * this allows the onboard retimer to correctly sync to signal + */ +static void quirk_increase_ddi_disabled_time(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + + dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME; + DRM_INFO("Applying Increase DDI Disabled quirk\n"); +} + struct intel_quirk { int device; int subsystem_vendor; @@ -14732,6 +14744,13 @@ static struct intel_quirk intel_quirks[] 
= { /* Toshiba Satellite P50-C-18C */ { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay }, + + /* GeminiLake NUC */ + { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time }, + { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time }, + /* ASRock ITX*/ + { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time }, + { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time }, }; static void intel_init_quirks(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 0361130500a6..b8eefbffc77d 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1388,8 +1388,7 @@ void hsw_fdi_link_train(struct intel_crtc *crtc, void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port); bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe); void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state); -void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, - enum transcoder cpu_transcoder); +void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state); void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state); void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state); struct intel_encoder * diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 56dd7a9a8e25..dd5312b02a8d 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -612,6 +612,9 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) return PTR_ERR(imx_ldb->regmap); } + /* disable LDB by resetting the control register to POR default */ + regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0); + imx_ldb->dev = dev; if (of_id) @@ -652,14 +655,14 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) if (ret || i < 0 || i > 1) return -EINVAL; + if (!of_device_is_available(child)) + continue; + if (dual && i > 0) { dev_warn(dev, "dual-channel mode, ignoring second output\n"); continue; } - if (!of_device_is_available(child)) - continue; - channel = &imx_ldb->channel[i]; channel->ldb = imx_ldb; channel->chno = i; diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c index 501d2d290e9c..70dce544984e 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c @@ -55,6 +55,9 @@ nv04_display_create(struct drm_device *dev) nouveau_display(dev)->init = nv04_display_init; nouveau_display(dev)->fini = nv04_display_fini; + /* Pre-nv50 doesn't support atomic, so don't expose the ioctls */ + dev->driver->driver_features &= ~DRIVER_ATOMIC; + nouveau_hw_save_vga_fonts(dev, 1); nv04_crtc_create(dev, 0); diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index b83465ae7c1b..9bae4db84cfb 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -1585,8 +1585,9 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe) *****************************************************************************/ static void -nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 *interlock) +nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock) { + struct nouveau_drm *drm = nouveau_drm(state->dev); struct nv50_disp *disp = nv50_disp(drm->dev); struct nv50_core *core = disp->core; struct nv50_mstm *mstm; @@ -1618,6 +1619,22 @@ 
nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 *interlock) } static void +nv50_disp_atomic_commit_wndw(struct drm_atomic_state *state, u32 *interlock) +{ + struct drm_plane_state *new_plane_state; + struct drm_plane *plane; + int i; + + for_each_new_plane_in_state(state, plane, new_plane_state, i) { + struct nv50_wndw *wndw = nv50_wndw(plane); + if (interlock[wndw->interlock.type] & wndw->interlock.data) { + if (wndw->func->update) + wndw->func->update(wndw, interlock); + } + } +} + +static void nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; @@ -1684,7 +1701,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) help->disable(encoder); interlock[NV50_DISP_INTERLOCK_CORE] |= 1; if (outp->flush_disable) { - nv50_disp_atomic_commit_core(drm, interlock); + nv50_disp_atomic_commit_wndw(state, interlock); + nv50_disp_atomic_commit_core(state, interlock); memset(interlock, 0x00, sizeof(interlock)); } } @@ -1693,15 +1711,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) /* Flush disable. */ if (interlock[NV50_DISP_INTERLOCK_CORE]) { if (atom->flush_disable) { - for_each_new_plane_in_state(state, plane, new_plane_state, i) { - struct nv50_wndw *wndw = nv50_wndw(plane); - if (interlock[wndw->interlock.type] & wndw->interlock.data) { - if (wndw->func->update) - wndw->func->update(wndw, interlock); - } - } - - nv50_disp_atomic_commit_core(drm, interlock); + nv50_disp_atomic_commit_wndw(state, interlock); + nv50_disp_atomic_commit_core(state, interlock); memset(interlock, 0x00, sizeof(interlock)); } } @@ -1762,18 +1773,14 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) } /* Flush update. */ - for_each_new_plane_in_state(state, plane, new_plane_state, i) { - struct nv50_wndw *wndw = nv50_wndw(plane); - if (interlock[wndw->interlock.type] & wndw->interlock.data) { - if (wndw->func->update) - wndw->func->update(wndw, interlock); - } - } + nv50_disp_atomic_commit_wndw(state, interlock); if (interlock[NV50_DISP_INTERLOCK_CORE]) { if (interlock[NV50_DISP_INTERLOCK_BASE] || + interlock[NV50_DISP_INTERLOCK_OVLY] || + interlock[NV50_DISP_INTERLOCK_WNDW] || !atom->state.legacy_cursor_update) - nv50_disp_atomic_commit_core(drm, interlock); + nv50_disp_atomic_commit_core(state, interlock); else disp->core->func->update(disp->core, interlock, false); } @@ -1871,7 +1878,7 @@ nv50_disp_atomic_commit(struct drm_device *dev, nv50_disp_atomic_commit_tail(state); drm_for_each_crtc(crtc, dev) { - if (crtc->state->enable) { + if (crtc->state->active) { if (!drm->have_disp_power_ref) { drm->have_disp_power_ref = true; return 0; @@ -2119,10 +2126,6 @@ nv50_display_destroy(struct drm_device *dev) kfree(disp); } -MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)"); -static int nouveau_atomic = 0; -module_param_named(atomic, nouveau_atomic, int, 0400); - int nv50_display_create(struct drm_device *dev) { @@ -2147,8 +2150,6 @@ nv50_display_create(struct drm_device *dev) disp->disp = &nouveau_display(dev)->disp; dev->mode_config.funcs = &nv50_disp_func; dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP; - if (nouveau_atomic) - dev->driver->driver_features |= DRIVER_ATOMIC; /* small shared memory area we use for notifiers and semaphores */ ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM, diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index debbbf0fd4bd..408b955e5c39 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ 
b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -267,6 +267,7 @@ nouveau_backlight_init(struct drm_device *dev) struct nouveau_drm *drm = nouveau_drm(dev); struct nvif_device *device = &drm->client.device; struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; INIT_LIST_HEAD(&drm->bl_connectors); @@ -275,7 +276,8 @@ nouveau_backlight_init(struct drm_device *dev) return 0; } - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && connector->connector_type != DRM_MODE_CONNECTOR_eDP) continue; @@ -292,7 +294,7 @@ nouveau_backlight_init(struct drm_device *dev) break; } } - + drm_connector_list_iter_end(&conn_iter); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 7b557c354307..af68eae4c626 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -1208,14 +1208,19 @@ nouveau_connector_create(struct drm_device *dev, int index) struct nouveau_display *disp = nouveau_display(dev); struct nouveau_connector *nv_connector = NULL; struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; int type, ret = 0; bool dummy; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &conn_iter); + nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { nv_connector = nouveau_connector(connector); - if (nv_connector->index == index) + if (nv_connector->index == index) { + drm_connector_list_iter_end(&conn_iter); return connector; + } } + drm_connector_list_iter_end(&conn_iter); nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL); if (!nv_connector) diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h index a4d1a059bd3d..dc7454e7f19a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.h +++ b/drivers/gpu/drm/nouveau/nouveau_connector.h @@ -33,6 +33,7 @@ #include <drm/drm_encoder.h> #include <drm/drm_dp_helper.h> #include "nouveau_crtc.h" +#include "nouveau_encoder.h" struct nvkm_i2c_port; @@ -60,19 +61,46 @@ static inline struct nouveau_connector *nouveau_connector( return container_of(con, struct nouveau_connector, base); } +static inline bool +nouveau_connector_is_mst(struct drm_connector *connector) +{ + const struct nouveau_encoder *nv_encoder; + const struct drm_encoder *encoder; + + if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) + return false; + + nv_encoder = find_encoder(connector, DCB_OUTPUT_ANY); + if (!nv_encoder) + return false; + + encoder = &nv_encoder->base.base; + return encoder->encoder_type == DRM_MODE_ENCODER_DPMST; +} + +#define nouveau_for_each_non_mst_connector_iter(connector, iter) \ + drm_for_each_connector_iter(connector, iter) \ + for_each_if(!nouveau_connector_is_mst(connector)) + static inline struct nouveau_connector * nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc) { struct drm_device *dev = nv_crtc->base.dev; struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + struct nouveau_connector *nv_connector = NULL; struct drm_crtc *crtc = to_drm_crtc(nv_crtc); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (connector->encoder && connector->encoder->crtc == crtc) - return nouveau_connector(connector); + drm_connector_list_iter_begin(dev, 
&conn_iter); + nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { + if (connector->encoder && connector->encoder->crtc == crtc) { + nv_connector = nouveau_connector(connector); + break; + } } + drm_connector_list_iter_end(&conn_iter); - return NULL; + return nv_connector; } struct drm_connector * diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 774b429142bc..ec7861457b84 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -404,6 +404,7 @@ nouveau_display_init(struct drm_device *dev) struct nouveau_display *disp = nouveau_display(dev); struct nouveau_drm *drm = nouveau_drm(dev); struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; int ret; ret = disp->init(dev); @@ -411,10 +412,12 @@ nouveau_display_init(struct drm_device *dev) return ret; /* enable hotplug interrupts */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &conn_iter); + nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { struct nouveau_connector *conn = nouveau_connector(connector); nvif_notify_get(&conn->hpd); } + drm_connector_list_iter_end(&conn_iter); /* enable flip completion events */ nvif_notify_get(&drm->flip); @@ -427,6 +430,7 @@ nouveau_display_fini(struct drm_device *dev, bool suspend) struct nouveau_display *disp = nouveau_display(dev); struct nouveau_drm *drm = nouveau_drm(dev); struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; if (!suspend) { if (drm_drv_uses_atomic_modeset(dev)) @@ -439,10 +443,12 @@ nouveau_display_fini(struct drm_device *dev, bool suspend) nvif_notify_put(&drm->flip); /* disable hotplug interrupts */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &conn_iter); + nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { struct nouveau_connector *conn = nouveau_connector(connector); nvif_notify_put(&conn->hpd); } + drm_connector_list_iter_end(&conn_iter); drm_kms_helper_poll_disable(dev); disp->fini(dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 775443c9af94..f5d3158f0378 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -81,6 +81,10 @@ MODULE_PARM_DESC(modeset, "enable driver (default: auto, " int nouveau_modeset = -1; module_param_named(modeset, nouveau_modeset, int, 0400); +MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)"); +static int nouveau_atomic = 0; +module_param_named(atomic, nouveau_atomic, int, 0400); + MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)"); static int nouveau_runtime_pm = -1; module_param_named(runpm, nouveau_runtime_pm, int, 0400); @@ -509,6 +513,9 @@ static int nouveau_drm_probe(struct pci_dev *pdev, pci_set_master(pdev); + if (nouveau_atomic) + driver_pci.driver_features |= DRIVER_ATOMIC; + ret = drm_get_pci_dev(pdev, pent, &driver_pci); if (ret) { nvkm_device_del(&device); @@ -874,22 +881,11 @@ nouveau_pmops_runtime_resume(struct device *dev) static int nouveau_pmops_runtime_idle(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct drm_device *drm_dev = pci_get_drvdata(pdev); - struct nouveau_drm *drm = nouveau_drm(drm_dev); - struct drm_crtc *crtc; - if (!nouveau_pmops_runtime()) { pm_runtime_forbid(dev); return -EBUSY; } - list_for_each_entry(crtc, 
&drm->dev->mode_config.crtc_list, head) { - if (crtc->enabled) { - DRM_DEBUG_DRIVER("failing to power off - crtc active\n"); - return -EBUSY; - } - } pm_runtime_mark_last_busy(dev); pm_runtime_autosuspend(dev); /* we don't want the main rpm_idle to call suspend - we want to autosuspend */ diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 300daee74209..e6ccafcb9c41 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -616,7 +616,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, struct nouveau_bo *nvbo; uint32_t data; - if (unlikely(r->bo_index > req->nr_buffers)) { + if (unlikely(r->bo_index >= req->nr_buffers)) { NV_PRINTK(err, cli, "reloc bo index invalid\n"); ret = -EINVAL; break; @@ -626,7 +626,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, if (b->presumed.valid) continue; - if (unlikely(r->reloc_bo_index > req->nr_buffers)) { + if (unlikely(r->reloc_bo_index >= req->nr_buffers)) { NV_PRINTK(err, cli, "reloc container bo index invalid\n"); ret = -EINVAL; break; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c index 73b5d46104bd..434d2fc5bb1c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c @@ -140,6 +140,9 @@ nvkm_fb_init(struct nvkm_subdev *subdev) if (fb->func->init) fb->func->init(fb); + if (fb->func->init_remapper) + fb->func->init_remapper(fb); + if (fb->func->init_page) { ret = fb->func->init_page(fb); if (WARN_ON(ret)) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c index dffe1f5e1071..8205ce436b3e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c @@ -37,6 +37,14 @@ gp100_fb_init_unkn(struct nvkm_fb *base) } void +gp100_fb_init_remapper(struct nvkm_fb *fb) +{ + struct nvkm_device *device = fb->subdev.device; + /* Disable address remapper. 
*/ + nvkm_mask(device, 0x100c14, 0x00040000, 0x00000000); +} + +void gp100_fb_init(struct nvkm_fb *base) { struct gf100_fb *fb = gf100_fb(base); @@ -56,6 +64,7 @@ gp100_fb = { .dtor = gf100_fb_dtor, .oneinit = gf100_fb_oneinit, .init = gp100_fb_init, + .init_remapper = gp100_fb_init_remapper, .init_page = gm200_fb_init_page, .init_unkn = gp100_fb_init_unkn, .ram_new = gp100_ram_new, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c index b84b9861ef26..b4d74e815674 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c @@ -31,6 +31,7 @@ gp102_fb = { .dtor = gf100_fb_dtor, .oneinit = gf100_fb_oneinit, .init = gp100_fb_init, + .init_remapper = gp100_fb_init_remapper, .init_page = gm200_fb_init_page, .ram_new = gp100_ram_new, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h index 2857f31466bf..1e4ad61c19e1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h @@ -11,6 +11,7 @@ struct nvkm_fb_func { u32 (*tags)(struct nvkm_fb *); int (*oneinit)(struct nvkm_fb *); void (*init)(struct nvkm_fb *); + void (*init_remapper)(struct nvkm_fb *); int (*init_page)(struct nvkm_fb *); void (*init_unkn)(struct nvkm_fb *); void (*intr)(struct nvkm_fb *); @@ -69,5 +70,6 @@ int gf100_fb_init_page(struct nvkm_fb *); int gm200_fb_init_page(struct nvkm_fb *); +void gp100_fb_init_remapper(struct nvkm_fb *); void gp100_fb_init_unkn(struct nvkm_fb *); #endif diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile index 2589f4acd5ae..9c81301d0eed 100644 --- a/drivers/gpu/drm/sun4i/Makefile +++ b/drivers/gpu/drm/sun4i/Makefile @@ -32,7 +32,10 @@ obj-$(CONFIG_DRM_SUN4I) += sun4i-tcon.o obj-$(CONFIG_DRM_SUN4I) += sun4i_tv.o obj-$(CONFIG_DRM_SUN4I) += sun6i_drc.o -obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o sun4i-frontend.o +obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o +ifdef CONFIG_DRM_SUN4I_BACKEND +obj-$(CONFIG_DRM_SUN4I) += sun4i-frontend.o +endif obj-$(CONFIG_DRM_SUN4I_HDMI) += sun4i-drm-hdmi.o obj-$(CONFIG_DRM_SUN6I_DSI) += sun6i-dsi.o obj-$(CONFIG_DRM_SUN8I_DW_HDMI) += sun8i-drm-hdmi.o diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 776c1513e582..a2bd5876c633 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -398,7 +398,7 @@ int tegra_drm_submit(struct tegra_drm_context *context, * unaligned offset is malformed and cause commands stream * corruption on the buffer address relocation. 
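nvkm_fb_init() above calls ->init_remapper only when the per-chip function table provides it, the usual optional-hook pattern for generation-specific ops. A tiny generic sketch with hypothetical types (not the nvkm structures):

struct chip_funcs {
	void (*init)(void *priv);
	void (*init_remapper)(void *priv);	/* optional: only remapper-capable chips set it */
};

static void chip_init(const struct chip_funcs *funcs, void *priv)
{
	if (funcs->init)
		funcs->init(priv);

	if (funcs->init_remapper)	/* silently skipped on other chips */
		funcs->init_remapper(priv);
}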
*/ - if (offset & 3 || offset >= obj->gem.size) { + if (offset & 3 || offset > obj->gem.size) { err = -EINVAL; goto fail; } diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index 2ebdc6d5a76e..d5583190f3e4 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -137,7 +137,10 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, if (cmd > (char *) urb->transfer_buffer) { /* Send partial buffer remaining before exiting */ - int len = cmd - (char *) urb->transfer_buffer; + int len; + if (cmd < (char *) urb->transfer_buffer + urb->transfer_buffer_length) + *cmd++ = 0xAF; + len = cmd - (char *) urb->transfer_buffer; ret = udl_submit_urb(dev, urb, len); bytes_sent += len; } else diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c index 0c87b1ac6b68..b992644c17e6 100644 --- a/drivers/gpu/drm/udl/udl_transfer.c +++ b/drivers/gpu/drm/udl/udl_transfer.c @@ -153,11 +153,11 @@ static void udl_compress_hline16( raw_pixels_count_byte = cmd++; /* we'll know this later */ raw_pixel_start = pixel; - cmd_pixel_end = pixel + (min(MAX_CMD_PIXELS + 1, - min((int)(pixel_end - pixel) / bpp, - (int)(cmd_buffer_end - cmd) / 2))) * bpp; + cmd_pixel_end = pixel + min3(MAX_CMD_PIXELS + 1UL, + (unsigned long)(pixel_end - pixel) / bpp, + (unsigned long)(cmd_buffer_end - 1 - cmd) / 2) * bpp; - prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp); + prefetch_range((void *) pixel, cmd_pixel_end - pixel); pixel_val16 = get_pixel_val16(pixel, bpp); while (pixel < cmd_pixel_end) { @@ -193,6 +193,9 @@ static void udl_compress_hline16( if (pixel > raw_pixel_start) { /* finalize last RAW span */ *raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF; + } else { + /* undo unused byte */ + cmd--; } *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF; diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 1d34619eb3fe..a951ec75d01f 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -320,6 +320,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) vc4_state->x_scaling[0] = VC4_SCALING_TPZ; if (vc4_state->y_scaling[0] == VC4_SCALING_NONE) vc4_state->y_scaling[0] = VC4_SCALING_TPZ; + } else { + vc4_state->x_scaling[1] = VC4_SCALING_NONE; + vc4_state->y_scaling[1] = VC4_SCALING_NONE; } vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE && diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index f1d5f76e9c33..d88073e7d22d 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -218,6 +218,9 @@ static int host1x_probe(struct platform_device *pdev) return err; } + if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) + goto skip_iommu; + host->group = iommu_group_get(&pdev->dev); if (host->group) { struct iommu_domain_geometry *geometry; diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c index e2f4a4d93d20..527a1cddb14f 100644 --- a/drivers/gpu/host1x/job.c +++ b/drivers/gpu/host1x/job.c @@ -569,7 +569,8 @@ void host1x_job_unpin(struct host1x_job *job) for (i = 0; i < job->num_unpins; i++) { struct host1x_job_unpin_data *unpin = &job->unpins[i]; - if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) { + if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && + unpin->size && host->domain) { iommu_unmap(host->domain, job->addr_phys[i], unpin->size); free_iova(&host->iova, diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c index 
caa05b0702e1..5450a2db1219 100644 --- a/drivers/gpu/ipu-v3/ipu-csi.c +++ b/drivers/gpu/ipu-v3/ipu-csi.c @@ -339,7 +339,8 @@ static void fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg, break; case V4L2_MBUS_BT656: csicfg->ext_vsync = 0; - if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field)) + if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field) || + mbus_fmt->field == V4L2_FIELD_ALTERNATE) csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_INTERLACED; else csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;
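The final ipu-csi hunk treats V4L2_FIELD_ALTERNATE like the FIELD_HAS_BOTH cases when choosing the BT.656 clock mode. Condensed into a standalone predicate (a sketch; the driver writes the chosen IPU_CSI_CLK_MODE_* value into its bus config instead of returning a flag):

#include <linux/types.h>
#include <linux/videodev2.h>

/* true when the BT.656 stream must be captured with the interlaced clock mode */
static bool bt656_wants_interlaced_clk(enum v4l2_field field)
{
	return V4L2_FIELD_HAS_BOTH(field) || field == V4L2_FIELD_ALTERNATE;
}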