Diffstat (limited to 'drivers/gpu/drm')
176 files changed, 3491 insertions, 1784 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h index 5b393622f592..a0f0a17e224f 100644 --- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h +++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h @@ -119,6 +119,7 @@ #define CONNECTOR_OBJECT_ID_eDP 0x14 #define CONNECTOR_OBJECT_ID_MXM 0x15 #define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16 +#define CONNECTOR_OBJECT_ID_USBC 0x17 /* deleted */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index f428f94b43c0..7e73ac6fb21d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1397,12 +1397,10 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev); void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps); -bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev); void amdgpu_acpi_detect(void); #else static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; } static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { } -static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; } static inline void amdgpu_acpi_detect(void) { } static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; } static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev, @@ -1411,6 +1409,14 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state) { return 0; } #endif +#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND) +bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev); +bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev); +#else +static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; } +static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; } +#endif + int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, uint64_t addr, struct amdgpu_bo **bo, struct amdgpu_bo_va_mapping **mapping); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 4811b0faafd9..0e12315fa0cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -1031,6 +1031,20 @@ void amdgpu_acpi_detect(void) } } +#if IS_ENABLED(CONFIG_SUSPEND) +/** + * amdgpu_acpi_is_s3_active + * + * @adev: amdgpu_device_pointer + * + * returns true if supported, false if not. 
+ */ +bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) +{ + return !(adev->flags & AMD_IS_APU) || + (pm_suspend_target_state == PM_SUSPEND_MEM); +} + /** * amdgpu_acpi_is_s0ix_active * @@ -1040,11 +1054,24 @@ void amdgpu_acpi_detect(void) */ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { -#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND) - if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) { - if (adev->flags & AMD_IS_APU) - return pm_suspend_target_state == PM_SUSPEND_TO_IDLE; + if (!(adev->flags & AMD_IS_APU) || + (pm_suspend_target_state != PM_SUSPEND_TO_IDLE)) + return false; + + if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) { + dev_warn_once(adev->dev, + "Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n" + "To use suspend-to-idle change the sleep mode in BIOS setup.\n"); + return false; } -#endif + +#if !IS_ENABLED(CONFIG_AMD_PMC) + dev_warn_once(adev->dev, + "Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n"); return false; +#else + return true; +#endif /* CONFIG_AMD_PMC */ } + +#endif /* CONFIG_SUSPEND */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index df1f9b88a53f..a09876bb7ec8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -175,7 +175,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector) /* Check if bpc is within clock limit. Try to degrade gracefully otherwise */ if ((bpc == 12) && (mode_clock * 3/2 > max_tmds_clock)) { - if ((connector->display_info.edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30) && + if ((connector->display_info.edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_30) && (mode_clock * 5/4 <= max_tmds_clock)) bpc = 10; else diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 913f9eaa9cd6..aa823f154199 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1508,6 +1508,7 @@ int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data, return 0; default: + dma_fence_put(fence); return -EINVAL; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 1545884dc703..2f2ae26a8068 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -30,6 +30,7 @@ #include <linux/module.h> #include <linux/console.h> #include <linux/slab.h> +#include <linux/pci.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_probe_helper.h> @@ -2069,6 +2070,8 @@ out: */ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) { + struct drm_device *dev = adev_to_drm(adev); + struct pci_dev *parent; int i, r; amdgpu_device_enable_virtual_display(adev); @@ -2168,6 +2171,18 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) return -EINVAL; } + if (amdgpu_has_atpx() && + (amdgpu_is_atpx_hybrid() || + amdgpu_has_atpx_dgpu_power_cntl()) && + ((adev->flags & AMD_IS_APU) == 0) && + !pci_is_thunderbolt_attached(to_pci_dev(dev->dev))) + adev->flags |= AMD_IS_PX; + + if (!(adev->flags & AMD_IS_APU)) { + parent = pci_upstream_bridge(adev->pdev); + adev->has_pr3 = parent ? 
pci_pr3_present(parent) : false; + } + amdgpu_amdkfd_device_probe(adev); adev->pm.pp_feature = amdgpu_pp_feature_mask; @@ -5610,7 +5625,7 @@ void amdgpu_device_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { #ifdef CONFIG_X86_64 - if (adev->flags & AMD_IS_APU) + if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) return; #endif if (adev->gmc.xgmi.connected_to_cpu) @@ -5626,7 +5641,7 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { #ifdef CONFIG_X86_64 - if (adev->flags & AMD_IS_APU) + if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) return; #endif if (adev->gmc.xgmi.connected_to_cpu) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index dc50c05f23fc..5c08047adb59 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -1145,7 +1145,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev, if (ret) return ret; - if (!dev->mode_config.allow_fb_modifiers) { + if (!dev->mode_config.allow_fb_modifiers && !adev->enable_virtual_display) { drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI, "GFX9+ requires FB check based on format modifier\n"); ret = check_tiling_flags_gfx6(rfb); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 30059b7db0b2..b517b76e96a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -680,7 +680,7 @@ MODULE_PARM_DESC(sched_policy, * Maximum number of processes that HWS can schedule concurrently. The maximum is the * number of VMIDs assigned to the HWS, which is also the default. */ -int hws_max_conc_proc = 8; +int hws_max_conc_proc = -1; module_param(hws_max_conc_proc, int, 0444); MODULE_PARM_DESC(hws_max_conc_proc, "Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))"); @@ -891,6 +891,717 @@ MODULE_PARM_DESC(smu_pptable_id, "specify pptable id to be used (-1 = auto(default) value, 0 = use pptable from vbios, > 0 = soft pptable id)"); module_param_named(smu_pptable_id, amdgpu_smu_pptable_id, int, 0444); +/* These devices are not supported by amdgpu. 
+ * They are supported by the mach64, r128, radeon drivers + */ +static const u16 amdgpu_unsupported_pciidlist[] = { + /* mach64 */ + 0x4354, + 0x4358, + 0x4554, + 0x4742, + 0x4744, + 0x4749, + 0x474C, + 0x474D, + 0x474E, + 0x474F, + 0x4750, + 0x4751, + 0x4752, + 0x4753, + 0x4754, + 0x4755, + 0x4756, + 0x4757, + 0x4758, + 0x4759, + 0x475A, + 0x4C42, + 0x4C44, + 0x4C47, + 0x4C49, + 0x4C4D, + 0x4C4E, + 0x4C50, + 0x4C51, + 0x4C52, + 0x4C53, + 0x5654, + 0x5655, + 0x5656, + /* r128 */ + 0x4c45, + 0x4c46, + 0x4d46, + 0x4d4c, + 0x5041, + 0x5042, + 0x5043, + 0x5044, + 0x5045, + 0x5046, + 0x5047, + 0x5048, + 0x5049, + 0x504A, + 0x504B, + 0x504C, + 0x504D, + 0x504E, + 0x504F, + 0x5050, + 0x5051, + 0x5052, + 0x5053, + 0x5054, + 0x5055, + 0x5056, + 0x5057, + 0x5058, + 0x5245, + 0x5246, + 0x5247, + 0x524b, + 0x524c, + 0x534d, + 0x5446, + 0x544C, + 0x5452, + /* radeon */ + 0x3150, + 0x3151, + 0x3152, + 0x3154, + 0x3155, + 0x3E50, + 0x3E54, + 0x4136, + 0x4137, + 0x4144, + 0x4145, + 0x4146, + 0x4147, + 0x4148, + 0x4149, + 0x414A, + 0x414B, + 0x4150, + 0x4151, + 0x4152, + 0x4153, + 0x4154, + 0x4155, + 0x4156, + 0x4237, + 0x4242, + 0x4336, + 0x4337, + 0x4437, + 0x4966, + 0x4967, + 0x4A48, + 0x4A49, + 0x4A4A, + 0x4A4B, + 0x4A4C, + 0x4A4D, + 0x4A4E, + 0x4A4F, + 0x4A50, + 0x4A54, + 0x4B48, + 0x4B49, + 0x4B4A, + 0x4B4B, + 0x4B4C, + 0x4C57, + 0x4C58, + 0x4C59, + 0x4C5A, + 0x4C64, + 0x4C66, + 0x4C67, + 0x4E44, + 0x4E45, + 0x4E46, + 0x4E47, + 0x4E48, + 0x4E49, + 0x4E4A, + 0x4E4B, + 0x4E50, + 0x4E51, + 0x4E52, + 0x4E53, + 0x4E54, + 0x4E56, + 0x5144, + 0x5145, + 0x5146, + 0x5147, + 0x5148, + 0x514C, + 0x514D, + 0x5157, + 0x5158, + 0x5159, + 0x515A, + 0x515E, + 0x5460, + 0x5462, + 0x5464, + 0x5548, + 0x5549, + 0x554A, + 0x554B, + 0x554C, + 0x554D, + 0x554E, + 0x554F, + 0x5550, + 0x5551, + 0x5552, + 0x5554, + 0x564A, + 0x564B, + 0x564F, + 0x5652, + 0x5653, + 0x5657, + 0x5834, + 0x5835, + 0x5954, + 0x5955, + 0x5974, + 0x5975, + 0x5960, + 0x5961, + 0x5962, + 0x5964, + 0x5965, + 0x5969, + 0x5a41, + 0x5a42, + 0x5a61, + 0x5a62, + 0x5b60, + 0x5b62, + 0x5b63, + 0x5b64, + 0x5b65, + 0x5c61, + 0x5c63, + 0x5d48, + 0x5d49, + 0x5d4a, + 0x5d4c, + 0x5d4d, + 0x5d4e, + 0x5d4f, + 0x5d50, + 0x5d52, + 0x5d57, + 0x5e48, + 0x5e4a, + 0x5e4b, + 0x5e4c, + 0x5e4d, + 0x5e4f, + 0x6700, + 0x6701, + 0x6702, + 0x6703, + 0x6704, + 0x6705, + 0x6706, + 0x6707, + 0x6708, + 0x6709, + 0x6718, + 0x6719, + 0x671c, + 0x671d, + 0x671f, + 0x6720, + 0x6721, + 0x6722, + 0x6723, + 0x6724, + 0x6725, + 0x6726, + 0x6727, + 0x6728, + 0x6729, + 0x6738, + 0x6739, + 0x673e, + 0x6740, + 0x6741, + 0x6742, + 0x6743, + 0x6744, + 0x6745, + 0x6746, + 0x6747, + 0x6748, + 0x6749, + 0x674A, + 0x6750, + 0x6751, + 0x6758, + 0x6759, + 0x675B, + 0x675D, + 0x675F, + 0x6760, + 0x6761, + 0x6762, + 0x6763, + 0x6764, + 0x6765, + 0x6766, + 0x6767, + 0x6768, + 0x6770, + 0x6771, + 0x6772, + 0x6778, + 0x6779, + 0x677B, + 0x6840, + 0x6841, + 0x6842, + 0x6843, + 0x6849, + 0x684C, + 0x6850, + 0x6858, + 0x6859, + 0x6880, + 0x6888, + 0x6889, + 0x688A, + 0x688C, + 0x688D, + 0x6898, + 0x6899, + 0x689b, + 0x689c, + 0x689d, + 0x689e, + 0x68a0, + 0x68a1, + 0x68a8, + 0x68a9, + 0x68b0, + 0x68b8, + 0x68b9, + 0x68ba, + 0x68be, + 0x68bf, + 0x68c0, + 0x68c1, + 0x68c7, + 0x68c8, + 0x68c9, + 0x68d8, + 0x68d9, + 0x68da, + 0x68de, + 0x68e0, + 0x68e1, + 0x68e4, + 0x68e5, + 0x68e8, + 0x68e9, + 0x68f1, + 0x68f2, + 0x68f8, + 0x68f9, + 0x68fa, + 0x68fe, + 0x7100, + 0x7101, + 0x7102, + 0x7103, + 0x7104, + 0x7105, + 0x7106, + 0x7108, + 0x7109, + 0x710A, + 0x710B, + 0x710C, + 0x710E, + 0x710F, + 0x7140, + 0x7141, + 
0x7142, + 0x7143, + 0x7144, + 0x7145, + 0x7146, + 0x7147, + 0x7149, + 0x714A, + 0x714B, + 0x714C, + 0x714D, + 0x714E, + 0x714F, + 0x7151, + 0x7152, + 0x7153, + 0x715E, + 0x715F, + 0x7180, + 0x7181, + 0x7183, + 0x7186, + 0x7187, + 0x7188, + 0x718A, + 0x718B, + 0x718C, + 0x718D, + 0x718F, + 0x7193, + 0x7196, + 0x719B, + 0x719F, + 0x71C0, + 0x71C1, + 0x71C2, + 0x71C3, + 0x71C4, + 0x71C5, + 0x71C6, + 0x71C7, + 0x71CD, + 0x71CE, + 0x71D2, + 0x71D4, + 0x71D5, + 0x71D6, + 0x71DA, + 0x71DE, + 0x7200, + 0x7210, + 0x7211, + 0x7240, + 0x7243, + 0x7244, + 0x7245, + 0x7246, + 0x7247, + 0x7248, + 0x7249, + 0x724A, + 0x724B, + 0x724C, + 0x724D, + 0x724E, + 0x724F, + 0x7280, + 0x7281, + 0x7283, + 0x7284, + 0x7287, + 0x7288, + 0x7289, + 0x728B, + 0x728C, + 0x7290, + 0x7291, + 0x7293, + 0x7297, + 0x7834, + 0x7835, + 0x791e, + 0x791f, + 0x793f, + 0x7941, + 0x7942, + 0x796c, + 0x796d, + 0x796e, + 0x796f, + 0x9400, + 0x9401, + 0x9402, + 0x9403, + 0x9405, + 0x940A, + 0x940B, + 0x940F, + 0x94A0, + 0x94A1, + 0x94A3, + 0x94B1, + 0x94B3, + 0x94B4, + 0x94B5, + 0x94B9, + 0x9440, + 0x9441, + 0x9442, + 0x9443, + 0x9444, + 0x9446, + 0x944A, + 0x944B, + 0x944C, + 0x944E, + 0x9450, + 0x9452, + 0x9456, + 0x945A, + 0x945B, + 0x945E, + 0x9460, + 0x9462, + 0x946A, + 0x946B, + 0x947A, + 0x947B, + 0x9480, + 0x9487, + 0x9488, + 0x9489, + 0x948A, + 0x948F, + 0x9490, + 0x9491, + 0x9495, + 0x9498, + 0x949C, + 0x949E, + 0x949F, + 0x94C0, + 0x94C1, + 0x94C3, + 0x94C4, + 0x94C5, + 0x94C6, + 0x94C7, + 0x94C8, + 0x94C9, + 0x94CB, + 0x94CC, + 0x94CD, + 0x9500, + 0x9501, + 0x9504, + 0x9505, + 0x9506, + 0x9507, + 0x9508, + 0x9509, + 0x950F, + 0x9511, + 0x9515, + 0x9517, + 0x9519, + 0x9540, + 0x9541, + 0x9542, + 0x954E, + 0x954F, + 0x9552, + 0x9553, + 0x9555, + 0x9557, + 0x955f, + 0x9580, + 0x9581, + 0x9583, + 0x9586, + 0x9587, + 0x9588, + 0x9589, + 0x958A, + 0x958B, + 0x958C, + 0x958D, + 0x958E, + 0x958F, + 0x9590, + 0x9591, + 0x9593, + 0x9595, + 0x9596, + 0x9597, + 0x9598, + 0x9599, + 0x959B, + 0x95C0, + 0x95C2, + 0x95C4, + 0x95C5, + 0x95C6, + 0x95C7, + 0x95C9, + 0x95CC, + 0x95CD, + 0x95CE, + 0x95CF, + 0x9610, + 0x9611, + 0x9612, + 0x9613, + 0x9614, + 0x9615, + 0x9616, + 0x9640, + 0x9641, + 0x9642, + 0x9643, + 0x9644, + 0x9645, + 0x9647, + 0x9648, + 0x9649, + 0x964a, + 0x964b, + 0x964c, + 0x964e, + 0x964f, + 0x9710, + 0x9711, + 0x9712, + 0x9713, + 0x9714, + 0x9715, + 0x9802, + 0x9803, + 0x9804, + 0x9805, + 0x9806, + 0x9807, + 0x9808, + 0x9809, + 0x980A, + 0x9900, + 0x9901, + 0x9903, + 0x9904, + 0x9905, + 0x9906, + 0x9907, + 0x9908, + 0x9909, + 0x990A, + 0x990B, + 0x990C, + 0x990D, + 0x990E, + 0x990F, + 0x9910, + 0x9913, + 0x9917, + 0x9918, + 0x9919, + 0x9990, + 0x9991, + 0x9992, + 0x9993, + 0x9994, + 0x9995, + 0x9996, + 0x9997, + 0x9998, + 0x9999, + 0x999A, + 0x999B, + 0x999C, + 0x999D, + 0x99A0, + 0x99A2, + 0x99A4, + /* radeon secondary ids */ + 0x3171, + 0x3e70, + 0x4164, + 0x4165, + 0x4166, + 0x4168, + 0x4170, + 0x4171, + 0x4172, + 0x4173, + 0x496e, + 0x4a69, + 0x4a6a, + 0x4a6b, + 0x4a70, + 0x4a74, + 0x4b69, + 0x4b6b, + 0x4b6c, + 0x4c6e, + 0x4e64, + 0x4e65, + 0x4e66, + 0x4e67, + 0x4e68, + 0x4e69, + 0x4e6a, + 0x4e71, + 0x4f73, + 0x5569, + 0x556b, + 0x556d, + 0x556f, + 0x5571, + 0x5854, + 0x5874, + 0x5940, + 0x5941, + 0x5b72, + 0x5b73, + 0x5b74, + 0x5b75, + 0x5d44, + 0x5d45, + 0x5d6d, + 0x5d6f, + 0x5d72, + 0x5d77, + 0x5e6b, + 0x5e6d, + 0x7120, + 0x7124, + 0x7129, + 0x712e, + 0x712f, + 0x7162, + 0x7163, + 0x7166, + 0x7167, + 0x7172, + 0x7173, + 0x71a0, + 0x71a1, + 0x71a3, + 0x71a7, + 0x71bb, + 0x71e0, + 0x71e1, + 0x71e2, + 0x71e6, + 
0x71e7, + 0x71f2, + 0x7269, + 0x726b, + 0x726e, + 0x72a0, + 0x72a8, + 0x72b1, + 0x72b3, + 0x793f, +}; + static const struct pci_device_id pciidlist[] = { #ifdef CONFIG_DRM_AMDGPU_SI {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, @@ -1273,11 +1984,20 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, struct drm_device *ddev; struct amdgpu_device *adev; unsigned long flags = ent->driver_data; - int ret, retry = 0; + int ret, retry = 0, i; bool supports_atomic = false; bool is_fw_fb; resource_size_t base, size; + if (amdgpu_aspm == -1 && !pcie_aspm_enabled(pdev)) + amdgpu_aspm = 0; + + /* skip devices which are owned by radeon */ + for (i = 0; i < ARRAY_SIZE(amdgpu_unsupported_pciidlist); i++) { + if (amdgpu_unsupported_pciidlist[i] == pdev->device) + return -ENODEV; + } + if (amdgpu_virtual_display || amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK)) supports_atomic = true; @@ -1499,6 +2219,7 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work) static int amdgpu_pmops_prepare(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(drm_dev); /* Return a positive number here so * DPM_FLAG_SMART_SUSPEND works properly @@ -1506,6 +2227,13 @@ static int amdgpu_pmops_prepare(struct device *dev) if (amdgpu_device_supports_boco(drm_dev)) return pm_runtime_suspended(dev); + /* if we will not support s3 or s2i for the device + * then skip suspend + */ + if (!amdgpu_acpi_is_s0ix_active(adev) && + !amdgpu_acpi_is_s3_active(adev)) + return 1; + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 1916ec84dd71..e7845df6cad2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -266,7 +266,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev, * adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_queue_per_pipe; - while (queue_bit-- >= 0) { + while (--queue_bit >= 0) { if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap)) continue; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 09a2fe839059..6744427577b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -152,21 +152,10 @@ static void amdgpu_get_audio_func(struct amdgpu_device *adev) int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) { struct drm_device *dev; - struct pci_dev *parent; int r, acpi_status; dev = adev_to_drm(adev); - if (amdgpu_has_atpx() && - (amdgpu_is_atpx_hybrid() || - amdgpu_has_atpx_dgpu_power_cntl()) && - ((flags & AMD_IS_APU) == 0) && - !pci_is_thunderbolt_attached(to_pci_dev(dev->dev))) - flags |= AMD_IS_PX; - - parent = pci_upstream_bridge(adev->pdev); - adev->has_pr3 = parent ? 
pci_pr3_present(parent) : false; - /* amdgpu_device_init should report only fatal error * like memory allocation failure or iomapping failure, * or memory manager initialization failure, it must diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 01a78c786536..d62b770cc9dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1343,7 +1343,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo) !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) return; - dma_resv_lock(bo->base.resv, NULL); + if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv))) + return; r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence); if (!WARN_ON(r)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 94126dc39688..8132f66177c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1892,7 +1892,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, unsigned i; int r; - if (direct_submit && !ring->sched.ready) { + if (!direct_submit && !ring->sched.ready) { DRM_ERROR("Trying to move memory with ring turned off.\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index ac9a8cd21c4b..7d58bf410be0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -142,15 +142,16 @@ static void amdgpu_vkms_crtc_atomic_disable(struct drm_crtc *crtc, static void amdgpu_vkms_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) { + unsigned long flags; if (crtc->state->event) { - spin_lock(&crtc->dev->event_lock); + spin_lock_irqsave(&crtc->dev->event_lock, flags); if (drm_crtc_vblank_get(crtc) != 0) drm_crtc_send_vblank_event(crtc, crtc->state->event); else drm_crtc_arm_vblank_event(crtc, crtc->state->event); - spin_unlock(&crtc->dev->event_lock); + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); crtc->state->event = NULL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 6b15cad78de9..fd37bb39774c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -768,11 +768,17 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, * Check if all VM PDs/PTs are ready for updates * * Returns: - * True if eviction list is empty. + * True if VM is not evicting. 
*/ bool amdgpu_vm_ready(struct amdgpu_vm *vm) { - return list_empty(&vm->evicted); + bool ret; + + amdgpu_vm_eviction_lock(vm); + ret = !vm->evicting; + amdgpu_vm_eviction_unlock(vm); + + return ret && list_empty(&vm->evicted); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index c39e53a41f13..db27fcf87cd0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1272,6 +1272,8 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = { { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 }, /* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */ { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 }, + /* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */ + { 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 }, { 0, 0, 0, 0, 0 }, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 3c01be661014..93a4da4284ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -788,7 +788,7 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev) adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); #ifdef CONFIG_X86_64 - if (adev->flags & AMD_IS_APU) { + if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) { adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev); adev->gmc.aper_size = adev->gmc.real_vram_size; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 0a50fdaced7e..63c47f61d0df 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -381,8 +381,9 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); #ifdef CONFIG_X86_64 - if (adev->flags & AMD_IS_APU && - adev->gmc.real_vram_size > adev->gmc.aper_size) { + if ((adev->flags & AMD_IS_APU) && + adev->gmc.real_vram_size > adev->gmc.aper_size && + !amdgpu_passthrough(adev)) { adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22; adev->gmc.aper_size = adev->gmc.real_vram_size; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 63b890f1e8af..bef9610084f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -581,7 +581,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); #ifdef CONFIG_X86_64 - if (adev->flags & AMD_IS_APU) { + if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) { adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22; adev->gmc.aper_size = adev->gmc.real_vram_size; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index c67e21244342..0e731016921b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1387,7 +1387,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) */ /* check whether both host-gpu and gpu-gpu xgmi links exist */ - if ((adev->flags & AMD_IS_APU) || + if (((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) || (adev->gmc.xgmi.supported && adev->gmc.xgmi.connected_to_cpu)) { adev->gmc.aper_base = @@ -1652,7 +1652,7 @@ static int gmc_v9_0_sw_fini(void *handle) amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); amdgpu_gart_table_vram_free(adev); - amdgpu_bo_unref(&adev->gmc.pdb0_bo); + amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0); amdgpu_bo_fini(adev); return 0; diff --git 
a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 8931000dcd41..e37948c15769 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -2062,6 +2062,10 @@ static int sdma_v4_0_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* SMU saves SDMA state for us */ + if (adev->in_s0ix) + return 0; + return sdma_v4_0_hw_fini(adev); } @@ -2069,6 +2073,10 @@ static int sdma_v4_0_resume(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* SMU restores SDMA state for us */ + if (adev->in_s0ix) + return 0; + return sdma_v4_0_hw_init(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 0fc97c364fd7..6439d5c3d8d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -607,8 +607,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev) static int soc15_asic_reset(struct amdgpu_device *adev) { /* original raven doesn't have full asic reset */ - if ((adev->apu_flags & AMD_APU_IS_RAVEN) && - !(adev->apu_flags & AMD_APU_IS_RAVEN2)) + if ((adev->apu_flags & AMD_APU_IS_RAVEN) || + (adev->apu_flags & AMD_APU_IS_RAVEN2)) return 0; switch (soc15_asic_reset_method(adev)) { @@ -1273,8 +1273,11 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_VCN_MGCG; + /* + * MMHUB PG needs to be disabled for Picasso for + * stability reasons. + */ adev->pg_flags = AMD_PG_SUPPORT_SDMA | - AMD_PG_SUPPORT_MMHUB | AMD_PG_SUPPORT_VCN; } else { adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 3d18aab88b4e..6e56bef4fdf8 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -601,8 +601,8 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect); /* VCN global tiling registers */ - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( - UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + UVD, inst_idx, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); } static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst) @@ -1508,8 +1508,11 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev) static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx) { + struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE}; uint32_t tmp; + vcn_v3_0_pause_dpg_mode(adev, 0, &state); + /* Wait for power status to be 1 */ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 86afd37b098d..6688129df240 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1807,13 +1807,9 @@ static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data) if (!args->start_addr || !args->size) return -EINVAL; - mutex_lock(&p->mutex); - r = svm_ioctl(p, args->op, args->start_addr, args->size, args->nattr, args->attrs); - mutex_unlock(&p->mutex); - return r; } #else diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index cfedfb1e8596..e574aa32a111 100644 --- 
a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -1060,6 +1060,9 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink, return -ENODEV; /* same everything but the other direction */ props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL); + if (!props2) + return -ENOMEM; + props2->node_from = id_to; props2->node_to = id_from; props2->kobj = NULL; @@ -1560,7 +1563,7 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size) /* Fetch the CRAT table from ACPI */ status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table); if (status == AE_NOT_FOUND) { - pr_warn("CRAT table not found\n"); + pr_info("CRAT table not found\n"); return -ENODATA; } else if (ACPI_FAILURE(status)) { const char *err = acpi_format_exception(status); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 88c483f69989..660eb7097cfc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -834,15 +834,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, } /* Verify module parameters regarding mapped process number*/ - if ((hws_max_conc_proc < 0) - || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) { - dev_err(kfd_device, - "hws_max_conc_proc %d must be between 0 and %d, use %d instead\n", - hws_max_conc_proc, kfd->vm_info.vmid_num_kfd, - kfd->vm_info.vmid_num_kfd); + if (hws_max_conc_proc >= 0) + kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd); + else kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd; - } else - kfd->max_proc_per_quantum = hws_max_conc_proc; /* calculate max size of mqds needed for queues */ size = max_num_of_queues_per_device * diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 3eea4edee355..b8bdd796cd91 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -531,6 +531,8 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events) event_waiters = kmalloc_array(num_events, sizeof(struct kfd_event_waiter), GFP_KERNEL); + if (!event_waiters) + return NULL; for (i = 0; (event_waiters) && (i < num_events) ; i++) { init_wait(&event_waiters[i].wait); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index ed4bc5f844ce..766b3660c8c8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -270,15 +270,6 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd) return ret; } - ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client, - O_RDWR); - if (ret < 0) { - kfifo_free(&client->fifo); - kfree(client); - return ret; - } - *fd = ret; - init_waitqueue_head(&client->wait_queue); spin_lock_init(&client->lock); client->events = 0; @@ -288,5 +279,20 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd) list_add_rcu(&client->list, &dev->smi_clients); spin_unlock(&dev->smi_lock); + ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client, + O_RDWR); + if (ret < 0) { + spin_lock(&dev->smi_lock); + list_del_rcu(&client->list); + spin_unlock(&dev->smi_lock); + + synchronize_rcu(); + + kfifo_free(&client->fifo); + kfree(client); + return ret; + } + *fd = ret; + return 0; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 16556ae892d4..ec75613618b1 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1279,9 +1279,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size; adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size; - /* Disable vblank IRQs aggressively for power-saving */ - adev_to_drm(adev)->vblank_disable_immediate = true; - if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) { DRM_ERROR( "amdgpu: failed to initialize sw for display support.\n"); @@ -2299,7 +2296,8 @@ static int dm_resume(void *handle) * this is the case when traversing through already created * MST connectors, should be skipped */ - if (aconnector->mst_port) + if (aconnector->dc_link && + aconnector->dc_link->type == dc_connection_mst_branch) continue; mutex_lock(&aconnector->hpd_lock); @@ -3230,7 +3228,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) /* Use GRPH_PFLIP interrupt */ for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT; - i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1; + i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1; i++) { r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq); if (r) { @@ -3525,7 +3523,7 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap max - min); } -static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, +static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, int bl_idx, u32 user_brightness) { @@ -3553,7 +3551,8 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx); } - return rc ? 0 : 1; + if (rc) + dm->actual_brightness[bl_idx] = user_brightness; } static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) @@ -3815,6 +3814,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } #endif + /* Disable vblank IRQs aggressively for power-saving. */ + adev_to_drm(adev)->vblank_disable_immediate = true; + /* loops over all connectors on the board */ for (i = 0; i < link_cnt; i++) { struct dc_link *link = NULL; @@ -3861,6 +3863,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) update_connector_ext_caps(aconnector); if (amdgpu_dc_feature_mask & DC_PSR_MASK) amdgpu_dm_set_psr_caps(link); + + /* TODO: Fix vblank control helpers to delay PSR entry to allow this when + * PSR is also supported. 
+ */ + if (link->psr_settings.psr_feature_enabled) + adev_to_drm(adev)->vblank_disable_immediate = false; } @@ -7548,6 +7556,9 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, mode = amdgpu_dm_create_common_mode(encoder, common_modes[i].name, common_modes[i].w, common_modes[i].h); + if (!mode) + continue; + drm_mode_probed_add(connector, mode); amdgpu_dm_connector->num_modes++; } @@ -9307,7 +9318,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) /* restore the backlight level */ for (i = 0; i < dm->num_of_edps; i++) { if (dm->backlight_dev[i] && - (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i])) + (dm->actual_brightness[i] != dm->brightness[i])) amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); } #endif @@ -10217,10 +10228,13 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state, static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc) { struct drm_connector *connector; - struct drm_connector_state *conn_state; + struct drm_connector_state *conn_state, *old_conn_state; struct amdgpu_dm_connector *aconnector = NULL; int i; - for_each_new_connector_in_state(state, connector, conn_state, i) { + for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) { + if (!conn_state->crtc) + conn_state = old_conn_state; + if (conn_state->crtc != crtc) continue; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index d1d353a7c77d..46d6e65f6bd4 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -446,6 +446,12 @@ struct amdgpu_display_manager { * cached backlight values. */ u32 brightness[AMDGPU_DM_MAX_NUM_EDP]; + /** + * @actual_brightness: + * + * last successfully applied backlight values. 
+ */ + u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP]; }; enum dsc_clock_force_state { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index e94ddd5e7b63..5c9f5214bc4e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -229,8 +229,10 @@ static ssize_t dp_link_settings_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -388,8 +390,10 @@ static ssize_t dp_phy_settings_read(struct file *f, char __user *buf, break; r = put_user((*(rd_buf + result)), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -1316,8 +1320,10 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -1333,8 +1339,10 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -1503,8 +1511,10 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -1520,8 +1530,10 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -1688,8 +1700,10 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -1705,8 +1719,10 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -1869,8 +1885,10 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -1886,8 +1904,10 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -2045,8 +2065,10 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -2062,8 +2084,10 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -2102,8 +2126,10 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -2119,8 +2145,10 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -2174,8 
+2202,10 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -2191,8 +2221,10 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -2246,8 +2278,10 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -2263,8 +2297,10 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -3254,8 +3290,10 @@ static ssize_t dcc_en_bits_read( dc->hwss.get_dcc_en_bits(dc, dcc_en_bits); rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); - if (!rd_buf) + if (!rd_buf) { + kfree(dcc_en_bits); return -ENOMEM; + } for (i = 0; i < num_pipes; i++) offset += snprintf(rd_buf + offset, rd_buf_size - offset, @@ -3268,8 +3306,10 @@ static ssize_t dcc_en_bits_read( if (*pos >= rd_buf_size) break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; *pos += 1; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index 70a554f1e725..7072fb2ec07f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -74,10 +74,8 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) link = stream->link; - psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version; - - if (psr_config.psr_version > 0) { - psr_config.psr_exit_link_training_required = 0x1; + if (link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) { + psr_config.psr_version = link->psr_settings.psr_version; psr_config.psr_frame_capture_indication_req = 0; psr_config.psr_rfb_setup_time = 0x37; psr_config.psr_sdp_transmit_line_num_deadline = 0x20; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index 1861a147a7fa..5c5cbeb59c4d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -437,8 +437,10 @@ static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base) clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? 
num_levels : 1; /* Refresh bounding box */ + DC_FP_START(); clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box( clk_mgr->base.ctx->dc, clk_mgr_base->bw_params); + DC_FP_END(); } static bool dcn3_is_smu_present(struct clk_mgr *clk_mgr_base) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c index 7046da14bb2a..329ce4e84b83 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c @@ -582,32 +582,32 @@ static struct wm_table lpddr5_wm_table = { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 5.32, - .sr_enter_plus_exit_time_us = 6.38, + .sr_exit_time_us = 13.5, + .sr_enter_plus_exit_time_us = 16.5, .valid = true, }, { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 9.82, - .sr_enter_plus_exit_time_us = 11.196, + .sr_exit_time_us = 13.5, + .sr_enter_plus_exit_time_us = 16.5, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 9.89, - .sr_enter_plus_exit_time_us = 11.24, + .sr_exit_time_us = 13.5, + .sr_enter_plus_exit_time_us = 16.5, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 9.748, - .sr_enter_plus_exit_time_us = 11.102, + .sr_exit_time_us = 13.5, + .sr_enter_plus_exit_time_us = 16.5, .valid = true, }, } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c index 162ae7186124..21d2cbc3cbb2 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c @@ -120,7 +120,11 @@ int dcn31_smu_send_msg_with_param( result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000); if (result == VBIOSSMC_Result_Failed) { - ASSERT(0); + if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu && + param == TABLE_WATERMARKS) + DC_LOG_WARNING("Watermarks table not configured properly by SMU"); + else + ASSERT(0); REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK); return -1; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 1860ccc3f4f2..b37c4d2e7a1e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -891,10 +891,13 @@ static bool dc_construct(struct dc *dc, goto fail; #ifdef CONFIG_DRM_AMD_DC_DCN dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present; -#endif - if (dc->res_pool->funcs->update_bw_bounding_box) + if (dc->res_pool->funcs->update_bw_bounding_box) { + DC_FP_START(); dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params); + DC_FP_END(); + } +#endif /* Creation of current_state must occur after dc->dml * is initialized in dc_create_resource_pool because @@ -1118,6 +1121,8 @@ struct dc *dc_create(const struct dc_init_data *init_params) dc->caps.max_dp_protocol_version = DP_VERSION_1_4; + dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator; + if (dc->res_pool->dmcu != NULL) dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 93c20844848c..605b96873d8c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ 
-3650,7 +3650,9 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link) lttpr_dpcd_data, sizeof(lttpr_dpcd_data)); if (status != DC_OK) { - dm_error("%s: Read LTTPR caps data failed.\n", __func__); +#if defined(CONFIG_DRM_AMD_DC_DCN) + DC_LOG_DP2("%s: Read LTTPR caps data failed.\n", __func__); +#endif return false; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index e94546187cf1..82f1f27baaf3 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1599,6 +1599,9 @@ static bool are_stream_backends_same( if (is_timing_changed(stream_a, stream_b)) return false; + if (stream_a->signal != stream_b->signal) + return false; + if (stream_a->dpms_off != stream_b->dpms_off) return false; @@ -1623,8 +1626,8 @@ bool dc_is_stream_unchanged( if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param) return false; - // Only Have Audio left to check whether it is same or not. This is a corner case for Tiled sinks - if (old_stream->audio_info.mode_count != stream->audio_info.mode_count) + /*compare audio info*/ + if (memcmp(&old_stream->audio_info, &stream->audio_info, sizeof(stream->audio_info)) != 0) return false; return true; @@ -1799,9 +1802,6 @@ enum dc_status dc_remove_stream_from_ctx( dc->res_pool, del_pipe->stream_res.stream_enc, false); - /* Release link encoder from stream in new dc_state. */ - if (dc->res_pool->funcs->link_enc_unassign) - dc->res_pool->funcs->link_enc_unassign(new_ctx, del_pipe->stream); if (del_pipe->stream_res.audio) update_audio_usage( diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 3ab52d9a82cf..e0f58fab5e8e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -185,6 +185,7 @@ struct dc_caps { struct dc_color_caps color; bool vbios_lttpr_aware; bool vbios_lttpr_enable; + uint32_t max_otg_num; }; struct dc_bug_wa { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c index f4f423d0b8c3..80595d7f060c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c @@ -940,6 +940,7 @@ static const struct hubbub_funcs hubbub1_funcs = { .program_watermarks = hubbub1_program_watermarks, .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, .allow_self_refresh_control = hubbub1_allow_self_refresh_control, + .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, }; void hubbub1_construct(struct hubbub *hubbub, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 3af49cdf89eb..93f31e4aeecb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1052,9 +1052,13 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc) void dcn10_verify_allow_pstate_change_high(struct dc *dc) { + struct hubbub *hubbub = dc->res_pool->hubbub; static bool should_log_hw_state; /* prevent hw state log by default */ - if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) { + if (!hubbub->funcs->verify_allow_pstate_change_high) + return; + + if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) { int i = 0; if (should_log_hw_state) @@ -1063,8 +1067,8 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc) 
TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES); BREAK_TO_DEBUGGER(); if (dcn10_hw_wa_force_recovery(dc)) { - /*check again*/ - if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) + /*check again*/ + if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) BREAK_TO_DEBUGGER(); } } @@ -1435,6 +1439,9 @@ void dcn10_init_hw(struct dc *dc) } } + if (hws->funcs.enable_power_gating_plane) + hws->funcs.enable_power_gating_plane(dc->hwseq, true); + /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which * pipes we want to use. @@ -1487,8 +1494,6 @@ void dcn10_init_hw(struct dc *dc) REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); } - if (hws->funcs.enable_power_gating_plane) - hws->funcs.enable_power_gating_plane(dc->hwseq, true); if (dc->clk_mgr->funcs->notify_wm_ranges) dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr); @@ -2455,14 +2460,18 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) struct mpc *mpc = dc->res_pool->mpc; struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); - if (per_pixel_alpha) - blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; - else - blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; - blnd_cfg.overlap_only = false; blnd_cfg.global_gain = 0xff; + if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN; + blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value; + } else if (per_pixel_alpha) { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; + } else { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; + } + if (pipe_ctx->plane_state->global_alpha) blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value; else diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index a47ba1d45be9..9f8d7f92300b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -2297,14 +2297,18 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) struct mpc *mpc = dc->res_pool->mpc; struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); - if (per_pixel_alpha) - blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; - else - blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; - blnd_cfg.overlap_only = false; blnd_cfg.global_gain = 0xff; + if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN; + blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value; + } else if (per_pixel_alpha) { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; + } else { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; + } + if (pipe_ctx->plane_state->global_alpha) blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value; else diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 92a308ad1213..fbbdf9976183 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -874,7 +874,7 @@ static const struct dc_debug_options debug_defaults_drv = { .clock_trace = true, .disable_pplib_clock_request = true, .min_disp_clk_khz = 100000, - .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .pipe_split_policy 
= MPC_SPLIT_AVOID_MULT_DISP, .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c index f4414de96acc..152c9c5733f1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c @@ -448,6 +448,7 @@ static const struct hubbub_funcs hubbub30_funcs = { .program_watermarks = hubbub3_program_watermarks, .allow_self_refresh_control = hubbub1_allow_self_refresh_control, .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, + .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, .force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes, .force_pstate_change_control = hubbub3_force_pstate_change_control, .init_watermarks = hubbub3_init_watermarks, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index 0950784bafa4..f83457375811 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -570,6 +570,9 @@ void dcn30_init_hw(struct dc *dc) } } + if (hws->funcs.enable_power_gating_plane) + hws->funcs.enable_power_gating_plane(dc->hwseq, true); + /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which * pipes we want to use. @@ -647,8 +650,6 @@ void dcn30_init_hw(struct dc *dc) REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); } - if (hws->funcs.enable_power_gating_plane) - hws->funcs.enable_power_gating_plane(dc->hwseq, true); if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks) dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub); diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c index 1e3bd2e9cdcc..a046664e2031 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c @@ -60,6 +60,7 @@ static const struct hubbub_funcs hubbub301_funcs = { .program_watermarks = hubbub3_program_watermarks, .allow_self_refresh_control = hubbub1_allow_self_refresh_control, .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, + .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, .force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes, .force_pstate_change_control = hubbub3_force_pstate_change_control, .hubbub_read_state = hubbub2_read_state, diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index 26ebe00a55f6..dea358b01791 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -1622,12 +1622,106 @@ static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30); } +static void calculate_wm_set_for_vlevel( + int vlevel, + struct wm_range_table_entry *table_entry, + struct dcn_watermarks *wm_set, + struct display_mode_lib *dml, + display_e2e_pipe_params_st *pipes, + int pipe_cnt) +{ + double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us; + + ASSERT(vlevel < dml->soc.num_states); + /* only pipe 0 is read for voltage and dcf/soc clocks */ + 
pipes[0].clks_cfg.voltage = vlevel; + pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz; + pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz; + + dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us; + dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us; + dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us; + + wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000; + wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000; + wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000; + wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000; + wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000; + wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000; + wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000; + wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000; + dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached; + +} + +static void dcn301_calculate_wm_and_dlg( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel_req) +{ + int i, pipe_idx; + int vlevel, vlevel_max; + struct wm_range_table_entry *table_entry; + struct clk_bw_params *bw_params = dc->clk_mgr->bw_params; + + ASSERT(bw_params); + + vlevel_max = bw_params->clk_table.num_entries - 1; + + /* WM Set D */ + table_entry = &bw_params->wm_table.entries[WM_D]; + if (table_entry->wm_type == WM_TYPE_RETRAINING) + vlevel = 0; + else + vlevel = vlevel_max; + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d, + &context->bw_ctx.dml, pipes, pipe_cnt); + /* WM Set C */ + table_entry = &bw_params->wm_table.entries[WM_C]; + vlevel = min(max(vlevel_req, 2), vlevel_max); + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c, + &context->bw_ctx.dml, pipes, pipe_cnt); + /* WM Set B */ + table_entry = &bw_params->wm_table.entries[WM_B]; + vlevel = min(max(vlevel_req, 1), vlevel_max); + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b, + &context->bw_ctx.dml, pipes, pipe_cnt); + + /* WM Set A */ + table_entry = &bw_params->wm_table.entries[WM_A]; + vlevel = min(vlevel_req, vlevel_max); + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a, + &context->bw_ctx.dml, pipes, pipe_cnt); + + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt); + pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + + if (dc->config.forced_clocks) { + pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; + pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; + } + if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000) + pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0; + if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) + pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0; + + pipe_idx++; + } + + dcn20_calculate_dlg_params(dc, 
				context, pipes, pipe_cnt, vlevel);
+}
+
 static struct resource_funcs dcn301_res_pool_funcs = {
	.destroy = dcn301_destroy_resource_pool,
	.link_enc_create = dcn301_link_encoder_create,
	.panel_cntl_create = dcn301_panel_cntl_create,
	.validate_bandwidth = dcn30_validate_bandwidth,
-	.calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
+	.calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg,
	.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
	.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
	.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
index 90c73a1cb986..208d2dc8b1d1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
@@ -24,6 +24,7 @@
 */
+#include <linux/delay.h>
#include "dcn30/dcn30_hubbub.h"
#include "dcn31_hubbub.h"
#include "dm_services.h"
@@ -138,8 +139,11 @@ static uint32_t convert_and_clamp(
	ret_val = wm_ns * refclk_mhz;
	ret_val /= 1000;
-	if (ret_val > clamp_value)
+	if (ret_val > clamp_value) {
+		/* clamping WMs is abnormal, unexpected and may lead to underflow */
+		ASSERT(0);
		ret_val = clamp_value;
+	}
	return ret_val;
}
@@ -159,7 +163,7 @@ static bool hubbub31_program_urgent_watermarks(
	if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) {
		hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
@@ -193,7 +197,7 @@ static bool hubbub31_program_urgent_watermarks(
	if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) {
		hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
	} else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns)
@@ -203,7 +207,7 @@ static bool hubbub31_program_urgent_watermarks(
	if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) {
		hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
@@ -237,7 +241,7 @@ static bool hubbub31_program_urgent_watermarks(
	if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) {
		hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
	} else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns)
@@ -247,7 +251,7 @@ static bool hubbub31_program_urgent_watermarks(
	if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) {
		hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value); @@ -281,7 +285,7 @@ static bool hubbub31_program_urgent_watermarks( if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) { hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns; prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value); } else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns) @@ -291,7 +295,7 @@ static bool hubbub31_program_urgent_watermarks( if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) { hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value); @@ -325,7 +329,7 @@ static bool hubbub31_program_urgent_watermarks( if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) { hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns; prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value); } else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns) @@ -351,7 +355,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n" @@ -367,7 +371,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->a.cstate_pstate.cstate_exit_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n" @@ -383,7 +387,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_A calculated =%d\n" @@ -399,7 +403,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->a.cstate_pstate.cstate_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_A calculated =%d\n" @@ -416,7 +420,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns; prog_wm_value = convert_and_clamp( 
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n" @@ -432,7 +436,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->b.cstate_pstate.cstate_exit_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n" @@ -448,7 +452,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_B calculated =%d\n" @@ -464,7 +468,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->b.cstate_pstate.cstate_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.cstate_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_B calculated =%d\n" @@ -481,7 +485,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n" @@ -497,7 +501,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->c.cstate_pstate.cstate_exit_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n" @@ -513,7 +517,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_C calculated =%d\n" @@ -529,7 +533,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->c.cstate_pstate.cstate_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.cstate_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_C calculated =%d\n" @@ -546,7 +550,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); 
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n" @@ -562,7 +566,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->d.cstate_pstate.cstate_exit_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n" @@ -578,7 +582,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_D calculated =%d\n" @@ -594,7 +598,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->d.cstate_pstate.cstate_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.cstate_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_D calculated =%d\n" @@ -625,7 +629,7 @@ static bool hubbub31_program_pstate_watermarks( watermarks->a.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.pstate_change_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n" @@ -642,7 +646,7 @@ static bool hubbub31_program_pstate_watermarks( watermarks->b.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.pstate_change_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n" @@ -659,7 +663,7 @@ static bool hubbub31_program_pstate_watermarks( watermarks->c.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.pstate_change_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n" @@ -676,7 +680,7 @@ static bool hubbub31_program_pstate_watermarks( watermarks->d.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.pstate_change_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n" @@ -946,6 +950,65 @@ static void hubbub31_get_dchub_ref_freq(struct hubbub *hubbub, } } +static bool hubbub31_verify_allow_pstate_change_high(struct hubbub *hubbub) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + + /* + * Pstate latency is ~20us so if we wait over 40us and pstate allow + * still not asserted, we are probably stuck and 
going to hang + */ + const unsigned int pstate_wait_timeout_us = 100; + const unsigned int pstate_wait_expected_timeout_us = 40; + + static unsigned int max_sampled_pstate_wait_us; /* data collection */ + static bool forced_pstate_allow; /* help with revert wa */ + + unsigned int debug_data = 0; + unsigned int i; + + if (forced_pstate_allow) { + /* we hacked to force pstate allow to prevent hang last time + * we verify_allow_pstate_change_high. so disable force + * here so we can check status + */ + REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL, + DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0, + DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0); + forced_pstate_allow = false; + } + + REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub2->debug_test_index_pstate); + + for (i = 0; i < pstate_wait_timeout_us; i++) { + debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA); + + /* Debug bit is specific to ASIC. */ + if (debug_data & (1 << 26)) { + if (i > pstate_wait_expected_timeout_us) + DC_LOG_WARNING("pstate took longer than expected ~%dus\n", i); + return true; + } + if (max_sampled_pstate_wait_us < i) + max_sampled_pstate_wait_us = i; + + udelay(1); + } + + /* force pstate allow to prevent system hang + * and break to debugger to investigate + */ + REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL, + DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1, + DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1); + forced_pstate_allow = true; + + DC_LOG_WARNING("pstate TEST_DEBUG_DATA: 0x%X\n", + debug_data); + + return false; +} + static const struct hubbub_funcs hubbub31_funcs = { .update_dchub = hubbub2_update_dchub, .init_dchub_sys_ctx = hubbub31_init_dchub_sys_ctx, @@ -958,6 +1021,7 @@ static const struct hubbub_funcs hubbub31_funcs = { .program_watermarks = hubbub31_program_watermarks, .allow_self_refresh_control = hubbub1_allow_self_refresh_control, .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, + .verify_allow_pstate_change_high = hubbub31_verify_allow_pstate_change_high, .program_det_size = dcn31_program_det_size, .program_compbuf_size = dcn31_program_compbuf_size, .init_crb = dcn31_init_crb, @@ -979,5 +1043,7 @@ void hubbub31_construct(struct dcn20_hubbub *hubbub31, hubbub31->detile_buf_size = det_size_kb * 1024; hubbub31->pixel_chunk_size = pixel_chunk_size_kb * 1024; hubbub31->crb_size_segs = config_return_buffer_size_kb / DCN31_CRB_SEGMENT_SIZE_KB; + + hubbub31->debug_test_index_pstate = 0x6; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index 3afa1159a5f7..b72d080b302a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -204,6 +204,9 @@ void dcn31_init_hw(struct dc *dc) } } + if (hws->funcs.enable_power_gating_plane) + hws->funcs.enable_power_gating_plane(dc->hwseq, true); + /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which * pipes we want to use. 
@@ -287,8 +290,6 @@ void dcn31_init_hw(struct dc *dc)
			REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}
-	if (hws->funcs.enable_power_gating_plane)
-		hws->funcs.enable_power_gating_plane(dc->hwseq, true);
	if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
		dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index d4fe5352421f..a5ef9d5e7685 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -940,7 +940,7 @@ static const struct dc_debug_options debug_defaults_drv = {
	.max_downscale_src_width = 4096,/*upto true 4K*/
	.disable_pplib_wm_range = false,
	.scl_reset_length10 = true,
-	.sanity_checks = false,
+	.sanity_checks = true,
	.underflow_assert_delay_us = 0xFFFFFFFF,
	.dwb_fi_phase = -1, // -1 = disable,
	.dmub_command_table = true,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 56055df2e8d2..9009b92490f3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -70,6 +70,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(fram
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn2x/dcn2x.o := $(dml_rcflags)
@@ -84,6 +85,7 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_rcfla
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_rcflags)
endif
CFLAGS_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_ccflags)
@@ -99,6 +101,7 @@ DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o
DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o
DML += dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o
DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o
+DML += dsc/rc_calc_fpu.o
endif
AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML))
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h b/drivers/gpu/drm/amd/display/dc/dml/dsc/qp_tables.h
index e5fac9f4181d..e5fac9f4181d 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/qp_tables.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c
new file mode 100644
index 000000000000..3ee858f311d1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c
@@ -0,0 +1,291 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "rc_calc_fpu.h" + +#include "qp_tables.h" +#include "amdgpu_dm/dc_fpu.h" + +#define table_hash(mode, bpc, max_min) ((mode << 16) | (bpc << 8) | max_min) + +#define MODE_SELECT(val444, val422, val420) \ + (cm == CM_444 || cm == CM_RGB) ? (val444) : (cm == CM_422 ? (val422) : (val420)) + + +#define TABLE_CASE(mode, bpc, max) case (table_hash(mode, BPC_##bpc, max)): \ + table = qp_table_##mode##_##bpc##bpc_##max; \ + table_size = sizeof(qp_table_##mode##_##bpc##bpc_##max)/sizeof(*qp_table_##mode##_##bpc##bpc_##max); \ + break + +static int median3(int a, int b, int c) +{ + if (a > b) + swap(a, b); + if (b > c) + swap(b, c); + if (a > b) + swap(b, c); + + return b; +} + +static double dsc_roundf(double num) +{ + if (num < 0.0) + num = num - 0.5; + else + num = num + 0.5; + + return (int)(num); +} + +static double dsc_ceil(double num) +{ + double retval = (int)num; + + if (retval != num && num > 0) + retval = num + 1; + + return (int)retval; +} + +static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc, + enum max_min max_min, float bpp) +{ + int mode = MODE_SELECT(444, 422, 420); + int sel = table_hash(mode, bpc, max_min); + int table_size = 0; + int index; + const struct qp_entry *table = 0L; + + // alias enum + enum { min = DAL_MM_MIN, max = DAL_MM_MAX }; + switch (sel) { + TABLE_CASE(444, 8, max); + TABLE_CASE(444, 8, min); + TABLE_CASE(444, 10, max); + TABLE_CASE(444, 10, min); + TABLE_CASE(444, 12, max); + TABLE_CASE(444, 12, min); + TABLE_CASE(422, 8, max); + TABLE_CASE(422, 8, min); + TABLE_CASE(422, 10, max); + TABLE_CASE(422, 10, min); + TABLE_CASE(422, 12, max); + TABLE_CASE(422, 12, min); + TABLE_CASE(420, 8, max); + TABLE_CASE(420, 8, min); + TABLE_CASE(420, 10, max); + TABLE_CASE(420, 10, min); + TABLE_CASE(420, 12, max); + TABLE_CASE(420, 12, min); + } + + if (table == 0) + return; + + index = (bpp - table[0].bpp) * 2; + + /* requested size is bigger than the table */ + if (index >= table_size) { + dm_error("ERROR: Requested rc_calc to find a bpp entry that exceeds the table size\n"); + return; + } + + memcpy(qps, table[index].qps, sizeof(qp_set)); +} + +static void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp) +{ + int *p = ofs; + + if (mode == CM_444 || mode == CM_RGB) { + *p++ = (bpp <= 6) ? (0) : ((((bpp >= 8) && (bpp <= 12))) ? (2) : ((bpp >= 15) ? (10) : ((((bpp > 6) && (bpp < 8))) ? 
(0 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (2 + dsc_roundf((bpp - 12) * (8 / 3.0)))))); + *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (8) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (8 / 3.0)))))); + *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (6) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); + *p++ = (bpp <= 6) ? (-4) : ((((bpp >= 8) && (bpp <= 12))) ? (-2) : ((bpp >= 15) ? (4) : ((((bpp > 6) && (bpp < 8))) ? (-4 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-2 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); + *p++ = (bpp <= 6) ? (-6) : ((((bpp >= 8) && (bpp <= 12))) ? (-4) : ((bpp >= 15) ? (2) : ((((bpp > 6) && (bpp < 8))) ? (-6 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-4 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); + *p++ = (bpp <= 12) ? (-6) : ((bpp >= 15) ? (0) : (-6 + dsc_roundf((bpp - 12) * (6 / 3.0)))); + *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-2) : (-8 + dsc_roundf((bpp - 12) * (6 / 3.0)))); + *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-4) : (-8 + dsc_roundf((bpp - 12) * (4 / 3.0)))); + *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-6) : (-8 + dsc_roundf((bpp - 12) * (2 / 3.0)))); + *p++ = (bpp <= 12) ? (-10) : ((bpp >= 15) ? (-8) : (-10 + dsc_roundf((bpp - 12) * (2 / 3.0)))); + *p++ = -10; + *p++ = (bpp <= 6) ? (-12) : ((bpp >= 8) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2 / 2.0)))); + *p++ = -12; + *p++ = -12; + *p++ = -12; + } else if (mode == CM_422) { + *p++ = (bpp <= 8) ? (2) : ((bpp >= 10) ? (10) : (2 + dsc_roundf((bpp - 8) * (8 / 2.0)))); + *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (8) : (0 + dsc_roundf((bpp - 8) * (8 / 2.0)))); + *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (6) : (0 + dsc_roundf((bpp - 8) * (6 / 2.0)))); + *p++ = (bpp <= 8) ? (-2) : ((bpp >= 10) ? (4) : (-2 + dsc_roundf((bpp - 8) * (6 / 2.0)))); + *p++ = (bpp <= 8) ? (-4) : ((bpp >= 10) ? (2) : (-4 + dsc_roundf((bpp - 8) * (6 / 2.0)))); + *p++ = (bpp <= 8) ? (-6) : ((bpp >= 10) ? (0) : (-6 + dsc_roundf((bpp - 8) * (6 / 2.0)))); + *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-2) : (-8 + dsc_roundf((bpp - 8) * (6 / 2.0)))); + *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-4) : (-8 + dsc_roundf((bpp - 8) * (4 / 2.0)))); + *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-6) : (-8 + dsc_roundf((bpp - 8) * (2 / 2.0)))); + *p++ = (bpp <= 8) ? (-10) : ((bpp >= 10) ? (-8) : (-10 + dsc_roundf((bpp - 8) * (2 / 2.0)))); + *p++ = -10; + *p++ = (bpp <= 6) ? (-12) : ((bpp >= 7) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2.0 / 1)))); + *p++ = -12; + *p++ = -12; + *p++ = -12; + } else { + *p++ = (bpp <= 6) ? (2) : ((bpp >= 8) ? (10) : (2 + dsc_roundf((bpp - 6) * (8 / 2.0)))); + *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (8) : (0 + dsc_roundf((bpp - 6) * (8 / 2.0)))); + *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (6) : (0 + dsc_roundf((bpp - 6) * (6 / 2.0)))); + *p++ = (bpp <= 6) ? (-2) : ((bpp >= 8) ? (4) : (-2 + dsc_roundf((bpp - 6) * (6 / 2.0)))); + *p++ = (bpp <= 6) ? (-4) : ((bpp >= 8) ? (2) : (-4 + dsc_roundf((bpp - 6) * (6 / 2.0)))); + *p++ = (bpp <= 6) ? (-6) : ((bpp >= 8) ? (0) : (-6 + dsc_roundf((bpp - 6) * (6 / 2.0)))); + *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-2) : (-8 + dsc_roundf((bpp - 6) * (6 / 2.0)))); + *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-4) : (-8 + dsc_roundf((bpp - 6) * (4 / 2.0)))); + *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-6) : (-8 + dsc_roundf((bpp - 6) * (2 / 2.0)))); + *p++ = (bpp <= 6) ? (-10) : ((bpp >= 8) ? 
(-8) : (-10 + dsc_roundf((bpp - 6) * (2 / 2.0)))); + *p++ = -10; + *p++ = (bpp <= 4) ? (-12) : ((bpp >= 5) ? (-10) : (-12 + dsc_roundf((bpp - 4) * (2 / 1.0)))); + *p++ = -12; + *p++ = -12; + *p++ = -12; + } +} + +void _do_calc_rc_params(struct rc_params *rc, + enum colour_mode cm, + enum bits_per_comp bpc, + u16 drm_bpp, + bool is_navite_422_or_420, + int slice_width, + int slice_height, + int minor_version) +{ + float bpp; + float bpp_group; + float initial_xmit_delay_factor; + int padding_pixels; + int i; + + dc_assert_fp_enabled(); + + bpp = ((float)drm_bpp / 16.0); + /* in native_422 or native_420 modes, the bits_per_pixel is double the + * target bpp (the latter is what calc_rc_params expects) + */ + if (is_navite_422_or_420) + bpp /= 2.0; + + rc->rc_quant_incr_limit0 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); + rc->rc_quant_incr_limit1 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); + + bpp_group = MODE_SELECT(bpp, bpp * 2.0, bpp * 2.0); + + switch (cm) { + case CM_420: + rc->initial_fullness_offset = (bpp >= 6) ? (2048) : ((bpp <= 4) ? (6144) : ((((bpp > 4) && (bpp <= 5))) ? (6144 - dsc_roundf((bpp - 4) * (512))) : (5632 - dsc_roundf((bpp - 5) * (3584))))); + rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 3) - (3 * bpp_group))); + rc->second_line_bpg_offset = median3(0, 12, (int)((3 * bpc * 3) - (3 * bpp_group))); + break; + case CM_422: + rc->initial_fullness_offset = (bpp >= 8) ? (2048) : ((bpp <= 7) ? (5632) : (5632 - dsc_roundf((bpp - 7) * (3584)))); + rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 4) - (3 * bpp_group))); + rc->second_line_bpg_offset = 0; + break; + case CM_444: + case CM_RGB: + rc->initial_fullness_offset = (bpp >= 12) ? (2048) : ((bpp <= 8) ? (6144) : ((((bpp > 8) && (bpp <= 10))) ? (6144 - dsc_roundf((bpp - 8) * (512 / 2))) : (5632 - dsc_roundf((bpp - 10) * (3584 / 2))))); + rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)(((3 * bpc + (cm == CM_444 ? 0 : 2)) * 3) - (3 * bpp_group))); + rc->second_line_bpg_offset = 0; + break; + } + + initial_xmit_delay_factor = (cm == CM_444 || cm == CM_RGB) ? 1.0 : 2.0; + rc->initial_xmit_delay = dsc_roundf(8192.0/2.0/bpp/initial_xmit_delay_factor); + + if (cm == CM_422 || cm == CM_420) + slice_width /= 2; + + padding_pixels = ((slice_width % 3) != 0) ? (3 - (slice_width % 3)) * (rc->initial_xmit_delay / slice_width) : 0; + if (3 * bpp_group >= (((rc->initial_xmit_delay + 2) / 3) * (3 + (cm == CM_422)))) { + if ((rc->initial_xmit_delay + padding_pixels) % 3 == 1) + rc->initial_xmit_delay++; + } + + rc->flatness_min_qp = ((bpc == BPC_8) ? (3) : ((bpc == BPC_10) ? (7) : (11))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); + rc->flatness_max_qp = ((bpc == BPC_8) ? (12) : ((bpc == BPC_10) ? (16) : (20))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); + rc->flatness_det_thresh = 2 << (bpc - 8); + + get_qp_set(rc->qp_min, cm, bpc, DAL_MM_MIN, bpp); + get_qp_set(rc->qp_max, cm, bpc, DAL_MM_MAX, bpp); + if (cm == CM_444 && minor_version == 1) { + for (i = 0; i < QP_SET_SIZE; ++i) { + rc->qp_min[i] = rc->qp_min[i] > 0 ? rc->qp_min[i] - 1 : 0; + rc->qp_max[i] = rc->qp_max[i] > 0 ? 
rc->qp_max[i] - 1 : 0; + } + } + get_ofs_set(rc->ofs, cm, bpp); + + /* fixed parameters */ + rc->rc_model_size = 8192; + rc->rc_edge_factor = 6; + rc->rc_tgt_offset_hi = 3; + rc->rc_tgt_offset_lo = 3; + + rc->rc_buf_thresh[0] = 896; + rc->rc_buf_thresh[1] = 1792; + rc->rc_buf_thresh[2] = 2688; + rc->rc_buf_thresh[3] = 3584; + rc->rc_buf_thresh[4] = 4480; + rc->rc_buf_thresh[5] = 5376; + rc->rc_buf_thresh[6] = 6272; + rc->rc_buf_thresh[7] = 6720; + rc->rc_buf_thresh[8] = 7168; + rc->rc_buf_thresh[9] = 7616; + rc->rc_buf_thresh[10] = 7744; + rc->rc_buf_thresh[11] = 7872; + rc->rc_buf_thresh[12] = 8000; + rc->rc_buf_thresh[13] = 8064; +} + +u32 _do_bytes_per_pixel_calc(int slice_width, + u16 drm_bpp, + bool is_navite_422_or_420) +{ + float bpp; + u32 bytes_per_pixel; + double d_bytes_per_pixel; + + dc_assert_fp_enabled(); + + bpp = ((float)drm_bpp / 16.0); + d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width; + // TODO: Make sure the formula for calculating this is precise (ceiling + // vs. floor, and at what point they should be applied) + if (is_navite_422_or_420) + d_bytes_per_pixel /= 2; + + bytes_per_pixel = (u32)dsc_ceil(d_bytes_per_pixel * 0x10000000); + + return bytes_per_pixel; +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h new file mode 100644 index 000000000000..b93b95409fbe --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h @@ -0,0 +1,94 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __RC_CALC_FPU_H__ +#define __RC_CALC_FPU_H__ + +#include "os_types.h" +#include <drm/drm_dsc.h> + +#define QP_SET_SIZE 15 + +typedef int qp_set[QP_SET_SIZE]; + +struct rc_params { + int rc_quant_incr_limit0; + int rc_quant_incr_limit1; + int initial_fullness_offset; + int initial_xmit_delay; + int first_line_bpg_offset; + int second_line_bpg_offset; + int flatness_min_qp; + int flatness_max_qp; + int flatness_det_thresh; + qp_set qp_min; + qp_set qp_max; + qp_set ofs; + int rc_model_size; + int rc_edge_factor; + int rc_tgt_offset_hi; + int rc_tgt_offset_lo; + int rc_buf_thresh[QP_SET_SIZE - 1]; +}; + +enum colour_mode { + CM_RGB, /* 444 RGB */ + CM_444, /* 444 YUV or simple 422 */ + CM_422, /* native 422 */ + CM_420 /* native 420 */ +}; + +enum bits_per_comp { + BPC_8 = 8, + BPC_10 = 10, + BPC_12 = 12 +}; + +enum max_min { + DAL_MM_MIN = 0, + DAL_MM_MAX = 1 +}; + +struct qp_entry { + float bpp; + const qp_set qps; +}; + +typedef struct qp_entry qp_table[]; + +u32 _do_bytes_per_pixel_calc(int slice_width, + u16 drm_bpp, + bool is_navite_422_or_420); + +void _do_calc_rc_params(struct rc_params *rc, + enum colour_mode cm, + enum bits_per_comp bpc, + u16 drm_bpp, + bool is_navite_422_or_420, + int slice_width, + int slice_height, + int minor_version); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile index 8d31eb75c6a6..a2537229ee88 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile @@ -1,35 +1,6 @@ # SPDX-License-Identifier: MIT # # Makefile for the 'dsc' sub-component of DAL. - -ifdef CONFIG_X86 -dsc_ccflags := -mhard-float -msse -endif - -ifdef CONFIG_PPC64 -dsc_ccflags := -mhard-float -maltivec -endif - -ifdef CONFIG_CC_IS_GCC -ifeq ($(call cc-ifversion, -lt, 0701, y), y) -IS_OLD_GCC = 1 -endif -endif - -ifdef CONFIG_X86 -ifdef IS_OLD_GCC -# Stack alignment mismatch, proceed with caution. -# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 -# (8B stack alignment). -dsc_ccflags += -mpreferred-stack-boundary=4 -else -dsc_ccflags += -msse2 -endif -endif - -CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_ccflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_rcflags) - DSC = dc_dsc.o rc_calc.o rc_calc_dpi.o AMD_DAL_DSC = $(addprefix $(AMDDALPATH)/dc/dsc/,$(DSC)) diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c index 7b294f637881..b19d3aeb5962 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c @@ -23,266 +23,7 @@ * Authors: AMD * */ -#include <drm/drm_dsc.h> - -#include "os_types.h" #include "rc_calc.h" -#include "qp_tables.h" - -#define table_hash(mode, bpc, max_min) ((mode << 16) | (bpc << 8) | max_min) - -#define MODE_SELECT(val444, val422, val420) \ - (cm == CM_444 || cm == CM_RGB) ? (val444) : (cm == CM_422 ? 
(val422) : (val420)) - - -#define TABLE_CASE(mode, bpc, max) case (table_hash(mode, BPC_##bpc, max)): \ - table = qp_table_##mode##_##bpc##bpc_##max; \ - table_size = sizeof(qp_table_##mode##_##bpc##bpc_##max)/sizeof(*qp_table_##mode##_##bpc##bpc_##max); \ - break - - -static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc, - enum max_min max_min, float bpp) -{ - int mode = MODE_SELECT(444, 422, 420); - int sel = table_hash(mode, bpc, max_min); - int table_size = 0; - int index; - const struct qp_entry *table = 0L; - - // alias enum - enum { min = DAL_MM_MIN, max = DAL_MM_MAX }; - switch (sel) { - TABLE_CASE(444, 8, max); - TABLE_CASE(444, 8, min); - TABLE_CASE(444, 10, max); - TABLE_CASE(444, 10, min); - TABLE_CASE(444, 12, max); - TABLE_CASE(444, 12, min); - TABLE_CASE(422, 8, max); - TABLE_CASE(422, 8, min); - TABLE_CASE(422, 10, max); - TABLE_CASE(422, 10, min); - TABLE_CASE(422, 12, max); - TABLE_CASE(422, 12, min); - TABLE_CASE(420, 8, max); - TABLE_CASE(420, 8, min); - TABLE_CASE(420, 10, max); - TABLE_CASE(420, 10, min); - TABLE_CASE(420, 12, max); - TABLE_CASE(420, 12, min); - } - - if (table == 0) - return; - - index = (bpp - table[0].bpp) * 2; - - /* requested size is bigger than the table */ - if (index >= table_size) { - dm_error("ERROR: Requested rc_calc to find a bpp entry that exceeds the table size\n"); - return; - } - - memcpy(qps, table[index].qps, sizeof(qp_set)); -} - -static double dsc_roundf(double num) -{ - if (num < 0.0) - num = num - 0.5; - else - num = num + 0.5; - - return (int)(num); -} - -static double dsc_ceil(double num) -{ - double retval = (int)num; - - if (retval != num && num > 0) - retval = num + 1; - - return (int)retval; -} - -static void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp) -{ - int *p = ofs; - - if (mode == CM_444 || mode == CM_RGB) { - *p++ = (bpp <= 6) ? (0) : ((((bpp >= 8) && (bpp <= 12))) ? (2) : ((bpp >= 15) ? (10) : ((((bpp > 6) && (bpp < 8))) ? (0 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (2 + dsc_roundf((bpp - 12) * (8 / 3.0)))))); - *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (8) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (8 / 3.0)))))); - *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (6) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); - *p++ = (bpp <= 6) ? (-4) : ((((bpp >= 8) && (bpp <= 12))) ? (-2) : ((bpp >= 15) ? (4) : ((((bpp > 6) && (bpp < 8))) ? (-4 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-2 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); - *p++ = (bpp <= 6) ? (-6) : ((((bpp >= 8) && (bpp <= 12))) ? (-4) : ((bpp >= 15) ? (2) : ((((bpp > 6) && (bpp < 8))) ? (-6 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-4 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); - *p++ = (bpp <= 12) ? (-6) : ((bpp >= 15) ? (0) : (-6 + dsc_roundf((bpp - 12) * (6 / 3.0)))); - *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-2) : (-8 + dsc_roundf((bpp - 12) * (6 / 3.0)))); - *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-4) : (-8 + dsc_roundf((bpp - 12) * (4 / 3.0)))); - *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-6) : (-8 + dsc_roundf((bpp - 12) * (2 / 3.0)))); - *p++ = (bpp <= 12) ? (-10) : ((bpp >= 15) ? (-8) : (-10 + dsc_roundf((bpp - 12) * (2 / 3.0)))); - *p++ = -10; - *p++ = (bpp <= 6) ? (-12) : ((bpp >= 8) ? 
(-10) : (-12 + dsc_roundf((bpp - 6) * (2 / 2.0)))); - *p++ = -12; - *p++ = -12; - *p++ = -12; - } else if (mode == CM_422) { - *p++ = (bpp <= 8) ? (2) : ((bpp >= 10) ? (10) : (2 + dsc_roundf((bpp - 8) * (8 / 2.0)))); - *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (8) : (0 + dsc_roundf((bpp - 8) * (8 / 2.0)))); - *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (6) : (0 + dsc_roundf((bpp - 8) * (6 / 2.0)))); - *p++ = (bpp <= 8) ? (-2) : ((bpp >= 10) ? (4) : (-2 + dsc_roundf((bpp - 8) * (6 / 2.0)))); - *p++ = (bpp <= 8) ? (-4) : ((bpp >= 10) ? (2) : (-4 + dsc_roundf((bpp - 8) * (6 / 2.0)))); - *p++ = (bpp <= 8) ? (-6) : ((bpp >= 10) ? (0) : (-6 + dsc_roundf((bpp - 8) * (6 / 2.0)))); - *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-2) : (-8 + dsc_roundf((bpp - 8) * (6 / 2.0)))); - *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-4) : (-8 + dsc_roundf((bpp - 8) * (4 / 2.0)))); - *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-6) : (-8 + dsc_roundf((bpp - 8) * (2 / 2.0)))); - *p++ = (bpp <= 8) ? (-10) : ((bpp >= 10) ? (-8) : (-10 + dsc_roundf((bpp - 8) * (2 / 2.0)))); - *p++ = -10; - *p++ = (bpp <= 6) ? (-12) : ((bpp >= 7) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2.0 / 1)))); - *p++ = -12; - *p++ = -12; - *p++ = -12; - } else { - *p++ = (bpp <= 6) ? (2) : ((bpp >= 8) ? (10) : (2 + dsc_roundf((bpp - 6) * (8 / 2.0)))); - *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (8) : (0 + dsc_roundf((bpp - 6) * (8 / 2.0)))); - *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (6) : (0 + dsc_roundf((bpp - 6) * (6 / 2.0)))); - *p++ = (bpp <= 6) ? (-2) : ((bpp >= 8) ? (4) : (-2 + dsc_roundf((bpp - 6) * (6 / 2.0)))); - *p++ = (bpp <= 6) ? (-4) : ((bpp >= 8) ? (2) : (-4 + dsc_roundf((bpp - 6) * (6 / 2.0)))); - *p++ = (bpp <= 6) ? (-6) : ((bpp >= 8) ? (0) : (-6 + dsc_roundf((bpp - 6) * (6 / 2.0)))); - *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-2) : (-8 + dsc_roundf((bpp - 6) * (6 / 2.0)))); - *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-4) : (-8 + dsc_roundf((bpp - 6) * (4 / 2.0)))); - *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-6) : (-8 + dsc_roundf((bpp - 6) * (2 / 2.0)))); - *p++ = (bpp <= 6) ? (-10) : ((bpp >= 8) ? (-8) : (-10 + dsc_roundf((bpp - 6) * (2 / 2.0)))); - *p++ = -10; - *p++ = (bpp <= 4) ? (-12) : ((bpp >= 5) ? (-10) : (-12 + dsc_roundf((bpp - 4) * (2 / 1.0)))); - *p++ = -12; - *p++ = -12; - *p++ = -12; - } -} - -static int median3(int a, int b, int c) -{ - if (a > b) - swap(a, b); - if (b > c) - swap(b, c); - if (a > b) - swap(b, c); - - return b; -} - -static void _do_calc_rc_params(struct rc_params *rc, enum colour_mode cm, - enum bits_per_comp bpc, u16 drm_bpp, - bool is_navite_422_or_420, - int slice_width, int slice_height, - int minor_version) -{ - float bpp; - float bpp_group; - float initial_xmit_delay_factor; - int padding_pixels; - int i; - - bpp = ((float)drm_bpp / 16.0); - /* in native_422 or native_420 modes, the bits_per_pixel is double the - * target bpp (the latter is what calc_rc_params expects) - */ - if (is_navite_422_or_420) - bpp /= 2.0; - - rc->rc_quant_incr_limit0 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); - rc->rc_quant_incr_limit1 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); - - bpp_group = MODE_SELECT(bpp, bpp * 2.0, bpp * 2.0); - - switch (cm) { - case CM_420: - rc->initial_fullness_offset = (bpp >= 6) ? (2048) : ((bpp <= 4) ? (6144) : ((((bpp > 4) && (bpp <= 5))) ? 
(6144 - dsc_roundf((bpp - 4) * (512))) : (5632 - dsc_roundf((bpp - 5) * (3584))))); - rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 3) - (3 * bpp_group))); - rc->second_line_bpg_offset = median3(0, 12, (int)((3 * bpc * 3) - (3 * bpp_group))); - break; - case CM_422: - rc->initial_fullness_offset = (bpp >= 8) ? (2048) : ((bpp <= 7) ? (5632) : (5632 - dsc_roundf((bpp - 7) * (3584)))); - rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 4) - (3 * bpp_group))); - rc->second_line_bpg_offset = 0; - break; - case CM_444: - case CM_RGB: - rc->initial_fullness_offset = (bpp >= 12) ? (2048) : ((bpp <= 8) ? (6144) : ((((bpp > 8) && (bpp <= 10))) ? (6144 - dsc_roundf((bpp - 8) * (512 / 2))) : (5632 - dsc_roundf((bpp - 10) * (3584 / 2))))); - rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)(((3 * bpc + (cm == CM_444 ? 0 : 2)) * 3) - (3 * bpp_group))); - rc->second_line_bpg_offset = 0; - break; - } - - initial_xmit_delay_factor = (cm == CM_444 || cm == CM_RGB) ? 1.0 : 2.0; - rc->initial_xmit_delay = dsc_roundf(8192.0/2.0/bpp/initial_xmit_delay_factor); - - if (cm == CM_422 || cm == CM_420) - slice_width /= 2; - - padding_pixels = ((slice_width % 3) != 0) ? (3 - (slice_width % 3)) * (rc->initial_xmit_delay / slice_width) : 0; - if (3 * bpp_group >= (((rc->initial_xmit_delay + 2) / 3) * (3 + (cm == CM_422)))) { - if ((rc->initial_xmit_delay + padding_pixels) % 3 == 1) - rc->initial_xmit_delay++; - } - - rc->flatness_min_qp = ((bpc == BPC_8) ? (3) : ((bpc == BPC_10) ? (7) : (11))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); - rc->flatness_max_qp = ((bpc == BPC_8) ? (12) : ((bpc == BPC_10) ? (16) : (20))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); - rc->flatness_det_thresh = 2 << (bpc - 8); - - get_qp_set(rc->qp_min, cm, bpc, DAL_MM_MIN, bpp); - get_qp_set(rc->qp_max, cm, bpc, DAL_MM_MAX, bpp); - if (cm == CM_444 && minor_version == 1) { - for (i = 0; i < QP_SET_SIZE; ++i) { - rc->qp_min[i] = rc->qp_min[i] > 0 ? rc->qp_min[i] - 1 : 0; - rc->qp_max[i] = rc->qp_max[i] > 0 ? rc->qp_max[i] - 1 : 0; - } - } - get_ofs_set(rc->ofs, cm, bpp); - - /* fixed parameters */ - rc->rc_model_size = 8192; - rc->rc_edge_factor = 6; - rc->rc_tgt_offset_hi = 3; - rc->rc_tgt_offset_lo = 3; - - rc->rc_buf_thresh[0] = 896; - rc->rc_buf_thresh[1] = 1792; - rc->rc_buf_thresh[2] = 2688; - rc->rc_buf_thresh[3] = 3584; - rc->rc_buf_thresh[4] = 4480; - rc->rc_buf_thresh[5] = 5376; - rc->rc_buf_thresh[6] = 6272; - rc->rc_buf_thresh[7] = 6720; - rc->rc_buf_thresh[8] = 7168; - rc->rc_buf_thresh[9] = 7616; - rc->rc_buf_thresh[10] = 7744; - rc->rc_buf_thresh[11] = 7872; - rc->rc_buf_thresh[12] = 8000; - rc->rc_buf_thresh[13] = 8064; -} - -static u32 _do_bytes_per_pixel_calc(int slice_width, u16 drm_bpp, - bool is_navite_422_or_420) -{ - float bpp; - u32 bytes_per_pixel; - double d_bytes_per_pixel; - - bpp = ((float)drm_bpp / 16.0); - d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width; - // TODO: Make sure the formula for calculating this is precise (ceiling - // vs. 
floor, and at what point they should be applied) - if (is_navite_422_or_420) - d_bytes_per_pixel /= 2; - - bytes_per_pixel = (u32)dsc_ceil(d_bytes_per_pixel * 0x10000000); - - return bytes_per_pixel; -} /** * calc_rc_params - reads the user's cmdline mode diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h index 262f06afcbf9..c2340e001b57 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h @@ -27,55 +27,7 @@ #ifndef __RC_CALC_H__ #define __RC_CALC_H__ - -#define QP_SET_SIZE 15 - -typedef int qp_set[QP_SET_SIZE]; - -struct rc_params { - int rc_quant_incr_limit0; - int rc_quant_incr_limit1; - int initial_fullness_offset; - int initial_xmit_delay; - int first_line_bpg_offset; - int second_line_bpg_offset; - int flatness_min_qp; - int flatness_max_qp; - int flatness_det_thresh; - qp_set qp_min; - qp_set qp_max; - qp_set ofs; - int rc_model_size; - int rc_edge_factor; - int rc_tgt_offset_hi; - int rc_tgt_offset_lo; - int rc_buf_thresh[QP_SET_SIZE - 1]; -}; - -enum colour_mode { - CM_RGB, /* 444 RGB */ - CM_444, /* 444 YUV or simple 422 */ - CM_422, /* native 422 */ - CM_420 /* native 420 */ -}; - -enum bits_per_comp { - BPC_8 = 8, - BPC_10 = 10, - BPC_12 = 12 -}; - -enum max_min { - DAL_MM_MIN = 0, - DAL_MM_MAX = 1 -}; - -struct qp_entry { - float bpp; - const qp_set qps; -}; - -typedef struct qp_entry qp_table[]; +#include "dml/dsc/rc_calc_fpu.h" void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps); u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps); diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c index ef830aded5b1..1e19dd674e5a 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c @@ -22,7 +22,6 @@ * Authors: AMD * */ -#include "os_types.h" #include <drm/drm_dsc.h> #include "dscc_types.h" #include "rc_calc.h" diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 713f5558f5e1..9195dec294c2 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -154,6 +154,8 @@ struct hubbub_funcs { bool (*is_allow_self_refresh_enabled)(struct hubbub *hubbub); void (*allow_self_refresh_control)(struct hubbub *hubbub, bool allow); + bool (*verify_allow_pstate_change_high)(struct hubbub *hubbub); + void (*apply_DEDCN21_147_wa)(struct hubbub *hubbub); void (*force_wm_propagate_to_pipes)(struct hubbub *hubbub); diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c index ed54e1c819be..a728087b3f3d 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c @@ -266,14 +266,6 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = { .funcs = &pflip_irq_info_funcs\ } -#define vupdate_int_entry(reg_num)\ - [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\ - IRQ_REG_ENTRY(OTG, reg_num,\ - OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\ - OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\ - .funcs = &vblank_irq_info_funcs\ - } - /* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic * of DCE's DC_IRQ_SOURCE_VUPDATEx. 
*/ @@ -402,12 +394,6 @@ irq_source_info_dcn21[DAL_IRQ_SOURCES_NUMBER] = { dc_underflow_int_entry(6), [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(), [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(), - vupdate_int_entry(0), - vupdate_int_entry(1), - vupdate_int_entry(2), - vupdate_int_entry(3), - vupdate_int_entry(4), - vupdate_int_entry(5), vupdate_no_lock_int_entry(0), vupdate_no_lock_int_entry(1), vupdate_no_lock_int_entry(2), diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h index 571fcf23cea9..a3a9ea077f50 100644 --- a/drivers/gpu/drm/amd/display/include/logger_types.h +++ b/drivers/gpu/drm/amd/display/include/logger_types.h @@ -72,6 +72,9 @@ #define DC_LOG_DSC(...) DRM_DEBUG_KMS(__VA_ARGS__) #define DC_LOG_SMU(...) pr_debug("[SMU_MSG]:"__VA_ARGS__) #define DC_LOG_DWB(...) DRM_DEBUG_KMS(__VA_ARGS__) +#if defined(CONFIG_DRM_AMD_DC_DCN) +#define DC_LOG_DP2(...) DRM_DEBUG_KMS(__VA_ARGS__) +#endif struct dal_logger; diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index 57f198de5e2c..4e075b01d48b 100644 --- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -100,7 +100,8 @@ enum vsc_packet_revision { //PB7 = MD0 #define MASK_VTEM_MD0__VRR_EN 0x01 #define MASK_VTEM_MD0__M_CONST 0x02 -#define MASK_VTEM_MD0__RESERVED2 0x0C +#define MASK_VTEM_MD0__QMS_EN 0x04 +#define MASK_VTEM_MD0__RESERVED2 0x08 #define MASK_VTEM_MD0__FVA_FACTOR_M1 0xF0 //MD1 @@ -109,7 +110,7 @@ enum vsc_packet_revision { //MD2 #define MASK_VTEM_MD2__BASE_REFRESH_RATE_98 0x03 #define MASK_VTEM_MD2__RB 0x04 -#define MASK_VTEM_MD2__RESERVED3 0xF8 +#define MASK_VTEM_MD2__NEXT_TFR 0xF8 //MD3 #define MASK_VTEM_MD3__BASE_REFRESH_RATE_07 0xFF diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 08362d506534..a68496b3f929 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -1045,6 +1045,17 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) if (!pp_funcs || !pp_funcs->get_asic_baco_capability) return false; + /* Don't use baco for reset in S3. + * This is a workaround for some platforms + * where entering BACO during suspend + * seems to cause reboots or hangs. + * This might be related to the fact that BACO controls + * power to the whole GPU including devices like audio and USB. + * Powering down/up everything may adversely affect these other + * devices. Needs more investigation. 
+ */ + if (adev->in_s3) + return false; if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap)) return false; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 640db5020ccc..6aaf1230655f 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -2117,8 +2117,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ } } - /* setting should not be allowed from VF */ - if (amdgpu_sriov_vf(adev)) { + /* setting should not be allowed from VF if not in one VF mode */ + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) { dev_attr->attr.mode &= ~S_IWUGO; dev_attr->store = NULL; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c index 1f406f21b452..cf74621f94a7 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c @@ -773,13 +773,13 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, hwmgr->display_config->num_display > 3 ? - data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk : + (data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk / 100) : min_mclk, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinSocclkByFreq, - data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk, + data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk / 100, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinVcn, @@ -792,11 +792,11 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxFclkByFreq, - data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk, + data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk / 100, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxSocclkByFreq, - data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk, + data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk / 100, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxVcn, diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 6dc83cfad9d8..8acdb244b99f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -138,7 +138,7 @@ int smu_get_dpm_freq_range(struct smu_context *smu, uint32_t *min, uint32_t *max) { - int ret = 0; + int ret = -ENOTSUPP; if (!min && !max) return -EINVAL; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index f89bf49965fc..574a9d7f7a5e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -418,6 +418,36 @@ static int sienna_cichlid_store_powerplay_table(struct smu_context *smu) return 0; } +static int sienna_cichlid_patch_pptable_quirk(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t *board_reserved; + uint16_t *freq_table_gfx; + uint32_t i; + + /* Fix some OEM SKU specific stability issues */ + GET_PPTABLE_MEMBER(BoardReserved, &board_reserved); + if ((adev->pdev->device == 0x73DF) && + (adev->pdev->revision == 0XC3) && + (adev->pdev->subsystem_device == 0x16C2) && + (adev->pdev->subsystem_vendor == 0x1043)) + board_reserved[0] = 1387; + + GET_PPTABLE_MEMBER(FreqTableGfx, &freq_table_gfx); + if 
((adev->pdev->device == 0x73DF) && + (adev->pdev->revision == 0XC3) && + ((adev->pdev->subsystem_device == 0x16C2) || + (adev->pdev->subsystem_device == 0x133C)) && + (adev->pdev->subsystem_vendor == 0x1043)) { + for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) { + if (freq_table_gfx[i] > 2500) + freq_table_gfx[i] = 2500; + } + } + + return 0; +} + static int sienna_cichlid_setup_pptable(struct smu_context *smu) { int ret = 0; @@ -438,7 +468,7 @@ static int sienna_cichlid_setup_pptable(struct smu_context *smu) if (ret) return ret; - return ret; + return sienna_cichlid_patch_pptable_quirk(smu); } static int sienna_cichlid_tables_init(struct smu_context *smu) @@ -1278,21 +1308,37 @@ static int sienna_cichlid_populate_umd_state_clk(struct smu_context *smu) &dpm_context->dpm_tables.soc_table; struct smu_umd_pstate_table *pstate_table = &smu->pstate_table; + struct amdgpu_device *adev = smu->adev; pstate_table->gfxclk_pstate.min = gfx_table->min; pstate_table->gfxclk_pstate.peak = gfx_table->max; - if (gfx_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK) - pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK; pstate_table->uclk_pstate.min = mem_table->min; pstate_table->uclk_pstate.peak = mem_table->max; - if (mem_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK) - pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK; pstate_table->socclk_pstate.min = soc_table->min; pstate_table->socclk_pstate.peak = soc_table->max; - if (soc_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK) + + switch (adev->asic_type) { + case CHIP_SIENNA_CICHLID: + case CHIP_NAVY_FLOUNDER: + pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK; + pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK; pstate_table->socclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK; + break; + case CHIP_DIMGREY_CAVEFISH: + pstate_table->gfxclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK; + pstate_table->uclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK; + pstate_table->socclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK; + break; + case CHIP_BEIGE_GOBY: + pstate_table->gfxclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK; + pstate_table->uclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK; + pstate_table->socclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK; + break; + default: + break; + } return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h index 38cd0ece24f6..42f705c7a36f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h @@ -33,6 +33,14 @@ typedef enum { #define SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK 960 #define SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK 1000 +#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK 1950 +#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK 960 +#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK 676 + +#define BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK 2200 +#define BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK 960 +#define BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK 1000 + extern void sienna_cichlid_set_ppt_funcs(struct smu_context *smu); #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index a403657151ba..0e1a843608e4 100644 --- 
a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -291,14 +291,9 @@ static int yellow_carp_post_smu_init(struct smu_context *smu) static int yellow_carp_mode_reset(struct smu_context *smu, int type) { - int ret = 0, index = 0; - - index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, - SMU_MSG_GfxDeviceDriverReset); - if (index < 0) - return index == -EACCES ? 0 : index; + int ret = 0; - ret = smu_cmn_send_smc_msg_with_param(smu, (uint16_t)index, type, NULL); + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, type, NULL); if (ret) dev_err(smu->adev->dev, "Failed to mode reset!\n"); diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx.h b/drivers/gpu/drm/aspeed/aspeed_gfx.h index 96501152bafa..4e6a442c3886 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx.h +++ b/drivers/gpu/drm/aspeed/aspeed_gfx.h @@ -12,6 +12,7 @@ struct aspeed_gfx { struct regmap *scu; u32 dac_reg; + u32 int_clr_reg; u32 vga_scratch_reg; u32 throd_val; u32 scan_line_max; diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c index 65f172807a0d..5b0fa65aeb25 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c @@ -60,6 +60,7 @@ struct aspeed_gfx_config { u32 dac_reg; /* DAC register in SCU */ + u32 int_clear_reg; /* Interrupt clear register */ u32 vga_scratch_reg; /* VGA scratch register in SCU */ u32 throd_val; /* Default Threshold Seting */ u32 scan_line_max; /* Max memory size of one scan line */ @@ -67,6 +68,7 @@ struct aspeed_gfx_config { static const struct aspeed_gfx_config ast2400_config = { .dac_reg = 0x2c, + .int_clear_reg = 0x60, .vga_scratch_reg = 0x50, .throd_val = CRT_THROD_LOW(0x1e) | CRT_THROD_HIGH(0x12), .scan_line_max = 64, @@ -74,14 +76,24 @@ static const struct aspeed_gfx_config ast2400_config = { static const struct aspeed_gfx_config ast2500_config = { .dac_reg = 0x2c, + .int_clear_reg = 0x60, .vga_scratch_reg = 0x50, .throd_val = CRT_THROD_LOW(0x24) | CRT_THROD_HIGH(0x3c), .scan_line_max = 128, }; +static const struct aspeed_gfx_config ast2600_config = { + .dac_reg = 0xc0, + .int_clear_reg = 0x68, + .vga_scratch_reg = 0x50, + .throd_val = CRT_THROD_LOW(0x50) | CRT_THROD_HIGH(0x70), + .scan_line_max = 128, +}; + static const struct of_device_id aspeed_gfx_match[] = { { .compatible = "aspeed,ast2400-gfx", .data = &ast2400_config }, { .compatible = "aspeed,ast2500-gfx", .data = &ast2500_config }, + { .compatible = "aspeed,ast2600-gfx", .data = &ast2600_config }, { }, }; MODULE_DEVICE_TABLE(of, aspeed_gfx_match); @@ -119,7 +131,7 @@ static irqreturn_t aspeed_gfx_irq_handler(int irq, void *data) if (reg & CRT_CTRL_VERTICAL_INTR_STS) { drm_crtc_handle_vblank(&priv->pipe.crtc); - writel(reg, priv->base + CRT_CTRL1); + writel(reg, priv->base + priv->int_clr_reg); return IRQ_HANDLED; } @@ -147,6 +159,7 @@ static int aspeed_gfx_load(struct drm_device *drm) config = match->data; priv->dac_reg = config->dac_reg; + priv->int_clr_reg = config->int_clear_reg; priv->vga_scratch_reg = config->vga_scratch_reg; priv->throd_val = config->throd_val; priv->scan_line_max = config->scan_line_max; diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 431b6e12a81f..68ec45abc1fb 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -8,7 +8,6 @@ config DRM_BRIDGE config DRM_PANEL_BRIDGE def_bool y depends on DRM_BRIDGE - depends on DRM_KMS_HELPER select DRM_PANEL help DRM bridge wrapper of DRM 
panels @@ -30,6 +29,7 @@ config DRM_CDNS_DSI config DRM_CHIPONE_ICN6211 tristate "Chipone ICN6211 MIPI-DSI/RGB Converter bridge" depends on OF + select DRM_KMS_HELPER select DRM_MIPI_DSI select DRM_PANEL_BRIDGE help diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h index 05e3abb5a0c9..1b00dfda6e0d 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511.h +++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h @@ -169,6 +169,7 @@ #define ADV7511_PACKET_ENABLE_SPARE2 BIT(1) #define ADV7511_PACKET_ENABLE_SPARE1 BIT(0) +#define ADV7535_REG_POWER2_HPD_OVERRIDE BIT(6) #define ADV7511_REG_POWER2_HPD_SRC_MASK 0xc0 #define ADV7511_REG_POWER2_HPD_SRC_BOTH 0x00 #define ADV7511_REG_POWER2_HPD_SRC_HPD 0x40 diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 76555ae64e9c..c02f3ec60b04 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -351,11 +351,17 @@ static void __adv7511_power_on(struct adv7511 *adv7511) * from standby or are enabled. When the HPD goes low the adv7511 is * reset and the outputs are disabled which might cause the monitor to * go to standby again. To avoid this we ignore the HPD pin for the - * first few seconds after enabling the output. + * first few seconds after enabling the output. On the other hand + * adv7535 require to enable HPD Override bit for proper HPD. */ - regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, - ADV7511_REG_POWER2_HPD_SRC_MASK, - ADV7511_REG_POWER2_HPD_SRC_NONE); + if (adv7511->type == ADV7535) + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, + ADV7535_REG_POWER2_HPD_OVERRIDE, + ADV7535_REG_POWER2_HPD_OVERRIDE); + else + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, + ADV7511_REG_POWER2_HPD_SRC_MASK, + ADV7511_REG_POWER2_HPD_SRC_NONE); } static void adv7511_power_on(struct adv7511 *adv7511) @@ -375,6 +381,10 @@ static void adv7511_power_on(struct adv7511 *adv7511) static void __adv7511_power_off(struct adv7511 *adv7511) { /* TODO: setup additional power down modes */ + if (adv7511->type == ADV7535) + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, + ADV7535_REG_POWER2_HPD_OVERRIDE, 0); + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, ADV7511_POWER_POWER_DOWN, ADV7511_POWER_POWER_DOWN); @@ -672,9 +682,14 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector) status = connector_status_disconnected; } else { /* Renable HPD sensing */ - regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, - ADV7511_REG_POWER2_HPD_SRC_MASK, - ADV7511_REG_POWER2_HPD_SRC_BOTH); + if (adv7511->type == ADV7535) + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, + ADV7535_REG_POWER2_HPD_OVERRIDE, + ADV7535_REG_POWER2_HPD_OVERRIDE); + else + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, + ADV7511_REG_POWER2_HPD_SRC_MASK, + ADV7511_REG_POWER2_HPD_SRC_BOTH); } adv7511->status = status; diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index ea414cd349b5..392a9c56e9a0 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -791,7 +791,8 @@ static int segments_edid_read(struct anx7625_data *ctx, static int sp_tx_edid_read(struct anx7625_data *ctx, u8 *pedid_blocks_buf) { - u8 offset, edid_pos; + u8 offset; + int edid_pos; int count, blocks_num; u8 pblock_buf[MAX_DPCD_BUFFER_SIZE]; u8 i, j; diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c 
b/drivers/gpu/drm/bridge/cdns-dsi.c index e6e331071a00..dd57b104aec3 100644 --- a/drivers/gpu/drm/bridge/cdns-dsi.c +++ b/drivers/gpu/drm/bridge/cdns-dsi.c @@ -1286,6 +1286,7 @@ static const struct of_device_id cdns_dsi_of_match[] = { { .compatible = "cdns,dsi" }, { }, }; +MODULE_DEVICE_TABLE(of, cdns_dsi_of_match); static struct platform_driver cdns_dsi_platform_driver = { .probe = cdns_dsi_drm_probe, diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c index af07eeb47ca0..691039aba87f 100644 --- a/drivers/gpu/drm/bridge/nwl-dsi.c +++ b/drivers/gpu/drm/bridge/nwl-dsi.c @@ -861,18 +861,19 @@ nwl_dsi_bridge_mode_set(struct drm_bridge *bridge, memcpy(&dsi->mode, adjusted_mode, sizeof(dsi->mode)); drm_mode_debug_printmodeline(adjusted_mode); - pm_runtime_get_sync(dev); + if (pm_runtime_resume_and_get(dev) < 0) + return; if (clk_prepare_enable(dsi->lcdif_clk) < 0) - return; + goto runtime_put; if (clk_prepare_enable(dsi->core_clk) < 0) - return; + goto runtime_put; /* Step 1 from DSI reset-out instructions */ ret = reset_control_deassert(dsi->rst_pclk); if (ret < 0) { DRM_DEV_ERROR(dev, "Failed to deassert PCLK: %d\n", ret); - return; + goto runtime_put; } /* Step 2 from DSI reset-out instructions */ @@ -882,13 +883,18 @@ nwl_dsi_bridge_mode_set(struct drm_bridge *bridge, ret = reset_control_deassert(dsi->rst_esc); if (ret < 0) { DRM_DEV_ERROR(dev, "Failed to deassert ESC: %d\n", ret); - return; + goto runtime_put; } ret = reset_control_deassert(dsi->rst_byte); if (ret < 0) { DRM_DEV_ERROR(dev, "Failed to deassert BYTE: %d\n", ret); - return; + goto runtime_put; } + + return; + +runtime_put: + pm_runtime_put_sync(dev); } static void @@ -1204,6 +1210,7 @@ static int nwl_dsi_probe(struct platform_device *pdev) ret = nwl_dsi_select_input(dsi); if (ret < 0) { + pm_runtime_disable(dev); mipi_dsi_host_unregister(&dsi->dsi_host); return ret; } diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c index 843265d7f1b1..ec7745c31da0 100644 --- a/drivers/gpu/drm/bridge/sil-sii8620.c +++ b/drivers/gpu/drm/bridge/sil-sii8620.c @@ -2120,7 +2120,7 @@ static void sii8620_init_rcp_input_dev(struct sii8620 *ctx) if (ret) { dev_err(ctx->dev, "Failed to register RC device\n"); ctx->error = ret; - rc_free_device(ctx->rc_dev); + rc_free_device(rc_dev); return; } ctx->rc_dev = rc_dev; diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index e1211a5b334b..25d58dcfc87e 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -2551,8 +2551,9 @@ static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge, if (!output_fmts) return NULL; - /* If dw-hdmi is the only bridge, avoid negociating with ourselves */ - if (list_is_singular(&bridge->encoder->bridge_chain)) { + /* If dw-hdmi is the first or only bridge, avoid negociating with ourselves */ + if (list_is_singular(&bridge->encoder->bridge_chain) || + list_is_first(&bridge->chain_node, &bridge->encoder->bridge_chain)) { *num_output_fmts = 1; output_fmts[0] = MEDIA_BUS_FMT_FIXED; diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c index e44e18a0112a..56c3fd08c6a0 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c @@ -1199,6 +1199,7 @@ __dw_mipi_dsi_probe(struct platform_device *pdev, ret = mipi_dsi_host_register(&dsi->dsi_host); if (ret) { dev_err(dev, "Failed to register MIPI host: 
%d\n", ret); + pm_runtime_disable(dev); dw_mipi_dsi_debugfs_remove(dsi); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 4d08246f930c..45a5f1e48f0e 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -1473,6 +1473,7 @@ static inline void ti_sn_gpio_unregister(void) {} static void ti_sn65dsi86_runtime_disable(void *data) { + pm_runtime_dont_use_autosuspend(data); pm_runtime_disable(data); } @@ -1532,11 +1533,11 @@ static int ti_sn65dsi86_probe(struct i2c_client *client, "failed to get reference clock\n"); pm_runtime_enable(dev); + pm_runtime_set_autosuspend_delay(pdata->dev, 500); + pm_runtime_use_autosuspend(pdata->dev); ret = devm_add_action_or_reset(dev, ti_sn65dsi86_runtime_disable, dev); if (ret) return ret; - pm_runtime_set_autosuspend_delay(pdata->dev, 500); - pm_runtime_use_autosuspend(pdata->dev); ti_sn65dsi86_debugfs_init(pdata); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 2c0c6ec92820..ff2bc9a11801 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1001,7 +1001,7 @@ crtc_needs_disable(struct drm_crtc_state *old_state, * it's in self refresh mode and needs to be fully disabled. */ return old_state->active || - (old_state->self_refresh_active && !new_state->enable) || + (old_state->self_refresh_active && !new_state->active) || new_state->self_refresh_active; } diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 909f31833181..f195c7013137 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -76,15 +76,17 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state, state->mode_blob = NULL; if (mode) { + struct drm_property_blob *blob; + drm_mode_convert_to_umode(&umode, mode); - state->mode_blob = - drm_property_create_blob(state->crtc->dev, - sizeof(umode), - &umode); - if (IS_ERR(state->mode_blob)) - return PTR_ERR(state->mode_blob); + blob = drm_property_create_blob(crtc->dev, + sizeof(umode), &umode); + if (IS_ERR(blob)) + return PTR_ERR(blob); drm_mode_copy(&state->mode, mode); + + state->mode_blob = blob; state->enable = true; drm_dbg_atomic(crtc->dev, "Set [MODE:%s] for [CRTC:%d:%s] state %p\n", diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 2ba257b1ae20..e9b7926d9b66 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -2233,6 +2233,9 @@ EXPORT_SYMBOL(drm_connector_atomic_hdr_metadata_equal); void drm_connector_set_vrr_capable_property( struct drm_connector *connector, bool capable) { + if (!connector->vrr_capable_property) + return; + drm_object_property_set_value(&connector->base, connector->vrr_capable_property, capable); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index ea9a79bc9583..ee6f44f9a81c 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -4776,7 +4776,8 @@ bool drm_detect_monitor_audio(struct edid *edid) if (!edid_ext) goto end; - has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0); + has_audio = (edid_ext[0] == CEA_EXT && + (edid_ext[3] & EDID_BASIC_AUDIO) != 0); if (has_audio) { DRM_DEBUG_KMS("Monitor has basic audio support\n"); @@ -5003,21 +5004,21 @@ static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector, if (hdmi[6] & DRM_EDID_HDMI_DC_30) { dc_bpc = 10; - info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_30; + 
info->edid_hdmi_rgb444_dc_modes |= DRM_EDID_HDMI_DC_30; DRM_DEBUG("%s: HDMI sink does deep color 30.\n", connector->name); } if (hdmi[6] & DRM_EDID_HDMI_DC_36) { dc_bpc = 12; - info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_36; + info->edid_hdmi_rgb444_dc_modes |= DRM_EDID_HDMI_DC_36; DRM_DEBUG("%s: HDMI sink does deep color 36.\n", connector->name); } if (hdmi[6] & DRM_EDID_HDMI_DC_48) { dc_bpc = 16; - info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_48; + info->edid_hdmi_rgb444_dc_modes |= DRM_EDID_HDMI_DC_48; DRM_DEBUG("%s: HDMI sink does deep color 48.\n", connector->name); } @@ -5032,16 +5033,9 @@ static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector, connector->name, dc_bpc); info->bpc = dc_bpc; - /* - * Deep color support mandates RGB444 support for all video - * modes and forbids YCRCB422 support for all video modes per - * HDMI 1.3 spec. - */ - info->color_formats = DRM_COLOR_FORMAT_RGB444; - /* YCRCB444 is optional according to spec. */ if (hdmi[6] & DRM_EDID_HDMI_DC_Y444) { - info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; + info->edid_hdmi_ycbcr444_dc_modes = info->edid_hdmi_rgb444_dc_modes; DRM_DEBUG("%s: HDMI sink does YCRCB444 in deep color.\n", connector->name); } @@ -5205,6 +5199,7 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi if (!(edid->input & DRM_EDID_INPUT_DIGITAL)) return quirks; + info->color_formats |= DRM_COLOR_FORMAT_RGB444; drm_parse_cea_ext(connector, edid); /* @@ -5253,7 +5248,6 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n", connector->name, info->bpc); - info->color_formats |= DRM_COLOR_FORMAT_RGB444; if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444) info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 22bf690910b2..ed589e7182bb 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -2346,6 +2346,7 @@ static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, fbi->fbops = &drm_fbdev_fb_ops; fbi->screen_size = fb->height * fb->pitches[0]; fbi->fix.smem_len = fbi->screen_size; + fbi->flags = FBINFO_DEFAULT; drm_fb_helper_fill_info(fbi, fb_helper, sizes); @@ -2353,19 +2354,21 @@ static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, fbi->screen_buffer = vzalloc(fbi->screen_size); if (!fbi->screen_buffer) return -ENOMEM; + fbi->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST; fbi->fbdefio = &drm_fbdev_defio; - fb_deferred_io_init(fbi); } else { /* buffer is mapped for HW framebuffer */ ret = drm_client_buffer_vmap(fb_helper->buffer, &map); if (ret) return ret; - if (map.is_iomem) + if (map.is_iomem) { fbi->screen_base = map.vaddr_iomem; - else + } else { fbi->screen_buffer = map.vaddr; + fbi->flags |= FBINFO_VIRTFB; + } /* * Shamelessly leak the physical address to user-space. 
As diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index 9d05674550a4..6533efa84020 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -515,6 +515,7 @@ int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) */ vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node); vma->vm_flags &= ~VM_PFNMAP; + vma->vm_flags |= VM_DONTEXPAND; cma_obj = to_drm_gem_cma_obj(obj); diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 448c2f2d803a..f5ab891731d0 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -166,6 +166,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MicroPC"), }, .driver_data = (void *)&lcd720x1280_rightside_up, + }, { /* GPD Win Max */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1619-01"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* * GPD Pocket, note that the the DMI data is less generic then * it seems, devices with a board-vendor of "AMI Corporation" diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index c313a5b4549c..7e48dcd1bee4 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -853,12 +853,57 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, &args->handle); } + +/* + * Try to flatten a dma_fence_chain into a dma_fence_array so that it can be + * added as timeline fence to a chain again. + */ +static int drm_syncobj_flatten_chain(struct dma_fence **f) +{ + struct dma_fence_chain *chain = to_dma_fence_chain(*f); + struct dma_fence *tmp, **fences; + struct dma_fence_array *array; + unsigned int count; + + if (!chain) + return 0; + + count = 0; + dma_fence_chain_for_each(tmp, &chain->base) + ++count; + + fences = kmalloc_array(count, sizeof(*fences), GFP_KERNEL); + if (!fences) + return -ENOMEM; + + count = 0; + dma_fence_chain_for_each(tmp, &chain->base) + fences[count++] = dma_fence_get(tmp); + + array = dma_fence_array_create(count, fences, + dma_fence_context_alloc(1), + 1, false); + if (!array) + goto free_fences; + + dma_fence_put(*f); + *f = &array->base; + return 0; + +free_fences: + while (count--) + dma_fence_put(fences[count]); + + kfree(fences); + return -ENOMEM; +} + static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private, struct drm_syncobj_transfer *args) { struct drm_syncobj *timeline_syncobj = NULL; - struct dma_fence *fence; struct dma_fence_chain *chain; + struct dma_fence *fence; int ret; timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle); @@ -869,16 +914,22 @@ static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private, args->src_point, args->flags, &fence); if (ret) - goto err; + goto err_put_timeline; + + ret = drm_syncobj_flatten_chain(&fence); + if (ret) + goto err_free_fence; + chain = dma_fence_chain_alloc(); if (!chain) { ret = -ENOMEM; - goto err1; + goto err_free_fence; } + drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point); -err1: +err_free_fence: dma_fence_put(fence); -err: +err_put_timeline: drm_syncobj_put(timeline_syncobj); return ret; diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index f960f5d7664e..fe6b34774483 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -101,6 +101,7 @@ config DRM_I915_USERPTR 
config DRM_I915_GVT bool "Enable Intel GVT-g graphics virtualization host support" depends on DRM_I915 + depends on X86 depends on 64BIT default n help diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 335ba9f43d8f..26cf75422945 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -211,6 +211,8 @@ i915-y += \ display/intel_dpio_phy.o \ display/intel_dpll.o \ display/intel_dpll_mgr.o \ + display/intel_dpt.o \ + display/intel_drrs.o \ display/intel_dsb.o \ display/intel_fb.o \ display/intel_fbc.o \ diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 4b94256d7319..ea48620f76d9 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -681,6 +681,7 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) unsigned int max_bw_point = 0, max_bw = 0; unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points; unsigned int num_psf_gv_points = dev_priv->max_bw[0].num_psf_gv_points; + bool changed = false; u32 mask = 0; /* FIXME earlier gens need some checks too */ @@ -724,6 +725,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) new_bw_state->data_rate[crtc->pipe] = new_data_rate; new_bw_state->num_active_planes[crtc->pipe] = new_active_planes; + changed = true; + drm_dbg_kms(&dev_priv->drm, "pipe %c data rate %u num active planes %u\n", pipe_name(crtc->pipe), @@ -731,7 +734,19 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) new_bw_state->num_active_planes[crtc->pipe]); } - if (!new_bw_state) + old_bw_state = intel_atomic_get_old_bw_state(state); + new_bw_state = intel_atomic_get_new_bw_state(state); + + if (new_bw_state && + intel_can_enable_sagv(dev_priv, old_bw_state) != + intel_can_enable_sagv(dev_priv, new_bw_state)) + changed = true; + + /* + * If none of our inputs (data rates, number of active + * planes, SAGV yes/no) changed then nothing to do here. + */ + if (!changed) return 0; ret = intel_atomic_lock_global_state(&new_bw_state->base); @@ -804,7 +819,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) * cause. */ if (!intel_can_enable_sagv(dev_priv, new_bw_state)) { - allowed_points = BIT(max_bw_point); + allowed_points &= ADLS_PSF_PT_MASK; + allowed_points |= BIT(max_bw_point); drm_dbg_kms(&dev_priv->drm, "No SAGV, using single QGV point %d\n", max_bw_point); } @@ -814,7 +830,6 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) */ new_bw_state->qgv_points_mask = ~allowed_points & mask; - old_bw_state = intel_atomic_get_old_bw_state(state); /* * If the actual mask had changed we need to make sure that * the commits are serialized(in case this is a nomodeset, nonblocking) diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h index 46c6eecbd917..0ceaed1c9656 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.h +++ b/drivers/gpu/drm/i915/display/intel_bw.h @@ -30,19 +30,19 @@ struct intel_bw_state { */ u8 pipe_sagv_reject; + /* bitmask of active pipes */ + u8 active_pipes; + /* * Current QGV points mask, which restricts * some particular SAGV states, not to confuse * with pipe_sagv_mask. 
*/ - u8 qgv_points_mask; + u16 qgv_points_mask; unsigned int data_rate[I915_MAX_PIPES]; u8 num_active_planes[I915_MAX_PIPES]; - /* bitmask of active pipes */ - u8 active_pipes; - int min_cdclk; }; diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 82e5064b4ce7..f61901e26409 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -40,6 +40,7 @@ #include "intel_dp_link_training.h" #include "intel_dp_mst.h" #include "intel_dpio_phy.h" +#include "intel_drrs.h" #include "intel_dsi.h" #include "intel_fdi.h" #include "intel_fifo_underrun.h" diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 17f44ffea586..c9b051ab18e0 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -84,6 +84,7 @@ #include "intel_display_types.h" #include "intel_dmc.h" #include "intel_dp_link_training.h" +#include "intel_dpt.h" #include "intel_fbc.h" #include "intel_fdi.h" #include "intel_fbdev.h" @@ -126,182 +127,6 @@ static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state); static void intel_modeset_setup_hw_state(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx); -struct i915_dpt { - struct i915_address_space vm; - - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - void __iomem *iomem; -}; - -#define i915_is_dpt(vm) ((vm)->is_dpt) - -static inline struct i915_dpt * -i915_vm_to_dpt(struct i915_address_space *vm) -{ - BUILD_BUG_ON(offsetof(struct i915_dpt, vm)); - GEM_BUG_ON(!i915_is_dpt(vm)); - return container_of(vm, struct i915_dpt, vm); -} - -#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT) - -static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) -{ - writeq(pte, addr); -} - -static void dpt_insert_page(struct i915_address_space *vm, - dma_addr_t addr, - u64 offset, - enum i915_cache_level level, - u32 flags) -{ - struct i915_dpt *dpt = i915_vm_to_dpt(vm); - gen8_pte_t __iomem *base = dpt->iomem; - - gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE, - vm->pte_encode(addr, level, flags)); -} - -static void dpt_insert_entries(struct i915_address_space *vm, - struct i915_vma *vma, - enum i915_cache_level level, - u32 flags) -{ - struct i915_dpt *dpt = i915_vm_to_dpt(vm); - gen8_pte_t __iomem *base = dpt->iomem; - const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags); - struct sgt_iter sgt_iter; - dma_addr_t addr; - int i; - - /* - * Note that we ignore PTE_READ_ONLY here. The caller must be careful - * not to allow the user to override access to a read only page. 
- */ - - i = vma->node.start / I915_GTT_PAGE_SIZE; - for_each_sgt_daddr(addr, sgt_iter, vma->pages) - gen8_set_pte(&base[i++], pte_encode | addr); -} - -static void dpt_clear_range(struct i915_address_space *vm, - u64 start, u64 length) -{ -} - -static void dpt_bind_vma(struct i915_address_space *vm, - struct i915_vm_pt_stash *stash, - struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 flags) -{ - struct drm_i915_gem_object *obj = vma->obj; - u32 pte_flags; - - /* Applicable to VLV (gen8+ do not support RO in the GGTT) */ - pte_flags = 0; - if (vma->vm->has_read_only && i915_gem_object_is_readonly(obj)) - pte_flags |= PTE_READ_ONLY; - if (i915_gem_object_is_lmem(obj)) - pte_flags |= PTE_LM; - - vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); - - vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; - - /* - * Without aliasing PPGTT there's no difference between - * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally - * upgrade to both bound if we bind either to avoid double-binding. - */ - atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags); -} - -static void dpt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma) -{ - vm->clear_range(vm, vma->node.start, vma->size); -} - -static void dpt_cleanup(struct i915_address_space *vm) -{ - struct i915_dpt *dpt = i915_vm_to_dpt(vm); - - i915_gem_object_put(dpt->obj); -} - -static struct i915_address_space * -intel_dpt_create(struct intel_framebuffer *fb) -{ - struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base; - struct drm_i915_private *i915 = to_i915(obj->dev); - struct drm_i915_gem_object *dpt_obj; - struct i915_address_space *vm; - struct i915_dpt *dpt; - size_t size; - int ret; - - if (intel_fb_needs_pot_stride_remap(fb)) - size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped); - else - size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE); - - size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE); - - if (HAS_LMEM(i915)) - dpt_obj = i915_gem_object_create_lmem(i915, size, 0); - else - dpt_obj = i915_gem_object_create_stolen(i915, size); - if (IS_ERR(dpt_obj)) - return ERR_CAST(dpt_obj); - - ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE); - if (ret) { - i915_gem_object_put(dpt_obj); - return ERR_PTR(ret); - } - - dpt = kzalloc(sizeof(*dpt), GFP_KERNEL); - if (!dpt) { - i915_gem_object_put(dpt_obj); - return ERR_PTR(-ENOMEM); - } - - vm = &dpt->vm; - - vm->gt = &i915->gt; - vm->i915 = i915; - vm->dma = i915->drm.dev; - vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; - vm->is_dpt = true; - - i915_address_space_init(vm, VM_CLASS_DPT); - - vm->insert_page = dpt_insert_page; - vm->clear_range = dpt_clear_range; - vm->insert_entries = dpt_insert_entries; - vm->cleanup = dpt_cleanup; - - vm->vma_ops.bind_vma = dpt_bind_vma; - vm->vma_ops.unbind_vma = dpt_unbind_vma; - vm->vma_ops.set_pages = ggtt_set_pages; - vm->vma_ops.clear_pages = clear_pages; - - vm->pte_encode = gen8_ggtt_pte_encode; - - dpt->obj = dpt_obj; - - return &dpt->vm; -} - -static void intel_dpt_destroy(struct i915_address_space *vm) -{ - struct i915_dpt *dpt = i915_vm_to_dpt(vm); - - i915_vm_close(&dpt->vm); -} - /* returns HPLL frequency in kHz */ int vlv_get_hpll_vco(struct drm_i915_private *dev_priv) { @@ -1833,8 +1658,8 @@ static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state) } } -static void intel_plane_disable_noatomic(struct intel_crtc *crtc, - struct intel_plane *plane) +void intel_plane_disable_noatomic(struct intel_crtc *crtc, + struct 
intel_plane *plane) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state = @@ -1879,49 +1704,6 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc, intel_wait_for_vblank(dev_priv, crtc->pipe); } -static struct i915_vma *intel_dpt_pin(struct i915_address_space *vm) -{ - struct drm_i915_private *i915 = vm->i915; - struct i915_dpt *dpt = i915_vm_to_dpt(vm); - intel_wakeref_t wakeref; - struct i915_vma *vma; - void __iomem *iomem; - - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - atomic_inc(&i915->gpu_error.pending_fb_pin); - - vma = i915_gem_object_ggtt_pin(dpt->obj, NULL, 0, 4096, - HAS_LMEM(i915) ? 0 : PIN_MAPPABLE); - if (IS_ERR(vma)) - goto err; - - iomem = i915_vma_pin_iomap(vma); - i915_vma_unpin(vma); - if (IS_ERR(iomem)) { - vma = iomem; - goto err; - } - - dpt->vma = vma; - dpt->iomem = iomem; - - i915_vma_get(vma); - -err: - atomic_dec(&i915->gpu_error.pending_fb_pin); - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - - return vma; -} - -static void intel_dpt_unpin(struct i915_address_space *vm) -{ - struct i915_dpt *dpt = i915_vm_to_dpt(vm); - - i915_vma_unpin_iomap(dpt->vma); - i915_vma_put(dpt->vma); -} - static bool intel_reuse_initial_plane_obj(struct drm_i915_private *i915, const struct intel_initial_plane_config *plane_config, @@ -13435,6 +13217,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev, vlv_wm_sanitize(dev_priv); } else if (DISPLAY_VER(dev_priv) >= 9) { skl_wm_get_hw_state(dev_priv); + skl_wm_sanitize(dev_priv); } else if (HAS_PCH_SPLIT(dev_priv)) { ilk_wm_get_hw_state(dev_priv); } diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 284936f0ddab..6a7a91b38080 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -629,6 +629,8 @@ void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state); struct intel_encoder * intel_get_crtc_new_encoder(const struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state); +void intel_plane_disable_noatomic(struct intel_crtc *crtc, + struct intel_plane *plane); unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, int color_plane); diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index 8fdacb252bb1..b136a0fc0963 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -13,6 +13,7 @@ #include "intel_display_types.h" #include "intel_dmc.h" #include "intel_dp.h" +#include "intel_drrs.h" #include "intel_fbc.h" #include "intel_hdcp.h" #include "intel_hdmi.h" diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index d55363f1fa10..631cf7d4323c 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -56,6 +56,7 @@ #include "intel_dp_mst.h" #include "intel_dpio_phy.h" #include "intel_dpll.h" +#include "intel_drrs.h" #include "intel_fifo_underrun.h" #include "intel_hdcp.h" #include "intel_hdmi.h" @@ -1610,46 +1611,6 @@ intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA); } -static void -intel_dp_drrs_compute_config(struct intel_dp *intel_dp, - struct intel_crtc_state *pipe_config, - int output_bpp, bool constant_n) -{ - struct intel_connector *intel_connector = intel_dp->attached_connector; - struct 
drm_i915_private *dev_priv = dp_to_i915(intel_dp); - int pixel_clock; - - if (pipe_config->vrr.enable) - return; - - /* - * DRRS and PSR can't be enable together, so giving preference to PSR - * as it allows more power-savings by complete shutting down display, - * so to guarantee this, intel_dp_drrs_compute_config() must be called - * after intel_psr_compute_config(). - */ - if (pipe_config->has_psr) - return; - - if (!intel_connector->panel.downclock_mode || - dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT) - return; - - pipe_config->has_drrs = true; - - pixel_clock = intel_connector->panel.downclock_mode->clock; - if (pipe_config->splitter.enable) - pixel_clock /= pipe_config->splitter.link_count; - - intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock, - pipe_config->port_clock, &pipe_config->dp_m2_n2, - constant_n, pipe_config->fec_enable); - - /* FIXME: abstract this better */ - if (pipe_config->splitter.enable) - pipe_config->dp_m2_n2.gmch_m *= pipe_config->splitter.link_count; -} - int intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, @@ -4638,7 +4599,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd) struct intel_dp *intel_dp = &dig_port->dp; if (dig_port->base.type == INTEL_OUTPUT_EDP && - (long_hpd || !intel_pps_have_power(intel_dp))) { + (long_hpd || !intel_pps_have_panel_power_or_vdd(intel_dp))) { /* * vdd off can generate a long/short pulse on eDP which * would require vdd on to handle it, and thus we @@ -4737,432 +4698,6 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect drm_connector_attach_vrr_capable_property(connector); } -/** - * intel_dp_set_drrs_state - program registers for RR switch to take effect - * @dev_priv: i915 device - * @crtc_state: a pointer to the active intel_crtc_state - * @refresh_rate: RR to be programmed - * - * This function gets called when refresh rate (RR) has to be changed from - * one frequency to another. Switches can be between high and low RR - * supported by the panel or to any other RR based on media playback (in - * this case, RR value needs to be passed from user space). - * - * The caller of this function needs to take a lock on dev_priv->drrs. - */ -static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, - const struct intel_crtc_state *crtc_state, - int refresh_rate) -{ - struct intel_dp *intel_dp = dev_priv->drrs.dp; - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - enum drrs_refresh_rate_type index = DRRS_HIGH_RR; - - if (refresh_rate <= 0) { - drm_dbg_kms(&dev_priv->drm, - "Refresh rate should be positive non-zero.\n"); - return; - } - - if (intel_dp == NULL) { - drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n"); - return; - } - - if (!crtc) { - drm_dbg_kms(&dev_priv->drm, - "DRRS: intel_crtc not initialized\n"); - return; - } - - if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) { - drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n"); - return; - } - - if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) == - refresh_rate) - index = DRRS_LOW_RR; - - if (index == dev_priv->drrs.refresh_rate_type) { - drm_dbg_kms(&dev_priv->drm, - "DRRS requested for previously set RR...ignoring\n"); - return; - } - - if (!crtc_state->hw.active) { - drm_dbg_kms(&dev_priv->drm, - "eDP encoder disabled. 
CRTC not Active\n"); - return; - } - - if (DISPLAY_VER(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) { - switch (index) { - case DRRS_HIGH_RR: - intel_dp_set_m_n(crtc_state, M1_N1); - break; - case DRRS_LOW_RR: - intel_dp_set_m_n(crtc_state, M2_N2); - break; - case DRRS_MAX_RR: - default: - drm_err(&dev_priv->drm, - "Unsupported refreshrate type\n"); - } - } else if (DISPLAY_VER(dev_priv) > 6) { - i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder); - u32 val; - - val = intel_de_read(dev_priv, reg); - if (index > DRRS_HIGH_RR) { - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; - else - val |= PIPECONF_EDP_RR_MODE_SWITCH; - } else { - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV; - else - val &= ~PIPECONF_EDP_RR_MODE_SWITCH; - } - intel_de_write(dev_priv, reg, val); - } - - dev_priv->drrs.refresh_rate_type = index; - - drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n", - refresh_rate); -} - -static void -intel_edp_drrs_enable_locked(struct intel_dp *intel_dp) -{ - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - - dev_priv->drrs.busy_frontbuffer_bits = 0; - dev_priv->drrs.dp = intel_dp; -} - -/** - * intel_edp_drrs_enable - init drrs struct if supported - * @intel_dp: DP struct - * @crtc_state: A pointer to the active crtc state. - * - * Initializes frontbuffer_bits and drrs.dp - */ -void intel_edp_drrs_enable(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - - if (!crtc_state->has_drrs) - return; - - drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n"); - - mutex_lock(&dev_priv->drrs.mutex); - - if (dev_priv->drrs.dp) { - drm_warn(&dev_priv->drm, "DRRS already enabled\n"); - goto unlock; - } - - intel_edp_drrs_enable_locked(intel_dp); - -unlock: - mutex_unlock(&dev_priv->drrs.mutex); -} - -static void -intel_edp_drrs_disable_locked(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - - if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) { - int refresh; - - refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode); - intel_dp_set_drrs_state(dev_priv, crtc_state, refresh); - } - - dev_priv->drrs.dp = NULL; -} - -/** - * intel_edp_drrs_disable - Disable DRRS - * @intel_dp: DP struct - * @old_crtc_state: Pointer to old crtc_state. - * - */ -void intel_edp_drrs_disable(struct intel_dp *intel_dp, - const struct intel_crtc_state *old_crtc_state) -{ - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - - if (!old_crtc_state->has_drrs) - return; - - mutex_lock(&dev_priv->drrs.mutex); - if (!dev_priv->drrs.dp) { - mutex_unlock(&dev_priv->drrs.mutex); - return; - } - - intel_edp_drrs_disable_locked(intel_dp, old_crtc_state); - mutex_unlock(&dev_priv->drrs.mutex); - - cancel_delayed_work_sync(&dev_priv->drrs.work); -} - -/** - * intel_edp_drrs_update - Update DRRS state - * @intel_dp: Intel DP - * @crtc_state: new CRTC state - * - * This function will update DRRS states, disabling or enabling DRRS when - * executing fastsets. For full modeset, intel_edp_drrs_disable() and - * intel_edp_drrs_enable() should be called instead. 
- */ -void -intel_edp_drrs_update(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - - if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT) - return; - - mutex_lock(&dev_priv->drrs.mutex); - - /* New state matches current one? */ - if (crtc_state->has_drrs == !!dev_priv->drrs.dp) - goto unlock; - - if (crtc_state->has_drrs) - intel_edp_drrs_enable_locked(intel_dp); - else - intel_edp_drrs_disable_locked(intel_dp, crtc_state); - -unlock: - mutex_unlock(&dev_priv->drrs.mutex); -} - -static void intel_edp_drrs_downclock_work(struct work_struct *work) -{ - struct drm_i915_private *dev_priv = - container_of(work, typeof(*dev_priv), drrs.work.work); - struct intel_dp *intel_dp; - - mutex_lock(&dev_priv->drrs.mutex); - - intel_dp = dev_priv->drrs.dp; - - if (!intel_dp) - goto unlock; - - /* - * The delayed work can race with an invalidate hence we need to - * recheck. - */ - - if (dev_priv->drrs.busy_frontbuffer_bits) - goto unlock; - - if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) { - struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; - - intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, - drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode)); - } - -unlock: - mutex_unlock(&dev_priv->drrs.mutex); -} - -/** - * intel_edp_drrs_invalidate - Disable Idleness DRRS - * @dev_priv: i915 device - * @frontbuffer_bits: frontbuffer plane tracking bits - * - * This function gets called everytime rendering on the given planes start. - * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR). - * - * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. - */ -void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, - unsigned int frontbuffer_bits) -{ - struct intel_dp *intel_dp; - struct drm_crtc *crtc; - enum pipe pipe; - - if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) - return; - - cancel_delayed_work(&dev_priv->drrs.work); - - mutex_lock(&dev_priv->drrs.mutex); - - intel_dp = dev_priv->drrs.dp; - if (!intel_dp) { - mutex_unlock(&dev_priv->drrs.mutex); - return; - } - - crtc = dp_to_dig_port(intel_dp)->base.base.crtc; - pipe = to_intel_crtc(crtc)->pipe; - - frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); - dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits; - - /* invalidate means busy screen hence upclock */ - if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) - intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, - drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode)); - - mutex_unlock(&dev_priv->drrs.mutex); -} - -/** - * intel_edp_drrs_flush - Restart Idleness DRRS - * @dev_priv: i915 device - * @frontbuffer_bits: frontbuffer plane tracking bits - * - * This function gets called every time rendering on the given planes has - * completed or flip on a crtc is completed. So DRRS should be upclocked - * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again, - * if no other planes are dirty. - * - * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. 
- */ -void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, - unsigned int frontbuffer_bits) -{ - struct intel_dp *intel_dp; - struct drm_crtc *crtc; - enum pipe pipe; - - if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) - return; - - cancel_delayed_work(&dev_priv->drrs.work); - - mutex_lock(&dev_priv->drrs.mutex); - - intel_dp = dev_priv->drrs.dp; - if (!intel_dp) { - mutex_unlock(&dev_priv->drrs.mutex); - return; - } - - crtc = dp_to_dig_port(intel_dp)->base.base.crtc; - pipe = to_intel_crtc(crtc)->pipe; - - frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); - dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits; - - /* flush means busy screen hence upclock */ - if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) - intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, - drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode)); - - /* - * flush also means no more activity hence schedule downclock, if all - * other fbs are quiescent too - */ - if (!dev_priv->drrs.busy_frontbuffer_bits) - schedule_delayed_work(&dev_priv->drrs.work, - msecs_to_jiffies(1000)); - mutex_unlock(&dev_priv->drrs.mutex); -} - -/** - * DOC: Display Refresh Rate Switching (DRRS) - * - * Display Refresh Rate Switching (DRRS) is a power conservation feature - * which enables swtching between low and high refresh rates, - * dynamically, based on the usage scenario. This feature is applicable - * for internal panels. - * - * Indication that the panel supports DRRS is given by the panel EDID, which - * would list multiple refresh rates for one resolution. - * - * DRRS is of 2 types - static and seamless. - * Static DRRS involves changing refresh rate (RR) by doing a full modeset - * (may appear as a blink on screen) and is used in dock-undock scenario. - * Seamless DRRS involves changing RR without any visual effect to the user - * and can be used during normal system usage. This is done by programming - * certain registers. - * - * Support for static/seamless DRRS may be indicated in the VBT based on - * inputs from the panel spec. - * - * DRRS saves power by switching to low RR based on usage scenarios. - * - * The implementation is based on frontbuffer tracking implementation. When - * there is a disturbance on the screen triggered by user activity or a periodic - * system activity, DRRS is disabled (RR is changed to high RR). When there is - * no movement on screen, after a timeout of 1 second, a switch to low RR is - * made. - * - * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate() - * and intel_edp_drrs_flush() are called. - * - * DRRS can be further extended to support other internal panels and also - * the scenario of video playback wherein RR is set based on the rate - * requested by userspace. - */ - -/** - * intel_dp_drrs_init - Init basic DRRS work and mutex. - * @connector: eDP connector - * @fixed_mode: preferred mode of panel - * - * This function is called only once at driver load to initialize basic - * DRRS stuff. - * - * Returns: - * Downclock mode if panel supports it, else return NULL. - * DRRS support is determined by the presence of downclock mode (apart - * from VBT setting). 
- */ -static struct drm_display_mode * -intel_dp_drrs_init(struct intel_connector *connector, - struct drm_display_mode *fixed_mode) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct drm_display_mode *downclock_mode = NULL; - - INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work); - mutex_init(&dev_priv->drrs.mutex); - - if (DISPLAY_VER(dev_priv) <= 6) { - drm_dbg_kms(&dev_priv->drm, - "DRRS supported for Gen7 and above\n"); - return NULL; - } - - if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) { - drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n"); - return NULL; - } - - downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode); - if (!downclock_mode) { - drm_dbg_kms(&dev_priv->drm, - "Downclock mode is not found. DRRS not supported\n"); - return NULL; - } - - dev_priv->drrs.type = dev_priv->vbt.drrs_type; - - dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR; - drm_dbg_kms(&dev_priv->drm, - "seamless DRRS supported for eDP panel.\n"); - return downclock_mode; -} - static bool intel_edp_init_connector(struct intel_dp *intel_dp, struct intel_connector *intel_connector) { diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index 2121aaa9b8db..3dd6ebc2f6b1 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -70,17 +70,6 @@ int intel_dp_max_link_rate(struct intel_dp *intel_dp); int intel_dp_max_lane_count(struct intel_dp *intel_dp); int intel_dp_rate_select(struct intel_dp *intel_dp, int rate); -void intel_edp_drrs_enable(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state); -void intel_edp_drrs_disable(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state); -void intel_edp_drrs_update(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state); -void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, - unsigned int frontbuffer_bits); -void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, - unsigned int frontbuffer_bits); - void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, u8 *link_bw, u8 *rate_select); bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c new file mode 100644 index 000000000000..22acd945a9e4 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dpt.c @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include "i915_drv.h" +#include "intel_display_types.h" +#include "intel_dpt.h" +#include "intel_fb.h" +#include "gt/gen8_ppgtt.h" + +struct i915_dpt { + struct i915_address_space vm; + + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + void __iomem *iomem; +}; + +#define i915_is_dpt(vm) ((vm)->is_dpt) + +static inline struct i915_dpt * +i915_vm_to_dpt(struct i915_address_space *vm) +{ + BUILD_BUG_ON(offsetof(struct i915_dpt, vm)); + GEM_BUG_ON(!i915_is_dpt(vm)); + return container_of(vm, struct i915_dpt, vm); +} + +#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT) + +static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) +{ + writeq(pte, addr); +} + +static void dpt_insert_page(struct i915_address_space *vm, + dma_addr_t addr, + u64 offset, + enum i915_cache_level level, + u32 flags) +{ + struct i915_dpt *dpt = i915_vm_to_dpt(vm); + gen8_pte_t __iomem *base = dpt->iomem; + + gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE, + vm->pte_encode(addr, 
level, flags)); +} + +static void dpt_insert_entries(struct i915_address_space *vm, + struct i915_vma *vma, + enum i915_cache_level level, + u32 flags) +{ + struct i915_dpt *dpt = i915_vm_to_dpt(vm); + gen8_pte_t __iomem *base = dpt->iomem; + const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags); + struct sgt_iter sgt_iter; + dma_addr_t addr; + int i; + + /* + * Note that we ignore PTE_READ_ONLY here. The caller must be careful + * not to allow the user to override access to a read only page. + */ + + i = vma->node.start / I915_GTT_PAGE_SIZE; + for_each_sgt_daddr(addr, sgt_iter, vma->pages) + gen8_set_pte(&base[i++], pte_encode | addr); +} + +static void dpt_clear_range(struct i915_address_space *vm, + u64 start, u64 length) +{ +} + +static void dpt_bind_vma(struct i915_address_space *vm, + struct i915_vm_pt_stash *stash, + struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags) +{ + struct drm_i915_gem_object *obj = vma->obj; + u32 pte_flags; + + /* Applicable to VLV (gen8+ do not support RO in the GGTT) */ + pte_flags = 0; + if (vma->vm->has_read_only && i915_gem_object_is_readonly(obj)) + pte_flags |= PTE_READ_ONLY; + if (i915_gem_object_is_lmem(obj)) + pte_flags |= PTE_LM; + + vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); + + vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; + + /* + * Without aliasing PPGTT there's no difference between + * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally + * upgrade to both bound if we bind either to avoid double-binding. + */ + atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags); +} + +static void dpt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma) +{ + vm->clear_range(vm, vma->node.start, vma->size); +} + +static void dpt_cleanup(struct i915_address_space *vm) +{ + struct i915_dpt *dpt = i915_vm_to_dpt(vm); + + i915_gem_object_put(dpt->obj); +} + +struct i915_vma *intel_dpt_pin(struct i915_address_space *vm) +{ + struct drm_i915_private *i915 = vm->i915; + struct i915_dpt *dpt = i915_vm_to_dpt(vm); + intel_wakeref_t wakeref; + struct i915_vma *vma; + void __iomem *iomem; + + wakeref = intel_runtime_pm_get(&i915->runtime_pm); + atomic_inc(&i915->gpu_error.pending_fb_pin); + + vma = i915_gem_object_ggtt_pin(dpt->obj, NULL, 0, 4096, + HAS_LMEM(i915) ? 
0 : PIN_MAPPABLE); + if (IS_ERR(vma)) + goto err; + + iomem = i915_vma_pin_iomap(vma); + i915_vma_unpin(vma); + if (IS_ERR(iomem)) { + vma = iomem; + goto err; + } + + dpt->vma = vma; + dpt->iomem = iomem; + + i915_vma_get(vma); + +err: + atomic_dec(&i915->gpu_error.pending_fb_pin); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); + + return vma; +} + +void intel_dpt_unpin(struct i915_address_space *vm) +{ + struct i915_dpt *dpt = i915_vm_to_dpt(vm); + + i915_vma_unpin_iomap(dpt->vma); + i915_vma_put(dpt->vma); +} + +struct i915_address_space * +intel_dpt_create(struct intel_framebuffer *fb) +{ + struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base; + struct drm_i915_private *i915 = to_i915(obj->dev); + struct drm_i915_gem_object *dpt_obj; + struct i915_address_space *vm; + struct i915_dpt *dpt; + size_t size; + int ret; + + if (intel_fb_needs_pot_stride_remap(fb)) + size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped); + else + size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE); + + size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE); + + if (HAS_LMEM(i915)) + dpt_obj = i915_gem_object_create_lmem(i915, size, 0); + else + dpt_obj = i915_gem_object_create_stolen(i915, size); + if (IS_ERR(dpt_obj)) + return ERR_CAST(dpt_obj); + + ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE); + if (ret) { + i915_gem_object_put(dpt_obj); + return ERR_PTR(ret); + } + + dpt = kzalloc(sizeof(*dpt), GFP_KERNEL); + if (!dpt) { + i915_gem_object_put(dpt_obj); + return ERR_PTR(-ENOMEM); + } + + vm = &dpt->vm; + + vm->gt = &i915->gt; + vm->i915 = i915; + vm->dma = i915->drm.dev; + vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; + vm->is_dpt = true; + + i915_address_space_init(vm, VM_CLASS_DPT); + + vm->insert_page = dpt_insert_page; + vm->clear_range = dpt_clear_range; + vm->insert_entries = dpt_insert_entries; + vm->cleanup = dpt_cleanup; + + vm->vma_ops.bind_vma = dpt_bind_vma; + vm->vma_ops.unbind_vma = dpt_unbind_vma; + vm->vma_ops.set_pages = ggtt_set_pages; + vm->vma_ops.clear_pages = clear_pages; + + vm->pte_encode = gen8_ggtt_pte_encode; + + dpt->obj = dpt_obj; + + return &dpt->vm; +} + +void intel_dpt_destroy(struct i915_address_space *vm) +{ + struct i915_dpt *dpt = i915_vm_to_dpt(vm); + + i915_vm_close(&dpt->vm); +} diff --git a/drivers/gpu/drm/i915/display/intel_dpt.h b/drivers/gpu/drm/i915/display/intel_dpt.h new file mode 100644 index 000000000000..45142b8f849f --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dpt.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __INTEL_DPT_H__ +#define __INTEL_DPT_H__ + +struct i915_address_space; +struct i915_vma; +struct intel_framebuffer; + +void intel_dpt_destroy(struct i915_address_space *vm); +struct i915_vma *intel_dpt_pin(struct i915_address_space *vm); +void intel_dpt_unpin(struct i915_address_space *vm); +struct i915_address_space * +intel_dpt_create(struct intel_framebuffer *fb); + +#endif /* __INTEL_DPT_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c new file mode 100644 index 000000000000..3c7d6bf57948 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_drrs.c @@ -0,0 +1,485 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include "i915_drv.h" +#include "intel_atomic.h" +#include "intel_de.h" +#include "intel_display_types.h" +#include "intel_drrs.h" +#include "intel_panel.h" + +/** + * DOC: Display Refresh Rate 
Switching (DRRS) + * + * Display Refresh Rate Switching (DRRS) is a power conservation feature + * which enables swtching between low and high refresh rates, + * dynamically, based on the usage scenario. This feature is applicable + * for internal panels. + * + * Indication that the panel supports DRRS is given by the panel EDID, which + * would list multiple refresh rates for one resolution. + * + * DRRS is of 2 types - static and seamless. + * Static DRRS involves changing refresh rate (RR) by doing a full modeset + * (may appear as a blink on screen) and is used in dock-undock scenario. + * Seamless DRRS involves changing RR without any visual effect to the user + * and can be used during normal system usage. This is done by programming + * certain registers. + * + * Support for static/seamless DRRS may be indicated in the VBT based on + * inputs from the panel spec. + * + * DRRS saves power by switching to low RR based on usage scenarios. + * + * The implementation is based on frontbuffer tracking implementation. When + * there is a disturbance on the screen triggered by user activity or a periodic + * system activity, DRRS is disabled (RR is changed to high RR). When there is + * no movement on screen, after a timeout of 1 second, a switch to low RR is + * made. + * + * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate() + * and intel_edp_drrs_flush() are called. + * + * DRRS can be further extended to support other internal panels and also + * the scenario of video playback wherein RR is set based on the rate + * requested by userspace. + */ + +void +intel_dp_drrs_compute_config(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config, + int output_bpp, bool constant_n) +{ + struct intel_connector *intel_connector = intel_dp->attached_connector; + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + int pixel_clock; + + if (pipe_config->vrr.enable) + return; + + /* + * DRRS and PSR can't be enable together, so giving preference to PSR + * as it allows more power-savings by complete shutting down display, + * so to guarantee this, intel_dp_drrs_compute_config() must be called + * after intel_psr_compute_config(). + */ + if (pipe_config->has_psr) + return; + + if (!intel_connector->panel.downclock_mode || + dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT) + return; + + pipe_config->has_drrs = true; + + pixel_clock = intel_connector->panel.downclock_mode->clock; + if (pipe_config->splitter.enable) + pixel_clock /= pipe_config->splitter.link_count; + + intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock, + pipe_config->port_clock, &pipe_config->dp_m2_n2, + constant_n, pipe_config->fec_enable); + + /* FIXME: abstract this better */ + if (pipe_config->splitter.enable) + pipe_config->dp_m2_n2.gmch_m *= pipe_config->splitter.link_count; +} + +/** + * intel_dp_set_drrs_state - program registers for RR switch to take effect + * @dev_priv: i915 device + * @crtc_state: a pointer to the active intel_crtc_state + * @refresh_rate: RR to be programmed + * + * This function gets called when refresh rate (RR) has to be changed from + * one frequency to another. Switches can be between high and low RR + * supported by the panel or to any other RR based on media playback (in + * this case, RR value needs to be passed from user space). + * + * The caller of this function needs to take a lock on dev_priv->drrs. 
+ */ +static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, + const struct intel_crtc_state *crtc_state, + int refresh_rate) +{ + struct intel_dp *intel_dp = dev_priv->drrs.dp; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + enum drrs_refresh_rate_type index = DRRS_HIGH_RR; + + if (refresh_rate <= 0) { + drm_dbg_kms(&dev_priv->drm, + "Refresh rate should be positive non-zero.\n"); + return; + } + + if (intel_dp == NULL) { + drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n"); + return; + } + + if (!crtc) { + drm_dbg_kms(&dev_priv->drm, + "DRRS: intel_crtc not initialized\n"); + return; + } + + if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) { + drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n"); + return; + } + + if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) == + refresh_rate) + index = DRRS_LOW_RR; + + if (index == dev_priv->drrs.refresh_rate_type) { + drm_dbg_kms(&dev_priv->drm, + "DRRS requested for previously set RR...ignoring\n"); + return; + } + + if (!crtc_state->hw.active) { + drm_dbg_kms(&dev_priv->drm, + "eDP encoder disabled. CRTC not Active\n"); + return; + } + + if (DISPLAY_VER(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) { + switch (index) { + case DRRS_HIGH_RR: + intel_dp_set_m_n(crtc_state, M1_N1); + break; + case DRRS_LOW_RR: + intel_dp_set_m_n(crtc_state, M2_N2); + break; + case DRRS_MAX_RR: + default: + drm_err(&dev_priv->drm, + "Unsupported refreshrate type\n"); + } + } else if (DISPLAY_VER(dev_priv) > 6) { + i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder); + u32 val; + + val = intel_de_read(dev_priv, reg); + if (index > DRRS_HIGH_RR) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; + else + val |= PIPECONF_EDP_RR_MODE_SWITCH; + } else { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV; + else + val &= ~PIPECONF_EDP_RR_MODE_SWITCH; + } + intel_de_write(dev_priv, reg, val); + } + + dev_priv->drrs.refresh_rate_type = index; + + drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n", + refresh_rate); +} + +static void +intel_edp_drrs_enable_locked(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + dev_priv->drrs.busy_frontbuffer_bits = 0; + dev_priv->drrs.dp = intel_dp; +} + +/** + * intel_edp_drrs_enable - init drrs struct if supported + * @intel_dp: DP struct + * @crtc_state: A pointer to the active crtc state. 
+ * + * Initializes frontbuffer_bits and drrs.dp + */ +void intel_edp_drrs_enable(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + if (!crtc_state->has_drrs) + return; + + drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n"); + + mutex_lock(&dev_priv->drrs.mutex); + + if (dev_priv->drrs.dp) { + drm_warn(&dev_priv->drm, "DRRS already enabled\n"); + goto unlock; + } + + intel_edp_drrs_enable_locked(intel_dp); + +unlock: + mutex_unlock(&dev_priv->drrs.mutex); +} + +static void +intel_edp_drrs_disable_locked(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) { + int refresh; + + refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode); + intel_dp_set_drrs_state(dev_priv, crtc_state, refresh); + } + + dev_priv->drrs.dp = NULL; +} + +/** + * intel_edp_drrs_disable - Disable DRRS + * @intel_dp: DP struct + * @old_crtc_state: Pointer to old crtc_state. + * + */ +void intel_edp_drrs_disable(struct intel_dp *intel_dp, + const struct intel_crtc_state *old_crtc_state) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + if (!old_crtc_state->has_drrs) + return; + + mutex_lock(&dev_priv->drrs.mutex); + if (!dev_priv->drrs.dp) { + mutex_unlock(&dev_priv->drrs.mutex); + return; + } + + intel_edp_drrs_disable_locked(intel_dp, old_crtc_state); + mutex_unlock(&dev_priv->drrs.mutex); + + cancel_delayed_work_sync(&dev_priv->drrs.work); +} + +/** + * intel_edp_drrs_update - Update DRRS state + * @intel_dp: Intel DP + * @crtc_state: new CRTC state + * + * This function will update DRRS states, disabling or enabling DRRS when + * executing fastsets. For full modeset, intel_edp_drrs_disable() and + * intel_edp_drrs_enable() should be called instead. + */ +void +intel_edp_drrs_update(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT) + return; + + mutex_lock(&dev_priv->drrs.mutex); + + /* New state matches current one? */ + if (crtc_state->has_drrs == !!dev_priv->drrs.dp) + goto unlock; + + if (crtc_state->has_drrs) + intel_edp_drrs_enable_locked(intel_dp); + else + intel_edp_drrs_disable_locked(intel_dp, crtc_state); + +unlock: + mutex_unlock(&dev_priv->drrs.mutex); +} + +static void intel_edp_drrs_downclock_work(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, typeof(*dev_priv), drrs.work.work); + struct intel_dp *intel_dp; + + mutex_lock(&dev_priv->drrs.mutex); + + intel_dp = dev_priv->drrs.dp; + + if (!intel_dp) + goto unlock; + + /* + * The delayed work can race with an invalidate hence we need to + * recheck. + */ + + if (dev_priv->drrs.busy_frontbuffer_bits) + goto unlock; + + if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) { + struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; + + intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, + drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode)); + } + +unlock: + mutex_unlock(&dev_priv->drrs.mutex); +} + +/** + * intel_edp_drrs_invalidate - Disable Idleness DRRS + * @dev_priv: i915 device + * @frontbuffer_bits: frontbuffer plane tracking bits + * + * This function gets called every time rendering on the given planes starts. + * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR). 
+ * + * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. + */ +void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits) +{ + struct intel_dp *intel_dp; + struct drm_crtc *crtc; + enum pipe pipe; + + if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) + return; + + cancel_delayed_work(&dev_priv->drrs.work); + + mutex_lock(&dev_priv->drrs.mutex); + + intel_dp = dev_priv->drrs.dp; + if (!intel_dp) { + mutex_unlock(&dev_priv->drrs.mutex); + return; + } + + crtc = dp_to_dig_port(intel_dp)->base.base.crtc; + pipe = to_intel_crtc(crtc)->pipe; + + frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); + dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits; + + /* invalidate means busy screen hence upclock */ + if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) + intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, + drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode)); + + mutex_unlock(&dev_priv->drrs.mutex); +} + +/** + * intel_edp_drrs_flush - Restart Idleness DRRS + * @dev_priv: i915 device + * @frontbuffer_bits: frontbuffer plane tracking bits + * + * This function gets called every time rendering on the given planes has + * completed or flip on a crtc is completed. So DRRS should be upclocked + * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again, + * if no other planes are dirty. + * + * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. + */ +void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits) +{ + struct intel_dp *intel_dp; + struct drm_crtc *crtc; + enum pipe pipe; + + if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) + return; + + cancel_delayed_work(&dev_priv->drrs.work); + + mutex_lock(&dev_priv->drrs.mutex); + + intel_dp = dev_priv->drrs.dp; + if (!intel_dp) { + mutex_unlock(&dev_priv->drrs.mutex); + return; + } + + crtc = dp_to_dig_port(intel_dp)->base.base.crtc; + pipe = to_intel_crtc(crtc)->pipe; + + frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); + dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits; + + /* flush means busy screen hence upclock */ + if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) + intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, + drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode)); + + /* + * flush also means no more activity hence schedule downclock, if all + * other fbs are quiescent too + */ + if (!dev_priv->drrs.busy_frontbuffer_bits) + schedule_delayed_work(&dev_priv->drrs.work, + msecs_to_jiffies(1000)); + mutex_unlock(&dev_priv->drrs.mutex); +} + +/** + * intel_dp_drrs_init - Init basic DRRS work and mutex. + * @connector: eDP connector + * @fixed_mode: preferred mode of panel + * + * This function is called only once at driver load to initialize basic + * DRRS stuff. + * + * Returns: + * Downclock mode if panel supports it, else return NULL. + * DRRS support is determined by the presence of downclock mode (apart + * from VBT setting). 
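/*
 * [Editor's sketch -- not part of the patch.]  Within this series the only
 * visible wiring to the frontbuffer tracking code is the new include added
 * to intel_frontbuffer.c below; the call sites themselves are not in this
 * hunk.  Presumably the tracking core forwards its plane bits to the two
 * entry points above roughly as sketched here.  The function names and the
 * surrounding shape are assumptions, and the sketch relies on the i915
 * headers (intel_drrs.h) introduced by this patch.
 */
static void example_frontbuffer_invalidate(struct drm_i915_private *i915,
					   unsigned int frontbuffer_bits)
{
	/* other consumers (PSR, FBC, ...) elided */
	intel_edp_drrs_invalidate(i915, frontbuffer_bits);
}

static void example_frontbuffer_flush(struct drm_i915_private *i915,
				      unsigned int frontbuffer_bits)
{
	/* other consumers (PSR, FBC, ...) elided */
	intel_edp_drrs_flush(i915, frontbuffer_bits);
}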
+ */ +struct drm_display_mode * +intel_dp_drrs_init(struct intel_connector *connector, + struct drm_display_mode *fixed_mode) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_encoder *encoder = connector->encoder; + struct drm_display_mode *downclock_mode = NULL; + + INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work); + mutex_init(&dev_priv->drrs.mutex); + + if (DISPLAY_VER(dev_priv) <= 6) { + drm_dbg_kms(&dev_priv->drm, + "DRRS supported for Gen7 and above\n"); + return NULL; + } + + if ((DISPLAY_VER(dev_priv) < 8 && !HAS_GMCH(dev_priv)) && + encoder->port != PORT_A) { + drm_dbg_kms(&dev_priv->drm, + "DRRS only supported on eDP port A\n"); + return NULL; + } + + if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) { + drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n"); + return NULL; + } + + downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode); + if (!downclock_mode) { + drm_dbg_kms(&dev_priv->drm, + "Downclock mode is not found. DRRS not supported\n"); + return NULL; + } + + dev_priv->drrs.type = dev_priv->vbt.drrs_type; + + dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR; + drm_dbg_kms(&dev_priv->drm, + "seamless DRRS supported for eDP panel.\n"); + return downclock_mode; +} diff --git a/drivers/gpu/drm/i915/display/intel_drrs.h b/drivers/gpu/drm/i915/display/intel_drrs.h new file mode 100644 index 000000000000..ffa175b4cf4f --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_drrs.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __INTEL_DRRS_H__ +#define __INTEL_DRRS_H__ + +#include <linux/types.h> + +struct drm_i915_private; +struct intel_crtc_state; +struct intel_connector; +struct intel_dp; + +void intel_edp_drrs_enable(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); +void intel_edp_drrs_disable(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); +void intel_edp_drrs_update(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); +void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits); +void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits); +void intel_dp_drrs_compute_config(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config, + int output_bpp, bool constant_n); +struct drm_display_mode *intel_dp_drrs_init(struct intel_connector *connector, + struct drm_display_mode *fixed_mode); + +#endif /* __INTEL_DRRS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c index 8e75debcce1a..e4834d84ce5e 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c @@ -62,6 +62,7 @@ #include "intel_display_types.h" #include "intel_fbc.h" #include "intel_frontbuffer.h" +#include "intel_drrs.h" #include "intel_psr.h" /** diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index c3787512295d..926ddc6599f5 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -1831,6 +1831,7 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi, bool has_hdmi_sink) { struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi); + enum phy phy = intel_port_to_phy(dev_priv, hdmi_to_dig_port(hdmi)->base.port); if (clock < 25000) return MODE_CLOCK_LOW; @@ -1851,6 +1852,14 @@ hdmi_port_clock_valid(struct 
intel_hdmi *hdmi, if (IS_CHERRYVIEW(dev_priv) && clock > 216000 && clock < 240000) return MODE_CLOCK_RANGE; + /* ICL+ combo PHY PLL can't generate 500-533.2 MHz */ + if (intel_phy_is_combo(dev_priv, phy) && clock > 500000 && clock < 533200) + return MODE_CLOCK_RANGE; + + /* ICL+ TC PHY PLL can't generate 500-532.8 MHz */ + if (intel_phy_is_tc(dev_priv, phy) && clock > 500000 && clock < 532800) + return MODE_CLOCK_RANGE; + /* * SNPS PHYs' MPLLB table-based programming can only handle a fixed * set of link rates. @@ -1892,7 +1901,7 @@ static bool intel_hdmi_bpc_possible(struct drm_connector *connector, if (ycbcr420_output) return hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_36; else - return info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36; + return info->edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_36; case 10: if (DISPLAY_VER(i915) < 11) return false; @@ -1903,7 +1912,7 @@ static bool intel_hdmi_bpc_possible(struct drm_connector *connector, if (ycbcr420_output) return hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_30; else - return info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30; + return info->edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_30; case 8: return true; default: diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index 3855fba70980..08c20869d7e9 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -361,6 +361,21 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, port++; } + /* + * The port numbering and mapping here is bizarre. The now-obsolete + * swsci spec supports ports numbered [0..4]. Port E is handled as a + * special case, but port F and beyond are not. The functionality is + * supposed to be obsolete for new platforms. Just bail out if the port + * number is out of bounds after mapping. 
+ */ + if (port > 4) { + drm_dbg_kms(&dev_priv->drm, + "[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n", + intel_encoder->base.base.id, intel_encoder->base.name, + port_name(intel_encoder->port), port); + return -EINVAL; + } + if (!enable) parm |= 4 << 8; diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c index a36ec4a818ff..466bf6820641 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.c +++ b/drivers/gpu/drm/i915/display/intel_pps.c @@ -1074,14 +1074,14 @@ static void intel_pps_vdd_sanitize(struct intel_dp *intel_dp) edp_panel_vdd_schedule_off(intel_dp); } -bool intel_pps_have_power(struct intel_dp *intel_dp) +bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp) { intel_wakeref_t wakeref; bool have_power = false; with_intel_pps_lock(intel_dp, wakeref) { - have_power = edp_have_panel_power(intel_dp) && - edp_have_panel_vdd(intel_dp); + have_power = edp_have_panel_power(intel_dp) || + edp_have_panel_vdd(intel_dp); } return have_power; diff --git a/drivers/gpu/drm/i915/display/intel_pps.h b/drivers/gpu/drm/i915/display/intel_pps.h index fbbcca782e7b..9fe7be4fe867 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.h +++ b/drivers/gpu/drm/i915/display/intel_pps.h @@ -36,7 +36,7 @@ void intel_pps_vdd_on(struct intel_dp *intel_dp); void intel_pps_on(struct intel_dp *intel_dp); void intel_pps_off(struct intel_dp *intel_dp); void intel_pps_vdd_off_sync(struct intel_dp *intel_dp); -bool intel_pps_have_power(struct intel_dp *intel_dp); +bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp); void intel_pps_wait_power_cycle(struct intel_dp *intel_dp); void intel_pps_init(struct intel_dp *intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 1b0daf649e82..a3d0c57ec0f0 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -936,6 +936,20 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, return false; } + /* Wa_16011303918:adl-p */ + if (crtc_state->vrr.enable && + IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) { + drm_dbg_kms(&dev_priv->drm, + "PSR2 not enabled, not compatible with HW stepping + VRR\n"); + return false; + } + + if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) { + drm_dbg_kms(&dev_priv->drm, + "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n"); + return false; + } + if (HAS_PSR2_SEL_FETCH(dev_priv)) { if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) && !HAS_PSR_HW_TRACKING(dev_priv)) { @@ -949,12 +963,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, if (!crtc_state->enable_psr2_sel_fetch && IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) { drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported this Display stepping\n"); - return false; + goto unsupported; } if (!psr2_granularity_check(intel_dp, crtc_state)) { drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n"); - return false; + goto unsupported; } if (!crtc_state->enable_psr2_sel_fetch && @@ -963,25 +977,15 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n", crtc_hdisplay, crtc_vdisplay, psr_max_h, psr_max_v); - return false; - } - - if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) { - drm_dbg_kms(&dev_priv->drm, - "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n"); - return false; - 
} - - /* Wa_16011303918:adl-p */ - if (crtc_state->vrr.enable && - IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) { - drm_dbg_kms(&dev_priv->drm, - "PSR2 not enabled, not compatible with HW stepping + VRR\n"); - return false; + goto unsupported; } tgl_dc3co_exitline_compute_config(intel_dp, crtc_state); return true; + +unsupported: + crtc_state->enable_psr2_sel_fetch = false; + return false; } void intel_psr_compute_config(struct intel_dp *intel_dp, diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c index 18b52b64af95..536b319ffe5b 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_phy.c +++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c @@ -32,7 +32,7 @@ void intel_snps_phy_wait_for_calibration(struct drm_i915_private *dev_priv) if (intel_de_wait_for_clear(dev_priv, ICL_PHY_MISC(phy), DG2_PHY_DP_TX_ACK_MASK, 25)) DRM_ERROR("SNPS PHY %c failed to calibrate after 25ms.\n", - phy); + phy_name(phy)); } } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 5130e8ed9564..28e07040cf47 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -66,7 +66,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, * mmap ioctl is disallowed for all discrete platforms, * and for all platforms with GRAPHICS_VER > 12. */ - if (IS_DGFX(i915) || GRAPHICS_VER(i915) > 12) + if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) > IP_VER(12, 0)) return -EOPNOTSUPP; if (args->flags & ~(I915_MMAP_WC)) @@ -438,7 +438,7 @@ vm_access(struct vm_area_struct *area, unsigned long addr, return -EACCES; addr -= area->vm_start; - if (addr >= obj->base.size) + if (range_overflows_t(u64, addr, len, obj->base.size)) return -EINVAL; i915_gem_ww_ctx_init(&ww, true); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 8d6c38a62201..9053cea3395a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -162,7 +162,6 @@ retry: /* Immediately discard the backing storage */ void i915_gem_object_truncate(struct drm_i915_gem_object *obj) { - drm_gem_free_mmap_offset(&obj->base); if (obj->ops->truncate) obj->ops->truncate(obj); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index 6ea13159bffc..4b823fbfe76a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -759,11 +759,9 @@ static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj) if (obj->mm.madv != I915_MADV_WILLNEED) { bo->priority = I915_TTM_PRIO_PURGE; } else if (!i915_gem_object_has_pages(obj)) { - if (bo->priority < I915_TTM_PRIO_HAS_PAGES) - bo->priority = I915_TTM_PRIO_HAS_PAGES; + bo->priority = I915_TTM_PRIO_NO_PAGES; } else { - if (bo->priority > I915_TTM_PRIO_NO_PAGES) - bo->priority = I915_TTM_PRIO_NO_PAGES; + bo->priority = I915_TTM_PRIO_HAS_PAGES; } ttm_bo_move_to_lru_tail(bo, bo->resource, NULL); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c index 65a3e7fdb2b2..95ff630157b9 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c @@ -133,7 +133,7 @@ static int guc_action_slpc_unset_param(struct intel_guc *guc, u8 id) { u32 request[] = { GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST, - SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 2), + SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1), id, }; diff --git 
a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c index d1d4b97b86f5..287f5a3d0b35 100644 --- a/drivers/gpu/drm/i915/intel_pch.c +++ b/drivers/gpu/drm/i915/intel_pch.c @@ -108,6 +108,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) /* Comet Lake V PCH is based on KBP, which is SPT compatible */ return PCH_SPT; case INTEL_PCH_ICP_DEVICE_ID_TYPE: + case INTEL_PCH_ICP2_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); return PCH_ICP; @@ -123,7 +124,6 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) !IS_GEN9_BC(dev_priv)); return PCH_TGP; case INTEL_PCH_JSP_DEVICE_ID_TYPE: - case INTEL_PCH_JSP2_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv)); return PCH_JSP; diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h index 7c0d83d292dc..994c56fcb199 100644 --- a/drivers/gpu/drm/i915/intel_pch.h +++ b/drivers/gpu/drm/i915/intel_pch.h @@ -50,11 +50,11 @@ enum intel_pch { #define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680 #define INTEL_PCH_CMP_V_DEVICE_ID_TYPE 0xA380 #define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480 +#define INTEL_PCH_ICP2_DEVICE_ID_TYPE 0x3880 #define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00 #define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080 #define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380 #define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80 -#define INTEL_PCH_JSP2_DEVICE_ID_TYPE 0x3880 #define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80 #define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index aea4cc2b3486..57c1dda76b94 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3713,8 +3713,7 @@ skl_setup_sagv_block_time(struct drm_i915_private *dev_priv) MISSING_CASE(DISPLAY_VER(dev_priv)); } - /* Default to an unusable block time */ - dev_priv->sagv_block_time_us = -1; + dev_priv->sagv_block_time_us = 0; } /* @@ -4020,6 +4019,17 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state) return ret; } + if (intel_can_enable_sagv(dev_priv, new_bw_state) != + intel_can_enable_sagv(dev_priv, old_bw_state)) { + ret = intel_atomic_serialize_global_state(&new_bw_state->base); + if (ret) + return ret; + } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) { + ret = intel_atomic_lock_global_state(&new_bw_state->base); + if (ret) + return ret; + } + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal; @@ -4035,17 +4045,6 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state) intel_can_enable_sagv(dev_priv, new_bw_state); } - if (intel_can_enable_sagv(dev_priv, new_bw_state) != - intel_can_enable_sagv(dev_priv, old_bw_state)) { - ret = intel_atomic_serialize_global_state(&new_bw_state->base); - if (ret) - return ret; - } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) { - ret = intel_atomic_lock_global_state(&new_bw_state->base); - if (ret) - return ret; - } - return 0; } @@ -4844,7 +4843,7 @@ static bool check_mbus_joined(u8 active_pipes, { int i; - for (i = 0; i < dbuf_slices[i].active_pipes; i++) { + for (i = 0; dbuf_slices[i].active_pipes != 0; i++) { if (dbuf_slices[i].active_pipes == active_pipes) return dbuf_slices[i].join_mbus; } @@ -4861,7 +4860,7 @@ static u8 
compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus, { int i; - for (i = 0; i < dbuf_slices[i].active_pipes; i++) { + for (i = 0; dbuf_slices[i].active_pipes != 0; i++) { if (dbuf_slices[i].active_pipes == active_pipes && dbuf_slices[i].join_mbus == join_mbus) return dbuf_slices[i].dbuf_mask[pipe]; @@ -5635,7 +5634,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1; result->enable = true; - if (DISPLAY_VER(dev_priv) < 12) + if (DISPLAY_VER(dev_priv) < 12 && dev_priv->sagv_block_time_us) result->can_sagv = latency >= dev_priv->sagv_block_time_us; } @@ -5666,7 +5665,10 @@ static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state, struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0; struct skl_wm_level *levels = plane_wm->wm; - unsigned int latency = dev_priv->wm.skl_latency[0] + dev_priv->sagv_block_time_us; + unsigned int latency = 0; + + if (dev_priv->sagv_block_time_us) + latency = dev_priv->sagv_block_time_us + dev_priv->wm.skl_latency[0]; skl_compute_plane_wm(crtc_state, 0, latency, wm_params, &levels[0], @@ -6681,6 +6683,74 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv) dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices; } +static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915) +{ + const struct intel_dbuf_state *dbuf_state = + to_intel_dbuf_state(i915->dbuf.obj.state); + struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; + struct intel_crtc *crtc; + + for_each_intel_crtc(&i915->drm, crtc) { + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + + entries[crtc->pipe] = crtc_state->wm.skl.ddb; + } + + for_each_intel_crtc(&i915->drm, crtc) { + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + u8 slices; + + slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes, + dbuf_state->joined_mbus); + if (dbuf_state->slices[crtc->pipe] & ~slices) + return true; + + if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries, + I915_MAX_PIPES, crtc->pipe)) + return true; + } + + return false; +} + +void skl_wm_sanitize(struct drm_i915_private *i915) +{ + struct intel_crtc *crtc; + + /* + * On TGL/RKL (at least) the BIOS likes to assign the planes + * to the wrong DBUF slices. This will cause an infinite loop + * in skl_commit_modeset_enables() as it can't find a way to + * transition between the old bogus DBUF layout to the new + * proper DBUF layout without DBUF allocation overlaps between + * the planes (which cannot be allowed or else the hardware + * may hang). If we detect a bogus DBUF layout just turn off + * all the planes so that skl_commit_modeset_enables() can + * simply ignore them. 
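/*
 * [Editor's note -- not part of the patch.]  Only the skl_wm_sanitize()
 * helper and its declaration (see the intel_pm.h hunk below, next to
 * g4x_wm_sanitize() and vlv_wm_sanitize()) are added in this portion of the
 * diff; the call site is not in this hunk.  By analogy with the other
 * *_wm_sanitize() helpers, the expectation is a single call after the
 * initial watermark/DBUF hardware readout.  The dispatch below is a
 * hypothetical sketch that relies on the existing i915 headers, not code
 * taken from this series.
 */
static void example_wm_sanitize_after_readout(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 9)
		skl_wm_sanitize(i915);
	else if (IS_G4X(i915))
		g4x_wm_sanitize(i915);
	else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		vlv_wm_sanitize(i915);
}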
+ */ + if (!skl_dbuf_is_misconfigured(i915)) + return; + + drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n"); + + for_each_intel_crtc(&i915->drm, crtc) { + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + const struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + + if (plane_state->uapi.visible) + intel_plane_disable_noatomic(crtc, plane); + + drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0); + + memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb)); + } +} + static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h index 91f23b7f0af2..79d89fe22d8c 100644 --- a/drivers/gpu/drm/i915/intel_pm.h +++ b/drivers/gpu/drm/i915/intel_pm.h @@ -48,6 +48,7 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, struct skl_pipe_wm *out); void g4x_wm_sanitize(struct drm_i915_private *dev_priv); void vlv_wm_sanitize(struct drm_i915_private *dev_priv); +void skl_wm_sanitize(struct drm_i915_private *dev_priv); bool intel_can_enable_sagv(struct drm_i915_private *dev_priv, const struct intel_bw_state *bw_state); void intel_sagv_pre_plane_update(struct intel_atomic_state *state); diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c index 87428fb23d9f..a2277a0d6d06 100644 --- a/drivers/gpu/drm/imx/dw_hdmi-imx.c +++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c @@ -222,6 +222,7 @@ static int dw_hdmi_imx_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; const struct of_device_id *match = of_match_node(dw_hdmi_imx_dt_ids, np); struct imx_hdmi *hdmi; + int ret; hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL); if (!hdmi) @@ -243,10 +244,15 @@ static int dw_hdmi_imx_probe(struct platform_device *pdev) hdmi->bridge = of_drm_find_bridge(np); if (!hdmi->bridge) { dev_err(hdmi->dev, "Unable to find bridge\n"); + dw_hdmi_remove(hdmi->hdmi); return -ENODEV; } - return component_add(&pdev->dev, &dw_hdmi_imx_ops); + ret = component_add(&pdev->dev, &dw_hdmi_imx_ops); + if (ret) + dw_hdmi_remove(hdmi->hdmi); + + return ret; } static int dw_hdmi_imx_remove(struct platform_device *pdev) diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index e5078d03020d..fb0e951248f6 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -572,6 +572,8 @@ static int imx_ldb_panel_ddc(struct device *dev, edidp = of_get_property(child, "edid", &edid_len); if (edidp) { channel->edid = kmemdup(edidp, edid_len, GFP_KERNEL); + if (!channel->edid) + return -ENOMEM; } else if (!channel->panel) { /* fallback to display-timings node */ ret = of_get_drm_display_mode(child, diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index a8aba0141ce7..63ba2ad84679 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -75,8 +75,10 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector) ret = of_get_drm_display_mode(np, &imxpd->mode, &imxpd->bus_flags, OF_USE_NATIVE_MODE); - if (ret) + if (ret) { + drm_mode_destroy(connector->dev, mode); return ret; + } drm_mode_copy(mode, &imxpd->mode); mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; @@ -217,14 +219,6 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge, if 
(!imx_pd_format_supported(bus_fmt)) return -EINVAL; - if (bus_flags & - ~(DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_DE_HIGH | - DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE | - DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)) { - dev_warn(imxpd->dev, "invalid bus_flags (%x)\n", bus_flags); - return -EINVAL; - } - bridge_state->output_bus_cfg.flags = bus_flags; bridge_state->input_bus_cfg.flags = bus_flags; imx_crtc_state->bus_flags = bus_flags; diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 93b40c245f00..5d90d2eb0019 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -11,6 +11,7 @@ #include <linux/of_platform.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> +#include <linux/reset.h> #include <video/mipi_display.h> #include <video/videomode.h> @@ -980,8 +981,10 @@ static int mtk_dsi_bind(struct device *dev, struct device *master, void *data) struct mtk_dsi *dsi = dev_get_drvdata(dev); ret = mtk_dsi_encoder_init(drm, dsi); + if (ret) + return ret; - return ret; + return device_reset_optional(dev); } static void mtk_dsi_unbind(struct device *dev, struct device *master, diff --git a/drivers/gpu/drm/meson/Makefile b/drivers/gpu/drm/meson/Makefile index 28a519cdf66b..523fce45f16b 100644 --- a/drivers/gpu/drm/meson/Makefile +++ b/drivers/gpu/drm/meson/Makefile @@ -2,6 +2,7 @@ meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_overlay.o meson-drm-y += meson_rdma.o meson_osd_afbcd.o +meson-drm-y += meson_encoder_hdmi.o obj-$(CONFIG_DRM_MESON) += meson-drm.o obj-$(CONFIG_DRM_MESON_DW_HDMI) += meson_dw_hdmi.o diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index bc0d60df04ae..c98525d60df5 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -32,6 +32,7 @@ #include "meson_osd_afbcd.h" #include "meson_registers.h" #include "meson_venc_cvbs.h" +#include "meson_encoder_hdmi.h" #include "meson_viu.h" #include "meson_vpp.h" #include "meson_rdma.h" @@ -206,8 +207,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) priv->compat = match->compat; priv->afbcd.ops = match->afbcd_ops; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu"); - regs = devm_ioremap_resource(dev, res); + regs = devm_platform_ioremap_resource_byname(pdev, "vpu"); if (IS_ERR(regs)) { ret = PTR_ERR(regs); goto free_drm; @@ -302,38 +302,42 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) if (priv->afbcd.ops) { ret = priv->afbcd.ops->init(priv); if (ret) - return ret; + goto free_drm; } /* Encoder Initialization */ ret = meson_venc_cvbs_create(priv); if (ret) - goto free_drm; + goto exit_afbcd; if (has_components) { ret = component_bind_all(drm->dev, drm); if (ret) { dev_err(drm->dev, "Couldn't bind all components\n"); - goto free_drm; + goto exit_afbcd; } } + ret = meson_encoder_hdmi_init(priv); + if (ret) + goto exit_afbcd; + ret = meson_plane_create(priv); if (ret) - goto free_drm; + goto exit_afbcd; ret = meson_overlay_create(priv); if (ret) - goto free_drm; + goto exit_afbcd; ret = meson_crtc_create(priv); if (ret) - goto free_drm; + goto exit_afbcd; ret = request_irq(priv->vsync_irq, meson_irq, 0, drm->driver->name, drm); if (ret) - goto free_drm; + goto exit_afbcd; drm_mode_config_reset(drm); @@ -351,6 +355,9 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) uninstall_irq: free_irq(priv->vsync_irq, drm); 
+exit_afbcd: + if (priv->afbcd.ops) + priv->afbcd.ops->exit(priv); free_drm: drm_dev_put(drm); @@ -381,10 +388,8 @@ static void meson_drv_unbind(struct device *dev) free_irq(priv->vsync_irq, drm); drm_dev_put(drm); - if (priv->afbcd.ops) { - priv->afbcd.ops->reset(priv); - meson_rdma_free(priv); - } + if (priv->afbcd.ops) + priv->afbcd.ops->exit(priv); } static const struct component_master_ops meson_drv_master_ops = { diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c index 2ed87cfdd735..fb540a503efe 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c @@ -22,14 +22,11 @@ #include <drm/drm_probe_helper.h> #include <drm/drm_print.h> -#include <linux/media-bus-format.h> #include <linux/videodev2.h> #include "meson_drv.h" #include "meson_dw_hdmi.h" #include "meson_registers.h" -#include "meson_vclk.h" -#include "meson_venc.h" #define DRIVER_NAME "meson-dw-hdmi" #define DRIVER_DESC "Amlogic Meson HDMI-TX DRM driver" @@ -135,8 +132,6 @@ struct meson_dw_hdmi_data { }; struct meson_dw_hdmi { - struct drm_encoder encoder; - struct drm_bridge bridge; struct dw_hdmi_plat_data dw_plat_data; struct meson_drm *priv; struct device *dev; @@ -148,12 +143,8 @@ struct meson_dw_hdmi { struct regulator *hdmi_supply; u32 irq_stat; struct dw_hdmi *hdmi; - unsigned long output_bus_fmt; + struct drm_bridge *bridge; }; -#define encoder_to_meson_dw_hdmi(x) \ - container_of(x, struct meson_dw_hdmi, encoder) -#define bridge_to_meson_dw_hdmi(x) \ - container_of(x, struct meson_dw_hdmi, bridge) static inline int dw_hdmi_is_compatible(struct meson_dw_hdmi *dw_hdmi, const char *compat) @@ -295,14 +286,14 @@ static inline void dw_hdmi_dwc_write_bits(struct meson_dw_hdmi *dw_hdmi, /* Setup PHY bandwidth modes */ static void meson_hdmi_phy_setup_mode(struct meson_dw_hdmi *dw_hdmi, - const struct drm_display_mode *mode) + const struct drm_display_mode *mode, + bool mode_is_420) { struct meson_drm *priv = dw_hdmi->priv; unsigned int pixel_clock = mode->clock; /* For 420, pixel clock is half unlike venc clock */ - if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) - pixel_clock /= 2; + if (mode_is_420) pixel_clock /= 2; if (dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxl-dw-hdmi") || dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi")) { @@ -374,68 +365,25 @@ static inline void meson_dw_hdmi_phy_reset(struct meson_dw_hdmi *dw_hdmi) mdelay(2); } -static void dw_hdmi_set_vclk(struct meson_dw_hdmi *dw_hdmi, - const struct drm_display_mode *mode) -{ - struct meson_drm *priv = dw_hdmi->priv; - int vic = drm_match_cea_mode(mode); - unsigned int phy_freq; - unsigned int vclk_freq; - unsigned int venc_freq; - unsigned int hdmi_freq; - - vclk_freq = mode->clock; - - /* For 420, pixel clock is half unlike venc clock */ - if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) - vclk_freq /= 2; - - /* TMDS clock is pixel_clock * 10 */ - phy_freq = vclk_freq * 10; - - if (!vic) { - meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, phy_freq, - vclk_freq, vclk_freq, vclk_freq, false); - return; - } - - /* 480i/576i needs global pixel doubling */ - if (mode->flags & DRM_MODE_FLAG_DBLCLK) - vclk_freq *= 2; - - venc_freq = vclk_freq; - hdmi_freq = vclk_freq; - - /* VENC double pixels for 1080i, 720p and YUV420 modes */ - if (meson_venc_hdmi_venc_repeat(vic) || - dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) - venc_freq *= 2; - - vclk_freq = max(venc_freq, hdmi_freq); - - if (mode->flags & DRM_MODE_FLAG_DBLCLK) - venc_freq /= 2; - 
- DRM_DEBUG_DRIVER("vclk:%d phy=%d venc=%d hdmi=%d enci=%d\n", - phy_freq, vclk_freq, venc_freq, hdmi_freq, - priv->venc.hdmi_use_enci); - - meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, phy_freq, vclk_freq, - venc_freq, hdmi_freq, priv->venc.hdmi_use_enci); -} - static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data, const struct drm_display_info *display, const struct drm_display_mode *mode) { struct meson_dw_hdmi *dw_hdmi = (struct meson_dw_hdmi *)data; + bool is_hdmi2_sink = display->hdmi.scdc.supported; struct meson_drm *priv = dw_hdmi->priv; unsigned int wr_clk = readl_relaxed(priv->io_base + _REG(VPU_HDMI_SETTING)); + bool mode_is_420 = false; DRM_DEBUG_DRIVER("\"%s\" div%d\n", mode->name, mode->clock > 340000 ? 40 : 10); + if (drm_mode_is_420_only(display, mode) || + (!is_hdmi2_sink && + drm_mode_is_420_also(display, mode))) + mode_is_420 = true; + /* Enable clocks */ regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100); @@ -457,8 +405,7 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data, dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12)); /* TMDS pattern setup */ - if (mode->clock > 340000 && - dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_YUV8_1X24) { + if (mode->clock > 340000 && !mode_is_420) { dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01, 0); dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23, @@ -476,7 +423,7 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data, dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_CNTL, 0x2); /* Setup PHY parameters */ - meson_hdmi_phy_setup_mode(dw_hdmi, mode); + meson_hdmi_phy_setup_mode(dw_hdmi, mode, mode_is_420); /* Setup PHY */ regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1, @@ -622,214 +569,15 @@ static irqreturn_t dw_hdmi_top_thread_irq(int irq, void *dev_id) dw_hdmi_setup_rx_sense(dw_hdmi->hdmi, hpd_connected, hpd_connected); - drm_helper_hpd_irq_event(dw_hdmi->encoder.dev); + drm_helper_hpd_irq_event(dw_hdmi->bridge->dev); + drm_bridge_hpd_notify(dw_hdmi->bridge, + hpd_connected ? 
connector_status_connected + : connector_status_disconnected); } return IRQ_HANDLED; } -static enum drm_mode_status -dw_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data, - const struct drm_display_info *display_info, - const struct drm_display_mode *mode) -{ - struct meson_dw_hdmi *dw_hdmi = data; - struct meson_drm *priv = dw_hdmi->priv; - bool is_hdmi2_sink = display_info->hdmi.scdc.supported; - unsigned int phy_freq; - unsigned int vclk_freq; - unsigned int venc_freq; - unsigned int hdmi_freq; - int vic = drm_match_cea_mode(mode); - enum drm_mode_status status; - - DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); - - /* If sink does not support 540MHz, reject the non-420 HDMI2 modes */ - if (display_info->max_tmds_clock && - mode->clock > display_info->max_tmds_clock && - !drm_mode_is_420_only(display_info, mode) && - !drm_mode_is_420_also(display_info, mode)) - return MODE_BAD; - - /* Check against non-VIC supported modes */ - if (!vic) { - status = meson_venc_hdmi_supported_mode(mode); - if (status != MODE_OK) - return status; - - return meson_vclk_dmt_supported_freq(priv, mode->clock); - /* Check against supported VIC modes */ - } else if (!meson_venc_hdmi_supported_vic(vic)) - return MODE_BAD; - - vclk_freq = mode->clock; - - /* For 420, pixel clock is half unlike venc clock */ - if (drm_mode_is_420_only(display_info, mode) || - (!is_hdmi2_sink && - drm_mode_is_420_also(display_info, mode))) - vclk_freq /= 2; - - /* TMDS clock is pixel_clock * 10 */ - phy_freq = vclk_freq * 10; - - /* 480i/576i needs global pixel doubling */ - if (mode->flags & DRM_MODE_FLAG_DBLCLK) - vclk_freq *= 2; - - venc_freq = vclk_freq; - hdmi_freq = vclk_freq; - - /* VENC double pixels for 1080i, 720p and YUV420 modes */ - if (meson_venc_hdmi_venc_repeat(vic) || - drm_mode_is_420_only(display_info, mode) || - (!is_hdmi2_sink && - drm_mode_is_420_also(display_info, mode))) - venc_freq *= 2; - - vclk_freq = max(venc_freq, hdmi_freq); - - if (mode->flags & DRM_MODE_FLAG_DBLCLK) - venc_freq /= 2; - - dev_dbg(dw_hdmi->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n", - __func__, phy_freq, vclk_freq, venc_freq, hdmi_freq); - - return meson_vclk_vic_supported_freq(priv, phy_freq, vclk_freq); -} - -/* Encoder */ - -static const u32 meson_dw_hdmi_out_bus_fmts[] = { - MEDIA_BUS_FMT_YUV8_1X24, - MEDIA_BUS_FMT_UYYVYY8_0_5X24, -}; - -static void meson_venc_hdmi_encoder_destroy(struct drm_encoder *encoder) -{ - drm_encoder_cleanup(encoder); -} - -static const struct drm_encoder_funcs meson_venc_hdmi_encoder_funcs = { - .destroy = meson_venc_hdmi_encoder_destroy, -}; - -static u32 * -meson_venc_hdmi_encoder_get_inp_bus_fmts(struct drm_bridge *bridge, - struct drm_bridge_state *bridge_state, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state, - u32 output_fmt, - unsigned int *num_input_fmts) -{ - u32 *input_fmts = NULL; - int i; - - *num_input_fmts = 0; - - for (i = 0 ; i < ARRAY_SIZE(meson_dw_hdmi_out_bus_fmts) ; ++i) { - if (output_fmt == meson_dw_hdmi_out_bus_fmts[i]) { - *num_input_fmts = 1; - input_fmts = kcalloc(*num_input_fmts, - sizeof(*input_fmts), - GFP_KERNEL); - if (!input_fmts) - return NULL; - - input_fmts[0] = output_fmt; - - break; - } - } - - return input_fmts; -} - -static int meson_venc_hdmi_encoder_atomic_check(struct drm_bridge *bridge, - struct drm_bridge_state *bridge_state, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state) -{ - struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge); - - dw_hdmi->output_bus_fmt = 
bridge_state->output_bus_cfg.format; - - DRM_DEBUG_DRIVER("output_bus_fmt %lx\n", dw_hdmi->output_bus_fmt); - - return 0; -} - -static void meson_venc_hdmi_encoder_disable(struct drm_bridge *bridge) -{ - struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge); - struct meson_drm *priv = dw_hdmi->priv; - - DRM_DEBUG_DRIVER("\n"); - - writel_bits_relaxed(0x3, 0, - priv->io_base + _REG(VPU_HDMI_SETTING)); - - writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN)); - writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN)); -} - -static void meson_venc_hdmi_encoder_enable(struct drm_bridge *bridge) -{ - struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge); - struct meson_drm *priv = dw_hdmi->priv; - - DRM_DEBUG_DRIVER("%s\n", priv->venc.hdmi_use_enci ? "VENCI" : "VENCP"); - - if (priv->venc.hdmi_use_enci) - writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN)); - else - writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN)); -} - -static void meson_venc_hdmi_encoder_mode_set(struct drm_bridge *bridge, - const struct drm_display_mode *mode, - const struct drm_display_mode *adjusted_mode) -{ - struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge); - struct meson_drm *priv = dw_hdmi->priv; - int vic = drm_match_cea_mode(mode); - unsigned int ycrcb_map = VPU_HDMI_OUTPUT_CBYCR; - bool yuv420_mode = false; - - DRM_DEBUG_DRIVER("\"%s\" vic %d\n", mode->name, vic); - - if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) { - ycrcb_map = VPU_HDMI_OUTPUT_CRYCB; - yuv420_mode = true; - } - - /* VENC + VENC-DVI Mode setup */ - meson_venc_hdmi_mode_set(priv, vic, ycrcb_map, yuv420_mode, mode); - - /* VCLK Set clock */ - dw_hdmi_set_vclk(dw_hdmi, mode); - - if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) - /* Setup YUV420 to HDMI-TX, no 10bit diphering */ - writel_relaxed(2 | (2 << 2), - priv->io_base + _REG(VPU_HDMI_FMT_CTRL)); - else - /* Setup YUV444 to HDMI-TX, no 10bit diphering */ - writel_relaxed(0, priv->io_base + _REG(VPU_HDMI_FMT_CTRL)); -} - -static const struct drm_bridge_funcs meson_venc_hdmi_encoder_bridge_funcs = { - .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, - .atomic_get_input_bus_fmts = meson_venc_hdmi_encoder_get_inp_bus_fmts, - .atomic_reset = drm_atomic_helper_bridge_reset, - .atomic_check = meson_venc_hdmi_encoder_atomic_check, - .enable = meson_venc_hdmi_encoder_enable, - .disable = meson_venc_hdmi_encoder_disable, - .mode_set = meson_venc_hdmi_encoder_mode_set, -}; - /* DW HDMI Regmap */ static int meson_dw_hdmi_reg_read(void *context, unsigned int reg, @@ -876,28 +624,6 @@ static const struct meson_dw_hdmi_data meson_dw_hdmi_g12a_data = { .dwc_write = dw_hdmi_g12a_dwc_write, }; -static bool meson_hdmi_connector_is_available(struct device *dev) -{ - struct device_node *ep, *remote; - - /* HDMI Connector is on the second port, first endpoint */ - ep = of_graph_get_endpoint_by_regs(dev->of_node, 1, 0); - if (!ep) - return false; - - /* If the endpoint node exists, consider it enabled */ - remote = of_graph_get_remote_port(ep); - if (remote) { - of_node_put(ep); - return true; - } - - of_node_put(ep); - of_node_put(remote); - - return false; -} - static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi) { struct meson_drm *priv = meson_dw_hdmi->priv; @@ -976,19 +702,11 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, struct drm_device *drm = data; struct meson_drm *priv = drm->dev_private; struct 
dw_hdmi_plat_data *dw_plat_data; - struct drm_bridge *next_bridge; - struct drm_encoder *encoder; - struct resource *res; int irq; int ret; DRM_DEBUG_DRIVER("\n"); - if (!meson_hdmi_connector_is_available(dev)) { - dev_info(drm->dev, "HDMI Output connector not available\n"); - return -ENODEV; - } - match = of_device_get_match_data(&pdev->dev); if (!match) { dev_err(&pdev->dev, "failed to get match data\n"); @@ -1004,7 +722,6 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, meson_dw_hdmi->dev = dev; meson_dw_hdmi->data = match; dw_plat_data = &meson_dw_hdmi->dw_plat_data; - encoder = &meson_dw_hdmi->encoder; meson_dw_hdmi->hdmi_supply = devm_regulator_get_optional(dev, "hdmi"); if (IS_ERR(meson_dw_hdmi->hdmi_supply)) { @@ -1042,8 +759,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, return PTR_ERR(meson_dw_hdmi->hdmitx_phy); } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - meson_dw_hdmi->hdmitx = devm_ioremap_resource(dev, res); + meson_dw_hdmi->hdmitx = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(meson_dw_hdmi->hdmitx)) return PTR_ERR(meson_dw_hdmi->hdmitx); @@ -1076,28 +792,11 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, return ret; } - /* Encoder */ - - ret = drm_encoder_init(drm, encoder, &meson_venc_hdmi_encoder_funcs, - DRM_MODE_ENCODER_TMDS, "meson_hdmi"); - if (ret) { - dev_err(priv->dev, "Failed to init HDMI encoder\n"); - return ret; - } - - meson_dw_hdmi->bridge.funcs = &meson_venc_hdmi_encoder_bridge_funcs; - drm_bridge_attach(encoder, &meson_dw_hdmi->bridge, NULL, 0); - - encoder->possible_crtcs = BIT(0); - meson_dw_hdmi_init(meson_dw_hdmi); - DRM_DEBUG_DRIVER("encoder initialized\n"); - /* Bridge / Connector */ dw_plat_data->priv_data = meson_dw_hdmi; - dw_plat_data->mode_valid = dw_hdmi_mode_valid; dw_plat_data->phy_ops = &meson_dw_hdmi_phy_ops; dw_plat_data->phy_name = "meson_dw_hdmi_phy"; dw_plat_data->phy_data = meson_dw_hdmi; @@ -1112,15 +811,11 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, platform_set_drvdata(pdev, meson_dw_hdmi); - meson_dw_hdmi->hdmi = dw_hdmi_probe(pdev, - &meson_dw_hdmi->dw_plat_data); + meson_dw_hdmi->hdmi = dw_hdmi_probe(pdev, &meson_dw_hdmi->dw_plat_data); if (IS_ERR(meson_dw_hdmi->hdmi)) return PTR_ERR(meson_dw_hdmi->hdmi); - next_bridge = of_drm_find_bridge(pdev->dev.of_node); - if (next_bridge) - drm_bridge_attach(encoder, next_bridge, - &meson_dw_hdmi->bridge, 0); + meson_dw_hdmi->bridge = of_drm_find_bridge(pdev->dev.of_node); DRM_DEBUG_DRIVER("HDMI controller initialized\n"); diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c new file mode 100644 index 000000000000..db332fa4cd54 --- /dev/null +++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c @@ -0,0 +1,370 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2016 BayLibre, SAS + * Author: Neil Armstrong <narmstrong@baylibre.com> + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. 
+ */ + +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of_graph.h> +#include <linux/regulator/consumer.h> +#include <linux/reset.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_device.h> +#include <drm/drm_edid.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> + +#include <linux/media-bus-format.h> +#include <linux/videodev2.h> + +#include "meson_drv.h" +#include "meson_registers.h" +#include "meson_vclk.h" +#include "meson_venc.h" +#include "meson_encoder_hdmi.h" + +struct meson_encoder_hdmi { + struct drm_encoder encoder; + struct drm_bridge bridge; + struct drm_bridge *next_bridge; + struct meson_drm *priv; + unsigned long output_bus_fmt; +}; + +#define bridge_to_meson_encoder_hdmi(x) \ + container_of(x, struct meson_encoder_hdmi, bridge) + +static int meson_encoder_hdmi_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); + + return drm_bridge_attach(bridge->encoder, encoder_hdmi->next_bridge, + &encoder_hdmi->bridge, flags); +} + +static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi, + const struct drm_display_mode *mode) +{ + struct meson_drm *priv = encoder_hdmi->priv; + int vic = drm_match_cea_mode(mode); + unsigned int phy_freq; + unsigned int vclk_freq; + unsigned int venc_freq; + unsigned int hdmi_freq; + + vclk_freq = mode->clock; + + /* For 420, pixel clock is half unlike venc clock */ + if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) + vclk_freq /= 2; + + /* TMDS clock is pixel_clock * 10 */ + phy_freq = vclk_freq * 10; + + if (!vic) { + meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, phy_freq, + vclk_freq, vclk_freq, vclk_freq, false); + return; + } + + /* 480i/576i needs global pixel doubling */ + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + vclk_freq *= 2; + + venc_freq = vclk_freq; + hdmi_freq = vclk_freq; + + /* VENC double pixels for 1080i, 720p and YUV420 modes */ + if (meson_venc_hdmi_venc_repeat(vic) || + encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) + venc_freq *= 2; + + vclk_freq = max(venc_freq, hdmi_freq); + + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + venc_freq /= 2; + + dev_dbg(priv->dev, "vclk:%d phy=%d venc=%d hdmi=%d enci=%d\n", + phy_freq, vclk_freq, venc_freq, hdmi_freq, + priv->venc.hdmi_use_enci); + + meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, phy_freq, vclk_freq, + venc_freq, hdmi_freq, priv->venc.hdmi_use_enci); +} + +static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *display_info, + const struct drm_display_mode *mode) +{ + struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); + struct meson_drm *priv = encoder_hdmi->priv; + bool is_hdmi2_sink = display_info->hdmi.scdc.supported; + unsigned int phy_freq; + unsigned int vclk_freq; + unsigned int venc_freq; + unsigned int hdmi_freq; + int vic = drm_match_cea_mode(mode); + enum drm_mode_status status; + + dev_dbg(priv->dev, "Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); + + /* If sink does not support 540MHz, reject the non-420 HDMI2 modes */ + if (display_info->max_tmds_clock && + mode->clock > display_info->max_tmds_clock && + !drm_mode_is_420_only(display_info, mode) && + !drm_mode_is_420_also(display_info, mode)) + return MODE_BAD; + + /* Check against non-VIC supported 
modes */ + if (!vic) { + status = meson_venc_hdmi_supported_mode(mode); + if (status != MODE_OK) + return status; + + return meson_vclk_dmt_supported_freq(priv, mode->clock); + /* Check against supported VIC modes */ + } else if (!meson_venc_hdmi_supported_vic(vic)) + return MODE_BAD; + + vclk_freq = mode->clock; + + /* For 420, pixel clock is half unlike venc clock */ + if (drm_mode_is_420_only(display_info, mode) || + (!is_hdmi2_sink && + drm_mode_is_420_also(display_info, mode))) + vclk_freq /= 2; + + /* TMDS clock is pixel_clock * 10 */ + phy_freq = vclk_freq * 10; + + /* 480i/576i needs global pixel doubling */ + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + vclk_freq *= 2; + + venc_freq = vclk_freq; + hdmi_freq = vclk_freq; + + /* VENC double pixels for 1080i, 720p and YUV420 modes */ + if (meson_venc_hdmi_venc_repeat(vic) || + drm_mode_is_420_only(display_info, mode) || + (!is_hdmi2_sink && + drm_mode_is_420_also(display_info, mode))) + venc_freq *= 2; + + vclk_freq = max(venc_freq, hdmi_freq); + + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + venc_freq /= 2; + + dev_dbg(priv->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n", + __func__, phy_freq, vclk_freq, venc_freq, hdmi_freq); + + return meson_vclk_vic_supported_freq(priv, phy_freq, vclk_freq); +} + +static void meson_encoder_hdmi_atomic_enable(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state) +{ + struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); + struct drm_atomic_state *state = bridge_state->base.state; + unsigned int ycrcb_map = VPU_HDMI_OUTPUT_CBYCR; + struct meson_drm *priv = encoder_hdmi->priv; + struct drm_connector_state *conn_state; + const struct drm_display_mode *mode; + struct drm_crtc_state *crtc_state; + struct drm_connector *connector; + bool yuv420_mode = false; + int vic; + + connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); + if (WARN_ON(!connector)) + return; + + conn_state = drm_atomic_get_new_connector_state(state, connector); + if (WARN_ON(!conn_state)) + return; + + crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc); + if (WARN_ON(!crtc_state)) + return; + + mode = &crtc_state->adjusted_mode; + + vic = drm_match_cea_mode(mode); + + dev_dbg(priv->dev, "\"%s\" vic %d\n", mode->name, vic); + + if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) { + ycrcb_map = VPU_HDMI_OUTPUT_CRYCB; + yuv420_mode = true; + } + + /* VENC + VENC-DVI Mode setup */ + meson_venc_hdmi_mode_set(priv, vic, ycrcb_map, yuv420_mode, mode); + + /* VCLK Set clock */ + meson_encoder_hdmi_set_vclk(encoder_hdmi, mode); + + if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) + /* Setup YUV420 to HDMI-TX, no 10bit diphering */ + writel_relaxed(2 | (2 << 2), + priv->io_base + _REG(VPU_HDMI_FMT_CTRL)); + else + /* Setup YUV444 to HDMI-TX, no 10bit diphering */ + writel_relaxed(0, priv->io_base + _REG(VPU_HDMI_FMT_CTRL)); + + dev_dbg(priv->dev, "%s\n", priv->venc.hdmi_use_enci ? 
"VENCI" : "VENCP"); + + if (priv->venc.hdmi_use_enci) + writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN)); + else + writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN)); +} + +static void meson_encoder_hdmi_atomic_disable(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state) +{ + struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); + struct meson_drm *priv = encoder_hdmi->priv; + + writel_bits_relaxed(0x3, 0, + priv->io_base + _REG(VPU_HDMI_SETTING)); + + writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN)); + writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN)); +} + +static const u32 meson_encoder_hdmi_out_bus_fmts[] = { + MEDIA_BUS_FMT_YUV8_1X24, + MEDIA_BUS_FMT_UYYVYY8_0_5X24, +}; + +static u32 * +meson_encoder_hdmi_get_inp_bus_fmts(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts) +{ + u32 *input_fmts = NULL; + int i; + + *num_input_fmts = 0; + + for (i = 0 ; i < ARRAY_SIZE(meson_encoder_hdmi_out_bus_fmts) ; ++i) { + if (output_fmt == meson_encoder_hdmi_out_bus_fmts[i]) { + *num_input_fmts = 1; + input_fmts = kcalloc(*num_input_fmts, + sizeof(*input_fmts), + GFP_KERNEL); + if (!input_fmts) + return NULL; + + input_fmts[0] = output_fmt; + + break; + } + } + + return input_fmts; +} + +static int meson_encoder_hdmi_atomic_check(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); + struct drm_connector_state *old_conn_state = + drm_atomic_get_old_connector_state(conn_state->state, conn_state->connector); + struct meson_drm *priv = encoder_hdmi->priv; + + encoder_hdmi->output_bus_fmt = bridge_state->output_bus_cfg.format; + + dev_dbg(priv->dev, "output_bus_fmt %lx\n", encoder_hdmi->output_bus_fmt); + + if (!drm_connector_atomic_hdr_metadata_equal(old_conn_state, conn_state)) + crtc_state->mode_changed = true; + + return 0; +} + +static const struct drm_bridge_funcs meson_encoder_hdmi_bridge_funcs = { + .attach = meson_encoder_hdmi_attach, + .mode_valid = meson_encoder_hdmi_mode_valid, + .atomic_enable = meson_encoder_hdmi_atomic_enable, + .atomic_disable = meson_encoder_hdmi_atomic_disable, + .atomic_get_input_bus_fmts = meson_encoder_hdmi_get_inp_bus_fmts, + .atomic_check = meson_encoder_hdmi_atomic_check, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, +}; + +int meson_encoder_hdmi_init(struct meson_drm *priv) +{ + struct meson_encoder_hdmi *meson_encoder_hdmi; + struct device_node *remote; + int ret; + + meson_encoder_hdmi = devm_kzalloc(priv->dev, sizeof(*meson_encoder_hdmi), GFP_KERNEL); + if (!meson_encoder_hdmi) + return -ENOMEM; + + /* HDMI Transceiver Bridge */ + remote = of_graph_get_remote_node(priv->dev->of_node, 1, 0); + if (!remote) { + dev_err(priv->dev, "HDMI transceiver device is disabled"); + return 0; + } + + meson_encoder_hdmi->next_bridge = of_drm_find_bridge(remote); + if (!meson_encoder_hdmi->next_bridge) { + dev_err(priv->dev, "Failed to find HDMI transceiver bridge\n"); + return -EPROBE_DEFER; + } + + /* HDMI Encoder Bridge */ + meson_encoder_hdmi->bridge.funcs = &meson_encoder_hdmi_bridge_funcs; + meson_encoder_hdmi->bridge.of_node = priv->dev->of_node; + 
meson_encoder_hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA; + + drm_bridge_add(&meson_encoder_hdmi->bridge); + + meson_encoder_hdmi->priv = priv; + + /* Encoder */ + ret = drm_simple_encoder_init(priv->drm, &meson_encoder_hdmi->encoder, + DRM_MODE_ENCODER_TMDS); + if (ret) { + dev_err(priv->dev, "Failed to init HDMI encoder: %d\n", ret); + return ret; + } + + meson_encoder_hdmi->encoder.possible_crtcs = BIT(0); + + /* Attach HDMI Encoder Bridge to Encoder */ + ret = drm_bridge_attach(&meson_encoder_hdmi->encoder, &meson_encoder_hdmi->bridge, NULL, 0); + if (ret) { + dev_err(priv->dev, "Failed to attach bridge: %d\n", ret); + return ret; + } + + /* + * We should have now in place: + * encoder->[hdmi encoder bridge]->[dw-hdmi bridge]->[dw-hdmi connector] + */ + + dev_dbg(priv->dev, "HDMI encoder initialized\n"); + + return 0; +} diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.h b/drivers/gpu/drm/meson/meson_encoder_hdmi.h new file mode 100644 index 000000000000..ed19494f0956 --- /dev/null +++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2021 BayLibre, SAS + * Author: Neil Armstrong <narmstrong@baylibre.com> + */ + +#ifndef __MESON_ENCODER_HDMI_H +#define __MESON_ENCODER_HDMI_H + +int meson_encoder_hdmi_init(struct meson_drm *priv); + +#endif /* __MESON_ENCODER_HDMI_H */ diff --git a/drivers/gpu/drm/meson/meson_osd_afbcd.c b/drivers/gpu/drm/meson/meson_osd_afbcd.c index ffc6b584dbf8..0cdbe899402f 100644 --- a/drivers/gpu/drm/meson/meson_osd_afbcd.c +++ b/drivers/gpu/drm/meson/meson_osd_afbcd.c @@ -79,11 +79,6 @@ static bool meson_gxm_afbcd_supported_fmt(u64 modifier, uint32_t format) return meson_gxm_afbcd_pixel_fmt(modifier, format) >= 0; } -static int meson_gxm_afbcd_init(struct meson_drm *priv) -{ - return 0; -} - static int meson_gxm_afbcd_reset(struct meson_drm *priv) { writel_relaxed(VIU_SW_RESET_OSD1_AFBCD, @@ -93,6 +88,16 @@ static int meson_gxm_afbcd_reset(struct meson_drm *priv) return 0; } +static int meson_gxm_afbcd_init(struct meson_drm *priv) +{ + return 0; +} + +static void meson_gxm_afbcd_exit(struct meson_drm *priv) +{ + meson_gxm_afbcd_reset(priv); +} + static int meson_gxm_afbcd_enable(struct meson_drm *priv) { writel_relaxed(FIELD_PREP(OSD1_AFBCD_ID_FIFO_THRD, 0x40) | @@ -172,6 +177,7 @@ static int meson_gxm_afbcd_setup(struct meson_drm *priv) struct meson_afbcd_ops meson_afbcd_gxm_ops = { .init = meson_gxm_afbcd_init, + .exit = meson_gxm_afbcd_exit, .reset = meson_gxm_afbcd_reset, .enable = meson_gxm_afbcd_enable, .disable = meson_gxm_afbcd_disable, @@ -269,6 +275,18 @@ static bool meson_g12a_afbcd_supported_fmt(u64 modifier, uint32_t format) return meson_g12a_afbcd_pixel_fmt(modifier, format) >= 0; } +static int meson_g12a_afbcd_reset(struct meson_drm *priv) +{ + meson_rdma_reset(priv); + + meson_rdma_writel_sync(priv, VIU_SW_RESET_G12A_AFBC_ARB | + VIU_SW_RESET_G12A_OSD1_AFBCD, + VIU_SW_RESET); + meson_rdma_writel_sync(priv, 0, VIU_SW_RESET); + + return 0; +} + static int meson_g12a_afbcd_init(struct meson_drm *priv) { int ret; @@ -286,16 +304,10 @@ static int meson_g12a_afbcd_init(struct meson_drm *priv) return 0; } -static int meson_g12a_afbcd_reset(struct meson_drm *priv) +static void meson_g12a_afbcd_exit(struct meson_drm *priv) { - meson_rdma_reset(priv); - - meson_rdma_writel_sync(priv, VIU_SW_RESET_G12A_AFBC_ARB | - VIU_SW_RESET_G12A_OSD1_AFBCD, - VIU_SW_RESET); - meson_rdma_writel_sync(priv, 0, VIU_SW_RESET); - - return 0; + meson_g12a_afbcd_reset(priv); + 
meson_rdma_free(priv); } static int meson_g12a_afbcd_enable(struct meson_drm *priv) @@ -380,6 +392,7 @@ static int meson_g12a_afbcd_setup(struct meson_drm *priv) struct meson_afbcd_ops meson_afbcd_g12a_ops = { .init = meson_g12a_afbcd_init, + .exit = meson_g12a_afbcd_exit, .reset = meson_g12a_afbcd_reset, .enable = meson_g12a_afbcd_enable, .disable = meson_g12a_afbcd_disable, diff --git a/drivers/gpu/drm/meson/meson_osd_afbcd.h b/drivers/gpu/drm/meson/meson_osd_afbcd.h index 5e5523304f42..e77ddeb6416f 100644 --- a/drivers/gpu/drm/meson/meson_osd_afbcd.h +++ b/drivers/gpu/drm/meson/meson_osd_afbcd.h @@ -14,6 +14,7 @@ struct meson_afbcd_ops { int (*init)(struct meson_drm *priv); + void (*exit)(struct meson_drm *priv); int (*reset)(struct meson_drm *priv); int (*enable)(struct meson_drm *priv); int (*disable)(struct meson_drm *priv); diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index fd98e8bbc550..2c7271f545dc 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -529,7 +529,10 @@ static void mgag200_set_format_regs(struct mga_device *mdev, WREG_GFX(3, 0x00); WREG_GFX(4, 0x00); WREG_GFX(5, 0x40); - WREG_GFX(6, 0x05); + /* GCTL6 should be 0x05, but we configure memmapsl to 0xb8000 (text mode), + * so that it doesn't hang when running kexec/kdump on G200_SE rev42. + */ + WREG_GFX(6, 0x0d); WREG_GFX(7, 0x0f); WREG_GFX(8, 0x0f); diff --git a/drivers/gpu/drm/mgag200/mgag200_pll.c b/drivers/gpu/drm/mgag200/mgag200_pll.c index e9ae22b4f813..52be08b744ad 100644 --- a/drivers/gpu/drm/mgag200/mgag200_pll.c +++ b/drivers/gpu/drm/mgag200/mgag200_pll.c @@ -404,9 +404,9 @@ mgag200_pixpll_update_g200wb(struct mgag200_pll *pixpll, const struct mgag200_pl udelay(50); /* program pixel pll register */ - WREG_DAC(MGA1064_PIX_PLLC_N, xpixpllcn); - WREG_DAC(MGA1064_PIX_PLLC_M, xpixpllcm); - WREG_DAC(MGA1064_PIX_PLLC_P, xpixpllcp); + WREG_DAC(MGA1064_WB_PIX_PLLC_N, xpixpllcn); + WREG_DAC(MGA1064_WB_PIX_PLLC_M, xpixpllcm); + WREG_DAC(MGA1064_WB_PIX_PLLC_P, xpixpllcp); udelay(50); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index b681c45520bb..9b41e2f82fc2 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -658,19 +658,23 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); const u32 *regs = a6xx_protect; - unsigned i, count = ARRAY_SIZE(a6xx_protect), count_max = 32; - - BUILD_BUG_ON(ARRAY_SIZE(a6xx_protect) > 32); - BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48); + unsigned i, count, count_max; if (adreno_is_a650(adreno_gpu)) { regs = a650_protect; count = ARRAY_SIZE(a650_protect); count_max = 48; + BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48); } else if (adreno_is_a660_family(adreno_gpu)) { regs = a660_protect; count = ARRAY_SIZE(a660_protect); count_max = 48; + BUILD_BUG_ON(ARRAY_SIZE(a660_protect) > 48); + } else { + regs = a6xx_protect; + count = ARRAY_SIZE(a6xx_protect); + count_max = 32; + BUILD_BUG_ON(ARRAY_SIZE(a6xx_protect) > 32); } /* @@ -1707,7 +1711,7 @@ a6xx_create_private_address_space(struct msm_gpu *gpu) return ERR_CAST(mmu); return msm_gem_address_space_create(mmu, - "gpu", 0x100000000ULL, 0x1ffffffffULL); + "gpu", 0x100000000ULL, SZ_4G); } static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 0e9d3fa1544b..6bde3e234ec8 100644 
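The a6xx_set_cp_protect() hunk above moves each BUILD_BUG_ON() into the branch that selects the corresponding table, so every protect list is checked against the count_max chosen alongside it. A self-contained userspace analogue of that pattern (editor's sketch; the array names and limits are invented):

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

/* Hypothetical protect lists; only the shape of the check matters. */
static const unsigned int base_protect[] = { 0x1, 0x2, 0x3 };
static const unsigned int ext_protect[]  = { 0x1, 0x2, 0x3, 0x4, 0x5 };

static void pick_protect(int extended, const unsigned int **regs,
			 unsigned int *count, unsigned int *count_max)
{
	if (extended) {
		*regs = ext_protect;
		*count = ARRAY_SIZE(ext_protect);
		*count_max = 8;
		/* bound checked in the same branch that selected the table */
		_Static_assert(ARRAY_SIZE(ext_protect) <= 8, "ext_protect too large");
	} else {
		*regs = base_protect;
		*count = ARRAY_SIZE(base_protect);
		*count_max = 4;
		_Static_assert(ARRAY_SIZE(base_protect) <= 4, "base_protect too large");
	}
}

int main(void)
{
	const unsigned int *regs;
	unsigned int count, count_max;

	pick_protect(1, &regs, &count, &count_max);
	printf("count=%u count_max=%u first=0x%x\n", count, count_max, regs[0]);
	return 0;
}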
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -1107,7 +1107,7 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc) } - if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort && + if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_TMDS && dpu_enc->cur_master->hw_mdptop && dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select) dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select( diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index f9c83d6e427a..24fbaf562d41 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -35,6 +35,14 @@ int dpu_rm_destroy(struct dpu_rm *rm) { int i; + for (i = 0; i < ARRAY_SIZE(rm->dspp_blks); i++) { + struct dpu_hw_dspp *hw; + + if (rm->dspp_blks[i]) { + hw = to_dpu_hw_dspp(rm->dspp_blks[i]); + dpu_hw_dspp_destroy(hw); + } + } for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) { struct dpu_hw_pingpong *hw; diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c index c6b69afcbac8..50e854207c70 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c @@ -90,7 +90,10 @@ static void mdp5_plane_reset(struct drm_plane *plane) __drm_atomic_helper_plane_destroy_state(plane->state); kfree(to_mdp5_plane_state(plane->state)); + plane->state = NULL; mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL); + if (!mdp5_state) + return; if (plane->type == DRM_PLANE_TYPE_PRIMARY) mdp5_state->base.zpos = STAGE_BASE; diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c index cabe15190ec1..369e57f73a47 100644 --- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c +++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c @@ -169,6 +169,8 @@ void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len, va_list va; new_blk = kzalloc(sizeof(struct msm_disp_state_block), GFP_KERNEL); + if (!new_blk) + return; va_start(va, fmt); diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index 62e75dc8afc6..4af281d97493 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -1744,6 +1744,9 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) /* end with failure */ break; /* lane == 1 already */ } + + /* stop link training before start re training */ + dp_ctrl_clear_training_pattern(ctrl); } } diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index a0392e4d8134..a133f7e154e7 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -551,6 +551,12 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) mutex_unlock(&dp->event_mutex); + /* + * add fail safe mode outside event_mutex scope + * to avoid potiential circular lock with drm thread + */ + dp_panel_add_fail_safe_mode(dp->dp_display.connector); + /* uevent will complete connection part */ return 0; }; @@ -1442,6 +1448,7 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, struct drm_encoder *encoder) { struct msm_drm_private *priv; + struct dp_display_private *dp_priv; int ret; if (WARN_ON(!encoder) || WARN_ON(!dp_display) || WARN_ON(!dev)) @@ -1450,6 +1457,8 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, priv = dev->dev_private; dp_display->drm_dev = dev; + dp_priv = container_of(dp_display, 
struct dp_display_private, dp_display); + ret = dp_display_request_irq(dp_display); if (ret) { DRM_ERROR("request_irq failed, ret=%d\n", ret); @@ -1467,6 +1476,8 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, return ret; } + dp_priv->panel->connector = dp_display->connector; + priv->connectors[priv->num_connectors++] = dp_display->connector; return 0; } diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c index 2181b60e1d1d..982f5e8c3546 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.c +++ b/drivers/gpu/drm/msm/dp/dp_panel.c @@ -151,6 +151,15 @@ static int dp_panel_update_modes(struct drm_connector *connector, return rc; } +void dp_panel_add_fail_safe_mode(struct drm_connector *connector) +{ + /* fail safe edid */ + mutex_lock(&connector->dev->mode_config.mutex); + if (drm_add_modes_noedid(connector, 640, 480)) + drm_set_preferred_mode(connector, 640, 480); + mutex_unlock(&connector->dev->mode_config.mutex); +} + int dp_panel_read_sink_caps(struct dp_panel *dp_panel, struct drm_connector *connector) { @@ -207,11 +216,7 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel, goto end; } - /* fail safe edid */ - mutex_lock(&connector->dev->mode_config.mutex); - if (drm_add_modes_noedid(connector, 640, 480)) - drm_set_preferred_mode(connector, 640, 480); - mutex_unlock(&connector->dev->mode_config.mutex); + dp_panel_add_fail_safe_mode(connector); } if (panel->aux_cfg_update_done) { diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h index 9023e5bb4b8b..99739ea679a7 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.h +++ b/drivers/gpu/drm/msm/dp/dp_panel.h @@ -59,6 +59,7 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel); int dp_panel_deinit(struct dp_panel *dp_panel); int dp_panel_timing_cfg(struct dp_panel *dp_panel); void dp_panel_dump_regs(struct dp_panel *dp_panel); +void dp_panel_add_fail_safe_mode(struct drm_connector *connector); int dp_panel_read_sink_caps(struct dp_panel *dp_panel, struct drm_connector *connector); u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp, diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index dc85974c7897..eea679a52e86 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -1909,7 +1909,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) /* do not autoenable, will be enabled later */ ret = devm_request_irq(&pdev->dev, msm_host->irq, dsi_host_irq, - IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQF_NO_AUTOEN, + IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN, "dsi_isr", msm_host); if (ret < 0) { dev_err(&pdev->dev, "failed to request IRQ%u: %d\n", diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index fa4c396df6a9..6e43672f5807 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -643,7 +643,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id) return connector; fail: - connector->funcs->destroy(msm_dsi->connector); + connector->funcs->destroy(connector); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c index d8128f50b0dd..0b782cc18b3f 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c @@ -562,7 +562,9 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **prov char clk_name[32], parent[32], vco_name[32]; char parent2[32], parent3[32], parent4[32]; 
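The pll_*_register() hunks that follow replace the global-name .parent_names lookup with a .parent_data entry whose .fw_name is resolved through the node's clocks/clock-names properties. A side-by-side sketch of the two initializer forms (editor's illustration, not a buildable module on its own; the "dsi_vco" name is a placeholder, while "ref" and "xo" are the strings used in the hunks):

#include <linux/clk-provider.h>

/* Old form: the parent is matched by its global clock name. */
static const struct clk_init_data vco_init_legacy = {
	.name		= "dsi_vco",			/* placeholder */
	.parent_names	= (const char *[]){ "xo" },
	.num_parents	= 1,
};

/* New form: the parent comes from the DT clocks/clock-names entry "ref";
 * .name keeps a legacy global-name fallback where one is still needed. */
static const struct clk_init_data vco_init_fw = {
	.name		= "dsi_vco",			/* placeholder */
	.parent_data	= &(const struct clk_parent_data) {
		.fw_name = "ref",
		.name	 = "xo",
	},
	.num_parents	= 1,
};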
struct clk_init_data vco_init = { - .parent_names = (const char *[]){ "xo" }, + .parent_data = &(const struct clk_parent_data) { + .fw_name = "ref", + }, .num_parents = 1, .name = vco_name, .flags = CLK_IGNORE_UNUSED, diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c index 5b4e991f220d..1c1e9861b93f 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c @@ -804,7 +804,9 @@ static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm, struct clk_hw **prov { char clk_name[32], parent[32], vco_name[32]; struct clk_init_data vco_init = { - .parent_names = (const char *[]){ "xo" }, + .parent_data = &(const struct clk_parent_data) { + .fw_name = "ref", + }, .num_parents = 1, .name = vco_name, .flags = CLK_IGNORE_UNUSED, diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c index 2da673a2add6..48eab80b548e 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c @@ -521,7 +521,9 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov { char clk_name[32], parent1[32], parent2[32], vco_name[32]; struct clk_init_data vco_init = { - .parent_names = (const char *[]){ "xo" }, + .parent_data = &(const struct clk_parent_data) { + .fw_name = "ref", .name = "xo", + }, .num_parents = 1, .name = vco_name, .flags = CLK_IGNORE_UNUSED, diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c index 71ed4aa0dc67..fc56cdcc9ad6 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c @@ -385,7 +385,9 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov { char *clk_name, *parent_name, *vco_name; struct clk_init_data vco_init = { - .parent_names = (const char *[]){ "pxo" }, + .parent_data = &(const struct clk_parent_data) { + .fw_name = "ref", + }, .num_parents = 1, .flags = CLK_IGNORE_UNUSED, .ops = &clk_ops_dsi_pll_28nm_vco, diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c index cb297b08458e..8cc1ef8199ac 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c @@ -590,7 +590,9 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide char clk_name[32], parent[32], vco_name[32]; char parent2[32], parent3[32], parent4[32]; struct clk_init_data vco_init = { - .parent_names = (const char *[]){ "bi_tcxo" }, + .parent_data = &(const struct clk_parent_data) { + .fw_name = "ref", + }, .num_parents = 1, .name = vco_name, .flags = CLK_IGNORE_UNUSED, @@ -864,20 +866,26 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy, /* Alter PHY configurations if data rate less than 1.5GHZ*/ less_than_1500_mhz = (clk_req->bitclk_rate <= 1500000000); - /* For C-PHY, no low power settings for lower clk rate */ - if (phy->cphy_mode) - less_than_1500_mhz = false; - if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) { vreg_ctrl_0 = less_than_1500_mhz ? 0x53 : 0x52; - glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x00; - glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x39 : 0x3c; + if (phy->cphy_mode) { + glbl_rescode_top_ctrl = 0x00; + glbl_rescode_bot_ctrl = 0x3c; + } else { + glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x00; + glbl_rescode_bot_ctrl = less_than_1500_mhz ? 
0x39 : 0x3c; + } glbl_str_swi_cal_sel_ctrl = 0x00; glbl_hstx_str_ctrl_0 = 0x88; } else { vreg_ctrl_0 = less_than_1500_mhz ? 0x5B : 0x59; - glbl_str_swi_cal_sel_ctrl = less_than_1500_mhz ? 0x03 : 0x00; - glbl_hstx_str_ctrl_0 = less_than_1500_mhz ? 0x66 : 0x88; + if (phy->cphy_mode) { + glbl_str_swi_cal_sel_ctrl = 0x03; + glbl_hstx_str_ctrl_0 = 0x66; + } else { + glbl_str_swi_cal_sel_ctrl = less_than_1500_mhz ? 0x03 : 0x00; + glbl_hstx_str_ctrl_0 = less_than_1500_mhz ? 0x66 : 0x88; + } glbl_rescode_top_ctrl = 0x03; glbl_rescode_bot_ctrl = 0x3c; } diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index cb52ac01e512..d280dd64744d 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -937,6 +937,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, get_pid_task(aspace->pid, PIDTYPE_PID); if (task) { comm = kstrdup(task->comm, GFP_KERNEL); + put_task_struct(task); } else { comm = NULL; } diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c index 89dd618d78f3..988bc4fbd78d 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c @@ -361,7 +361,17 @@ static void mxsfb_crtc_atomic_enable(struct drm_crtc *crtc, bridge_state = drm_atomic_get_new_bridge_state(state, mxsfb->bridge); - bus_format = bridge_state->input_bus_cfg.format; + if (!bridge_state) + bus_format = MEDIA_BUS_FMT_FIXED; + else + bus_format = bridge_state->input_bus_cfg.format; + + if (bus_format == MEDIA_BUS_FMT_FIXED) { + dev_warn_once(drm->dev, + "Bridge does not provide bus format, assuming MEDIA_BUS_FMT_RGB888_1X24.\n" + "Please fix bridge driver by handling atomic_get_input_bus_fmts.\n"); + bus_format = MEDIA_BUS_FMT_RGB888_1X24; + } } /* If there is no bridge, use bus format from connector */ diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index 1cbd71abc80a..12965a832f94 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -101,7 +101,6 @@ nv40_backlight_init(struct nouveau_encoder *encoder, if (!(nvif_rd32(device, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)) return -ENODEV; - props->type = BACKLIGHT_RAW; props->max_brightness = 31; *ops = &nv40_bl_ops; return 0; @@ -294,7 +293,8 @@ nv50_backlight_init(struct nouveau_backlight *bl, struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); struct nvif_object *device = &drm->client.device.object; - if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1))) + if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1)) || + nv_conn->base.status != connector_status_connected) return -ENODEV; if (nv_conn->type == DCB_CONNECTOR_eDP) { @@ -339,7 +339,6 @@ nv50_backlight_init(struct nouveau_backlight *bl, else *ops = &nva3_bl_ops; - props->type = BACKLIGHT_RAW; props->max_brightness = 100; return 0; @@ -407,6 +406,7 @@ nouveau_backlight_init(struct drm_connector *connector) goto fail_alloc; } + props.type = BACKLIGHT_RAW; bl->dev = backlight_device_register(backlight_name, connector->kdev, nv_encoder, ops, &props); if (IS_ERR(bl->dev)) { diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c index 262641a014b0..c91130a6be2a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c @@ -117,8 +117,12 @@ nvkm_falcon_disable(struct nvkm_falcon *falcon) int nvkm_falcon_reset(struct nvkm_falcon *falcon) { - 
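The nvkm_falcon_reset() change that continues below prefers a chip-provided ->reset callback and only falls back to the generic disable/enable sequence when none is set. A standalone analogue of that optional-callback pattern (editor's sketch; all names are invented):

#include <stdio.h>

struct falcon_func_sketch {
	int (*reset)(void);		/* optional per-chip hook */
};

static int generic_reset(void)
{
	puts("generic path: disable then enable");
	return 0;
}

static int chip_reset(void)
{
	puts("chip-specific reset");
	return 0;
}

static int falcon_reset(const struct falcon_func_sketch *func)
{
	if (!func->reset)
		return generic_reset();

	return func->reset();
}

int main(void)
{
	const struct falcon_func_sketch old_chip = { .reset = NULL };
	const struct falcon_func_sketch new_chip = { .reset = chip_reset };

	falcon_reset(&old_chip);
	falcon_reset(&new_chip);
	return 0;
}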
nvkm_falcon_disable(falcon); - return nvkm_falcon_enable(falcon); + if (!falcon->func->reset) { + nvkm_falcon_disable(falcon); + return nvkm_falcon_enable(falcon); + } + + return falcon->func->reset(falcon); } int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c index 667fa016496e..a6ea89a5d51a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c @@ -142,11 +142,12 @@ nvkm_acr_hsfw_load_bl(struct nvkm_acr *acr, const char *name, int ver, hsfw->imem_size = desc->code_size; hsfw->imem_tag = desc->start_tag; - hsfw->imem = kmalloc(desc->code_size, GFP_KERNEL); - memcpy(hsfw->imem, data + desc->code_off, desc->code_size); - + hsfw->imem = kmemdup(data + desc->code_off, desc->code_size, GFP_KERNEL); nvkm_firmware_put(fw); - return 0; + if (!hsfw->imem) + return -ENOMEM; + else + return 0; } int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c index 5968c7696596..40439e329aa9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c @@ -23,9 +23,38 @@ */ #include "priv.h" +static int +gm200_pmu_flcn_reset(struct nvkm_falcon *falcon) +{ + struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon); + + nvkm_falcon_wr32(falcon, 0x014, 0x0000ffff); + pmu->func->reset(pmu); + return nvkm_falcon_enable(falcon); +} + +const struct nvkm_falcon_func +gm200_pmu_flcn = { + .debug = 0xc08, + .fbif = 0xe00, + .load_imem = nvkm_falcon_v1_load_imem, + .load_dmem = nvkm_falcon_v1_load_dmem, + .read_dmem = nvkm_falcon_v1_read_dmem, + .bind_context = nvkm_falcon_v1_bind_context, + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, + .set_start_addr = nvkm_falcon_v1_set_start_addr, + .start = nvkm_falcon_v1_start, + .enable = nvkm_falcon_v1_enable, + .disable = nvkm_falcon_v1_disable, + .reset = gm200_pmu_flcn_reset, + .cmdq = { 0x4a0, 0x4b0, 4 }, + .msgq = { 0x4c8, 0x4cc, 0 }, +}; + static const struct nvkm_pmu_func gm200_pmu = { - .flcn = &gt215_pmu_flcn, + .flcn = &gm200_pmu_flcn, .enabled = gf100_pmu_enabled, .reset = gf100_pmu_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c index 148706977eec..612310d5d481 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c @@ -211,11 +211,12 @@ gm20b_pmu_recv(struct nvkm_pmu *pmu) static const struct nvkm_pmu_func gm20b_pmu = { - .flcn = &gt215_pmu_flcn, + .flcn = &gm200_pmu_flcn, .enabled = gf100_pmu_enabled, .intr = gt215_pmu_intr, .recv = gm20b_pmu_recv, .initmsg = gm20b_pmu_initmsg, + .reset = gf100_pmu_reset, }; #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c index 00da1b873ce8..1a6f9c3af5ec 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c @@ -23,7 +23,7 @@ */ #include "priv.h" -static void +void gp102_pmu_reset(struct nvkm_pmu *pmu) { struct nvkm_device *device = pmu->subdev.device; @@ -39,7 +39,7 @@ gp102_pmu_enabled(struct nvkm_pmu *pmu) static const struct nvkm_pmu_func gp102_pmu = { - .flcn = &gt215_pmu_flcn, + .flcn = &gm200_pmu_flcn, .enabled = gp102_pmu_enabled, .reset = gp102_pmu_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c 
b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c index 461f722656e2..94cfb1791af6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c @@ -78,11 +78,12 @@ gp10b_pmu_acr = { static const struct nvkm_pmu_func gp10b_pmu = { - .flcn = &gt215_pmu_flcn, + .flcn = &gm200_pmu_flcn, .enabled = gf100_pmu_enabled, .intr = gt215_pmu_intr, .recv = gm20b_pmu_recv, .initmsg = gm20b_pmu_initmsg, + .reset = gp102_pmu_reset, }; #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h index e7860d177353..21abf31f4442 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h @@ -41,9 +41,12 @@ int gt215_pmu_send(struct nvkm_pmu *, u32[2], u32, u32, u32, u32); bool gf100_pmu_enabled(struct nvkm_pmu *); void gf100_pmu_reset(struct nvkm_pmu *); +void gp102_pmu_reset(struct nvkm_pmu *pmu); void gk110_pmu_pgob(struct nvkm_pmu *, bool); +extern const struct nvkm_falcon_func gm200_pmu_flcn; + void gm20b_pmu_acr_bld_patch(struct nvkm_acr *, u32, s64); void gm20b_pmu_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *); int gm20b_pmu_acr_boot(struct nvkm_falcon *); diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index 418638e6e3b0..af1402d83d51 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -83,6 +83,8 @@ config DRM_PANEL_SIMPLE depends on PM select VIDEOMODE_HELPERS select DRM_DP_AUX_BUS + select DRM_DP_HELPER + select DRM_KMS_HELPER help DRM panel driver for dumb panels that need at most a regulator and a GPIO to be powered up. Optionally a backlight can be attached so diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c index 2c3378a259b1..e1542451ef9d 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c @@ -612,8 +612,10 @@ static int ili9341_dbi_probe(struct spi_device *spi, struct gpio_desc *dc, int ret; vcc = devm_regulator_get_optional(dev, "vcc"); - if (IS_ERR(vcc)) + if (IS_ERR(vcc)) { dev_err(dev, "get optional vcc failed\n"); + vcc = NULL; + } dbidev = devm_drm_dev_alloc(dev, &ili9341_dbi_driver, struct mipi_dbi_dev, drm); diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c index 46029c5610c8..145047e19394 100644 --- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c +++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c @@ -229,7 +229,7 @@ static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts, ret = i2c_smbus_write_byte_data(ts->i2c, reg, val); if (ret) - dev_err(&ts->dsi->dev, "I2C write failed: %d\n", ret); + dev_err(&ts->i2c->dev, "I2C write failed: %d\n", ret); } static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val) @@ -265,7 +265,7 @@ static int rpi_touchscreen_noop(struct drm_panel *panel) return 0; } -static int rpi_touchscreen_enable(struct drm_panel *panel) +static int rpi_touchscreen_prepare(struct drm_panel *panel) { struct rpi_touchscreen *ts = panel_to_ts(panel); int i; @@ -295,6 +295,13 @@ static int rpi_touchscreen_enable(struct drm_panel *panel) rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01); msleep(100); + return 0; +} + +static int rpi_touchscreen_enable(struct drm_panel *panel) +{ + struct rpi_touchscreen *ts = panel_to_ts(panel); + /* Turn on the backlight. 
*/ rpi_touchscreen_i2c_write(ts, REG_PWM, 255); @@ -349,7 +356,7 @@ static int rpi_touchscreen_get_modes(struct drm_panel *panel, static const struct drm_panel_funcs rpi_touchscreen_funcs = { .disable = rpi_touchscreen_disable, .unprepare = rpi_touchscreen_noop, - .prepare = rpi_touchscreen_noop, + .prepare = rpi_touchscreen_prepare, .enable = rpi_touchscreen_enable, .get_modes = rpi_touchscreen_get_modes, }; diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index b7b654f2dfd9..f9242c19b458 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -2510,7 +2510,7 @@ static const struct display_timing innolux_g070y2_l01_timing = { static const struct panel_desc innolux_g070y2_l01 = { .timings = &innolux_g070y2_l01_timing, .num_timings = 1, - .bpc = 6, + .bpc = 8, .size = { .width = 152, .height = 91, diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index bbe628b306ee..f8355de6e335 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -360,8 +360,11 @@ int panfrost_gpu_init(struct panfrost_device *pfdev) panfrost_gpu_init_features(pfdev); - dma_set_mask_and_coherent(pfdev->dev, + err = dma_set_mask_and_coherent(pfdev->dev, DMA_BIT_MASK(FIELD_GET(0xff00, pfdev->features.mmu_features))); + if (err) + return err; + dma_set_max_seg_size(pfdev->dev, UINT_MAX); irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu"); diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 0fce73b9a646..70bd84b7ef2b 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -198,7 +198,8 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, * so don't register a backlight device */ if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) && - (rdev->pdev->device == 0x6741)) + (rdev->pdev->device == 0x6741) && + !dmi_match(DMI_PRODUCT_NAME, "iMac12,1")) return; if (!radeon_encoder->enc_priv) diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 607ad5620bd9..1546abcadacf 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -204,7 +204,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector) /* Check if bpc is within clock limit. 
Try to degrade gracefully otherwise */ if ((bpc == 12) && (mode_clock * 3/2 > max_tmds_clock)) { - if ((connector->display_info.edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30) && + if ((connector->display_info.edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_30) && (mode_clock * 5/4 <= max_tmds_clock)) bpc = 10; else diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index 830bdd5e9b7c..8677c8271678 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -529,13 +529,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, return ret; } - ret = clk_prepare_enable(hdmi->vpll_clk); - if (ret) { - DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n", - ret); - return ret; - } - hdmi->phy = devm_phy_optional_get(dev, "hdmi"); if (IS_ERR(hdmi->phy)) { ret = PTR_ERR(hdmi->phy); @@ -544,6 +537,13 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, return ret; } + ret = clk_prepare_enable(hdmi->vpll_clk); + if (ret) { + DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n", + ret); + return ret; + } + drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs); drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); diff --git a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c index 6b4759ed6bfd..c491429f1a02 100644 --- a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c +++ b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c @@ -131,8 +131,10 @@ sideband_msg_req_encode_decode(struct drm_dp_sideband_msg_req_body *in) return false; txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); - if (!txmsg) + if (!txmsg) { + kfree(out); return false; + } drm_dp_encode_sideband_req(in, txmsg); ret = drm_dp_decode_sideband_req(txmsg, out); diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c index b64d93da651d..5e2b0175df36 100644 --- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c +++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c @@ -658,8 +658,10 @@ int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node) return -EPROBE_DEFER; phy = platform_get_drvdata(pdev); - if (!phy) + if (!phy) { + put_device(&pdev->dev); return -EPROBE_DEFER; + } hdmi->phy = phy; diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h index 145833a9d82d..5b3fbee18671 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.h +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h @@ -111,10 +111,10 @@ /* format 13 is semi-planar YUV411 VUVU */ #define SUN8I_MIXER_FBFMT_YUV411 14 /* format 15 doesn't exist */ -/* format 16 is P010 YVU */ -#define SUN8I_MIXER_FBFMT_P010_YUV 17 -/* format 18 is P210 YVU */ -#define SUN8I_MIXER_FBFMT_P210_YUV 19 +#define SUN8I_MIXER_FBFMT_P010_YUV 16 +/* format 17 is P010 YVU */ +#define SUN8I_MIXER_FBFMT_P210_YUV 18 +/* format 19 is P210 YVU */ /* format 20 is packed YVU444 10-bit */ /* format 21 is packed YUV444 10-bit */ diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index f46d377f0c30..de1333dc0d86 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c @@ -1538,8 +1538,10 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi) dsi->slave = platform_get_drvdata(gangster); of_node_put(np); - if (!dsi->slave) + if (!dsi->slave) { + put_device(&gangster->dev); return -EPROBE_DEFER; + } dsi->slave->master = dsi; } diff --git a/drivers/gpu/drm/tiny/simpledrm.c 
b/drivers/gpu/drm/tiny/simpledrm.c index 5a6e89825bc2..3e3f9ba1e885 100644 --- a/drivers/gpu/drm/tiny/simpledrm.c +++ b/drivers/gpu/drm/tiny/simpledrm.c @@ -779,6 +779,9 @@ static int simpledrm_device_init_modeset(struct simpledrm_device *sdev) if (ret) return ret; drm_connector_helper_add(connector, &simpledrm_connector_helper_funcs); + drm_connector_set_panel_orientation_with_quirk(connector, + DRM_MODE_PANEL_ORIENTATION_UNKNOWN, + mode->hdisplay, mode->vdisplay); formats = simpledrm_device_formats(sdev, &nformats); diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index 9403c3b36aca..6407a006d6ec 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -221,6 +221,7 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) int ret; u32 mmu_debug; u32 ident1; + u64 mask; v3d = devm_drm_dev_alloc(dev, &v3d_drm_driver, struct v3d_dev, drm); @@ -240,8 +241,11 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) return ret; mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO); - dma_set_mask_and_coherent(dev, - DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH))); + mask = DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH)); + ret = dma_set_mask_and_coherent(dev, mask); + if (ret) + return ret; + v3d->va_width = 30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_VA_WIDTH); ident1 = V3D_READ(V3D_HUB_IDENT1); diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 772b5831bcc6..805d6f6cba0e 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -625,7 +625,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, if (!render->base.perfmon) { ret = -ENOENT; - goto fail; + goto fail_perfmon; } } @@ -678,6 +678,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, fail_unreserve: mutex_unlock(&v3d->sched_lock); +fail_perfmon: drm_gem_unlock_reservations(last_job->bo, last_job->bo_count, &acquire_ctx); fail: @@ -854,7 +855,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data, args->perfmon_id); if (!job->base.perfmon) { ret = -ENOENT; - goto fail; + goto fail_perfmon; } } @@ -886,6 +887,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data, fail_unreserve: mutex_unlock(&v3d->sched_lock); +fail_perfmon: drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count, &acquire_ctx); fail: diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index e3ed52d96f42..3e61184e194c 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -538,9 +538,11 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc) if (ret) return ret; - ret = pm_runtime_put(&vc4_hdmi->pdev->dev); - if (ret) - return ret; + /* + * post_crtc_powerdown will have called pm_runtime_put, so we + * don't need it here otherwise we'll get the reference counting + * wrong. 
+ */ return 0; } diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c index d09c1ea60c04..ca8506316660 100644 --- a/drivers/gpu/drm/vc4/vc4_dsi.c +++ b/drivers/gpu/drm/vc4/vc4_dsi.c @@ -846,7 +846,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder) unsigned long phy_clock; int ret; - ret = pm_runtime_get_sync(dev); + ret = pm_runtime_resume_and_get(dev); if (ret) { DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->variant->port); return; diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 9170d948b448..07887cbfd9cb 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -1522,6 +1522,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi) dev_err(dev, "Couldn't register the HDMI codec: %ld\n", PTR_ERR(codec_pdev)); return PTR_ERR(codec_pdev); } + vc4_hdmi->audio.codec_pdev = codec_pdev; dai_link->cpus = &vc4_hdmi->audio.cpu; dai_link->codecs = &vc4_hdmi->audio.codec; @@ -1561,6 +1562,12 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi) } +static void vc4_hdmi_audio_exit(struct vc4_hdmi *vc4_hdmi) +{ + platform_device_unregister(vc4_hdmi->audio.codec_pdev); + vc4_hdmi->audio.codec_pdev = NULL; +} + static irqreturn_t vc4_hdmi_hpd_irq_thread(int irq, void *priv) { struct vc4_hdmi *vc4_hdmi = priv; @@ -2298,6 +2305,7 @@ static void vc4_hdmi_unbind(struct device *dev, struct device *master, kfree(vc4_hdmi->hdmi_regset.regs); kfree(vc4_hdmi->hd_regset.regs); + vc4_hdmi_audio_exit(vc4_hdmi); vc4_hdmi_cec_exit(vc4_hdmi); vc4_hdmi_hotplug_exit(vc4_hdmi); vc4_hdmi_connector_destroy(&vc4_hdmi->connector); diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h index 33e9f665ab8e..c0492da73683 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.h +++ b/drivers/gpu/drm/vc4/vc4_hdmi.h @@ -113,6 +113,7 @@ struct vc4_hdmi_audio { struct snd_soc_dai_link_component platform; struct snd_dmaengine_dai_dma_data dma_data; struct hdmi_audio_infoframe infoframe; + struct platform_device *codec_pdev; bool streaming; }; diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c index 2de61b63ef91..48d3c9955f0d 100644 --- a/drivers/gpu/drm/virtio/virtgpu_gem.c +++ b/drivers/gpu/drm/virtio/virtgpu_gem.c @@ -248,6 +248,9 @@ void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs) { u32 i; + if (!objs) + return; + for (i = 0; i < objs->nents; i++) drm_gem_object_put(objs->objs[i]); virtio_gpu_array_free(objs); |
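One recurring fix in this series, seen in v3d_submit_cl_ioctl() and v3d_submit_csd_ioctl() above, is jumping to a dedicated label (fail_perfmon) so that a late failure still unlocks the already-reserved buffer objects. A standalone sketch of that unwinding pattern (editor's illustration; names are invented and the locking is reduced to printouts):

#include <errno.h>
#include <stdio.h>

static int lookup_perfmon(int id)
{
	return 0;			/* pretend the lookup fails */
}

static int submit_sketch(int perfmon_id)
{
	int ret;

	puts("lock bo reservations");

	if (perfmon_id && !lookup_perfmon(perfmon_id)) {
		ret = -ENOENT;
		goto fail_perfmon;	/* reservations are held: unlock them */
	}

	puts("push job");
	puts("unlock bo reservations");
	return 0;

fail_perfmon:
	puts("unlock bo reservations");
	return ret;
}

int main(void)
{
	printf("submit_sketch() = %d\n", submit_sketch(42));
	return 0;
}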