Diffstat (limited to 'drivers/gpu/drm/xe/display')
-rw-r--r-- | drivers/gpu/drm/xe/display/ext/i915_irq.c     |  69
-rw-r--r-- | drivers/gpu/drm/xe/display/intel_bo.c         |  61
-rw-r--r-- | drivers/gpu/drm/xe/display/intel_fb_bo.c      |  28
-rw-r--r-- | drivers/gpu/drm/xe/display/intel_fb_bo.h      |  24
-rw-r--r-- | drivers/gpu/drm/xe/display/intel_fbdev_fb.c   |  20
-rw-r--r-- | drivers/gpu/drm/xe/display/xe_display.c       | 360
-rw-r--r-- | drivers/gpu/drm/xe/display/xe_display.h       |  25
-rw-r--r-- | drivers/gpu/drm/xe/display/xe_display_rpm.c   |  71
-rw-r--r-- | drivers/gpu/drm/xe/display/xe_display_rps.c   |  17
-rw-r--r-- | drivers/gpu/drm/xe/display/xe_display_wa.c    |   6
-rw-r--r-- | drivers/gpu/drm/xe/display/xe_dsb_buffer.c    |  18
-rw-r--r-- | drivers/gpu/drm/xe/display/xe_fb_pin.c        | 132
-rw-r--r-- | drivers/gpu/drm/xe/display/xe_hdcp_gsc.c      | 135
-rw-r--r-- | drivers/gpu/drm/xe/display/xe_plane_initial.c |  43
-rw-r--r-- | drivers/gpu/drm/xe/display/xe_tdf.c           |   6
15 files changed, 626 insertions(+), 389 deletions(-)
diff --git a/drivers/gpu/drm/xe/display/ext/i915_irq.c b/drivers/gpu/drm/xe/display/ext/i915_irq.c
index eb40f1cb44f6..3c6bca66ddab 100644
--- a/drivers/gpu/drm/xe/display/ext/i915_irq.c
+++ b/drivers/gpu/drm/xe/display/ext/i915_irq.c
@@ -7,25 +7,24 @@
 #include "i915_reg.h"
 #include "intel_uncore.h"
 
-void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
-		    i915_reg_t iir, i915_reg_t ier)
+void gen2_irq_reset(struct intel_uncore *uncore, struct i915_irq_regs regs)
 {
-	intel_uncore_write(uncore, imr, 0xffffffff);
-	intel_uncore_posting_read(uncore, imr);
+	intel_uncore_write(uncore, regs.imr, 0xffffffff);
+	intel_uncore_posting_read(uncore, regs.imr);
 
-	intel_uncore_write(uncore, ier, 0);
+	intel_uncore_write(uncore, regs.ier, 0);
 
 	/* IIR can theoretically queue up two events. Be paranoid. */
-	intel_uncore_write(uncore, iir, 0xffffffff);
-	intel_uncore_posting_read(uncore, iir);
-	intel_uncore_write(uncore, iir, 0xffffffff);
-	intel_uncore_posting_read(uncore, iir);
+	intel_uncore_write(uncore, regs.iir, 0xffffffff);
+	intel_uncore_posting_read(uncore, regs.iir);
+	intel_uncore_write(uncore, regs.iir, 0xffffffff);
+	intel_uncore_posting_read(uncore, regs.iir);
 }
 
 /*
  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
  */
-void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
+void gen2_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
 {
 	struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
 	u32 val = intel_uncore_read(uncore, reg);
@@ -42,32 +41,42 @@ void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
 	intel_uncore_posting_read(uncore, reg);
 }
 
-void gen3_irq_init(struct intel_uncore *uncore,
-		   i915_reg_t imr, u32 imr_val,
-		   i915_reg_t ier, u32 ier_val,
-		   i915_reg_t iir)
+void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs,
+		   u32 imr_val, u32 ier_val)
 {
-	gen3_assert_iir_is_zero(uncore, iir);
+	gen2_assert_iir_is_zero(uncore, regs.iir);
 
-	intel_uncore_write(uncore, ier, ier_val);
-	intel_uncore_write(uncore, imr, imr_val);
-	intel_uncore_posting_read(uncore, imr);
+	intel_uncore_write(uncore, regs.ier, ier_val);
+	intel_uncore_write(uncore, regs.imr, imr_val);
+	intel_uncore_posting_read(uncore, regs.imr);
+}
+
+void gen2_error_reset(struct intel_uncore *uncore, struct i915_error_regs regs)
+{
+	intel_uncore_write(uncore, regs.emr, 0xffffffff);
+	intel_uncore_posting_read(uncore, regs.emr);
+
+	intel_uncore_write(uncore, regs.eir, 0xffffffff);
+	intel_uncore_posting_read(uncore, regs.eir);
+	intel_uncore_write(uncore, regs.eir, 0xffffffff);
+	intel_uncore_posting_read(uncore, regs.eir);
+}
+
+void gen2_error_init(struct intel_uncore *uncore, struct i915_error_regs regs,
+		     u32 emr_val)
+{
+	intel_uncore_write(uncore, regs.eir, 0xffffffff);
+	intel_uncore_posting_read(uncore, regs.eir);
+	intel_uncore_write(uncore, regs.eir, 0xffffffff);
+	intel_uncore_posting_read(uncore, regs.eir);
+
+	intel_uncore_write(uncore, regs.emr, emr_val);
+	intel_uncore_posting_read(uncore, regs.emr);
 }
 
 bool intel_irqs_enabled(struct xe_device *xe)
 {
-	/*
-	 * XXX: i915 has a racy handling of the irq.enabled, since it doesn't
-	 * lock its transitions. Because of that, the irq.enabled sometimes
-	 * is not read with the irq.lock in place.
-	 * However, the most critical cases like vblank and page flips are
-	 * properly using the locks.
-	 * We cannot take the lock in here or run any kind of assert because
-	 * of i915 inconsistency.
-	 * But at this point the xe irq is better protected against races,
-	 * although the full solution would be protecting the i915 side.
-	 */
-	return xe->irq.enabled;
+	return atomic_read(&xe->irq.enabled);
 }
 
 void intel_synchronize_irq(struct xe_device *xe)
diff --git a/drivers/gpu/drm/xe/display/intel_bo.c b/drivers/gpu/drm/xe/display/intel_bo.c
new file mode 100644
index 000000000000..27437c22bd70
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/intel_bo.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2024 Intel Corporation */
+
+#include <drm/drm_gem.h>
+
+#include "xe_bo.h"
+#include "intel_bo.h"
+
+bool intel_bo_is_tiled(struct drm_gem_object *obj)
+{
+	/* legacy tiling is unused */
+	return false;
+}
+
+bool intel_bo_is_userptr(struct drm_gem_object *obj)
+{
+	/* xe does not have userptr bos */
+	return false;
+}
+
+bool intel_bo_is_shmem(struct drm_gem_object *obj)
+{
+	return false;
+}
+
+bool intel_bo_is_protected(struct drm_gem_object *obj)
+{
+	return xe_bo_is_protected(gem_to_xe_bo(obj));
+}
+
+void intel_bo_flush_if_display(struct drm_gem_object *obj)
+{
+}
+
+int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+	return drm_gem_prime_mmap(obj, vma);
+}
+
+int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size)
+{
+	struct xe_bo *bo = gem_to_xe_bo(obj);
+
+	return xe_bo_read(bo, offset, dst, size);
+}
+
+struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj)
+{
+	return NULL;
+}
+
+struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj,
+						   struct intel_frontbuffer *front)
+{
+	return front;
+}
+
+void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
+{
+	/* FIXME */
+}
diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.c b/drivers/gpu/drm/xe/display/intel_fb_bo.c
index 63ce97cc4cfe..ebdb22c9499d 100644
--- a/drivers/gpu/drm/xe/display/intel_fb_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_fb_bo.c
@@ -11,8 +11,10 @@
 #include "intel_fb_bo.h"
 #include "xe_bo.h"
 
-void intel_fb_bo_framebuffer_fini(struct xe_bo *bo)
+void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj)
 {
+	struct xe_bo *bo = gem_to_xe_bo(obj);
+
 	if (bo->flags & XE_BO_FLAG_PINNED) {
 		/* Unpin our kernel fb first */
 		xe_bo_lock(bo, false);
@@ -22,10 +24,11 @@ void intel_fb_bo_framebuffer_fini(struct xe_bo *bo)
 	xe_bo_put(bo);
 }
 
-int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
-				 struct xe_bo *bo,
+int intel_fb_bo_framebuffer_init(struct drm_framebuffer *fb,
+				 struct drm_gem_object *obj,
 				 struct drm_mode_fb_cmd2 *mode_cmd)
 {
+	struct xe_bo *bo = gem_to_xe_bo(obj);
 	struct xe_device *xe = to_xe_device(bo->ttm.base.dev);
 	int ret;
 
@@ -47,10 +50,10 @@
 	/*
 	 * XE_BO_FLAG_SCANOUT should ideally be set at creation, or is
	 * automatically set when creating FB. We cannot change caching
-	 * mode when the boect is VM_BINDed, so we can only set
+	 * mode when the bo is VM_BINDed, so we can only set
 	 * coherency with display when unbound.
 	 */
-	if (XE_IOCTL_DBG(xe, !list_empty(&bo->ttm.base.gpuva.list))) {
+	if (XE_IOCTL_DBG(xe, xe_bo_is_vm_bound(bo))) {
 		ttm_bo_unreserve(&bo->ttm);
 		ret = -EINVAL;
 		goto err;
@@ -65,11 +68,12 @@ err:
 	return ret;
 }
 
-struct xe_bo *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
-					  struct drm_file *filp,
-					  const struct drm_mode_fb_cmd2 *mode_cmd)
+struct drm_gem_object *intel_fb_bo_lookup_valid_bo(struct drm_device *drm,
+						   struct drm_file *filp,
+						   const struct drm_mode_fb_cmd2 *mode_cmd)
 {
-	struct drm_i915_gem_object *bo;
+	struct xe_device *xe = to_xe_device(drm);
+	struct xe_bo *bo;
 	struct drm_gem_object *gem = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
 
 	if (!gem)
@@ -77,12 +81,12 @@ struct xe_bo *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
 
 	bo = gem_to_xe_bo(gem);
 	/* Require vram placement or dma-buf import */
-	if (IS_DGFX(i915) &&
-	    !xe_bo_can_migrate(gem_to_xe_bo(gem), XE_PL_VRAM0) &&
+	if (IS_DGFX(xe) &&
+	    !xe_bo_can_migrate(bo, XE_PL_VRAM0) &&
 	    bo->ttm.type != ttm_bo_type_sg) {
 		drm_gem_object_put(gem);
 		return ERR_PTR(-EREMOTE);
 	}
 
-	return bo;
+	return gem;
 }
diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.h b/drivers/gpu/drm/xe/display/intel_fb_bo.h
deleted file mode 100644
index 5d365b925b7a..000000000000
--- a/drivers/gpu/drm/xe/display/intel_fb_bo.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2021 Intel Corporation
- */
-
-#ifndef __INTEL_FB_BO_H__
-#define __INTEL_FB_BO_H__
-
-struct drm_file;
-struct drm_mode_fb_cmd2;
-struct drm_i915_private;
-struct intel_framebuffer;
-struct xe_bo;
-
-void intel_fb_bo_framebuffer_fini(struct xe_bo *bo);
-int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
-				 struct xe_bo *bo,
-				 struct drm_mode_fb_cmd2 *mode_cmd);
-
-struct xe_bo *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
-					  struct drm_file *filp,
-					  const struct drm_mode_fb_cmd2 *mode_cmd);
-
-#endif
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
index 99499d6c0256..e8191562d122 100644
--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -6,6 +6,7 @@
 #include <drm/drm_fb_helper.h>
 
 #include "intel_display_types.h"
+#include "intel_fb.h"
 #include "intel_fbdev_fb.h"
 #include "xe_bo.h"
 #include "xe_ttm_stolen_mgr.h"
@@ -20,7 +21,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
 	struct drm_device *dev = helper->dev;
 	struct xe_device *xe = to_xe_device(dev);
 	struct drm_mode_fb_cmd2 mode_cmd = {};
-	struct drm_i915_gem_object *obj;
+	struct xe_bo *obj;
 	int size;
 
 	/* we don't do packed 24bpp */
@@ -44,7 +45,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
 				   NULL, size,
 				   ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
 				   XE_BO_FLAG_STOLEN |
-				   XE_BO_FLAG_PINNED);
+				   XE_BO_FLAG_GGTT);
 	if (!IS_ERR(obj))
 		drm_info(&xe->drm, "Allocated fbdev into stolen\n");
 	else
@@ -55,7 +56,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
 		obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, size,
 					   ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
 					   XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
-					   XE_BO_FLAG_PINNED);
+					   XE_BO_FLAG_GGTT);
 	}
 
 	if (IS_ERR(obj)) {
@@ -64,13 +65,13 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
 		goto err;
 	}
 
-	fb = intel_framebuffer_create(obj, &mode_cmd);
+	fb = intel_framebuffer_create(&obj->ttm.base, &mode_cmd);
 	if (IS_ERR(fb)) {
 		xe_bo_unpin_map_no_vm(obj);
 		goto err;
 	}
 
-	drm_gem_object_put(intel_bo_to_drm_bo(obj));
+	drm_gem_object_put(&obj->ttm.base);
 
 	return to_intel_framebuffer(fb);
 
@@ -78,10 +79,11 @@ err:
 	return ERR_CAST(fb);
 }
 
-int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
-			     struct drm_i915_gem_object *obj, struct i915_vma *vma)
+int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info,
+			     struct drm_gem_object *_obj, struct i915_vma *vma)
 {
-	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+	struct xe_bo *obj = gem_to_xe_bo(_obj);
+	struct pci_dev *pdev = to_pci_dev(display->drm->dev);
 
 	if (!(obj->flags & XE_BO_FLAG_SYSTEM)) {
 		if (obj->flags & XE_BO_FLAG_STOLEN)
@@ -100,7 +102,7 @@ int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info
 	XE_WARN_ON(iosys_map_is_null(&obj->vmap));
 
 	info->screen_base = obj->vmap.vaddr_iomem;
-	info->screen_size = intel_bo_to_drm_bo(obj)->size;
+	info->screen_size = obj->ttm.base.size;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index 75736faf2a80..9f4ade25787a 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -4,16 +4,18 @@
  */
 
 #include "xe_display.h"
-#include "regs/xe_regs.h"
+#include "regs/xe_irq_regs.h"
 
 #include <linux/fb.h>
 
+#include <drm/drm_client.h>
+#include <drm/drm_client_event.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_managed.h>
+#include <drm/drm_probe_helper.h>
 #include <uapi/drm/xe_drm.h>
 
 #include "soc/intel_dram.h"
-#include "i915_drv.h"	/* FIXME: HAS_DISPLAY() depends on this */
 #include "intel_acpi.h"
 #include "intel_audio.h"
 #include "intel_bw.h"
@@ -22,19 +24,21 @@
 #include "intel_display_irq.h"
 #include "intel_display_types.h"
 #include "intel_dmc.h"
+#include "intel_dmc_wl.h"
 #include "intel_dp.h"
 #include "intel_encoder.h"
 #include "intel_fbdev.h"
 #include "intel_hdcp.h"
 #include "intel_hotplug.h"
 #include "intel_opregion.h"
+#include "skl_watermark.h"
 #include "xe_module.h"
 
 /* Xe device functions */
 
 static bool has_display(struct xe_device *xe)
 {
-	return HAS_DISPLAY(xe);
+	return HAS_DISPLAY(&xe->display);
 }
 
 /**
@@ -65,6 +69,10 @@ void xe_display_driver_set_hooks(struct drm_driver *driver)
 	if (!xe_modparam.probe_display)
 		return;
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+	driver->fbdev_probe = intel_fbdev_driver_fbdev_probe;
+#endif
+
 	driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
 }
 
@@ -96,35 +104,13 @@ int xe_display_create(struct xe_device *xe)
 	spin_lock_init(&xe->display.fb_tracking.lock);
 
 	xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
+	if (!xe->display.hotplug.dp_wq)
+		return -ENOMEM;
 
 	return drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
 }
 
-static void xe_display_fini_nommio(struct drm_device *dev, void *dummy)
-{
-	struct xe_device *xe = to_xe_device(dev);
-
-	if (!xe->info.probe_display)
-		return;
-
-	intel_power_domains_cleanup(xe);
-}
-
-int xe_display_init_nommio(struct xe_device *xe)
-{
-	if (!xe->info.probe_display)
-		return 0;
-
-	/* Fake uncore lock */
-	spin_lock_init(&xe->uncore.lock);
-
-	/* This must be called before any calls to HAS_PCH_* */
-	intel_detect_pch(xe);
-
-	return drmm_add_action_or_reset(&xe->drm, xe_display_fini_nommio, xe);
-}
-
-static void xe_display_fini_noirq(void *arg)
+static void xe_display_fini_early(void *arg)
 {
 	struct xe_device *xe = arg;
 	struct intel_display *display = &xe->display;
@@ -132,11 +118,13 @@
 	if (!xe->info.probe_display)
 		return;
 
-	intel_display_driver_remove_noirq(xe);
+	intel_display_driver_remove_nogem(display);
+	intel_display_driver_remove_noirq(display);
 	intel_opregion_cleanup(display);
+	intel_power_domains_cleanup(display);
 }
 
-int xe_display_init_noirq(struct xe_device *xe)
+int xe_display_init_early(struct xe_device *xe)
 {
 	struct intel_display *display = &xe->display;
 	int err;
@@ -144,7 +132,10 @@
 	if (!xe->info.probe_display)
 		return 0;
 
-	intel_display_driver_early_probe(xe);
+	/* Fake uncore lock */
+	spin_lock_init(&xe->uncore.lock);
+
+	intel_display_driver_early_probe(display);
 
 	/* Early display init.. */
 	intel_opregion_setup(display);
@@ -155,99 +146,86 @@
 	 */
 	intel_dram_detect(xe);
 
-	intel_bw_init_hw(xe);
+	intel_bw_init_hw(display);
 
-	intel_display_device_info_runtime_init(xe);
+	intel_display_device_info_runtime_init(display);
 
-	err = intel_display_driver_probe_noirq(xe);
-	if (err) {
-		intel_opregion_cleanup(display);
-		return err;
-	}
+	err = intel_display_driver_probe_noirq(display);
+	if (err)
+		goto err_opregion;
 
-	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_noirq, xe);
+	err = intel_display_driver_probe_nogem(display);
+	if (err)
+		goto err_noirq;
+
+	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_early, xe);
+err_noirq:
+	intel_display_driver_remove_noirq(display);
+	intel_power_domains_cleanup(display);
+err_opregion:
+	intel_opregion_cleanup(display);
+	return err;
 }
 
-static void xe_display_fini_noaccel(void *arg)
+static void xe_display_fini(void *arg)
 {
 	struct xe_device *xe = arg;
+	struct intel_display *display = &xe->display;
 
-	if (!xe->info.probe_display)
-		return;
-
-	intel_display_driver_remove_nogem(xe);
+	intel_hpd_poll_fini(display);
+	intel_hdcp_component_fini(display);
+	intel_audio_deinit(display);
+	intel_display_driver_remove(display);
 }
 
-int xe_display_init_noaccel(struct xe_device *xe)
+int xe_display_init(struct xe_device *xe)
 {
+	struct intel_display *display = &xe->display;
 	int err;
 
 	if (!xe->info.probe_display)
 		return 0;
 
-	err = intel_display_driver_probe_nogem(xe);
+	err = intel_display_driver_probe(display);
 	if (err)
 		return err;
 
-	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_noaccel, xe);
-}
-
-int xe_display_init(struct xe_device *xe)
-{
-	if (!xe->info.probe_display)
-		return 0;
-
-	return intel_display_driver_probe(xe);
-}
-
-void xe_display_fini(struct xe_device *xe)
-{
-	if (!xe->info.probe_display)
-		return;
-
-	intel_hpd_poll_fini(xe);
-
-	intel_hdcp_component_fini(xe);
-	intel_audio_deinit(xe);
+	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini, xe);
 }
 
 void xe_display_register(struct xe_device *xe)
 {
+	struct intel_display *display = &xe->display;
+
 	if (!xe->info.probe_display)
 		return;
 
-	intel_display_driver_register(xe);
-	intel_register_dsm_handler();
-	intel_power_domains_enable(xe);
+	intel_display_driver_register(display);
+	intel_power_domains_enable(display);
 }
 
 void xe_display_unregister(struct xe_device *xe)
 {
-	if (!xe->info.probe_display)
-		return;
-
-	intel_unregister_dsm_handler();
-	intel_power_domains_disable(xe);
-	intel_display_driver_unregister(xe);
-}
+	struct intel_display *display = &xe->display;
 
-void xe_display_driver_remove(struct xe_device *xe)
-{
 	if (!xe->info.probe_display)
 		return;
 
-	intel_display_driver_remove(xe);
+	intel_power_domains_disable(display);
+	intel_display_driver_unregister(display);
 }
 
 /* IRQ-related functions */
 
 void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
 {
+	struct intel_display *display = &xe->display;
+
 	if (!xe->info.probe_display)
 		return;
 
 	if (master_ctl & DISPLAY_IRQ)
-		gen11_display_irq_handler(xe);
+		gen11_display_irq_handler(display);
 }
 
 void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
@@ -263,19 +241,23 @@ void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
 
 void xe_display_irq_reset(struct xe_device *xe)
 {
+	struct intel_display *display = &xe->display;
+
 	if (!xe->info.probe_display)
 		return;
 
-	gen11_display_irq_reset(xe);
+	gen11_display_irq_reset(display);
 }
 
 void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
 {
+	struct intel_display *display = &xe->display;
+
 	if (!xe->info.probe_display)
 		return;
 
 	if (gt->info.id == XE_GT0)
-		gen11_de_irq_postinstall(xe);
+		gen11_de_irq_postinstall(display);
 }
 
 static bool suspend_to_idle(void)
@@ -308,22 +290,58 @@ static void xe_display_flush_cleanup_work(struct xe_device *xe)
 	}
 }
 
-/* TODO: System and runtime suspend/resume sequences will be sanitized as a follow-up. */
-void xe_display_pm_runtime_suspend(struct xe_device *xe)
+static void xe_display_enable_d3cold(struct xe_device *xe)
 {
+	struct intel_display *display = &xe->display;
+
 	if (!xe->info.probe_display)
 		return;
 
-	if (xe->d3cold.allowed)
-		xe_display_pm_suspend(xe, true);
+	/*
+	 * We do a lot of poking in a lot of registers, make sure they work
+	 * properly.
+	 */
+	intel_power_domains_disable(display);
+
+	xe_display_flush_cleanup_work(xe);
+
+	intel_opregion_suspend(display, PCI_D3cold);
+
+	intel_dmc_suspend(display);
+
+	if (has_display(xe))
+		intel_hpd_poll_enable(display);
+}
+
+static void xe_display_disable_d3cold(struct xe_device *xe)
+{
+	struct intel_display *display = &xe->display;
+
+	if (!xe->info.probe_display)
+		return;
 
-	intel_hpd_poll_enable(xe);
+	intel_dmc_resume(display);
+
+	if (has_display(xe))
+		drm_mode_config_reset(&xe->drm);
+
+	intel_display_driver_init_hw(display);
+
+	intel_hpd_init(display);
+
+	if (has_display(xe))
+		intel_hpd_poll_disable(display);
+
+	intel_opregion_resume(display);
+
+	intel_power_domains_enable(display);
 }
 
-void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
+void xe_display_pm_suspend(struct xe_device *xe)
 {
 	struct intel_display *display = &xe->display;
 	bool s2idle = suspend_to_idle();
+
 	if (!xe->info.probe_display)
 		return;
 
@@ -331,113 +349,201 @@
 	 * We do a lot of poking in a lot of registers, make sure they work
 	 * properly.
 	 */
-	intel_power_domains_disable(xe);
-	intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
-	if (!runtime && has_display(xe)) {
+	intel_power_domains_disable(display);
+	drm_client_dev_suspend(&xe->drm, false);
+
+	if (has_display(xe)) {
 		drm_kms_helper_poll_disable(&xe->drm);
-		intel_display_driver_disable_user_access(xe);
-		intel_display_driver_suspend(xe);
+		intel_display_driver_disable_user_access(display);
+		intel_display_driver_suspend(display);
 	}
 
 	xe_display_flush_cleanup_work(xe);
 
-	intel_dp_mst_suspend(xe);
-
-	intel_hpd_cancel_work(xe);
+	intel_hpd_cancel_work(display);
 
-	if (!runtime && has_display(xe)) {
-		intel_display_driver_suspend_access(xe);
+	if (has_display(xe)) {
+		intel_display_driver_suspend_access(display);
 		intel_encoder_suspend_all(&xe->display);
 	}
 
 	intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);
 
-	intel_dmc_suspend(xe);
+	intel_dmc_suspend(display);
 }
 
-void xe_display_pm_suspend_late(struct xe_device *xe)
+void xe_display_pm_shutdown(struct xe_device *xe)
 {
-	bool s2idle = suspend_to_idle();
+	struct intel_display *display = &xe->display;
+
+	if (!xe->info.probe_display)
+		return;
+
+	intel_power_domains_disable(display);
+	drm_client_dev_suspend(&xe->drm, false);
+
+	if (has_display(xe)) {
+		drm_kms_helper_poll_disable(&xe->drm);
+		intel_display_driver_disable_user_access(display);
+		intel_display_driver_suspend(display);
+	}
+
+	xe_display_flush_cleanup_work(xe);
+	intel_dp_mst_suspend(display);
+	intel_hpd_cancel_work(display);
+
+	if (has_display(xe))
+		intel_display_driver_suspend_access(display);
+
+	intel_encoder_suspend_all(display);
+	intel_encoder_shutdown_all(display);
+
+	intel_opregion_suspend(display, PCI_D3cold);
+
+	intel_dmc_suspend(display);
+}
+
+void xe_display_pm_runtime_suspend(struct xe_device *xe)
+{
+	struct intel_display *display = &xe->display;
+
 	if (!xe->info.probe_display)
 		return;
 
-	intel_power_domains_suspend(xe, s2idle);
+	if (xe->d3cold.allowed) {
+		xe_display_enable_d3cold(xe);
+		return;
+	}
 
-	intel_display_power_suspend_late(xe);
+	intel_hpd_poll_enable(display);
 }
 
-void xe_display_pm_runtime_resume(struct xe_device *xe)
+void xe_display_pm_suspend_late(struct xe_device *xe)
 {
+	struct intel_display *display = &xe->display;
+	bool s2idle = suspend_to_idle();
+
 	if (!xe->info.probe_display)
 		return;
 
-	intel_hpd_poll_disable(xe);
+	intel_display_power_suspend_late(display, s2idle);
+}
+
+void xe_display_pm_runtime_suspend_late(struct xe_device *xe)
+{
+	struct intel_display *display = &xe->display;
+
+	if (!xe->info.probe_display)
+		return;
 
 	if (xe->d3cold.allowed)
-		xe_display_pm_resume(xe, true);
+		xe_display_pm_suspend_late(xe);
+
+	/*
+	 * If xe_display_pm_suspend_late() is not called, it is likely
+	 * that we will be on dynamic DC states with DMC wakelock enabled. We
+	 * need to flush the release work in that case.
+	 */
+	intel_dmc_wl_flush_release_work(display);
 }
 
-void xe_display_pm_resume_early(struct xe_device *xe)
+void xe_display_pm_shutdown_late(struct xe_device *xe)
 {
+	struct intel_display *display = &xe->display;
+
 	if (!xe->info.probe_display)
 		return;
 
-	intel_display_power_resume_early(xe);
+	/*
+	 * The only requirement is to reboot with display DC states disabled,
+	 * for now leaving all display power wells in the INIT power domain
+	 * enabled.
+	 */
+	intel_power_domains_driver_remove(display);
+}
+
+void xe_display_pm_resume_early(struct xe_device *xe)
+{
+	struct intel_display *display = &xe->display;
+
+	if (!xe->info.probe_display)
+		return;
 
-	intel_power_domains_resume(xe);
+	intel_display_power_resume_early(display);
 }
 
-void xe_display_pm_resume(struct xe_device *xe, bool runtime)
+void xe_display_pm_resume(struct xe_device *xe)
 {
 	struct intel_display *display = &xe->display;
 
 	if (!xe->info.probe_display)
 		return;
 
-	intel_dmc_resume(xe);
+	intel_dmc_resume(display);
 
 	if (has_display(xe))
 		drm_mode_config_reset(&xe->drm);
 
-	intel_display_driver_init_hw(xe);
-	intel_hpd_init(xe);
+	intel_display_driver_init_hw(display);
 
-	if (!runtime && has_display(xe))
-		intel_display_driver_resume_access(xe);
+	if (has_display(xe))
+		intel_display_driver_resume_access(display);
+
+	intel_hpd_init(display);
 
-	/* MST sideband requires HPD interrupts enabled */
-	intel_dp_mst_resume(xe);
-	if (!runtime && has_display(xe)) {
-		intel_display_driver_resume(xe);
+	if (has_display(xe)) {
+		intel_display_driver_resume(display);
 		drm_kms_helper_poll_enable(&xe->drm);
-		intel_display_driver_enable_user_access(xe);
-		intel_hpd_poll_disable(xe);
+		intel_display_driver_enable_user_access(display);
 	}
 
+	if (has_display(xe))
+		intel_hpd_poll_disable(display);
+
 	intel_opregion_resume(display);
 
-	intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false);
+	drm_client_dev_resume(&xe->drm, false);
 
-	intel_power_domains_enable(xe);
+	intel_power_domains_enable(display);
 }
 
+void xe_display_pm_runtime_resume(struct xe_device *xe)
+{
+	struct intel_display *display = &xe->display;
+
+	if (!xe->info.probe_display)
+		return;
+
+	if (xe->d3cold.allowed) {
+		xe_display_disable_d3cold(xe);
+		return;
+	}
+
+	intel_hpd_init(display);
+	intel_hpd_poll_disable(display);
+	skl_watermark_ipc_update(display);
+}
+
+
 static void display_device_remove(struct drm_device *dev, void *arg)
 {
-	struct xe_device *xe = arg;
+	struct intel_display *display = arg;
 
-	intel_display_device_remove(xe);
+	intel_display_device_remove(display);
 }
 
 int xe_display_probe(struct xe_device *xe)
 {
+	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+	struct intel_display *display;
 	int err;
 
 	if (!xe->info.probe_display)
 		goto no_display;
 
-	intel_display_device_probe(xe);
+	display = intel_display_device_probe(pdev);
 
-	err = drmm_add_action_or_reset(&xe->drm, display_device_remove, xe);
+	err = drmm_add_action_or_reset(&xe->drm, display_device_remove, display);
 	if (err)
 		return err;
diff --git a/drivers/gpu/drm/xe/display/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h
index 53d727fd792b..46e14f8dee28 100644
--- a/drivers/gpu/drm/xe/display/xe_display.h
+++ b/drivers/gpu/drm/xe/display/xe_display.h
@@ -14,17 +14,13 @@ struct drm_driver;
 
 bool xe_display_driver_probe_defer(struct pci_dev *pdev);
 void xe_display_driver_set_hooks(struct drm_driver *driver);
-void xe_display_driver_remove(struct xe_device *xe);
 
 int xe_display_create(struct xe_device *xe);
 
 int xe_display_probe(struct xe_device *xe);
 
-int xe_display_init_nommio(struct xe_device *xe);
-int xe_display_init_noirq(struct xe_device *xe);
-int xe_display_init_noaccel(struct xe_device *xe);
+int xe_display_init_early(struct xe_device *xe);
 int xe_display_init(struct xe_device *xe);
-void xe_display_fini(struct xe_device *xe);
 
 void xe_display_register(struct xe_device *xe);
 void xe_display_unregister(struct xe_device *xe);
@@ -34,11 +30,14 @@ void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir);
 void xe_display_irq_reset(struct xe_device *xe);
 void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt);
 
-void xe_display_pm_suspend(struct xe_device *xe, bool runtime);
+void xe_display_pm_suspend(struct xe_device *xe);
+void xe_display_pm_shutdown(struct xe_device *xe);
 void xe_display_pm_suspend_late(struct xe_device *xe);
+void xe_display_pm_shutdown_late(struct xe_device *xe);
 void xe_display_pm_resume_early(struct xe_device *xe);
-void xe_display_pm_resume(struct xe_device *xe, bool runtime);
+void xe_display_pm_resume(struct xe_device *xe);
 void xe_display_pm_runtime_suspend(struct xe_device *xe);
+void xe_display_pm_runtime_suspend_late(struct xe_device *xe);
 void xe_display_pm_runtime_resume(struct xe_device *xe);
 
 #else
@@ -51,11 +50,8 @@ static inline int xe_display_create(struct xe_device *xe) { return 0; }
 
 static inline int xe_display_probe(struct xe_device *xe) { return 0; }
 
-static inline int xe_display_init_nommio(struct xe_device *xe) { return 0; }
-static inline int xe_display_init_noirq(struct xe_device *xe) { return 0; }
-static inline int xe_display_init_noaccel(struct xe_device *xe) { return 0; }
+static inline int xe_display_init_early(struct xe_device *xe) { return 0; }
 static inline int xe_display_init(struct xe_device *xe) { return 0; }
-static inline void xe_display_fini(struct xe_device *xe) {}
 
 static inline void xe_display_register(struct xe_device *xe) {}
 static inline void xe_display_unregister(struct xe_device *xe) {}
@@ -65,11 +61,14 @@ static inline void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
 static inline void xe_display_irq_reset(struct xe_device *xe) {}
 static inline void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) {}
 
-static inline void xe_display_pm_suspend(struct xe_device *xe, bool runtime) {}
+static inline void xe_display_pm_suspend(struct xe_device *xe) {}
+static inline void xe_display_pm_shutdown(struct xe_device *xe) {}
 static inline void xe_display_pm_suspend_late(struct xe_device *xe) {}
+static inline void xe_display_pm_shutdown_late(struct xe_device *xe) {}
 static inline void xe_display_pm_resume_early(struct xe_device *xe) {}
-static inline void xe_display_pm_resume(struct xe_device *xe, bool runtime) {}
+static inline void xe_display_pm_resume(struct xe_device *xe) {}
 static inline void xe_display_pm_runtime_suspend(struct xe_device *xe) {}
+static inline void xe_display_pm_runtime_suspend_late(struct xe_device *xe) {}
 static inline void xe_display_pm_runtime_resume(struct xe_device *xe) {}
 
 #endif /* CONFIG_DRM_XE_DISPLAY */
diff --git a/drivers/gpu/drm/xe/display/xe_display_rpm.c b/drivers/gpu/drm/xe/display/xe_display_rpm.c
new file mode 100644
index 000000000000..1955153aadba
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_display_rpm.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include "intel_display_rpm.h"
+#include "xe_device_types.h"
+#include "xe_pm.h"
+
+static struct xe_device *display_to_xe(struct intel_display *display)
+{
+	return container_of(display, struct xe_device, display);
+}
+
+struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display)
+{
+	return intel_display_rpm_get(display);
+}
+
+void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref)
+{
+	intel_display_rpm_put(display, wakeref);
+}
+
+struct ref_tracker *intel_display_rpm_get(struct intel_display *display)
+{
+	return xe_pm_runtime_resume_and_get(display_to_xe(display)) ? INTEL_WAKEREF_DEF : NULL;
+}
+
+struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display)
+{
+	return xe_pm_runtime_get_if_in_use(display_to_xe(display)) ? INTEL_WAKEREF_DEF : NULL;
+}
+
+struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display)
+{
+	xe_pm_runtime_get_noresume(display_to_xe(display));
+
+	return INTEL_WAKEREF_DEF;
+}
+
+void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref)
+{
+	if (wakeref)
+		xe_pm_runtime_put(display_to_xe(display));
+}
+
+void intel_display_rpm_put_unchecked(struct intel_display *display)
+{
+	xe_pm_runtime_put(display_to_xe(display));
+}
+
+bool intel_display_rpm_suspended(struct intel_display *display)
+{
+	struct xe_device *xe = display_to_xe(display);
+
+	return pm_runtime_suspended(xe->drm.dev);
+}
+
+void assert_display_rpm_held(struct intel_display *display)
+{
+	/* FIXME */
+}
+
+void intel_display_rpm_assert_block(struct intel_display *display)
+{
+	/* FIXME */
+}
+
+void intel_display_rpm_assert_unblock(struct intel_display *display)
+{
+	/* FIXME */
+}
diff --git a/drivers/gpu/drm/xe/display/xe_display_rps.c b/drivers/gpu/drm/xe/display/xe_display_rps.c
deleted file mode 100644
index ab21c581c192..000000000000
--- a/drivers/gpu/drm/xe/display/xe_display_rps.c
+++ /dev/null
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#include "intel_display_rps.h"
-
-void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
-					  struct dma_fence *fence)
-{
-}
-
-void intel_display_rps_mark_interactive(struct drm_i915_private *i915,
-					struct intel_atomic_state *state,
-					bool interactive)
-{
-}
diff --git a/drivers/gpu/drm/xe/display/xe_display_wa.c b/drivers/gpu/drm/xe/display/xe_display_wa.c
index 68e3d1959ad6..2933ca97d673 100644
--- a/drivers/gpu/drm/xe/display/xe_display_wa.c
+++ b/drivers/gpu/drm/xe/display/xe_display_wa.c
@@ -10,7 +10,9 @@
 
 #include <generated/xe_wa_oob.h>
 
-bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915)
+bool intel_display_needs_wa_16023588340(struct intel_display *display)
 {
-	return XE_WA(xe_root_mmio_gt(i915), 16023588340);
+	struct xe_device *xe = to_xe_device(display->drm);
+
+	return XE_WA(xe_root_mmio_gt(xe), 16023588340);
 }
diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
index f99d901a3214..9f941fc2e36b 100644
--- a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
+++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
@@ -17,10 +17,7 @@ u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
 
 void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val)
 {
-	struct xe_device *xe = dsb_buf->vma->bo->tile->xe;
-
 	iosys_map_wr(&dsb_buf->vma->bo->vmap, idx * 4, u32, val);
-	xe_device_l2_flush(xe);
 }
 
 u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
@@ -30,12 +27,9 @@ u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
 
 void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size)
 {
-	struct xe_device *xe = dsb_buf->vma->bo->tile->xe;
-
 	WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf));
 
 	iosys_map_memset(&dsb_buf->vma->bo->vmap, idx * 4, val, size);
-	xe_device_l2_flush(xe);
 }
 
 bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf, size_t size)
@@ -48,11 +42,12 @@ bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *d
 	if (!vma)
 		return false;
 
+	/* Set scanout flag for WC mapping */
 	obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe),
 				   NULL, PAGE_ALIGN(size),
 				   ttm_bo_type_kernel,
 				   XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
-				   XE_BO_FLAG_GGTT);
+				   XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT);
 	if (IS_ERR(obj)) {
 		kfree(vma);
 		return false;
@@ -73,5 +68,12 @@ void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
 
 void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
 {
-	/* TODO: add xe specific flush_map() for dsb buffer object. */
+	struct xe_device *xe = dsb_buf->vma->bo->tile->xe;
+
+	/*
+	 * The memory barrier here is to ensure coherency of DSB vs MMIO,
+	 * both for weak ordering archs and discrete cards.
+	 */
+	xe_device_wmb(xe);
+	xe_device_l2_flush(xe);
 }
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index b58fc4ba2aac..55259969480b 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -5,10 +5,12 @@
 
 #include <drm/ttm/ttm_bo.h>
 
+#include "i915_vma.h"
 #include "intel_display_types.h"
 #include "intel_dpt.h"
 #include "intel_fb.h"
 #include "intel_fb_pin.h"
+#include "intel_fbdev.h"
 #include "xe_bo.h"
 #include "xe_device.h"
 #include "xe_ggtt.h"
@@ -79,12 +81,14 @@ write_dpt_remapped(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs,
 
 static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
 			       const struct i915_gtt_view *view,
-			       struct i915_vma *vma)
+			       struct i915_vma *vma,
+			       unsigned int alignment)
 {
 	struct xe_device *xe = to_xe_device(fb->base.dev);
 	struct xe_tile *tile0 = xe_device_get_root_tile(xe);
 	struct xe_ggtt *ggtt = tile0->mem.ggtt;
-	struct xe_bo *bo = intel_fb_obj(&fb->base), *dpt;
+	struct drm_gem_object *obj = intel_fb_bo(&fb->base);
+	struct xe_bo *bo = gem_to_xe_bo(obj), *dpt;
 	u32 dpt_size, size = bo->ttm.base.size;
 
 	if (view->type == I915_GTT_VIEW_NORMAL)
@@ -98,23 +102,29 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
 				 XE_PAGE_SIZE);
 
 	if (IS_DGFX(xe))
-		dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
-					   ttm_bo_type_kernel,
-					   XE_BO_FLAG_VRAM0 |
-					   XE_BO_FLAG_GGTT |
-					   XE_BO_FLAG_PAGETABLE);
+		dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
+						      dpt_size, ~0ull,
+						      ttm_bo_type_kernel,
+						      XE_BO_FLAG_VRAM0 |
+						      XE_BO_FLAG_GGTT |
+						      XE_BO_FLAG_PAGETABLE,
+						      alignment);
 	else
-		dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
-					   ttm_bo_type_kernel,
-					   XE_BO_FLAG_STOLEN |
-					   XE_BO_FLAG_GGTT |
-					   XE_BO_FLAG_PAGETABLE);
+		dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
+						      dpt_size, ~0ull,
+						      ttm_bo_type_kernel,
+						      XE_BO_FLAG_STOLEN |
+						      XE_BO_FLAG_GGTT |
+						      XE_BO_FLAG_PAGETABLE,
+						      alignment);
 	if (IS_ERR(dpt))
-		dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
-					   ttm_bo_type_kernel,
-					   XE_BO_FLAG_SYSTEM |
-					   XE_BO_FLAG_GGTT |
-					   XE_BO_FLAG_PAGETABLE);
+		dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
+						      dpt_size, ~0ull,
+						      ttm_bo_type_kernel,
+						      XE_BO_FLAG_SYSTEM |
+						      XE_BO_FLAG_GGTT |
+						      XE_BO_FLAG_PAGETABLE,
+						      alignment);
 	if (IS_ERR(dpt))
 		return PTR_ERR(dpt);
 
@@ -153,7 +163,10 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
 	}
 
 	vma->dpt = dpt;
-	vma->node = dpt->ggtt_node;
+	vma->node = dpt->ggtt_node[tile0->id];
+
+	/* Ensure DPT writes are flushed */
+	xe_device_l2_flush(xe);
 
 	return 0;
 }
@@ -183,9 +196,11 @@ write_ggtt_rotated(struct xe_bo *bo, struct xe_ggtt *ggtt, u32 *ggtt_ofs, u32 bo
 
 static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
 				const struct i915_gtt_view *view,
-				struct i915_vma *vma)
+				struct i915_vma *vma,
+				unsigned int alignment)
 {
-	struct xe_bo *bo = intel_fb_obj(&fb->base);
+	struct drm_gem_object *obj = intel_fb_bo(&fb->base);
+	struct xe_bo *bo = gem_to_xe_bo(obj);
 	struct xe_device *xe = to_xe_device(fb->base.dev);
 	struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
 	u32 align;
@@ -203,8 +218,8 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
 	if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
 		align = max_t(u32, align, SZ_64K);
 
-	if (bo->ggtt_node && view->type == I915_GTT_VIEW_NORMAL) {
-		vma->node = bo->ggtt_node;
+	if (bo->ggtt_node[ggtt->tile->id] && view->type == I915_GTT_VIEW_NORMAL) {
+		vma->node = bo->ggtt_node[ggtt->tile->id];
 	} else if (view->type == I915_GTT_VIEW_NORMAL) {
 		u32 x, size = bo->ttm.base.size;
 
@@ -264,17 +279,20 @@ out:
 }
 
 static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
-					const struct i915_gtt_view *view)
+					const struct i915_gtt_view *view,
+					unsigned int alignment)
 {
 	struct drm_device *dev = fb->base.dev;
 	struct xe_device *xe = to_xe_device(dev);
 	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
-	struct xe_bo *bo = intel_fb_obj(&fb->base);
+	struct drm_gem_object *obj = intel_fb_bo(&fb->base);
+	struct xe_bo *bo = gem_to_xe_bo(obj);
 	int ret;
 
 	if (!vma)
 		return ERR_PTR(-ENODEV);
 
+	refcount_set(&vma->ref, 1);
 	if (IS_DGFX(to_xe_device(bo->ttm.base.dev)) &&
 	    intel_fb_rc_ccs_cc_plane(&fb->base) >= 0 &&
 	    !(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS)) {
@@ -312,14 +330,12 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
 	vma->bo = bo;
 
 	if (intel_fb_uses_dpt(&fb->base))
-		ret = __xe_pin_fb_vma_dpt(fb, view, vma);
+		ret = __xe_pin_fb_vma_dpt(fb, view, vma, alignment);
 	else
-		ret = __xe_pin_fb_vma_ggtt(fb, view, vma);
+		ret = __xe_pin_fb_vma_ggtt(fb, view, vma, alignment);
 	if (ret)
 		goto err_unpin;
 
-	/* Ensure DPT writes are flushed */
-	xe_device_l2_flush(xe);
 	return vma;
 
 err_unpin:
@@ -333,10 +349,15 @@ err:
 
 static void __xe_unpin_fb_vma(struct i915_vma *vma)
 {
+	u8 tile_id = vma->node->ggtt->tile->id;
+
+	if (!refcount_dec_and_test(&vma->ref))
+		return;
+
 	if (vma->dpt)
 		xe_bo_unpin_map_no_vm(vma->dpt);
-	else if (!xe_ggtt_node_allocated(vma->bo->ggtt_node) ||
-		 vma->bo->ggtt_node->base.start != vma->node->base.start)
+	else if (!xe_ggtt_node_allocated(vma->bo->ggtt_node[tile_id]) ||
+		 vma->bo->ggtt_node[tile_id]->base.start != vma->node->base.start)
 		xe_ggtt_node_remove(vma->node, false);
 
 	ttm_bo_reserve(&vma->bo->ttm, false, false, NULL);
@@ -350,12 +371,13 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
 		     const struct i915_gtt_view *view,
 		     unsigned int alignment,
 		     unsigned int phys_alignment,
+		     unsigned int vtd_guard,
 		     bool uses_fence,
 		     unsigned long *out_flags)
 {
 	*out_flags = 0;
 
-	return __xe_pin_fb_vma(to_intel_framebuffer(fb), view);
+	return __xe_pin_fb_vma(to_intel_framebuffer(fb), view, phys_alignment);
 }
 
 void intel_fb_unpin_vma(struct i915_vma *vma, unsigned long flags)
@@ -363,20 +385,58 @@ void intel_fb_unpin_vma(struct i915_vma *vma, unsigned long flags)
 	__xe_unpin_fb_vma(vma);
 }
 
-int intel_plane_pin_fb(struct intel_plane_state *plane_state)
+static bool reuse_vma(struct intel_plane_state *new_plane_state,
+		      const struct intel_plane_state *old_plane_state)
+{
+	struct intel_framebuffer *fb = to_intel_framebuffer(new_plane_state->hw.fb);
+	struct xe_device *xe = to_xe_device(fb->base.dev);
+	struct i915_vma *vma;
+
+	if (old_plane_state->hw.fb == new_plane_state->hw.fb &&
+	    !memcmp(&old_plane_state->view.gtt,
+		    &new_plane_state->view.gtt,
+		    sizeof(new_plane_state->view.gtt))) {
+		vma = old_plane_state->ggtt_vma;
+		goto found;
+	}
+
+	if (fb == intel_fbdev_framebuffer(xe->display.fbdev.fbdev)) {
+		vma = intel_fbdev_vma_pointer(xe->display.fbdev.fbdev);
+		if (vma)
+			goto found;
+	}
+
+	return false;
+
+found:
+	refcount_inc(&vma->ref);
+	new_plane_state->ggtt_vma = vma;
+	return true;
+}
+
+int intel_plane_pin_fb(struct intel_plane_state *new_plane_state,
+		       const struct intel_plane_state *old_plane_state)
 {
-	struct drm_framebuffer *fb = plane_state->hw.fb;
-	struct xe_bo *bo = intel_fb_obj(fb);
+	struct drm_framebuffer *fb = new_plane_state->hw.fb;
+	struct drm_gem_object *obj = intel_fb_bo(fb);
+	struct xe_bo *bo = gem_to_xe_bo(obj);
 	struct i915_vma *vma;
+	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+	struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
+	unsigned int alignment = plane->min_alignment(plane, fb, 0);
+
+	if (reuse_vma(new_plane_state, old_plane_state))
+		return 0;
 
 	/* We reject creating !SCANOUT fb's, so this is weird.. */
 	drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_FLAG_SCANOUT));
 
-	vma = __xe_pin_fb_vma(to_intel_framebuffer(fb), &plane_state->view.gtt);
+	vma = __xe_pin_fb_vma(intel_fb, &new_plane_state->view.gtt, alignment);
+
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
-	plane_state->ggtt_vma = vma;
+	new_plane_state->ggtt_vma = vma;
 	return 0;
 }
diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
index 6619a40aed15..b35a6f201d4a 100644
--- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
+++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
@@ -9,7 +9,6 @@
 
 #include "abi/gsc_command_header_abi.h"
 #include "intel_hdcp_gsc.h"
-#include "intel_hdcp_gsc_message.h"
 #include "xe_bo.h"
 #include "xe_device.h"
 #include "xe_device_types.h"
@@ -22,7 +21,8 @@
 
 #define HECI_MEADDRESS_HDCP 18
 
-struct intel_hdcp_gsc_message {
+struct intel_hdcp_gsc_context {
+	struct xe_device *xe;
 	struct xe_bo *hdcp_bo;
 	u64 hdcp_cmd_in;
 	u64 hdcp_cmd_out;
@@ -30,26 +30,24 @@
 
 #define HDCP_GSC_HEADER_SIZE sizeof(struct intel_gsc_mtl_header)
 
-bool intel_hdcp_gsc_cs_required(struct xe_device *xe)
-{
-	return DISPLAY_VER(xe) >= 14;
-}
-
-bool intel_hdcp_gsc_check_status(struct xe_device *xe)
+bool intel_hdcp_gsc_check_status(struct drm_device *drm)
 {
+	struct xe_device *xe = to_xe_device(drm);
 	struct xe_tile *tile = xe_device_get_root_tile(xe);
 	struct xe_gt *gt = tile->media_gt;
 	struct xe_gsc *gsc = &gt->uc.gsc;
 	bool ret = true;
+	unsigned int fw_ref;
 
-	if (!gsc && !xe_uc_fw_is_enabled(&gsc->fw)) {
+	if (!gsc || !xe_uc_fw_is_enabled(&gsc->fw)) {
 		drm_dbg_kms(&xe->drm,
 			    "GSC Components not ready for HDCP2.x\n");
 		return false;
 	}
 
 	xe_pm_runtime_get(xe);
-	if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC)) {
+	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
+	if (!fw_ref) {
 		drm_dbg_kms(&xe->drm,
 			    "failed to get forcewake to check proxy status\n");
 		ret = false;
@@ -59,7 +57,7 @@ bool intel_hdcp_gsc_check_status(struct xe_device *xe)
 	if (!xe_gsc_proxy_init_done(gsc))
 		ret = false;
 
-	xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
+	xe_force_wake_put(gt_to_fw(gt), fw_ref);
 out:
 	xe_pm_runtime_put(xe);
 	return ret;
@@ -67,7 +65,7 @@ out:
 
 /*This function helps allocate memory for the command that we will send to gsc cs */
 static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
-					     struct intel_hdcp_gsc_message *hdcp_message)
+					     struct intel_hdcp_gsc_context *gsc_context)
 {
 	struct xe_bo *bo = NULL;
 	u64 cmd_in, cmd_out;
@@ -89,104 +87,60 @@ static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
 	cmd_out = cmd_in + PAGE_SIZE;
 	xe_map_memset(xe, &bo->vmap, 0, 0, bo->size);
 
-	hdcp_message->hdcp_bo = bo;
-	hdcp_message->hdcp_cmd_in = cmd_in;
-	hdcp_message->hdcp_cmd_out = cmd_out;
+	gsc_context->hdcp_bo = bo;
+	gsc_context->hdcp_cmd_in = cmd_in;
+	gsc_context->hdcp_cmd_out = cmd_out;
+	gsc_context->xe = xe;
+
 out:
 	return ret;
 }
 
-static int intel_hdcp_gsc_hdcp2_init(struct xe_device *xe)
+struct intel_hdcp_gsc_context *intel_hdcp_gsc_context_alloc(struct drm_device *drm)
 {
-	struct intel_hdcp_gsc_message *hdcp_message;
+	struct xe_device *xe = to_xe_device(drm);
+	struct intel_hdcp_gsc_context *gsc_context;
 	int ret;
 
-	hdcp_message = kzalloc(sizeof(*hdcp_message), GFP_KERNEL);
-
-	if (!hdcp_message)
-		return -ENOMEM;
+	gsc_context = kzalloc(sizeof(*gsc_context), GFP_KERNEL);
+	if (!gsc_context)
+		return ERR_PTR(-ENOMEM);
 
 	/*
	 * NOTE: No need to lock the comp mutex here as it is already
	 * going to be taken before this function called
	 */
-	ret = intel_hdcp_gsc_initialize_message(xe, hdcp_message);
+	ret = intel_hdcp_gsc_initialize_message(xe, gsc_context);
 	if (ret) {
-		drm_err(&xe->drm, "Could not initialize hdcp_message\n");
-		kfree(hdcp_message);
-		return ret;
+		drm_err(&xe->drm, "Could not initialize gsc_context\n");
+		kfree(gsc_context);
+		gsc_context = ERR_PTR(ret);
 	}
 
-	xe->display.hdcp.hdcp_message = hdcp_message;
-	return ret;
-}
-
-static const struct i915_hdcp_ops gsc_hdcp_ops = {
-	.initiate_hdcp2_session = intel_hdcp_gsc_initiate_session,
-	.verify_receiver_cert_prepare_km =
-				intel_hdcp_gsc_verify_receiver_cert_prepare_km,
-	.verify_hprime = intel_hdcp_gsc_verify_hprime,
-	.store_pairing_info = intel_hdcp_gsc_store_pairing_info,
-	.initiate_locality_check = intel_hdcp_gsc_initiate_locality_check,
-	.verify_lprime = intel_hdcp_gsc_verify_lprime,
-	.get_session_key = intel_hdcp_gsc_get_session_key,
-	.repeater_check_flow_prepare_ack =
-				intel_hdcp_gsc_repeater_check_flow_prepare_ack,
-	.verify_mprime = intel_hdcp_gsc_verify_mprime,
-	.enable_hdcp_authentication = intel_hdcp_gsc_enable_authentication,
-	.close_hdcp_session = intel_hdcp_gsc_close_session,
-};
-
-int intel_hdcp_gsc_init(struct xe_device *xe)
-{
-	struct i915_hdcp_arbiter *data;
-	int ret;
-
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
-
-	mutex_lock(&xe->display.hdcp.hdcp_mutex);
-	xe->display.hdcp.arbiter = data;
-	xe->display.hdcp.arbiter->hdcp_dev = xe->drm.dev;
-	xe->display.hdcp.arbiter->ops = &gsc_hdcp_ops;
-	ret = intel_hdcp_gsc_hdcp2_init(xe);
-	if (ret)
-		kfree(data);
-
-	mutex_unlock(&xe->display.hdcp.hdcp_mutex);
-
-	return ret;
+	return gsc_context;
 }
 
-void intel_hdcp_gsc_fini(struct xe_device *xe)
+void intel_hdcp_gsc_context_free(struct intel_hdcp_gsc_context *gsc_context)
 {
-	struct intel_hdcp_gsc_message *hdcp_message =
-					xe->display.hdcp.hdcp_message;
-	struct i915_hdcp_arbiter *arb = xe->display.hdcp.arbiter;
-
-	if (hdcp_message) {
-		xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo);
-		kfree(hdcp_message);
-		xe->display.hdcp.hdcp_message = NULL;
-	}
+	if (!gsc_context)
+		return;
 
-	kfree(arb);
-	xe->display.hdcp.arbiter = NULL;
+	xe_bo_unpin_map_no_vm(gsc_context->hdcp_bo);
+	kfree(gsc_context);
 }
 
 static int xe_gsc_send_sync(struct xe_device *xe,
-			    struct intel_hdcp_gsc_message *hdcp_message,
+			    struct intel_hdcp_gsc_context *gsc_context,
 			    u32 msg_size_in, u32 msg_size_out,
 			    u32 addr_out_off)
 {
-	struct xe_gt *gt = hdcp_message->hdcp_bo->tile->media_gt;
-	struct iosys_map *map = &hdcp_message->hdcp_bo->vmap;
+	struct xe_gt *gt = gsc_context->hdcp_bo->tile->media_gt;
+	struct iosys_map *map = &gsc_context->hdcp_bo->vmap;
 	struct xe_gsc *gsc = &gt->uc.gsc;
 	int ret;
 
-	ret = xe_gsc_pkt_submit_kernel(gsc, hdcp_message->hdcp_cmd_in, msg_size_in,
-				       hdcp_message->hdcp_cmd_out, msg_size_out);
+	ret = xe_gsc_pkt_submit_kernel(gsc, gsc_context->hdcp_cmd_in, msg_size_in,
+				       gsc_context->hdcp_cmd_out, msg_size_out);
 	if (ret) {
 		drm_err(&xe->drm, "failed to send gsc HDCP msg (%d)\n", ret);
 		return ret;
@@ -201,12 +155,12 @@ static int xe_gsc_send_sync(struct xe_device *xe,
 	return ret;
 }
 
-ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
-				size_t msg_in_len, u8 *msg_out,
-				size_t msg_out_len)
+ssize_t intel_hdcp_gsc_msg_send(struct intel_hdcp_gsc_context *gsc_context,
+				void *msg_in, size_t msg_in_len,
+				void *msg_out, size_t msg_out_len)
 {
+	struct xe_device *xe = gsc_context->xe;
 	const size_t max_msg_size = PAGE_SIZE - HDCP_GSC_HEADER_SIZE;
-	struct intel_hdcp_gsc_message *hdcp_message;
 	u64 host_session_id;
 	u32 msg_size_in, msg_size_out;
 	u32 addr_out_off, addr_in_wr_off = 0;
@@ -219,15 +173,14 @@ ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
 	msg_size_in = msg_in_len + HDCP_GSC_HEADER_SIZE;
 	msg_size_out = msg_out_len + HDCP_GSC_HEADER_SIZE;
-	hdcp_message = xe->display.hdcp.hdcp_message;
 	addr_out_off = PAGE_SIZE;
 
 	host_session_id = xe_gsc_create_host_session_id();
 	xe_pm_runtime_get_noresume(xe);
-	addr_in_wr_off = xe_gsc_emit_header(xe, &hdcp_message->hdcp_bo->vmap,
+	addr_in_wr_off = xe_gsc_emit_header(xe, &gsc_context->hdcp_bo->vmap,
 					    addr_in_wr_off, HECI_MEADDRESS_HDCP,
 					    host_session_id, msg_in_len);
-	xe_map_memcpy_to(xe, &hdcp_message->hdcp_bo->vmap, addr_in_wr_off,
+	xe_map_memcpy_to(xe, &gsc_context->hdcp_bo->vmap, addr_in_wr_off,
 			 msg_in, msg_in_len);
 	/*
	 * Keep sending request in case the pending bit is set no need to add
@@ -236,7 +189,7 @@ ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
	 * 20 times each message 50 ms apart
	 */
 	do {
-		ret = xe_gsc_send_sync(xe, hdcp_message, msg_size_in, msg_size_out,
+		ret = xe_gsc_send_sync(xe, gsc_context, msg_size_in, msg_size_out,
 				       addr_out_off);
 
 		/* Only try again if gsc says so */
@@ -250,7 +203,7 @@ ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
 	if (ret)
 		goto out;
 
-	xe_map_memcpy_from(xe, msg_out, &hdcp_message->hdcp_bo->vmap,
+	xe_map_memcpy_from(xe, msg_out, &gsc_context->hdcp_bo->vmap,
 			   addr_out_off + HDCP_GSC_HEADER_SIZE,
 			   msg_out_len);
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index a50ab9eae40a..6502b8274173 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -8,7 +8,9 @@
 
 #include "regs/xe_gtt_defs.h"
 #include "xe_ggtt.h"
+#include "xe_mmio.h"
 
+#include "i915_reg.h"
 #include "intel_atomic_plane.h"
 #include "intel_crtc.h"
 #include "intel_display.h"
@@ -22,6 +24,21 @@
 
 #include <generated/xe_wa_oob.h>
 
+void intel_plane_initial_vblank_wait(struct intel_crtc *crtc)
+{
+	/* Early xe has no irq */
+	struct xe_device *xe = to_xe_device(crtc->base.dev);
+	struct xe_reg pipe_frmtmstmp = XE_REG(i915_mmio_reg_offset(PIPE_FRMTMSTMP(crtc->pipe)));
+	u32 timestamp;
+	int ret;
+
+	timestamp = xe_mmio_read32(xe_root_tile_mmio(xe), pipe_frmtmstmp);
+
+	ret = xe_mmio_wait32_not(xe_root_tile_mmio(xe), pipe_frmtmstmp, ~0U, timestamp, 40000U, &timestamp, false);
+	if (ret < 0)
+		drm_warn(&xe->drm, "waiting for early vblank failed with %i\n", ret);
+}
+
 static bool
 intel_reuse_initial_plane_obj(struct intel_crtc *this,
 			      const struct intel_initial_plane_config plane_configs[],
@@ -66,7 +83,7 @@ initial_plane_bo(struct xe_device *xe,
 	if (plane_config->size == 0)
 		return NULL;
 
-	flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT;
+	flags = XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT;
 
 	base = round_down(plane_config->base, page_size);
 	if (IS_DGFX(xe)) {
@@ -170,7 +187,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
 		return false;
 
 	if (intel_framebuffer_init(to_intel_framebuffer(fb),
-				   bo, &mode_cmd)) {
+				   &bo->ttm.base, &mode_cmd)) {
 		drm_dbg_kms(&xe->drm, "intel fb init failed\n");
 		goto err_bo;
 	}
@@ -194,8 +211,6 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
 		to_intel_plane(crtc->base.primary);
 	struct intel_plane_state *plane_state =
 		to_intel_plane_state(plane->base.state);
-	struct intel_crtc_state *crtc_state =
-		to_intel_crtc_state(crtc->base.state);
 	struct drm_framebuffer *fb;
 	struct i915_vma *vma;
 
@@ -217,7 +232,7 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
 			   plane_state->uapi.rotation, &plane_state->view);
 
 	vma = intel_fb_pin_to_ggtt(fb, &plane_state->view.gtt,
-				   0, 0, false, &plane_state->flags);
+				   0, 0, 0, false, &plane_state->flags);
 	if (IS_ERR(vma))
 		goto nofb;
 
@@ -241,14 +256,6 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
 	atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
 
 	plane_config->vma = vma;
-
-	/*
-	 * Flip to the newly created mapping ASAP, so we can re-use the
-	 * first part of GGTT for WOPCM, prevent flickering, and prevent
-	 * the lookup of sysmem scratch pages.
-	 */
-	plane->check_plane(crtc_state, plane_state);
-	plane->async_flip(plane, crtc_state, plane_state, true);
 	return;
 
 nofb:
@@ -275,12 +282,12 @@ static void plane_config_fini(struct intel_initial_plane_config *plane_config)
 	}
 }
 
-void intel_initial_plane_config(struct drm_i915_private *i915)
+void intel_initial_plane_config(struct intel_display *display)
 {
 	struct intel_initial_plane_config plane_configs[I915_MAX_PIPES] = {};
 	struct intel_crtc *crtc;
 
-	for_each_intel_crtc(&i915->drm, crtc) {
+	for_each_intel_crtc(display->drm, crtc) {
 		struct intel_initial_plane_config *plane_config =
 			&plane_configs[crtc->pipe];
 
@@ -294,7 +301,7 @@ void intel_initial_plane_config(struct drm_i915_private *i915)
 		 * can even allow for smooth boot transitions if the BIOS
 		 * fb is large enough for the active pipe configuration.
 		 */
-		i915->display.funcs.display->get_initial_plane_config(crtc, plane_config);
+		display->funcs.display->get_initial_plane_config(crtc, plane_config);
 
 		/*
		 * If the fb is shared between multiple heads, we'll
@@ -302,8 +309,8 @@ void intel_initial_plane_config(struct drm_i915_private *i915)
		 */
 		intel_find_initial_plane_obj(crtc, plane_configs);
 
-		if (i915->display.funcs.display->fixup_initial_plane_config(crtc, plane_config))
-			intel_crtc_wait_for_next_vblank(crtc);
+		if (display->funcs.display->fixup_initial_plane_config(crtc, plane_config))
+			intel_plane_initial_vblank_wait(crtc);
 
 		plane_config_fini(plane_config);
 	}
diff --git a/drivers/gpu/drm/xe/display/xe_tdf.c b/drivers/gpu/drm/xe/display/xe_tdf.c
index 2c0d4e144e09..2a7fccbeb1d5 100644
--- a/drivers/gpu/drm/xe/display/xe_tdf.c
+++ b/drivers/gpu/drm/xe/display/xe_tdf.c
@@ -7,7 +7,9 @@
 #include "intel_display_types.h"
 #include "intel_tdf.h"
 
-void intel_td_flush(struct drm_i915_private *i915)
+void intel_td_flush(struct intel_display *display)
 {
-	xe_device_td_flush(i915);
+	struct xe_device *xe = to_xe_device(display->drm);
+
+	xe_device_td_flush(xe);
 }
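
Aside: the i915_irq.c hunk at the top replaces the three loose i915_reg_t parameters of gen3_irq_reset()/gen3_irq_init() with a single struct i915_irq_regs bundle (and adds struct i915_error_regs for the EMR/EIR pair). Below is a minimal standalone C sketch of that refactor pattern; the irq_regs/mmio_write()/mmio_read() names and the register offsets are invented for illustration and are not the kernel's MMIO API, and the posting reads of the real code are omitted.

	#include <stdint.h>
	#include <stdio.h>

	/* Group the related register offsets in one struct, as the patch
	 * does with struct i915_irq_regs { imr, ier, iir }. */
	struct irq_regs {
		uint32_t imr;	/* interrupt mask register offset */
		uint32_t ier;	/* interrupt enable register offset */
		uint32_t iir;	/* interrupt identity register offset */
	};

	static uint32_t mmio[64];	/* toy register file standing in for the uncore */

	static void mmio_write(uint32_t reg, uint32_t val) { mmio[reg / 4] = val; }
	static uint32_t mmio_read(uint32_t reg) { return mmio[reg / 4]; }

	/* Mirrors the shape of gen2_irq_reset(): mask all interrupts, disable
	 * them, then clear IIR twice since it can queue up two events. */
	static void irq_reset(struct irq_regs regs)
	{
		mmio_write(regs.imr, 0xffffffff);
		mmio_write(regs.ier, 0);
		mmio_write(regs.iir, 0xffffffff);
		mmio_write(regs.iir, 0xffffffff);
	}

	int main(void)
	{
		/* Hypothetical offsets; the real values come from i915_reg.h. */
		struct irq_regs de = { .imr = 0x04, .ier = 0x0c, .iir = 0x08 };

		irq_reset(de);
		printf("IMR=%#x IER=%#x IIR=%#x\n",
		       mmio_read(de.imr), mmio_read(de.ier), mmio_read(de.iir));
		return 0;
	}

The benefit is the one visible throughout the hunk: call sites pass a single regs bundle instead of three loose registers, so adding gen2_error_reset()/gen2_error_init() for EMR/EIR reuses the same shape.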