Diffstat (limited to 'drivers/gpu/drm/i915/i915_drv.c')
-rw-r--r--   drivers/gpu/drm/i915/i915_drv.c | 320
1 file changed, 169 insertions(+), 151 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2f5209de0391..07c07d55398b 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -49,12 +49,14 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "i915_pmu.h"
+#include "i915_query.h"
 #include "i915_vgpu.h"
 #include "intel_drv.h"
 #include "intel_uc.h"
 
 static struct drm_driver driver;
 
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
 static unsigned int i915_load_fail_count;
 
 bool __i915_inject_load_failure(const char *func, int line)
@@ -70,6 +72,7 @@ bool __i915_inject_load_failure(const char *func, int line)
 
 	return false;
 }
+#endif
 
 #define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
 #define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
@@ -107,8 +110,12 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
 
 static bool i915_error_injected(struct drm_i915_private *dev_priv)
 {
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
 	return i915_modparams.inject_load_failure &&
 	       i915_load_fail_count == i915_modparams.inject_load_failure;
+#else
+	return false;
+#endif
 }
 
 #define i915_load_error(dev_priv, fmt, ...) \
@@ -116,10 +123,90 @@ static bool i915_error_injected(struct drm_i915_private *dev_priv)
 		      i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
 		      fmt, ##__VA_ARGS__)
 
+/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
+static enum intel_pch
+intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
+{
+	switch (id) {
+	case INTEL_PCH_IBX_DEVICE_ID_TYPE:
+		DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+		WARN_ON(!IS_GEN5(dev_priv));
+		return PCH_IBX;
+	case INTEL_PCH_CPT_DEVICE_ID_TYPE:
+		DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+		WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv));
+		return PCH_CPT;
+	case INTEL_PCH_PPT_DEVICE_ID_TYPE:
+		DRM_DEBUG_KMS("Found PantherPoint PCH\n");
+		WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv));
+		/* PantherPoint is CPT compatible */
+		return PCH_CPT;
+	case INTEL_PCH_LPT_DEVICE_ID_TYPE:
+		DRM_DEBUG_KMS("Found LynxPoint PCH\n");
+		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+		WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+		return PCH_LPT;
+	case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
+		DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+		WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+		return PCH_LPT;
+	case INTEL_PCH_WPT_DEVICE_ID_TYPE:
+		DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
+		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+		WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+		/* WildcatPoint is LPT compatible */
+		return PCH_LPT;
+	case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
+		DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
+		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+		WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+		/* WildcatPoint is LPT compatible */
+		return PCH_LPT;
+	case INTEL_PCH_SPT_DEVICE_ID_TYPE:
+		DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
+		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
+		return PCH_SPT;
+	case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
+		DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
+		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
+		return PCH_SPT;
+	case INTEL_PCH_KBP_DEVICE_ID_TYPE:
+		DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
+		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
+			!IS_COFFEELAKE(dev_priv));
+		return PCH_KBP;
+	case INTEL_PCH_CNP_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n"); + WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv)); + return PCH_CNP; + case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE: + DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n"); + WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv)); + return PCH_CNP; + case INTEL_PCH_ICP_DEVICE_ID_TYPE: + DRM_DEBUG_KMS("Found Ice Lake PCH\n"); + WARN_ON(!IS_ICELAKE(dev_priv)); + return PCH_ICP; + default: + return PCH_NONE; + } +} + +static bool intel_is_virt_pch(unsigned short id, + unsigned short svendor, unsigned short sdevice) +{ + return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE || + id == INTEL_PCH_P3X_DEVICE_ID_TYPE || + (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE && + svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET && + sdevice == PCI_SUBDEVICE_ID_QEMU)); +} -static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv) +static unsigned short +intel_virt_detect_pch(const struct drm_i915_private *dev_priv) { - enum intel_pch ret = PCH_NOP; + unsigned short id = 0; /* * In a virtualized passthrough environment we can be in a @@ -128,28 +215,25 @@ static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv) * make an educated guess as to which PCH is really there. */ - if (IS_GEN5(dev_priv)) { - ret = PCH_IBX; - DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n"); - } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) { - ret = PCH_CPT; - DRM_DEBUG_KMS("Assuming CougarPoint PCH\n"); - } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - ret = PCH_LPT; - if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)) - dev_priv->pch_id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE; - else - dev_priv->pch_id = INTEL_PCH_LPT_DEVICE_ID_TYPE; - DRM_DEBUG_KMS("Assuming LynxPoint PCH\n"); - } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { - ret = PCH_SPT; - DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n"); - } else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) { - ret = PCH_CNP; - DRM_DEBUG_KMS("Assuming CannonPoint PCH\n"); - } + if (IS_GEN5(dev_priv)) + id = INTEL_PCH_IBX_DEVICE_ID_TYPE; + else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) + id = INTEL_PCH_CPT_DEVICE_ID_TYPE; + else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)) + id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE; + else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + id = INTEL_PCH_LPT_DEVICE_ID_TYPE; + else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) + id = INTEL_PCH_SPT_DEVICE_ID_TYPE; + else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) + id = INTEL_PCH_CNP_DEVICE_ID_TYPE; + + if (id) + DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id); + else + DRM_DEBUG_KMS("Assuming no PCH\n"); - return ret; + return id; } static void intel_detect_pch(struct drm_i915_private *dev_priv) @@ -176,94 +260,31 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv) * of only checking the first one. 
 	 */
 	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
-		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
-			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+		unsigned short id;
+		enum intel_pch pch_type;
 
-			dev_priv->pch_id = id;
+		if (pch->vendor != PCI_VENDOR_ID_INTEL)
+			continue;
 
-			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
-				dev_priv->pch_type = PCH_IBX;
-				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
-				WARN_ON(!IS_GEN5(dev_priv));
-			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
-				dev_priv->pch_type = PCH_CPT;
-				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
-				WARN_ON(!IS_GEN6(dev_priv) &&
-					!IS_IVYBRIDGE(dev_priv));
-			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
-				/* PantherPoint is CPT compatible */
-				dev_priv->pch_type = PCH_CPT;
-				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
-				WARN_ON(!IS_GEN6(dev_priv) &&
-					!IS_IVYBRIDGE(dev_priv));
-			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
-				dev_priv->pch_type = PCH_LPT;
-				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
-				WARN_ON(!IS_HASWELL(dev_priv) &&
-					!IS_BROADWELL(dev_priv));
-				WARN_ON(IS_HSW_ULT(dev_priv) ||
-					IS_BDW_ULT(dev_priv));
-			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
-				dev_priv->pch_type = PCH_LPT;
-				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
-				WARN_ON(!IS_HASWELL(dev_priv) &&
-					!IS_BROADWELL(dev_priv));
-				WARN_ON(!IS_HSW_ULT(dev_priv) &&
-					!IS_BDW_ULT(dev_priv));
-			} else if (id == INTEL_PCH_WPT_DEVICE_ID_TYPE) {
-				/* WildcatPoint is LPT compatible */
-				dev_priv->pch_type = PCH_LPT;
-				DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
-				WARN_ON(!IS_HASWELL(dev_priv) &&
-					!IS_BROADWELL(dev_priv));
-				WARN_ON(IS_HSW_ULT(dev_priv) ||
-					IS_BDW_ULT(dev_priv));
-			} else if (id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE) {
-				/* WildcatPoint is LPT compatible */
-				dev_priv->pch_type = PCH_LPT;
-				DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
-				WARN_ON(!IS_HASWELL(dev_priv) &&
-					!IS_BROADWELL(dev_priv));
-				WARN_ON(!IS_HSW_ULT(dev_priv) &&
-					!IS_BDW_ULT(dev_priv));
-			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
-				dev_priv->pch_type = PCH_SPT;
-				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
-				WARN_ON(!IS_SKYLAKE(dev_priv) &&
-					!IS_KABYLAKE(dev_priv));
-			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
-				dev_priv->pch_type = PCH_SPT;
-				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
-				WARN_ON(!IS_SKYLAKE(dev_priv) &&
-					!IS_KABYLAKE(dev_priv));
-			} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
-				dev_priv->pch_type = PCH_KBP;
-				DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
-				WARN_ON(!IS_SKYLAKE(dev_priv) &&
-					!IS_KABYLAKE(dev_priv) &&
-					!IS_COFFEELAKE(dev_priv));
-			} else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) {
-				dev_priv->pch_type = PCH_CNP;
-				DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
-				WARN_ON(!IS_CANNONLAKE(dev_priv) &&
-					!IS_COFFEELAKE(dev_priv));
-			} else if (id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) {
-				dev_priv->pch_type = PCH_CNP;
-				DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
-				WARN_ON(!IS_CANNONLAKE(dev_priv) &&
-					!IS_COFFEELAKE(dev_priv));
-			} else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
-				   id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
-				   (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
-				    pch->subsystem_vendor ==
-					    PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
-				    pch->subsystem_device ==
-					    PCI_SUBDEVICE_ID_QEMU)) {
-				dev_priv->pch_type =
-					intel_virt_detect_pch(dev_priv);
-			} else
-				continue;
+		id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+		pch_type = intel_pch_type(dev_priv, id);
+		if (pch_type != PCH_NONE) {
+			dev_priv->pch_type = pch_type;
+			dev_priv->pch_id = id;
+			break;
+		} else if (intel_is_virt_pch(id, pch->subsystem_vendor,
+					     pch->subsystem_device)) {
+			id = intel_virt_detect_pch(dev_priv);
+			if (id) {
+				pch_type = intel_pch_type(dev_priv, id);
+				if (WARN_ON(pch_type == PCH_NONE))
+					pch_type = PCH_NOP;
+			} else {
+				pch_type = PCH_NOP;
+			}
+			dev_priv->pch_type = pch_type;
+			dev_priv->pch_id = id;
 			break;
 		}
 	}
@@ -273,8 +294,8 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
 	pci_dev_put(pch);
 }
 
-static int i915_getparam(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv)
+static int i915_getparam_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct pci_dev *pdev = dev_priv->drm.pdev;
@@ -368,13 +389,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		value = i915_gem_mmap_gtt_version();
 		break;
 	case I915_PARAM_HAS_SCHEDULER:
-		value = 0;
-		if (dev_priv->engine[RCS] && dev_priv->engine[RCS]->schedule) {
-			value |= I915_SCHEDULER_CAP_ENABLED;
-			value |= I915_SCHEDULER_CAP_PRIORITY;
-			if (HAS_LOGICAL_RING_PREEMPTION(dev_priv))
-				value |= I915_SCHEDULER_CAP_PREEMPTION;
-		}
+		value = dev_priv->caps.scheduler;
 		break;
 
 	case I915_PARAM_MMAP_VERSION:
@@ -414,7 +429,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 			return -ENODEV;
 		break;
 	case I915_PARAM_SUBSLICE_MASK:
-		value = INTEL_INFO(dev_priv)->sseu.subslice_mask;
+		value = INTEL_INFO(dev_priv)->sseu.subslice_mask[0];
 		if (!value)
 			return -ENODEV;
 		break;
@@ -622,7 +637,7 @@ static void i915_gem_fini(struct drm_i915_private *dev_priv)
 	i915_gem_contexts_fini(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
-	intel_uc_fini_wq(dev_priv);
+	intel_uc_fini_misc(dev_priv);
 	i915_gem_cleanup_userptr(dev_priv);
 	i915_gem_drain_freed_objects(dev_priv);
@@ -794,7 +809,7 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
 	/*
 	 * The i915 workqueue is primarily used for batched retirement of
 	 * requests (and thus managing bo) once the task has been completed
-	 * by the GPU. i915_gem_retire_requests() is called directly when we
+	 * by the GPU. i915_retire_requests() is called directly when we
 	 * need high-priority retirement, such as waiting for an explicit
 	 * bo.
 	 *
@@ -866,6 +881,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
 /**
  * i915_driver_init_early - setup state not requiring device access
  * @dev_priv: device private
+ * @ent: the matching pci_device_id
  *
  * Initialize everything that is a "SW-only" state, that is state not
  * requiring accessing the device or exposing the driver via kernel internal
@@ -891,11 +907,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 
 	BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
 		     sizeof(device_info->platform_mask) * BITS_PER_BYTE);
-	device_info->platform_mask = BIT(device_info->platform);
-
 	BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
-	device_info->gen_mask = BIT(device_info->gen - 1);
-
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->gpu_error.lock);
 	mutex_init(&dev_priv->backlight_lock);
@@ -1599,15 +1611,12 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct pci_dev *pdev = dev_priv->drm.pdev;
-	bool fw_csr;
 	int ret;
 
 	disable_rpm_wakeref_asserts(dev_priv);
 
 	intel_display_set_init_power(dev_priv, false);
 
-	fw_csr = !IS_GEN9_LP(dev_priv) && !hibernation &&
-		suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
 	/*
 	 * In case of firmware assisted context save/restore don't manually
 	 * deinit the power domains. This also means the CSR/DMC firmware will
@@ -1615,8 +1624,11 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 	 * also enable deeper system power states that would be blocked if the
 	 * firmware was inactive.
 	 */
-	if (!fw_csr)
+	if (IS_GEN9_LP(dev_priv) || hibernation || !suspend_to_idle(dev_priv) ||
+	    dev_priv->csr.dmc_payload == NULL) {
 		intel_power_domains_suspend(dev_priv);
+		dev_priv->power_domains_suspended = true;
+	}
 
 	ret = 0;
 	if (IS_GEN9_LP(dev_priv))
@@ -1628,8 +1640,10 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 
 	if (ret) {
 		DRM_ERROR("Suspend complete failed: %d\n", ret);
-		if (!fw_csr)
+		if (dev_priv->power_domains_suspended) {
 			intel_power_domains_init_hw(dev_priv, true);
+			dev_priv->power_domains_suspended = false;
+		}
 
 		goto out;
 	}
@@ -1650,8 +1664,6 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 	if (!(hibernation && INTEL_GEN(dev_priv) < 6))
 		pci_set_power_state(pdev, PCI_D3hot);
 
-	dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
-
 out:
 	enable_rpm_wakeref_asserts(dev_priv);
 
@@ -1818,8 +1830,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 	intel_uncore_resume_early(dev_priv);
 
 	if (IS_GEN9_LP(dev_priv)) {
-		if (!dev_priv->suspended_to_idle)
-			gen9_sanitize_dc_state(dev_priv);
+		gen9_sanitize_dc_state(dev_priv);
 		bxt_disable_dc9(dev_priv);
 	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		hsw_disable_pc8(dev_priv);
@@ -1827,8 +1838,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 
 	intel_uncore_sanitize(dev_priv);
 
-	if (IS_GEN9_LP(dev_priv) ||
-	    !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
+	if (dev_priv->power_domains_suspended)
 		intel_power_domains_init_hw(dev_priv, true);
 	else
 		intel_display_set_init_power(dev_priv, true);
@@ -1838,7 +1848,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 	enable_rpm_wakeref_asserts(dev_priv);
 
 out:
-	dev_priv->suspended_to_idle = false;
+	dev_priv->power_domains_suspended = false;
 
 	return ret;
 }
@@ -1900,7 +1910,6 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
 
 	ret = i915_gem_reset_prepare(i915);
 	if (ret) {
 		dev_err(i915->drm.dev, "GPU recovery failed\n");
-		intel_gpu_reset(i915, ALL_ENGINES);
 		goto taint;
 	}
@@ -1932,7 +1941,8 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
 	 */
 	ret = i915_ggtt_enable_hw(i915);
 	if (ret) {
-		DRM_ERROR("Failed to re-enable GGTT following reset %d\n", ret);
+		DRM_ERROR("Failed to re-enable GGTT following reset (%d)\n",
+			  ret);
 		goto error;
 	}
@@ -1949,7 +1959,8 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
 	 */
 	ret = i915_gem_init_hw(i915);
 	if (ret) {
-		DRM_ERROR("Failed hw init on reset %d\n", ret);
+		DRM_ERROR("Failed to initialise HW following reset (%d)\n",
+			  ret);
 		goto error;
 	}
@@ -1980,7 +1991,8 @@ taint:
 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
 error:
 	i915_gem_set_wedged(i915);
-	i915_gem_retire_requests(i915);
+	i915_retire_requests(i915);
+	intel_gpu_reset(i915, ALL_ENGINES);
 	goto finish;
 }
@@ -2006,7 +2018,7 @@ static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv,
 int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags)
 {
 	struct i915_gpu_error *error = &engine->i915->gpu_error;
-	struct drm_i915_gem_request *active_request;
+	struct i915_request *active_request;
 	int ret;
 
 	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
@@ -2562,7 +2574,7 @@ static int intel_runtime_suspend(struct device *kdev)
 	 */
 	i915_gem_runtime_suspend(dev_priv);
 
-	intel_guc_suspend(dev_priv);
+	intel_uc_suspend(dev_priv);
 
 	intel_runtime_pm_disable_interrupts(dev_priv);
 
@@ -2584,6 +2596,11 @@ static int intel_runtime_suspend(struct device *kdev)
 
 		intel_runtime_pm_enable_interrupts(dev_priv);
 
+		intel_uc_resume(dev_priv);
+
+		i915_gem_init_swizzling(dev_priv);
+		i915_gem_restore_fences(dev_priv);
+
 		enable_rpm_wakeref_asserts(dev_priv);
 
 		return ret;
@@ -2649,8 +2666,6 @@ static int intel_runtime_resume(struct device *kdev)
 	if (intel_uncore_unclaimed_mmio(dev_priv))
 		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
 
-	intel_guc_resume(dev_priv);
-
 	if (IS_GEN9_LP(dev_priv)) {
 		bxt_disable_dc9(dev_priv);
 		bxt_display_core_init(dev_priv, true);
@@ -2665,6 +2680,10 @@ static int intel_runtime_resume(struct device *kdev)
 
 	intel_uncore_runtime_resume(dev_priv);
 
+	intel_runtime_pm_enable_interrupts(dev_priv);
+
+	intel_uc_resume(dev_priv);
+
 	/*
 	 * No point of rolling back things in case of an error, as the best
 	 * we can do is to hope that things will still work (and disable RPM).
@@ -2672,8 +2691,6 @@ static int intel_runtime_resume(struct device *kdev)
 	i915_gem_init_swizzling(dev_priv);
 	i915_gem_restore_fences(dev_priv);
 
-	intel_runtime_pm_enable_interrupts(dev_priv);
-
 	/*
 	 * On VLV/CHV display interrupts are part of the display
 	 * power well, so hpd is reinitialized from there. For
@@ -2765,7 +2782,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
@@ -2777,8 +2794,8 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
@@ -2797,11 +2814,11 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
+	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
 	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
@@ -2814,6 +2831,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 };
 
 static struct drm_driver driver = {
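
Note on the I915_PARAM_HAS_SCHEDULER hunk above: the capability bits are now precomputed into dev_priv->caps.scheduler rather than rebuilt on every getparam call, so userspace sees the same values as before. A minimal sketch of the consumer side, assuming a 4.15+ i915_drm.h and using /dev/dri/renderD128 purely as an illustrative device node:

/* sched_caps.c - hedged sketch; reads the scheduler caps that
 * i915_getparam_ioctl() now serves from dev_priv->caps.scheduler. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR); /* assumed node */
	int caps = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_SCHEDULER,
		.value = &caps,
	};

	if (fd < 0 || ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 1;

	printf("scheduler: enabled=%d priority=%d preemption=%d\n",
	       !!(caps & I915_SCHEDULER_CAP_ENABLED),
	       !!(caps & I915_SCHEDULER_CAP_PRIORITY),
	       !!(caps & I915_SCHEDULER_CAP_PREEMPTION));
	return 0;
}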
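The new I915_QUERY entry at the bottom of the ioctl table is the uapi side of the added i915_query.h / i915_query_ioctl() plumbing. A hedged sketch of the two-pass calling convention used by the 4.17 implementation (a first call with item.length == 0 asks the kernel to report the required size; a second call fills the buffer, with per-item errors returned as a negative length). DRM_I915_QUERY_TOPOLOGY_INFO and the device path are illustrative assumptions, not part of this diff:

/* query_topology.c - hedged sketch of the two-pass DRM_IOCTL_I915_QUERY
 * pattern; assumes a 4.17+ kernel with i915 and /dev/dri/renderD128. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR); /* assumed node */
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
	};
	struct drm_i915_query query = {
		.num_items = 1,
		.items_ptr = (uintptr_t)&item,
	};

	/* Pass 1: item.length == 0, kernel writes back the required size. */
	if (fd < 0 || ioctl(fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0)
		return 1;

	/* Pass 2: same call again with a buffer of the reported size. */
	void *buf = calloc(1, item.length);
	item.data_ptr = (uintptr_t)buf;
	if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0)
		return 1;

	printf("topology blob: %d bytes\n", item.length);
	free(buf);
	return 0;
}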