Diffstat (limited to 'drivers/gpu/drm/i915/intel_pm.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c  110
1 file changed, 51 insertions(+), 59 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d10e6735771f..fb4afaa8036f 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -30,6 +30,7 @@
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
+#include <drm/i915_powerwell.h>
#define FORCEWAKE_ACK_TIMEOUT_MS 2
@@ -86,7 +87,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
int plane, i;
u32 fbc_ctl, fbc_ctl2;
- cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+ cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
if (fb->pitches[0] < cfb_pitch)
cfb_pitch = fb->pitches[0];
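
Note: this hunk and the fbc.* hunks below fold the scattered cfb_* fields of drm_i915_private into one dev_priv->fbc substruct. A minimal sketch of the implied layout, inferred from the accesses in this diff (field types are assumptions; the real definition lives in i915_drv.h):

    struct i915_fbc {
            unsigned long size;                 /* was cfb_size */
            int plane;                          /* was cfb_plane */
            int fb_id;                          /* was cfb_fb */
            int y;                              /* was cfb_y */
            struct intel_fbc_work *fbc_work;    /* was fbc_work */
            enum no_fbc_reason no_fbc_reason;   /* was no_fbc_reason */
    };
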
@@ -217,7 +218,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
- I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
+ I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
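
Note: here and in several hunks below, direct obj->gtt_offset reads become calls to the i915_gem_obj_ggtt_offset() helper, which resolves the offset from the object's binding in the global GTT. A sketch of the shape of such a helper, assuming i915_gem_obj_offset()/obj_to_ggtt() helpers of this era (the actual inline is in i915_drv.h):

    static inline unsigned long
    i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
    {
            /* Look the offset up from the GGTT binding instead of
             * trusting a cached obj->gtt_offset field. */
            return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
    }
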
@@ -274,7 +275,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset);
+ I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj));
I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
IVB_DPFC_CTL_FENCE_EN |
@@ -325,7 +326,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
struct drm_i915_private *dev_priv = dev->dev_private;
mutex_lock(&dev->struct_mutex);
- if (work == dev_priv->fbc_work) {
+ if (work == dev_priv->fbc.fbc_work) {
/* Double check that we haven't switched fb without cancelling
* the prior work.
*/
@@ -333,12 +334,12 @@ static void intel_fbc_work_fn(struct work_struct *__work)
dev_priv->display.enable_fbc(work->crtc,
work->interval);
- dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
- dev_priv->cfb_fb = work->crtc->fb->base.id;
- dev_priv->cfb_y = work->crtc->y;
+ dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
+ dev_priv->fbc.fb_id = work->crtc->fb->base.id;
+ dev_priv->fbc.y = work->crtc->y;
}
- dev_priv->fbc_work = NULL;
+ dev_priv->fbc.fbc_work = NULL;
}
mutex_unlock(&dev->struct_mutex);
@@ -347,28 +348,28 @@ static void intel_fbc_work_fn(struct work_struct *__work)
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
- if (dev_priv->fbc_work == NULL)
+ if (dev_priv->fbc.fbc_work == NULL)
return;
DRM_DEBUG_KMS("cancelling pending FBC enable\n");
/* Synchronisation is provided by struct_mutex and checking of
- * dev_priv->fbc_work, so we can perform the cancellation
+ * dev_priv->fbc.fbc_work, so we can perform the cancellation
* entirely asynchronously.
*/
- if (cancel_delayed_work(&dev_priv->fbc_work->work))
+ if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
/* tasklet was killed before being run, clean up */
- kfree(dev_priv->fbc_work);
+ kfree(dev_priv->fbc.fbc_work);
/* Mark the work as no longer wanted so that if it does
* wake up (because the work was already running and waiting
* for our mutex), it will discover that it is no longer
* necessary to run.
*/
- dev_priv->fbc_work = NULL;
+ dev_priv->fbc.fbc_work = NULL;
}
-void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct intel_fbc_work *work;
struct drm_device *dev = crtc->dev;
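
Note on the cancellation protocol in the two hunks above: cancel_delayed_work() returns true only if the callback had not yet started, so exactly one side frees the allocation; clearing dev_priv->fbc.fbc_work under struct_mutex tells an already-running work function to discard itself. A condensed sketch (names from the patch, bodies simplified; that the work function frees itself is assumed here):

    static void cancel_pending_fbc(struct drm_i915_private *dev_priv)
    {
            struct intel_fbc_work *work = dev_priv->fbc.fbc_work;

            if (work == NULL)
                    return;
            if (cancel_delayed_work(&work->work))
                    kfree(work);            /* never ran: we own it */
            dev_priv->fbc.fbc_work = NULL;  /* already running: the work fn
                                             * sees the mismatch, does nothing,
                                             * and frees itself */
    }
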
@@ -381,6 +382,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
work = kzalloc(sizeof *work, GFP_KERNEL);
if (work == NULL) {
+ DRM_ERROR("Failed to allocate FBC work structure\n");
dev_priv->display.enable_fbc(crtc, interval);
return;
}
@@ -390,9 +392,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
work->interval = interval;
INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
- dev_priv->fbc_work = work;
-
- DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
+ dev_priv->fbc.fbc_work = work;
/* Delay the actual enabling to let pageflipping cease and the
* display to settle before starting the compression. Note that
@@ -418,7 +418,7 @@ void intel_disable_fbc(struct drm_device *dev)
return;
dev_priv->display.disable_fbc(dev);
- dev_priv->cfb_plane = -1;
+ dev_priv->fbc.plane = -1;
}
/**
@@ -448,7 +448,6 @@ void intel_update_fbc(struct drm_device *dev)
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
- int enable_fbc;
unsigned int max_hdisplay, max_vdisplay;
if (!i915_powersave)
@@ -471,7 +470,8 @@ void intel_update_fbc(struct drm_device *dev)
!to_intel_crtc(tmp_crtc)->primary_disabled) {
if (crtc) {
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+ dev_priv->fbc.no_fbc_reason =
+ FBC_MULTIPLE_PIPES;
goto out_disable;
}
crtc = tmp_crtc;
@@ -480,7 +480,7 @@ void intel_update_fbc(struct drm_device *dev)
if (!crtc || crtc->fb == NULL) {
DRM_DEBUG_KMS("no output, disabling\n");
- dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
+ dev_priv->fbc.no_fbc_reason = FBC_NO_OUTPUT;
goto out_disable;
}
@@ -489,23 +489,22 @@ void intel_update_fbc(struct drm_device *dev)
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
- enable_fbc = i915_enable_fbc;
- if (enable_fbc < 0) {
- DRM_DEBUG_KMS("fbc set to per-chip default\n");
- enable_fbc = 1;
- if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
- enable_fbc = 0;
+ if (i915_enable_fbc < 0 &&
+ INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
+ DRM_DEBUG_KMS("disabled per chip default\n");
+ dev_priv->fbc.no_fbc_reason = FBC_CHIP_DEFAULT;
+ goto out_disable;
}
- if (!enable_fbc) {
+ if (!i915_enable_fbc) {
DRM_DEBUG_KMS("fbc disabled per module param\n");
- dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
+ dev_priv->fbc.no_fbc_reason = FBC_MODULE_PARAM;
goto out_disable;
}
if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
(crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
DRM_DEBUG_KMS("mode incompatible with compression, "
"disabling\n");
- dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
+ dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED_MODE;
goto out_disable;
}
@@ -519,13 +518,13 @@ void intel_update_fbc(struct drm_device *dev)
if ((crtc->mode.hdisplay > max_hdisplay) ||
(crtc->mode.vdisplay > max_vdisplay)) {
DRM_DEBUG_KMS("mode too large for compression, disabling\n");
- dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
+ dev_priv->fbc.no_fbc_reason = FBC_MODE_TOO_LARGE;
goto out_disable;
}
if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
intel_crtc->plane != 0) {
DRM_DEBUG_KMS("plane not 0, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_BAD_PLANE;
+ dev_priv->fbc.no_fbc_reason = FBC_BAD_PLANE;
goto out_disable;
}
@@ -535,7 +534,7 @@ void intel_update_fbc(struct drm_device *dev)
if (obj->tiling_mode != I915_TILING_X ||
obj->fence_reg == I915_FENCE_REG_NONE) {
DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_NOT_TILED;
+ dev_priv->fbc.no_fbc_reason = FBC_NOT_TILED;
goto out_disable;
}
@@ -545,7 +544,7 @@ void intel_update_fbc(struct drm_device *dev)
if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+ dev_priv->fbc.no_fbc_reason = FBC_STOLEN_TOO_SMALL;
goto out_disable;
}
@@ -554,9 +553,9 @@ void intel_update_fbc(struct drm_device *dev)
* cannot be unpinned (and have its GTT offset and fence revoked)
* without first being decoupled from the scanout and FBC disabled.
*/
- if (dev_priv->cfb_plane == intel_crtc->plane &&
- dev_priv->cfb_fb == fb->base.id &&
- dev_priv->cfb_y == crtc->y)
+ if (dev_priv->fbc.plane == intel_crtc->plane &&
+ dev_priv->fbc.fb_id == fb->base.id &&
+ dev_priv->fbc.y == crtc->y)
return;
if (intel_fbc_enabled(dev)) {
@@ -2468,8 +2467,8 @@ static void hsw_compute_wm_results(struct drm_device *dev,
/* Find the result with the highest level enabled. Check for enable_fbc_wm in
* case both are at the same level. Prefer r1 in case they're the same. */
-struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
- struct hsw_wm_values *r2)
+static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
+ struct hsw_wm_values *r2)
{
int i, val_r1 = 0, val_r2 = 0;
@@ -3076,19 +3075,12 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
*/
static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
{
- unsigned long timeout = jiffies + msecs_to_jiffies(10);
u32 pval;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- do {
- pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- if (time_after(jiffies, timeout)) {
- DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
- break;
- }
- udelay(10);
- } while (pval & 1);
+ if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10))
+ DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
pval >>= 8;
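
Note: the open-coded jiffies/udelay poll is replaced by the driver's wait_for() macro, and the magic "& 1" becomes the named GENFREQSTATUS bit. The real macro lives in intel_drv.h and is jiffies-based; a self-contained userspace sketch of the same shape, for illustration only:

    #include <unistd.h>

    /* Poll COND for up to TIMEOUT_MS milliseconds in ~10 us steps,
     * mirroring the udelay(10) of the removed loop; evaluates to 0 on
     * success and -1 on timeout. */
    #define wait_for(COND, TIMEOUT_MS) ({                           \
            int ret = -1;                                           \
            for (int i = 0; i < (TIMEOUT_MS) * 100; i++) {          \
                    if (COND) { ret = 0; break; }                   \
                    usleep(10);                                     \
            }                                                       \
            ret;                                                    \
    })

With that shape, the hunk reads: poll PUNIT_REG_GPU_FREQ_STS until GENFREQSTATUS clears, and give up (with a debug message) after 10 ms.
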
@@ -3143,9 +3135,9 @@ static void gen6_disable_rps(struct drm_device *dev)
* register (PMIMR) to mask PM interrupts. The only risk is in leaving
* stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
- spin_lock_irq(&dev_priv->rps.lock);
+ spin_lock_irq(&dev_priv->irq_lock);
dev_priv->rps.pm_iir = 0;
- spin_unlock_irq(&dev_priv->rps.lock);
+ spin_unlock_irq(&dev_priv->irq_lock);
I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
}
@@ -3162,9 +3154,9 @@ static void valleyview_disable_rps(struct drm_device *dev)
* register (PMIMR) to mask PM interrupts. The only risk is in leaving
* stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
- spin_lock_irq(&dev_priv->rps.lock);
+ spin_lock_irq(&dev_priv->irq_lock);
dev_priv->rps.pm_iir = 0;
- spin_unlock_irq(&dev_priv->rps.lock);
+ spin_unlock_irq(&dev_priv->irq_lock);
I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
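
Note on the rps.lock -> irq_lock switch in these RPS hunks: rps.pm_iir is also written by the interrupt handler, so both sides must serialize on the same lock, and the process-context side must disable local interrupts while holding it. The shape of the pattern, with illustrative bodies (the handler itself is in i915_irq.c):

    /* Interrupt-handler side: already in hardirq context, a plain
     * spin_lock suffices. */
    spin_lock(&dev_priv->irq_lock);
    dev_priv->rps.pm_iir |= iir & GEN6_PM_RPS_EVENTS;
    spin_unlock(&dev_priv->irq_lock);

    /* Process-context side, as in the hunks here: spin_lock_irq()
     * disables local interrupts so the handler cannot spin against
     * us on this CPU. */
    spin_lock_irq(&dev_priv->irq_lock);
    dev_priv->rps.pm_iir = 0;
    spin_unlock_irq(&dev_priv->irq_lock);
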
@@ -3329,13 +3321,13 @@ static void gen6_enable_rps(struct drm_device *dev)
/* requires MSI enabled */
I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) | GEN6_PM_RPS_EVENTS);
- spin_lock_irq(&dev_priv->rps.lock);
+ spin_lock_irq(&dev_priv->irq_lock);
/* FIXME: Our interrupt enabling sequence is bonghits.
* dev_priv->rps.pm_iir really should be 0 here. */
dev_priv->rps.pm_iir = 0;
I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
- spin_unlock_irq(&dev_priv->rps.lock);
+ spin_unlock_irq(&dev_priv->irq_lock);
/* unmask all PM interrupts */
I915_WRITE(GEN6_PMINTRMSK, 0);
@@ -3482,7 +3474,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
pcbr_offset,
- -1,
+ I915_GTT_OFFSET_NONE,
pctx_size);
goto out;
}
@@ -3609,10 +3601,10 @@ static void valleyview_enable_rps(struct drm_device *dev)
/* requires MSI enabled */
I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS);
- spin_lock_irq(&dev_priv->rps.lock);
+ spin_lock_irq(&dev_priv->irq_lock);
WARN_ON(dev_priv->rps.pm_iir != 0);
I915_WRITE(GEN6_PMIMR, 0);
- spin_unlock_irq(&dev_priv->rps.lock);
+ spin_unlock_irq(&dev_priv->irq_lock);
/* enable all PM interrupts */
I915_WRITE(GEN6_PMINTRMSK, 0);
@@ -3708,7 +3700,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
@@ -3731,7 +3723,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
return;
}
- I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
+ I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
}