Diffstat (limited to 'drivers/gpu/drm/i915/intel_pm.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c  182
1 file changed, 90 insertions(+), 92 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index f070d5d769ce..6e73125fc782 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2085,7 +2085,7 @@ static void intel_print_wm_latency(struct drm_device *dev,
}
}
-static void intel_setup_wm_latency(struct drm_device *dev)
+static void ilk_setup_wm_latency(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2907,9 +2907,9 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
* the hw runs at the minimal clock before selecting the desired
* frequency, if the down threshold expires in that window we will not
* receive a down interrupt. */
- limits = dev_priv->rps.max_delay << 24;
- if (val <= dev_priv->rps.min_delay)
- limits |= dev_priv->rps.min_delay << 16;
+ limits = dev_priv->rps.max_freq_softlimit << 24;
+ if (val <= dev_priv->rps.min_freq_softlimit)
+ limits |= dev_priv->rps.min_freq_softlimit << 16;
return limits;
}
@@ -2921,26 +2921,26 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
new_power = dev_priv->rps.power;
switch (dev_priv->rps.power) {
case LOW_POWER:
- if (val > dev_priv->rps.rpe_delay + 1 && val > dev_priv->rps.cur_delay)
+ if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
new_power = BETWEEN;
break;
case BETWEEN:
- if (val <= dev_priv->rps.rpe_delay && val < dev_priv->rps.cur_delay)
+ if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
new_power = LOW_POWER;
- else if (val >= dev_priv->rps.rp0_delay && val > dev_priv->rps.cur_delay)
+ else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
new_power = HIGH_POWER;
break;
case HIGH_POWER:
- if (val < (dev_priv->rps.rp1_delay + dev_priv->rps.rp0_delay) >> 1 && val < dev_priv->rps.cur_delay)
+ if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
new_power = BETWEEN;
break;
}
/* Max/min bins are special */
- if (val == dev_priv->rps.min_delay)
+ if (val == dev_priv->rps.min_freq_softlimit)
new_power = LOW_POWER;
- if (val == dev_priv->rps.max_delay)
+ if (val == dev_priv->rps.max_freq_softlimit)
new_power = HIGH_POWER;
if (new_power == dev_priv->rps.power)
return;
@@ -3014,10 +3014,10 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
struct drm_i915_private *dev_priv = dev->dev_private;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- WARN_ON(val > dev_priv->rps.max_delay);
- WARN_ON(val < dev_priv->rps.min_delay);
+ WARN_ON(val > dev_priv->rps.max_freq_softlimit);
+ WARN_ON(val < dev_priv->rps.min_freq_softlimit);
- if (val == dev_priv->rps.cur_delay) {
+ if (val == dev_priv->rps.cur_freq) {
/* min/max delay may still have been modified so be sure to
* write the limits value */
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
@@ -3045,7 +3045,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
POSTING_READ(GEN6_RPNSWREQ);
- dev_priv->rps.cur_delay = val;
+ dev_priv->rps.cur_freq = val;
trace_intel_gpu_freq_change(val * 50);
}
@@ -3065,7 +3065,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
* When we are idle. Drop to min voltage state.
*/
- if (dev_priv->rps.cur_delay <= dev_priv->rps.min_delay)
+ if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
return;
/* Mask turbo interrupt so that they will not come in between */
@@ -3082,10 +3082,10 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
return;
}
- dev_priv->rps.cur_delay = dev_priv->rps.min_delay;
+ dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
- dev_priv->rps.min_delay);
+ dev_priv->rps.min_freq_softlimit);
if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
& GENFREQSTATUS) == 0, 5))
@@ -3099,7 +3099,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
/* Unmask Up interrupts */
dev_priv->rps.rp_up_masked = true;
gen6_set_pm_mask(dev_priv, GEN6_PM_RP_DOWN_THRESHOLD,
- dev_priv->rps.min_delay);
+ dev_priv->rps.min_freq_softlimit);
}
void gen6_rps_idle(struct drm_i915_private *dev_priv)
@@ -3111,7 +3111,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
if (IS_VALLEYVIEW(dev))
vlv_set_rps_idle(dev_priv);
else
- gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
dev_priv->rps.last_adj = 0;
}
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3124,9 +3124,9 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
if (IS_VALLEYVIEW(dev))
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
else
- gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
dev_priv->rps.last_adj = 0;
}
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3137,20 +3137,20 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
struct drm_i915_private *dev_priv = dev->dev_private;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- WARN_ON(val > dev_priv->rps.max_delay);
- WARN_ON(val < dev_priv->rps.min_delay);
+ WARN_ON(val > dev_priv->rps.max_freq_softlimit);
+ WARN_ON(val < dev_priv->rps.min_freq_softlimit);
DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
- dev_priv->rps.cur_delay,
+ vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+ dev_priv->rps.cur_freq,
vlv_gpu_freq(dev_priv, val), val);
- if (val == dev_priv->rps.cur_delay)
+ if (val == dev_priv->rps.cur_freq)
return;
vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
- dev_priv->rps.cur_delay = val;
+ dev_priv->rps.cur_freq = val;
trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
}
@@ -3292,8 +3292,8 @@ static void gen8_enable_rps(struct drm_device *dev)
/* Docs recommend 900MHz, and 300 MHz respectively */
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- dev_priv->rps.max_delay << 24 |
- dev_priv->rps.min_delay << 16);
+ dev_priv->rps.max_freq_softlimit << 24 |
+ dev_priv->rps.min_freq_softlimit << 16);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
@@ -3324,9 +3324,9 @@ static void gen6_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
- u32 rp_state_cap, hw_max, hw_min;
+ u32 rp_state_cap;
u32 gt_perf_status;
- u32 rc6vids, pcu_mbox, rc6_mask = 0;
+ u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
int i, ret;
@@ -3352,20 +3352,23 @@ static void gen6_enable_rps(struct drm_device *dev)
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
- /* In units of 50MHz */
- dev_priv->rps.hw_max = hw_max = rp_state_cap & 0xff;
- hw_min = (rp_state_cap >> 16) & 0xff;
- dev_priv->rps.rp1_delay = (rp_state_cap >> 8) & 0xff;
- dev_priv->rps.rp0_delay = (rp_state_cap >> 0) & 0xff;
- dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
- dev_priv->rps.cur_delay = 0;
+ /* All of these values are in units of 50MHz */
+ dev_priv->rps.cur_freq = 0;
+ /* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
+ dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
+ dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
+ dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
+ /* XXX: only BYT has a special efficient freq */
+ dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
+ /* hw_max = RP0 until we check for overclocking */
+ dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
/* Preserve min/max settings in case of re-init */
- if (dev_priv->rps.max_delay == 0)
- dev_priv->rps.max_delay = hw_max;
+ if (dev_priv->rps.max_freq_softlimit == 0)
+ dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
- if (dev_priv->rps.min_delay == 0)
- dev_priv->rps.min_delay = hw_min;
+ if (dev_priv->rps.min_freq_softlimit == 0)
+ dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -3414,21 +3417,19 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
- if (!ret) {
- pcu_mbox = 0;
- ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
- if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
- DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
- (dev_priv->rps.max_delay & 0xff) * 50,
- (pcu_mbox & 0xff) * 50);
- dev_priv->rps.hw_max = pcu_mbox & 0xff;
- }
- } else {
+ if (ret)
DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
+
+ ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
+ if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
+ DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
+ (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
+ (pcu_mbox & 0xff) * 50);
+ dev_priv->rps.max_freq = pcu_mbox & 0xff;
}
dev_priv->rps.power = HIGH_POWER; /* force a reset */
- gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
gen6_enable_rps_interrupts(dev);
@@ -3484,9 +3485,9 @@ void gen6_update_ring_freq(struct drm_device *dev)
* to use for memory access. We do this by specifying the IA frequency
* the PCU should use as a reference to determine the ring frequency.
*/
- for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
+ for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
gpu_freq--) {
- int diff = dev_priv->rps.max_delay - gpu_freq;
+ int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0;
if (INTEL_INFO(dev)->gen >= 8) {
@@ -3597,7 +3598,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
- u32 gtfifodbg, val, hw_max, hw_min, rc6_mode = 0;
+ u32 gtfifodbg, val, rc6_mode = 0;
int i;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -3652,38 +3653,39 @@ static void valleyview_enable_rps(struct drm_device *dev)
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
- dev_priv->rps.cur_delay = (val >> 8) & 0xff;
+ dev_priv->rps.cur_freq = (val >> 8) & 0xff;
DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
- dev_priv->rps.cur_delay);
+ vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+ dev_priv->rps.cur_freq);
- dev_priv->rps.hw_max = hw_max = valleyview_rps_max_freq(dev_priv);
+ dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
+ dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, hw_max),
- hw_max);
+ vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
+ dev_priv->rps.max_freq);
- dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
+ dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
- dev_priv->rps.rpe_delay);
+ vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ dev_priv->rps.efficient_freq);
- hw_min = valleyview_rps_min_freq(dev_priv);
+ dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, hw_min),
- hw_min);
+ vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+ dev_priv->rps.min_freq);
/* Preserve min/max settings in case of re-init */
- if (dev_priv->rps.max_delay == 0)
- dev_priv->rps.max_delay = hw_max;
+ if (dev_priv->rps.max_freq_softlimit == 0)
+ dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
- if (dev_priv->rps.min_delay == 0)
- dev_priv->rps.min_delay = hw_min;
+ if (dev_priv->rps.min_freq_softlimit == 0)
+ dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
- dev_priv->rps.rpe_delay);
+ vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ dev_priv->rps.efficient_freq);
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
dev_priv->rps.rp_up_masked = false;
dev_priv->rps.rp_down_masked = false;
@@ -4124,7 +4126,7 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
assert_spin_locked(&mchdev_lock);
- pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
+ pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
pxvid = (pxvid >> 24) & 0x7f;
ext_v = pvid_to_extvid(dev_priv, pxvid);
@@ -5345,8 +5347,6 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
bool is_enabled, enable_requested;
uint32_t tmp;
- WARN_ON(dev_priv->pc8.enabled);
-
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
@@ -5391,7 +5391,6 @@ static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- hsw_disable_package_c8(dev_priv);
hsw_set_power_well(dev_priv, power_well, true);
}
@@ -5399,7 +5398,6 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
hsw_set_power_well(dev_priv, power_well, false);
- hsw_enable_package_c8(dev_priv);
}
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
@@ -5577,6 +5575,8 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well;
int i;
+ intel_runtime_pm_get(dev_priv);
+
power_domains = &dev_priv->power_domains;
mutex_lock(&power_domains->lock);
@@ -5621,6 +5621,8 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
}
mutex_unlock(&power_domains->lock);
+
+ intel_runtime_pm_put(dev_priv);
}
static struct i915_power_domains *hsw_pwr;
@@ -5884,15 +5886,14 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
intel_power_domains_resume(dev_priv);
}
-/* Disables PC8 so we can use the GMBUS and DP AUX interrupts. */
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
{
- hsw_disable_package_c8(dev_priv);
+ intel_runtime_pm_get(dev_priv);
}
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
{
- hsw_enable_package_c8(dev_priv);
+ intel_runtime_pm_put(dev_priv);
}
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
@@ -5924,8 +5925,6 @@ void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
- dev_priv->pm.suspended = false;
-
if (!HAS_RUNTIME_PM(dev))
return;
@@ -5934,6 +5933,8 @@ void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
pm_runtime_mark_last_busy(device);
pm_runtime_use_autosuspend(device);
+
+ pm_runtime_put_autosuspend(device);
}
void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
@@ -5985,7 +5986,7 @@ void intel_init_pm(struct drm_device *dev)
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
- intel_setup_wm_latency(dev);
+ ilk_setup_wm_latency(dev);
if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
@@ -6156,12 +6157,9 @@ void intel_pm_setup(struct drm_device *dev)
mutex_init(&dev_priv->rps.hw_lock);
- mutex_init(&dev_priv->pc8.lock);
- dev_priv->pc8.requirements_met = false;
- dev_priv->pc8.irqs_disabled = false;
- dev_priv->pc8.enabled = false;
- dev_priv->pc8.disable_count = 1; /* requirements_met */
- INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
intel_gen6_powersave_work);
+
+ dev_priv->pm.suspended = false;
+ dev_priv->pm.irqs_disabled = false;
}