Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Kconfig                  |   12
-rw-r--r--  drivers/gpu/drm/i915/Makefile                 |    1
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c        |    3
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c           |  425
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c               |   73
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c               |   74
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h               |  189
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c               |   93
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c       |  149
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c    |  308
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c           |   55
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h           |    5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c  |  161
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c        |  104
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c       |  149
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c         |   89
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c               |  624
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c            |   15
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h               |  500
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c             |   57
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c             |    3
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c              |   38
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c              |  474
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c          | 1459
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c               |  678
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c           |  548
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h              |  154
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c              |   28
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_panel_vbt.c    |    8
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c              |   10
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c            |   37
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c             |  102
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c              |   54
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c             |   20
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c         |    3
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c          |   22
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c            |  160
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c               |  897
-rw-r--r--  drivers/gpu/drm/i915/intel_renderstate.h      |    2
-rw-r--r--  drivers/gpu/drm/i915/intel_renderstate_gen6.c |    1
-rw-r--r--  drivers/gpu/drm/i915/intel_renderstate_gen7.c |    1
-rw-r--r--  drivers/gpu/drm/i915/intel_renderstate_gen8.c |    1
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c       |  444
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h       |   90
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c             |   10
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c           |   36
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c               |    2
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c           |  212
48 files changed, 6157 insertions(+), 2423 deletions(-)
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 437e1824d0bf..4e39ab34eb1c 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -69,15 +69,3 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
option changes the default for that module option.
If in doubt, say "N".
-
-config DRM_I915_UMS
- bool "Enable userspace modesetting on Intel hardware (DEPRECATED)"
- depends on DRM_I915 && BROKEN
- default n
- help
- Choose this option if you still need userspace modesetting.
-
- Userspace modesetting is deprecated for quite some time now, so
- enable this only if you have ancient versions of the DDX drivers.
-
- If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index cad1683d8bb5..91bd167e1cb7 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -59,6 +59,7 @@ i915-y += dvo_ch7017.o \
intel_crt.o \
intel_ddi.o \
intel_dp.o \
+ intel_dp_mst.o \
intel_dsi_cmd.o \
intel_dsi.o \
intel_dsi_pll.o \
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 9d7954366bd2..dea99d92fb4a 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -426,6 +426,9 @@ static const u32 gen7_render_regs[] = {
GEN7_SO_WRITE_OFFSET(1),
GEN7_SO_WRITE_OFFSET(2),
GEN7_SO_WRITE_OFFSET(3),
+ GEN7_L3SQCREG1,
+ GEN7_L3CNTLREG2,
+ GEN7_L3CNTLREG3,
};
static const u32 gen7_blt_regs[] = {
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b8c689202c40..9e737b771c40 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -170,11 +170,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
}
if (obj->ring != NULL)
seq_printf(m, " (%s)", obj->ring->name);
+ if (obj->frontbuffer_bits)
+ seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}
static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
{
- seq_putc(m, ctx->is_initialized ? 'I' : 'i');
+ seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
seq_putc(m, ctx->remap_slice ? 'R' : 'r');
seq_putc(m, ' ');
}
@@ -515,6 +517,11 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
unsigned long flags;
struct intel_crtc *crtc;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
for_each_intel_crtc(dev, crtc) {
const char pipe = pipe_name(crtc->pipe);
@@ -556,6 +563,8 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
@@ -985,29 +994,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
i915_next_seqno_get, i915_next_seqno_set,
"0x%llx\n");
-static int i915_rstdby_delays(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u16 crstanddelay;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
- intel_runtime_pm_get(dev_priv);
-
- crstanddelay = I915_READ16(CRSTANDVID);
-
- intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
-
- return 0;
-}
-
static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
@@ -1029,7 +1015,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
MEMSTAT_VID_SHIFT);
seq_printf(m, "Current P-state: %d\n",
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
- } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
+ } else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
+ IS_BROADWELL(dev)) {
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -1048,7 +1035,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
reqf = I915_READ(GEN6_RPNSWREQ);
reqf &= ~GEN6_TURBO_DISABLE;
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
reqf >>= 24;
else
reqf >>= 25;
@@ -1065,7 +1052,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
else
cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
@@ -1121,20 +1108,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Max overclocked frequency: %dMHz\n",
dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
} else if (IS_VALLEYVIEW(dev)) {
- u32 freq_sts, val;
+ u32 freq_sts;
mutex_lock(&dev_priv->rps.hw_lock);
freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
- val = valleyview_rps_max_freq(dev_priv);
seq_printf(m, "max GPU freq: %d MHz\n",
- vlv_gpu_freq(dev_priv, val));
+ vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq));
- val = valleyview_rps_min_freq(dev_priv);
seq_printf(m, "min GPU freq: %d MHz\n",
- vlv_gpu_freq(dev_priv, val));
+ vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+
+ seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
seq_printf(m, "current GPU freq: %d MHz\n",
vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
@@ -1148,61 +1136,6 @@ out:
return ret;
}
-static int i915_delayfreq_table(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 delayfreq;
- int ret, i;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
- intel_runtime_pm_get(dev_priv);
-
- for (i = 0; i < 16; i++) {
- delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
- seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
- (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
- }
-
- intel_runtime_pm_put(dev_priv);
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-static inline int MAP_TO_MV(int map)
-{
- return 1250 - (map * 25);
-}
-
-static int i915_inttoext_table(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 inttoext;
- int ret, i;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
- intel_runtime_pm_get(dev_priv);
-
- for (i = 1; i <= 32; i++) {
- inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
- seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
- }
-
- intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_info_node *node = m->private;
@@ -1513,10 +1446,17 @@ static int i915_ips_status(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
- if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
- seq_puts(m, "enabled\n");
- else
- seq_puts(m, "disabled\n");
+ seq_printf(m, "Enabled by kernel parameter: %s\n",
+ yesno(i915.enable_ips));
+
+ if (INTEL_INFO(dev)->gen >= 8) {
+ seq_puts(m, "Currently: unknown\n");
+ } else {
+ if (I915_READ(IPS_CTL) & IPS_ENABLE)
+ seq_puts(m, "Currently: enabled\n");
+ else
+ seq_puts(m, "Currently: disabled\n");
+ }
intel_runtime_pm_put(dev_priv);
@@ -1620,26 +1560,6 @@ out:
return ret;
}
-static int i915_gfxec(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
- intel_runtime_pm_get(dev_priv);
-
- seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
- intel_runtime_pm_put(dev_priv);
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
static int i915_opregion(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
@@ -1677,9 +1597,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
#ifdef CONFIG_DRM_I915_FBDEV
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
- if (ret)
- return ret;
ifbdev = dev_priv->fbdev;
fb = to_intel_framebuffer(ifbdev->helper.fb);
@@ -1692,7 +1609,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
atomic_read(&fb->base.refcount.refcount));
describe_obj(m, fb->obj);
seq_putc(m, '\n');
- mutex_unlock(&dev->mode_config.mutex);
#endif
mutex_lock(&dev->mode_config.fb_lock);
@@ -1723,7 +1639,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
struct intel_context *ctx;
int ret, i;
- ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
@@ -1740,7 +1656,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
}
list_for_each_entry(ctx, &dev_priv->context_list, link) {
- if (ctx->obj == NULL)
+ if (ctx->legacy_hw_ctx.rcs_state == NULL)
continue;
seq_puts(m, "HW context ");
@@ -1749,11 +1665,11 @@ static int i915_context_status(struct seq_file *m, void *unused)
if (ring->default_context == ctx)
seq_printf(m, "(default context %s) ", ring->name);
- describe_obj(m, ctx->obj);
+ describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
seq_putc(m, '\n');
}
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -1863,7 +1779,7 @@ static int per_file_ctx(int id, void *ptr, void *data)
if (i915_gem_context_is_default(ctx))
seq_puts(m, " default context:\n");
else
- seq_printf(m, " context %d:\n", ctx->id);
+ seq_printf(m, " context %d:\n", ctx->user_handle);
ppgtt->debug_dump(ppgtt, m);
return 0;
@@ -1976,17 +1892,25 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
intel_runtime_pm_get(dev_priv);
+ mutex_lock(&dev_priv->psr.lock);
seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
+ seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
+ seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
+ seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
+ dev_priv->psr.busy_frontbuffer_bits);
+ seq_printf(m, "Re-enable work scheduled: %s\n",
+ yesno(work_busy(&dev_priv->psr.work.work)));
enabled = HAS_PSR(dev) &&
I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
- seq_printf(m, "Enabled: %s\n", yesno(enabled));
+ seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
if (HAS_PSR(dev))
psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
EDP_PSR_PERF_CNT_MASK;
seq_printf(m, "Performance_Counter: %u\n", psrperf);
+ mutex_unlock(&dev_priv->psr.lock);
intel_runtime_pm_put(dev_priv);
return 0;
@@ -2072,7 +1996,7 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
seq_printf(m, "IRQs disabled: %s\n",
- yesno(dev_priv->pm.irqs_disabled));
+ yesno(!intel_irqs_enabled(dev_priv)));
return 0;
}
@@ -2126,6 +2050,8 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
return "VGA";
case POWER_DOMAIN_AUDIO:
return "AUDIO";
+ case POWER_DOMAIN_PLLS:
+ return "PLLS";
case POWER_DOMAIN_INIT:
return "INIT";
default:
@@ -2223,9 +2149,12 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
struct drm_crtc *crtc = &intel_crtc->base;
struct intel_encoder *intel_encoder;
- seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
- crtc->primary->fb->base.id, crtc->x, crtc->y,
- crtc->primary->fb->width, crtc->primary->fb->height);
+ if (crtc->primary->fb)
+ seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
+ crtc->primary->fb->base.id, crtc->x, crtc->y,
+ crtc->primary->fb->width, crtc->primary->fb->height);
+ else
+ seq_puts(m, "\tprimary plane disabled\n");
for_each_encoder_on_crtc(dev, crtc, intel_encoder)
intel_encoder_info(m, intel_crtc, intel_encoder);
}
@@ -2287,13 +2216,15 @@ static void intel_connector_info(struct seq_file *m,
seq_printf(m, "\tCEA rev: %d\n",
connector->display_info.cea_rev);
}
- if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
- intel_encoder->type == INTEL_OUTPUT_EDP)
- intel_dp_info(m, intel_connector);
- else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
- intel_hdmi_info(m, intel_connector);
- else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
- intel_lvds_info(m, intel_connector);
+ if (intel_encoder) {
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_encoder->type == INTEL_OUTPUT_EDP)
+ intel_dp_info(m, intel_connector);
+ else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
+ intel_hdmi_info(m, intel_connector);
+ else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+ intel_lvds_info(m, intel_connector);
+ }
seq_printf(m, "\tmodes:\n");
list_for_each_entry(mode, &connector->modes, head)
@@ -2347,17 +2278,17 @@ static int i915_display_info(struct seq_file *m, void *unused)
bool active;
int x, y;
- seq_printf(m, "CRTC %d: pipe: %c, active: %s\n",
+ seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
crtc->base.base.id, pipe_name(crtc->pipe),
- yesno(crtc->active));
+ yesno(crtc->active), crtc->config.pipe_src_w, crtc->config.pipe_src_h);
if (crtc->active) {
intel_crtc_info(m, crtc);
active = cursor_position(dev, crtc->pipe, &x, &y);
- seq_printf(m, "\tcursor visible? %s, position (%d, %d), addr 0x%08x, active? %s\n",
+ seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
yesno(crtc->cursor_base),
- x, y, crtc->cursor_addr,
- yesno(active));
+ x, y, crtc->cursor_width, crtc->cursor_height,
+ crtc->cursor_addr, yesno(active));
}
seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
@@ -2377,12 +2308,132 @@ static int i915_display_info(struct seq_file *m, void *unused)
return 0;
}
+static int i915_semaphore_status(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ int i, j, ret;
+
+ if (!i915_semaphore_is_enabled(dev)) {
+ seq_puts(m, "Semaphores are disabled\n");
+ return 0;
+ }
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ intel_runtime_pm_get(dev_priv);
+
+ if (IS_BROADWELL(dev)) {
+ struct page *page;
+ uint64_t *seqno;
+
+ page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
+
+ seqno = (uint64_t *)kmap_atomic(page);
+ for_each_ring(ring, dev_priv, i) {
+ uint64_t offset;
+
+ seq_printf(m, "%s\n", ring->name);
+
+ seq_puts(m, " Last signal:");
+ for (j = 0; j < num_rings; j++) {
+ offset = i * I915_NUM_RINGS + j;
+ seq_printf(m, "0x%08llx (0x%02llx) ",
+ seqno[offset], offset * 8);
+ }
+ seq_putc(m, '\n');
+
+ seq_puts(m, " Last wait: ");
+ for (j = 0; j < num_rings; j++) {
+ offset = i + (j * I915_NUM_RINGS);
+ seq_printf(m, "0x%08llx (0x%02llx) ",
+ seqno[offset], offset * 8);
+ }
+ seq_putc(m, '\n');
+
+ }
+ kunmap_atomic(seqno);
+ } else {
+ seq_puts(m, " Last signal:");
+ for_each_ring(ring, dev_priv, i)
+ for (j = 0; j < num_rings; j++)
+ seq_printf(m, "0x%08x\n",
+ I915_READ(ring->semaphore.mbox.signal[j]));
+ seq_putc(m, '\n');
+ }
+
+ seq_puts(m, "\nSync seqno:\n");
+ for_each_ring(ring, dev_priv, i) {
+ for (j = 0; j < num_rings; j++) {
+ seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]);
+ }
+ seq_putc(m, '\n');
+ }
+ seq_putc(m, '\n');
+
+ intel_runtime_pm_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
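The Broadwell branch above treats the semaphore page as a flat I915_NUM_RINGS x I915_NUM_RINGS matrix of 64-bit seqnos. A minimal sketch of the indexing it assumes (illustrative helpers, not part of the patch):

	/* row i = signalling ring, column j = target ring */
	static inline u64 bdw_last_signal(const u64 *seqno, int i, int j)
	{
		return seqno[i * I915_NUM_RINGS + j];
	}

	/* the "Last wait" dump walks the same matrix transposed */
	static inline u64 bdw_last_wait(const u64 *seqno, int i, int j)
	{
		return seqno[i + j * I915_NUM_RINGS];
	}
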
+static int i915_shared_dplls_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ drm_modeset_lock_all(dev);
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+ seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
+ seq_printf(m, " refcount: %i, active: %i, on: %s\n", pll->refcount,
+ pll->active, yesno(pll->on));
+ seq_printf(m, " tracked hardware state:\n");
+ seq_printf(m, " dpll: 0x%08x\n", pll->hw_state.dpll);
+ seq_printf(m, " dpll_md: 0x%08x\n", pll->hw_state.dpll_md);
+ seq_printf(m, " fp0: 0x%08x\n", pll->hw_state.fp0);
+ seq_printf(m, " fp1: 0x%08x\n", pll->hw_state.fp1);
+ seq_printf(m, " wrpll: 0x%08x\n", pll->hw_state.wrpll);
+ }
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+
struct pipe_crc_info {
const char *name;
struct drm_device *dev;
enum pipe pipe;
};
+static int i915_dp_mst_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_encoder *encoder;
+ struct intel_encoder *intel_encoder;
+ struct intel_digital_port *intel_dig_port;
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ intel_encoder = to_intel_encoder(encoder);
+ if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
+ continue;
+ intel_dig_port = enc_to_dig_port(encoder);
+ if (!intel_dig_port->dp.can_mst)
+ continue;
+
+ drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
+ }
+ drm_modeset_unlock_all(dev);
+ return 0;
+}
+
static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
{
struct pipe_crc_info *info = inode->i_private;
@@ -2849,7 +2900,60 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
return 0;
}
-static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+
+ drm_modeset_lock_all(dev);
+ /*
+ * If we use the eDP transcoder we need to make sure that we don't
+ * bypass the pfit, since otherwise the pipe CRC source won't work. Only
+ * relevant on hsw with pipe A when using the always-on power well
+ * routing.
+ */
+ if (crtc->config.cpu_transcoder == TRANSCODER_EDP &&
+ !crtc->config.pch_pfit.enabled) {
+ crtc->config.pch_pfit.force_thru = true;
+
+ intel_display_power_get(dev_priv,
+ POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
+
+ dev_priv->display.crtc_disable(&crtc->base);
+ dev_priv->display.crtc_enable(&crtc->base);
+ }
+ drm_modeset_unlock_all(dev);
+}
+
+static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+
+ drm_modeset_lock_all(dev);
+ /*
+ * If we use the eDP transcoder we need to make sure that we don't
+ * bypass the pfit, since otherwise the pipe CRC source won't work. Only
+ * relevant on hsw with pipe A when using the always-on power well
+ * routing.
+ */
+ if (crtc->config.pch_pfit.force_thru) {
+ crtc->config.pch_pfit.force_thru = false;
+
+ dev_priv->display.crtc_disable(&crtc->base);
+ dev_priv->display.crtc_enable(&crtc->base);
+
+ intel_display_power_put(dev_priv,
+ POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
+ }
+ drm_modeset_unlock_all(dev);
+}
+
+static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source,
uint32_t *val)
{
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
@@ -2863,6 +2967,9 @@ static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
break;
case INTEL_PIPE_CRC_SOURCE_PF:
+ if (IS_HASWELL(dev) && pipe == PIPE_A)
+ hsw_trans_edp_pipe_A_crc_wa(dev);
+
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
break;
case INTEL_PIPE_CRC_SOURCE_NONE:
@@ -2895,11 +3002,11 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
else if (INTEL_INFO(dev)->gen < 5)
ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
else if (IS_VALLEYVIEW(dev))
- ret = vlv_pipe_crc_ctl_reg(dev,pipe, &source, &val);
+ ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
else if (IS_GEN5(dev) || IS_GEN6(dev))
ret = ilk_pipe_crc_ctl_reg(&source, &val);
else
- ret = ivb_pipe_crc_ctl_reg(&source, &val);
+ ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
if (ret != 0)
return ret;
@@ -2929,11 +3036,16 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
/* real source -> none transition */
if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
struct intel_pipe_crc_entry *entries;
+ struct intel_crtc *crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
pipe_name(pipe));
- intel_wait_for_vblank(dev, pipe);
+ drm_modeset_lock(&crtc->base.mutex, NULL);
+ if (crtc->active)
+ intel_wait_for_vblank(dev, pipe);
+ drm_modeset_unlock(&crtc->base.mutex);
spin_lock_irq(&pipe_crc->lock);
entries = pipe_crc->entries;
@@ -2946,6 +3058,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
g4x_undo_pipe_scramble_reset(dev, pipe);
else if (IS_VALLEYVIEW(dev))
vlv_undo_pipe_scramble_reset(dev, pipe);
+ else if (IS_HASWELL(dev) && pipe == PIPE_A)
+ hsw_undo_trans_edp_pipe_A_crc_wa(dev);
}
return 0;
@@ -3177,7 +3291,7 @@ static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
- if (!HAS_PCH_SPLIT(dev))
+ if (HAS_GMCH_DISPLAY(dev))
return -ENODEV;
return single_open(file, pri_wm_latency_show, dev);
@@ -3187,7 +3301,7 @@ static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
- if (!HAS_PCH_SPLIT(dev))
+ if (HAS_GMCH_DISPLAY(dev))
return -ENODEV;
return single_open(file, spr_wm_latency_show, dev);
@@ -3197,7 +3311,7 @@ static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
- if (!HAS_PCH_SPLIT(dev))
+ if (HAS_GMCH_DISPLAY(dev))
return -ENODEV;
return single_open(file, cur_wm_latency_show, dev);
@@ -3506,7 +3620,7 @@ i915_max_freq_get(void *data, u64 *val)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -3532,7 +3646,7 @@ i915_max_freq_set(void *data, u64 val)
u32 rp_state_cap, hw_max, hw_min;
int ret;
- if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -3549,8 +3663,8 @@ i915_max_freq_set(void *data, u64 val)
if (IS_VALLEYVIEW(dev)) {
val = vlv_freq_opcode(dev_priv, val);
- hw_max = valleyview_rps_max_freq(dev_priv);
- hw_min = valleyview_rps_min_freq(dev_priv);
+ hw_max = dev_priv->rps.max_freq;
+ hw_min = dev_priv->rps.min_freq;
} else {
do_div(val, GT_FREQUENCY_MULTIPLIER);
@@ -3587,7 +3701,7 @@ i915_min_freq_get(void *data, u64 *val)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -3613,7 +3727,7 @@ i915_min_freq_set(void *data, u64 val)
u32 rp_state_cap, hw_max, hw_min;
int ret;
- if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -3630,8 +3744,8 @@ i915_min_freq_set(void *data, u64 val)
if (IS_VALLEYVIEW(dev)) {
val = vlv_freq_opcode(dev_priv, val);
- hw_max = valleyview_rps_max_freq(dev_priv);
- hw_min = valleyview_rps_min_freq(dev_priv);
+ hw_max = dev_priv->rps.max_freq;
+ hw_min = dev_priv->rps.min_freq;
} else {
do_div(val, GT_FREQUENCY_MULTIPLIER);
@@ -3799,14 +3913,10 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
- {"i915_rstdby_delays", i915_rstdby_delays, 0},
{"i915_frequency_info", i915_frequency_info, 0},
- {"i915_delayfreq_table", i915_delayfreq_table, 0},
- {"i915_inttoext_table", i915_inttoext_table, 0},
{"i915_drpc_info", i915_drpc_info, 0},
{"i915_emon_status", i915_emon_status, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
- {"i915_gfxec", i915_gfxec, 0},
{"i915_fbc_status", i915_fbc_status, 0},
{"i915_ips_status", i915_ips_status, 0},
{"i915_sr_status", i915_sr_status, 0},
@@ -3823,6 +3933,9 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_pc8_status", i915_pc8_status, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_display_info", i915_display_info, 0},
+ {"i915_semaphore_status", i915_semaphore_status, 0},
+ {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
+ {"i915_dp_mst_info", i915_dp_mst_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d44344140627..2e7f03ad5ee2 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -138,7 +138,7 @@ static void i915_free_hws(struct drm_device *dev)
I915_WRITE(HWS_PGA, 0x1ffff000);
}
-void i915_kernel_lost_context(struct drm_device * dev)
+void i915_kernel_lost_context(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
@@ -166,7 +166,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
-static int i915_dma_cleanup(struct drm_device * dev)
+static int i915_dma_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
@@ -190,7 +190,7 @@ static int i915_dma_cleanup(struct drm_device * dev)
return 0;
}
-static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
+static int i915_initialize(struct drm_device *dev, drm_i915_init_t *init)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -235,7 +235,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
return 0;
}
-static int i915_dma_resume(struct drm_device * dev)
+static int i915_dma_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = LP_RING(dev_priv);
@@ -359,7 +359,7 @@ static int validate_cmd(int cmd)
return 0;
}
-static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
+static int i915_emit_cmds(struct drm_device *dev, int *buffer, int dwords)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i, ret;
@@ -369,6 +369,7 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
for (i = 0; i < dwords;) {
int sz = validate_cmd(buffer[i]);
+
if (sz == 0 || i + sz > dwords)
return -EINVAL;
i += sz;
@@ -453,7 +454,7 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
}
}
-static int i915_dispatch_cmdbuffer(struct drm_device * dev,
+static int i915_dispatch_cmdbuffer(struct drm_device *dev,
drm_i915_cmdbuffer_t *cmd,
struct drm_clip_rect *cliprects,
void *cmdbuf)
@@ -487,8 +488,8 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
return 0;
}
-static int i915_dispatch_batchbuffer(struct drm_device * dev,
- drm_i915_batchbuffer_t * batch,
+static int i915_dispatch_batchbuffer(struct drm_device *dev,
+ drm_i915_batchbuffer_t *batch,
struct drm_clip_rect *cliprects)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -549,7 +550,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
return 0;
}
-static int i915_dispatch_flip(struct drm_device * dev)
+static int i915_dispatch_flip(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv =
@@ -755,7 +756,7 @@ fail_batch_free:
return ret;
}
-static int i915_emit_irq(struct drm_device * dev)
+static int i915_emit_irq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -781,7 +782,7 @@ static int i915_emit_irq(struct drm_device * dev)
return dev_priv->dri1.counter;
}
-static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+static int i915_wait_irq(struct drm_device *dev, int irq_nr)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -1266,6 +1267,7 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
{
struct drm_device *dev = pci_get_drvdata(pdev);
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+
if (state == VGA_SWITCHEROO_ON) {
pr_info("switched on\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
@@ -1338,6 +1340,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_gem_stolen;
+ dev_priv->pm._irqs_disabled = false;
+
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev);
@@ -1375,9 +1379,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
*/
intel_fbdev_initial_config(dev);
- /* Only enable hotplug handling once the fbdev is fully set up. */
- dev_priv->enable_hotplug_processing = true;
-
drm_kms_helper_poll_init(dev);
return 0;
@@ -1425,15 +1426,16 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
}
#if IS_ENABLED(CONFIG_FB)
-static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
struct apertures_struct *ap;
struct pci_dev *pdev = dev_priv->dev->pdev;
bool primary;
+ int ret;
ap = alloc_apertures(1);
if (!ap)
- return;
+ return -ENOMEM;
ap->ranges[0].base = dev_priv->gtt.mappable_base;
ap->ranges[0].size = dev_priv->gtt.mappable_end;
@@ -1441,13 +1443,16 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
primary =
pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
- remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+ ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
kfree(ap);
+
+ return ret;
}
#else
-static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
+ return 0;
}
#endif
@@ -1492,10 +1497,11 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
- DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
+ DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
info->gen,
dev_priv->dev->pdev->device,
+ dev_priv->dev->pdev->revision,
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
@@ -1594,7 +1600,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (dev_priv == NULL)
return -ENOMEM;
- dev->dev_private = (void *)dev_priv;
+ dev->dev_private = dev_priv;
dev_priv->dev = dev;
/* copy initial configuration to dev_priv->info */
@@ -1606,6 +1612,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->backlight_lock);
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
+ spin_lock_init(&dev_priv->mmio_flip_lock);
mutex_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->modeset_restore_lock);
@@ -1664,7 +1671,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_gtt;
}
- i915_kick_out_firmware_fb(dev_priv);
+ ret = i915_kick_out_firmware_fb(dev_priv);
+ if (ret) {
+ DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
+ goto out_gtt;
+ }
}
pci_set_master(dev->pdev);
@@ -1717,6 +1728,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_mtrrfree;
}
+ dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+ if (dev_priv->dp_wq == NULL) {
+ DRM_ERROR("Failed to create our dp workqueue.\n");
+ ret = -ENOMEM;
+ goto out_freewq;
+ }
+
intel_irq_init(dev);
intel_uncore_sanitize(dev);
@@ -1792,6 +1810,8 @@ out_gem_unload:
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
pm_qos_remove_request(&dev_priv->pm_qos);
+ destroy_workqueue(dev_priv->dp_wq);
+out_freewq:
destroy_workqueue(dev_priv->wq);
out_mtrrfree:
arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1892,6 +1912,7 @@ int i915_driver_unload(struct drm_device *dev)
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
+ destroy_workqueue(dev_priv->dp_wq);
destroy_workqueue(dev_priv->wq);
pm_qos_remove_request(&dev_priv->pm_qos);
@@ -1933,7 +1954,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
* and DMA structures, since the kernel won't be using them, and clean
* up any GEM state.
*/
-void i915_driver_lastclose(struct drm_device * dev)
+void i915_driver_lastclose(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1954,11 +1975,11 @@ void i915_driver_lastclose(struct drm_device * dev)
i915_dma_cleanup(dev);
}
-void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
mutex_lock(&dev->struct_mutex);
- i915_gem_context_close(dev, file_priv);
- i915_gem_release(dev, file_priv);
+ i915_gem_context_close(dev, file);
+ i915_gem_release(dev, file);
mutex_unlock(&dev->struct_mutex);
}
@@ -2031,7 +2052,7 @@ int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
* manage the gtt, we need to claim that all intel devices are agp. For
* otherwise the drm core refuses to initialize the agp support code.
*/
-int i915_driver_device_is_agp(struct drm_device * dev)
+int i915_driver_device_is_agp(struct drm_device *dev)
{
return 1;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 651e65e051c0..6c4b25ce8bb0 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -28,6 +28,7 @@
*/
#include <linux/device.h>
+#include <linux/acpi.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -46,8 +47,6 @@ static struct drm_driver driver;
PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
- .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
- .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
#define GEN_CHV_PIPEOFFSETS \
@@ -55,10 +54,6 @@ static struct drm_driver driver;
CHV_PIPE_C_OFFSET }, \
.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
CHV_TRANSCODER_C_OFFSET, }, \
- .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET, \
- CHV_DPLL_C_OFFSET }, \
- .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET, \
- CHV_DPLL_C_MD_OFFSET }, \
.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
CHV_PALETTE_C_OFFSET }
@@ -308,6 +303,7 @@ static const struct intel_device_info intel_broadwell_d_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
+ .has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
@@ -319,6 +315,7 @@ static const struct intel_device_info intel_broadwell_m_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
+ .has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
@@ -330,6 +327,7 @@ static const struct intel_device_info intel_broadwell_gt3d_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
.has_llc = 1,
.has_ddi = 1,
+ .has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
@@ -341,6 +339,7 @@ static const struct intel_device_info intel_broadwell_gt3m_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
.has_llc = 1,
.has_ddi = 1,
+ .has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
@@ -482,10 +481,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
if (i915.semaphores >= 0)
return i915.semaphores;
- /* Until we get further testing... */
- if (IS_GEN8(dev))
- return false;
-
#ifdef CONFIG_INTEL_IOMMU
/* Enable semaphores on SNB when IO remapping is off */
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
@@ -499,8 +494,7 @@ static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
-
- intel_runtime_pm_get(dev_priv);
+ pci_power_t opregion_target_state;
/* ignore lid events during suspend */
mutex_lock(&dev_priv->modeset_restore_lock);
@@ -526,21 +520,23 @@ static int i915_drm_freeze(struct drm_device *dev)
return error;
}
- drm_irq_uninstall(dev);
- dev_priv->enable_hotplug_processing = false;
-
- intel_disable_gt_powersave(dev);
-
/*
* Disable CRTCs directly since we want to preserve sw state
- * for _thaw.
+ * for _thaw. Also, power gate the CRTC power wells.
*/
drm_modeset_lock_all(dev);
- for_each_crtc(dev, crtc) {
- dev_priv->display.crtc_disable(crtc);
- }
+ for_each_crtc(dev, crtc)
+ intel_crtc_control(crtc, false);
drm_modeset_unlock_all(dev);
+ intel_dp_mst_suspend(dev);
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ intel_runtime_pm_disable_interrupts(dev);
+
+ intel_suspend_gt_powersave(dev);
+
intel_modeset_suspend_hw(dev);
}
@@ -548,8 +544,15 @@ static int i915_drm_freeze(struct drm_device *dev)
i915_save_state(dev);
+ opregion_target_state = PCI_D3cold;
+#if IS_ENABLED(CONFIG_ACPI_SLEEP)
+ if (acpi_target_system_state() < ACPI_STATE_S3)
+ opregion_target_state = PCI_D1;
+#endif
+ intel_opregion_notify_adapter(dev, opregion_target_state);
+
+ intel_uncore_forcewake_reset(dev, false);
intel_opregion_fini(dev);
- intel_uncore_fini(dev);
console_lock();
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
@@ -557,6 +560,8 @@ static int i915_drm_freeze(struct drm_device *dev)
dev_priv->suspend_count++;
+ intel_display_set_init_power(dev_priv, false);
+
return 0;
}
@@ -606,7 +611,10 @@ static int i915_drm_thaw_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- intel_uncore_early_sanitize(dev);
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ hsw_disable_pc8(dev_priv);
+
+ intel_uncore_early_sanitize(dev, true);
intel_uncore_sanitize(dev);
intel_power_domains_init_hw(dev_priv);
@@ -639,11 +647,19 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
}
mutex_unlock(&dev->struct_mutex);
- /* We need working interrupts for modeset enabling ... */
- drm_irq_install(dev, dev->pdev->irq);
+ intel_runtime_pm_restore_interrupts(dev);
intel_modeset_init_hw(dev);
+ {
+ unsigned long irqflags;
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
+
+ intel_dp_mst_resume(dev);
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, true);
drm_modeset_unlock_all(dev);
@@ -655,7 +671,6 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
* notifications.
* */
intel_hpd_init(dev);
- dev_priv->enable_hotplug_processing = true;
/* Config may have changed between suspend and resume */
drm_helper_hpd_irq_event(dev);
}
@@ -678,7 +693,8 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
dev_priv->modeset_restore = MODESET_DONE;
mutex_unlock(&dev_priv->modeset_restore_lock);
- intel_runtime_pm_put(dev_priv);
+ intel_opregion_notify_adapter(dev, PCI_D0);
+
return 0;
}
@@ -887,6 +903,7 @@ static int i915_pm_suspend_late(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_i915_private *dev_priv = drm_dev->dev_private;
/*
* We have a suspend ordering issue with the snd-hda driver also
@@ -900,6 +917,9 @@ static int i915_pm_suspend_late(struct device *dev)
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
+ if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev))
+ hsw_enable_pc8(dev_priv);
+
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1f7700897dfc..a26eec285da7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -53,7 +53,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20080730"
+#define DRIVER_DATE "20140620"
enum pipe {
INVALID_PIPE = -1,
@@ -129,6 +129,7 @@ enum intel_display_power_domain {
POWER_DOMAIN_PORT_OTHER,
POWER_DOMAIN_VGA,
POWER_DOMAIN_AUDIO,
+ POWER_DOMAIN_PLLS,
POWER_DOMAIN_INIT,
POWER_DOMAIN_NUM,
@@ -178,14 +179,20 @@ enum hpd_pin {
list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
if ((intel_connector)->base.encoder == (__encoder))
+#define for_each_power_domain(domain, mask) \
+ for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
+ if ((1 << (domain)) & (mask))
+
struct drm_i915_private;
struct i915_mmu_object;
enum intel_dpll_id {
DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
/* real shared dpll ids must be >= 0 */
- DPLL_ID_PCH_PLL_A,
- DPLL_ID_PCH_PLL_B,
+ DPLL_ID_PCH_PLL_A = 0,
+ DPLL_ID_PCH_PLL_B = 1,
+ DPLL_ID_WRPLL1 = 0,
+ DPLL_ID_WRPLL2 = 1,
};
#define I915_NUM_PLLS 2
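A minimal usage sketch of the new for_each_power_domain() iterator added above (illustrative only; print_domains() is a made-up helper, and power_domain_str() is the debugfs helper extended elsewhere in this diff):

	static void print_domains(struct seq_file *m, unsigned long mask)
	{
		enum intel_display_power_domain domain;

		/* visits each domain whose bit is set in mask, in ascending order */
		for_each_power_domain(domain, mask)
			seq_printf(m, "  %s\n", power_domain_str(domain));
	}
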
@@ -194,6 +201,7 @@ struct intel_dpll_hw_state {
uint32_t dpll_md;
uint32_t fp0;
uint32_t fp1;
+ uint32_t wrpll;
};
struct intel_shared_dpll {
@@ -204,6 +212,8 @@ struct intel_shared_dpll {
/* should match the index in the dev_priv->shared_dplls array */
enum intel_dpll_id id;
struct intel_dpll_hw_state hw_state;
+ /* The mode_set hook is optional and should be used together with the
+ * intel_prepare_shared_dpll function. */
void (*mode_set)(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll);
void (*enable)(struct drm_i915_private *dev_priv,
@@ -228,12 +238,6 @@ void intel_link_compute_m_n(int bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n);
-struct intel_ddi_plls {
- int spll_refcount;
- int wrpll1_refcount;
- int wrpll2_refcount;
-};
-
/* Interface history:
*
* 1.1: Original.
@@ -324,6 +328,7 @@ struct drm_i915_error_state {
u64 fence[I915_MAX_NUM_FENCES];
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
+ struct drm_i915_error_object *semaphore_obj;
struct drm_i915_error_ring {
bool valid;
@@ -435,8 +440,8 @@ struct drm_i915_display_funcs {
void (*update_wm)(struct drm_crtc *crtc);
void (*update_sprite_wm)(struct drm_plane *plane,
struct drm_crtc *crtc,
- uint32_t sprite_width, int pixel_size,
- bool enable, bool scaled);
+ uint32_t sprite_width, uint32_t sprite_height,
+ int pixel_size, bool enable, bool scaled);
void (*modeset_global_resources)(struct drm_device *dev);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
@@ -552,8 +557,6 @@ struct intel_device_info {
/* Register offsets for the various display pipes and transcoders */
int pipe_offsets[I915_MAX_TRANSCODERS];
int trans_offsets[I915_MAX_TRANSCODERS];
- int dpll_offsets[I915_MAX_PIPES];
- int dpll_md_offsets[I915_MAX_PIPES];
int palette_offsets[I915_MAX_PIPES];
int cursor_offsets[I915_MAX_PIPES];
};
@@ -586,28 +589,48 @@ struct i915_ctx_hang_stats {
};
/* This must match up with the value previously used for execbuf2.rsvd1. */
-#define DEFAULT_CONTEXT_ID 0
+#define DEFAULT_CONTEXT_HANDLE 0
+/**
+ * struct intel_context - as the name implies, represents a context.
+ * @ref: reference count.
+ * @user_handle: userspace tracking identity for this context.
+ * @remap_slice: l3 row remapping information.
+ * @file_priv: filp associated with this context (NULL for global default
+ * context).
+ * @hang_stats: information about the role of this context in possible GPU
+ * hangs.
+ * @vm: virtual memory space used by this context.
+ * @legacy_hw_ctx: render context backing object and whether it is correctly
+ * initialized (legacy ring submission mechanism only).
+ * @link: link in the global list of contexts.
+ *
+ * Contexts are memory images used by the hardware to store copies of their
+ * internal state.
+ */
struct intel_context {
struct kref ref;
- int id;
- bool is_initialized;
+ int user_handle;
uint8_t remap_slice;
struct drm_i915_file_private *file_priv;
- struct intel_engine_cs *last_ring;
- struct drm_i915_gem_object *obj;
struct i915_ctx_hang_stats hang_stats;
struct i915_address_space *vm;
+ struct {
+ struct drm_i915_gem_object *rcs_state;
+ bool initialized;
+ } legacy_hw_ctx;
+
struct list_head link;
};
struct i915_fbc {
unsigned long size;
+ unsigned threshold;
unsigned int fb_id;
enum plane plane;
int y;
- struct drm_mm_node *compressed_fb;
+ struct drm_mm_node compressed_fb;
struct drm_mm_node *compressed_llb;
struct intel_fbc_work {
@@ -635,9 +658,15 @@ struct i915_drrs {
struct intel_connector *connector;
};
+struct intel_dp;
struct i915_psr {
+ struct mutex lock;
bool sink_support;
bool source_ok;
+ struct intel_dp *enabled;
+ bool active;
+ struct delayed_work work;
+ unsigned busy_frontbuffer_bits;
};
enum intel_pch {
@@ -880,6 +909,12 @@ struct vlv_s0ix_state {
u32 clock_gate_dis2;
};
+struct intel_rps_ei {
+ u32 cz_clock;
+ u32 render_c0;
+ u32 media_c0;
+};
+
struct intel_gen6_power_mgmt {
/* work and pm_iir are protected by dev_priv->irq_lock */
struct work_struct work;
@@ -903,6 +938,9 @@ struct intel_gen6_power_mgmt {
u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
u8 rp1_freq; /* "less than" RP0 power/frequency */
u8 rp0_freq; /* Non-overclocked max frequency. */
+ u32 cz_freq;
+
+ u32 ei_interrupt_count;
int last_adj;
enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
@@ -910,6 +948,9 @@ struct intel_gen6_power_mgmt {
bool enabled;
struct delayed_work delayed_resume_work;
+ /* manual wa residency calculations */
+ struct intel_rps_ei up_ei, down_ei;
+
/*
* Protects RPS/RC6 register access and PCU communication.
* Must be taken after struct_mutex if nested.
@@ -1230,6 +1271,7 @@ struct intel_vbt_data {
u16 pwm_freq_hz;
bool present;
bool active_low_pwm;
+ u8 min_brightness; /* min_brightness/255 of max */
} backlight;
/* MIPI DSI */
@@ -1299,7 +1341,7 @@ struct ilk_wm_values {
*/
struct i915_runtime_pm {
bool suspended;
- bool irqs_disabled;
+ bool _irqs_disabled;
};
enum intel_pipe_crc_source {
@@ -1332,6 +1374,17 @@ struct intel_pipe_crc {
wait_queue_head_t wq;
};
+struct i915_frontbuffer_tracking {
+ struct mutex lock;
+
+ /*
+ * Tracking bits for delayed frontbuffer flushing due to gpu activity or
+ * scheduled flips.
+ */
+ unsigned busy_bits;
+ unsigned flip_bits;
+};
+
struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
@@ -1363,6 +1416,7 @@ struct drm_i915_private {
struct pci_dev *bridge_dev;
struct intel_engine_cs ring[I915_NUM_RINGS];
+ struct drm_i915_gem_object *semaphore_obj;
uint32_t last_seqno, next_seqno;
drm_dma_handle_t *status_page_dmah;
@@ -1371,6 +1425,9 @@ struct drm_i915_private {
/* protects the irq masks */
spinlock_t irq_lock;
+ /* protects the mmio flip data */
+ spinlock_t mmio_flip_lock;
+
bool display_irqs_enabled;
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
@@ -1390,7 +1447,6 @@ struct drm_i915_private {
u32 pipestat_irq_mask[I915_MAX_PIPES];
struct work_struct hotplug_work;
- bool enable_hotplug_processing;
struct {
unsigned long hpd_last_jiffies;
int hpd_cnt;
@@ -1467,7 +1523,6 @@ struct drm_i915_private {
int num_shared_dpll;
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
- struct intel_ddi_plls ddi_plls;
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
/* Reclocking support */
@@ -1475,6 +1530,9 @@ struct drm_i915_private {
bool lvds_downclock_avail;
/* indicates the reduced downclock for LVDS*/
int lvds_downclock;
+
+ struct i915_frontbuffer_tracking fb_tracking;
+
u16 orig_clock;
bool mchbar_need_disable;
@@ -1541,6 +1599,20 @@ struct drm_i915_private {
struct i915_runtime_pm pm;
+ struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS];
+ u32 long_hpd_port_mask;
+ u32 short_hpd_port_mask;
+ struct work_struct dig_port_work;
+
+ /*
+ * If we get an HPD irq from DP and an HPD irq from non-DP, the
+ * non-DP HPD could block the workqueue while acquiring a mode
+ * config mutex that userspace may already hold. However, userspace
+ * is waiting on the DP workqueue to run, which is blocked behind
+ * the non-DP one.
+ */
+ struct workqueue_struct *dp_wq;
+
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct i915_dri1_state dri1;
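A hypothetical sketch of how the dedicated DP workqueue above is meant to be used (my_dp_work is a made-up work item; queue_work() is the standard kernel workqueue API):

	static void queue_dp_hotplug_work(struct drm_i915_private *dev_priv,
					  struct work_struct *my_dp_work)
	{
		/* DP hotplug work goes on the ordered dp_wq, so it can never
		 * end up queued behind non-DP hotplug work that is blocked on
		 * a mode config mutex held by userspace. */
		queue_work(dev_priv->dp_wq, my_dp_work);
	}
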
@@ -1592,6 +1664,28 @@ struct drm_i915_gem_object_ops {
void (*release)(struct drm_i915_gem_object *);
};
+/*
+ * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
+ * considered to be the frontbuffer for the given plane interface-wise. This
+ * doesn't mean that the hw necessarily already scans it out, but that any
+ * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
+ *
+ * We have one bit per pipe and per scanout plane type.
+ */
+#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4
+#define INTEL_FRONTBUFFER_BITS \
+ (INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
+#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
+ (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
+#define INTEL_FRONTBUFFER_CURSOR(pipe) \
+ (1 << (1 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER_SPRITE(pipe) \
+ (1 << (2 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
+ (1 << (3 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
+ (0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
+
struct drm_i915_gem_object {
struct drm_gem_object base;
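A worked example of the bit layout defined above (values computed from the macros; assumes the usual enum pipe numbering where PIPE_B == 1):

	/* 4 bits per pipe (primary, cursor, sprite, overlay), so PIPE_B
	 * occupies bits 4-7: */
	unsigned bits = INTEL_FRONTBUFFER_PRIMARY(PIPE_B) |	/* 1 << 4 = 0x10 */
			INTEL_FRONTBUFFER_CURSOR(PIPE_B);	/* 1 << 5 = 0x20 */
	/* bits == 0x30, and INTEL_FRONTBUFFER_ALL_MASK(PIPE_B) == 0xf0 */
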
@@ -1662,6 +1756,12 @@ struct drm_i915_gem_object {
unsigned int pin_display:1;
/*
+ * Is the object to be mapped as read-only to the GPU
+ * Only honoured if hardware has relevant pte bit
+ */
+ unsigned long gt_ro:1;
+
+ /*
* Is the GPU currently using a fence to access this buffer,
*/
unsigned int pending_fenced_gpu_access:1;
@@ -1673,6 +1773,8 @@ struct drm_i915_gem_object {
unsigned int has_global_gtt_mapping:1;
unsigned int has_dma_mapping:1;
+ unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
+
struct sg_table *pages;
int pages_pin_count;
@@ -1719,6 +1821,10 @@ struct drm_i915_gem_object {
};
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
+void i915_gem_track_fb(struct drm_i915_gem_object *old,
+ struct drm_i915_gem_object *new,
+ unsigned frontbuffer_bits);
+
/**
* Request queue structure.
*
@@ -1940,10 +2046,8 @@ struct drm_i915_cmd_table {
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
-#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && \
- (!IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
-#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 \
- && !IS_GEN8(dev))
+#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6)
+#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
#define USES_PPGTT(dev) intel_enable_ppgtt(dev, false)
#define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true)
@@ -1998,6 +2102,8 @@ struct drm_i915_cmd_table {
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
+#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
+
/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
@@ -2040,6 +2146,8 @@ struct i915_params {
bool reset;
bool disable_display;
bool disable_vtd_wa;
+ int use_mmio_flip;
+ bool mmio_debug;
};
extern struct i915_params i915 __read_mostly;
@@ -2048,12 +2156,12 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
-extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
+extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
extern void i915_driver_lastclose(struct drm_device * dev);
extern void i915_driver_preclose(struct drm_device *dev,
- struct drm_file *file_priv);
+ struct drm_file *file);
extern void i915_driver_postclose(struct drm_device *dev,
- struct drm_file *file_priv);
+ struct drm_file *file);
extern int i915_driver_device_is_agp(struct drm_device * dev);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
@@ -2084,10 +2192,12 @@ extern void intel_irq_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
extern void intel_uncore_sanitize(struct drm_device *dev);
-extern void intel_uncore_early_sanitize(struct drm_device *dev);
+extern void intel_uncore_early_sanitize(struct drm_device *dev,
+ bool restore_forcewake);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev);
+extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
@@ -2235,6 +2345,8 @@ bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
+int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno);
+
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
return unlikely(atomic_read(&error->reset_counter)
@@ -2404,7 +2516,7 @@ static inline void i915_gem_context_unreference(struct intel_context *ctx)
static inline bool i915_gem_context_is_default(const struct intel_context *c)
{
- return c->id == DEFAULT_CONTEXT_ID;
+ return c->user_handle == DEFAULT_CONTEXT_HANDLE;
}
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -2435,7 +2547,7 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
@@ -2445,7 +2557,6 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 stolen_offset,
u32 gtt_offset,
u32 size);
-void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
@@ -2593,8 +2704,8 @@ extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
-extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
-extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
+extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
+ bool enable);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);
@@ -2605,6 +2716,8 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+void intel_notify_mmio_flip(struct intel_engine_cs *ring);
+
/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
@@ -2700,10 +2813,10 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
{
- if (HAS_PCH_SPLIT(dev))
- return CPU_VGACNTRL;
- else if (IS_VALLEYVIEW(dev))
+ if (IS_VALLEYVIEW(dev))
return VLV_VGACNTRL;
+ else if (INTEL_INFO(dev)->gen >= 5)
+ return CPU_VGACNTRL;
else
return VGACNTRL;
}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f247d922e44a..215185050ff1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1095,7 +1095,7 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
* Compare seqno against outstanding lazy request. Emit a request if they are
* equal.
*/
-static int
+int
i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
{
int ret;
@@ -1161,14 +1161,14 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
s64 before, now;
int ret;
- WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n");
+ WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
return 0;
timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
- if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
+ if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
gen6_rps_boost(dev_priv);
if (file_priv)
mod_delayed_work(dev_priv->wq,
@@ -1560,14 +1560,29 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (ret)
goto unpin;
- obj->fault_mappable = true;
-
+ /* Finally, remap it using the new GTT offset */
pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
pfn >>= PAGE_SHIFT;
- pfn += page_offset;
- /* Finally, remap it using the new GTT offset */
- ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+ if (!obj->fault_mappable) {
+ unsigned long size = min_t(unsigned long,
+ vma->vm_end - vma->vm_start,
+ obj->base.size);
+ int i;
+
+ for (i = 0; i < size >> PAGE_SHIFT; i++) {
+ ret = vm_insert_pfn(vma,
+ (unsigned long)vma->vm_start + i * PAGE_SIZE,
+ pfn + i);
+ if (ret)
+ break;
+ }
+
+ obj->fault_mappable = true;
+ } else
+ ret = vm_insert_pfn(vma,
+ (unsigned long)vmf->virtual_address,
+ pfn + page_offset);
unpin:
i915_gem_object_ggtt_unpin(obj);
unlock:
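As an aside, the prefault sizing in the hunk above can be read as a small helper — a sketch, not part of the patch:

/* Sketch only: number of pages the fault handler will pre-map,
 * bounded by both the VMA and the object size. */
static unsigned long example_prefault_count(struct vm_area_struct *vma,
					    struct drm_i915_gem_object *obj)
{
	unsigned long size = min_t(unsigned long,
				   vma->vm_end - vma->vm_start,
				   obj->base.size);
	return size >> PAGE_SHIFT;
}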
@@ -2051,16 +2066,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* our own buffer, now let the real VM do its job and
* go down in flames if truly OOM.
*/
- gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
- gfp |= __GFP_IO | __GFP_WAIT;
-
i915_gem_shrink_all(dev_priv);
- page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+ page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
goto err_pages;
-
- gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
- gfp &= ~(__GFP_IO | __GFP_WAIT);
}
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
@@ -2209,6 +2218,8 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
list_move_tail(&vma->mm_list, &vm->inactive_list);
}
+ intel_fb_obj_flush(obj, true);
+
list_del_init(&obj->ring_list);
obj->ring = NULL;
@@ -2318,7 +2329,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
u32 request_ring_position, request_start;
int ret;
- request_start = intel_ring_get_tail(ring);
+ request_start = intel_ring_get_tail(ring->buffer);
/*
* Emit any outstanding flushes - execbuf can fail to emit the flush
* after having emitted the batchbuffer command. Hence we need to fix
@@ -2339,7 +2350,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
* GPU processing the request, we never over-estimate the
* position of the head.
*/
- request_ring_position = intel_ring_get_tail(ring);
+ request_ring_position = intel_ring_get_tail(ring->buffer);
ret = ring->add_request(ring);
if (ret)
@@ -2822,6 +2833,8 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
idx = intel_ring_sync_index(from, to);
seqno = obj->last_read_seqno;
+ /* Optimization: Avoid semaphore sync when we are sure we already
+ * waited for an object with a higher seqno. */
if (seqno <= from->semaphore.sync_seqno[idx])
return 0;
@@ -2905,8 +2918,6 @@ int i915_vma_unbind(struct i915_vma *vma)
vma->unbind_vma(vma);
- i915_gem_gtt_finish_object(obj);
-
list_del_init(&vma->mm_list);
/* Avoid an unnecessary call to unbind on rebind. */
if (i915_is_ggtt(vma->vm))
@@ -2917,8 +2928,10 @@ int i915_vma_unbind(struct i915_vma *vma)
/* Since the unbound list is global, only move to that list if
* no more VMAs exist. */
- if (list_empty(&obj->vma_list))
+ if (list_empty(&obj->vma_list)) {
+ i915_gem_gtt_finish_object(obj);
list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+ }
/* And finally now the object is completely decoupled from this vma,
* we can drop its hold on the backing storage and allow it to be
@@ -3530,6 +3543,8 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
+ intel_fb_obj_flush(obj, false);
+
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
old_write_domain);
@@ -3551,6 +3566,8 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
+ intel_fb_obj_flush(obj, false);
+
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
old_write_domain);
@@ -3604,6 +3621,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
obj->dirty = 1;
}
+ if (write)
+ intel_fb_obj_invalidate(obj, NULL);
+
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
@@ -3940,6 +3960,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
+ if (write)
+ intel_fb_obj_invalidate(obj, NULL);
+
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
@@ -4428,13 +4451,14 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
if (obj->stolen)
i915_gem_object_unpin_pages(obj);
+ WARN_ON(obj->frontbuffer_bits);
+
if (WARN_ON(obj->pages_pin_count))
obj->pages_pin_count = 0;
if (discard_backing_storage(obj))
obj->madv = I915_MADV_DONTNEED;
i915_gem_object_put_pages(obj);
i915_gem_object_free_mmap_offset(obj);
- i915_gem_object_release_stolen(obj);
BUG_ON(obj->pages);
@@ -4912,6 +4936,8 @@ i915_gem_load(struct drm_device *dev)
dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
register_oom_notifier(&dev_priv->mm.oom_notifier);
+
+ mutex_init(&dev_priv->fb_tracking.lock);
}
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
@@ -4973,6 +4999,23 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
return ret;
}
+void i915_gem_track_fb(struct drm_i915_gem_object *old,
+ struct drm_i915_gem_object *new,
+ unsigned frontbuffer_bits)
+{
+ if (old) {
+ WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
+ WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
+ old->frontbuffer_bits &= ~frontbuffer_bits;
+ }
+
+ if (new) {
+ WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
+ WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
+ new->frontbuffer_bits |= frontbuffer_bits;
+ }
+}
+
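To illustrate the new helper, a minimal sketch of a flip path handing frontbuffer bits from the old scanout object to the new one (the function name and calling context are illustrative, not from this patch):

/* Sketch only: callers must hold struct_mutex; the WARNs in
 * i915_gem_track_fb() catch double accounting of the bits. */
static void example_swap_scanout(struct drm_device *dev,
				 struct drm_i915_gem_object *old_obj,
				 struct drm_i915_gem_object *new_obj,
				 unsigned frontbuffer_bits)
{
	mutex_lock(&dev->struct_mutex);
	i915_gem_track_fb(old_obj, new_obj, frontbuffer_bits);
	mutex_unlock(&dev->struct_mutex);
}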
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
if (!mutex_is_locked(mutex))
@@ -5055,12 +5098,13 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
vm == &dev_priv->mm.aliasing_ppgtt->base)
vm = &dev_priv->gtt.base;
- BUG_ON(list_empty(&o->vma_list));
list_for_each_entry(vma, &o->vma_list, vma_link) {
if (vma->vm == vm)
return vma->node.start;
}
+ WARN(1, "%s vma for this object not found.\n",
+ i915_is_ggtt(vm) ? "global" : "ppgtt");
return -1;
}
@@ -5141,8 +5185,11 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
bool was_interruptible;
bool unlock;
- while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout)
+ while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
schedule_timeout_killable(1);
+ if (fatal_signal_pending(current))
+ return NOTIFY_DONE;
+ }
if (timeout == 0) {
pr_err("Unable to purge GPU memory due lock contention.\n");
return NOTIFY_DONE;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a5ddf3bce9c3..3b99390e467a 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -182,22 +182,50 @@ void i915_gem_context_free(struct kref *ctx_ref)
typeof(*ctx), ref);
struct i915_hw_ppgtt *ppgtt = NULL;
- if (ctx->obj) {
+ if (ctx->legacy_hw_ctx.rcs_state) {
/* We refcount even the aliasing PPGTT to keep the code symmetric */
- if (USES_PPGTT(ctx->obj->base.dev))
+ if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev))
ppgtt = ctx_to_ppgtt(ctx);
-
- /* XXX: Free up the object before tearing down the address space, in
- * case we're bound in the PPGTT */
- drm_gem_object_unreference(&ctx->obj->base);
}
if (ppgtt)
kref_put(&ppgtt->ref, ppgtt_release);
+ if (ctx->legacy_hw_ctx.rcs_state)
+ drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
list_del(&ctx->link);
kfree(ctx);
}
+static struct drm_i915_gem_object *
+i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
+{
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ obj = i915_gem_alloc_object(dev, size);
+ if (obj == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * Try to make the context utilize L3 as well as LLC.
+ *
+ * On VLV we don't have L3 controls in the PTEs so we
+ * shouldn't touch the cache level, especially as that
+ * would make the object snooped which might have a
+ * negative performance impact.
+ */
+ if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
+ ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
+ /* Failure shouldn't ever happen this early */
+ if (WARN_ON(ret)) {
+ drm_gem_object_unreference(&obj->base);
+ return ERR_PTR(ret);
+ }
+ }
+
+ return obj;
+}
+
static struct i915_hw_ppgtt *
create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
{
@@ -234,40 +262,26 @@ __create_hw_context(struct drm_device *dev,
list_add_tail(&ctx->link, &dev_priv->context_list);
if (dev_priv->hw_context_size) {
- ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
- if (ctx->obj == NULL) {
- ret = -ENOMEM;
+ struct drm_i915_gem_object *obj =
+ i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
goto err_out;
}
-
- /*
- * Try to make the context utilize L3 as well as LLC.
- *
- * On VLV we don't have L3 controls in the PTEs so we
- * shouldn't touch the cache level, especially as that
- * would make the object snooped which might have a
- * negative performance impact.
- */
- if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
- ret = i915_gem_object_set_cache_level(ctx->obj,
- I915_CACHE_L3_LLC);
- /* Failure shouldn't ever happen this early */
- if (WARN_ON(ret))
- goto err_out;
- }
+ ctx->legacy_hw_ctx.rcs_state = obj;
}
/* Default context will never have a file_priv */
if (file_priv != NULL) {
ret = idr_alloc(&file_priv->context_idr, ctx,
- DEFAULT_CONTEXT_ID, 0, GFP_KERNEL);
+ DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
if (ret < 0)
goto err_out;
} else
- ret = DEFAULT_CONTEXT_ID;
+ ret = DEFAULT_CONTEXT_HANDLE;
ctx->file_priv = file_priv;
- ctx->id = ret;
+ ctx->user_handle = ret;
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
* is no remap info, it will be a NOP. */
@@ -301,7 +315,7 @@ i915_gem_create_context(struct drm_device *dev,
if (IS_ERR(ctx))
return ctx;
- if (is_global_default_ctx && ctx->obj) {
+ if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
/* We may need to do things with the shrinker which
* require us to immediately switch back to the default
* context. This can cause a problem as pinning the
@@ -309,7 +323,7 @@ i915_gem_create_context(struct drm_device *dev,
* be available. To avoid this we always pin the default
* context.
*/
- ret = i915_gem_obj_ggtt_pin(ctx->obj,
+ ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
get_context_alignment(dev), 0);
if (ret) {
DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
@@ -349,8 +363,8 @@ i915_gem_create_context(struct drm_device *dev,
return ctx;
err_unpin:
- if (is_global_default_ctx && ctx->obj)
- i915_gem_object_ggtt_unpin(ctx->obj);
+ if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
+ i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
i915_gem_context_unreference(ctx);
return ERR_PTR(ret);
@@ -366,23 +380,27 @@ void i915_gem_context_reset(struct drm_device *dev)
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
struct intel_context *dctx = ring->default_context;
+ struct intel_context *lctx = ring->last_context;
/* Do a fake switch to the default context */
- if (ring->last_context == dctx)
+ if (lctx == dctx)
continue;
- if (!ring->last_context)
+ if (!lctx)
continue;
- if (dctx->obj && i == RCS) {
- WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
+ if (dctx->legacy_hw_ctx.rcs_state && i == RCS) {
+ WARN_ON(i915_gem_obj_ggtt_pin(dctx->legacy_hw_ctx.rcs_state,
get_context_alignment(dev), 0));
/* Fake a finish/inactive */
- dctx->obj->base.write_domain = 0;
- dctx->obj->active = 0;
+ dctx->legacy_hw_ctx.rcs_state->base.write_domain = 0;
+ dctx->legacy_hw_ctx.rcs_state->active = 0;
}
- i915_gem_context_unreference(ring->last_context);
+ if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
+ i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
+
+ i915_gem_context_unreference(lctx);
i915_gem_context_reference(dctx);
ring->last_context = dctx;
}
@@ -429,7 +447,7 @@ void i915_gem_context_fini(struct drm_device *dev)
struct intel_context *dctx = dev_priv->ring[RCS].default_context;
int i;
- if (dctx->obj) {
+ if (dctx->legacy_hw_ctx.rcs_state) {
/* The only known way to stop the gpu from accessing the hw context is
* to reset it. Do this as the very last operation to avoid confusing
* other code, leading to spurious errors. */
@@ -444,13 +462,13 @@ void i915_gem_context_fini(struct drm_device *dev)
WARN_ON(!dev_priv->ring[RCS].last_context);
if (dev_priv->ring[RCS].last_context == dctx) {
/* Fake switch to NULL context */
- WARN_ON(dctx->obj->active);
- i915_gem_object_ggtt_unpin(dctx->obj);
+ WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
+ i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
i915_gem_context_unreference(dctx);
dev_priv->ring[RCS].last_context = NULL;
}
- i915_gem_object_ggtt_unpin(dctx->obj);
+ i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
}
for (i = 0; i < I915_NUM_RINGS; i++) {
@@ -570,7 +588,7 @@ mi_set_context(struct intel_engine_cs *ring,
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
@@ -602,16 +620,16 @@ static int do_switch(struct intel_engine_cs *ring,
int ret, i;
if (from != NULL && ring == &dev_priv->ring[RCS]) {
- BUG_ON(from->obj == NULL);
- BUG_ON(!i915_gem_obj_is_pinned(from->obj));
+ BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
+ BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
}
- if (from == to && from->last_ring == ring && !to->remap_slice)
+ if (from == to && !to->remap_slice)
return 0;
/* Trying to pin first makes error handling easier. */
if (ring == &dev_priv->ring[RCS]) {
- ret = i915_gem_obj_ggtt_pin(to->obj,
+ ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
get_context_alignment(ring->dev), 0);
if (ret)
return ret;
@@ -644,17 +662,17 @@ static int do_switch(struct intel_engine_cs *ring,
*
* XXX: We need a real interface to do this instead of trickery.
*/
- ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
+ ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
if (ret)
goto unpin_out;
- if (!to->obj->has_global_gtt_mapping) {
- struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
+ if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) {
+ struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state,
&dev_priv->gtt.base);
- vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND);
+ vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND);
}
- if (!to->is_initialized || i915_gem_context_is_default(to))
+ if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
hw_flags |= MI_RESTORE_INHIBIT;
ret = mi_set_context(ring, to, hw_flags);
@@ -680,8 +698,8 @@ static int do_switch(struct intel_engine_cs *ring,
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from != NULL) {
- from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
- i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
+ from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
@@ -689,21 +707,20 @@ static int do_switch(struct intel_engine_cs *ring,
* able to defer doing this until we know the object would be
* swapped, but there is no way to do that yet.
*/
- from->obj->dirty = 1;
- BUG_ON(from->obj->ring != ring);
+ from->legacy_hw_ctx.rcs_state->dirty = 1;
+ BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);
/* obj is kept alive until the next request by its active ref */
- i915_gem_object_ggtt_unpin(from->obj);
+ i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
i915_gem_context_unreference(from);
}
- uninitialized = !to->is_initialized && from == NULL;
- to->is_initialized = true;
+ uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
+ to->legacy_hw_ctx.initialized = true;
done:
i915_gem_context_reference(to);
ring->last_context = to;
- to->last_ring = ring;
if (uninitialized) {
ret = i915_gem_render_state_init(ring);
@@ -715,7 +732,7 @@ done:
unpin_out:
if (ring->id == RCS)
- i915_gem_object_ggtt_unpin(to->obj);
+ i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
return ret;
}
@@ -736,7 +753,7 @@ int i915_switch_context(struct intel_engine_cs *ring,
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
- if (to->obj == NULL) { /* We have the fake context */
+ if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
if (to != ring->last_context) {
i915_gem_context_reference(to);
if (ring->last_context)
@@ -774,7 +791,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- args->ctx_id = ctx->id;
+ args->ctx_id = ctx->user_handle;
DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
return 0;
@@ -788,7 +805,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct intel_context *ctx;
int ret;
- if (args->ctx_id == DEFAULT_CONTEXT_ID)
+ if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
return -ENOENT;
ret = i915_mutex_lock_interruptible(dev);
@@ -801,7 +818,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
return PTR_ERR(ctx);
}
- idr_remove(&ctx->file_priv->context_idr, ctx->id);
+ idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
i915_gem_context_unreference(ctx);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3a30133f93e8..60998fc4e5b2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -938,7 +938,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
struct intel_context *ctx = NULL;
struct i915_ctx_hang_stats *hs;
- if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)
+ if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
return ERR_PTR(-EINVAL);
ctx = i915_gem_context_get(file->driver_priv, ctx_id);
@@ -975,10 +975,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
if (obj->base.write_domain) {
obj->dirty = 1;
obj->last_write_seqno = intel_ring_get_seqno(ring);
- /* check for potential scanout */
- if (i915_gem_obj_ggtt_bound(obj) &&
- i915_gem_obj_to_ggtt(obj)->pin_count)
- intel_mark_fb_busy(obj, ring);
+
+ intel_fb_obj_invalidate(obj, ring);
/* update for the implicit flush after a batch */
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
@@ -1028,6 +1026,163 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
return 0;
}
+static int
+legacy_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
+ struct intel_engine_cs *ring,
+ struct intel_context *ctx,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct list_head *vmas,
+ struct drm_i915_gem_object *batch_obj,
+ u64 exec_start, u32 flags)
+{
+ struct drm_clip_rect *cliprects = NULL;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u64 exec_len;
+ int instp_mode;
+ u32 instp_mask;
+ int i, ret = 0;
+
+ if (args->num_cliprects != 0) {
+ if (ring != &dev_priv->ring[RCS]) {
+ DRM_DEBUG("clip rectangles are only valid with the render ring\n");
+ return -EINVAL;
+ }
+
+ if (INTEL_INFO(dev)->gen >= 5) {
+ DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
+ return -EINVAL;
+ }
+
+ if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
+ DRM_DEBUG("execbuf with %u cliprects\n",
+ args->num_cliprects);
+ return -EINVAL;
+ }
+
+ cliprects = kcalloc(args->num_cliprects,
+ sizeof(*cliprects),
+ GFP_KERNEL);
+ if (cliprects == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ if (copy_from_user(cliprects,
+ to_user_ptr(args->cliprects_ptr),
+ sizeof(*cliprects)*args->num_cliprects)) {
+ ret = -EFAULT;
+ goto error;
+ }
+ } else {
+ if (args->DR4 == 0xffffffff) {
+ DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
+ args->DR4 = 0;
+ }
+
+ if (args->DR1 || args->DR4 || args->cliprects_ptr) {
+ DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
+ return -EINVAL;
+ }
+ }
+
+ ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
+ if (ret)
+ goto error;
+
+ ret = i915_switch_context(ring, ctx);
+ if (ret)
+ goto error;
+
+ instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+ instp_mask = I915_EXEC_CONSTANTS_MASK;
+ switch (instp_mode) {
+ case I915_EXEC_CONSTANTS_REL_GENERAL:
+ case I915_EXEC_CONSTANTS_ABSOLUTE:
+ case I915_EXEC_CONSTANTS_REL_SURFACE:
+ if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+ DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (instp_mode != dev_priv->relative_constants_mode) {
+ if (INTEL_INFO(dev)->gen < 4) {
+ DRM_DEBUG("no rel constants on pre-gen4\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (INTEL_INFO(dev)->gen > 5 &&
+ instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
+ DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* The HW changed the meaning of this bit on gen6 */
+ if (INTEL_INFO(dev)->gen >= 6)
+ instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
+ }
+ break;
+ default:
+ DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (ring == &dev_priv->ring[RCS] &&
+ instp_mode != dev_priv->relative_constants_mode) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ goto error;
+
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, INSTPM);
+ intel_ring_emit(ring, instp_mask << 16 | instp_mode);
+ intel_ring_advance(ring);
+
+ dev_priv->relative_constants_mode = instp_mode;
+ }
+
+ if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
+ ret = i915_reset_gen7_sol_offsets(dev, ring);
+ if (ret)
+ goto error;
+ }
+
+ exec_len = args->batch_len;
+ if (cliprects) {
+ for (i = 0; i < args->num_cliprects; i++) {
+ ret = i915_emit_box(dev, &cliprects[i],
+ args->DR1, args->DR4);
+ if (ret)
+ goto error;
+
+ ret = ring->dispatch_execbuffer(ring,
+ exec_start, exec_len,
+ flags);
+ if (ret)
+ goto error;
+ }
+ } else {
+ ret = ring->dispatch_execbuffer(ring,
+ exec_start, exec_len,
+ flags);
+ if (ret)
+ return ret;
+ }
+
+ trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+
+ i915_gem_execbuffer_move_to_active(vmas, ring);
+ i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+
+error:
+ kfree(cliprects);
+ return ret;
+}
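One detail worth calling out from the INSTPM emit above: (mask << 16 | value) is the masked-register idiom, where the high word selects which low-word bits the write is allowed to change. A sketch of the encoding, illustrative only:

/* Sketch only: masked-register write as emitted for INSTPM above. */
static u32 example_masked_write(u32 mask, u32 value)
{
	/* high 16 bits: bits to update; low 16 bits: their new values */
	return mask << 16 | value;
}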
+
/**
* Find one BSD ring to dispatch the corresponding BSD command.
* The Ring ID is returned.
@@ -1087,14 +1242,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = dev->dev_private;
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
- struct drm_clip_rect *cliprects = NULL;
struct intel_engine_cs *ring;
struct intel_context *ctx;
struct i915_address_space *vm;
const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
- u64 exec_start = args->batch_start_offset, exec_len;
- u32 mask, flags;
- int ret, mode, i;
+ u64 exec_start = args->batch_start_offset;
+ u32 flags;
+ int ret;
bool need_relocs;
if (!i915_gem_check_execbuffer(args))
@@ -1138,87 +1292,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
- mode = args->flags & I915_EXEC_CONSTANTS_MASK;
- mask = I915_EXEC_CONSTANTS_MASK;
- switch (mode) {
- case I915_EXEC_CONSTANTS_REL_GENERAL:
- case I915_EXEC_CONSTANTS_ABSOLUTE:
- case I915_EXEC_CONSTANTS_REL_SURFACE:
- if (mode != 0 && ring != &dev_priv->ring[RCS]) {
- DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
- return -EINVAL;
- }
-
- if (mode != dev_priv->relative_constants_mode) {
- if (INTEL_INFO(dev)->gen < 4) {
- DRM_DEBUG("no rel constants on pre-gen4\n");
- return -EINVAL;
- }
-
- if (INTEL_INFO(dev)->gen > 5 &&
- mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
- DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
- return -EINVAL;
- }
-
- /* The HW changed the meaning on this bit on gen6 */
- if (INTEL_INFO(dev)->gen >= 6)
- mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
- }
- break;
- default:
- DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
- return -EINVAL;
- }
-
if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
- if (args->num_cliprects != 0) {
- if (ring != &dev_priv->ring[RCS]) {
- DRM_DEBUG("clip rectangles are only valid with the render ring\n");
- return -EINVAL;
- }
-
- if (INTEL_INFO(dev)->gen >= 5) {
- DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
- return -EINVAL;
- }
-
- if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
- DRM_DEBUG("execbuf with %u cliprects\n",
- args->num_cliprects);
- return -EINVAL;
- }
-
- cliprects = kcalloc(args->num_cliprects,
- sizeof(*cliprects),
- GFP_KERNEL);
- if (cliprects == NULL) {
- ret = -ENOMEM;
- goto pre_mutex_err;
- }
-
- if (copy_from_user(cliprects,
- to_user_ptr(args->cliprects_ptr),
- sizeof(*cliprects)*args->num_cliprects)) {
- ret = -EFAULT;
- goto pre_mutex_err;
- }
- } else {
- if (args->DR4 == 0xffffffff) {
- DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
- args->DR4 = 0;
- }
-
- if (args->DR1 || args->DR4 || args->cliprects_ptr) {
- DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
- return -EINVAL;
- }
- }
-
intel_runtime_pm_get(dev_priv);
ret = i915_mutex_lock_interruptible(dev);
@@ -1322,63 +1400,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
else
exec_start += i915_gem_obj_offset(batch_obj, vm);
- ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
+ ret = legacy_ringbuffer_submission(dev, file, ring, ctx,
+ args, &eb->vmas, batch_obj, exec_start, flags);
if (ret)
goto err;
- ret = i915_switch_context(ring, ctx);
- if (ret)
- goto err;
-
- if (ring == &dev_priv->ring[RCS] &&
- mode != dev_priv->relative_constants_mode) {
- ret = intel_ring_begin(ring, 4);
- if (ret)
- goto err;
-
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(ring, INSTPM);
- intel_ring_emit(ring, mask << 16 | mode);
- intel_ring_advance(ring);
-
- dev_priv->relative_constants_mode = mode;
- }
-
- if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
- ret = i915_reset_gen7_sol_offsets(dev, ring);
- if (ret)
- goto err;
- }
-
-
- exec_len = args->batch_len;
- if (cliprects) {
- for (i = 0; i < args->num_cliprects; i++) {
- ret = i915_emit_box(dev, &cliprects[i],
- args->DR1, args->DR4);
- if (ret)
- goto err;
-
- ret = ring->dispatch_execbuffer(ring,
- exec_start, exec_len,
- flags);
- if (ret)
- goto err;
- }
- } else {
- ret = ring->dispatch_execbuffer(ring,
- exec_start, exec_len,
- flags);
- if (ret)
- goto err;
- }
-
- trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
-
- i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
- i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
-
err:
/* the request owns the ref now */
i915_gem_context_unreference(ctx);
@@ -1387,8 +1413,6 @@ err:
mutex_unlock(&dev->struct_mutex);
pre_mutex_err:
- kfree(cliprects);
-
/* intel_gpu_busy should also get a ref, so it will free when the device
* is really idle. */
intel_runtime_pm_put(dev_priv);
@@ -1525,7 +1549,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
- struct drm_i915_gem_exec_object2 *user_exec_list =
+ struct drm_i915_gem_exec_object2 __user *user_exec_list =
to_user_ptr(args->buffers_ptr);
int i;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 8b3cde703364..5188936bca0a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -63,6 +63,13 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
}
#endif
+ /* PPGTT doesn't work on early (pre-B3) VLV steppings */
+ if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
+ dev->pdev->revision < 0xb) {
+ DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
+ return 0;
+ }
+
return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
}
@@ -110,7 +117,7 @@ static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid)
+ bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -132,7 +139,7 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid)
+ bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -156,7 +163,7 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid)
+ bool valid, u32 flags)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -164,7 +171,8 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
/* Mark the page as writeable. Other platforms don't have a
* setting for read-only/writable, so this matches that behavior.
*/
- pte |= BYT_PTE_WRITEABLE;
+ if (!(flags & PTE_READ_ONLY))
+ pte |= BYT_PTE_WRITEABLE;
if (level != I915_CACHE_NONE)
pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
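Under the new signature, a read-only mapping is requested purely through the flags argument; a sketch (illustrative only, byt_pte_encode() is static to this file):

/* Sketch only: a snooped, GPU read-only Baytrail PTE —
 * BYT_PTE_WRITEABLE stays clear because PTE_READ_ONLY is set. */
static gen6_gtt_pte_t example_ro_pte(dma_addr_t addr)
{
	return byt_pte_encode(addr, I915_CACHE_LLC, true, PTE_READ_ONLY);
}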
@@ -174,7 +182,7 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid)
+ bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -187,7 +195,7 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid)
+ bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -301,7 +309,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
uint64_t start,
- enum i915_cache_level cache_level)
+ enum i915_cache_level cache_level, u32 unused)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
@@ -639,7 +647,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
uint32_t pd_entry;
int pte, pde;
- scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
@@ -941,7 +949,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
- scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
while (num_entries) {
last_pte = first_pte + num_entries;
@@ -964,7 +972,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
uint64_t start,
- enum i915_cache_level cache_level)
+ enum i915_cache_level cache_level, u32 flags)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
@@ -981,7 +989,8 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
pt_vaddr[act_pte] =
vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
- cache_level, true);
+ cache_level, true, flags);
+
if (++act_pte == I915_PPGTT_PT_ENTRIES) {
kunmap_atomic(pt_vaddr);
pt_vaddr = NULL;
@@ -1218,8 +1227,12 @@ ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
+ /* Currently applicable only to VLV */
+ if (vma->obj->gt_ro)
+ flags |= PTE_READ_ONLY;
+
vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
- cache_level);
+ cache_level, flags);
}
static void ppgtt_unbind_vma(struct i915_vma *vma)
@@ -1394,7 +1407,7 @@ static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
uint64_t start,
- enum i915_cache_level level)
+ enum i915_cache_level level, u32 unused)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
@@ -1440,7 +1453,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
uint64_t start,
- enum i915_cache_level level)
+ enum i915_cache_level level, u32 flags)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
@@ -1452,7 +1465,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_page_iter_dma_address(&sg_iter);
- iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
+ iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
i++;
}
@@ -1464,7 +1477,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
*/
if (i != 0)
WARN_ON(readl(&gtt_entries[i-1]) !=
- vm->pte_encode(addr, level, true));
+ vm->pte_encode(addr, level, true, flags));
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
@@ -1518,7 +1531,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
first_entry, num_entries, max_entries))
num_entries = max_entries;
- scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0);
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
@@ -1567,6 +1580,10 @@ static void ggtt_bind_vma(struct i915_vma *vma,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = vma->obj;
+ /* Currently applicable only to VLV */
+ if (obj->gt_ro)
+ flags |= PTE_READ_ONLY;
+
/* If there is no aliasing PPGTT, or the caller needs a global mapping,
* or we have a global mapping already but the cacheability flags have
* changed, set the global PTEs.
@@ -1583,7 +1600,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
(cache_level != obj->cache_level)) {
vma->vm->insert_entries(vma->vm, obj->pages,
vma->node.start,
- cache_level);
+ cache_level, flags);
obj->has_global_gtt_mapping = 1;
}
}
@@ -1595,7 +1612,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
appgtt->base.insert_entries(&appgtt->base,
vma->obj->pages,
vma->node.start,
- cache_level);
+ cache_level, flags);
vma->obj->has_aliasing_ppgtt_mapping = 1;
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 1b96a06be3cb..8d6f7c18c404 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -154,6 +154,7 @@ struct i915_vma {
void (*unbind_vma)(struct i915_vma *vma);
/* Map an object into an address space with the given cache flags. */
#define GLOBAL_BIND (1<<0)
+#define PTE_READ_ONLY (1<<1)
void (*bind_vma)(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
@@ -197,7 +198,7 @@ struct i915_address_space {
/* FIXME: Need a more generic return type */
gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
- bool valid); /* Create a valid PTE */
+ bool valid, u32 flags); /* Create a valid PTE */
void (*clear_range)(struct i915_address_space *vm,
uint64_t start,
uint64_t length,
@@ -205,7 +206,7 @@ struct i915_address_space {
void (*insert_entries)(struct i915_address_space *vm,
struct sg_table *st,
uint64_t start,
- enum i915_cache_level cache_level);
+ enum i915_cache_level cache_level, u32 flags);
void (*cleanup)(struct i915_address_space *vm);
};
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 34894b573064..e60be3f552a6 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -28,64 +28,13 @@
#include "i915_drv.h"
#include "intel_renderstate.h"
-struct i915_render_state {
+struct render_state {
+ const struct intel_renderstate_rodata *rodata;
struct drm_i915_gem_object *obj;
- unsigned long ggtt_offset;
- u32 *batch;
- u32 size;
- u32 len;
+ u64 ggtt_offset;
+ int gen;
};
-static struct i915_render_state *render_state_alloc(struct drm_device *dev)
-{
- struct i915_render_state *so;
- struct page *page;
- int ret;
-
- so = kzalloc(sizeof(*so), GFP_KERNEL);
- if (!so)
- return ERR_PTR(-ENOMEM);
-
- so->obj = i915_gem_alloc_object(dev, 4096);
- if (so->obj == NULL) {
- ret = -ENOMEM;
- goto free;
- }
- so->size = 4096;
-
- ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
- if (ret)
- goto free_gem;
-
- BUG_ON(so->obj->pages->nents != 1);
- page = sg_page(so->obj->pages->sgl);
-
- so->batch = kmap(page);
- if (!so->batch) {
- ret = -ENOMEM;
- goto unpin;
- }
-
- so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
-
- return so;
-unpin:
- i915_gem_object_ggtt_unpin(so->obj);
-free_gem:
- drm_gem_object_unreference(&so->obj->base);
-free:
- kfree(so);
- return ERR_PTR(ret);
-}
-
-static void render_state_free(struct i915_render_state *so)
-{
- kunmap(kmap_to_page(so->batch));
- i915_gem_object_ggtt_unpin(so->obj);
- drm_gem_object_unreference(&so->obj->base);
- kfree(so);
-}
-
static const struct intel_renderstate_rodata *
render_state_get_rodata(struct drm_device *dev, const int gen)
{
@@ -101,98 +50,120 @@ render_state_get_rodata(struct drm_device *dev, const int gen)
return NULL;
}
-static int render_state_setup(const int gen,
- const struct intel_renderstate_rodata *rodata,
- struct i915_render_state *so)
+static int render_state_init(struct render_state *so, struct drm_device *dev)
{
- const u64 goffset = i915_gem_obj_ggtt_offset(so->obj);
- u32 reloc_index = 0;
- u32 * const d = so->batch;
- unsigned int i = 0;
int ret;
- if (!rodata || rodata->batch_items * 4 > so->size)
+ so->gen = INTEL_INFO(dev)->gen;
+ so->rodata = render_state_get_rodata(dev, so->gen);
+ if (so->rodata == NULL)
+ return 0;
+
+ if (so->rodata->batch_items * 4 > 4096)
return -EINVAL;
+ so->obj = i915_gem_alloc_object(dev, 4096);
+ if (so->obj == NULL)
+ return -ENOMEM;
+
+ ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
+ if (ret)
+ goto free_gem;
+
+ so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
+ return 0;
+
+free_gem:
+ drm_gem_object_unreference(&so->obj->base);
+ return ret;
+}
+
+static int render_state_setup(struct render_state *so)
+{
+ const struct intel_renderstate_rodata *rodata = so->rodata;
+ unsigned int i = 0, reloc_index = 0;
+ struct page *page;
+ u32 *d;
+ int ret;
+
ret = i915_gem_object_set_to_cpu_domain(so->obj, true);
if (ret)
return ret;
+ page = sg_page(so->obj->pages->sgl);
+ d = kmap(page);
+
while (i < rodata->batch_items) {
u32 s = rodata->batch[i];
- if (reloc_index < rodata->reloc_items &&
- i * 4 == rodata->reloc[reloc_index]) {
-
- s += goffset & 0xffffffff;
-
- /* We keep batch offsets max 32bit */
- if (gen >= 8) {
+ if (i * 4 == rodata->reloc[reloc_index]) {
+ u64 r = s + so->ggtt_offset;
+ s = lower_32_bits(r);
+ if (so->gen >= 8) {
if (i + 1 >= rodata->batch_items ||
rodata->batch[i + 1] != 0)
return -EINVAL;
- d[i] = s;
- i++;
- s = (goffset & 0xffffffff00000000ull) >> 32;
+ d[i++] = s;
+ s = upper_32_bits(r);
}
reloc_index++;
}
- d[i] = s;
- i++;
+ d[i++] = s;
}
+ kunmap(page);
ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
if (ret)
return ret;
- if (rodata->reloc_items != reloc_index) {
- DRM_ERROR("not all relocs resolved, %d out of %d\n",
- reloc_index, rodata->reloc_items);
+ if (rodata->reloc[reloc_index] != -1) {
+ DRM_ERROR("only %d relocs resolved\n", reloc_index);
return -EINVAL;
}
- so->len = rodata->batch_items * 4;
-
return 0;
}
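The gen8 branch above splits a 64-bit GGTT address across two consecutive batch dwords; a minimal sketch of that split:

/* Sketch only: emit a 64-bit relocation as two dwords, low half first. */
static void example_emit_reloc64(u32 *d, unsigned int *i, u64 addr)
{
	d[(*i)++] = lower_32_bits(addr);
	d[(*i)++] = upper_32_bits(addr);
}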
+static void render_state_fini(struct render_state *so)
+{
+ i915_gem_object_ggtt_unpin(so->obj);
+ drm_gem_object_unreference(&so->obj->base);
+}
+
int i915_gem_render_state_init(struct intel_engine_cs *ring)
{
- const int gen = INTEL_INFO(ring->dev)->gen;
- struct i915_render_state *so;
- const struct intel_renderstate_rodata *rodata;
+ struct render_state so;
int ret;
if (WARN_ON(ring->id != RCS))
return -ENOENT;
- rodata = render_state_get_rodata(ring->dev, gen);
- if (rodata == NULL)
- return 0;
+ ret = render_state_init(&so, ring->dev);
+ if (ret)
+ return ret;
- so = render_state_alloc(ring->dev);
- if (IS_ERR(so))
- return PTR_ERR(so);
+ if (so.rodata == NULL)
+ return 0;
- ret = render_state_setup(gen, rodata, so);
+ ret = render_state_setup(&so);
if (ret)
goto out;
ret = ring->dispatch_execbuffer(ring,
- i915_gem_obj_ggtt_offset(so->obj),
- so->len,
+ so.ggtt_offset,
+ so.rodata->batch_items * 4,
I915_DISPATCH_SECURE);
if (ret)
goto out;
- i915_vma_move_to_active(i915_gem_obj_to_ggtt(so->obj), ring);
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
- ret = __i915_add_request(ring, NULL, so->obj, NULL);
+ ret = __i915_add_request(ring, NULL, so.obj, NULL);
/* __i915_add_request moves object to inactive if it fails */
out:
- render_state_free(so);
+ render_state_fini(&so);
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 7465ab0fd396..21c025a209c0 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -147,30 +147,68 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
return base;
}
-static int i915_setup_compression(struct drm_device *dev, int size)
+static int find_compression_threshold(struct drm_device *dev,
+ struct drm_mm_node *node,
+ int size,
+ int fb_cpp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
+ int compression_threshold = 1;
int ret;
- compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL);
- if (!compressed_fb)
- goto err_llb;
+ /* HACK: This code depends on what we will do in *_enable_fbc. If that
+ * code changes, this code needs to change as well.
+ *
+ * The enable_fbc code will attempt to use one of our 2 compression
+ * thresholds; in that case, we only have one fallback.
+ */
- /* Try to over-allocate to reduce reallocations and fragmentation */
- ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
+ /* Try to over-allocate to reduce reallocations and fragmentation. */
+ ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
- if (ret)
- ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
- size >>= 1, 4096,
- DRM_MM_SEARCH_DEFAULT);
- if (ret)
+ if (ret == 0)
+ return compression_threshold;
+
+again:
+ /* HW's ability to limit the CFB is 1:4 */
+ if (compression_threshold > 4 ||
+ (fb_cpp == 2 && compression_threshold == 2))
+ return 0;
+
+ ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
+ size >>= 1, 4096,
+ DRM_MM_SEARCH_DEFAULT);
+ if (ret && INTEL_INFO(dev)->gen <= 4) {
+ return 0;
+ } else if (ret) {
+ compression_threshold <<= 1;
+ goto again;
+ } else {
+ return compression_threshold;
+ }
+}
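To make the retry ladder concrete, a sketch of the same policy with the allocation call stubbed out (stolen_alloc_ok() is hypothetical, standing in for drm_mm_insert_node(); the gen <= 4 early-out is omitted):

/* Sketch only: halve the request and double the compression threshold
 * until the HW limit is reached (1:4; 16bpp framebuffers cannot be
 * reduced at all). */
static int example_threshold(int size, int fb_cpp)
{
	int threshold = 1;

	while (!(threshold > 4 || (fb_cpp == 2 && threshold == 2))) {
		if (stolen_alloc_ok(size))
			return threshold;
		size >>= 1;
		threshold <<= 1;
	}
	return 0;	/* cannot compress harder than the HW allows */
}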
+
+static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mm_node *uninitialized_var(compressed_llb);
+ int ret;
+
+ ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb,
+ size, fb_cpp);
+ if (!ret)
goto err_llb;
+ else if (ret > 1) {
+ DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
+
+ }
+
+ dev_priv->fbc.threshold = ret;
if (HAS_PCH_SPLIT(dev))
- I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
+ I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
else if (IS_GM45(dev)) {
- I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
+ I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
} else {
compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
if (!compressed_llb)
@@ -184,13 +222,12 @@ static int i915_setup_compression(struct drm_device *dev, int size)
dev_priv->fbc.compressed_llb = compressed_llb;
I915_WRITE(FBC_CFB_BASE,
- dev_priv->mm.stolen_base + compressed_fb->start);
+ dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
I915_WRITE(FBC_LL_BASE,
dev_priv->mm.stolen_base + compressed_llb->start);
}
- dev_priv->fbc.compressed_fb = compressed_fb;
- dev_priv->fbc.size = size;
+ dev_priv->fbc.size = size / dev_priv->fbc.threshold;
DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
size);
@@ -199,14 +236,13 @@ static int i915_setup_compression(struct drm_device *dev, int size)
err_fb:
kfree(compressed_llb);
- drm_mm_remove_node(compressed_fb);
+ drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
err_llb:
- kfree(compressed_fb);
pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
}
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -219,7 +255,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
/* Release any current block */
i915_gem_stolen_cleanup_compression(dev);
- return i915_setup_compression(dev, size);
+ return i915_setup_compression(dev, size, fb_cpp);
}
void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
@@ -229,10 +265,7 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
if (dev_priv->fbc.size == 0)
return;
- if (dev_priv->fbc.compressed_fb) {
- drm_mm_remove_node(dev_priv->fbc.compressed_fb);
- kfree(dev_priv->fbc.compressed_fb);
- }
+ drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
if (dev_priv->fbc.compressed_llb) {
drm_mm_remove_node(dev_priv->fbc.compressed_llb);
@@ -336,9 +369,20 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
kfree(obj->pages);
}
+
+static void
+i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
+{
+ if (obj->stolen) {
+ drm_mm_remove_node(obj->stolen);
+ kfree(obj->stolen);
+ obj->stolen = NULL;
+ }
+}
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
.get_pages = i915_gem_object_get_pages_stolen,
.put_pages = i915_gem_object_put_pages_stolen,
+ .release = i915_gem_object_release_stolen,
};
static struct drm_i915_gem_object *
@@ -496,13 +540,3 @@ err_out:
drm_gem_object_unreference(&obj->base);
return NULL;
}
-
-void
-i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
-{
- if (obj->stolen) {
- drm_mm_remove_node(obj->stolen);
- kfree(obj->stolen);
- obj->stolen = NULL;
- }
-}
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 21ea92886a56..fe69fc837d9e 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -40,19 +40,87 @@ struct i915_mmu_notifier {
struct hlist_node node;
struct mmu_notifier mn;
struct rb_root objects;
+ struct list_head linear;
struct drm_device *dev;
struct mm_struct *mm;
struct work_struct work;
unsigned long count;
unsigned long serial;
+ bool has_linear;
};
struct i915_mmu_object {
struct i915_mmu_notifier *mmu;
struct interval_tree_node it;
+ struct list_head link;
struct drm_i915_gem_object *obj;
+ bool is_linear;
};
+static unsigned long cancel_userptr(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ unsigned long end;
+
+ mutex_lock(&dev->struct_mutex);
+ /* Cancel any active worker and force us to re-evaluate gup */
+ obj->userptr.work = NULL;
+
+ if (obj->pages != NULL) {
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_vma *vma, *tmp;
+ bool was_interruptible;
+
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
+ int ret = i915_vma_unbind(vma);
+ WARN_ON(ret && ret != -EIO);
+ }
+ WARN_ON(i915_gem_object_put_pages(obj));
+
+ dev_priv->mm.interruptible = was_interruptible;
+ }
+
+ end = obj->userptr.ptr + obj->base.size;
+
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&dev->struct_mutex);
+
+ return end;
+}
+
+static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct i915_mmu_object *mmu;
+ unsigned long serial;
+
+restart:
+ serial = mn->serial;
+ list_for_each_entry(mmu, &mn->linear, link) {
+ struct drm_i915_gem_object *obj;
+
+ if (mmu->it.last < start || mmu->it.start > end)
+ continue;
+
+ obj = mmu->obj;
+ drm_gem_object_reference(&obj->base);
+ spin_unlock(&mn->lock);
+
+ cancel_userptr(obj);
+
+ spin_lock(&mn->lock);
+ if (serial != mn->serial)
+ goto restart;
+ }
+
+ return NULL;
+}
+
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
struct mm_struct *mm,
unsigned long start,
@@ -60,16 +128,18 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
{
struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
struct interval_tree_node *it = NULL;
+ unsigned long next = start;
unsigned long serial = 0;
end--; /* interval ranges are inclusive, but invalidate range is exclusive */
- while (start < end) {
- struct drm_i915_gem_object *obj;
+ while (next < end) {
+ struct drm_i915_gem_object *obj = NULL;
- obj = NULL;
spin_lock(&mn->lock);
- if (serial == mn->serial)
- it = interval_tree_iter_next(it, start, end);
+ if (mn->has_linear)
+ it = invalidate_range__linear(mn, mm, start, end);
+ else if (serial == mn->serial)
+ it = interval_tree_iter_next(it, next, end);
else
it = interval_tree_iter_first(&mn->objects, start, end);
if (it != NULL) {
@@ -81,31 +151,7 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
if (obj == NULL)
return;
- mutex_lock(&mn->dev->struct_mutex);
- /* Cancel any active worker and force us to re-evaluate gup */
- obj->userptr.work = NULL;
-
- if (obj->pages != NULL) {
- struct drm_i915_private *dev_priv = to_i915(mn->dev);
- struct i915_vma *vma, *tmp;
- bool was_interruptible;
-
- was_interruptible = dev_priv->mm.interruptible;
- dev_priv->mm.interruptible = false;
-
- list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
- int ret = i915_vma_unbind(vma);
- WARN_ON(ret && ret != -EIO);
- }
- WARN_ON(i915_gem_object_put_pages(obj));
-
- dev_priv->mm.interruptible = was_interruptible;
- }
-
- start = obj->userptr.ptr + obj->base.size;
-
- drm_gem_object_unreference(&obj->base);
- mutex_unlock(&mn->dev->struct_mutex);
+ next = cancel_userptr(obj);
}
}
@@ -150,7 +196,9 @@ i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
mmu->mm = mm;
mmu->objects = RB_ROOT;
mmu->count = 0;
- mmu->serial = 0;
+ mmu->serial = 1;
+ INIT_LIST_HEAD(&mmu->linear);
+ mmu->has_linear = false;
/* Protected by mmap_sem (write-lock) */
ret = __mmu_notifier_register(&mmu->mn, mm);
@@ -197,6 +245,17 @@ static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
mmu->serial = 1;
}
+static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mmu)
+{
+ struct i915_mmu_object *mn;
+
+ list_for_each_entry(mn, &mmu->linear, link)
+ if (mn->is_linear)
+ return true;
+
+ return false;
+}
+
static void
i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
struct i915_mmu_object *mn)
@@ -204,7 +263,11 @@ i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
lockdep_assert_held(&mmu->dev->struct_mutex);
spin_lock(&mmu->lock);
- interval_tree_remove(&mn->it, &mmu->objects);
+ list_del(&mn->link);
+ if (mn->is_linear)
+ mmu->has_linear = i915_mmu_notifier_has_linear(mmu);
+ else
+ interval_tree_remove(&mn->it, &mmu->objects);
__i915_mmu_notifier_update_serial(mmu);
spin_unlock(&mmu->lock);
@@ -230,7 +293,6 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
*/
i915_gem_retire_requests(mmu->dev);
- /* Disallow overlapping userptr objects */
spin_lock(&mmu->lock);
it = interval_tree_iter_first(&mmu->objects,
mn->it.start, mn->it.last);
@@ -243,14 +305,22 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
* to flush their object references upon which the object will
* be removed from the interval-tree, or the range is
* still in use by another client and the overlap is invalid.
+ *
+ * If we do have an overlap, we cannot use the interval tree
+ * for fast range invalidation.
*/
obj = container_of(it, struct i915_mmu_object, it)->obj;
- ret = obj->userptr.workers ? -EAGAIN : -EINVAL;
- } else {
+ if (!obj->userptr.workers)
+ mmu->has_linear = mn->is_linear = true;
+ else
+ ret = -EAGAIN;
+ } else
interval_tree_insert(&mn->it, &mmu->objects);
+
+ if (ret == 0) {
+ list_add(&mn->link, &mmu->linear);
__i915_mmu_notifier_update_serial(mmu);
- ret = 0;
}
spin_unlock(&mmu->lock);
mutex_unlock(&mmu->dev->struct_mutex);
@@ -611,12 +681,11 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
* We impose several restrictions upon the memory being mapped
* into the GPU.
* 1. It must be page aligned (both start/end addresses, i.e ptr and size).
- * 2. It cannot overlap any other userptr object in the same address space.
- * 3. It must be normal system memory, not a pointer into another map of IO
+ * 2. It must be normal system memory, not a pointer into another map of IO
* space (e.g. it must not be a GTT mmapping of another object).
- * 4. We only allow a bo as large as we could in theory map into the GTT,
+ * 3. We only allow a bo as large as we could in theory map into the GTT,
* that is we limit the size to the total size of the GTT.
- * 5. The bo is marked as being snoopable. The backing pages are left
+ * 4. The bo is marked as being snoopable. The backing pages are left
* accessible directly by the CPU, but reads and writes by the GPU may
* incur the cost of a snoop (unless you have an LLC architecture).
*
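Taken together, the remaining restrictions are easy to satisfy from userspace. A hedged sketch of minimal usage, assuming the DRM_IOCTL_I915_GEM_USERPTR uapi introduced by this series (error handling elided; the helper name is illustrative):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Wrap page-aligned anonymous memory in a GEM handle. Returns 0
 * (an invalid handle) on failure. */
static uint32_t wrap_userptr(int drm_fd, size_t size)
{
	struct drm_i915_gem_userptr arg;
	void *ptr;

	/* restriction 1: both ptr and size must be page aligned */
	if (posix_memalign(&ptr, 4096, size))
		return 0;

	memset(&arg, 0, sizeof(arg));
	arg.user_ptr = (uintptr_t)ptr;	/* restriction 2: normal system memory */
	arg.user_size = size;		/* restriction 3: must fit in the GTT */

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
		return 0;

	return arg.handle;		/* restriction 4: pages stay CPU-visible */
}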
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 66cf41765bf9..0b3f69439451 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -327,6 +327,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
struct drm_device *dev = error_priv->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error = error_priv->error;
+ struct drm_i915_error_object *obj;
int i, j, offset, elt;
int max_hangcheck_score;
@@ -395,8 +396,6 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
error->pinned_bo_count[0]);
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
- struct drm_i915_error_object *obj;
-
obj = error->ring[i].batchbuffer;
if (obj) {
err_puts(m, dev_priv->ring[i].name);
@@ -459,6 +458,18 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
}
+ if ((obj = error->semaphore_obj)) {
+ err_printf(m, "Semaphore page = 0x%08x\n", obj->gtt_offset);
+ for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
+ err_printf(m, "[%04x] %08x %08x %08x %08x\n",
+ elt * 4,
+ obj->pages[0][elt],
+ obj->pages[0][elt+1],
+ obj->pages[0][elt+2],
+ obj->pages[0][elt+3]);
+ }
+ }
+
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
@@ -529,6 +540,7 @@ static void i915_error_state_free(struct kref *error_ref)
kfree(error->ring[i].requests);
}
+ i915_error_object_free(error->semaphore_obj);
kfree(error->active_bo);
kfree(error->overlay);
kfree(error->display);
@@ -746,7 +758,59 @@ static void i915_gem_record_fences(struct drm_device *dev,
}
}
+
+static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error,
+ struct intel_engine_cs *ring,
+ struct drm_i915_error_ring *ering)
+{
+ struct intel_engine_cs *to;
+ int i;
+
+ if (!i915_semaphore_is_enabled(dev_priv->dev))
+ return;
+
+ if (!error->semaphore_obj)
+ error->semaphore_obj =
+ i915_error_object_create(dev_priv,
+ dev_priv->semaphore_obj,
+ &dev_priv->gtt.base);
+
+ for_each_ring(to, dev_priv, i) {
+ int idx;
+ u16 signal_offset;
+ u32 *tmp;
+
+ if (ring == to)
+ continue;
+
+		signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) &
+				 (PAGE_SIZE - 1)) / 4;
+ tmp = error->semaphore_obj->pages[0];
+ idx = intel_ring_sync_index(ring, to);
+
+ ering->semaphore_mboxes[idx] = tmp[signal_offset];
+ ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
+ }
+}
+
+static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
+ struct intel_engine_cs *ring,
+ struct drm_i915_error_ring *ering)
+{
+ ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
+ ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
+ ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
+ ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
+
+ if (HAS_VEBOX(dev_priv->dev)) {
+ ering->semaphore_mboxes[2] =
+ I915_READ(RING_SYNC_2(ring->mmio_base));
+ ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
+ }
+}
+
static void i915_record_ring_state(struct drm_device *dev,
+ struct drm_i915_error_state *error,
struct intel_engine_cs *ring,
struct drm_i915_error_ring *ering)
{
@@ -755,18 +819,10 @@ static void i915_record_ring_state(struct drm_device *dev,
if (INTEL_INFO(dev)->gen >= 6) {
ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
- ering->semaphore_mboxes[0]
- = I915_READ(RING_SYNC_0(ring->mmio_base));
- ering->semaphore_mboxes[1]
- = I915_READ(RING_SYNC_1(ring->mmio_base));
- ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
- ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
- }
-
- if (HAS_VEBOX(dev)) {
- ering->semaphore_mboxes[2] =
- I915_READ(RING_SYNC_2(ring->mmio_base));
- ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
+ if (INTEL_INFO(dev)->gen >= 8)
+ gen8_record_semaphore_state(dev_priv, error, ring, ering);
+ else
+ gen6_record_semaphore_state(dev_priv, ring, ering);
}
if (INTEL_INFO(dev)->gen >= 4) {
@@ -871,6 +927,9 @@ static void i915_gem_record_active_context(struct intel_engine_cs *ring,
return;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if (!i915_gem_obj_ggtt_bound(obj))
+ continue;
+
if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
break;
@@ -895,7 +954,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
error->ring[i].valid = true;
- i915_record_ring_state(dev, ring, &error->ring[i]);
+ i915_record_ring_state(dev, error, ring, &error->ring[i]);
request = i915_gem_find_active_request(ring);
if (request) {
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c05c84f3f091..6ef9d6fabf80 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -136,7 +136,7 @@ ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON(dev_priv->pm.irqs_disabled))
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
if ((dev_priv->irq_mask & mask) != 0) {
@@ -151,7 +151,7 @@ ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON(dev_priv->pm.irqs_disabled))
+ if (!intel_irqs_enabled(dev_priv))
return;
if ((dev_priv->irq_mask & mask) != mask) {
@@ -173,7 +173,7 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
{
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON(dev_priv->pm.irqs_disabled))
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
dev_priv->gt_irq_mask &= ~interrupt_mask;
@@ -182,12 +182,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
POSTING_READ(GTIMR);
}
-void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
ilk_update_gt_irq(dev_priv, mask, mask);
}
-void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
ilk_update_gt_irq(dev_priv, mask, 0);
}
@@ -206,7 +206,7 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON(dev_priv->pm.irqs_disabled))
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
new_val = dev_priv->pm_irq_mask;
@@ -220,12 +220,12 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
}
}
-void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
snb_update_pm_irq(dev_priv, mask, mask);
}
-void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
snb_update_pm_irq(dev_priv, mask, 0);
}
@@ -264,7 +264,7 @@ static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON(dev_priv->pm.irqs_disabled))
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
new_val = dev_priv->pm_irq_mask;
@@ -278,12 +278,12 @@ static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
}
}
-void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
bdw_update_pm_irq(dev_priv, mask, mask);
}
-void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
bdw_update_pm_irq(dev_priv, mask, 0);
}
@@ -420,7 +420,7 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON(dev_priv->pm.irqs_disabled))
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
I915_WRITE(SDEIMR, sdeimr);
@@ -1090,6 +1090,53 @@ static bool intel_hpd_irq_event(struct drm_device *dev,
return true;
}
+static void i915_digport_work_func(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private, dig_port_work);
+ unsigned long irqflags;
+ u32 long_port_mask, short_port_mask;
+ struct intel_digital_port *intel_dig_port;
+ int i, ret;
+ u32 old_bits = 0;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ long_port_mask = dev_priv->long_hpd_port_mask;
+ dev_priv->long_hpd_port_mask = 0;
+ short_port_mask = dev_priv->short_hpd_port_mask;
+ dev_priv->short_hpd_port_mask = 0;
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ for (i = 0; i < I915_MAX_PORTS; i++) {
+ bool valid = false;
+ bool long_hpd = false;
+ intel_dig_port = dev_priv->hpd_irq_port[i];
+ if (!intel_dig_port || !intel_dig_port->hpd_pulse)
+ continue;
+
+ if (long_port_mask & (1 << i)) {
+ valid = true;
+ long_hpd = true;
+ } else if (short_port_mask & (1 << i))
+ valid = true;
+
+ if (valid) {
+ ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
+			if (ret) {
+				/* handler returned true: fall back to the legacy hpd path */
+ old_bits |= (1 << intel_dig_port->base.hpd_pin);
+ }
+ }
+ }
+
+ if (old_bits) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ dev_priv->hpd_event_bits |= old_bits;
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ schedule_work(&dev_priv->hotplug_work);
+ }
+}
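The hpd_pulse() hook consumed above is supplied per digital port by the DP code added later in this series. A sketch of the assumed contract, with an illustrative (not actual) implementation name:

/* Returning true asks the core to fall back to the legacy hotplug path. */
static bool example_hpd_pulse(struct intel_digital_port *intel_dig_port,
			      bool long_hpd)
{
	if (long_hpd)
		return true;	/* full detect cycle needed: use legacy path */

	/* short pulse: service the sink's DP IRQ vector here and be done */
	return false;
}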
+
/*
* Handle hotplug events outside the interrupt handler proper.
*/
@@ -1109,10 +1156,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
bool changed = false;
u32 hpd_event_bits;
- /* HPD irq before everything is fully set up. */
- if (!dev_priv->enable_hotplug_processing)
- return;
-
mutex_lock(&mode_config->mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
@@ -1122,6 +1165,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
dev_priv->hpd_event_bits = 0;
list_for_each_entry(connector, &mode_config->connector_list, head) {
intel_connector = to_intel_connector(connector);
+ if (!intel_connector->encoder)
+ continue;
intel_encoder = intel_connector->encoder;
if (intel_encoder->hpd_pin > HPD_NONE &&
dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
@@ -1152,6 +1197,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
list_for_each_entry(connector, &mode_config->connector_list, head) {
intel_connector = to_intel_connector(connector);
+ if (!intel_connector->encoder)
+ continue;
intel_encoder = intel_connector->encoder;
if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
if (intel_encoder->hot_plug)
@@ -1218,10 +1265,138 @@ static void notify_ring(struct drm_device *dev,
trace_i915_gem_request_complete(ring);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ intel_notify_mmio_flip(ring);
+
wake_up_all(&ring->irq_queue);
i915_queue_hangcheck(dev);
}
+static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
+ struct intel_rps_ei *rps_ei)
+{
+ u32 cz_ts, cz_freq_khz;
+ u32 render_count, media_count;
+ u32 elapsed_render, elapsed_media, elapsed_time;
+ u32 residency = 0;
+
+ cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
+ cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);
+
+ render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
+ media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);
+
+ if (rps_ei->cz_clock == 0) {
+ rps_ei->cz_clock = cz_ts;
+ rps_ei->render_c0 = render_count;
+ rps_ei->media_c0 = media_count;
+
+ return dev_priv->rps.cur_freq;
+ }
+
+ elapsed_time = cz_ts - rps_ei->cz_clock;
+ rps_ei->cz_clock = cz_ts;
+
+ elapsed_render = render_count - rps_ei->render_c0;
+ rps_ei->render_c0 = render_count;
+
+ elapsed_media = media_count - rps_ei->media_c0;
+ rps_ei->media_c0 = media_count;
+
+	/* Convert all the counters into a common unit of milliseconds */
+ elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
+ elapsed_render /= cz_freq_khz;
+ elapsed_media /= cz_freq_khz;
+
+	/*
+	 * Calculate the overall C0 residency percentage
+	 * only if the elapsed time is non-zero.
+	 */
+ if (elapsed_time) {
+ residency =
+ ((max(elapsed_render, elapsed_media) * 100)
+ / elapsed_time);
+ }
+
+ return residency;
+}
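A worked example of the arithmetic above, with illustrative numbers (not taken from any particular part): with dev_priv->mem_freq = 1600, cz_freq_khz = 1600 * 1000 / 4 = 400000. If, between two EI interrupts, the scaled elapsed_time comes to 100 (ms) while the render well accumulated 90 ms of C0 time and the media well 30 ms, then

	residency = max(90, 30) * 100 / 100 = 90

which meets VLV_RP_UP_EI_THRESHOLD (90), so the caller below will bump the frequency.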
+
+/**
+ * vlv_calc_delay_from_C0_counters - increase/decrease freq based on GPU
+ * busyness calculated from the C0 counters of the render & media power wells
+ * @dev_priv: DRM device private
+ */
+static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
+{
+ u32 residency_C0_up = 0, residency_C0_down = 0;
+ u8 new_delay, adj;
+
+ dev_priv->rps.ei_interrupt_count++;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ if (dev_priv->rps.up_ei.cz_clock == 0) {
+ vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
+ vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
+ return dev_priv->rps.cur_freq;
+ }
+
+	/*
+	 * To throttle down, C0 residency must stay below the down threshold
+	 * for consecutive EI intervals, so only sample the down EI counters
+	 * once every VLV_INT_COUNT_FOR_DOWN_EI interrupts.
+	 */
+ if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
+
+ dev_priv->rps.ei_interrupt_count = 0;
+
+ residency_C0_down = vlv_c0_residency(dev_priv,
+ &dev_priv->rps.down_ei);
+ } else {
+ residency_C0_up = vlv_c0_residency(dev_priv,
+ &dev_priv->rps.up_ei);
+ }
+
+ new_delay = dev_priv->rps.cur_freq;
+
+ adj = dev_priv->rps.last_adj;
+ /* C0 residency is greater than UP threshold. Increase Frequency */
+ if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
+ if (adj > 0)
+ adj *= 2;
+ else
+ adj = 1;
+
+ if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
+ new_delay = dev_priv->rps.cur_freq + adj;
+
+ /*
+ * For better performance, jump directly
+ * to RPe if we're below it.
+ */
+ if (new_delay < dev_priv->rps.efficient_freq)
+ new_delay = dev_priv->rps.efficient_freq;
+
+ } else if (!dev_priv->rps.ei_interrupt_count &&
+ (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
+ if (adj < 0)
+ adj *= 2;
+ else
+ adj = -1;
+		/*
+		 * C0 residency stayed below the down threshold for the whole
+		 * VLV_INT_COUNT_FOR_DOWN_EI period, so reduce the frequency.
+		 */
+ if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
+ new_delay = dev_priv->rps.cur_freq + adj;
+ }
+
+ return new_delay;
+}
+
static void gen6_pm_rps_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
@@ -1232,11 +1407,11 @@ static void gen6_pm_rps_work(struct work_struct *work)
spin_lock_irq(&dev_priv->irq_lock);
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
- if (IS_BROADWELL(dev_priv->dev))
- bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ if (INTEL_INFO(dev_priv->dev)->gen >= 8)
+ gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
else {
/* Make sure not to corrupt PMIMR state used by ringbuffer */
- snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
}
spin_unlock_irq(&dev_priv->irq_lock);
@@ -1252,8 +1427,10 @@ static void gen6_pm_rps_work(struct work_struct *work)
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
if (adj > 0)
adj *= 2;
- else
- adj = 1;
+ else {
+ /* CHV needs even encode values */
+ adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
+ }
new_delay = dev_priv->rps.cur_freq + adj;
/*
@@ -1268,11 +1445,15 @@ static void gen6_pm_rps_work(struct work_struct *work)
else
new_delay = dev_priv->rps.min_freq_softlimit;
adj = 0;
+ } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
+ new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
if (adj < 0)
adj *= 2;
- else
- adj = -1;
+ else {
+ /* CHV needs even encode values */
+ adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
+ }
new_delay = dev_priv->rps.cur_freq + adj;
} else { /* unknown event */
new_delay = dev_priv->rps.cur_freq;
@@ -1372,7 +1553,7 @@ static void ivybridge_parity_work(struct work_struct *work)
out:
WARN_ON(dev_priv->l3_parity.which_slice);
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
+ gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
mutex_unlock(&dev_priv->dev->struct_mutex);
@@ -1386,7 +1567,7 @@ static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
return;
spin_lock(&dev_priv->irq_lock);
- ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
+ gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
spin_unlock(&dev_priv->irq_lock);
iir &= GT_PARITY_ERROR(dev);
@@ -1441,7 +1622,7 @@ static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
spin_lock(&dev_priv->irq_lock);
dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
- bdw_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+ gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
spin_unlock(&dev_priv->irq_lock);
queue_work(dev_priv->wq, &dev_priv->rps.work);
@@ -1458,6 +1639,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
tmp = I915_READ(GEN8_GT_IIR(0));
if (tmp) {
+ I915_WRITE(GEN8_GT_IIR(0), tmp);
ret = IRQ_HANDLED;
rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
@@ -1465,7 +1647,6 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
notify_ring(dev, &dev_priv->ring[RCS]);
if (bcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[BCS]);
- I915_WRITE(GEN8_GT_IIR(0), tmp);
} else
DRM_ERROR("The master control interrupt lied (GT0)!\n");
}
@@ -1473,6 +1654,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
tmp = I915_READ(GEN8_GT_IIR(1));
if (tmp) {
+ I915_WRITE(GEN8_GT_IIR(1), tmp);
ret = IRQ_HANDLED;
vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
if (vcs & GT_RENDER_USER_INTERRUPT)
@@ -1480,7 +1662,6 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VCS2]);
- I915_WRITE(GEN8_GT_IIR(1), tmp);
} else
DRM_ERROR("The master control interrupt lied (GT1)!\n");
}
@@ -1488,10 +1669,10 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (master_ctl & GEN8_GT_PM_IRQ) {
tmp = I915_READ(GEN8_GT_IIR(2));
if (tmp & dev_priv->pm_rps_events) {
- ret = IRQ_HANDLED;
- gen8_rps_irq_handler(dev_priv, tmp);
I915_WRITE(GEN8_GT_IIR(2),
tmp & dev_priv->pm_rps_events);
+ ret = IRQ_HANDLED;
+ gen8_rps_irq_handler(dev_priv, tmp);
} else
DRM_ERROR("The master control interrupt lied (PM)!\n");
}
@@ -1499,11 +1680,11 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (master_ctl & GEN8_GT_VECS_IRQ) {
tmp = I915_READ(GEN8_GT_IIR(3));
if (tmp) {
+ I915_WRITE(GEN8_GT_IIR(3), tmp);
ret = IRQ_HANDLED;
vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VECS]);
- I915_WRITE(GEN8_GT_IIR(3), tmp);
} else
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
@@ -1514,23 +1695,104 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
+static int ilk_port_to_hotplug_shift(enum port port)
+{
+ switch (port) {
+ case PORT_A:
+ case PORT_E:
+ default:
+ return -1;
+ case PORT_B:
+ return 0;
+ case PORT_C:
+ return 8;
+ case PORT_D:
+ return 16;
+ }
+}
+
+static int g4x_port_to_hotplug_shift(enum port port)
+{
+ switch (port) {
+ case PORT_A:
+ case PORT_E:
+ default:
+ return -1;
+ case PORT_B:
+ return 17;
+ case PORT_C:
+ return 19;
+ case PORT_D:
+ return 21;
+ }
+}
+
+static inline enum port get_port_from_pin(enum hpd_pin pin)
+{
+ switch (pin) {
+ case HPD_PORT_B:
+ return PORT_B;
+ case HPD_PORT_C:
+ return PORT_C;
+ case HPD_PORT_D:
+ return PORT_D;
+ default:
+ return PORT_A; /* no hpd */
+ }
+}
+
static inline void intel_hpd_irq_handler(struct drm_device *dev,
u32 hotplug_trigger,
+ u32 dig_hotplug_reg,
const u32 *hpd)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
+ enum port port;
bool storm_detected = false;
+ bool queue_dig = false, queue_hp = false;
+ u32 dig_shift;
+ u32 dig_port_mask = 0;
if (!hotplug_trigger)
return;
- DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
- hotplug_trigger);
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
+ hotplug_trigger, dig_hotplug_reg);
spin_lock(&dev_priv->irq_lock);
for (i = 1; i < HPD_NUM_PINS; i++) {
+ if (!(hpd[i] & hotplug_trigger))
+ continue;
+
+ port = get_port_from_pin(i);
+ if (port && dev_priv->hpd_irq_port[port]) {
+ bool long_hpd;
+ if (IS_G4X(dev)) {
+ dig_shift = g4x_port_to_hotplug_shift(port);
+ long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
+ } else {
+ dig_shift = ilk_port_to_hotplug_shift(port);
+ long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
+ }
+
+ DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd);
+			/*
+			 * For long HPD pulses we want the digital queue to
+			 * run, but we still want HPD storm detection to
+			 * function.
+			 */
+ if (long_hpd) {
+ dev_priv->long_hpd_port_mask |= (1 << port);
+ dig_port_mask |= hpd[i];
+ } else {
+ /* for short HPD just trigger the digital queue */
+ dev_priv->short_hpd_port_mask |= (1 << port);
+ hotplug_trigger &= ~hpd[i];
+ }
+ queue_dig = true;
+ }
+ }
+
+ for (i = 1; i < HPD_NUM_PINS; i++) {
if (hpd[i] & hotplug_trigger &&
dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
/*
@@ -1550,7 +1812,11 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
continue;
- dev_priv->hpd_event_bits |= (1 << i);
+ if (!(dig_port_mask & hpd[i])) {
+ dev_priv->hpd_event_bits |= (1 << i);
+ queue_hp = true;
+ }
+
if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
dev_priv->hpd_stats[i].hpd_last_jiffies
+ msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
@@ -1579,7 +1845,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
* queue for otherwise the flush_work in the pageflip code will
* deadlock.
*/
- schedule_work(&dev_priv->hotplug_work);
+ if (queue_dig)
+ queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
+ if (queue_hp)
+ schedule_work(&dev_priv->hotplug_work);
}
static void gmbus_irq_handler(struct drm_device *dev)
@@ -1700,7 +1969,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
if (pm_iir & dev_priv->pm_rps_events) {
spin_lock(&dev_priv->irq_lock);
dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
- snb_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+ gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
spin_unlock(&dev_priv->irq_lock);
queue_work(dev_priv->wq, &dev_priv->rps.work);
@@ -1809,26 +2078,28 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
- if (IS_G4X(dev)) {
- u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
+ if (hotplug_status) {
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ /*
+ * Make sure hotplug status is cleared before we clear IIR, or else we
+ * may miss hotplug events.
+ */
+ POSTING_READ(PORT_HOTPLUG_STAT);
- intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_g4x);
- } else {
- u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+ if (IS_G4X(dev)) {
+ u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
- intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
- }
+ intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
+ } else {
+ u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
- if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
- hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
- dp_aux_irq_handler(dev);
+ intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
+ }
- I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
- /*
- * Make sure hotplug status is cleared before we clear IIR, or else we
- * may miss hotplug events.
- */
- POSTING_READ(PORT_HOTPLUG_STAT);
+ if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
+ hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
+ dp_aux_irq_handler(dev);
+ }
}
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
@@ -1839,29 +2110,36 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
irqreturn_t ret = IRQ_NONE;
while (true) {
- iir = I915_READ(VLV_IIR);
+ /* Find, clear, then process each source of interrupt */
+
gt_iir = I915_READ(GTIIR);
+ if (gt_iir)
+ I915_WRITE(GTIIR, gt_iir);
+
pm_iir = I915_READ(GEN6_PMIIR);
+ if (pm_iir)
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+
+ iir = I915_READ(VLV_IIR);
+ if (iir) {
+ /* Consume port before clearing IIR or we'll miss events */
+ if (iir & I915_DISPLAY_PORT_INTERRUPT)
+ i9xx_hpd_irq_handler(dev);
+ I915_WRITE(VLV_IIR, iir);
+ }
if (gt_iir == 0 && pm_iir == 0 && iir == 0)
goto out;
ret = IRQ_HANDLED;
- snb_gt_irq_handler(dev, dev_priv, gt_iir);
-
- valleyview_pipestat_irq_handler(dev, iir);
-
- /* Consume port. Then clear IIR or we'll miss events */
- if (iir & I915_DISPLAY_PORT_INTERRUPT)
- i9xx_hpd_irq_handler(dev);
-
+ if (gt_iir)
+ snb_gt_irq_handler(dev, dev_priv, gt_iir);
if (pm_iir)
gen6_rps_irq_handler(dev_priv, pm_iir);
-
- I915_WRITE(GTIIR, gt_iir);
- I915_WRITE(GEN6_PMIIR, pm_iir);
- I915_WRITE(VLV_IIR, iir);
+ /* Call regardless, as some status bits might not be
+ * signalled in iir */
+ valleyview_pipestat_irq_handler(dev, iir);
}
out:
@@ -1882,21 +2160,27 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
if (master_ctl == 0 && iir == 0)
break;
+ ret = IRQ_HANDLED;
+
I915_WRITE(GEN8_MASTER_IRQ, 0);
- gen8_gt_irq_handler(dev, dev_priv, master_ctl);
+ /* Find, clear, then process each source of interrupt */
- valleyview_pipestat_irq_handler(dev, iir);
+ if (iir) {
+ /* Consume port before clearing IIR or we'll miss events */
+ if (iir & I915_DISPLAY_PORT_INTERRUPT)
+ i9xx_hpd_irq_handler(dev);
+ I915_WRITE(VLV_IIR, iir);
+ }
- /* Consume port. Then clear IIR or we'll miss events */
- i9xx_hpd_irq_handler(dev);
+ gen8_gt_irq_handler(dev, dev_priv, master_ctl);
- I915_WRITE(VLV_IIR, iir);
+ /* Call regardless, as some status bits might not be
+ * signalled in iir */
+ valleyview_pipestat_irq_handler(dev, iir);
I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
-
- ret = IRQ_HANDLED;
}
return ret;
@@ -1907,8 +2191,12 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
+ u32 dig_hotplug_reg;
- intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
+ dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+ I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+
+ intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
if (pch_iir & SDE_AUDIO_POWER_MASK) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -2014,8 +2302,12 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
+ u32 dig_hotplug_reg;
+
+ dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+ I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
- intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
+ intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -2132,6 +2424,14 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
}
}
+/*
+ * To handle irqs with the minimum potential races with fresh interrupts, we:
+ * 1 - Disable Master Interrupt Control.
+ * 2 - Find the source(s) of the interrupt.
+ * 3 - Clear the Interrupt Identity bits (IIR).
+ * 4 - Process the interrupt(s) that had bits set in the IIRs.
+ * 5 - Re-enable Master Interrupt Control.
+ */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
@@ -2159,32 +2459,34 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
POSTING_READ(SDEIER);
}
+ /* Find, clear, then process each source of interrupt */
+
gt_iir = I915_READ(GTIIR);
if (gt_iir) {
+ I915_WRITE(GTIIR, gt_iir);
+ ret = IRQ_HANDLED;
if (INTEL_INFO(dev)->gen >= 6)
snb_gt_irq_handler(dev, dev_priv, gt_iir);
else
ilk_gt_irq_handler(dev, dev_priv, gt_iir);
- I915_WRITE(GTIIR, gt_iir);
- ret = IRQ_HANDLED;
}
de_iir = I915_READ(DEIIR);
if (de_iir) {
+ I915_WRITE(DEIIR, de_iir);
+ ret = IRQ_HANDLED;
if (INTEL_INFO(dev)->gen >= 7)
ivb_display_irq_handler(dev, de_iir);
else
ilk_display_irq_handler(dev, de_iir);
- I915_WRITE(DEIIR, de_iir);
- ret = IRQ_HANDLED;
}
if (INTEL_INFO(dev)->gen >= 6) {
u32 pm_iir = I915_READ(GEN6_PMIIR);
if (pm_iir) {
- gen6_rps_irq_handler(dev_priv, pm_iir);
I915_WRITE(GEN6_PMIIR, pm_iir);
ret = IRQ_HANDLED;
+ gen6_rps_irq_handler(dev_priv, pm_iir);
}
}
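The five steps listed in the comment above, reduced to a skeleton using the ironlake display IIR (illustrative only; the real handlers interleave several IIRs):

static irqreturn_t example_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	irqreturn_t ret = IRQ_NONE;
	u32 de_iir;

	/* 1: mask the master control so nothing new is forwarded */
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	de_iir = I915_READ(DEIIR);		/* 2: find */
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);	/* 3: clear before processing */
		ret = IRQ_HANDLED;
		/* 4: process; an event arriving now re-asserts IIR,
		 * so it fires again instead of being lost */
	}

	/* 5: re-enable the master control */
	I915_WRITE(DEIER, I915_READ(DEIER) | DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	return ret;
}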
@@ -2215,36 +2517,36 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
+ /* Find, clear, then process each source of interrupt */
+
ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
if (master_ctl & GEN8_DE_MISC_IRQ) {
tmp = I915_READ(GEN8_DE_MISC_IIR);
- if (tmp & GEN8_DE_MISC_GSE)
- intel_opregion_asle_intr(dev);
- else if (tmp)
- DRM_ERROR("Unexpected DE Misc interrupt\n");
- else
- DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
-
if (tmp) {
I915_WRITE(GEN8_DE_MISC_IIR, tmp);
ret = IRQ_HANDLED;
+ if (tmp & GEN8_DE_MISC_GSE)
+ intel_opregion_asle_intr(dev);
+ else
+ DRM_ERROR("Unexpected DE Misc interrupt\n");
-		}
+		} else
+			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
}
if (master_ctl & GEN8_DE_PORT_IRQ) {
tmp = I915_READ(GEN8_DE_PORT_IIR);
- if (tmp & GEN8_AUX_CHANNEL_A)
- dp_aux_irq_handler(dev);
- else if (tmp)
- DRM_ERROR("Unexpected DE Port interrupt\n");
- else
- DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
-
if (tmp) {
I915_WRITE(GEN8_DE_PORT_IIR, tmp);
ret = IRQ_HANDLED;
+ if (tmp & GEN8_AUX_CHANNEL_A)
+ dp_aux_irq_handler(dev);
+ else
+ DRM_ERROR("Unexpected DE Port interrupt\n");
-		}
+		} else
+			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
}
for_each_pipe(pipe) {
@@ -2254,33 +2556,32 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
continue;
pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
- if (pipe_iir & GEN8_PIPE_VBLANK)
- intel_pipe_handle_vblank(dev, pipe);
-
- if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
- intel_prepare_page_flip(dev, pipe);
- intel_finish_page_flip_plane(dev, pipe);
- }
+ if (pipe_iir) {
+ ret = IRQ_HANDLED;
+ I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
+ if (pipe_iir & GEN8_PIPE_VBLANK)
+ intel_pipe_handle_vblank(dev, pipe);
- if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
- hsw_pipe_crc_irq_handler(dev, pipe);
+ if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
+ intel_prepare_page_flip(dev, pipe);
+ intel_finish_page_flip_plane(dev, pipe);
+ }
- if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
- if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
- false))
- DRM_ERROR("Pipe %c FIFO underrun\n",
- pipe_name(pipe));
- }
+ if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
+ hsw_pipe_crc_irq_handler(dev, pipe);
- if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
- DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
- pipe_name(pipe),
- pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
- }
+ if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
+ if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
+ false))
+ DRM_ERROR("Pipe %c FIFO underrun\n",
+ pipe_name(pipe));
+ }
- if (pipe_iir) {
- ret = IRQ_HANDLED;
- I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
+ if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
+				DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
+ pipe_name(pipe),
+ pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
+ }
} else
DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
}
@@ -2292,13 +2593,13 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
* on older pch-split platforms. But this needs testing.
*/
u32 pch_iir = I915_READ(SDEIIR);
-
- cpt_irq_handler(dev, pch_iir);
-
if (pch_iir) {
I915_WRITE(SDEIIR, pch_iir);
ret = IRQ_HANDLED;
- }
+ cpt_irq_handler(dev, pch_iir);
+ } else
+ DRM_ERROR("The master control interrupt lied (SDE)!\n");
}
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
@@ -2753,12 +3054,7 @@ static bool
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
{
if (INTEL_INFO(dev)->gen >= 8) {
- /*
- * FIXME: gen8 semaphore support - currently we don't emit
- * semaphores on bdw anyway, but this needs to be addressed when
- * we merge that code.
- */
- return false;
+ return (ipehr >> 23) == 0x1c;
} else {
ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
@@ -2767,19 +3063,20 @@ ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
}
static struct intel_engine_cs *
-semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr)
+semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_engine_cs *signaller;
int i;
if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
- /*
- * FIXME: gen8 semaphore support - currently we don't emit
- * semaphores on bdw anyway, but this needs to be addressed when
- * we merge that code.
- */
- return NULL;
+ for_each_ring(signaller, dev_priv, i) {
+ if (ring == signaller)
+ continue;
+
+ if (offset == signaller->semaphore.signal_ggtt[ring->id])
+ return signaller;
+ }
} else {
u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
@@ -2792,8 +3089,8 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr)
}
}
- DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x\n",
- ring->id, ipehr);
+ DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
+ ring->id, ipehr, offset);
return NULL;
}
@@ -2803,7 +3100,8 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 cmd, ipehr, head;
- int i;
+ u64 offset = 0;
+ int i, backwards;
ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
@@ -2812,13 +3110,15 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
/*
* HEAD is likely pointing to the dword after the actual command,
* so scan backwards until we find the MBOX. But limit it to just 3
- * dwords. Note that we don't care about ACTHD here since that might
+ * or 4 dwords depending on the semaphore wait command size.
+ * Note that we don't care about ACTHD here since that might
 	 * point at a batch, and semaphores are always emitted into the
* ringbuffer itself.
*/
head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
- for (i = 4; i; --i) {
+ for (i = backwards; i; --i) {
/*
* Be paranoid and presume the hw has gone off into the wild -
* our ring is smaller than what the hardware (and hence
@@ -2838,7 +3138,12 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
return NULL;
*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
- return semaphore_wait_to_signaller_ring(ring, ipehr);
+ if (INTEL_INFO(ring->dev)->gen >= 8) {
+ offset = ioread32(ring->buffer->virtual_start + head + 12);
+ offset <<= 32;
+		offset |= ioread32(ring->buffer->virtual_start + head + 8);
+ }
+ return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
}
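For reference, the gen8+ packet layout the scan above assumes (per the MI_SEMAPHORE_WAIT definition added to i915_reg.h by this patch) is a 4-dword command:

	dword 0: MI_SEMAPHORE_WAIT | MI_SEMAPHORE_POLL | MI_SEMAPHORE_SAD_GTE_SDD
	dword 1: semaphore data (the seqno to compare against)
	dword 2: semaphore GGTT address, low 32 bits
	dword 3: semaphore GGTT address, high 32 bits

HEAD usually points one dword past the command, hence backwards = 5 on gen8 versus 4 for the 3-dword gen6/gen7 MI_SEMAPHORE_MBOX, the seqno read at head + 4, and the 64-bit address assembled from head + 8 and head + 12 before being matched against each ring's semaphore.signal_ggtt[] slot.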
static int semaphore_passed(struct intel_engine_cs *ring)
@@ -3159,7 +3464,9 @@ static void gen8_irq_reset(struct drm_device *dev)
gen8_gt_irq_reset(dev_priv);
for_each_pipe(pipe)
- GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+ if (intel_display_power_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(pipe)))
+ GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
GEN5_IRQ_RESET(GEN8_DE_PORT_);
GEN5_IRQ_RESET(GEN8_DE_MISC_);
@@ -3168,6 +3475,18 @@ static void gen8_irq_reset(struct drm_device *dev)
ibx_irq_reset(dev);
}
+void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
+ ~dev_priv->de_irq_mask[PIPE_B]);
+ GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
+ ~dev_priv->de_irq_mask[PIPE_C]);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
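A plausible caller, assuming the power-well code (outside this diff) invokes the new hook once the well is powered up again and the pipe B/C interrupt registers have lost their contents:

static void example_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	/* pipe B/C interrupt state was lost with the well; rearm it */
	if (IS_BROADWELL(dev_priv->dev))
		gen8_irq_power_well_post_enable(dev_priv);
}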
+
static void cherryview_irq_preinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3492,8 +3811,11 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
for_each_pipe(pipe)
- GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, dev_priv->de_irq_mask[pipe],
- de_pipe_enables);
+ if (intel_display_power_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(pipe)))
+ GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
+ dev_priv->de_irq_mask[pipe],
+ de_pipe_enables);
GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
}
@@ -4324,12 +4646,17 @@ void intel_irq_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
+ INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
/* Let's track the enabled rps events */
- dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
+ if (IS_VALLEYVIEW(dev))
+		/* WaGsvRC0ResidencyMethod:VLV */
+ dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
+ else
+ dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
setup_timer(&dev_priv->gpu_error.hangcheck_timer,
i915_hangcheck_elapsed,
@@ -4339,6 +4666,9 @@ void intel_irq_init(struct drm_device *dev)
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+ /* Haven't installed the IRQ handler yet */
+ dev_priv->pm._irqs_disabled = true;
+
if (IS_GEN2(dev)) {
dev->max_vblank_count = 0;
dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
@@ -4426,7 +4756,9 @@ void intel_hpd_init(struct drm_device *dev)
list_for_each_entry(connector, &mode_config->connector_list, head) {
struct intel_connector *intel_connector = to_intel_connector(connector);
connector->polled = intel_connector->polled;
- if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
+ if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ if (intel_connector->mst_port)
connector->polled = DRM_CONNECTOR_POLL_HPD;
}
@@ -4444,7 +4776,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
dev->driver->irq_uninstall(dev);
- dev_priv->pm.irqs_disabled = true;
+ dev_priv->pm._irqs_disabled = true;
}
/* Restore interrupts so we can recover from runtime PM. */
@@ -4452,7 +4784,7 @@ void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- dev_priv->pm.irqs_disabled = false;
+ dev_priv->pm._irqs_disabled = false;
dev->driver->irq_preinstall(dev);
dev->driver->irq_postinstall(dev);
}
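The intel_irqs_enabled() helper the WARN_ONs switched to is not shown in this diff; given the renamed flag above, it is presumably just the accessor:

/* sketch, assuming the helper simply wraps the renamed flag */
static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	return !dev_priv->pm._irqs_disabled;
}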
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index d05a2afa17dc..62ee8308d682 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -37,7 +37,7 @@ struct i915_params i915 __read_mostly = {
.enable_fbc = -1,
.enable_hangcheck = true,
.enable_ppgtt = -1,
- .enable_psr = 0,
+ .enable_psr = 1,
.preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
.disable_power_well = 1,
.enable_ips = 1,
@@ -48,6 +48,8 @@ struct i915_params i915 __read_mostly = {
.disable_display = 0,
.enable_cmd_parser = 1,
.disable_vtd_wa = 0,
+ .use_mmio_flip = 0,
+ .mmio_debug = 0,
};
module_param_named(modeset, i915.modeset, int, 0400);
@@ -117,7 +119,7 @@ MODULE_PARM_DESC(enable_ppgtt,
"(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
module_param_named(enable_psr, i915.enable_psr, int, 0600);
-MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
+MODULE_PARM_DESC(enable_psr, "Enable PSR (default: true)");
module_param_named(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support,
@@ -156,3 +158,12 @@ MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)"
module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
MODULE_PARM_DESC(enable_cmd_parser,
"Enable command parsing (1=enabled [default], 0=disabled)");
+
+module_param_named(use_mmio_flip, i915.use_mmio_flip, int, 0600);
+MODULE_PARM_DESC(use_mmio_flip,
+ "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
+
+module_param_named(mmio_debug, i915.mmio_debug, bool, 0600);
+MODULE_PARM_DESC(mmio_debug,
+ "Enable the MMIO debug code (default: false). This may negatively "
+ "affect performance.");
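How such a tristate is typically consumed (a sketch; the real decision function lives in intel_display.c and also considers things like userptr objects, so treat the name and body as illustrative):

static bool example_use_mmio_flip(void)
{
	if (i915.use_mmio_flip < 0)
		return false;		/* -1: never */
	else if (i915.use_mmio_flip > 0)
		return true;		/* 1: always */
	else
		return false;		/* 0: driver discretion, CS flips here */
}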
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a5bab61bfc00..fe5c27630e95 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -29,8 +29,8 @@
#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
-#define _PIPE3(pipe, a, b, c) (pipe < 2 ? _PIPE(pipe, a, b) : c)
-#define _PORT3(port, a, b, c) (port < 2 ? _PORT(port, a, b) : c)
+#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
+			       (pipe) == PIPE_B ? (b) : (c))
+#define _PORT3(port, a, b, c) ((port) == PORT_A ? (a) : \
+			       (port) == PORT_B ? (b) : (c))
#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a) ((a) << 16)
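The masked-bit helpers above pair a 16-bit value with a write-enable mask in the upper 16 bits, so a single register write can touch one bit and leave the rest alone (the new _PIPE3() form is exercised the same way later in this patch, e.g. by DPLL(pipe)). A short illustration with a hypothetical bit:

#define EXAMPLE_MODE_BIT	(1 << 3)	/* hypothetical, for illustration */

static void example_masked_write(struct drm_i915_private *dev_priv, u32 reg)
{
	/* writes 0x00080008: select bit 3 in the high half, set it in the low */
	I915_WRITE(reg, _MASKED_BIT_ENABLE(EXAMPLE_MODE_BIT));

	/* writes 0x00080000: same select mask, value 0, so only bit 3 clears */
	I915_WRITE(reg, _MASKED_BIT_DISABLE(EXAMPLE_MODE_BIT));
}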
@@ -240,7 +240,7 @@
#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
-#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
+#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6, gen7 */
#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
#define MI_SEMAPHORE_UPDATE (1<<21)
#define MI_SEMAPHORE_COMPARE (1<<20)
@@ -266,6 +266,11 @@
#define MI_RESTORE_EXT_STATE_EN (1<<2)
#define MI_FORCE_RESTORE (1<<1)
#define MI_RESTORE_INHIBIT (1<<0)
+#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */
+#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
+#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
+#define MI_SEMAPHORE_POLL (1<<15)
+#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
@@ -360,6 +365,7 @@
#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
#define PIPE_CONTROL_NOTIFY (1<<8)
+#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */
#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4)
#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3)
#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2)
@@ -525,10 +531,21 @@ enum punit_power_well {
#define PUNIT_REG_GPU_FREQ_STS 0xd8
#define GENFREQSTATUS (1<<0)
#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
+#define PUNIT_REG_CZ_TIMESTAMP 0xce
#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
#define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */
+#define PUNIT_GPU_STATUS_REG 0xdb
+#define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT 16
+#define PUNIT_GPU_STATUS_MAX_FREQ_MASK 0xff
+#define PUNIT_GPU_STATUS_GFX_MIN_FREQ_SHIFT	8
+#define PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK 0xff
+
+#define PUNIT_GPU_DUTYCYCLE_REG 0xdf
+#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT 8
+#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK 0xff
+
#define IOSF_NC_FB_GFX_FREQ_FUSE 0x1c
#define FB_GFX_MAX_FREQ_FUSE_SHIFT 3
#define FB_GFX_MAX_FREQ_FUSE_MASK 0x000007f8
@@ -540,6 +557,11 @@ enum punit_power_well {
#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
+#define VLV_CZ_CLOCK_TO_MILLI_SEC 100000
+#define VLV_RP_UP_EI_THRESHOLD 90
+#define VLV_RP_DOWN_EI_THRESHOLD 70
+#define VLV_INT_COUNT_FOR_DOWN_EI 5
+
 /* vlv2 north clock */
#define CCK_FUSE_REG 0x8
#define CCK_FUSE_HPLL_FREQ_MASK 0x3
@@ -574,6 +596,11 @@ enum punit_power_well {
#define DSI_PLL_M1_DIV_SHIFT 0
#define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
#define CCK_DISPLAY_CLOCK_CONTROL 0x6b
+#define DISPLAY_TRUNK_FORCE_ON (1 << 17)
+#define DISPLAY_TRUNK_FORCE_OFF (1 << 16)
+#define DISPLAY_FREQUENCY_STATUS (0x1f << 8)
+#define DISPLAY_FREQUENCY_STATUS_SHIFT 8
+#define DISPLAY_FREQUENCY_VALUES (0x1f << 0)
/**
* DOC: DPIO
@@ -761,6 +788,8 @@ enum punit_power_well {
#define _VLV_PCS_DW8_CH0 0x8220
#define _VLV_PCS_DW8_CH1 0x8420
+#define CHV_PCS_USEDCLKCHANNEL_OVRRIDE (1 << 20)
+#define CHV_PCS_USEDCLKCHANNEL (1 << 21)
#define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1)
#define _VLV_PCS01_DW8_CH0 0x0220
@@ -869,6 +898,16 @@ enum punit_power_well {
#define DPIO_CHV_PROP_COEFF_SHIFT 0
#define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1)
+#define _CHV_CMN_DW5_CH0 0x8114
+#define CHV_BUFRIGHTENA1_DISABLE (0 << 20)
+#define CHV_BUFRIGHTENA1_NORMAL (1 << 20)
+#define CHV_BUFRIGHTENA1_FORCE (3 << 20)
+#define CHV_BUFRIGHTENA1_MASK (3 << 20)
+#define CHV_BUFLEFTENA1_DISABLE (0 << 22)
+#define CHV_BUFLEFTENA1_NORMAL (1 << 22)
+#define CHV_BUFLEFTENA1_FORCE (3 << 22)
+#define CHV_BUFLEFTENA1_MASK (3 << 22)
+
#define _CHV_CMN_DW13_CH0 0x8134
#define _CHV_CMN_DW0_CH1 0x8080
#define DPIO_CHV_S1_DIV_SHIFT 21
@@ -883,8 +922,21 @@ enum punit_power_well {
#define _CHV_CMN_DW1_CH1 0x8084
#define DPIO_AFC_RECAL (1 << 14)
#define DPIO_DCLKP_EN (1 << 13)
+#define CHV_BUFLEFTENA2_DISABLE (0 << 17) /* CL2 DW1 only */
+#define CHV_BUFLEFTENA2_NORMAL (1 << 17) /* CL2 DW1 only */
+#define CHV_BUFLEFTENA2_FORCE (3 << 17) /* CL2 DW1 only */
+#define CHV_BUFLEFTENA2_MASK (3 << 17) /* CL2 DW1 only */
+#define CHV_BUFRIGHTENA2_DISABLE (0 << 19) /* CL2 DW1 only */
+#define CHV_BUFRIGHTENA2_NORMAL (1 << 19) /* CL2 DW1 only */
+#define CHV_BUFRIGHTENA2_FORCE (3 << 19) /* CL2 DW1 only */
+#define CHV_BUFRIGHTENA2_MASK (3 << 19) /* CL2 DW1 only */
#define CHV_CMN_DW14(ch) _PIPE(ch, _CHV_CMN_DW14_CH0, _CHV_CMN_DW1_CH1)
+#define _CHV_CMN_DW19_CH0 0x814c
+#define _CHV_CMN_DW6_CH1 0x8098
+#define CHV_CMN_USEDCLKCHANNEL (1 << 13)
+#define CHV_CMN_DW19(ch) _PIPE(ch, _CHV_CMN_DW19_CH0, _CHV_CMN_DW6_CH1)
+
#define CHV_CMN_DW30 0x8178
#define DPIO_LRC_BYPASS (1 << 3)
@@ -933,6 +985,7 @@ enum punit_power_well {
#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32
#define GEN7_FENCE_MAX_PITCH_VAL 0x0800
+
/* control register for cpu gtt access */
#define TILECTL 0x101000
#define TILECTL_SWZCTL (1 << 0)
@@ -1170,6 +1223,8 @@ enum punit_power_well {
#define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8)
#define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac)
#define VLV_PCBR (VLV_DISPLAY_BASE + 0x2120)
+#define VLV_PCBR_ADDR_SHIFT 12
+
#define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
#define EIR 0x020b0
#define EMR 0x020b4
@@ -1570,11 +1625,10 @@ enum punit_power_well {
/*
* Clock control & power management
*/
-#define DPLL_A_OFFSET 0x6014
-#define DPLL_B_OFFSET 0x6018
-#define CHV_DPLL_C_OFFSET 0x6030
-#define DPLL(pipe) (dev_priv->info.dpll_offsets[pipe] + \
- dev_priv->info.display_mmio_offset)
+#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
+#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
+#define _CHV_DPLL_C (dev_priv->info.display_mmio_offset + 0x6030)
+#define DPLL(pipe) _PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
#define VGA0 0x6000
#define VGA1 0x6004
@@ -1662,11 +1716,10 @@ enum punit_power_well {
#define SDVO_MULTIPLIER_SHIFT_HIRES 4
#define SDVO_MULTIPLIER_SHIFT_VGA 0
-#define DPLL_A_MD_OFFSET 0x601c /* 965+ only */
-#define DPLL_B_MD_OFFSET 0x6020 /* 965+ only */
-#define CHV_DPLL_C_MD_OFFSET 0x603c
-#define DPLL_MD(pipe) (dev_priv->info.dpll_md_offsets[pipe] + \
- dev_priv->info.display_mmio_offset)
+#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
+#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
+#define _CHV_DPLL_C_MD (dev_priv->info.display_mmio_offset + 0x603c)
+#define DPLL_MD(pipe) _PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
/*
* UDI pixel divider, controlling how many pixels are stuffed into a packet.
@@ -2231,7 +2284,7 @@ enum punit_power_well {
/* Same as Haswell, but 72064 bytes now. */
#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE)
-
+#define CHV_CLK_CTL1 0x101100
#define VLV_CLK_CTL2 0x101104
#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
@@ -2376,6 +2429,7 @@ enum punit_power_well {
#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0)
#define EDP_PSR_ENABLE (1<<31)
+#define BDW_PSR_SINGLE_FRAME (1<<30)
#define EDP_PSR_LINK_DISABLE (0<<27)
#define EDP_PSR_LINK_STANDBY (1<<27)
#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25)
@@ -2533,8 +2587,14 @@ enum punit_power_well {
#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
+#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21)
+#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21)
#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
+#define PORTC_HOTPLUG_INT_LONG_PULSE (2 << 19)
+#define PORTC_HOTPLUG_INT_SHORT_PULSE (1 << 19)
#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
+#define PORTB_HOTPLUG_INT_LONG_PULSE (2 << 17)
+#define PORTB_HOTPLUG_INT_SHORT_PULSE	(1 << 17)
/* CRT/TV common between gen3+ */
#define CRT_HOTPLUG_INT_STATUS (1 << 11)
#define TV_HOTPLUG_INT_STATUS (1 << 10)
@@ -2588,7 +2648,7 @@ enum punit_power_well {
#define PORT_DFT_I9XX 0x61150
#define DC_BALANCE_RESET (1 << 25)
-#define PORT_DFT2_G4X 0x61154
+#define PORT_DFT2_G4X (dev_priv->info.display_mmio_offset + 0x61154)
#define DC_BALANCE_RESET_VLV (1 << 31)
#define PIPE_SCRAMBLE_RESET_MASK (0x3 << 0)
#define PIPE_B_SCRAMBLE_RESET (1 << 1)
@@ -4630,6 +4690,8 @@ enum punit_power_well {
#define GEN7_L3CNTLREG1 0xB01C
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
#define GEN7_L3AGDIS (1<<19)
+#define GEN7_L3CNTLREG2 0xB020
+#define GEN7_L3CNTLREG3 0xB024
#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
@@ -4876,8 +4938,7 @@ enum punit_power_well {
#define _PCH_TRANSA_LINK_M2 0xe0048
#define _PCH_TRANSA_LINK_N2 0xe004c
-/* Per-transcoder DIP controls */
-
+/* Per-transcoder DIP controls (PCH) */
#define _VIDEO_DIP_CTL_A 0xe0200
#define _VIDEO_DIP_DATA_A 0xe0208
#define _VIDEO_DIP_GCP_A 0xe0210
@@ -4890,6 +4951,7 @@ enum punit_power_well {
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
+/* Per-transcoder DIP controls (VLV) */
#define VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200)
#define VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208)
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210)
@@ -4898,12 +4960,19 @@ enum punit_power_well {
#define VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174)
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178)
+#define CHV_VIDEO_DIP_CTL_C (VLV_DISPLAY_BASE + 0x611f0)
+#define CHV_VIDEO_DIP_DATA_C (VLV_DISPLAY_BASE + 0x611f4)
+#define CHV_VIDEO_DIP_GDCP_PAYLOAD_C (VLV_DISPLAY_BASE + 0x611f8)
+
#define VLV_TVIDEO_DIP_CTL(pipe) \
- _PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
+ _PIPE3((pipe), VLV_VIDEO_DIP_CTL_A, \
+ VLV_VIDEO_DIP_CTL_B, CHV_VIDEO_DIP_CTL_C)
#define VLV_TVIDEO_DIP_DATA(pipe) \
- _PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B)
+ _PIPE3((pipe), VLV_VIDEO_DIP_DATA_A, \
+ VLV_VIDEO_DIP_DATA_B, CHV_VIDEO_DIP_DATA_C)
#define VLV_TVIDEO_DIP_GCP(pipe) \
- _PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B)
+ _PIPE3((pipe), VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \
+ VLV_VIDEO_DIP_GDCP_PAYLOAD_B, CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
/* Haswell DIP controls */
#define HSW_VIDEO_DIP_CTL_A 0x60200
@@ -5334,6 +5403,7 @@ enum punit_power_well {
#define VLV_GTLC_ALLOWWAKEERR (1 << 1)
#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
+#define VLV_GTLC_SURVIVABILITY_REG 0x130098
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_KERNEL 0x1
#define FORCEWAKE_USER 0x2
@@ -5471,6 +5541,12 @@ enum punit_power_well {
GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT)
+#define CHV_CZ_CLOCK_FREQ_MODE_200 200
+#define CHV_CZ_CLOCK_FREQ_MODE_267 267
+#define CHV_CZ_CLOCK_FREQ_MODE_320 320
+#define CHV_CZ_CLOCK_FREQ_MODE_333 333
+#define CHV_CZ_CLOCK_FREQ_MODE_400 400
+
#define GEN7_GT_SCRATCH_BASE 0x4F100
#define GEN7_GT_SCRATCH_REG_NUM 8
@@ -5481,6 +5557,8 @@ enum punit_power_well {
#define GEN6_GT_GFX_RC6_LOCKED 0x138104
#define VLV_COUNTER_CONTROL 0x138104
#define VLV_COUNT_RANGE_HIGH (1<<15)
+#define VLV_MEDIA_RC0_COUNT_EN (1<<5)
+#define VLV_RENDER_RC0_COUNT_EN (1<<4)
#define VLV_MEDIA_RC6_COUNT_EN (1<<1)
#define VLV_RENDER_RC6_COUNT_EN (1<<0)
#define GEN6_GT_GFX_RC6 0x138108
@@ -5489,6 +5567,8 @@ enum punit_power_well {
#define GEN6_GT_GFX_RC6p 0x13810C
#define GEN6_GT_GFX_RC6pp 0x138110
+#define VLV_RENDER_C0_COUNT_REG 0x138118
+#define VLV_MEDIA_C0_COUNT_REG 0x13811C
#define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31)
@@ -5723,6 +5803,7 @@ enum punit_power_well {
#define TRANS_DDI_FUNC_ENABLE (1<<31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
#define TRANS_DDI_PORT_MASK (7<<28)
+#define TRANS_DDI_PORT_SHIFT 28
#define TRANS_DDI_SELECT_PORT(x) ((x)<<28)
#define TRANS_DDI_PORT_NONE (0<<28)
#define TRANS_DDI_MODE_SELECT_MASK (7<<24)
@@ -5743,6 +5824,7 @@ enum punit_power_well {
#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12)
#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
+#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1<<8)
#define TRANS_DDI_BFI_ENABLE (1<<4)
/* DisplayPort Transport Control */
@@ -5752,6 +5834,7 @@ enum punit_power_well {
#define DP_TP_CTL_ENABLE (1<<31)
#define DP_TP_CTL_MODE_SST (0<<27)
#define DP_TP_CTL_MODE_MST (1<<27)
+#define DP_TP_CTL_FORCE_ACT (1<<25)
#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18)
#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
@@ -5766,15 +5849,19 @@ enum punit_power_well {
#define DP_TP_STATUS_A 0x64044
#define DP_TP_STATUS_B 0x64144
#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
-#define DP_TP_STATUS_IDLE_DONE (1<<25)
-#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
+#define DP_TP_STATUS_IDLE_DONE (1<<25)
+#define DP_TP_STATUS_ACT_SENT (1<<24)
+#define DP_TP_STATUS_MODE_STATUS_MST (1<<23)
+#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC2 (3 << 8)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC1 (3 << 4)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC0 (3 << 0)
/* DDI Buffer Control */
#define DDI_BUF_CTL_A 0x64000
#define DDI_BUF_CTL_B 0x64100
#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
#define DDI_BUF_CTL_ENABLE (1<<31)
-/* Haswell */
#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
@@ -5784,16 +5871,6 @@ enum punit_power_well {
#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
-/* Broadwell */
-#define DDI_BUF_EMP_400MV_0DB_BDW (0<<24) /* Sel0 */
-#define DDI_BUF_EMP_400MV_3_5DB_BDW (1<<24) /* Sel1 */
-#define DDI_BUF_EMP_400MV_6DB_BDW (2<<24) /* Sel2 */
-#define DDI_BUF_EMP_600MV_0DB_BDW (3<<24) /* Sel3 */
-#define DDI_BUF_EMP_600MV_3_5DB_BDW (4<<24) /* Sel4 */
-#define DDI_BUF_EMP_600MV_6DB_BDW (5<<24) /* Sel5 */
-#define DDI_BUF_EMP_800MV_0DB_BDW (6<<24) /* Sel6 */
-#define DDI_BUF_EMP_800MV_3_5DB_BDW (7<<24) /* Sel7 */
-#define DDI_BUF_EMP_1200MV_0DB_BDW (8<<24) /* Sel8 */
#define DDI_BUF_EMP_MASK (0xf<<24)
#define DDI_BUF_PORT_REVERSAL (1<<16)
#define DDI_BUF_IS_IDLE (1<<7)
@@ -5861,10 +5938,12 @@ enum punit_power_well {
/* WRPLL */
#define WRPLL_CTL1 0x46040
#define WRPLL_CTL2 0x46060
+#define WRPLL_CTL(pll) ((pll) == 0 ? WRPLL_CTL1 : WRPLL_CTL2)
#define WRPLL_PLL_ENABLE (1<<31)
-#define WRPLL_PLL_SELECT_SSC (0x01<<28)
-#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28)
-#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
+#define WRPLL_PLL_SSC (1<<28)
+#define WRPLL_PLL_NON_SSC (2<<28)
+#define WRPLL_PLL_LCPLL (3<<28)
+#define WRPLL_PLL_REF_MASK (3<<28)
/* WRPLL divider programming */
#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
#define WRPLL_DIVIDER_REF_MASK (0xff)
@@ -5883,6 +5962,7 @@ enum punit_power_well {
#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
#define PORT_CLK_SEL_LCPLL_810 (2<<29)
#define PORT_CLK_SEL_SPLL (3<<29)
+#define PORT_CLK_SEL_WRPLL(pll) (((pll)+4)<<29)
#define PORT_CLK_SEL_WRPLL1 (4<<29)
#define PORT_CLK_SEL_WRPLL2 (5<<29)
#define PORT_CLK_SEL_NONE (7<<29)
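The id-based WRPLL_CTL(pll) and PORT_CLK_SEL_WRPLL(pll) macros must stay in
sync with the legacy per-PLL names they generalize. A compile-time
cross-check, purely illustrative and not part of the patch:

static inline void wrpll_macro_selftest(void)
{
	BUILD_BUG_ON(WRPLL_CTL(0) != WRPLL_CTL1);
	BUILD_BUG_ON(WRPLL_CTL(1) != WRPLL_CTL2);
	BUILD_BUG_ON(PORT_CLK_SEL_WRPLL(0) != PORT_CLK_SEL_WRPLL1);
	BUILD_BUG_ON(PORT_CLK_SEL_WRPLL(1) != PORT_CLK_SEL_WRPLL2);
}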
@@ -5924,7 +6004,10 @@ enum punit_power_well {
#define LCPLL_CD_SOURCE_FCLK (1<<21)
#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19)
-#define D_COMP (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
+/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
+ * since on HSW we can't write to it using I915_WRITE. */
+#define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
+#define D_COMP_BDW 0x138144
#define D_COMP_RCOMP_IN_PROGRESS (1<<9)
#define D_COMP_COMP_FORCE (1<<8)
#define D_COMP_COMP_DISABLE (1<<0)
@@ -6005,7 +6088,8 @@ enum punit_power_well {
#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
#define _MIPIB_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
-#define MIPI_PORT_CTRL(pipe) _PIPE(pipe, _MIPIA_PORT_CTRL, _MIPIB_PORT_CTRL)
+#define MIPI_PORT_CTRL(tc) _TRANSCODER(tc, _MIPIA_PORT_CTRL, \
+ _MIPIB_PORT_CTRL)
#define DPI_ENABLE (1 << 31) /* A + B */
#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27)
@@ -6047,18 +6131,20 @@ enum punit_power_well {
#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
#define _MIPIB_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
-#define MIPI_TEARING_CTRL(pipe) _PIPE(pipe, _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
+#define MIPI_TEARING_CTRL(tc) _TRANSCODER(tc, \
+ _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
#define TEARING_EFFECT_DELAY_SHIFT 0
#define TEARING_EFFECT_DELAY_MASK (0xffff << 0)
/* XXX: all bits reserved */
-#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0)
+#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0)
/* MIPI DSI Controller and D-PHY registers */
-#define _MIPIA_DEVICE_READY (VLV_DISPLAY_BASE + 0xb000)
-#define _MIPIB_DEVICE_READY (VLV_DISPLAY_BASE + 0xb800)
-#define MIPI_DEVICE_READY(pipe) _PIPE(pipe, _MIPIA_DEVICE_READY, _MIPIB_DEVICE_READY)
+#define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000)
+#define _MIPIB_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800)
+#define MIPI_DEVICE_READY(tc) _TRANSCODER(tc, _MIPIA_DEVICE_READY, \
+ _MIPIB_DEVICE_READY)
#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
#define ULPS_STATE_MASK (3 << 1)
#define ULPS_STATE_ENTER (2 << 1)
@@ -6066,12 +6152,14 @@ enum punit_power_well {
#define ULPS_STATE_NORMAL_OPERATION (0 << 1)
#define DEVICE_READY (1 << 0)
-#define _MIPIA_INTR_STAT (VLV_DISPLAY_BASE + 0xb004)
-#define _MIPIB_INTR_STAT (VLV_DISPLAY_BASE + 0xb804)
-#define MIPI_INTR_STAT(pipe) _PIPE(pipe, _MIPIA_INTR_STAT, _MIPIB_INTR_STAT)
-#define _MIPIA_INTR_EN (VLV_DISPLAY_BASE + 0xb008)
-#define _MIPIB_INTR_EN (VLV_DISPLAY_BASE + 0xb808)
-#define MIPI_INTR_EN(pipe) _PIPE(pipe, _MIPIA_INTR_EN, _MIPIB_INTR_EN)
+#define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004)
+#define _MIPIB_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804)
+#define MIPI_INTR_STAT(tc) _TRANSCODER(tc, _MIPIA_INTR_STAT, \
+ _MIPIB_INTR_STAT)
+#define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008)
+#define _MIPIB_INTR_EN (dev_priv->mipi_mmio_base + 0xb808)
+#define MIPI_INTR_EN(tc) _TRANSCODER(tc, _MIPIA_INTR_EN, \
+ _MIPIB_INTR_EN)
#define TEARING_EFFECT (1 << 31)
#define SPL_PKT_SENT_INTERRUPT (1 << 30)
#define GEN_READ_DATA_AVAIL (1 << 29)
@@ -6105,9 +6193,10 @@ enum punit_power_well {
#define RXSOT_SYNC_ERROR (1 << 1)
#define RXSOT_ERROR (1 << 0)
-#define _MIPIA_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb00c)
-#define _MIPIB_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb80c)
-#define MIPI_DSI_FUNC_PRG(pipe) _PIPE(pipe, _MIPIA_DSI_FUNC_PRG, _MIPIB_DSI_FUNC_PRG)
+#define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c)
+#define _MIPIB_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c)
+#define MIPI_DSI_FUNC_PRG(tc) _TRANSCODER(tc, _MIPIA_DSI_FUNC_PRG, \
+ _MIPIB_DSI_FUNC_PRG)
#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
#define CMD_MODE_NOT_SUPPORTED (0 << 13)
#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
@@ -6128,78 +6217,94 @@ enum punit_power_well {
#define DATA_LANES_PRG_REG_SHIFT 0
#define DATA_LANES_PRG_REG_MASK (7 << 0)
-#define _MIPIA_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb010)
-#define _MIPIB_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb810)
-#define MIPI_HS_TX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_HS_TX_TIMEOUT, _MIPIB_HS_TX_TIMEOUT)
+#define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010)
+#define _MIPIB_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810)
+#define MIPI_HS_TX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_HS_TX_TIMEOUT, \
+ _MIPIB_HS_TX_TIMEOUT)
#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
-#define _MIPIA_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb014)
-#define _MIPIB_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb814)
-#define MIPI_LP_RX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_LP_RX_TIMEOUT, _MIPIB_LP_RX_TIMEOUT)
+#define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014)
+#define _MIPIB_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814)
+#define MIPI_LP_RX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_LP_RX_TIMEOUT, \
+ _MIPIB_LP_RX_TIMEOUT)
#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
-#define _MIPIA_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb018)
-#define _MIPIB_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb818)
-#define MIPI_TURN_AROUND_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
+#define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018)
+#define _MIPIB_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818)
+#define MIPI_TURN_AROUND_TIMEOUT(tc) _TRANSCODER(tc, \
+ _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
#define TURN_AROUND_TIMEOUT_MASK 0x3f
-#define _MIPIA_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb01c)
-#define _MIPIB_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb81c)
-#define MIPI_DEVICE_RESET_TIMER(pipe) _PIPE(pipe, _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
+#define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c)
+#define _MIPIB_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c)
+#define MIPI_DEVICE_RESET_TIMER(tc) _TRANSCODER(tc, \
+ _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
#define DEVICE_RESET_TIMER_MASK 0xffff
-#define _MIPIA_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb020)
-#define _MIPIB_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb820)
-#define MIPI_DPI_RESOLUTION(pipe) _PIPE(pipe, _MIPIA_DPI_RESOLUTION, _MIPIB_DPI_RESOLUTION)
+#define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020)
+#define _MIPIB_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820)
+#define MIPI_DPI_RESOLUTION(tc) _TRANSCODER(tc, _MIPIA_DPI_RESOLUTION, \
+ _MIPIB_DPI_RESOLUTION)
#define VERTICAL_ADDRESS_SHIFT 16
#define VERTICAL_ADDRESS_MASK (0xffff << 16)
#define HORIZONTAL_ADDRESS_SHIFT 0
#define HORIZONTAL_ADDRESS_MASK 0xffff
-#define _MIPIA_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb024)
-#define _MIPIB_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb824)
-#define MIPI_DBI_FIFO_THROTTLE(pipe) _PIPE(pipe, _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
+#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024)
+#define _MIPIB_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824)
+#define MIPI_DBI_FIFO_THROTTLE(tc) _TRANSCODER(tc, \
+ _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
#define DBI_FIFO_EMPTY_HALF (0 << 0)
#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
/* regs below are bits 15:0 */
-#define _MIPIA_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb028)
-#define _MIPIB_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb828)
-#define MIPI_HSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
-
-#define _MIPIA_HBP_COUNT (VLV_DISPLAY_BASE + 0xb02c)
-#define _MIPIB_HBP_COUNT (VLV_DISPLAY_BASE + 0xb82c)
-#define MIPI_HBP_COUNT(pipe) _PIPE(pipe, _MIPIA_HBP_COUNT, _MIPIB_HBP_COUNT)
-
-#define _MIPIA_HFP_COUNT (VLV_DISPLAY_BASE + 0xb030)
-#define _MIPIB_HFP_COUNT (VLV_DISPLAY_BASE + 0xb830)
-#define MIPI_HFP_COUNT(pipe) _PIPE(pipe, _MIPIA_HFP_COUNT, _MIPIB_HFP_COUNT)
-
-#define _MIPIA_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb034)
-#define _MIPIB_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb834)
-#define MIPI_HACTIVE_AREA_COUNT(pipe) _PIPE(pipe, _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
-
-#define _MIPIA_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb038)
-#define _MIPIB_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb838)
-#define MIPI_VSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
+#define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028)
+#define _MIPIB_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828)
+#define MIPI_HSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \
+ _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
+
+#define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c)
+#define _MIPIB_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c)
+#define MIPI_HBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HBP_COUNT, \
+ _MIPIB_HBP_COUNT)
+
+#define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030)
+#define _MIPIB_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830)
+#define MIPI_HFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HFP_COUNT, \
+ _MIPIB_HFP_COUNT)
+
+#define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034)
+#define _MIPIB_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834)
+#define MIPI_HACTIVE_AREA_COUNT(tc) _TRANSCODER(tc, \
+ _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
+
+#define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038)
+#define _MIPIB_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838)
+#define MIPI_VSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \
+ _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
+
+#define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c)
+#define _MIPIB_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c)
+#define MIPI_VBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VBP_COUNT, \
+ _MIPIB_VBP_COUNT)
+
+#define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040)
+#define _MIPIB_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840)
+#define MIPI_VFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VFP_COUNT, \
+ _MIPIB_VFP_COUNT)
+
+#define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044)
+#define _MIPIB_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844)
+#define MIPI_HIGH_LOW_SWITCH_COUNT(tc) _TRANSCODER(tc, \
+ _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
-#define _MIPIA_VBP_COUNT (VLV_DISPLAY_BASE + 0xb03c)
-#define _MIPIB_VBP_COUNT (VLV_DISPLAY_BASE + 0xb83c)
-#define MIPI_VBP_COUNT(pipe) _PIPE(pipe, _MIPIA_VBP_COUNT, _MIPIB_VBP_COUNT)
-
-#define _MIPIA_VFP_COUNT (VLV_DISPLAY_BASE + 0xb040)
-#define _MIPIB_VFP_COUNT (VLV_DISPLAY_BASE + 0xb840)
-#define MIPI_VFP_COUNT(pipe) _PIPE(pipe, _MIPIA_VFP_COUNT, _MIPIB_VFP_COUNT)
-
-#define _MIPIA_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb044)
-#define _MIPIB_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb844)
-#define MIPI_HIGH_LOW_SWITCH_COUNT(pipe) _PIPE(pipe, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
/* regs above are bits 15:0 */
-#define _MIPIA_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb048)
-#define _MIPIB_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb848)
-#define MIPI_DPI_CONTROL(pipe) _PIPE(pipe, _MIPIA_DPI_CONTROL, _MIPIB_DPI_CONTROL)
+#define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048)
+#define _MIPIB_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848)
+#define MIPI_DPI_CONTROL(tc) _TRANSCODER(tc, _MIPIA_DPI_CONTROL, \
+ _MIPIB_DPI_CONTROL)
#define DPI_LP_MODE (1 << 6)
#define BACKLIGHT_OFF (1 << 5)
#define BACKLIGHT_ON (1 << 4)
@@ -6208,27 +6313,31 @@ enum punit_power_well {
#define TURN_ON (1 << 1)
#define SHUTDOWN (1 << 0)
-#define _MIPIA_DPI_DATA (VLV_DISPLAY_BASE + 0xb04c)
-#define _MIPIB_DPI_DATA (VLV_DISPLAY_BASE + 0xb84c)
-#define MIPI_DPI_DATA(pipe) _PIPE(pipe, _MIPIA_DPI_DATA, _MIPIB_DPI_DATA)
+#define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c)
+#define _MIPIB_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c)
+#define MIPI_DPI_DATA(tc) _TRANSCODER(tc, _MIPIA_DPI_DATA, \
+ _MIPIB_DPI_DATA)
#define COMMAND_BYTE_SHIFT 0
#define COMMAND_BYTE_MASK (0x3f << 0)
-#define _MIPIA_INIT_COUNT (VLV_DISPLAY_BASE + 0xb050)
-#define _MIPIB_INIT_COUNT (VLV_DISPLAY_BASE + 0xb850)
-#define MIPI_INIT_COUNT(pipe) _PIPE(pipe, _MIPIA_INIT_COUNT, _MIPIB_INIT_COUNT)
+#define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050)
+#define _MIPIB_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850)
+#define MIPI_INIT_COUNT(tc) _TRANSCODER(tc, _MIPIA_INIT_COUNT, \
+ _MIPIB_INIT_COUNT)
#define MASTER_INIT_TIMER_SHIFT 0
#define MASTER_INIT_TIMER_MASK (0xffff << 0)
-#define _MIPIA_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb054)
-#define _MIPIB_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb854)
-#define MIPI_MAX_RETURN_PKT_SIZE(pipe) _PIPE(pipe, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
+#define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054)
+#define _MIPIB_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854)
+#define MIPI_MAX_RETURN_PKT_SIZE(tc) _TRANSCODER(tc, \
+ _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
#define MAX_RETURN_PKT_SIZE_SHIFT 0
#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
-#define _MIPIA_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb058)
-#define _MIPIB_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb858)
-#define MIPI_VIDEO_MODE_FORMAT(pipe) _PIPE(pipe, _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
+#define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058)
+#define _MIPIB_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858)
+#define MIPI_VIDEO_MODE_FORMAT(tc) _TRANSCODER(tc, \
+ _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
#define DISABLE_VIDEO_BTA (1 << 3)
#define IP_TG_CONFIG (1 << 2)
@@ -6236,9 +6345,10 @@ enum punit_power_well {
#define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0)
#define VIDEO_MODE_BURST (3 << 0)
-#define _MIPIA_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb05c)
-#define _MIPIB_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb85c)
-#define MIPI_EOT_DISABLE(pipe) _PIPE(pipe, _MIPIA_EOT_DISABLE, _MIPIB_EOT_DISABLE)
+#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c)
+#define _MIPIB_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
+#define MIPI_EOT_DISABLE(tc) _TRANSCODER(tc, _MIPIA_EOT_DISABLE, \
+ _MIPIB_EOT_DISABLE)
#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
@@ -6248,28 +6358,33 @@ enum punit_power_well {
#define CLOCKSTOP (1 << 1)
#define EOT_DISABLE (1 << 0)
-#define _MIPIA_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb060)
-#define _MIPIB_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb860)
-#define MIPI_LP_BYTECLK(pipe) _PIPE(pipe, _MIPIA_LP_BYTECLK, _MIPIB_LP_BYTECLK)
+#define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060)
+#define _MIPIB_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860)
+#define MIPI_LP_BYTECLK(tc) _TRANSCODER(tc, _MIPIA_LP_BYTECLK, \
+ _MIPIB_LP_BYTECLK)
#define LP_BYTECLK_SHIFT 0
#define LP_BYTECLK_MASK (0xffff << 0)
/* bits 31:0 */
-#define _MIPIA_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb064)
-#define _MIPIB_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb864)
-#define MIPI_LP_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_LP_GEN_DATA, _MIPIB_LP_GEN_DATA)
+#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064)
+#define _MIPIB_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864)
+#define MIPI_LP_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_DATA, \
+ _MIPIB_LP_GEN_DATA)
/* bits 31:0 */
-#define _MIPIA_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb068)
-#define _MIPIB_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb868)
-#define MIPI_HS_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_HS_GEN_DATA, _MIPIB_HS_GEN_DATA)
-
-#define _MIPIA_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb06c)
-#define _MIPIB_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb86c)
-#define MIPI_LP_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_LP_GEN_CTRL, _MIPIB_LP_GEN_CTRL)
-#define _MIPIA_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb070)
-#define _MIPIB_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb870)
-#define MIPI_HS_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_HS_GEN_CTRL, _MIPIB_HS_GEN_CTRL)
+#define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068)
+#define _MIPIB_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868)
+#define MIPI_HS_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_DATA, \
+ _MIPIB_HS_GEN_DATA)
+
+#define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c)
+#define _MIPIB_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c)
+#define MIPI_LP_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_CTRL, \
+ _MIPIB_LP_GEN_CTRL)
+#define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070)
+#define _MIPIB_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870)
+#define MIPI_HS_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_CTRL, \
+ _MIPIB_HS_GEN_CTRL)
#define LONG_PACKET_WORD_COUNT_SHIFT 8
#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
#define SHORT_PACKET_PARAM_SHIFT 8
@@ -6280,9 +6395,10 @@ enum punit_power_well {
 #define DATA_TYPE_MASK					(0x3f << 0)
/* data type values, see include/video/mipi_display.h */
-#define _MIPIA_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb074)
-#define _MIPIB_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb874)
-#define MIPI_GEN_FIFO_STAT(pipe) _PIPE(pipe, _MIPIA_GEN_FIFO_STAT, _MIPIB_GEN_FIFO_STAT)
+#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074)
+#define _MIPIB_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874)
+#define MIPI_GEN_FIFO_STAT(tc) _TRANSCODER(tc, _MIPIA_GEN_FIFO_STAT, \
+ _MIPIB_GEN_FIFO_STAT)
#define DPI_FIFO_EMPTY (1 << 28)
#define DBI_FIFO_EMPTY (1 << 27)
#define LP_CTRL_FIFO_EMPTY (1 << 26)
@@ -6298,16 +6414,18 @@ enum punit_power_well {
#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
#define HS_DATA_FIFO_FULL (1 << 0)
-#define _MIPIA_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb078)
-#define _MIPIB_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb878)
-#define MIPI_HS_LP_DBI_ENABLE(pipe) _PIPE(pipe, _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
+#define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078)
+#define _MIPIB_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878)
+#define MIPI_HS_LP_DBI_ENABLE(tc) _TRANSCODER(tc, \
+ _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
#define DBI_HS_LP_MODE_MASK (1 << 0)
#define DBI_LP_MODE (1 << 0)
#define DBI_HS_MODE (0 << 0)
-#define _MIPIA_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb080)
-#define _MIPIB_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb880)
-#define MIPI_DPHY_PARAM(pipe) _PIPE(pipe, _MIPIA_DPHY_PARAM, _MIPIB_DPHY_PARAM)
+#define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080)
+#define _MIPIB_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880)
+#define MIPI_DPHY_PARAM(tc) _TRANSCODER(tc, _MIPIA_DPHY_PARAM, \
+ _MIPIB_DPHY_PARAM)
#define EXIT_ZERO_COUNT_SHIFT 24
#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
#define TRAIL_COUNT_SHIFT 16
@@ -6318,34 +6436,41 @@ enum punit_power_well {
#define PREPARE_COUNT_MASK (0x3f << 0)
/* bits 31:0 */
-#define _MIPIA_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb084)
-#define _MIPIB_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb884)
-#define MIPI_DBI_BW_CTRL(pipe) _PIPE(pipe, _MIPIA_DBI_BW_CTRL, _MIPIB_DBI_BW_CTRL)
-
-#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb088)
-#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb888)
-#define MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe) _PIPE(pipe, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
+#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084)
+#define _MIPIB_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
+#define MIPI_DBI_BW_CTRL(tc) _TRANSCODER(tc, _MIPIA_DBI_BW_CTRL, \
+ _MIPIB_DBI_BW_CTRL)
+
+#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
+ + 0xb088)
+#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
+ + 0xb888)
+#define MIPI_CLK_LANE_SWITCH_TIME_CNT(tc) _TRANSCODER(tc, \
+ _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
#define LP_HS_SSW_CNT_SHIFT 16
#define LP_HS_SSW_CNT_MASK (0xffff << 16)
#define HS_LP_PWR_SW_CNT_SHIFT 0
#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
-#define _MIPIA_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb08c)
-#define _MIPIB_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb88c)
-#define MIPI_STOP_STATE_STALL(pipe) _PIPE(pipe, _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
+#define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c)
+#define _MIPIB_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c)
+#define MIPI_STOP_STATE_STALL(tc) _TRANSCODER(tc, \
+ _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
#define STOP_STATE_STALL_COUNTER_SHIFT 0
#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
-#define _MIPIA_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb090)
-#define _MIPIB_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb890)
-#define MIPI_INTR_STAT_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
-#define _MIPIA_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb094)
-#define _MIPIB_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb894)
-#define MIPI_INTR_EN_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_EN_REG_1, _MIPIB_INTR_EN_REG_1)
+#define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090)
+#define _MIPIB_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890)
+#define MIPI_INTR_STAT_REG_1(tc) _TRANSCODER(tc, \
+ _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
+#define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094)
+#define _MIPIB_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894)
+#define MIPI_INTR_EN_REG_1(tc) _TRANSCODER(tc, _MIPIA_INTR_EN_REG_1, \
+ _MIPIB_INTR_EN_REG_1)
#define RX_CONTENTION_DETECTED (1 << 0)
/* XXX: only pipe A ?!? */
-#define MIPIA_DBI_TYPEC_CTRL (VLV_DISPLAY_BASE + 0xb100)
+#define MIPIA_DBI_TYPEC_CTRL (dev_priv->mipi_mmio_base + 0xb100)
#define DBI_TYPEC_ENABLE (1 << 31)
#define DBI_TYPEC_WIP (1 << 30)
#define DBI_TYPEC_OPTION_SHIFT 28
@@ -6359,9 +6484,10 @@ enum punit_power_well {
/* MIPI adapter registers */
-#define _MIPIA_CTRL (VLV_DISPLAY_BASE + 0xb104)
-#define _MIPIB_CTRL (VLV_DISPLAY_BASE + 0xb904)
-#define MIPI_CTRL(pipe) _PIPE(pipe, _MIPIA_CTRL, _MIPIB_CTRL)
+#define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104)
+#define _MIPIB_CTRL (dev_priv->mipi_mmio_base + 0xb904)
+#define MIPI_CTRL(tc) _TRANSCODER(tc, _MIPIA_CTRL, \
+ _MIPIB_CTRL)
#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
@@ -6373,50 +6499,52 @@ enum punit_power_well {
#define READ_REQUEST_PRIORITY_HIGH (3 << 3)
#define RGB_FLIP_TO_BGR (1 << 2)
-#define _MIPIA_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb108)
-#define _MIPIB_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb908)
-#define MIPI_DATA_ADDRESS(pipe) _PIPE(pipe, _MIPIA_DATA_ADDRESS, _MIPIB_DATA_ADDRESS)
+#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108)
+#define _MIPIB_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
+#define MIPI_DATA_ADDRESS(tc) _TRANSCODER(tc, _MIPIA_DATA_ADDRESS, \
+ _MIPIB_DATA_ADDRESS)
#define DATA_MEM_ADDRESS_SHIFT 5
#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define DATA_VALID (1 << 0)
-#define _MIPIA_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb10c)
-#define _MIPIB_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb90c)
-#define MIPI_DATA_LENGTH(pipe) _PIPE(pipe, _MIPIA_DATA_LENGTH, _MIPIB_DATA_LENGTH)
+#define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c)
+#define _MIPIB_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c)
+#define MIPI_DATA_LENGTH(tc) _TRANSCODER(tc, _MIPIA_DATA_LENGTH, \
+ _MIPIB_DATA_LENGTH)
#define DATA_LENGTH_SHIFT 0
#define DATA_LENGTH_MASK (0xfffff << 0)
-#define _MIPIA_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb110)
-#define _MIPIB_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb910)
-#define MIPI_COMMAND_ADDRESS(pipe) _PIPE(pipe, _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
+#define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110)
+#define _MIPIB_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910)
+#define MIPI_COMMAND_ADDRESS(tc) _TRANSCODER(tc, \
+ _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
#define COMMAND_MEM_ADDRESS_SHIFT 5
#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define AUTO_PWG_ENABLE (1 << 2)
#define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1)
#define COMMAND_VALID (1 << 0)
-#define _MIPIA_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb114)
-#define _MIPIB_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb914)
-#define MIPI_COMMAND_LENGTH(pipe) _PIPE(pipe, _MIPIA_COMMAND_LENGTH, _MIPIB_COMMAND_LENGTH)
+#define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114)
+#define _MIPIB_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914)
+#define MIPI_COMMAND_LENGTH(tc) _TRANSCODER(tc, _MIPIA_COMMAND_LENGTH, \
+ _MIPIB_COMMAND_LENGTH)
#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
-#define _MIPIA_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb118)
-#define _MIPIB_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb918)
-#define MIPI_READ_DATA_RETURN(pipe, n) \
- (_PIPE(pipe, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
+#define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118)
+#define _MIPIB_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918)
+#define MIPI_READ_DATA_RETURN(tc, n) \
+ (_TRANSCODER(tc, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) \
+ + 4 * (n)) /* n: 0...7 */
-#define _MIPIA_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb138)
-#define _MIPIB_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb938)
-#define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
+#define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138)
+#define _MIPIB_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938)
+#define MIPI_READ_DATA_VALID(tc) _TRANSCODER(tc, \
+ _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
#define READ_DATA_VALID(n) (1 << (n))
/* For UMS only (deprecated): */
#define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
-#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
-#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
-#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
-#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
#endif /* _I915_REG_H_ */
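Two things change throughout the MIPI block above: the registers are now
indexed by DSI transcoder via _TRANSCODER() instead of by pipe, and their base
moves from the fixed VLV_DISPLAY_BASE to a per-device dev_priv->mipi_mmio_base,
so a future platform with a relocated DSI block only needs a different base
value. Because the macro bodies reference dev_priv by name, they can only be
used where a local dev_priv is in scope, as in this hedged, illustrative
sketch:

static u32 sketch_dsi_device_ready(struct drm_i915_private *dev_priv,
				   enum transcoder tc)
{
	/* tc selects the MIPI A (0) or MIPI B (1) instance */
	return I915_READ(MIPI_DEVICE_READY(tc)) & DEVICE_READY;
}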
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 86ce39aad0ff..ae7fd8fc27f0 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -47,22 +47,45 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
intel_runtime_pm_get(dev_priv);
- /* On VLV, residency time is in CZ units rather than 1.28us */
+ /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
if (IS_VALLEYVIEW(dev)) {
- u32 clkctl2;
+ u32 reg, czcount_30ns;
- clkctl2 = I915_READ(VLV_CLK_CTL2) >>
- CLK_CTL2_CZCOUNT_30NS_SHIFT;
- if (!clkctl2) {
- WARN(!clkctl2, "bogus CZ count value");
+ if (IS_CHERRYVIEW(dev))
+ reg = CHV_CLK_CTL1;
+ else
+ reg = VLV_CLK_CTL2;
+
+ czcount_30ns = I915_READ(reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT;
+
+ if (!czcount_30ns) {
+ WARN(!czcount_30ns, "bogus CZ count value");
ret = 0;
goto out;
}
- units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
+
+ units = 0;
+ div = 1000000ULL;
+
+ if (IS_CHERRYVIEW(dev)) {
+			/* Special case for 320 MHz */
+ if (czcount_30ns == 1) {
+ div = 10000000ULL;
+ units = 3125ULL;
+ } else {
+				/* CHV counts are one less */
+ czcount_30ns += 1;
+ }
+ }
+
+ if (units == 0)
+ units = DIV_ROUND_UP_ULL(30ULL * bias,
+ (u64)czcount_30ns);
+
if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
units <<= 8;
- div = 1000000ULL * bias;
+ div = div * bias;
}
raw_time = I915_READ(reg) * units;
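Restating the unit selection above as a standalone helper may make the
arithmetic easier to follow: czcount_30ns encodes the CZ clock period, CHV
reports it off by one, and the 320 MHz mode (czcount_30ns == 1, a 3.125 ns
period) needs a finer-grained divider. A hedged, illustrative rendering of
the same math:

static u64 sketch_cz_units(u64 bias, u32 czcount_30ns, bool is_chv, u64 *div)
{
	*div = 1000000ULL;

	if (is_chv && czcount_30ns == 1) {
		/* 320 MHz special case: 3.125 ns, so scale div by 10 */
		*div = 10000000ULL;
		return 3125ULL;
	}

	if (is_chv)
		czcount_30ns += 1;	/* CHV counts are one less */

	return DIV_ROUND_UP_ULL(30ULL * bias, (u64)czcount_30ns);
}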
@@ -461,11 +484,20 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
mutex_unlock(&dev->struct_mutex);
if (attr == &dev_attr_gt_RP0_freq_mhz) {
- val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
+ if (IS_VALLEYVIEW(dev))
+ val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
+ else
+ val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
- val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
+ if (IS_VALLEYVIEW(dev))
+ val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
+ else
+ val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
- val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
+ if (IS_VALLEYVIEW(dev))
+ val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq);
+ else
+ val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
} else {
BUG();
}
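On non-VLV parts the three RP capabilities are byte-packed into rp_state_cap
exactly as the masks above show; VLV/CHV instead need vlv_gpu_freq() to
translate punit units. An illustrative decode of the packed form (the helper
name is not from this patch):

static u32 sketch_rp_cap_to_mhz(u32 rp_state_cap, int shift)
{
	/* shift: 0 = RP0 (max), 8 = RP1 (efficient), 16 = RPn (min) */
	return ((rp_state_cap >> shift) & 0xff) * GT_FREQUENCY_MULTIPLIER;
}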
@@ -486,6 +518,9 @@ static const struct attribute *vlv_attrs[] = {
&dev_attr_gt_cur_freq_mhz.attr,
&dev_attr_gt_max_freq_mhz.attr,
&dev_attr_gt_min_freq_mhz.attr,
+ &dev_attr_gt_RP0_freq_mhz.attr,
+ &dev_attr_gt_RP1_freq_mhz.attr,
+ &dev_attr_gt_RPn_freq_mhz.attr,
&dev_attr_vlv_rpe_freq_mhz.attr,
NULL,
};
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 827498e081df..608ed302f24d 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -336,11 +336,12 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
+ dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
"active %s, min brightness %u, level %u\n",
dev_priv->vbt.backlight.pwm_freq_hz,
dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
- entry->min_brightness,
+ dev_priv->vbt.backlight.min_brightness,
backlight_data->level[panel_type]);
}
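Caching min_brightness in dev_priv->vbt.backlight lets later panel code clamp
user-requested levels to the panel's safe range. One plausible consumer,
sketched under the assumption of a 0..255 VBT scale; the helper and its
scaling policy are illustrative, not this patch's code:

static u32 sketch_scale_user_level(struct drm_i915_private *dev_priv,
				   u32 user, u32 user_max)
{
	u32 min = dev_priv->vbt.backlight.min_brightness;

	/* map 0..user_max onto min..255 so we never go below the VBT floor */
	return min + DIV_ROUND_CLOSEST((255 - min) * user, user_max);
}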
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 5a045d3bd77e..2efaf8e8d9c4 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -137,6 +137,18 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
}
+static void hsw_crt_pre_enable(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL already enabled\n");
+ I915_WRITE(SPLL_CTL,
+ SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC);
+ POSTING_READ(SPLL_CTL);
+ udelay(20);
+}
+
/* Note: The caller is required to filter out dpms modes not supported by the
* platform. */
static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
@@ -194,6 +206,20 @@ static void intel_disable_crt(struct intel_encoder *encoder)
intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
}
+
+static void hsw_crt_post_disable(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t val;
+
+ DRM_DEBUG_KMS("Disabling SPLL\n");
+ val = I915_READ(SPLL_CTL);
+ WARN_ON(!(val & SPLL_PLL_ENABLE));
+ I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+ POSTING_READ(SPLL_CTL);
+}
+
static void intel_enable_crt(struct intel_encoder *encoder)
{
struct intel_crt *crt = intel_encoder_to_crt(encoder);
@@ -289,8 +315,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
pipe_config->pipe_bpp = 24;
/* FDI must always be 2.7 GHz */
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev)) {
+ pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL;
pipe_config->port_clock = 135000 * 2;
+ }
return true;
}
@@ -632,8 +660,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
struct intel_load_detect_pipe tmp;
struct drm_modeset_acquire_ctx ctx;
- intel_runtime_pm_get(dev_priv);
-
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
connector->base.id, connector->name,
force);
@@ -685,8 +711,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
out:
intel_display_power_put(dev_priv, power_domain);
- intel_runtime_pm_put(dev_priv);
-
return status;
}
@@ -860,6 +884,8 @@ void intel_crt_init(struct drm_device *dev)
if (HAS_DDI(dev)) {
crt->base.get_config = hsw_crt_get_config;
crt->base.get_hw_state = intel_ddi_get_hw_state;
+ crt->base.pre_enable = hsw_crt_pre_enable;
+ crt->base.post_disable = hsw_crt_post_disable;
} else {
crt->base.get_config = intel_crt_get_config;
crt->base.get_hw_state = intel_crt_get_hw_state;
@@ -869,7 +895,7 @@ void intel_crt_init(struct drm_device *dev)
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
if (!I915_HAS_HOTPLUG(dev))
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b17b9c7c769f..5db0b5552e39 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -76,12 +76,12 @@ static const u32 bdw_ddi_translations_edp[] = {
0x00FFFFFF, 0x00000012, /* eDP parameters */
0x00EBAFFF, 0x00020011,
0x00C71FFF, 0x0006000F,
+ 0x00AAAFFF, 0x000E000A,
0x00FFFFFF, 0x00020011,
0x00DB6FFF, 0x0005000F,
0x00BEEFFF, 0x000A000C,
0x00FFFFFF, 0x0005000F,
0x00DB6FFF, 0x000A000C,
- 0x00FFFFFF, 0x000A000C,
 	0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB */
};
@@ -89,12 +89,12 @@ static const u32 bdw_ddi_translations_dp[] = {
0x00FFFFFF, 0x0007000E, /* DP parameters */
0x00D75FFF, 0x000E000A,
0x00BEFFFF, 0x00140006,
+ 0x80B2CFFF, 0x001B0002,
0x00FFFFFF, 0x000E000A,
0x00D75FFF, 0x00180004,
0x80CB2FFF, 0x001B0002,
0x00F7DFFF, 0x00180004,
0x80D75FFF, 0x001B0002,
- 0x80FFFFFF, 0x001B0002,
 	0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB */
};
@@ -116,7 +116,10 @@ enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
struct drm_encoder *encoder = &intel_encoder->base;
int type = intel_encoder->type;
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
+ if (type == INTEL_OUTPUT_DP_MST) {
+ struct intel_digital_port *intel_dig_port = enc_to_mst(encoder)->primary;
+ return intel_dig_port->port;
+ } else if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) {
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(encoder);
@@ -277,7 +280,8 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
/* Configure Port Clock Select */
- I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel);
+ I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config.ddi_pll_sel);
+ WARN_ON(intel_crtc->config.ddi_pll_sel != PORT_CLK_SEL_SPLL);
/* Start the training iterating through available voltages and emphasis,
* testing each value twice. */
@@ -364,6 +368,18 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
DRM_ERROR("FDI link training failed!\n");
}
+void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(&encoder->base);
+
+ intel_dp->DP = intel_dig_port->saved_port_bits |
+ DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
+ intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
+}
+
static struct intel_encoder *
intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
{
@@ -385,53 +401,6 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
return ret;
}
-void intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
- struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- uint32_t val;
-
- switch (intel_crtc->ddi_pll_sel) {
- case PORT_CLK_SEL_SPLL:
- plls->spll_refcount--;
- if (plls->spll_refcount == 0) {
- DRM_DEBUG_KMS("Disabling SPLL\n");
- val = I915_READ(SPLL_CTL);
- WARN_ON(!(val & SPLL_PLL_ENABLE));
- I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
- POSTING_READ(SPLL_CTL);
- }
- break;
- case PORT_CLK_SEL_WRPLL1:
- plls->wrpll1_refcount--;
- if (plls->wrpll1_refcount == 0) {
- DRM_DEBUG_KMS("Disabling WRPLL 1\n");
- val = I915_READ(WRPLL_CTL1);
- WARN_ON(!(val & WRPLL_PLL_ENABLE));
- I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE);
- POSTING_READ(WRPLL_CTL1);
- }
- break;
- case PORT_CLK_SEL_WRPLL2:
- plls->wrpll2_refcount--;
- if (plls->wrpll2_refcount == 0) {
- DRM_DEBUG_KMS("Disabling WRPLL 2\n");
- val = I915_READ(WRPLL_CTL2);
- WARN_ON(!(val & WRPLL_PLL_ENABLE));
- I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE);
- POSTING_READ(WRPLL_CTL2);
- }
- break;
- }
-
- WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n");
- WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n");
- WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n");
-
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
-}
-
#define LC_FREQ 2700
#define LC_FREQ_2K (LC_FREQ * 2000)
@@ -592,9 +561,9 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
u32 wrpll;
wrpll = I915_READ(reg);
- switch (wrpll & SPLL_PLL_REF_MASK) {
- case SPLL_PLL_SSC:
- case SPLL_PLL_NON_SSC:
+ switch (wrpll & WRPLL_PLL_REF_MASK) {
+ case WRPLL_PLL_SSC:
+ case WRPLL_PLL_NON_SSC:
/*
* We could calculate spread here, but our checking
* code only cares about 5% accuracy, and spread is a max of
@@ -602,7 +571,7 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
*/
refclk = 135;
break;
- case SPLL_PLL_LCPLL:
+ case WRPLL_PLL_LCPLL:
refclk = LC_FREQ;
break;
default:
@@ -618,15 +587,14 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
return (refclk * n * 100) / (p * r);
}
-static void intel_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+void intel_ddi_clock_get(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
- enum port port = intel_ddi_get_encoder_port(encoder);
int link_clock = 0;
u32 val, pll;
- val = I915_READ(PORT_CLK_SEL(port));
+ val = pipe_config->ddi_pll_sel;
switch (val & PORT_CLK_SEL_MASK) {
case PORT_CLK_SEL_LCPLL_810:
link_clock = 81000;
@@ -750,173 +718,37 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
- struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
- struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
int type = intel_encoder->type;
- enum pipe pipe = intel_crtc->pipe;
int clock = intel_crtc->config.port_clock;
- intel_ddi_put_crtc_pll(crtc);
-
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ intel_put_shared_dpll(intel_crtc);
- switch (intel_dp->link_bw) {
- case DP_LINK_BW_1_62:
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
- break;
- case DP_LINK_BW_2_7:
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
- break;
- case DP_LINK_BW_5_4:
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
- break;
- default:
- DRM_ERROR("Link bandwidth %d unsupported\n",
- intel_dp->link_bw);
- return false;
- }
-
- } else if (type == INTEL_OUTPUT_HDMI) {
- uint32_t reg, val;
+ if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_shared_dpll *pll;
+ uint32_t val;
unsigned p, n2, r2;
intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
- val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
+ val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p);
- if (val == I915_READ(WRPLL_CTL1)) {
- DRM_DEBUG_KMS("Reusing WRPLL 1 on pipe %c\n",
- pipe_name(pipe));
- reg = WRPLL_CTL1;
- } else if (val == I915_READ(WRPLL_CTL2)) {
- DRM_DEBUG_KMS("Reusing WRPLL 2 on pipe %c\n",
- pipe_name(pipe));
- reg = WRPLL_CTL2;
- } else if (plls->wrpll1_refcount == 0) {
- DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
- pipe_name(pipe));
- reg = WRPLL_CTL1;
- } else if (plls->wrpll2_refcount == 0) {
- DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
- pipe_name(pipe));
- reg = WRPLL_CTL2;
- } else {
- DRM_ERROR("No WRPLLs available!\n");
- return false;
- }
-
- DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
- clock, p, n2, r2);
-
- if (reg == WRPLL_CTL1) {
- plls->wrpll1_refcount++;
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
- } else {
- plls->wrpll2_refcount++;
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
- }
+ intel_crtc->config.dpll_hw_state.wrpll = val;
- } else if (type == INTEL_OUTPUT_ANALOG) {
- if (plls->spll_refcount == 0) {
- DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
- pipe_name(pipe));
- plls->spll_refcount++;
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
- } else {
- DRM_ERROR("SPLL already in use\n");
+ pll = intel_get_shared_dpll(intel_crtc);
+ if (pll == NULL) {
+ DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+ pipe_name(intel_crtc->pipe));
return false;
}
- } else {
- WARN(1, "Invalid DDI encoder type %d\n", type);
- return false;
+ intel_crtc->config.ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
}
return true;
}
-/*
- * To be called after intel_ddi_pll_select(). That one selects the PLL to be
- * used, this one actually enables the PLL.
- */
-void intel_ddi_pll_enable(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
- int clock = crtc->config.port_clock;
- uint32_t reg, cur_val, new_val;
- int refcount;
- const char *pll_name;
- uint32_t enable_bit = (1 << 31);
- unsigned int p, n2, r2;
-
- BUILD_BUG_ON(enable_bit != SPLL_PLL_ENABLE);
- BUILD_BUG_ON(enable_bit != WRPLL_PLL_ENABLE);
-
- switch (crtc->ddi_pll_sel) {
- case PORT_CLK_SEL_LCPLL_2700:
- case PORT_CLK_SEL_LCPLL_1350:
- case PORT_CLK_SEL_LCPLL_810:
- /*
- * LCPLL should always be enabled at this point of the mode set
- * sequence, so nothing to do.
- */
- return;
-
- case PORT_CLK_SEL_SPLL:
- pll_name = "SPLL";
- reg = SPLL_CTL;
- refcount = plls->spll_refcount;
- new_val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz |
- SPLL_PLL_SSC;
- break;
-
- case PORT_CLK_SEL_WRPLL1:
- case PORT_CLK_SEL_WRPLL2:
- if (crtc->ddi_pll_sel == PORT_CLK_SEL_WRPLL1) {
- pll_name = "WRPLL1";
- reg = WRPLL_CTL1;
- refcount = plls->wrpll1_refcount;
- } else {
- pll_name = "WRPLL2";
- reg = WRPLL_CTL2;
- refcount = plls->wrpll2_refcount;
- }
-
- intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
-
- new_val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
- WRPLL_DIVIDER_REFERENCE(r2) |
- WRPLL_DIVIDER_FEEDBACK(n2) | WRPLL_DIVIDER_POST(p);
-
- break;
-
- case PORT_CLK_SEL_NONE:
- WARN(1, "Bad selected pll: PORT_CLK_SEL_NONE\n");
- return;
- default:
- WARN(1, "Bad selected pll: 0x%08x\n", crtc->ddi_pll_sel);
- return;
- }
-
- cur_val = I915_READ(reg);
-
- WARN(refcount < 1, "Bad %s refcount: %d\n", pll_name, refcount);
- if (refcount == 1) {
- WARN(cur_val & enable_bit, "%s already enabled\n", pll_name);
- I915_WRITE(reg, new_val);
- POSTING_READ(reg);
- udelay(20);
- } else {
- WARN((cur_val & enable_bit) == 0, "%s disabled\n", pll_name);
- }
-}
-
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
@@ -926,8 +758,7 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
int type = intel_encoder->type;
uint32_t temp;
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
-
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
temp = TRANS_MSA_SYNC_CLK;
switch (intel_crtc->config.pipe_bpp) {
case 18:
@@ -949,6 +780,21 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
}
}
+void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ uint32_t temp;
+ temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+	if (state)
+ temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
+ else
+ temp &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
+ I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+}
+
void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -995,7 +841,9 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
* eDP when not using the panel fitter, and when not
* using motion blur mitigation (which we don't
* support). */
- if (IS_HASWELL(dev) && intel_crtc->config.pch_pfit.enabled)
+ if (IS_HASWELL(dev) &&
+ (intel_crtc->config.pch_pfit.enabled ||
+ intel_crtc->config.pch_pfit.force_thru))
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
else
temp |= TRANS_DDI_EDP_INPUT_A_ON;
@@ -1026,7 +874,19 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- temp |= TRANS_DDI_MODE_SELECT_DP_SST;
+	if (intel_dp->is_mst)
+		temp |= TRANS_DDI_MODE_SELECT_DP_MST;
+	else
+ temp |= TRANS_DDI_MODE_SELECT_DP_SST;
+
+ temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
+ } else if (type == INTEL_OUTPUT_DP_MST) {
+ struct intel_dp *intel_dp = &enc_to_mst(encoder)->primary->dp;
+
+		if (intel_dp->is_mst)
+			temp |= TRANS_DDI_MODE_SELECT_DP_MST;
+		else
+ temp |= TRANS_DDI_MODE_SELECT_DP_SST;
temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
} else {
@@ -1043,7 +903,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
uint32_t val = I915_READ(reg);
- val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK);
+ val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
val |= TRANS_DDI_PORT_NONE;
I915_WRITE(reg, val);
}
@@ -1082,8 +942,11 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
case TRANS_DDI_MODE_SELECT_DP_SST:
if (type == DRM_MODE_CONNECTOR_eDP)
return true;
- case TRANS_DDI_MODE_SELECT_DP_MST:
return (type == DRM_MODE_CONNECTOR_DisplayPort);
+ case TRANS_DDI_MODE_SELECT_DP_MST:
+ /* if the transcoder is in MST state then
+		 * the connector isn't connected */
+ return false;
case TRANS_DDI_MODE_SELECT_FDI:
return (type == DRM_MODE_CONNECTOR_VGA);
@@ -1135,6 +998,9 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
if ((tmp & TRANS_DDI_PORT_MASK)
== TRANS_DDI_SELECT_PORT(port)) {
+ if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST)
+ return false;
+
*pipe = i;
return true;
}
@@ -1146,76 +1012,6 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
return false;
}
-static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- uint32_t temp, ret;
- enum port port = I915_MAX_PORTS;
- enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
- pipe);
- int i;
-
- if (cpu_transcoder == TRANSCODER_EDP) {
- port = PORT_A;
- } else {
- temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
- temp &= TRANS_DDI_PORT_MASK;
-
- for (i = PORT_B; i <= PORT_E; i++)
- if (temp == TRANS_DDI_SELECT_PORT(i))
- port = i;
- }
-
- if (port == I915_MAX_PORTS) {
- WARN(1, "Pipe %c enabled on an unknown port\n",
- pipe_name(pipe));
- ret = PORT_CLK_SEL_NONE;
- } else {
- ret = I915_READ(PORT_CLK_SEL(port));
- DRM_DEBUG_KMS("Pipe %c connected to port %c using clock "
- "0x%08x\n", pipe_name(pipe), port_name(port),
- ret);
- }
-
- return ret;
-}
-
-void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- enum pipe pipe;
- struct intel_crtc *intel_crtc;
-
- dev_priv->ddi_plls.spll_refcount = 0;
- dev_priv->ddi_plls.wrpll1_refcount = 0;
- dev_priv->ddi_plls.wrpll2_refcount = 0;
-
- for_each_pipe(pipe) {
- intel_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-
- if (!intel_crtc->active) {
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
- continue;
- }
-
- intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
- pipe);
-
- switch (intel_crtc->ddi_pll_sel) {
- case PORT_CLK_SEL_SPLL:
- dev_priv->ddi_plls.spll_refcount++;
- break;
- case PORT_CLK_SEL_WRPLL1:
- dev_priv->ddi_plls.wrpll1_refcount++;
- break;
- case PORT_CLK_SEL_WRPLL2:
- dev_priv->ddi_plls.wrpll2_refcount++;
- break;
- }
- }
-}
-
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
{
struct drm_crtc *crtc = &intel_crtc->base;
@@ -1261,17 +1057,13 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
intel_edp_panel_on(intel_dp);
}
- WARN_ON(crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
- I915_WRITE(PORT_CLK_SEL(port), crtc->ddi_pll_sel);
+ WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
+ I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(encoder);
- intel_dp->DP = intel_dig_port->saved_port_bits |
- DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
- intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
+ intel_ddi_init_dp_buf_reg(intel_encoder);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
@@ -1418,10 +1210,60 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
}
}
+static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ I915_WRITE(WRPLL_CTL(pll->id), pll->hw_state.wrpll);
+ POSTING_READ(WRPLL_CTL(pll->id));
+ udelay(20);
+}
+
+static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ uint32_t val;
+
+ val = I915_READ(WRPLL_CTL(pll->id));
+ I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE);
+ POSTING_READ(WRPLL_CTL(pll->id));
+}
+
+static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ uint32_t val;
+
+ if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ return false;
+
+ val = I915_READ(WRPLL_CTL(pll->id));
+ hw_state->wrpll = val;
+
+ return val & WRPLL_PLL_ENABLE;
+}
+
+static const char * const hsw_ddi_pll_names[] = {
+ "WRPLL 1",
+ "WRPLL 2",
+};
+
void intel_ddi_pll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val = I915_READ(LCPLL_CTL);
+ int i;
+
+ dev_priv->num_shared_dpll = 2;
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ dev_priv->shared_dplls[i].id = i;
+ dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
+ dev_priv->shared_dplls[i].disable = hsw_ddi_pll_disable;
+ dev_priv->shared_dplls[i].enable = hsw_ddi_pll_enable;
+ dev_priv->shared_dplls[i].get_hw_state =
+ hsw_ddi_pll_get_hw_state;
+ }
/* The LCPLL register should be turned on by the BIOS. For now let's
* just check its state and print errors in case something is wrong.
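Registering WRPLL 1/2 as generic shared DPLLs means the common modeset code
can enable, disable and cross-check them through the vtable alone. A hedged
sketch of a get_hw_state consumer doing state readout (the loop shape mirrors
the shared-DPLL handling in intel_display.c but is illustrative here):

static void sketch_dpll_readout(struct drm_i915_private *dev_priv)
{
	int i;

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
		DRM_DEBUG_KMS("%s hw state readout: %s\n",
			      pll->name, pll->on ? "on" : "off");
	}
}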
@@ -1465,10 +1307,15 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
intel_wait_ddi_buf_idle(dev_priv, port);
}
- val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
+ val = DP_TP_CTL_ENABLE |
DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
- if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
- val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+ if (intel_dp->is_mst)
+ val |= DP_TP_CTL_MODE_MST;
+ else {
+ val |= DP_TP_CTL_MODE_SST;
+ if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+ val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+ }
I915_WRITE(DP_TP_CTL(port), val);
POSTING_READ(DP_TP_CTL(port));
@@ -1507,11 +1354,16 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
- int type = intel_encoder->type;
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(&intel_encoder->base);
+ int type = intel_dig_port->base.type;
+
+ if (type != INTEL_OUTPUT_DISPLAYPORT &&
+ type != INTEL_OUTPUT_EDP &&
+ type != INTEL_OUTPUT_UNKNOWN) {
+ return;
+ }
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP)
- intel_dp_check_link_status(intel_dp);
+ intel_dp_hot_plug(intel_encoder);
}
void intel_ddi_get_config(struct intel_encoder *encoder,
@@ -1663,15 +1515,13 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
- struct intel_connector *hdmi_connector = NULL;
- struct intel_connector *dp_connector = NULL;
bool init_hdmi, init_dp;
init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
dev_priv->vbt.ddi_port_info[port].supports_hdmi);
init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
if (!init_dp && !init_hdmi) {
- DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible\n",
+ DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, assuming it is\n",
port_name(port));
init_hdmi = true;
init_dp = true;
@@ -1701,20 +1551,28 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
DDI_A_4_LANES);
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
intel_encoder->hot_plug = intel_ddi_hot_plug;
- if (init_dp)
- dp_connector = intel_ddi_init_dp_connector(intel_dig_port);
+ if (init_dp) {
+ if (!intel_ddi_init_dp_connector(intel_dig_port))
+ goto err;
+
+ intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
+ dev_priv->hpd_irq_port[port] = intel_dig_port;
+ }
/* In theory we don't need the encoder->type check, but leave it just in
* case we have some really bad VBTs... */
- if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi)
- hdmi_connector = intel_ddi_init_hdmi_connector(intel_dig_port);
-
- if (!dp_connector && !hdmi_connector) {
- drm_encoder_cleanup(encoder);
- kfree(intel_dig_port);
+ if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
+ if (!intel_ddi_init_hdmi_connector(intel_dig_port))
+ goto err;
}
+
+ return;
+
+err:
+ drm_encoder_cleanup(encoder);
+ kfree(intel_dig_port);
}
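With intel_ddi_pll_enable() gone, the HDMI path composes the full WRPLL
control word up front in intel_ddi_pll_select() and stores it in
config.dpll_hw_state.wrpll, so PLL sharing reduces to comparing that single
value. An illustrative composition helper; the divider triple would really
come from intel_ddi_calculate_wrpll():

static uint32_t sketch_wrpll_ctl(unsigned int r2, unsigned int n2,
				 unsigned int p)
{
	return WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
	       WRPLL_DIVIDER_REFERENCE(r2) |
	       WRPLL_DIVIDER_FEEDBACK(n2) |
	       WRPLL_DIVIDER_POST(p);
}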
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f0be855ddf45..99eb7cad62a8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -39,12 +39,45 @@
#include "i915_trace.h"
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_rect.h>
#include <linux/dma_remapping.h>
+/* Primary plane formats supported by all gen */
+#define COMMON_PRIMARY_FORMATS \
+ DRM_FORMAT_C8, \
+ DRM_FORMAT_RGB565, \
+ DRM_FORMAT_XRGB8888, \
+ DRM_FORMAT_ARGB8888
+
+/* Primary plane formats for gen <= 3 */
+static const uint32_t intel_primary_formats_gen2[] = {
+ COMMON_PRIMARY_FORMATS,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB1555,
+};
+
+/* Primary plane formats for gen >= 4 */
+static const uint32_t intel_primary_formats_gen4[] = {
+	COMMON_PRIMARY_FORMATS,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ABGR2101010,
+};
+
+/* Cursor formats */
+static const uint32_t intel_cursor_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
#define DIV_ROUND_CLOSEST_ULL(ll, d) \
- ({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
+({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
-static void intel_increase_pllclock(struct drm_crtc *crtc);
+static void intel_increase_pllclock(struct drm_device *dev,
+ enum pipe pipe);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
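The format tables added in the hunk above feed plane initialization: gen2/3
hardware gets the 15-bit formats, gen4+ the 10-bpc and BGR variants. A hedged
sketch of the selection a primary-plane constructor would make (the helper
name is illustrative):

static const uint32_t *sketch_primary_formats(struct drm_device *dev,
					      int *count)
{
	if (INTEL_INFO(dev)->gen <= 3) {
		*count = ARRAY_SIZE(intel_primary_formats_gen2);
		return intel_primary_formats_gen2;
	}

	*count = ARRAY_SIZE(intel_primary_formats_gen4);
	return intel_primary_formats_gen4;
}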
@@ -68,6 +101,14 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void intel_set_pipe_csc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc);
+static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
+{
+ if (!connector->mst_port)
+ return connector->encoder;
+
+	return &connector->mst_port->mst_encoders[pipe]->base;
+}
+
typedef struct {
int min, max;
} intel_range_t;
@@ -1061,11 +1102,6 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
bool cur_state;
struct intel_dpll_hw_state hw_state;
- if (HAS_PCH_LPT(dev_priv->dev)) {
- DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
- return;
- }
-
if (WARN (!pll,
"asserting DPLL %s with no DPLL\n", state_string(state)))
return;
@@ -1481,9 +1517,6 @@ static void intel_reset_dpio(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!IS_VALLEYVIEW(dev))
- return;
-
if (IS_CHERRYVIEW(dev)) {
enum dpio_phy phy;
u32 val;
@@ -1505,26 +1538,6 @@ static void intel_reset_dpio(struct drm_device *dev)
I915_WRITE(DISPLAY_PHY_CONTROL,
PHY_COM_LANE_RESET_DEASSERT(phy, val));
}
-
- } else {
- /*
- * If DPIO has already been reset, e.g. by BIOS, just skip all
- * this.
- */
- if (I915_READ(DPIO_CTL) & DPIO_CMNRST)
- return;
-
- /*
- * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
- * Need to assert and de-assert PHY SB reset by gating the
- * common lane power, then un-gating it.
- * Simply ungating isn't enough to reset the PHY enough to get
- * ports and lanes running.
- */
- __vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
- false);
- __vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
- true);
}
}
@@ -1712,6 +1725,17 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
val &= ~DPIO_DCLKP_EN;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
+ /* disable left/right clock distribution */
+ if (pipe != PIPE_B) {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ } else {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ }
+
mutex_unlock(&dev_priv->dpio_lock);
}
@@ -1749,6 +1773,9 @@ static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
+ if (WARN_ON(pll == NULL))
+ return;
+
WARN_ON(!pll->refcount);
if (pll->active == 0) {
DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
@@ -1790,12 +1817,14 @@ static void intel_enable_shared_dpll(struct intel_crtc *crtc)
}
WARN_ON(pll->on);
+ intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
+
DRM_DEBUG_KMS("enabling %s\n", pll->name);
pll->enable(dev_priv, pll);
pll->on = true;
}
-static void intel_disable_shared_dpll(struct intel_crtc *crtc)
+void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1826,6 +1855,8 @@ static void intel_disable_shared_dpll(struct intel_crtc *crtc)
DRM_DEBUG_KMS("disabling %s\n", pll->name);
pll->disable(dev_priv, pll);
pll->on = false;
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
@@ -2172,6 +2203,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
u32 alignment;
int ret;
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
switch (obj->tiling_mode) {
case I915_TILING_NONE:
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
@@ -2228,6 +2261,8 @@ err_interruptible:
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
+ WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
+
i915_gem_object_unpin_fence(obj);
i915_gem_object_unpin_from_display_plane(obj);
}
@@ -2314,6 +2349,7 @@ static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
goto out_unref_obj;
}
+ obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG_KMS("plane fb obj %p\n", obj);
@@ -2331,7 +2367,7 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
struct drm_device *dev = intel_crtc->base.dev;
struct drm_crtc *c;
struct intel_crtc *i;
- struct intel_framebuffer *fb;
+ struct drm_i915_gem_object *obj;
if (!intel_crtc->base.primary->fb)
return;
@@ -2352,13 +2388,17 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
if (c == &intel_crtc->base)
continue;
- if (!i->active || !c->primary->fb)
+ if (!i->active)
+ continue;
+
+ obj = intel_fb_obj(c->primary->fb);
+ if (obj == NULL)
continue;
- fb = to_intel_framebuffer(c->primary->fb);
- if (i915_gem_obj_ggtt_offset(fb->obj) == plane_config->base) {
+ if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
drm_framebuffer_reference(c->primary->fb);
intel_crtc->base.primary->fb = c->primary->fb;
+ obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
break;
}
}
@@ -2371,16 +2411,12 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int plane = intel_crtc->plane;
unsigned long linear_offset;
u32 dspcntr;
u32 reg;
- intel_fb = to_intel_framebuffer(fb);
- obj = intel_fb->obj;
-
reg = DSPCNTR(plane);
dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
@@ -2461,16 +2497,12 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int plane = intel_crtc->plane;
unsigned long linear_offset;
u32 dspcntr;
u32 reg;
- intel_fb = to_intel_framebuffer(fb);
- obj = intel_fb->obj;
-
reg = DSPCNTR(plane);
dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
@@ -2546,7 +2578,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
- intel_increase_pllclock(crtc);
+ intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe);
dev_priv->display.update_primary_plane(crtc, fb, x, y);
@@ -2601,7 +2633,7 @@ void intel_display_handle_reset(struct drm_device *dev)
static int
intel_finish_fb(struct drm_framebuffer *old_fb)
{
- struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
bool was_interruptible = dev_priv->mm.interruptible;
int ret;
@@ -2647,7 +2679,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_framebuffer *old_fb;
+ enum pipe pipe = intel_crtc->pipe;
+ struct drm_framebuffer *old_fb = crtc->primary->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
int ret;
if (intel_crtc_has_pending_flip(crtc)) {
@@ -2669,9 +2704,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
}
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev,
- to_intel_framebuffer(fb)->obj,
- NULL);
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ if (ret == 0)
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
if (ret != 0) {
DRM_ERROR("pin & fence failed\n");
@@ -2711,7 +2747,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
dev_priv->display.update_primary_plane(crtc, fb, x, y);
- old_fb = crtc->primary->fb;
+ if (intel_crtc->active)
+ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+
crtc->primary->fb = fb;
crtc->x = x;
crtc->y = y;
@@ -2720,13 +2758,12 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (intel_crtc->active && old_fb != fb)
intel_wait_for_vblank(dev, intel_crtc->pipe);
mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
+ intel_unpin_fb_obj(old_obj);
mutex_unlock(&dev->struct_mutex);
}
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
- intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -3587,7 +3624,7 @@ static void lpt_pch_enable(struct drm_crtc *crtc)
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
-static void intel_put_shared_dpll(struct intel_crtc *crtc)
+void intel_put_shared_dpll(struct intel_crtc *crtc)
{
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
@@ -3607,7 +3644,7 @@ static void intel_put_shared_dpll(struct intel_crtc *crtc)
crtc->config.shared_dpll = DPLL_ID_PRIVATE;
}
-static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
@@ -3818,7 +3855,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
}
/* use legacy palette for Ironlake */
- if (HAS_PCH_SPLIT(dev))
+ if (!HAS_GMCH_DISPLAY(dev))
palreg = LGC_PALETTE(pipe);
/* Workaround : Do not read or write the pipe palette/gamma data while
@@ -3860,30 +3897,6 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
*/
}
-/**
- * i9xx_fixup_plane - ugly workaround for G45 to fire up the hardware
- * cursor plane briefly if not already running after enabling the display
- * plane.
- * This workaround avoids occasional blank screens when self refresh is
- * enabled.
- */
-static void
-g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
- u32 cntl = I915_READ(CURCNTR(pipe));
-
- if ((cntl & CURSOR_MODE) == 0) {
- u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
-
- I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
- I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
- intel_wait_for_vblank(dev_priv->dev, pipe);
- I915_WRITE(CURCNTR(pipe), cntl);
- I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
- I915_WRITE(FW_BLC_SELF, fw_bcl_self);
- }
-}
-
static void intel_crtc_enable_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -3892,11 +3905,10 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
+ drm_vblank_on(dev, pipe);
+
intel_enable_primary_hw_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
- /* The fixup needs to happen before cursor is enabled */
- if (IS_G4X(dev))
- g4x_fixup_plane(dev_priv, pipe);
intel_crtc_update_cursor(crtc, true);
intel_crtc_dpms_overlay(intel_crtc, true);
@@ -3904,8 +3916,14 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
- intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
+
+ /*
+ * FIXME: Once we grow proper nuclear flip support out of this we need
+ * to compute the mask of flip planes precisely. For the time being
+ * consider this a flip from a NULL plane.
+ */
+ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
}
static void intel_crtc_disable_planes(struct drm_crtc *crtc)
@@ -3917,7 +3935,6 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
int plane = intel_crtc->plane;
intel_crtc_wait_for_pending_flips(crtc);
- drm_crtc_vblank_off(crtc);
if (dev_priv->fbc.plane == plane)
intel_disable_fbc(dev);
@@ -3928,6 +3945,15 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
intel_disable_primary_hw_plane(dev_priv, plane, pipe);
+
+ /*
+ * FIXME: Once we grow proper nuclear flip support out of this we need
+ * to compute the mask of flip planes precisely. For the time being
+ * consider this a flip to a NULL plane.
+ */
+ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
+
+ drm_vblank_off(dev, pipe);
}
static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -4006,8 +4032,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
cpt_verify_modeset(dev, intel_crtc->pipe);
intel_crtc_enable_planes(crtc);
-
- drm_crtc_vblank_on(crtc);
}
/* IPS only exists on ULT machines and is tied to pipe A. */
@@ -4059,6 +4083,9 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->active)
return;
+ if (intel_crtc_to_shared_dpll(intel_crtc))
+ intel_enable_shared_dpll(intel_crtc);
+
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
@@ -4083,16 +4110,15 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
- if (intel_crtc->config.has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
-
- if (intel_crtc->config.has_pch_encoder)
- dev_priv->display.fdi_link_train(crtc);
-
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
+ if (intel_crtc->config.has_pch_encoder) {
+ intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
+ dev_priv->display.fdi_link_train(crtc);
+ }
+
intel_ddi_enable_pipe_clock(intel_crtc);
ironlake_pfit_enable(intel_crtc);
@@ -4112,6 +4138,9 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->config.has_pch_encoder)
lpt_pch_enable(crtc);
+ if (intel_crtc->config.dp_encoder_is_mst)
+ intel_ddi_set_vc_payload_alloc(crtc, true);
+
for_each_encoder_on_crtc(dev, crtc, encoder) {
encoder->enable(encoder);
intel_opregion_notify_encoder(encoder, true);
@@ -4121,8 +4150,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
* to change the workaround. */
haswell_mode_set_planes_workaround(intel_crtc);
intel_crtc_enable_planes(crtc);
-
- drm_crtc_vblank_on(crtc);
}
static void ironlake_pfit_disable(struct intel_crtc *crtc)
@@ -4162,6 +4189,9 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
intel_disable_pipe(dev_priv, pipe);
+ if (intel_crtc->config.dp_encoder_is_mst)
+ intel_ddi_set_vc_payload_alloc(crtc, false);
+
ironlake_pfit_disable(intel_crtc);
for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -4200,7 +4230,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
- intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
}
@@ -4233,23 +4262,25 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
intel_ddi_disable_pipe_clock(intel_crtc);
- for_each_encoder_on_crtc(dev, crtc, encoder)
- if (encoder->post_disable)
- encoder->post_disable(encoder);
-
if (intel_crtc->config.has_pch_encoder) {
lpt_disable_pch_transcoder(dev_priv);
intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
intel_ddi_fdi_disable(crtc);
}
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->post_disable)
+ encoder->post_disable(encoder);
+
intel_crtc->active = false;
intel_update_watermarks(crtc);
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
- intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
+
+ if (intel_crtc_to_shared_dpll(intel_crtc))
+ intel_disable_shared_dpll(intel_crtc);
}
static void ironlake_crtc_off(struct drm_crtc *crtc)
@@ -4258,10 +4289,6 @@ static void ironlake_crtc_off(struct drm_crtc *crtc)
intel_put_shared_dpll(intel_crtc);
}
-static void haswell_crtc_off(struct drm_crtc *crtc)
-{
- intel_ddi_put_crtc_pll(crtc);
-}
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
@@ -4287,6 +4314,23 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
+static enum intel_display_power_domain port_to_power_domain(enum port port)
+{
+ switch (port) {
+ case PORT_A:
+ return POWER_DOMAIN_PORT_DDI_A_4_LANES;
+ case PORT_B:
+ return POWER_DOMAIN_PORT_DDI_B_4_LANES;
+ case PORT_C:
+ return POWER_DOMAIN_PORT_DDI_C_4_LANES;
+ case PORT_D:
+ return POWER_DOMAIN_PORT_DDI_D_4_LANES;
+ default:
+ WARN_ON_ONCE(1);
+ return POWER_DOMAIN_PORT_OTHER;
+ }
+}
+
#define for_each_power_domain(domain, mask) \
for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
if ((1 << (domain)) & (mask))
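
port_to_power_domain() and for_each_power_domain() combine into one
pattern: build an unsigned long with a bit per required domain, then
walk the set bits and get/put each one. A compilable sketch of the same
pattern (the domain names are invented for illustration):

#include <stdio.h>

enum domain { DOM_PIPE_A, DOM_TRANSCODER_A, DOM_PORT_DDI_B, DOM_NUM };

#define for_each_domain(d, mask) \
	for ((d) = 0; (d) < DOM_NUM; (d)++) \
		if ((1UL << (d)) & (mask))

int main(void)
{
	unsigned long mask = (1UL << DOM_PIPE_A) | (1UL << DOM_PORT_DDI_B);
	enum domain d;

	for_each_domain(d, mask)
		printf("get domain %d\n", (int)d);
	return 0;
}
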
@@ -4305,19 +4349,10 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder)
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
intel_dig_port = enc_to_dig_port(&intel_encoder->base);
- switch (intel_dig_port->port) {
- case PORT_A:
- return POWER_DOMAIN_PORT_DDI_A_4_LANES;
- case PORT_B:
- return POWER_DOMAIN_PORT_DDI_B_4_LANES;
- case PORT_C:
- return POWER_DOMAIN_PORT_DDI_C_4_LANES;
- case PORT_D:
- return POWER_DOMAIN_PORT_DDI_D_4_LANES;
- default:
- WARN_ON_ONCE(1);
- return POWER_DOMAIN_PORT_OTHER;
- }
+ return port_to_power_domain(intel_dig_port->port);
+ case INTEL_OUTPUT_DP_MST:
+ intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
+ return port_to_power_domain(intel_dig_port->port);
case INTEL_OUTPUT_ANALOG:
return POWER_DOMAIN_PORT_CRT;
case INTEL_OUTPUT_DSI:
@@ -4333,7 +4368,6 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
struct intel_encoder *intel_encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
- bool pfit_enabled = intel_crtc->config.pch_pfit.enabled;
unsigned long mask;
enum transcoder transcoder;
@@ -4341,7 +4375,8 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
mask = BIT(POWER_DOMAIN_PIPE(pipe));
mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
- if (pfit_enabled)
+ if (intel_crtc->config.pch_pfit.enabled ||
+ intel_crtc->config.pch_pfit.force_thru)
mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
for_each_encoder_on_crtc(dev, crtc, intel_encoder)
@@ -4398,7 +4433,8 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev)
intel_display_set_init_power(dev_priv, false);
}
-int valleyview_get_vco(struct drm_i915_private *dev_priv)
+/* returns HPLL frequency in kHz */
+static int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
@@ -4408,7 +4444,23 @@ int valleyview_get_vco(struct drm_i915_private *dev_priv)
CCK_FUSE_HPLL_FREQ_MASK;
mutex_unlock(&dev_priv->dpio_lock);
- return vco_freq[hpll_freq];
+ return vco_freq[hpll_freq] * 1000;
+}
+
+static void vlv_update_cdclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
+ DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz",
+ dev_priv->vlv_cdclk_freq);
+
+ /*
+ * Program the gmbus_freq based on the cdclk frequency.
+ * BSpec erroneously claims we should aim for 4MHz, but
+ * in fact 1MHz is the correct frequency.
+ */
+ I915_WRITE(GMBUSFREQ_VLV, dev_priv->vlv_cdclk_freq);
}
/* Adjust CDclk dividers to allow high res or save power if possible */
@@ -4417,12 +4469,11 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, cmd;
- WARN_ON(valleyview_cur_cdclk(dev_priv) != dev_priv->vlv_cdclk_freq);
- dev_priv->vlv_cdclk_freq = cdclk;
+ WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
- if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
+ if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
cmd = 2;
- else if (cdclk == 266)
+ else if (cdclk == 266667)
cmd = 1;
else
cmd = 0;
@@ -4439,18 +4490,23 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
}
mutex_unlock(&dev_priv->rps.hw_lock);
- if (cdclk == 400) {
+ if (cdclk == 400000) {
u32 divider, vco;
vco = valleyview_get_vco(dev_priv);
- divider = ((vco << 1) / cdclk) - 1;
+ divider = DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1;
mutex_lock(&dev_priv->dpio_lock);
/* adjust cdclk divider */
val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
- val &= ~0xf;
+ val &= ~DISPLAY_FREQUENCY_VALUES;
val |= divider;
vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
+
+ if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
+ DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
+ 50))
+ DRM_ERROR("timed out waiting for CDclk change\n");
mutex_unlock(&dev_priv->dpio_lock);
}
@@ -4463,54 +4519,43 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
* For high bandwidth configs, we set a higher latency in the bunit
* so that the core display fetch happens in time to avoid underruns.
*/
- if (cdclk == 400)
+ if (cdclk == 400000)
val |= 4500 / 250; /* 4.5 usec */
else
val |= 3000 / 250; /* 3.0 usec */
vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
mutex_unlock(&dev_priv->dpio_lock);
- /* Since we changed the CDclk, we need to update the GMBUSFREQ too */
- intel_i2c_reset(dev);
-}
-
-int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
-{
- int cur_cdclk, vco;
- int divider;
-
- vco = valleyview_get_vco(dev_priv);
-
- mutex_lock(&dev_priv->dpio_lock);
- divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
- mutex_unlock(&dev_priv->dpio_lock);
-
- divider &= 0xf;
-
- cur_cdclk = (vco << 1) / (divider + 1);
-
- return cur_cdclk;
+ vlv_update_cdclk(dev);
}
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
int max_pixclk)
{
+ int vco = valleyview_get_vco(dev_priv);
+ int freq_320 = (vco << 1) % 320000 != 0 ? 333333 : 320000;
+
/*
* Really only a few cases to deal with, as only 4 CDclks are supported:
* 200MHz
* 267MHz
- * 320MHz
+ * 320/333MHz (depends on HPLL freq)
* 400MHz
* So we check to see whether we're above 90% of the lower bin and
* adjust if needed.
+ *
+ * We seem to get an unstable or solid color picture at 200MHz.
+ * Not sure what's wrong. For now use 200MHz only when all pipes
+ * are off.
*/
- if (max_pixclk > 288000) {
- return 400;
- } else if (max_pixclk > 240000) {
- return 320;
- } else
- return 266;
- /* Looks like the 200MHz CDclk freq doesn't work on some configs */
+ if (max_pixclk > freq_320*9/10)
+ return 400000;
+ else if (max_pixclk > 266667*9/10)
+ return freq_320;
+ else if (max_pixclk > 0)
+ return 266667;
+ else
+ return 200000;
}
/* compute the max pixel clock for new configuration */
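
The guard-band logic in valleyview_calc_cdclk() picks the smallest CDclk
bin whose 90% point still clears the pixel clock, with the middle bin
being 320 or 333 MHz depending on the HPLL frequency. A standalone
restatement with a worked example (all frequencies in kHz):

#include <stdio.h>

static int calc_cdclk(int max_pixclk, int freq_320)
{
	if (max_pixclk > freq_320 * 9 / 10)
		return 400000;
	else if (max_pixclk > 266667 * 9 / 10)
		return freq_320;
	else if (max_pixclk > 0)
		return 266667;
	else
		return 200000;	/* only when all pipes are off */
}

int main(void)
{
	/* 1080p at 148500 kHz is below 240000, so the 266667 bin suffices. */
	printf("%d kHz\n", calc_cdclk(148500, 320000));
	return 0;
}
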
@@ -4633,8 +4678,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
intel_crtc_enable_planes(crtc);
- drm_crtc_vblank_on(crtc);
-
/* Underruns don't raise interrupts, so check manually. */
i9xx_check_fifo_underruns(dev);
}
@@ -4727,8 +4770,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
if (IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
- drm_crtc_vblank_on(crtc);
-
/* Underruns don't raise interrupts, so check manually. */
i9xx_check_fifo_underruns(dev);
}
@@ -4768,6 +4809,16 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
if (IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
+ /*
+ * Vblank time updates from the shadow to live plane control register
+ * are blocked if the memory self-refresh mode is active at that
+ * moment. So to make sure the plane gets truly disabled, disable
+ * first the self-refresh mode. The self-refresh enable bit in turn
+ * will be checked/applied by the HW only at the next frame start
+ * event which is after the vblank start event, so we need to have a
+ * wait-for-vblank between disabling the plane and the pipe.
+ */
+ intel_set_memory_cxsr(dev_priv, false);
intel_crtc_disable_planes(crtc);
for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -4776,9 +4827,10 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
/*
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
+ * We also need to wait on all gmch platforms because of the
+ * self-refresh mode constraint explained above.
*/
- if (IS_GEN2(dev))
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev, pipe);
intel_disable_pipe(dev_priv, pipe);
@@ -4805,7 +4857,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
- intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
}
@@ -4843,23 +4894,49 @@ static void intel_crtc_update_sarea(struct drm_crtc *crtc,
}
}
+/* Master function to enable/disable CRTC and corresponding power wells */
+void intel_crtc_control(struct drm_crtc *crtc, bool enable)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum intel_display_power_domain domain;
+ unsigned long domains;
+
+ if (enable) {
+ if (!intel_crtc->active) {
+ domains = get_crtc_power_domains(crtc);
+ for_each_power_domain(domain, domains)
+ intel_display_power_get(dev_priv, domain);
+ intel_crtc->enabled_power_domains = domains;
+
+ dev_priv->display.crtc_enable(crtc);
+ }
+ } else {
+ if (intel_crtc->active) {
+ dev_priv->display.crtc_disable(crtc);
+
+ domains = intel_crtc->enabled_power_domains;
+ for_each_power_domain(domain, domains)
+ intel_display_power_put(dev_priv, domain);
+ intel_crtc->enabled_power_domains = 0;
+ }
+ }
+}
+
/**
* Sets the power management mode of the pipe and plane.
*/
void intel_crtc_update_dpms(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
bool enable = false;
for_each_encoder_on_crtc(dev, crtc, intel_encoder)
enable |= intel_encoder->connectors_active;
- if (enable)
- dev_priv->display.crtc_enable(crtc);
- else
- dev_priv->display.crtc_disable(crtc);
+ intel_crtc_control(crtc, enable);
intel_crtc_update_sarea(crtc, enable);
}
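
intel_crtc_control() makes the power sequencing explicit: every domain
the CRTC will touch is acquired before the pipe comes up, the acquired
set is remembered, and it is released only after the pipe is fully torn
down, so the put side always matches the get side. A compact model of
that bracketing (all helpers are stubs for illustration):

struct crtc {
	int active;
	unsigned long enabled_power_domains;
};

static unsigned long needed_domains(struct crtc *c) { (void)c; return 0x5; }
static void power_get_mask(unsigned long m) { (void)m; /* get each set bit */ }
static void power_put_mask(unsigned long m) { (void)m; /* put each set bit */ }
static void hw_enable(struct crtc *c)  { c->active = 1; }
static void hw_disable(struct crtc *c) { c->active = 0; }

static void crtc_control(struct crtc *c, int enable)
{
	if (enable && !c->active) {
		c->enabled_power_domains = needed_domains(c);
		power_get_mask(c->enabled_power_domains);
		hw_enable(c);
	} else if (!enable && c->active) {
		hw_disable(c);
		power_put_mask(c->enabled_power_domains);
		c->enabled_power_domains = 0;
	}
}
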
@@ -4869,6 +4946,8 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_connector *connector;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(crtc->primary->fb);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
/* crtc should still be enabled when we disable it. */
WARN_ON(!crtc->enabled);
@@ -4877,13 +4956,11 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
intel_crtc_update_sarea(crtc, false);
dev_priv->display.off(crtc);
- assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
- assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
- assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
-
if (crtc->primary->fb) {
mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(to_intel_framebuffer(crtc->primary->fb)->obj);
+ intel_unpin_fb_obj(old_obj);
+ i915_gem_track_fb(old_obj, NULL,
+ INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
crtc->primary->fb = NULL;
}
@@ -4939,24 +5016,31 @@ static void intel_connector_check_state(struct intel_connector *connector)
connector->base.base.id,
connector->base.name);
+ /* there is no real hw state for MST connectors */
+ if (connector->mst_port)
+ return;
+
WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
"wrong connector dpms state\n");
WARN(connector->base.encoder != &encoder->base,
"active connector not linked to encoder\n");
- WARN(!encoder->connectors_active,
- "encoder->connectors_active not set\n");
- encoder_enabled = encoder->get_hw_state(encoder, &pipe);
- WARN(!encoder_enabled, "encoder not enabled\n");
- if (WARN_ON(!encoder->base.crtc))
- return;
+ if (encoder) {
+ WARN(!encoder->connectors_active,
+ "encoder->connectors_active not set\n");
+
+ encoder_enabled = encoder->get_hw_state(encoder, &pipe);
+ WARN(!encoder_enabled, "encoder not enabled\n");
+ if (WARN_ON(!encoder->base.crtc))
+ return;
- crtc = encoder->base.crtc;
+ crtc = encoder->base.crtc;
- WARN(!crtc->enabled, "crtc not enabled\n");
- WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
- WARN(pipe != to_intel_crtc(crtc)->pipe,
- "encoder active on the wrong pipe\n");
+ WARN(!crtc->enabled, "crtc not enabled\n");
+ WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
+ WARN(pipe != to_intel_crtc(crtc)->pipe,
+ "encoder active on the wrong pipe\n");
+ }
}
}
@@ -5161,9 +5245,11 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
if (HAS_IPS(dev))
hsw_compute_ips_config(crtc, pipe_config);
- /* XXX: PCH clock sharing is done in ->mode_set, so make sure the old
- * clock survives for now. */
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ /*
+ * XXX: PCH/WRPLL clock sharing is done in ->mode_set, so make sure the
+ * old clock survives for now.
+ */
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev) || HAS_DDI(dev))
pipe_config->shared_dpll = crtc->config.shared_dpll;
if (pipe_config->has_pch_encoder)
@@ -5174,7 +5260,22 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
- return 400000; /* FIXME */
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int vco = valleyview_get_vco(dev_priv);
+ u32 val;
+ int divider;
+
+ mutex_lock(&dev_priv->dpio_lock);
+ val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ divider = val & DISPLAY_FREQUENCY_VALUES;
+
+ WARN((val & DISPLAY_FREQUENCY_STATUS) !=
+ (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
+ "cdclk change in progress\n");
+
+ return DIV_ROUND_CLOSEST(vco << 1, divider + 1);
}
static int i945_get_display_clock_speed(struct drm_device *dev)
@@ -6125,8 +6226,8 @@ static void i9xx_get_plane_config(struct intel_crtc *crtc,
aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
plane_config->tiled);
- plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] *
- aligned_height, PAGE_SIZE);
+ plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
+ aligned_height);
DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe, plane, crtc->base.primary->fb->width,
@@ -7145,8 +7246,8 @@ static void ironlake_get_plane_config(struct intel_crtc *crtc,
aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
plane_config->tiled);
- plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] *
- aligned_height, PAGE_SIZE);
+ plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
+ aligned_height);
DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe, plane, crtc->base.primary->fb->width,
@@ -7163,6 +7264,10 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
+ if (!intel_display_power_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(crtc->pipe)))
+ return false;
+
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
@@ -7237,7 +7342,6 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
struct intel_crtc *crtc;
for_each_intel_crtc(dev, crtc)
@@ -7245,14 +7349,15 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
pipe_name(crtc->pipe));
WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
- WARN(plls->spll_refcount, "SPLL enabled\n");
- WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n");
- WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n");
+ WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
+ WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
+ WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
"CPU PWM1 enabled\n");
- WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
- "CPU PWM2 enabled\n");
+ if (IS_HASWELL(dev))
+ WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
+ "CPU PWM2 enabled\n");
WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
"PCH PWM1 enabled\n");
WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
@@ -7265,7 +7370,17 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
* gen-specific and since we only disable LCPLL after we fully disable
* the interrupts, the check below should be enough.
*/
- WARN(!dev_priv->pm.irqs_disabled, "IRQs enabled\n");
+ WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
+}
+
+static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ if (IS_HASWELL(dev))
+ return I915_READ(D_COMP_HSW);
+ else
+ return I915_READ(D_COMP_BDW);
}
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
@@ -7276,12 +7391,12 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
mutex_lock(&dev_priv->rps.hw_lock);
if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
val))
- DRM_ERROR("Failed to disable D_COMP\n");
+ DRM_ERROR("Failed to write to D_COMP\n");
mutex_unlock(&dev_priv->rps.hw_lock);
} else {
- I915_WRITE(D_COMP, val);
+ I915_WRITE(D_COMP_BDW, val);
+ POSTING_READ(D_COMP_BDW);
}
- POSTING_READ(D_COMP);
}
/*
@@ -7319,12 +7434,13 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
DRM_ERROR("LCPLL still locked\n");
- val = I915_READ(D_COMP);
+ val = hsw_read_dcomp(dev_priv);
val |= D_COMP_COMP_DISABLE;
hsw_write_dcomp(dev_priv, val);
ndelay(100);
- if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
+ if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
+ 1))
DRM_ERROR("D_COMP RCOMP still in progress\n");
if (allow_power_down) {
@@ -7373,7 +7489,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
POSTING_READ(LCPLL_CTL);
}
- val = I915_READ(D_COMP);
+ val = hsw_read_dcomp(dev_priv);
val |= D_COMP_COMP_FORCE;
val &= ~D_COMP_COMP_DISABLE;
hsw_write_dcomp(dev_priv, val);
@@ -7479,13 +7595,59 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
if (!intel_ddi_pll_select(intel_crtc))
return -EINVAL;
- intel_ddi_pll_enable(intel_crtc);
intel_crtc->lowfreq_avail = false;
return 0;
}
+static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_shared_dpll *pll;
+ enum port port;
+ uint32_t tmp;
+
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
+
+ port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
+
+ pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
+
+ switch (pipe_config->ddi_pll_sel) {
+ case PORT_CLK_SEL_WRPLL1:
+ pipe_config->shared_dpll = DPLL_ID_WRPLL1;
+ break;
+ case PORT_CLK_SEL_WRPLL2:
+ pipe_config->shared_dpll = DPLL_ID_WRPLL2;
+ break;
+ }
+
+ if (pipe_config->shared_dpll >= 0) {
+ pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
+
+ WARN_ON(!pll->get_hw_state(dev_priv, pll,
+ &pipe_config->dpll_hw_state));
+ }
+
+ /*
+	 * Haswell has only FDI/PCH transcoder A, which is connected to DDI E.
+	 * So just check whether this pipe is wired to DDI E and whether the
+	 * PCH transcoder is on.
+ */
+ if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
+ pipe_config->has_pch_encoder = true;
+
+ tmp = I915_READ(FDI_RX_CTL(PIPE_A));
+ pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
+ FDI_DP_PORT_WIDTH_SHIFT) + 1;
+
+ ironlake_get_fdi_m_n_config(crtc, pipe_config);
+ }
+}
+
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
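
The FDI lane count recovered in haswell_get_ddi_port_state() comes from
a register field that stores lanes minus one, hence the mask, shift,
and +1. A standalone decode of the same shape (the shift and width here
are assumptions for illustration, not quoted from i915_reg.h):

#include <stdio.h>
#include <stdint.h>

#define PORT_WIDTH_SHIFT	19		/* assumed field position */
#define PORT_WIDTH_MASK		(7u << PORT_WIDTH_SHIFT)

static int fdi_lanes(uint32_t fdi_rx_ctl)
{
	return (int)((fdi_rx_ctl & PORT_WIDTH_MASK) >> PORT_WIDTH_SHIFT) + 1;
}

int main(void)
{
	/* field value 3 encodes 4 lanes */
	printf("%d lanes\n", fdi_lanes(3u << PORT_WIDTH_SHIFT));
	return 0;
}
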
@@ -7531,22 +7693,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
if (!(tmp & PIPECONF_ENABLE))
return false;
- /*
- * Haswell has only FDI/PCH transcoder A. It is which is connected to
- * DDI E. So just check whether this pipe is wired to DDI E and whether
- * the PCH transcoder is on.
- */
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
- if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
- I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
- pipe_config->has_pch_encoder = true;
-
- tmp = I915_READ(FDI_RX_CTL(PIPE_A));
- pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
- FDI_DP_PORT_WIDTH_SHIFT) + 1;
-
- ironlake_get_fdi_m_n_config(crtc, pipe_config);
- }
+ haswell_get_ddi_port_state(crtc, pipe_config);
intel_get_pipe_timings(crtc, pipe_config);
@@ -7991,8 +8138,8 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int x = intel_crtc->cursor_x;
- int y = intel_crtc->cursor_y;
+ int x = crtc->cursor_x;
+ int y = crtc->cursor_y;
u32 base = 0, pos = 0;
if (on)
@@ -8036,21 +8183,27 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
intel_crtc->cursor_base = base;
}
-static int intel_crtc_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file,
- uint32_t handle,
- uint32_t width, uint32_t height)
+/*
+ * intel_crtc_cursor_set_obj - Set cursor to specified GEM object
+ *
+ * Note that the object's reference will be consumed if the update fails. If
+ * the update succeeds, the reference of the old object (if any) will be
+ * consumed.
+ */
+static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
+ struct drm_i915_gem_object *obj,
+ uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_i915_gem_object *obj;
+ enum pipe pipe = intel_crtc->pipe;
unsigned old_width;
uint32_t addr;
int ret;
/* if we want to turn off the cursor ignore width and height */
- if (!handle) {
+ if (!obj) {
DRM_DEBUG_KMS("cursor off\n");
addr = 0;
obj = NULL;
@@ -8066,12 +8219,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
return -EINVAL;
}
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
- if (&obj->base == NULL)
- return -ENOENT;
-
if (obj->base.size < width * height * 4) {
- DRM_DEBUG_KMS("buffer is to small\n");
+ DRM_DEBUG_KMS("buffer is too small\n");
ret = -ENOMEM;
goto fail;
}
@@ -8126,9 +8275,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
if (intel_crtc->cursor_bo) {
if (!INTEL_INFO(dev)->cursor_needs_physical)
i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
- drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
}
+ i915_gem_track_fb(intel_crtc->cursor_bo, obj,
+ INTEL_FRONTBUFFER_CURSOR(pipe));
mutex_unlock(&dev->struct_mutex);
old_width = intel_crtc->cursor_width;
@@ -8144,6 +8294,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
}
+ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
+
return 0;
fail_unpin:
i915_gem_object_unpin_from_display_plane(obj);
@@ -8154,19 +8306,6 @@ fail:
return ret;
}
-static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX);
- intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX);
-
- if (intel_crtc->active)
- intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
-
- return 0;
-}
-
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t start, uint32_t size)
{
@@ -8242,7 +8381,7 @@ static u32
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
{
u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
- return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
+ return PAGE_ALIGN(pitch * mode->vdisplay);
}
static struct drm_framebuffer *
@@ -8667,16 +8806,14 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
return mode;
}
-static void intel_increase_pllclock(struct drm_crtc *crtc)
+static void intel_increase_pllclock(struct drm_device *dev,
+ enum pipe pipe)
{
- struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
int dpll_reg = DPLL(pipe);
int dpll;
- if (HAS_PCH_SPLIT(dev))
+ if (!HAS_GMCH_DISPLAY(dev))
return;
if (!dev_priv->lvds_downclock_avail)
@@ -8704,7 +8841,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (HAS_PCH_SPLIT(dev))
+ if (!HAS_GMCH_DISPLAY(dev))
return;
if (!dev_priv->lvds_downclock_avail)
@@ -8773,28 +8910,179 @@ out:
intel_runtime_pm_put(dev_priv);
}
-void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring)
+
+/**
+ * intel_mark_fb_busy - mark given planes as busy
+ * @dev: DRM device
+ * @frontbuffer_bits: bits for the affected planes
+ * @ring: optional ring for asynchronous commands
+ *
+ * This function gets called every time the screen contents change. It can be
+ * used, e.g., to keep the update rate at the nominal refresh rate with DRRS.
+ */
+static void intel_mark_fb_busy(struct drm_device *dev,
+ unsigned frontbuffer_bits,
+ struct intel_engine_cs *ring)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_crtc *crtc;
+ enum pipe pipe;
if (!i915.powersave)
return;
- for_each_crtc(dev, crtc) {
- if (!crtc->primary->fb)
- continue;
-
- if (to_intel_framebuffer(crtc->primary->fb)->obj != obj)
+ for_each_pipe(pipe) {
+ if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
continue;
- intel_increase_pllclock(crtc);
+ intel_increase_pllclock(dev, pipe);
if (ring && intel_fbc_enabled(dev))
ring->fbc_dirty = true;
}
}
+/**
+ * intel_fb_obj_invalidate - invalidate frontbuffer object
+ * @obj: GEM object to invalidate
+ * @ring: set for asynchronous rendering
+ *
+ * This function gets called every time rendering on the given object starts and
+ * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
+ * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
+ * until the rendering completes or a flip on this frontbuffer plane is
+ * scheduled.
+ */
+void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (!obj->frontbuffer_bits)
+ return;
+
+ if (ring) {
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ dev_priv->fb_tracking.busy_bits
+ |= obj->frontbuffer_bits;
+ dev_priv->fb_tracking.flip_bits
+ &= ~obj->frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+ }
+
+ intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
+
+ intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
+}
+
+/**
+ * intel_frontbuffer_flush - flush frontbuffer
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called every time rendering on the given planes has
+ * completed and frontbuffer caching can be started again. Flushes will get
+ * delayed if they're blocked by some outstanding asynchronous rendering.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Delay flushing when rings are still busy. */
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+
+ intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
+
+ intel_edp_psr_flush(dev, frontbuffer_bits);
+}
+
+/**
+ * intel_fb_obj_flush - flush frontbuffer object
+ * @obj: GEM object to flush
+ * @retire: set when retiring asynchronous rendering
+ *
+ * This function gets called every time rendering on the given object has
+ * completed and frontbuffer caching can be started again. If @retire is true
+ * then any delayed flushes will be unblocked.
+ */
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+ bool retire)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned frontbuffer_bits;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (!obj->frontbuffer_bits)
+ return;
+
+ frontbuffer_bits = obj->frontbuffer_bits;
+
+ if (retire) {
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ /* Filter out new bits since rendering started. */
+ frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
+
+ dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+ }
+
+ intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
+
+/**
+ * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on the given planes. The
+ * actual frontbuffer flushing will be delayed until completion is signalled
+ * with intel_frontbuffer_flip_complete. If an invalidate happens in between,
+ * this flush will be cancelled.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ dev_priv->fb_tracking.flip_bits
+ |= frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+}
+
+/**
+ * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flush
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after the flip has been latched and will complete
+ * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_complete(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ /* Mask any cancelled flips. */
+ frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
+ dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+
+ intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
+
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
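
The frontbuffer hooks above form a small state machine: invalidate sets
busy bits and cancels pending flip flushes, flushes are masked by busy
bits until retirement clears them, and flip_prepare/flip_complete park
a flush until the flip actually latches. A lock-free toy model of those
transitions (the real code holds fb_tracking.lock around each one):

struct fb_tracking { unsigned busy_bits, flip_bits; };

/* Rendering started on these planes: block flushes, cancel flip flushes. */
static void invalidate(struct fb_tracking *t, unsigned bits)
{
	t->busy_bits |= bits;
	t->flip_bits &= ~bits;
}

/* Rendering retired: unblock and return the bits that may flush now. */
static unsigned retire(struct fb_tracking *t, unsigned bits)
{
	bits &= t->busy_bits;
	t->busy_bits &= ~bits;
	return bits;
}

/* A flip was scheduled: hold the flush until the flip latches. */
static void flip_prepare(struct fb_tracking *t, unsigned bits)
{
	t->flip_bits |= bits;
}

/* The flip latched: flush whatever wasn't cancelled in the meantime. */
static unsigned flip_complete(struct fb_tracking *t, unsigned bits)
{
	bits &= t->flip_bits;
	t->flip_bits &= ~bits;
	return bits;
}
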
@@ -8812,8 +9100,6 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
kfree(work);
}
- intel_crtc_cursor_set(crtc, NULL, 0, 0, 0);
-
drm_crtc_cleanup(crtc);
kfree(intel_crtc);
@@ -8824,6 +9110,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
struct intel_unpin_work *work =
container_of(__work, struct intel_unpin_work, work);
struct drm_device *dev = work->crtc->dev;
+ enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(work->old_fb_obj);
@@ -8833,6 +9120,8 @@ static void intel_unpin_work_fn(struct work_struct *__work)
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
+ intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+
BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
@@ -9202,6 +9491,150 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
return 0;
}
+static bool use_mmio_flip(struct intel_engine_cs *ring,
+ struct drm_i915_gem_object *obj)
+{
+ /*
+	 * MMIO flips are not used on older platforms because the lack of a
+	 * flip-done interrupt forces us to use CS flips there. Those older
+	 * platforms derive flip done using some clever tricks involving the
+	 * flip_pending status bits and vblank irqs, and MMIO flips would
+	 * disrupt that mechanism.
+ */
+
+ if (ring == NULL)
+ return true;
+
+ if (INTEL_INFO(ring->dev)->gen < 5)
+ return false;
+
+ if (i915.use_mmio_flip < 0)
+ return false;
+ else if (i915.use_mmio_flip > 0)
+ return true;
+ else
+ return ring != obj->ring;
+}
+
+static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_framebuffer *intel_fb =
+ to_intel_framebuffer(intel_crtc->base.primary->fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ u32 dspcntr;
+ u32 reg;
+
+ intel_mark_page_flip_active(intel_crtc);
+
+ reg = DSPCNTR(intel_crtc->plane);
+ dspcntr = I915_READ(reg);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dspcntr |= DISPPLANE_TILED;
+ else
+ dspcntr &= ~DISPPLANE_TILED;
+ }
+ I915_WRITE(reg, dspcntr);
+
+ I915_WRITE(DSPSURF(intel_crtc->plane),
+ intel_crtc->unpin_work->gtt_offset);
+ POSTING_READ(DSPSURF(intel_crtc->plane));
+}
+
+static int intel_postpone_flip(struct drm_i915_gem_object *obj)
+{
+ struct intel_engine_cs *ring;
+ int ret;
+
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+ if (!obj->last_write_seqno)
+ return 0;
+
+ ring = obj->ring;
+
+ if (i915_seqno_passed(ring->get_seqno(ring, true),
+ obj->last_write_seqno))
+ return 0;
+
+ ret = i915_gem_check_olr(ring, obj->last_write_seqno);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(!ring->irq_get(ring)))
+ return 0;
+
+ return 1;
+}
+
+void intel_notify_mmio_flip(struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = to_i915(ring->dev);
+ struct intel_crtc *intel_crtc;
+ unsigned long irq_flags;
+ u32 seqno;
+
+ seqno = ring->get_seqno(ring, false);
+
+ spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
+ for_each_intel_crtc(ring->dev, intel_crtc) {
+ struct intel_mmio_flip *mmio_flip;
+
+ mmio_flip = &intel_crtc->mmio_flip;
+ if (mmio_flip->seqno == 0)
+ continue;
+
+ if (ring->id != mmio_flip->ring_id)
+ continue;
+
+ if (i915_seqno_passed(seqno, mmio_flip->seqno)) {
+ intel_do_mmio_flip(intel_crtc);
+ mmio_flip->seqno = 0;
+ ring->irq_put(ring);
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+}
+
+static int intel_queue_mmio_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
+ uint32_t flags)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned long irq_flags;
+ int ret;
+
+ if (WARN_ON(intel_crtc->mmio_flip.seqno))
+ return -EBUSY;
+
+ ret = intel_postpone_flip(obj);
+ if (ret < 0)
+ return ret;
+ if (ret == 0) {
+ intel_do_mmio_flip(intel_crtc);
+ return 0;
+ }
+
+ spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
+ intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
+ intel_crtc->mmio_flip.ring_id = obj->ring->id;
+ spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+
+ /*
+	 * Double check to catch cases where the irq fired before the
+	 * mmio flip data was ready
+ */
+ intel_notify_mmio_flip(obj->ring);
+ return 0;
+}
+
static int intel_default_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
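
use_mmio_flip() folds three inputs into the flip-path choice: the
hardware generation, the i915.use_mmio_flip module parameter (negative
forces CS flips, positive forces MMIO, zero auto-selects), and whether
the object was last written by a different ring, in which case an MMIO
flip avoids cross-ring synchronization. A standalone restatement of
that decision table (parameter plumbing stubbed out):

struct ring { int id; };

static int choose_mmio_flip(const struct ring *ring, int gen, int param,
			    const struct ring *obj_ring)
{
	if (ring == NULL)		/* no ring at all: CS flip impossible */
		return 1;
	if (gen < 5)			/* rely on flip_pending tricks instead */
		return 0;
	if (param < 0)
		return 0;
	if (param > 0)
		return 1;
	return ring != obj_ring;	/* auto: avoid cross-ring sync */
}
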
@@ -9220,13 +9653,22 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *old_fb = crtc->primary->fb;
- struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
struct intel_unpin_work *work;
struct intel_engine_cs *ring;
unsigned long flags;
int ret;
+ /*
+ * drm_mode_page_flip_ioctl() should already catch this, but double
+ * check to be safe. In the future we may enable pageflipping from
+ * a disabled primary plane.
+ */
+ if (WARN_ON(intel_fb_obj(old_fb) == NULL))
+ return -EBUSY;
+
/* Can't change pixel format via MI display flips. */
if (fb->pixel_format != crtc->primary->fb->pixel_format)
return -EINVAL;
@@ -9249,7 +9691,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->event = event;
work->crtc = crtc;
- work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
+ work->old_fb_obj = intel_fb_obj(old_fb);
INIT_WORK(&work->work, intel_unpin_work_fn);
ret = drm_crtc_vblank_get(crtc);
@@ -9290,10 +9732,15 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
- work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(intel_crtc->pipe)) + 1;
+ work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1;
if (IS_VALLEYVIEW(dev)) {
ring = &dev_priv->ring[BCS];
+ if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
+ /* vlv: DISPLAY_FLIP fails to change tiling */
+ ring = NULL;
+ } else if (IS_IVYBRIDGE(dev)) {
+ ring = &dev_priv->ring[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
ring = obj->ring;
if (ring == NULL || ring->id != RCS)
@@ -9309,12 +9756,20 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->gtt_offset =
i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
- ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, page_flip_flags);
+ if (use_mmio_flip(ring, obj))
+ ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
+ page_flip_flags);
+ else
+ ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
+ page_flip_flags);
if (ret)
goto cleanup_unpin;
+ i915_gem_track_fb(work->old_fb_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(pipe));
+
intel_disable_fbc(dev);
- intel_mark_fb_busy(obj, NULL);
+ intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
trace_i915_flip_request(intel_crtc->plane, obj);
@@ -9344,7 +9799,7 @@ out_hang:
intel_crtc_wait_for_pending_flips(crtc);
ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
if (ret == 0 && event)
- drm_send_vblank_event(dev, intel_crtc->pipe, event);
+ drm_send_vblank_event(dev, pipe, event);
}
return ret;
}
@@ -10017,11 +10472,14 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(double_wide);
+ PIPE_CONF_CHECK_X(ddi_pll_sel);
+
PIPE_CONF_CHECK_I(shared_dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
+ PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
@@ -10083,6 +10541,14 @@ check_encoder_state(struct drm_device *dev)
if (connector->base.dpms != DRM_MODE_DPMS_OFF)
active = true;
}
+ /*
+	 * For MST connectors the connector goes away on unplug, but the
+	 * encoder is still connected to a crtc until a modeset happens
+	 * in response to the hotplug.
+ */
+ if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
+ continue;
+
WARN(!!encoder->base.crtc != enabled,
"encoder's enabled state mismatch "
"(expected %i, found %i)\n",
@@ -10378,20 +10844,23 @@ static int __intel_set_mode(struct drm_crtc *crtc,
* on the DPLL.
*/
for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
- struct drm_framebuffer *old_fb;
+ struct drm_framebuffer *old_fb = crtc->primary->fb;
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev,
- to_intel_framebuffer(fb)->obj,
+ obj,
NULL);
if (ret != 0) {
DRM_ERROR("pin & fence failed\n");
mutex_unlock(&dev->struct_mutex);
goto done;
}
- old_fb = crtc->primary->fb;
if (old_fb)
- intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
+ intel_unpin_fb_obj(old_obj);
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
mutex_unlock(&dev->struct_mutex);
crtc->primary->fb = fb;
@@ -10563,12 +11032,17 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
if (is_crtc_connector_off(set)) {
config->mode_changed = true;
} else if (set->crtc->primary->fb != set->fb) {
- /* If we have no fb then treat it as a full mode set */
+ /*
+ * If we have no fb, we can only flip as long as the crtc is
+ * active, otherwise we need a full mode set. The crtc may
+ * be active if we've only disabled the primary plane, or
+ * in fastboot situations.
+ */
if (set->crtc->primary->fb == NULL) {
struct intel_crtc *intel_crtc =
to_intel_crtc(set->crtc);
- if (intel_crtc->active && i915.fastboot) {
+ if (intel_crtc->active) {
DRM_DEBUG_KMS("crtc has no fb, will flip\n");
config->fb_changed = true;
} else {
@@ -10620,7 +11094,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
* for them. */
for (ro = 0; ro < set->num_connectors; ro++) {
if (set->connectors[ro] == &connector->base) {
- connector->new_encoder = connector->encoder;
+ connector->new_encoder = intel_find_encoder(connector, to_intel_crtc(set->crtc)->pipe);
break;
}
}
@@ -10666,7 +11140,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
new_crtc)) {
return -EINVAL;
}
- connector->encoder->new_crtc = to_intel_crtc(new_crtc);
+ connector->new_encoder->new_crtc = to_intel_crtc(new_crtc);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
connector->base.base.id,
@@ -10700,7 +11174,12 @@ intel_modeset_stage_output_state(struct drm_device *dev,
}
}
/* Now we've also updated encoder->new_crtc for all encoders. */
-
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->new_encoder)
+ if (connector->new_encoder != connector->encoder)
+ connector->encoder = connector->new_encoder;
+ }
for_each_intel_crtc(dev, crtc) {
crtc->new_enabled = false;
@@ -10806,10 +11285,24 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
ret = intel_set_mode(set->crtc, set->mode,
set->x, set->y, set->fb);
} else if (config->fb_changed) {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
+
intel_crtc_wait_for_pending_flips(set->crtc);
ret = intel_pipe_set_base(set->crtc,
set->x, set->y, set->fb);
+
+ /*
+ * We need to make sure the primary plane is re-enabled if it
+ * has previously been turned off.
+ */
+ if (!intel_crtc->primary_enabled && ret == 0) {
+ WARN_ON(!intel_crtc->active);
+ intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
+ intel_crtc->pipe);
+ }
+
/*
* In the fastboot case this may be our only check of the
* state after boot. It would be better to only do it on
@@ -10850,26 +11343,21 @@ out_config:
}
static const struct drm_crtc_funcs intel_crtc_funcs = {
- .cursor_set = intel_crtc_cursor_set,
- .cursor_move = intel_crtc_cursor_move,
.gamma_set = intel_crtc_gamma_set,
.set_config = intel_crtc_set_config,
.destroy = intel_crtc_destroy,
.page_flip = intel_crtc_page_flip,
};
-static void intel_cpu_pll_init(struct drm_device *dev)
-{
- if (HAS_DDI(dev))
- intel_ddi_pll_init(dev);
-}
-
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
uint32_t val;
+ if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ return false;
+
val = I915_READ(PCH_DPLL(pll->id));
hw_state->dpll = val;
hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
@@ -10951,7 +11439,9 @@ static void intel_shared_dpll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ if (HAS_DDI(dev))
+ intel_ddi_pll_init(dev);
+ else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
ibx_pch_dpll_init(dev);
else
dev_priv->num_shared_dpll = 0;
@@ -10959,17 +11449,328 @@ static void intel_shared_dpll_init(struct drm_device *dev)
BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
}
+static int
+intel_primary_plane_disable(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_crtc *intel_crtc;
+
+ if (!plane->fb)
+ return 0;
+
+ BUG_ON(!plane->crtc);
+
+ intel_crtc = to_intel_crtc(plane->crtc);
+
+ /*
+ * Even though we checked plane->fb above, it's still possible that
+ * the primary plane has been implicitly disabled because the crtc
+ * coordinates given weren't visible, or because we detected
+ * that it was 100% covered by a sprite plane. Or, the CRTC may be
+ * off and we've set a fb, but haven't actually turned on the CRTC yet.
+ * In any of these cases, we need to unpin the FB and let the fb
+ * pointer get updated, but otherwise we don't need to touch the
+ * hardware.
+ */
+ if (!intel_crtc->primary_enabled)
+ goto disable_unpin;
+
+ intel_crtc_wait_for_pending_flips(plane->crtc);
+ intel_disable_primary_hw_plane(dev_priv, intel_plane->plane,
+ intel_plane->pipe);
+disable_unpin:
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
+ INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
+ intel_unpin_fb_obj(intel_fb_obj(plane->fb));
+ mutex_unlock(&dev->struct_mutex);
+ plane->fb = NULL;
+
+ return 0;
+}
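/*
 * Userspace-side sketch (libdrm, editorial illustration only; ids are
 * hypothetical): passing fb_id = 0 to the SetPlane ioctl is the path
 * that reaches intel_primary_plane_disable() above.
 */
#include <stdint.h>
#include <xf86drmMode.h>

static int disable_primary(int fd, uint32_t plane_id, uint32_t crtc_id)
{
	/* fb = 0 means "disable this plane"; the rects are ignored */
	return drmModeSetPlane(fd, plane_id, crtc_id, 0, 0,
			       0, 0, 0, 0,	/* crtc x/y/w/h */
			       0, 0, 0, 0);	/* src x/y/w/h, 16.16 */
}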
+
+static int
+intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
+ struct drm_rect dest = {
+ /* integer pixels */
+ .x1 = crtc_x,
+ .y1 = crtc_y,
+ .x2 = crtc_x + crtc_w,
+ .y2 = crtc_y + crtc_h,
+ };
+ struct drm_rect src = {
+ /* 16.16 fixed point */
+ .x1 = src_x,
+ .y1 = src_y,
+ .x2 = src_x + src_w,
+ .y2 = src_y + src_h,
+ };
+ const struct drm_rect clip = {
+ /* integer pixels */
+ .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
+ .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
+ };
+ bool visible;
+ int ret;
+
+ ret = drm_plane_helper_check_update(plane, crtc, fb,
+ &src, &dest, &clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true, &visible);
+
+ if (ret)
+ return ret;
+
+ /*
+ * If the CRTC isn't enabled, we're just pinning the framebuffer,
+ * updating the fb pointer, and returning without touching the
+ * hardware. This allows us to later do a drmModeSetCrtc with fb=-1 to
+ * turn on the display with all planes set up as desired.
+ */
+ if (!crtc->enabled) {
+ mutex_lock(&dev->struct_mutex);
+
+ /*
+ * If we already called setplane while the crtc was disabled,
+ * we may have an fb pinned; unpin it.
+ */
+ if (plane->fb)
+ intel_unpin_fb_obj(old_obj);
+
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
+
+ /* Pin and return without programming hardware */
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+ }
+
+ intel_crtc_wait_for_pending_flips(crtc);
+
+ /*
+ * If clipping results in a non-visible primary plane, we'll disable
+ * the primary plane. Note that this is a bit different from what
+ * happens if userspace explicitly disables the plane by passing fb=0
+ * because plane->fb still gets set and pinned.
+ */
+ if (!visible) {
+ mutex_lock(&dev->struct_mutex);
+
+ /*
+ * Try to pin the new fb first so that we can bail out if we
+ * fail.
+ */
+ if (plane->fb != fb) {
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ }
+
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
+
+ if (intel_crtc->primary_enabled)
+ intel_disable_primary_hw_plane(dev_priv,
+ intel_plane->plane,
+ intel_plane->pipe);
+
+ if (plane->fb && plane->fb != fb)
+ intel_unpin_fb_obj(old_obj);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+ }
+
+ ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb);
+ if (ret)
+ return ret;
+
+ if (!intel_crtc->primary_enabled)
+ intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
+ intel_crtc->pipe);
+
+ return 0;
+}
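/*
 * A minimal userspace sketch (libdrm, editorial illustration; ids are
 * hypothetical) of the flow described in the !crtc->enabled comment
 * above: stage the primary fb while the CRTC is off, then enable the
 * CRTC with fb = -1 so the already-staged framebuffer is kept.
 */
#include <stdint.h>
#include <xf86drmMode.h>

static int stage_then_enable(int fd, uint32_t crtc_id, uint32_t plane_id,
			     uint32_t fb_id, uint32_t conn_id,
			     drmModeModeInfo *mode, uint32_t w, uint32_t h)
{
	/* CRTC off: this only pins the fb and records it, hw untouched */
	int ret = drmModeSetPlane(fd, plane_id, crtc_id, fb_id, 0,
				  0, 0, w, h,		/* crtc rect */
				  0, 0, w << 16, h << 16); /* src, 16.16 */
	if (ret)
		return ret;

	/* fb = -1: light up the CRTC with the framebuffer staged above */
	return drmModeSetCrtc(fd, crtc_id, (uint32_t)-1, 0, 0,
			      &conn_id, 1, mode);
}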
+
+/* Common destruction function for both primary and cursor planes */
+static void intel_plane_destroy(struct drm_plane *plane)
+{
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ drm_plane_cleanup(plane);
+ kfree(intel_plane);
+}
+
+static const struct drm_plane_funcs intel_primary_plane_funcs = {
+ .update_plane = intel_primary_plane_setplane,
+ .disable_plane = intel_primary_plane_disable,
+ .destroy = intel_plane_destroy,
+};
+
+static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
+ int pipe)
+{
+ struct intel_plane *primary;
+ const uint32_t *intel_primary_formats;
+ int num_formats;
+
+ primary = kzalloc(sizeof(*primary), GFP_KERNEL);
+ if (primary == NULL)
+ return NULL;
+
+ primary->can_scale = false;
+ primary->max_downscale = 1;
+ primary->pipe = pipe;
+ primary->plane = pipe;
+ if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
+ primary->plane = !pipe;
+
+ if (INTEL_INFO(dev)->gen <= 3) {
+ intel_primary_formats = intel_primary_formats_gen2;
+ num_formats = ARRAY_SIZE(intel_primary_formats_gen2);
+ } else {
+ intel_primary_formats = intel_primary_formats_gen4;
+ num_formats = ARRAY_SIZE(intel_primary_formats_gen4);
+ }
+
+ drm_universal_plane_init(dev, &primary->base, 0,
+ &intel_primary_plane_funcs,
+ intel_primary_formats, num_formats,
+ DRM_PLANE_TYPE_PRIMARY);
+ return &primary->base;
+}
+
+static int
+intel_cursor_plane_disable(struct drm_plane *plane)
+{
+ if (!plane->fb)
+ return 0;
+
+ BUG_ON(!plane->crtc);
+
+ return intel_crtc_cursor_set_obj(plane->crtc, NULL, 0, 0);
+}
+
+static int
+intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_rect dest = {
+ /* integer pixels */
+ .x1 = crtc_x,
+ .y1 = crtc_y,
+ .x2 = crtc_x + crtc_w,
+ .y2 = crtc_y + crtc_h,
+ };
+ struct drm_rect src = {
+ /* 16.16 fixed point */
+ .x1 = src_x,
+ .y1 = src_y,
+ .x2 = src_x + src_w,
+ .y2 = src_y + src_h,
+ };
+ const struct drm_rect clip = {
+ /* integer pixels */
+ .x2 = intel_crtc->config.pipe_src_w,
+ .y2 = intel_crtc->config.pipe_src_h,
+ };
+ bool visible;
+ int ret;
+
+ ret = drm_plane_helper_check_update(plane, crtc, fb,
+ &src, &dest, &clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true, &visible);
+ if (ret)
+ return ret;
+
+ crtc->cursor_x = crtc_x;
+ crtc->cursor_y = crtc_y;
+ if (fb != crtc->cursor->fb) {
+ return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
+ } else {
+ intel_crtc_update_cursor(crtc, visible);
+ return 0;
+ }
+}
+
+static const struct drm_plane_funcs intel_cursor_plane_funcs = {
+ .update_plane = intel_cursor_plane_update,
+ .disable_plane = intel_cursor_plane_disable,
+ .destroy = intel_plane_destroy,
+};
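/*
 * Editorial sketch, not part of this patch: with a universal cursor
 * plane registered, the DRM core routes the legacy cursor ioctls
 * through the hooks above, which is why .cursor_set/.cursor_move could
 * be dropped from intel_crtc_funcs. Userspace side (libdrm, ids
 * hypothetical):
 */
#include <stdint.h>
#include <xf86drmMode.h>

static void legacy_cursor_example(int fd, uint32_t crtc_id,
				  uint32_t bo_handle, int x, int y)
{
	/* both land in intel_cursor_plane_update() via the core helpers */
	drmModeSetCursor(fd, crtc_id, bo_handle, 64, 64);
	drmModeMoveCursor(fd, crtc_id, x, y);
}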
+
+static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
+ int pipe)
+{
+ struct intel_plane *cursor;
+
+ cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
+ if (cursor == NULL)
+ return NULL;
+
+ cursor->can_scale = false;
+ cursor->max_downscale = 1;
+ cursor->pipe = pipe;
+ cursor->plane = pipe;
+
+ drm_universal_plane_init(dev, &cursor->base, 0,
+ &intel_cursor_plane_funcs,
+ intel_cursor_formats,
+ ARRAY_SIZE(intel_cursor_formats),
+ DRM_PLANE_TYPE_CURSOR);
+ return &cursor->base;
+}
+
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
- int i;
+ struct drm_plane *primary = NULL;
+ struct drm_plane *cursor = NULL;
+ int i, ret;
intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
if (intel_crtc == NULL)
return;
- drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
+ primary = intel_primary_plane_create(dev, pipe);
+ if (!primary)
+ goto fail;
+
+ cursor = intel_cursor_plane_create(dev, pipe);
+ if (!cursor)
+ goto fail;
+
+ ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
+ cursor, &intel_crtc_funcs);
+ if (ret)
+ goto fail;
drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
for (i = 0; i < 256; i++) {
@@ -10980,7 +11781,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
/*
* On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
- * is hooked to plane B. Hence we want plane A feeding pipe B.
+ * are hooked to pipe B. Hence we want plane A feeding pipe B.
*/
intel_crtc->pipe = pipe;
intel_crtc->plane = pipe;
@@ -11002,6 +11803,14 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
+ return;
+
+fail:
+ if (primary)
+ drm_plane_cleanup(primary);
+ if (cursor)
+ drm_plane_cleanup(cursor);
+ kfree(intel_crtc);
}
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
@@ -11021,21 +11830,20 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
- struct drm_mode_object *drmmode_obj;
+ struct drm_crtc *drmmode_crtc;
struct intel_crtc *crtc;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
- drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
- DRM_MODE_OBJECT_CRTC);
+ drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
- if (!drmmode_obj) {
+ if (!drmmode_crtc) {
DRM_ERROR("no such CRTC id\n");
return -ENOENT;
}
- crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
+ crtc = to_intel_crtc(drmmode_crtc);
pipe_from_crtc_id->pipe = crtc->pipe;
return 0;
@@ -11236,6 +12044,8 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_TV(dev))
intel_tv_init(dev);
+ intel_edp_psr_init(dev);
+
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
encoder->base.possible_crtcs = encoder->crtc_mask;
encoder->base.possible_clones =
@@ -11249,11 +12059,14 @@ static void intel_setup_outputs(struct drm_device *dev)
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
+ struct drm_device *dev = fb->dev;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
drm_framebuffer_cleanup(fb);
+ mutex_lock(&dev->struct_mutex);
WARN_ON(!intel_fb->obj->framebuffer_references--);
- drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
+ drm_gem_object_unreference(&intel_fb->obj->base);
+ mutex_unlock(&dev->struct_mutex);
kfree(intel_fb);
}
@@ -11438,7 +12251,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
- dev_priv->display.off = haswell_crtc_off;
+ dev_priv->display.off = ironlake_crtc_off;
dev_priv->display.update_primary_plane =
ironlake_update_primary_plane;
} else if (HAS_PCH_SPLIT(dev)) {
@@ -11722,6 +12535,9 @@ void intel_modeset_init_hw(struct drm_device *dev)
{
intel_prepare_ddi(dev);
+ if (IS_VALLEYVIEW(dev))
+ vlv_update_cdclk(dev);
+
intel_init_clock_gating(dev);
intel_reset_dpio(dev);
@@ -11798,7 +12614,6 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_dpio(dev);
intel_reset_dpio(dev);
- intel_cpu_pll_init(dev);
intel_shared_dpll_init(dev);
/* Just disable it once at startup */
@@ -12024,6 +12839,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
encoder->base.base.id,
encoder->base.name);
encoder->disable(encoder);
+ if (encoder->post_disable)
+ encoder->post_disable(encoder);
}
encoder->base.crtc = NULL;
encoder->connectors_active = false;
@@ -12108,10 +12925,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc->active ? "enabled" : "disabled");
}
- /* FIXME: Smash this into the new shared dpll infrastructure. */
- if (HAS_DDI(dev))
- intel_ddi_setup_hw_pll_state(dev);
-
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
@@ -12125,6 +12938,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
pll->name, pll->refcount, pll->on);
+
+ if (pll->refcount)
+ intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
@@ -12242,7 +13058,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
void intel_modeset_gem_init(struct drm_device *dev)
{
struct drm_crtc *c;
- struct intel_framebuffer *fb;
+ struct drm_i915_gem_object *obj;
mutex_lock(&dev->struct_mutex);
intel_init_gt_powersave(dev);
@@ -12259,11 +13075,11 @@ void intel_modeset_gem_init(struct drm_device *dev)
*/
mutex_lock(&dev->struct_mutex);
for_each_crtc(dev, c) {
- if (!c->primary->fb)
+ obj = intel_fb_obj(c->primary->fb);
+ if (obj == NULL)
continue;
- fb = to_intel_framebuffer(c->primary->fb);
- if (intel_pin_and_fence_fb_obj(dev, fb->obj, NULL)) {
+ if (intel_pin_and_fence_fb_obj(dev, obj, NULL)) {
DRM_ERROR("failed to pin boot fb on pipe %d\n",
to_intel_crtc(c)->pipe);
drm_framebuffer_unreference(c->primary->fb);
@@ -12278,13 +13094,12 @@ void intel_connector_unregister(struct intel_connector *intel_connector)
struct drm_connector *connector = &intel_connector->base;
intel_panel_destroy_backlight(connector);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
}
void intel_modeset_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
struct drm_connector *connector;
/*
@@ -12294,6 +13109,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
*/
drm_irq_uninstall(dev);
cancel_work_sync(&dev_priv->hotplug_work);
+ dev_priv->pm._irqs_disabled = true;
+
/*
* Due to the hpd irq storm handling the hotplug work can re-arm the
* poll handlers. Hence disable polling after hpd handling is shut down.
@@ -12304,14 +13121,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_unregister_dsm_handler();
- for_each_crtc(dev, crtc) {
- /* Skip inactive CRTCs */
- if (!crtc->primary->fb)
- continue;
-
- intel_increase_pllclock(crtc);
- }
-
intel_disable_fbc(dev);
intel_disable_gt_powersave(dev);
@@ -12479,7 +13288,7 @@ intel_display_capture_error_state(struct drm_device *dev)
error->pipe[i].source = I915_READ(PIPESRC(i));
- if (!HAS_PCH_SPLIT(dev))
+ if (HAS_GMCH_DISPLAY(dev))
error->pipe[i].stat = I915_READ(PIPESTAT(i));
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 8a1a4fbc06ac..eb52ecfe14cf 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -114,7 +114,7 @@ static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool _edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-static int
+int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
@@ -773,12 +773,29 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
- sysfs_remove_link(&intel_connector->base.kdev->kobj,
- intel_dp->aux.ddc.dev.kobj.name);
+ if (!intel_connector->mst_port)
+ sysfs_remove_link(&intel_connector->base.kdev->kobj,
+ intel_dp->aux.ddc.dev.kobj.name);
intel_connector_unregister(intel_connector);
}
static void
+hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
+{
+ switch (link_bw) {
+ case DP_LINK_BW_1_62:
+ pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
+ break;
+ case DP_LINK_BW_2_7:
+ pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
+ break;
+ case DP_LINK_BW_5_4:
+ pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
+ break;
+ }
+}
+
+static void
intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config, int link_bw)
{
@@ -789,8 +806,6 @@ intel_dp_set_clock(struct intel_encoder *encoder,
if (IS_G4X(dev)) {
divisor = gen4_dpll;
count = ARRAY_SIZE(gen4_dpll);
- } else if (IS_HASWELL(dev)) {
- /* Haswell has special-purpose DP DDI clocks. */
} else if (HAS_PCH_SPLIT(dev)) {
divisor = pch_dpll;
count = ARRAY_SIZE(pch_dpll);
@@ -961,7 +976,10 @@ found:
&pipe_config->dp_m2_n2);
}
- intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
+ if (HAS_DDI(dev))
+ hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
+ else
+ intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
return true;
}
@@ -1349,8 +1367,6 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Turn eDP power off\n");
- edp_wait_backlight_off(intel_dp);
-
WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
pp = ironlake_get_pp_control(intel_dp);
@@ -1386,6 +1402,9 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
return;
DRM_DEBUG_KMS("\n");
+
+ intel_panel_enable_backlight(intel_dp->attached_connector);
+
/*
* If we enable the backlight right away following a panel power
* on, we may see slight flicker as the panel syncs with the eDP
@@ -1400,8 +1419,6 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
-
- intel_panel_enable_backlight(intel_dp->attached_connector);
}
void intel_edp_backlight_off(struct intel_dp *intel_dp)
@@ -1414,8 +1431,6 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp)
if (!is_edp(intel_dp))
return;
- intel_panel_disable_backlight(intel_dp->attached_connector);
-
DRM_DEBUG_KMS("\n");
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_BLC_ENABLE;
@@ -1425,6 +1440,10 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp)
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
intel_dp->last_backlight_off = jiffies;
+
+ edp_wait_backlight_off(intel_dp);
+
+ intel_panel_disable_backlight(intel_dp->attached_connector);
}
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
@@ -1646,11 +1665,9 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
}
}
-static bool is_edp_psr(struct drm_device *dev)
+static bool is_edp_psr(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return dev_priv->psr.sink_support;
+ return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}
static bool intel_edp_is_psr_enabled(struct drm_device *dev)
@@ -1698,9 +1715,6 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
struct edp_vsc_psr psr_vsc;
- if (intel_dp->psr_setup_done)
- return;
-
/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
memset(&psr_vsc, 0, sizeof(psr_vsc));
psr_vsc.sdp_header.HB0 = 0;
@@ -1712,22 +1726,25 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
/* Avoid continuous PSR exit by masking memup and hpd */
I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
-
- intel_dp->psr_setup_done = true;
}
static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t aux_clock_divider;
int precharge = 0x3;
int msg_size = 5; /* Header(4) + Message(1) */
+ bool only_standby = false;
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
+ if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
+ only_standby = true;
+
/* Enable PSR in sink */
- if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
+ if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
else
@@ -1746,18 +1763,24 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t max_sleep_time = 0x1f;
uint32_t idle_frames = 1;
uint32_t val = 0x0;
const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
+ bool only_standby = false;
- if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
+ if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
+ only_standby = true;
+
+ if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
val |= EDP_PSR_LINK_STANDBY;
val |= EDP_PSR_TP2_TP3_TIME_0us;
val |= EDP_PSR_TP1_TIME_0us;
val |= EDP_PSR_SKIP_AUX_EXIT;
+ val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
} else
val |= EDP_PSR_LINK_DISABLE;
@@ -1775,18 +1798,15 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dig_port->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->primary->fb)->obj;
- struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
- dev_priv->psr.source_ok = false;
+ lockdep_assert_held(&dev_priv->psr.lock);
+ lockdep_assert_held(&dev->struct_mutex);
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
- if (!HAS_PSR(dev)) {
- DRM_DEBUG_KMS("PSR not supported on this platform\n");
- return false;
- }
+ dev_priv->psr.source_ok = false;
- if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
- (dig_port->port != PORT_A)) {
+ if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
return false;
}
@@ -1796,29 +1816,9 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
- crtc = dig_port->base.base.crtc;
- if (crtc == NULL) {
- DRM_DEBUG_KMS("crtc not active for PSR\n");
- return false;
- }
-
- intel_crtc = to_intel_crtc(crtc);
- if (!intel_crtc_active(crtc)) {
- DRM_DEBUG_KMS("crtc not active for PSR\n");
- return false;
- }
-
- obj = to_intel_framebuffer(crtc->primary->fb)->obj;
- if (obj->tiling_mode != I915_TILING_X ||
- obj->fence_reg == I915_FENCE_REG_NONE) {
- DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
- return false;
- }
-
- if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
- DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
- return false;
- }
+ /* Below limitations aren't valid for Broadwell */
+ if (IS_BROADWELL(dev))
+ goto out;
if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
S3D_ENABLE) {
@@ -1831,35 +1831,60 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
+ out:
dev_priv->psr.source_ok = true;
return true;
}
static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
-
- if (!intel_edp_psr_match_conditions(intel_dp) ||
- intel_edp_is_psr_enabled(dev))
- return;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
- /* Setup PSR once */
- intel_edp_psr_setup(intel_dp);
+ WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+ WARN_ON(dev_priv->psr.active);
+ lockdep_assert_held(&dev_priv->psr.lock);
/* Enable PSR on the panel */
intel_edp_psr_enable_sink(intel_dp);
/* Enable PSR on the host */
intel_edp_psr_enable_source(intel_dp);
+
+ dev_priv->psr.active = true;
}
void intel_edp_psr_enable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!HAS_PSR(dev)) {
+ DRM_DEBUG_KMS("PSR not supported on this platform\n");
+ return;
+ }
+
+ if (!is_edp_psr(intel_dp)) {
+ DRM_DEBUG_KMS("PSR not supported by this panel\n");
+ return;
+ }
- if (intel_edp_psr_match_conditions(intel_dp) &&
- !intel_edp_is_psr_enabled(dev))
- intel_edp_psr_do_enable(intel_dp);
+ mutex_lock(&dev_priv->psr.lock);
+ if (dev_priv->psr.enabled) {
+ DRM_DEBUG_KMS("PSR already in use\n");
+ mutex_unlock(&dev_priv->psr.lock);
+ return;
+ }
+
+ dev_priv->psr.busy_frontbuffer_bits = 0;
+
+ /* Setup PSR once */
+ intel_edp_psr_setup(intel_dp);
+
+ if (intel_edp_psr_match_conditions(intel_dp))
+ dev_priv->psr.enabled = intel_dp;
+ mutex_unlock(&dev_priv->psr.lock);
}
void intel_edp_psr_disable(struct intel_dp *intel_dp)
@@ -1867,36 +1892,136 @@ void intel_edp_psr_disable(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!intel_edp_is_psr_enabled(dev))
+ mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled) {
+ mutex_unlock(&dev_priv->psr.lock);
return;
+ }
+
+ if (dev_priv->psr.active) {
+ I915_WRITE(EDP_PSR_CTL(dev),
+ I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
+
+ /* Wait till PSR is idle */
+ if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
+ EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
+ DRM_ERROR("Timed out waiting for PSR Idle State\n");
- I915_WRITE(EDP_PSR_CTL(dev),
- I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
+ dev_priv->psr.active = false;
+ } else {
+ WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+ }
+
+ dev_priv->psr.enabled = NULL;
+ mutex_unlock(&dev_priv->psr.lock);
- /* Wait till PSR is idle */
- if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
- EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
- DRM_ERROR("Timed out waiting for PSR Idle State\n");
+ cancel_delayed_work_sync(&dev_priv->psr.work);
}
-void intel_edp_psr_update(struct drm_device *dev)
+static void intel_edp_psr_work(struct work_struct *work)
{
- struct intel_encoder *encoder;
- struct intel_dp *intel_dp = NULL;
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), psr.work.work);
+ struct intel_dp *intel_dp = dev_priv->psr.enabled;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
- if (encoder->type == INTEL_OUTPUT_EDP) {
- intel_dp = enc_to_intel_dp(&encoder->base);
+ mutex_lock(&dev_priv->psr.lock);
+ intel_dp = dev_priv->psr.enabled;
- if (!is_edp_psr(dev))
- return;
+ if (!intel_dp)
+ goto unlock;
- if (!intel_edp_psr_match_conditions(intel_dp))
- intel_edp_psr_disable(intel_dp);
- else
- if (!intel_edp_is_psr_enabled(dev))
- intel_edp_psr_do_enable(intel_dp);
- }
+ /*
+ * The delayed work can race with an invalidate, hence we need to
+ * recheck. Since psr_flush first clears this and then reschedules, we
+ * won't ever miss a flush when bailing out here.
+ */
+ if (dev_priv->psr.busy_frontbuffer_bits)
+ goto unlock;
+
+ intel_edp_psr_do_enable(intel_dp);
+unlock:
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
+static void intel_edp_psr_do_exit(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->psr.active) {
+ u32 val = I915_READ(EDP_PSR_CTL(dev));
+
+ WARN_ON(!(val & EDP_PSR_ENABLE));
+
+ I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
+
+ dev_priv->psr.active = false;
+ }
+}
+
+void intel_edp_psr_invalidate(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ enum pipe pipe;
+
+ mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled) {
+ mutex_unlock(&dev_priv->psr.lock);
+ return;
+ }
+
+ crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+ pipe = to_intel_crtc(crtc)->pipe;
+
+ intel_edp_psr_do_exit(dev);
+
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+
+ dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
+void intel_edp_psr_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ enum pipe pipe;
+
+ mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled) {
+ mutex_unlock(&dev_priv->psr.lock);
+ return;
+ }
+
+ crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+ pipe = to_intel_crtc(crtc)->pipe;
+ dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
+
+ /*
+ * On Haswell, sprite plane updates don't result in a PSR invalidating
+ * signal in the hardware, which means we need to manually fake this in
+ * software for all flushes, not just when we've seen a preceding
+ * invalidation through frontbuffer rendering.
+ */
+ if (IS_HASWELL(dev) &&
+ (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
+ intel_edp_psr_do_exit(dev);
+
+ if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
+ schedule_delayed_work(&dev_priv->psr.work,
+ msecs_to_jiffies(100));
+ mutex_unlock(&dev_priv->psr.lock);
+}
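/*
 * Hedged sketch (the helper below is hypothetical) of how a frontbuffer
 * write is expected to bracket the two entry points above: invalidate
 * forces a PSR exit before the scanout buffer is touched, flush arms
 * the delayed re-enable once the write has landed.
 */
static void example_frontbuffer_write(struct drm_device *dev, enum pipe pipe)
{
	unsigned frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(pipe);

	intel_edp_psr_invalidate(dev, frontbuffer_bits);	/* PSR exits */
	/* ... CPU/GTT writes to the scanout buffer happen here ... */
	intel_edp_psr_flush(dev, frontbuffer_bits);	/* re-enable after idle */
}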
+
+void intel_edp_psr_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work);
+ mutex_init(&dev_priv->psr.lock);
}
static void intel_disable_dp(struct intel_encoder *encoder)
@@ -2152,6 +2277,70 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
vlv_wait_port_ready(dev_priv, dport);
}
+static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 val;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* program left/right clock distribution */
+ if (pipe != PIPE_B) {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+ if (ch == DPIO_CH0)
+ val |= CHV_BUFLEFTENA1_FORCE;
+ if (ch == DPIO_CH1)
+ val |= CHV_BUFRIGHTENA1_FORCE;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ } else {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+ if (ch == DPIO_CH0)
+ val |= CHV_BUFLEFTENA2_FORCE;
+ if (ch == DPIO_CH1)
+ val |= CHV_BUFRIGHTENA2_FORCE;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ }
+
+ /* program clock channel usage */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
+ val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe != PIPE_B)
+ val &= ~CHV_PCS_USEDCLKCHANNEL;
+ else
+ val |= CHV_PCS_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
+ val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe != PIPE_B)
+ val &= ~CHV_PCS_USEDCLKCHANNEL;
+ else
+ val |= CHV_PCS_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+
+ /*
+ * This is a bit weird since generally CL
+ * matches the pipe, but here we need to
+ * pick the CL based on the port.
+ */
+ val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
+ if (pipe != PIPE_B)
+ val &= ~CHV_CMN_USEDCLKCHANNEL;
+ else
+ val |= CHV_CMN_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
/*
* Native read with retry for link status and receiver capability reads for
* cases where the sink may still be asleep.
@@ -2189,18 +2378,14 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
-/*
- * These are source-specific values; current Intel hardware supports
- * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
- */
-
+/* These are source-specific values. */
static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
- if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev))
+ if (IS_VALLEYVIEW(dev))
return DP_TRAIN_VOLTAGE_SWING_1200;
else if (IS_GEN7(dev) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_800;
@@ -2216,18 +2401,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
- if (IS_BROADWELL(dev)) {
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_400:
- case DP_TRAIN_VOLTAGE_SWING_600:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_800:
- return DP_TRAIN_PRE_EMPHASIS_3_5;
- case DP_TRAIN_VOLTAGE_SWING_1200:
- default:
- return DP_TRAIN_PRE_EMPHASIS_0;
- }
- } else if (IS_HASWELL(dev)) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_9_5;
@@ -2699,41 +2873,6 @@ intel_hsw_signal_levels(uint8_t train_set)
}
}
-static uint32_t
-intel_bdw_signal_levels(uint8_t train_set)
-{
- int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
- DP_TRAIN_PRE_EMPHASIS_MASK);
- switch (signal_levels) {
- case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
- return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
- case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
- return DDI_BUF_EMP_400MV_3_5DB_BDW; /* Sel1 */
- case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
- return DDI_BUF_EMP_400MV_6DB_BDW; /* Sel2 */
-
- case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
- return DDI_BUF_EMP_600MV_0DB_BDW; /* Sel3 */
- case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
- return DDI_BUF_EMP_600MV_3_5DB_BDW; /* Sel4 */
- case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
- return DDI_BUF_EMP_600MV_6DB_BDW; /* Sel5 */
-
- case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
- return DDI_BUF_EMP_800MV_0DB_BDW; /* Sel6 */
- case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
- return DDI_BUF_EMP_800MV_3_5DB_BDW; /* Sel7 */
-
- case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
- return DDI_BUF_EMP_1200MV_0DB_BDW; /* Sel8 */
-
- default:
- DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
- "0x%x\n", signal_levels);
- return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
- }
-}
-
/* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
@@ -2744,10 +2883,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
uint32_t signal_levels, mask;
uint8_t train_set = intel_dp->train_set[0];
- if (IS_BROADWELL(dev)) {
- signal_levels = intel_bdw_signal_levels(train_set);
- mask = DDI_BUF_EMP_MASK;
- } else if (IS_HASWELL(dev)) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
signal_levels = intel_hsw_signal_levels(train_set);
mask = DDI_BUF_EMP_MASK;
} else if (IS_CHERRYVIEW(dev)) {
@@ -3246,6 +3382,33 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
edp_panel_vdd_off(intel_dp, false);
}
+static bool
+intel_dp_probe_mst(struct intel_dp *intel_dp)
+{
+ u8 buf[1];
+
+ if (!intel_dp->can_mst)
+ return false;
+
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
+ return false;
+
+ _edp_panel_vdd_on(intel_dp);
+ if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
+ if (buf[0] & DP_MST_CAP) {
+ DRM_DEBUG_KMS("Sink is MST capable\n");
+ intel_dp->is_mst = true;
+ } else {
+ DRM_DEBUG_KMS("Sink is not MST capable\n");
+ intel_dp->is_mst = false;
+ }
+ }
+ edp_panel_vdd_off(intel_dp, false);
+
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+ return intel_dp->is_mst;
+}
+
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -3283,6 +3446,20 @@ intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
sink_irq_vector, 1) == 1;
}
+static bool
+intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
+{
+ int ret;
+
+ ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
+ DP_SINK_COUNT_ESI,
+ sink_irq_vector, 14);
+ if (ret != 14)
+ return false;
+
+ return true;
+}
+
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
@@ -3290,6 +3467,63 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
}
+static int
+intel_dp_check_mst_status(struct intel_dp *intel_dp)
+{
+ bool bret;
+
+ if (intel_dp->is_mst) {
+ u8 esi[16] = { 0 };
+ int ret = 0;
+ int retry;
+ bool handled;
+ bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
+go_again:
+ if (bret) {
+ /* check link status - esi[10] is DP_LANE0_1_STATUS_ESI (DPCD 0x200c) */
+ if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
+ DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_complete_link_train(intel_dp);
+ intel_dp_stop_link_train(intel_dp);
+ }
+
+ DRM_DEBUG_KMS("got esi %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+ ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
+
+ if (handled) {
+ for (retry = 0; retry < 3; retry++) {
+ int wret;
+ wret = drm_dp_dpcd_write(&intel_dp->aux,
+ DP_SINK_COUNT_ESI+1,
+ &esi[1], 3);
+ if (wret == 3) {
+ break;
+ }
+ }
+
+ bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
+ if (bret) {
+ DRM_DEBUG_KMS("got esi2 %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+ goto go_again;
+ }
+ } else
+ ret = 0;
+
+ return ret;
+ } else {
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
+ intel_dp->is_mst = false;
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+ /* send a hotplug event */
+ drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
+ }
+ }
+ return -EINVAL;
+}
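/*
 * Editor's note on the ESI block parsed above, as a sketch (offsets are
 * into the 14-byte read starting at DP_SINK_COUNT_ESI, 0x2002, per the
 * DP 1.2 DPCD layout):
 */
enum {
	ESI_SINK_COUNT	  = 0,	/* DP_SINK_COUNT_ESI, 0x2002 */
	ESI_IRQ_VECTOR0	  = 1,	/* 0x2003; esi[1..3] written back to ack */
	ESI_LANE01_STATUS = 10,	/* DP_LANE0_1_STATUS_ESI, 0x200c */
};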
+
/*
* According to DP spec
* 5.1.2:
@@ -3298,15 +3532,16 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp)
* 3. Use Link Training from 2.5.3.3 and 3.5.1.3
* 4. Check link status on receipt of hot-plug interrupt
*/
-
void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
u8 sink_irq_vector;
u8 link_status[DP_LINK_STATUS_SIZE];
- /* FIXME: This access isn't protected by any locks. */
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
if (!intel_encoder->connectors_active)
return;
@@ -3518,8 +3753,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
enum drm_connector_status status;
enum intel_display_power_domain power_domain;
struct edid *edid = NULL;
-
- intel_runtime_pm_get(dev_priv);
+ bool ret;
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
@@ -3527,6 +3761,14 @@ intel_dp_detect(struct drm_connector *connector, bool force)
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
+ if (intel_dp->is_mst) {
+ /* MST devices are disconnected from a monitor POV */
+ if (intel_encoder->type != INTEL_OUTPUT_EDP)
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ status = connector_status_disconnected;
+ goto out;
+ }
+
intel_dp->has_audio = false;
if (HAS_PCH_SPLIT(dev))
@@ -3539,6 +3781,16 @@ intel_dp_detect(struct drm_connector *connector, bool force)
intel_dp_probe_oui(intel_dp);
+ ret = intel_dp_probe_mst(intel_dp);
+ if (ret) {
+ /*
+ * If we are in MST mode then this connector
+ * won't appear connected or have anything
+ * with EDID on it.
+ */
+ if (intel_encoder->type != INTEL_OUTPUT_EDP)
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ status = connector_status_disconnected;
+ goto out;
+ }
+
if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
} else {
@@ -3555,9 +3807,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
out:
intel_display_power_put(dev_priv, power_domain);
-
- intel_runtime_pm_put(dev_priv);
-
return status;
}
@@ -3734,6 +3983,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
drm_dp_aux_unregister(&intel_dp->aux);
+ intel_dp_mst_encoder_cleanup(intel_dig_port);
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
@@ -3766,12 +4016,64 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
.destroy = intel_dp_encoder_destroy,
};
-static void
+void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ return;
+}
+
+bool
+intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
+{
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
+ intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
+
+ DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port,
+ long_hpd ? "long" : "short");
+
+ if (long_hpd) {
+ if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
+ goto mst_fail;
- intel_dp_check_link_status(intel_dp);
+ if (!intel_dp_get_dpcd(intel_dp)) {
+ goto mst_fail;
+ }
+
+ intel_dp_probe_oui(intel_dp);
+
+ if (!intel_dp_probe_mst(intel_dp))
+ goto mst_fail;
+
+ } else {
+ if (intel_dp->is_mst) {
+ ret = intel_dp_check_mst_status(intel_dp);
+ if (ret == -EINVAL)
+ goto mst_fail;
+ }
+
+ if (!intel_dp->is_mst) {
+ /*
+ * we'll check the link status via the normal hotplug path later,
+ * but for short HPDs we should check it now
+ */
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ intel_dp_check_link_status(intel_dp);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ }
+ }
+ return false;
+mst_fail:
+ /*
+ * If we were in MST mode and the device is no longer there,
+ * get out of MST mode.
+ */
+ if (intel_dp->is_mst) {
+ DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
+ intel_dp->is_mst = false;
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+ }
+ return true;
}
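/*
 * Hedged sketch of the expected caller (the real hook-up lives in
 * i915_irq.c, outside this hunk): hotplug processing looks up the port
 * registered in dev_priv->hpd_irq_port[] and forwards long/short pulses.
 */
static void example_digport_work(struct drm_i915_private *dev_priv,
				 enum port port, bool long_hpd)
{
	struct intel_digital_port *dig_port = dev_priv->hpd_irq_port[port];

	/* return value handling (the mst_fail path) omitted in this sketch */
	if (dig_port && dig_port->hpd_pulse)
		dig_port->hpd_pulse(dig_port, long_hpd);
}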
/* Return which DP Port should be selected for Transcoder DP control */
@@ -3822,7 +4124,7 @@ bool intel_dp_is_edp(struct drm_device *dev, enum port port)
return false;
}
-static void
+void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -4035,6 +4337,11 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
return;
}
+ /*
+ * FIXME: This needs proper synchronization with psr state. But really
+ * hard to tell without seeing the caller of this code.
+ * Check locking and ordering once that lands.
+ */
if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
return;
@@ -4288,7 +4595,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
edp_panel_vdd_work);
intel_connector_attach_encoder(intel_connector, intel_encoder);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
if (HAS_DDI(dev))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
@@ -4321,7 +4628,12 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp_aux_init(intel_dp, intel_connector);
- intel_dp->psr_setup_done = false;
+ /* init MST on ports that can support it */
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (port == PORT_B || port == PORT_C || port == PORT_D) {
+ intel_dp_mst_encoder_init(intel_dig_port, intel_connector->base.base.id);
+ }
+ }
if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
drm_dp_aux_unregister(&intel_dp->aux);
@@ -4331,7 +4643,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
edp_panel_vdd_off_sync(intel_dp);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
return false;
}
@@ -4353,6 +4665,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
@@ -4379,6 +4692,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
if (IS_CHERRYVIEW(dev)) {
+ intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
intel_encoder->pre_enable = chv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
intel_encoder->post_disable = chv_post_disable_dp;
@@ -4408,9 +4722,55 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
intel_encoder->cloneable = 0;
intel_encoder->hot_plug = intel_dp_hot_plug;
+ intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
+ dev_priv->hpd_irq_port[port] = intel_dig_port;
+
if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
drm_encoder_cleanup(encoder);
kfree(intel_dig_port);
kfree(intel_connector);
}
}
+
+void intel_dp_mst_suspend(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ /* disable MST */
+ for (i = 0; i < I915_MAX_PORTS; i++) {
+ struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
+ if (!intel_dig_port)
+ continue;
+
+ if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
+ if (!intel_dig_port->dp.can_mst)
+ continue;
+ if (intel_dig_port->dp.is_mst)
+ drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
+ }
+ }
+}
+
+void intel_dp_mst_resume(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < I915_MAX_PORTS; i++) {
+ struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
+ if (!intel_dig_port)
+ continue;
+ if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
+ int ret;
+
+ if (!intel_dig_port->dp.can_mst)
+ continue;
+
+ ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
+ if (ret != 0) {
+ intel_dp_check_mst_status(&intel_dig_port->dp);
+ }
+ }
+ }
+}
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
new file mode 100644
index 000000000000..d9a7a7865f66
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -0,0 +1,548 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ * 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+
+static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_device *dev = encoder->base.dev;
+ int bpp;
+ int lane_count, slots;
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct intel_connector *found = NULL, *intel_connector;
+ int mst_pbn;
+
+ pipe_config->dp_encoder_is_mst = true;
+ pipe_config->has_pch_encoder = false;
+ pipe_config->has_dp_encoder = true;
+ bpp = 24;
+ /*
+ * For MST we always configure max link bw - the spec doesn't
+ * seem to suggest we should do otherwise.
+ */
+ lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+ intel_dp->link_bw = intel_dp_max_link_bw(intel_dp);
+ intel_dp->lane_count = lane_count;
+
+ pipe_config->pipe_bpp = 24;
+ pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+
+ list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
+ if (intel_connector->new_encoder == encoder) {
+ found = intel_connector;
+ break;
+ }
+ }
+
+ if (!found) {
+ DRM_ERROR("can't find connector\n");
+ return false;
+ }
+
+ mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
+
+ pipe_config->pbn = mst_pbn;
+ slots = drm_dp_find_vcpi_slots(&intel_dp->mst_mgr, mst_pbn);
+
+ intel_link_compute_m_n(bpp, lane_count,
+ adjusted_mode->crtc_clock,
+ pipe_config->port_clock,
+ &pipe_config->dp_m_n);
+
+ pipe_config->dp_m_n.tu = slots;
+ return true;
+}
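/*
 * Editor's sketch of the arithmetic behind drm_dp_calc_pbn_mode() used
 * above (approximate; the core helper folds in a ~0.6% margin the same
 * way): one PBN unit is 54/64 MBytes/sec, so a 148500 kHz 1080p60 mode
 * at 24 bpp (~445.5 MB/s) comes out to roughly 532 PBN.
 */
static int approx_pbn(int clock_khz, int bpp)
{
	u64 kbytes_per_sec = (u64)clock_khz * bpp / 8;

	/* scale to 64/54 MB/s units, apply the 0.6% margin, round up */
	return (kbytes_per_sec * 64 * 1006 + 54ULL * 1000 * 1000 - 1) /
	       (54ULL * 1000 * 1000);
}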
+
+static void intel_mst_disable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ int ret;
+
+ DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+
+ drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, intel_mst->port);
+
+ ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
+ if (ret) {
+ DRM_ERROR("failed to update payload %d\n", ret);
+ }
+}
+
+static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+ DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+
+ /* this can fail */
+ drm_dp_check_act_status(&intel_dp->mst_mgr);
+ /* and this can also fail */
+ drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+
+ drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, intel_mst->port);
+
+ intel_dp->active_mst_links--;
+ intel_mst->port = NULL;
+ if (intel_dp->active_mst_links == 0) {
+ intel_dig_port->base.post_disable(&intel_dig_port->base);
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+ }
+}
+
+static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_dig_port->port;
+ int ret;
+ uint32_t temp;
+ struct intel_connector *found = NULL, *intel_connector;
+ int slots;
+ struct drm_crtc *crtc = encoder->base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
+ if (intel_connector->new_encoder == encoder) {
+ found = intel_connector;
+ break;
+ }
+ }
+
+ if (!found) {
+ DRM_ERROR("can't find connector\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+ intel_mst->port = found->port;
+
+ if (intel_dp->active_mst_links == 0) {
+ enum port port = intel_ddi_get_encoder_port(encoder);
+
+ I915_WRITE(PORT_CLK_SEL(port), intel_crtc->config.ddi_pll_sel);
+
+ intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
+
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_complete_link_train(intel_dp);
+ intel_dp_stop_link_train(intel_dp);
+ }
+
+ ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
+ intel_mst->port, intel_crtc->config.pbn, &slots);
+ if (!ret) {
+ DRM_ERROR("failed to allocate vcpi\n");
+ return;
+ }
+
+ intel_dp->active_mst_links++;
+ temp = I915_READ(DP_TP_STATUS(port));
+ I915_WRITE(DP_TP_STATUS(port), temp);
+
+ ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
+}
+
+static void intel_mst_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_dig_port->port;
+ int ret;
+
+ DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+
+ if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_ACT_SENT),
+ 1))
+ DRM_ERROR("Timed out waiting for ACT sent\n");
+
+ ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
+
+ ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+}
+
+static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ *pipe = intel_mst->pipe;
+ if (intel_mst->port)
+ return true;
+ return false;
+}
+
+static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
+ u32 temp, flags = 0;
+
+ pipe_config->has_dp_encoder = true;
+
+ temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ if (temp & TRANS_DDI_PHSYNC)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+ if (temp & TRANS_DDI_PVSYNC)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+
+ switch (temp & TRANS_DDI_BPC_MASK) {
+ case TRANS_DDI_BPC_6:
+ pipe_config->pipe_bpp = 18;
+ break;
+ case TRANS_DDI_BPC_8:
+ pipe_config->pipe_bpp = 24;
+ break;
+ case TRANS_DDI_BPC_10:
+ pipe_config->pipe_bpp = 30;
+ break;
+ case TRANS_DDI_BPC_12:
+ pipe_config->pipe_bpp = 36;
+ break;
+ default:
+ break;
+ }
+ pipe_config->adjusted_mode.flags |= flags;
+ intel_dp_get_m_n(crtc, pipe_config);
+
+ intel_ddi_clock_get(&intel_dig_port->base, pipe_config);
+}
+
+static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dp *intel_dp = intel_connector->mst_port;
+ struct edid *edid;
+ int ret;
+
+ edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port);
+ if (!edid)
+ return 0;
+
+ ret = intel_connector_update_modes(connector, edid);
+ kfree(edid);
+
+ return ret;
+}
+
+static enum drm_connector_status
+intel_mst_port_dp_detect(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dp *intel_dp = intel_connector->mst_port;
+
+ return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port);
+}
+
+static enum drm_connector_status
+intel_dp_mst_detect(struct drm_connector *connector, bool force)
+{
+	return intel_mst_port_dp_detect(connector);
+}
+
+static int
+intel_dp_mst_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ return 0;
+}
+
+static void
+intel_dp_mst_connector_destroy(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ if (!IS_ERR_OR_NULL(intel_connector->edid))
+ kfree(intel_connector->edid);
+
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
+ .dpms = intel_connector_dpms,
+ .detect = intel_dp_mst_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = intel_dp_mst_set_property,
+ .destroy = intel_dp_mst_connector_destroy,
+};
+
+static int intel_dp_mst_get_modes(struct drm_connector *connector)
+{
+ return intel_dp_mst_get_ddc_modes(connector);
+}
+
+static enum drm_mode_status
+intel_dp_mst_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* TODO - validate mode against available PBN for link */
+ if (mode->clock < 10000)
+ return MODE_CLOCK_LOW;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return MODE_H_ILLEGAL;
+
+ return MODE_OK;
+}
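
The TODO above is the interesting gap: every MST stream costs PBN (Payload Bandwidth Number) units out of the link's total, so a full implementation would price the mode in PBN and reject it when the link cannot carry it. A minimal sketch of that check using the drm_dp_calc_pbn_mode() helper from the MST core; the 24 bpp figure and the available_pbn budget field are assumptions for illustration, not part of this patch:

	static enum drm_mode_status
	intel_dp_mst_check_pbn(struct intel_dp *intel_dp,
			       struct drm_display_mode *mode)
	{
		/* PBN cost of the mode, assuming 24 bpp (assumption) */
		int pbn = drm_dp_calc_pbn_mode(mode->clock, 24);

		/* available_pbn is a hypothetical per-port budget */
		if (pbn > intel_dp->available_pbn)
			return MODE_CLOCK_HIGH;

		return MODE_OK;
	}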
+
+static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dp *intel_dp = intel_connector->mst_port;
+ return &intel_dp->mst_encoders[0]->base.base;
+}
+
+static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
+ .get_modes = intel_dp_mst_get_modes,
+ .mode_valid = intel_dp_mst_mode_valid,
+ .best_encoder = intel_mst_best_encoder,
+};
+
+static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(intel_mst);
+}
+
+static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = {
+ .destroy = intel_dp_mst_encoder_destroy,
+};
+
+static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
+{
+ if (connector->encoder) {
+ enum pipe pipe;
+ if (!connector->encoder->get_hw_state(connector->encoder, &pipe))
+ return false;
+ return true;
+ }
+ return false;
+}
+
+static void intel_connector_add_to_fbdev(struct intel_connector *connector)
+{
+#ifdef CONFIG_DRM_I915_FBDEV
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base);
+#endif
+}
+
+static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
+{
+#ifdef CONFIG_DRM_I915_FBDEV
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base);
+#endif
+}
+
+static struct drm_connector *
+intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+			   struct drm_dp_mst_port *port, char *pathprop)
+{
+ struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct intel_connector *intel_connector;
+ struct drm_connector *connector;
+ int i;
+
+ intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ if (!intel_connector)
+ return NULL;
+
+ connector = &intel_connector->base;
+ drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort);
+ drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
+
+ intel_connector->unregister = intel_connector_unregister;
+ intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
+ intel_connector->mst_port = intel_dp;
+ intel_connector->port = port;
+
+ for (i = PIPE_A; i <= PIPE_C; i++) {
+ drm_mode_connector_attach_encoder(&intel_connector->base,
+ &intel_dp->mst_encoders[i]->base.base);
+ }
+ intel_dp_add_properties(intel_dp, connector);
+
+ drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
+ drm_mode_connector_set_path_property(connector, pathprop);
+ drm_reinit_primary_mode_group(dev);
+ mutex_lock(&dev->mode_config.mutex);
+ intel_connector_add_to_fbdev(intel_connector);
+ mutex_unlock(&dev->mode_config.mutex);
+ drm_connector_register(&intel_connector->base);
+ return connector;
+}
+
+static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_device *dev = connector->dev;
+ /* need to nuke the connector */
+ mutex_lock(&dev->mode_config.mutex);
+ intel_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ intel_connector->unregister(intel_connector);
+
+ mutex_lock(&dev->mode_config.mutex);
+ intel_connector_remove_from_fbdev(intel_connector);
+ drm_connector_cleanup(connector);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ drm_reinit_primary_mode_group(dev);
+
+ kfree(intel_connector);
+ DRM_DEBUG_KMS("\n");
+}
+
+static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+
+ drm_kms_helper_hotplug_event(dev);
+}
+
+static struct drm_dp_mst_topology_cbs mst_cbs = {
+ .add_connector = intel_dp_add_mst_connector,
+ .destroy_connector = intel_dp_destroy_mst_connector,
+ .hotplug = intel_dp_mst_hotplug,
+};
+
+static struct intel_dp_mst_encoder *
+intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum pipe pipe)
+{
+ struct intel_dp_mst_encoder *intel_mst;
+ struct intel_encoder *intel_encoder;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+
+ intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL);
+ if (!intel_mst)
+ return NULL;
+
+ intel_mst->pipe = pipe;
+ intel_encoder = &intel_mst->base;
+ intel_mst->primary = intel_dig_port;
+
+ drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
+ DRM_MODE_ENCODER_DPMST);
+
+ intel_encoder->type = INTEL_OUTPUT_DP_MST;
+ intel_encoder->crtc_mask = 0x7;
+ intel_encoder->cloneable = 0;
+
+ intel_encoder->compute_config = intel_dp_mst_compute_config;
+ intel_encoder->disable = intel_mst_disable_dp;
+ intel_encoder->post_disable = intel_mst_post_disable_dp;
+ intel_encoder->pre_enable = intel_mst_pre_enable_dp;
+ intel_encoder->enable = intel_mst_enable_dp;
+ intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
+ intel_encoder->get_config = intel_dp_mst_enc_get_config;
+
+ return intel_mst;
+}
+
+static bool
+intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
+{
+ int i;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+ for (i = PIPE_A; i <= PIPE_C; i++)
+ intel_dp->mst_encoders[i] = intel_dp_create_fake_mst_encoder(intel_dig_port, i);
+ return true;
+}
+
+int
+intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id)
+{
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ int ret;
+
+ intel_dp->can_mst = true;
+ intel_dp->mst_mgr.cbs = &mst_cbs;
+
+ /* create encoders */
+ intel_dp_create_fake_mst_encoders(intel_dig_port);
+	ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev->dev,
+					   &intel_dp->aux, 16, 3, conn_base_id);
+ if (ret) {
+ intel_dp->can_mst = false;
+ return ret;
+ }
+ return 0;
+}
+
+void
+intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port)
+{
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+ if (!intel_dp->can_mst)
+ return;
+
+ drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr);
+ /* encoders will get killed by normal cleanup */
+}
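
Taken together, the new file expects the primary DP port code to pair the two entry points: intel_dp_mst_encoder_init() once per MST-capable digital port after the SST connector exists, and intel_dp_mst_encoder_cleanup() on teardown. A hedged sketch of that call order, with names assumed from the rest of the series rather than from this hunk:

	/* during DP port init, after drm_connector_register() */
	ret = intel_dp_mst_encoder_init(intel_dig_port,
					intel_connector->base.base.id);
	if (ret)
		DRM_DEBUG_KMS("MST init failed, continuing in SST mode\n");

	/* during encoder destroy */
	intel_dp_mst_encoder_cleanup(intel_dig_port);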
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f67340ed2c12..8a475a6909c3 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -32,7 +32,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/drm_dp_mst_helper.h>
/**
* _wait_for - magic (register) wait macro
@@ -100,6 +100,7 @@
#define INTEL_OUTPUT_EDP 8
#define INTEL_OUTPUT_DSI 9
#define INTEL_OUTPUT_UNKNOWN 10
+#define INTEL_OUTPUT_DP_MST 11
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
@@ -165,6 +166,7 @@ struct intel_panel {
struct {
bool present;
u32 level;
+ u32 min;
u32 max;
bool enabled;
bool combination_mode; /* gen 2/4 only */
@@ -207,6 +209,10 @@ struct intel_connector {
/* since POLL and HPD connectors may use the same HPD line keep the native
state of connector->polled in case hotplug storm detection changes it */
u8 polled;
+
+	void *port; /* kept opaque: it's illegal to dereference this pointer */
+
+ struct intel_dp *mst_port;
};
typedef struct dpll {
@@ -307,6 +313,9 @@ struct intel_crtc_config {
/* Selected dpll when shared or DPLL_ID_PRIVATE. */
enum intel_dpll_id shared_dpll;
+ /* PORT_CLK_SEL for DDI ports. */
+ uint32_t ddi_pll_sel;
+
/* Actual register state of the dpll, for shared dpll cross-checking. */
struct intel_dpll_hw_state dpll_hw_state;
@@ -338,6 +347,7 @@ struct intel_crtc_config {
u32 pos;
u32 size;
bool enabled;
+ bool force_thru;
} pch_pfit;
/* FDI configuration, only valid if has_pch_encoder is set. */
@@ -347,6 +357,9 @@ struct intel_crtc_config {
bool ips_enabled;
bool double_wide;
+
+ bool dp_encoder_is_mst;
+ int pbn;
};
struct intel_pipe_wm {
@@ -358,6 +371,11 @@ struct intel_pipe_wm {
bool sprites_scaled;
};
+struct intel_mmio_flip {
+ u32 seqno;
+ u32 ring_id;
+};
+
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
@@ -384,7 +402,6 @@ struct intel_crtc {
struct drm_i915_gem_object *cursor_bo;
uint32_t cursor_addr;
- int16_t cursor_x, cursor_y;
int16_t cursor_width, cursor_height;
uint32_t cursor_cntl;
uint32_t cursor_base;
@@ -394,8 +411,6 @@ struct intel_crtc {
struct intel_crtc_config *new_config;
bool new_enabled;
- uint32_t ddi_pll_sel;
-
/* reset counter value when the last flip was submitted */
unsigned int reset_counter;
@@ -412,10 +427,12 @@ struct intel_crtc {
wait_queue_head_t vbl_wait;
int scanline_offset;
+ struct intel_mmio_flip mmio_flip;
};
struct intel_plane_wm_parameters {
uint32_t horiz_pixels;
+ uint32_t vert_pixels;
uint8_t bytes_per_pixel;
bool enabled;
bool scaled;
@@ -428,7 +445,6 @@ struct intel_plane {
struct drm_i915_gem_object *obj;
bool can_scale;
int max_downscale;
- u32 lut_r[1024], lut_g[1024], lut_b[1024];
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y;
@@ -481,6 +497,7 @@ struct cxsr_latency {
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
+#define intel_fb_obj(x) ((x) ? to_intel_framebuffer(x)->obj : NULL)
struct intel_hdmi {
u32 hdmi_reg;
@@ -491,6 +508,7 @@ struct intel_hdmi {
bool has_audio;
enum hdmi_force_audio force_audio;
bool rgb_quant_range_selectable;
+ enum hdmi_picture_aspect aspect_ratio;
void (*write_infoframe)(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len);
@@ -499,6 +517,7 @@ struct intel_hdmi {
struct drm_display_mode *adjusted_mode);
};
+struct intel_dp_mst_encoder;
#define DP_MAX_DOWNSTREAM_PORTS 0x10
/**
@@ -537,12 +556,20 @@ struct intel_dp {
unsigned long last_power_cycle;
unsigned long last_power_on;
unsigned long last_backlight_off;
- bool psr_setup_done;
+
struct notifier_block edp_notifier;
bool use_tps3;
+ bool can_mst; /* this port supports mst */
+ bool is_mst;
+ int active_mst_links;
+	/* connector directly attached - won't be used for modeset in mst world */
struct intel_connector *attached_connector;
+	/* mst encoders, one fake encoder per pipe */
+ struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES];
+ struct drm_dp_mst_topology_mgr mst_mgr;
+
uint32_t (*get_aux_clock_divider)(struct intel_dp *dp, int index);
/*
* This function returns the value we have to program the AUX_CTL
@@ -566,6 +593,14 @@ struct intel_digital_port {
u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
+ bool (*hpd_pulse)(struct intel_digital_port *, bool);
+};
+
+struct intel_dp_mst_encoder {
+ struct intel_encoder base;
+ enum pipe pipe;
+ struct intel_digital_port *primary;
+	void *port; /* kept opaque: it's illegal to dereference this pointer */
};
static inline int
@@ -652,6 +687,12 @@ enc_to_dig_port(struct drm_encoder *encoder)
return container_of(encoder, struct intel_digital_port, base.base);
}
+static inline struct intel_dp_mst_encoder *
+enc_to_mst(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_dp_mst_encoder, base.base);
+}
+
static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
{
return &enc_to_dig_port(encoder)->dp;
@@ -676,17 +717,26 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable);
-void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void intel_runtime_pm_disable_interrupts(struct drm_device *dev);
void intel_runtime_pm_restore_interrupts(struct drm_device *dev);
+static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
+{
+ /*
+ * We only use drm_irq_uninstall() at unload and VT switch, so
+ * this is the only thing we need to check.
+ */
+ return !dev_priv->pm._irqs_disabled;
+}
+
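The new inline is the gate the irq helpers use so runtime PM can park interrupts without tripping register-access warnings. A typical caller pattern, sketched from the helper's own comment rather than from this hunk:

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	/* IMR/IER writes are safe past this point */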
int intel_get_crtc_scanline(struct intel_crtc *crtc);
void i9xx_check_fifo_underruns(struct drm_device *dev);
-
+void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv);
/* intel_crt.c */
void intel_crt_init(struct drm_device *dev);
@@ -705,10 +755,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder);
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
-void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
bool intel_ddi_pll_select(struct intel_crtc *crtc);
-void intel_ddi_pll_enable(struct intel_crtc *crtc);
-void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
@@ -716,17 +763,46 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc);
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
+void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
+void intel_ddi_clock_get(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config);
+void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
/* intel_display.c */
const char *intel_output_name(int output);
bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev);
-int valleyview_cur_cdclk(struct drm_i915_private *dev_priv);
void intel_mark_busy(struct drm_device *dev);
-void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring);
+void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring);
+void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+void intel_frontbuffer_flip_complete(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+void intel_frontbuffer_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+/**
+ * intel_frontbuffer_flip - prepare frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on the frontbuffer
+ * covered by @frontbuffer_bits. This is for
+ * synchronous plane updates which will happen on the next vblank and which will
+ * not get delayed by pending gpu rendering.
+ *
+ * Can be called without any locks held.
+ */
+static inline
+void intel_frontbuffer_flip(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
+
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
void intel_mark_idle(struct drm_device *dev);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
+void intel_crtc_control(struct drm_crtc *crtc, bool enable);
void intel_crtc_update_dpms(struct drm_crtc *crtc);
void intel_encoder_destroy(struct drm_encoder *encoder);
void intel_connector_dpms(struct drm_connector *, int mode);
@@ -767,12 +843,18 @@ __intel_framebuffer_create(struct drm_device *dev,
void intel_prepare_page_flip(struct drm_device *dev, int plane);
void intel_finish_page_flip(struct drm_device *dev, int pipe);
void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
+
+/* shared dpll functions */
struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc);
+void intel_put_shared_dpll(struct intel_crtc *crtc);
+
+/* modesetting asserts */
void assert_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
@@ -805,7 +887,6 @@ void hsw_disable_ips(struct intel_crtc *crtc);
void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder);
-int valleyview_get_vco(struct drm_i915_private *dev_priv);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_config *pipe_config);
int intel_format_to_fourcc(int format);
@@ -826,6 +907,8 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
bool intel_dp_is_edp(struct drm_device *dev, enum port port);
+bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
+ bool long_hpd);
void intel_edp_backlight_on(struct intel_dp *intel_dp);
void intel_edp_backlight_off(struct intel_dp *intel_dp);
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
@@ -833,11 +916,24 @@ void intel_edp_panel_on(struct intel_dp *intel_dp);
void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_edp_psr_enable(struct intel_dp *intel_dp);
void intel_edp_psr_disable(struct intel_dp *intel_dp);
-void intel_edp_psr_update(struct drm_device *dev);
void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
-
+void intel_edp_psr_invalidate(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+void intel_edp_psr_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+void intel_edp_psr_init(struct drm_device *dev);
+
+int intel_dp_handle_hpd_irq(struct intel_digital_port *digport, bool long_hpd);
+void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
+void intel_dp_mst_suspend(struct drm_device *dev);
+void intel_dp_mst_resume(struct drm_device *dev);
+int intel_dp_max_link_bw(struct intel_dp *intel_dp);
+void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
+/* intel_dp_mst.c */
+int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id);
+void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
/* intel_dsi.c */
-bool intel_dsi_init(struct drm_device *dev);
+void intel_dsi_init(struct drm_device *dev);
/* intel_dvo.c */
@@ -920,8 +1016,8 @@ void intel_pch_panel_fitting(struct intel_crtc *crtc,
void intel_gmch_panel_fitting(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config,
int fitting_mode);
-void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
- u32 max);
+void intel_panel_set_backlight_acpi(struct intel_connector *connector,
+ u32 level, u32 max);
int intel_panel_setup_backlight(struct drm_connector *connector);
void intel_panel_enable_backlight(struct intel_connector *connector);
void intel_panel_disable_backlight(struct intel_connector *connector);
@@ -940,7 +1036,9 @@ int ilk_wm_max_level(const struct drm_device *dev);
void intel_update_watermarks(struct drm_crtc *crtc);
void intel_update_sprite_watermarks(struct drm_plane *plane,
struct drm_crtc *crtc,
- uint32_t sprite_width, int pixel_size,
+ uint32_t sprite_width,
+ uint32_t sprite_height,
+ int pixel_size,
bool enabled, bool scaled);
void intel_init_pm(struct drm_device *dev);
void intel_pm_setup(struct drm_device *dev);
@@ -963,6 +1061,7 @@ void intel_init_gt_powersave(struct drm_device *dev);
void intel_cleanup_gt_powersave(struct drm_device *dev);
void intel_enable_gt_powersave(struct drm_device *dev);
void intel_disable_gt_powersave(struct drm_device *dev);
+void intel_suspend_gt_powersave(struct drm_device *dev);
void intel_reset_gt_powersave(struct drm_device *dev);
void ironlake_teardown_rc6(struct drm_device *dev);
void gen6_update_ring_freq(struct drm_device *dev);
@@ -976,8 +1075,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_device *dev);
-void __vlv_set_power_well(struct drm_i915_private *dev_priv,
- enum punit_power_well power_well_id, bool enable);
+
/* intel_sdvo.c */
bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 3fd082933c87..bfcefbf33709 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -658,7 +658,7 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
};
-bool intel_dsi_init(struct drm_device *dev)
+void intel_dsi_init(struct drm_device *dev)
{
struct intel_dsi *intel_dsi;
struct intel_encoder *intel_encoder;
@@ -674,29 +674,29 @@ bool intel_dsi_init(struct drm_device *dev)
/* There is no detection method for MIPI so rely on VBT */
if (!dev_priv->vbt.has_mipi)
- return false;
+ return;
+
+ if (IS_VALLEYVIEW(dev)) {
+ dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
+ } else {
+		DRM_ERROR("Unsupported MIPI device, no register base known\n");
+ return;
+ }
intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
if (!intel_dsi)
- return false;
+ return;
intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dsi);
- return false;
+ return;
}
intel_encoder = &intel_dsi->base;
encoder = &intel_encoder->base;
intel_dsi->attached_connector = intel_connector;
- if (IS_VALLEYVIEW(dev)) {
- dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
- } else {
- DRM_ERROR("Unsupported Mipi device to reg base");
- return false;
- }
-
connector = &intel_connector->base;
drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
@@ -743,7 +743,7 @@ bool intel_dsi_init(struct drm_device *dev)
intel_connector_attach_encoder(intel_connector, intel_encoder);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
fixed_mode = dsi->dev_ops->get_modes(&intel_dsi->dev);
if (!fixed_mode) {
@@ -754,12 +754,10 @@ bool intel_dsi_init(struct drm_device *dev)
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
- return true;
+ return;
err:
drm_encoder_cleanup(&intel_encoder->base);
kfree(intel_dsi);
kfree(intel_connector);
-
- return false;
}
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index 21a0d348cedc..47c7584a4aa0 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -143,7 +143,7 @@ static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data)
case MIPI_DSI_DCS_LONG_WRITE:
dsi_vc_dcs_write(intel_dsi, vc, data, len);
break;
- };
+ }
data += len;
@@ -294,7 +294,8 @@ static bool generic_init(struct intel_dsi_device *dsi)
intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
intel_dsi->init_count = mipi_config->master_init_timer;
intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
- intel_dsi->video_frmt_cfg_bits = mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
+ intel_dsi->video_frmt_cfg_bits =
+ mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
switch (intel_dsi->escape_clk_div) {
case 0:
@@ -351,7 +352,8 @@ static bool generic_init(struct intel_dsi_device *dsi)
*
* prepare count
*/
- ths_prepare_ns = max(mipi_config->ths_prepare, mipi_config->tclk_prepare);
+ ths_prepare_ns = max(mipi_config->ths_prepare,
+ mipi_config->tclk_prepare);
prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * 2);
/* exit zero count */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index a3631c0a5c28..56b47d2ffaf7 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -112,7 +112,15 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
+ u32 tmp;
+
+ tmp = I915_READ(intel_dvo->dev.dvo_reg);
+
+ if (!(tmp & DVO_ENABLE))
+ return false;
return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
}
@@ -558,7 +566,7 @@ void intel_dvo_init(struct drm_device *dev)
intel_dvo->panel_wants_dither = true;
}
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
return;
}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 088fe9378a4c..f475414671d8 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -43,10 +43,36 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+static int intel_fbdev_set_par(struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct intel_fbdev *ifbdev =
+ container_of(fb_helper, struct intel_fbdev, helper);
+ int ret;
+
+ ret = drm_fb_helper_set_par(info);
+
+ if (ret == 0) {
+ /*
+ * FIXME: fbdev presumes that all callbacks also work from
+ * atomic contexts and relies on that for emergency oops
+ * printing. KMS totally doesn't do that and the locking here is
+ * by far not the only place this goes wrong. Ignore this for
+ * now until we solve this for real.
+ */
+ mutex_lock(&fb_helper->dev->struct_mutex);
+ ret = i915_gem_object_set_to_gtt_domain(ifbdev->fb->obj,
+ true);
+ mutex_unlock(&fb_helper->dev->struct_mutex);
+ }
+
+ return ret;
+}
+
static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
+ .fb_set_par = intel_fbdev_set_par,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -81,7 +107,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
sizes->surface_depth);
size = mode_cmd.pitches[0] * mode_cmd.height;
- size = ALIGN(size, PAGE_SIZE);
+ size = PAGE_ALIGN(size);
obj = i915_gem_object_create_stolen(dev, size);
if (obj == NULL)
obj = i915_gem_alloc_object(dev, size);
@@ -417,7 +443,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
}
crtcs[i] = new_crtc;
- DRM_DEBUG_KMS("connector %s on pipe %d [CRTC:%d]: %dx%d%s\n",
+ DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n",
connector->name,
pipe_name(to_intel_crtc(encoder->crtc)->pipe),
encoder->crtc->base.id,
@@ -452,7 +478,7 @@ out:
return true;
}
-static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.initial_config = intel_fb_initial_config,
.gamma_set = intel_crtc_fb_gamma_set,
.gamma_get = intel_crtc_fb_gamma_get,
@@ -623,7 +649,8 @@ int intel_fbdev_init(struct drm_device *dev)
if (ifbdev == NULL)
return -ENOMEM;
- ifbdev->helper.funcs = &intel_fb_helper_funcs;
+ drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
+
if (!intel_fbdev_init_bios(dev, ifbdev))
ifbdev->preferred_bpp = 32;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index eee2bbec2958..f9151f6641d9 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -367,6 +367,9 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
union hdmi_infoframe frame;
int ret;
+	/* Propagate the user-selected picture aspect ratio into the mode */
+ adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio;
+
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
adjusted_mode);
if (ret < 0) {
@@ -879,7 +882,7 @@ static bool hdmi_12bpc_possible(struct intel_crtc *crtc)
struct intel_encoder *encoder;
int count = 0, count_hdmi = 0;
- if (!HAS_PCH_SPLIT(dev))
+ if (HAS_GMCH_DISPLAY(dev))
return false;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
@@ -1124,6 +1127,23 @@ intel_hdmi_set_property(struct drm_connector *connector,
goto done;
}
+ if (property == connector->dev->mode_config.aspect_ratio_property) {
+ switch (val) {
+ case DRM_MODE_PICTURE_ASPECT_NONE:
+ intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
+ break;
+ case DRM_MODE_PICTURE_ASPECT_4_3:
+ intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_4_3;
+ break;
+ case DRM_MODE_PICTURE_ASPECT_16_9:
+ intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_16_9;
+ break;
+ default:
+ return -EINVAL;
+ }
+ goto done;
+ }
+
return -EINVAL;
done:
@@ -1229,6 +1249,70 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
}
+static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 val;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* program left/right clock distribution */
+ if (pipe != PIPE_B) {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+ if (ch == DPIO_CH0)
+ val |= CHV_BUFLEFTENA1_FORCE;
+ if (ch == DPIO_CH1)
+ val |= CHV_BUFRIGHTENA1_FORCE;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ } else {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+ if (ch == DPIO_CH0)
+ val |= CHV_BUFLEFTENA2_FORCE;
+ if (ch == DPIO_CH1)
+ val |= CHV_BUFRIGHTENA2_FORCE;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ }
+
+ /* program clock channel usage */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
+ val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe != PIPE_B)
+ val &= ~CHV_PCS_USEDCLKCHANNEL;
+ else
+ val |= CHV_PCS_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
+ val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe != PIPE_B)
+ val &= ~CHV_PCS_USEDCLKCHANNEL;
+ else
+ val |= CHV_PCS_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+
+ /*
+	 * This is a bit weird since generally CL
+ * matches the pipe, but here we need to
+ * pick the CL based on the port.
+ */
+ val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
+ if (pipe != PIPE_B)
+ val &= ~CHV_CMN_USEDCLKCHANNEL;
+ else
+ val |= CHV_CMN_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
@@ -1416,11 +1500,22 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
};
static void
+intel_attach_aspect_ratio_property(struct drm_connector *connector)
+{
+ if (!drm_mode_create_aspect_ratio_property(connector->dev))
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.aspect_ratio_property,
+ DRM_MODE_PICTURE_ASPECT_NONE);
+}
+
+static void
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
{
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
intel_hdmi->color_range_auto = true;
+ intel_attach_aspect_ratio_property(connector);
+ intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
}
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
@@ -1467,7 +1562,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
if (IS_VALLEYVIEW(dev)) {
intel_hdmi->write_infoframe = vlv_write_infoframe;
intel_hdmi->set_infoframes = vlv_set_infoframes;
- } else if (!HAS_PCH_SPLIT(dev)) {
+ } else if (IS_G4X(dev)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
intel_hdmi->set_infoframes = g4x_set_infoframes;
} else if (HAS_DDI(dev)) {
@@ -1490,7 +1585,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
intel_hdmi_add_properties(intel_hdmi, connector);
intel_connector_attach_encoder(intel_connector, intel_encoder);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
* 0xd. Failure to do so will result in spurious interrupts being
@@ -1528,6 +1623,7 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
intel_encoder->get_config = intel_hdmi_get_config;
if (IS_CHERRYVIEW(dev)) {
+ intel_encoder->pre_pll_enable = chv_hdmi_pre_pll_enable;
intel_encoder->pre_enable = chv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = chv_hdmi_post_disable;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index d33b61d0dd33..b31088a551f2 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -34,11 +34,6 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-enum disp_clk {
- CDCLK,
- CZCLK
-};
-
struct gmbus_port {
const char *name;
int reg;
@@ -63,60 +58,11 @@ to_intel_gmbus(struct i2c_adapter *i2c)
return container_of(i2c, struct intel_gmbus, adapter);
}
-static int get_disp_clk_div(struct drm_i915_private *dev_priv,
- enum disp_clk clk)
-{
- u32 reg_val;
- int clk_ratio;
-
- reg_val = I915_READ(CZCLK_CDCLK_FREQ_RATIO);
-
- if (clk == CDCLK)
- clk_ratio =
- ((reg_val & CDCLK_FREQ_MASK) >> CDCLK_FREQ_SHIFT) + 1;
- else
- clk_ratio = (reg_val & CZCLK_FREQ_MASK) + 1;
-
- return clk_ratio;
-}
-
-static void gmbus_set_freq(struct drm_i915_private *dev_priv)
-{
- int vco, gmbus_freq = 0, cdclk_div;
-
- BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
-
- vco = valleyview_get_vco(dev_priv);
-
- /* Get the CDCLK divide ratio */
- cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
-
- /*
- * Program the gmbus_freq based on the cdclk frequency.
- * BSpec erroneously claims we should aim for 4MHz, but
- * in fact 1MHz is the correct frequency.
- */
- if (cdclk_div)
- gmbus_freq = (vco << 1) / cdclk_div;
-
- if (WARN_ON(gmbus_freq == 0))
- return;
-
- I915_WRITE(GMBUSFREQ_VLV, gmbus_freq);
-}
-
void
intel_i2c_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- /*
- * In BIOS-less system, program the correct gmbus frequency
- * before reading edid.
- */
- if (IS_VALLEYVIEW(dev))
- gmbus_set_freq(dev_priv);
-
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 5e5a72fca5fb..881361c0f27e 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -51,6 +51,7 @@ struct intel_lvds_encoder {
bool is_dual_link;
u32 reg;
+ u32 a3_power;
struct intel_lvds_connector *attached_connector;
};
@@ -71,8 +72,13 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ enum intel_display_power_domain power_domain;
u32 tmp;
+ power_domain = intel_display_port_power_domain(encoder);
+ if (!intel_display_power_enabled(dev_priv, power_domain))
+ return false;
+
tmp = I915_READ(lvds_encoder->reg);
if (!(tmp & LVDS_PORT_EN))
@@ -172,8 +178,11 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
* appropriately here, but we need to look more thoroughly into how
- * panels behave in the two modes.
+ * panels behave in the two modes. For now, let's just maintain the
+ * value we got from the BIOS.
*/
+ temp &= ~LVDS_A3_POWER_MASK;
+ temp |= lvds_encoder->a3_power;
/* Set the dithering flag on LVDS as needed, note that there is no
* special lvds dither control bit on pch-split platforms, dithering is
@@ -271,7 +280,6 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds_encoder *lvds_encoder =
to_lvds_encoder(&intel_encoder->base);
struct intel_connector *intel_connector =
@@ -286,8 +294,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
return false;
}
- if ((I915_READ(lvds_encoder->reg) & LVDS_A3_POWER_MASK) ==
- LVDS_A3_POWER_UP)
+ if (lvds_encoder->a3_power == LVDS_A3_POWER_UP)
lvds_bpp = 8*3;
else
lvds_bpp = 6*3;
@@ -1088,6 +1095,9 @@ out:
DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
lvds_encoder->is_dual_link ? "dual" : "single");
+ lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) &
+ LVDS_A3_POWER_MASK;
+
/*
* Unlock registers and just
* leave them unlocked
@@ -1104,7 +1114,7 @@ out:
DRM_DEBUG_KMS("lid notifier registration failed\n");
lvds_connector->lid_notifier.notifier_call = NULL;
}
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_panel_setup_backlight(connector);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 4f6b53998d79..ca52ad2ae7d1 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -352,6 +352,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
case INTEL_OUTPUT_UNKNOWN:
case INTEL_OUTPUT_DISPLAYPORT:
case INTEL_OUTPUT_HDMI:
+ case INTEL_OUTPUT_DP_MST:
type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
break;
case INTEL_OUTPUT_EDP:
@@ -427,7 +428,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
*/
DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head)
- intel_panel_set_backlight(intel_connector, bclp, 255);
+ intel_panel_set_backlight_acpi(intel_connector, bclp, 255);
iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index daa118978eec..dc2f4f26c961 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -415,6 +415,10 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
}
intel_overlay_release_old_vid_tail(overlay);
+
+ i915_gem_track_fb(overlay->old_vid_bo, NULL,
+ INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
return 0;
}
@@ -686,6 +690,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
bool scale_changed = false;
struct drm_device *dev = overlay->dev;
u32 swidth, swidthsw, sheight, ostride;
+ enum pipe pipe = overlay->crtc->pipe;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
@@ -713,7 +718,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
oconfig = OCONF_CC_OUT_8BIT;
if (IS_GEN4(overlay->dev))
oconfig |= OCONF_CSC_MODE_BT709;
- oconfig |= overlay->crtc->pipe == 0 ?
+ oconfig |= pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
iowrite32(oconfig, &regs->OCONFIG);
intel_overlay_unmap_regs(overlay, regs);
@@ -776,9 +781,15 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret)
goto out_unpin;
+ i915_gem_track_fb(overlay->vid_bo, new_bo,
+ INTEL_FRONTBUFFER_OVERLAY(pipe));
+
overlay->old_vid_bo = overlay->vid_bo;
overlay->vid_bo = new_bo;
+ intel_frontbuffer_flip(dev,
+ INTEL_FRONTBUFFER_OVERLAY(pipe));
+
return 0;
out_unpin:
@@ -1028,7 +1039,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
struct drm_intel_overlay_put_image *put_image_rec = data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
- struct drm_mode_object *drmmode_obj;
+ struct drm_crtc *drmmode_crtc;
struct intel_crtc *crtc;
struct drm_i915_gem_object *new_bo;
struct put_image_params *params;
@@ -1057,13 +1068,12 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
if (!params)
return -ENOMEM;
- drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
- DRM_MODE_OBJECT_CRTC);
- if (!drmmode_obj) {
+ drmmode_crtc = drm_crtc_find(dev, put_image_rec->crtc_id);
+ if (!drmmode_crtc) {
ret = -ENOENT;
goto out_free;
}
- crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
+ crtc = to_intel_crtc(drmmode_crtc);
new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
put_image_rec->bo_handle));
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 12b02fe1d0ae..59b028f0b1e8 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -398,6 +398,69 @@ intel_panel_detect(struct drm_device *dev)
}
}
+/**
+ * scale - scale values from one range to another
+ *
+ * @source_val: value in range [@source_min..@source_max]
+ * @source_min: minimum legal value for @source_val
+ * @source_max: maximum legal value for @source_val
+ * @target_min: corresponding target value for @source_min
+ * @target_max: corresponding target value for @source_max
+ *
+ * Return @source_val in range [@source_min..@source_max] scaled to range
+ * [@target_min..@target_max].
+ */
+static uint32_t scale(uint32_t source_val,
+ uint32_t source_min, uint32_t source_max,
+ uint32_t target_min, uint32_t target_max)
+{
+ uint64_t target_val;
+
+ WARN_ON(source_min > source_max);
+ WARN_ON(target_min > target_max);
+
+ /* defensive */
+ source_val = clamp(source_val, source_min, source_max);
+
+ /* avoid overflows */
+ target_val = (uint64_t)(source_val - source_min) *
+ (target_max - target_min);
+ do_div(target_val, source_max - source_min);
+ target_val += target_min;
+
+ return target_val;
+}
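
A worked instance of the mapping, added for clarity (the values are illustrative):

	/* scale(128, 0, 255, 100, 1000):
	 *   (128 - 0) * (1000 - 100) = 115200   64-bit intermediate
	 *   115200 / (255 - 0)       = 451      truncating do_div()
	 *   451 + 100                = 551      returned value
	 */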
+
+/* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */
+static inline u32 scale_user_to_hw(struct intel_connector *connector,
+ u32 user_level, u32 user_max)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ return scale(user_level, 0, user_max,
+ panel->backlight.min, panel->backlight.max);
+}
+
+/* Scale user_level in range [0..user_max] to [0..hw_max], clamping the result
+ * to [hw_min..hw_max]. */
+static inline u32 clamp_user_to_hw(struct intel_connector *connector,
+ u32 user_level, u32 user_max)
+{
+ struct intel_panel *panel = &connector->panel;
+ u32 hw_level;
+
+ hw_level = scale(user_level, 0, user_max, 0, panel->backlight.max);
+ hw_level = clamp(hw_level, panel->backlight.min, panel->backlight.max);
+
+ return hw_level;
+}
+
+/* Scale hw_level in range [hw_min..hw_max] to [0..user_max]. */
+static inline u32 scale_hw_to_user(struct intel_connector *connector,
+ u32 hw_level, u32 user_max)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ return scale(hw_level, panel->backlight.min, panel->backlight.max,
+ 0, user_max);
+}
+
static u32 intel_panel_compute_brightness(struct intel_connector *connector,
u32 val)
{
@@ -557,17 +620,16 @@ intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
dev_priv->display.set_backlight(connector, level);
}
-/* set backlight brightness to level in range [0..max] */
-void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
- u32 max)
+/* set backlight brightness to level in range [0..max], scaling wrt hw min */
+static void intel_panel_set_backlight(struct intel_connector *connector,
+ u32 user_level, u32 user_max)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
- u32 freq;
+ u32 hw_level;
unsigned long flags;
- u64 n;
if (!panel->backlight.present || pipe == INVALID_PIPE)
return;
@@ -576,18 +638,46 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
WARN_ON(panel->backlight.max == 0);
- /* scale to hardware max, but be careful to not overflow */
- freq = panel->backlight.max;
- n = (u64)level * freq;
- do_div(n, max);
- level = n;
+ hw_level = scale_user_to_hw(connector, user_level, user_max);
+ panel->backlight.level = hw_level;
+
+ if (panel->backlight.enabled)
+ intel_panel_actually_set_backlight(connector, hw_level);
+
+ spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+}
+
+/* set backlight brightness to level in range [0..max], assuming hw min is
+ * respected.
+ */
+void intel_panel_set_backlight_acpi(struct intel_connector *connector,
+ u32 user_level, u32 user_max)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
+ u32 hw_level;
+ unsigned long flags;
+
+ if (!panel->backlight.present || pipe == INVALID_PIPE)
+ return;
+
+ spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+
+ WARN_ON(panel->backlight.max == 0);
+
+ hw_level = clamp_user_to_hw(connector, user_level, user_max);
+ panel->backlight.level = hw_level;
- panel->backlight.level = level;
if (panel->backlight.device)
- panel->backlight.device->props.brightness = level;
+ panel->backlight.device->props.brightness =
+ scale_hw_to_user(connector,
+ panel->backlight.level,
+ panel->backlight.device->props.max_brightness);
if (panel->backlight.enabled)
- intel_panel_actually_set_backlight(connector, level);
+ intel_panel_actually_set_backlight(connector, hw_level);
spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
}
@@ -860,7 +950,9 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
panel->backlight.level = panel->backlight.max;
if (panel->backlight.device)
panel->backlight.device->props.brightness =
- panel->backlight.level;
+ scale_hw_to_user(connector,
+ panel->backlight.level,
+ panel->backlight.device->props.max_brightness);
}
dev_priv->display.enable_backlight(connector);
@@ -889,11 +981,15 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
struct intel_connector *connector = bl_get_data(bd);
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 hw_level;
int ret;
intel_runtime_pm_get(dev_priv);
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- ret = intel_panel_get_backlight(connector);
+
+ hw_level = intel_panel_get_backlight(connector);
+ ret = scale_hw_to_user(connector, hw_level, bd->props.max_brightness);
+
drm_modeset_unlock(&dev->mode_config.connection_mutex);
intel_runtime_pm_put(dev_priv);
@@ -913,12 +1009,19 @@ static int intel_backlight_device_register(struct intel_connector *connector)
if (WARN_ON(panel->backlight.device))
return -ENODEV;
- BUG_ON(panel->backlight.max == 0);
+ WARN_ON(panel->backlight.max == 0);
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
- props.brightness = panel->backlight.level;
+
+ /*
+ * Note: Everything should work even if the backlight device max
+ * presented to the userspace is arbitrarily chosen.
+ */
props.max_brightness = panel->backlight.max;
+ props.brightness = scale_hw_to_user(connector,
+ panel->backlight.level,
+ props.max_brightness);
/*
* Note: using the same name independent of the connector prevents
@@ -964,6 +1067,19 @@ static void intel_backlight_device_unregister(struct intel_connector *connector)
* XXX: Query mode clock or hardware clock and program PWM modulation frequency
* appropriately when it's 0. Use VBT and/or sane defaults.
*/
+static u32 get_backlight_min_vbt(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+
+ WARN_ON(panel->backlight.max == 0);
+
+ /* vbt value is a coefficient in range [0..255] */
+ return scale(dev_priv->vbt.backlight.min_brightness, 0, 255,
+ 0, panel->backlight.max);
+}
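
For a feel of the numbers (both values illustrative, not taken from this patch):

	/* VBT min_brightness = 26, panel->backlight.max = 937:
	 *   scale(26, 0, 255, 0, 937) = 26 * 937 / 255 = 95
	 * so the backlight floor sits at roughly 10% of the PWM range.
	 */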
+
static int bdw_setup_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
@@ -979,6 +1095,8 @@ static int bdw_setup_backlight(struct intel_connector *connector)
if (!panel->backlight.max)
return -ENODEV;
+ panel->backlight.min = get_backlight_min_vbt(connector);
+
val = bdw_get_backlight(connector);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
@@ -1003,6 +1121,8 @@ static int pch_setup_backlight(struct intel_connector *connector)
if (!panel->backlight.max)
return -ENODEV;
+ panel->backlight.min = get_backlight_min_vbt(connector);
+
val = pch_get_backlight(connector);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
@@ -1035,6 +1155,8 @@ static int i9xx_setup_backlight(struct intel_connector *connector)
if (!panel->backlight.max)
return -ENODEV;
+ panel->backlight.min = get_backlight_min_vbt(connector);
+
val = i9xx_get_backlight(connector);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
@@ -1062,6 +1184,8 @@ static int i965_setup_backlight(struct intel_connector *connector)
if (!panel->backlight.max)
return -ENODEV;
+ panel->backlight.min = get_backlight_min_vbt(connector);
+
val = i9xx_get_backlight(connector);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
@@ -1099,6 +1223,8 @@ static int vlv_setup_backlight(struct intel_connector *connector)
if (!panel->backlight.max)
return -ENODEV;
+ panel->backlight.min = get_backlight_min_vbt(connector);
+
val = _vlv_get_backlight(dev, PIPE_A);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index f1233f544f3e..c3bb925b2e65 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -93,8 +93,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int cfb_pitch;
int i;
@@ -150,8 +149,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
@@ -222,16 +220,26 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dev_priv->fbc.threshold++;
+
+ switch (dev_priv->fbc.threshold) {
+ case 4:
+ case 3:
+ dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+ break;
+ case 2:
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
- else
+ break;
+ case 1:
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+ break;
+ }
dpfc_ctl |= DPFC_CTL_FENCE_EN;
if (IS_GEN5(dev))
dpfc_ctl |= obj->fence_reg;
@@ -278,16 +286,27 @@ static void gen7_enable_fbc(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dev_priv->fbc.threshold++;
+
+ switch (dev_priv->fbc.threshold) {
+ case 4:
+ case 3:
+ dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+ break;
+ case 2:
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
- else
+ break;
+ case 1:
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+ break;
+ }
+
dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -462,7 +481,6 @@ void intel_update_fbc(struct drm_device *dev)
struct drm_crtc *crtc = NULL, *tmp_crtc;
struct intel_crtc *intel_crtc;
struct drm_framebuffer *fb;
- struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
const struct drm_display_mode *adjusted_mode;
unsigned int max_width, max_height;
@@ -507,8 +525,7 @@ void intel_update_fbc(struct drm_device *dev)
intel_crtc = to_intel_crtc(crtc);
fb = crtc->primary->fb;
- intel_fb = to_intel_framebuffer(fb);
- obj = intel_fb->obj;
+ obj = intel_fb_obj(fb);
adjusted_mode = &intel_crtc->config.adjusted_mode;
if (i915.enable_fbc < 0) {
@@ -529,7 +546,10 @@ void intel_update_fbc(struct drm_device *dev)
goto out_disable;
}
- if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
+ max_width = 4096;
+ max_height = 4096;
+ } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
max_width = 4096;
max_height = 2048;
} else {
@@ -563,7 +583,8 @@ void intel_update_fbc(struct drm_device *dev)
if (in_dbg_master())
goto out_disable;
- if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
+ if (i915_gem_stolen_setup_compression(dev, obj->base.size,
+ drm_format_plane_cpp(fb->pixel_format, 0))) {
if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
goto out_disable;
@@ -789,12 +810,33 @@ static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
return NULL;
}
-static void pineview_disable_cxsr(struct drm_device *dev)
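+/*
+ * Memory self-refresh (CxSR) used to be toggled with open-coded,
+ * per-platform register writes at each call site; centralise that
+ * here so callers can simply disable CxSR around watermark updates
+ * and re-enable it afterwards.
+ */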
+void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_device *dev = dev_priv->dev;
+ u32 val;
+
+ if (IS_VALLEYVIEW(dev)) {
+ I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+ } else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
+ I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+ } else if (IS_PINEVIEW(dev)) {
+ val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
+ val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
+ I915_WRITE(DSPFW3, val);
+ } else if (IS_I945G(dev) || IS_I945GM(dev)) {
+ val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
+ _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
+ I915_WRITE(FW_BLC_SELF, val);
+ } else if (IS_I915GM(dev)) {
+ val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
+ _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
+ I915_WRITE(INSTPM, val);
+ } else {
+ return;
+ }
- /* deactivate cxsr */
- I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
+ DRM_DEBUG_KMS("memory self-refresh is %s\n",
+ enable ? "enabled" : "disabled");
}
/*
@@ -864,95 +906,95 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
- PINEVIEW_DISPLAY_FIFO,
- PINEVIEW_MAX_WM,
- PINEVIEW_DFT_WM,
- PINEVIEW_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE
+ .fifo_size = PINEVIEW_DISPLAY_FIFO,
+ .max_wm = PINEVIEW_MAX_WM,
+ .default_wm = PINEVIEW_DFT_WM,
+ .guard_size = PINEVIEW_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
- PINEVIEW_DISPLAY_FIFO,
- PINEVIEW_MAX_WM,
- PINEVIEW_DFT_HPLLOFF_WM,
- PINEVIEW_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE
+ .fifo_size = PINEVIEW_DISPLAY_FIFO,
+ .max_wm = PINEVIEW_MAX_WM,
+ .default_wm = PINEVIEW_DFT_HPLLOFF_WM,
+ .guard_size = PINEVIEW_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
- PINEVIEW_CURSOR_FIFO,
- PINEVIEW_CURSOR_MAX_WM,
- PINEVIEW_CURSOR_DFT_WM,
- PINEVIEW_CURSOR_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE,
+ .fifo_size = PINEVIEW_CURSOR_FIFO,
+ .max_wm = PINEVIEW_CURSOR_MAX_WM,
+ .default_wm = PINEVIEW_CURSOR_DFT_WM,
+ .guard_size = PINEVIEW_CURSOR_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
- PINEVIEW_CURSOR_FIFO,
- PINEVIEW_CURSOR_MAX_WM,
- PINEVIEW_CURSOR_DFT_WM,
- PINEVIEW_CURSOR_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE
+ .fifo_size = PINEVIEW_CURSOR_FIFO,
+ .max_wm = PINEVIEW_CURSOR_MAX_WM,
+ .default_wm = PINEVIEW_CURSOR_DFT_WM,
+ .guard_size = PINEVIEW_CURSOR_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
- G4X_FIFO_SIZE,
- G4X_MAX_WM,
- G4X_MAX_WM,
- 2,
- G4X_FIFO_LINE_SIZE,
+ .fifo_size = G4X_FIFO_SIZE,
+ .max_wm = G4X_MAX_WM,
+ .default_wm = G4X_MAX_WM,
+ .guard_size = 2,
+ .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
- I965_CURSOR_FIFO,
- I965_CURSOR_MAX_WM,
- I965_CURSOR_DFT_WM,
- 2,
- G4X_FIFO_LINE_SIZE,
+ .fifo_size = I965_CURSOR_FIFO,
+ .max_wm = I965_CURSOR_MAX_WM,
+ .default_wm = I965_CURSOR_DFT_WM,
+ .guard_size = 2,
+ .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
- VALLEYVIEW_FIFO_SIZE,
- VALLEYVIEW_MAX_WM,
- VALLEYVIEW_MAX_WM,
- 2,
- G4X_FIFO_LINE_SIZE,
+ .fifo_size = VALLEYVIEW_FIFO_SIZE,
+ .max_wm = VALLEYVIEW_MAX_WM,
+ .default_wm = VALLEYVIEW_MAX_WM,
+ .guard_size = 2,
+ .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
- I965_CURSOR_FIFO,
- VALLEYVIEW_CURSOR_MAX_WM,
- I965_CURSOR_DFT_WM,
- 2,
- G4X_FIFO_LINE_SIZE,
+ .fifo_size = I965_CURSOR_FIFO,
+ .max_wm = VALLEYVIEW_CURSOR_MAX_WM,
+ .default_wm = I965_CURSOR_DFT_WM,
+ .guard_size = 2,
+ .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
- I965_CURSOR_FIFO,
- I965_CURSOR_MAX_WM,
- I965_CURSOR_DFT_WM,
- 2,
- I915_FIFO_LINE_SIZE,
+ .fifo_size = I965_CURSOR_FIFO,
+ .max_wm = I965_CURSOR_MAX_WM,
+ .default_wm = I965_CURSOR_DFT_WM,
+ .guard_size = 2,
+ .cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
- I945_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I915_FIFO_LINE_SIZE
+ .fifo_size = I945_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
- I915_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I915_FIFO_LINE_SIZE
+ .fifo_size = I915_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_wm_info = {
- I855GM_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I830_FIFO_LINE_SIZE
+ .fifo_size = I855GM_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
- I830_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I830_FIFO_LINE_SIZE
+ .fifo_size = I830_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I830_FIFO_LINE_SIZE,
};
/**
@@ -1033,7 +1075,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
dev_priv->fsb_freq, dev_priv->mem_freq);
if (!latency) {
DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
- pineview_disable_cxsr(dev);
+ intel_set_memory_cxsr(dev_priv, false);
return;
}
@@ -1084,13 +1126,9 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
I915_WRITE(DSPFW3, reg);
DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
- /* activate cxsr */
- I915_WRITE(DSPFW3,
- I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
- DRM_DEBUG_KMS("Self-refresh is enabled\n");
+ intel_set_memory_cxsr(dev_priv, true);
} else {
- pineview_disable_cxsr(dev);
- DRM_DEBUG_KMS("Self-refresh is disabled\n");
+ intel_set_memory_cxsr(dev_priv, false);
}
}
@@ -1316,6 +1354,7 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
int plane_sr, cursor_sr;
int ignore_plane_sr, ignore_cursor_sr;
unsigned int enabled = 0;
+ bool cxsr_enabled;
vlv_update_drain_latency(dev);
@@ -1342,10 +1381,10 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
&valleyview_wm_info,
&valleyview_cursor_wm_info,
&ignore_plane_sr, &cursor_sr)) {
- I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
+ cxsr_enabled = true;
} else {
- I915_WRITE(FW_BLC_SELF_VLV,
- I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
+ cxsr_enabled = false;
+ intel_set_memory_cxsr(dev_priv, false);
plane_sr = cursor_sr = 0;
}
@@ -1365,6 +1404,9 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
I915_WRITE(DSPFW3,
(I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+ if (cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, true);
}
static void g4x_update_wm(struct drm_crtc *crtc)
@@ -1375,6 +1417,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
int plane_sr, cursor_sr;
unsigned int enabled = 0;
+ bool cxsr_enabled;
if (g4x_compute_wm0(dev, PIPE_A,
&g4x_wm_info, latency_ns,
@@ -1394,10 +1437,10 @@ static void g4x_update_wm(struct drm_crtc *crtc)
&g4x_wm_info,
&g4x_cursor_wm_info,
&plane_sr, &cursor_sr)) {
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ cxsr_enabled = true;
} else {
- I915_WRITE(FW_BLC_SELF,
- I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+ cxsr_enabled = false;
+ intel_set_memory_cxsr(dev_priv, false);
plane_sr = cursor_sr = 0;
}
@@ -1418,6 +1461,9 @@ static void g4x_update_wm(struct drm_crtc *crtc)
I915_WRITE(DSPFW3,
(I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+ if (cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, true);
}
static void i965_update_wm(struct drm_crtc *unused_crtc)
@@ -1427,6 +1473,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
struct drm_crtc *crtc;
int srwm = 1;
int cursor_sr = 16;
+ bool cxsr_enabled;
/* Calc sr entries for one plane configs */
crtc = single_enabled_crtc(dev);
@@ -1468,13 +1515,11 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
"cursor %d\n", srwm, cursor_sr);
- if (IS_CRESTLINE(dev))
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ cxsr_enabled = true;
} else {
+ cxsr_enabled = false;
/* Turn off self refresh if both pipes are enabled */
- if (IS_CRESTLINE(dev))
- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
- & ~FW_BLC_SELF_EN);
+ intel_set_memory_cxsr(dev_priv, false);
}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -1486,6 +1531,9 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
/* update cursor SR watermark */
I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+ if (cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, true);
}
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
@@ -1545,12 +1593,12 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
if (IS_I915GM(dev) && enabled) {
- struct intel_framebuffer *fb;
+ struct drm_i915_gem_object *obj;
- fb = to_intel_framebuffer(enabled->primary->fb);
+ obj = intel_fb_obj(enabled->primary->fb);
/* self-refresh seems busted with untiled */
- if (fb->obj->tiling_mode == I915_TILING_NONE)
+ if (obj->tiling_mode == I915_TILING_NONE)
enabled = NULL;
}
@@ -1560,10 +1608,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
cwm = 2;
/* Play safe and disable self-refresh before adjusting watermarks. */
- if (IS_I945G(dev) || IS_I945GM(dev))
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
- else if (IS_I915GM(dev))
- I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));
+ intel_set_memory_cxsr(dev_priv, false);
/* Calc sr entries for one plane configs */
if (HAS_FW_BLC(dev) && enabled) {
@@ -1609,17 +1654,8 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
I915_WRITE(FW_BLC, fwater_lo);
I915_WRITE(FW_BLC2, fwater_hi);
- if (HAS_FW_BLC(dev)) {
- if (enabled) {
- if (IS_I945G(dev) || IS_I945GM(dev))
- I915_WRITE(FW_BLC_SELF,
- FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
- else if (IS_I915GM(dev))
- I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
- DRM_DEBUG_KMS("memory self refresh enabled\n");
- } else
- DRM_DEBUG_KMS("memory self refresh disabled\n");
- }
+ if (enabled)
+ intel_set_memory_cxsr(dev_priv, true);
}
static void i845_update_wm(struct drm_crtc *unused_crtc)
@@ -2707,10 +2743,11 @@ static void ilk_update_wm(struct drm_crtc *crtc)
ilk_write_wm_values(dev_priv, &results);
}
-static void ilk_update_sprite_wm(struct drm_plane *plane,
- struct drm_crtc *crtc,
- uint32_t sprite_width, int pixel_size,
- bool enabled, bool scaled)
+static void
+ilk_update_sprite_wm(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ uint32_t sprite_width, uint32_t sprite_height,
+ int pixel_size, bool enabled, bool scaled)
{
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
@@ -2718,6 +2755,7 @@ static void ilk_update_sprite_wm(struct drm_plane *plane,
intel_plane->wm.enabled = enabled;
intel_plane->wm.scaled = scaled;
intel_plane->wm.horiz_pixels = sprite_width;
+ intel_plane->wm.vert_pixels = sprite_height;
intel_plane->wm.bytes_per_pixel = pixel_size;
/*
@@ -2852,13 +2890,16 @@ void intel_update_watermarks(struct drm_crtc *crtc)
void intel_update_sprite_watermarks(struct drm_plane *plane,
struct drm_crtc *crtc,
- uint32_t sprite_width, int pixel_size,
+ uint32_t sprite_width,
+ uint32_t sprite_height,
+ int pixel_size,
bool enabled, bool scaled)
{
struct drm_i915_private *dev_priv = plane->dev->dev_private;
if (dev_priv->display.update_sprite_wm)
- dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
+ dev_priv->display.update_sprite_wm(plane, crtc,
+ sprite_width, sprite_height,
pixel_size, enabled, scaled);
}
@@ -3147,6 +3188,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
if (val < dev_priv->rps.max_freq_softlimit)
mask |= GEN6_PM_RP_UP_THRESHOLD;
+ mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
+ mask &= dev_priv->pm_rps_events;
+
/* IVB and SNB hard hang on a looping batchbuffer
* if GEN6_PM_UP_EI_EXPIRED is masked.
*/
@@ -3250,7 +3294,9 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
- if (IS_VALLEYVIEW(dev))
+ if (IS_CHERRYVIEW(dev))
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+ else if (IS_VALLEYVIEW(dev))
vlv_set_rps_idle(dev_priv);
else
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
@@ -3348,6 +3394,15 @@ static void gen6_disable_rps(struct drm_device *dev)
gen6_disable_rps_interrupts(dev);
}
+static void cherryview_disable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ gen8_disable_rps_interrupts(dev);
+}
+
static void valleyview_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3419,7 +3474,7 @@ static void gen8_enable_rps_interrupts(struct drm_device *dev)
spin_lock_irq(&dev_priv->irq_lock);
WARN_ON(dev_priv->rps.pm_iir);
- bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -3430,7 +3485,7 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev)
spin_lock_irq(&dev_priv->irq_lock);
WARN_ON(dev_priv->rps.pm_iir);
- snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -3483,15 +3538,23 @@ static void gen8_enable_rps(struct drm_device *dev)
for_each_ring(ring, dev_priv, unused)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
- I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+ if (IS_BROADWELL(dev))
+ I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
+ else
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
/* 3: Enable RC6 */
if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
intel_print_rc6_info(dev, rc6_mask);
- I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
- GEN6_RC_CTL_EI_MODE(1) |
- rc6_mask);
+ if (IS_BROADWELL(dev))
+ I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+ GEN7_RC_CTL_TO_MODE |
+ rc6_mask);
+ else
+ I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_EI_MODE(1) |
+ rc6_mask);
/* 4: Program defaults and thresholds for RPS */
I915_WRITE(GEN6_RPNSWREQ,
@@ -3727,7 +3790,57 @@ void gen6_update_ring_freq(struct drm_device *dev)
mutex_unlock(&dev_priv->rps.hw_lock);
}
-int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
+static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
+{
+ u32 val, rp0;
+
+ val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
+ rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
+
+ return rp0;
+}
+
+static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
+{
+ u32 val, rpe;
+
+ val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
+ rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
+
+ return rpe;
+}
+
+static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
+{
+ u32 val, rp1;
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+ rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
+
+ return rp1;
+}
+
+static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
+{
+ u32 val, rpn;
+
+ val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
+ rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;
+
+ return rpn;
+}
+
+static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
+{
+ u32 val, rp1;
+
+ val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
+
+ rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
+
+ return rp1;
+}
+
+static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
{
u32 val, rp0;
@@ -3752,7 +3865,7 @@ static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
return rpe;
}
-int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
+static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
{
return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
}
@@ -3766,6 +3879,35 @@ static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
dev_priv->vlv_pctx->stolen->start);
}
+
+/* Check that the pcbr address is not empty. */
+static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
+{
+ unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
+
+ WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
+}
+
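+/*
+ * If the BIOS left VLV_PCBR unset, carve the 32KiB power context
+ * out of the top of stolen memory ourselves.
+ */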
+static void cherryview_setup_pctx(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long pctx_paddr, paddr;
+ struct i915_gtt *gtt = &dev_priv->gtt;
+ u32 pcbr;
+ int pctx_size = 32*1024;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ pcbr = I915_READ(VLV_PCBR);
+ if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
+ paddr = (dev_priv->mm.stolen_base +
+ (gtt->stolen_size - pctx_size));
+
+ pctx_paddr = (paddr & (~4095));
+ I915_WRITE(VLV_PCBR, pctx_paddr);
+ }
+}
+
static void valleyview_setup_pctx(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3840,6 +3982,11 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
dev_priv->rps.efficient_freq);
+ dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
+ DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
+ dev_priv->rps.rp1_freq);
+
dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
@@ -3855,11 +4002,142 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
mutex_unlock(&dev_priv->rps.hw_lock);
}
+static void cherryview_init_gt_powersave(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ cherryview_setup_pctx(dev);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
+ dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
+ DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
+ dev_priv->rps.max_freq);
+
+ dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
+ DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ dev_priv->rps.efficient_freq);
+
+ dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
+ DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
+ dev_priv->rps.rp1_freq);
+
+ dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
+ DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+ dev_priv->rps.min_freq);
+
+ /* Preserve min/max settings in case of re-init */
+ if (dev_priv->rps.max_freq_softlimit == 0)
+ dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+
+ if (dev_priv->rps.min_freq_softlimit == 0)
+ dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
{
valleyview_cleanup_pctx(dev);
}
+static void cherryview_enable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ u32 gtfifodbg, val, rc6_mode = 0, pcbr;
+ int i;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ gtfifodbg = I915_READ(GTFIFODBG);
+ if (gtfifodbg) {
+ DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
+ gtfifodbg);
+ I915_WRITE(GTFIFODBG, gtfifodbg);
+ }
+
+ cherryview_check_pctx(dev_priv);
+
+ /* 1a & 1b: Get forcewake during program sequence. Although the driver
+ * hasn't enabled a state yet where we need forcewake, BIOS may have. */
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+ /* 2a: Program RC6 thresholds. */
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+ I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+
+ for_each_ring(ring, dev_priv, i)
+ I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+ I915_WRITE(GEN6_RC_SLEEP, 0);
+
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+
+ /* allows RC6 residency counter to work */
+ I915_WRITE(VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+ VLV_MEDIA_RC6_COUNT_EN |
+ VLV_RENDER_RC6_COUNT_EN));
+
+ /* For now we assume BIOS is allocating and populating the PCBR */
+ pcbr = I915_READ(VLV_PCBR);
+
+ DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);
+
+ /* 3: Enable RC6 */
+ if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
+ (pcbr >> VLV_PCBR_ADDR_SHIFT))
+ rc6_mode = GEN6_RC_CTL_EI_MODE(1);
+
+ I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
+
+ /* 4: Program defaults and thresholds for RPS */
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+ I915_WRITE(GEN6_RP_UP_EI, 66000);
+ I915_WRITE(GEN6_RP_DOWN_EI, 350000);
+
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ /* WaDisablePwrmtrEvent:chv (pre-production hw) */
+ I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
+ I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
+
+ /* 5: Enable RPS */
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+
+ DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
+ DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+
+ dev_priv->rps.cur_freq = (val >> 8) & 0xff;
+ DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+ dev_priv->rps.cur_freq);
+
+ DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ dev_priv->rps.efficient_freq);
+
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
+
+ gen8_enable_rps_interrupts(dev);
+
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+}
+
static void valleyview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3886,6 +4164,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RP_DOWN_EI, 350000);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
@@ -3906,9 +4185,11 @@ static void valleyview_enable_rps(struct drm_device *dev)
/* allows RC6 residency counter to work */
I915_WRITE(VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+ _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
+ VLV_RENDER_RC0_COUNT_EN |
VLV_MEDIA_RC6_COUNT_EN |
VLV_RENDER_RC6_COUNT_EN));
+
if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
@@ -4666,33 +4947,60 @@ void intel_init_gt_powersave(struct drm_device *dev)
{
i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
- if (IS_VALLEYVIEW(dev))
+ if (IS_CHERRYVIEW(dev))
+ cherryview_init_gt_powersave(dev);
+ else if (IS_VALLEYVIEW(dev))
valleyview_init_gt_powersave(dev);
}
void intel_cleanup_gt_powersave(struct drm_device *dev)
{
- if (IS_VALLEYVIEW(dev))
+ if (IS_CHERRYVIEW(dev))
+ return;
+ else if (IS_VALLEYVIEW(dev))
valleyview_cleanup_gt_powersave(dev);
}
+/**
+ * intel_suspend_gt_powersave - suspend PM work and helper threads
+ * @dev: drm device
+ *
+ * We don't want to disable RC6 or other features here; we just want
+ * to make sure any work we've queued has finished and won't bother
+ * us while we're suspended.
+ */
+void intel_suspend_gt_powersave(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Interrupts should be disabled already to avoid re-arming. */
+ WARN_ON(intel_irqs_enabled(dev_priv));
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ cancel_work_sync(&dev_priv->rps.work);
+
+ /* Force GPU to min freq during suspend */
+ gen6_rps_idle(dev_priv);
+}
+
void intel_disable_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/* Interrupts should be disabled already to avoid re-arming. */
- WARN_ON(dev->irq_enabled);
+ WARN_ON(intel_irqs_enabled(dev_priv));
if (IS_IRONLAKE_M(dev)) {
ironlake_disable_drps(dev);
ironlake_disable_rc6(dev);
- } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
- if (cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work))
- intel_runtime_pm_put(dev_priv);
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ intel_suspend_gt_powersave(dev);
- cancel_work_sync(&dev_priv->rps.work);
mutex_lock(&dev_priv->rps.hw_lock);
- if (IS_VALLEYVIEW(dev))
+ if (IS_CHERRYVIEW(dev))
+ cherryview_disable_rps(dev);
+ else if (IS_VALLEYVIEW(dev))
valleyview_disable_rps(dev);
else
gen6_disable_rps(dev);
@@ -4710,7 +5018,9 @@ static void intel_gen6_powersave_work(struct work_struct *work)
mutex_lock(&dev_priv->rps.hw_lock);
- if (IS_VALLEYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev)) {
+ cherryview_enable_rps(dev);
+ } else if (IS_VALLEYVIEW(dev)) {
valleyview_enable_rps(dev);
} else if (IS_BROADWELL(dev)) {
gen8_enable_rps(dev);
@@ -4735,7 +5045,7 @@ void intel_enable_gt_powersave(struct drm_device *dev)
ironlake_enable_rc6(dev);
intel_init_emon(dev);
mutex_unlock(&dev->struct_mutex);
- } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
+ } else if (INTEL_INFO(dev)->gen >= 6) {
/*
* PCU communication is slow and this doesn't need to be
* done at any specific time, so do this out of our fast path
@@ -5108,7 +5418,7 @@ static void gen8_init_clock_gating(struct drm_device *dev)
I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
I915_WRITE(_3D_CHICKEN3,
- _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2));
+ _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));
I915_WRITE(COMMON_SLICE_CHICKEN2,
_MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
@@ -5343,10 +5653,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
}
DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
- dev_priv->vlv_cdclk_freq = valleyview_cur_cdclk(dev_priv);
- DRM_DEBUG_DRIVER("Current CD clock rate: %d MHz",
- dev_priv->vlv_cdclk_freq);
-
I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
/* WaDisableEarlyCull:vlv */
@@ -5421,6 +5727,35 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
static void cherryview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
+ mutex_unlock(&dev_priv->rps.hw_lock);
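+ /* CZ clock mode and DDR speed are fused; decode CCK_FUSE_REG bits 4:2. */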
+ switch ((val >> 2) & 0x7) {
+ case 0:
+ case 1:
+ dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_200;
+ dev_priv->mem_freq = 1600;
+ break;
+ case 2:
+ dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_267;
+ dev_priv->mem_freq = 1600;
+ break;
+ case 3:
+ dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_333;
+ dev_priv->mem_freq = 2000;
+ break;
+ case 4:
+ dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_320;
+ dev_priv->mem_freq = 1600;
+ break;
+ case 5:
+ dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_400;
+ dev_priv->mem_freq = 1600;
+ break;
+ }
+ DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
@@ -5661,7 +5996,6 @@ bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- unsigned long irqflags;
/*
* After we re-enable the power well, if we touch VGA register 0x3d5
@@ -5677,21 +6011,8 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
- if (IS_BROADWELL(dev)) {
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
- dev_priv->de_irq_mask[PIPE_B]);
- I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
- ~dev_priv->de_irq_mask[PIPE_B] |
- GEN8_PIPE_VBLANK);
- I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
- dev_priv->de_irq_mask[PIPE_C]);
- I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
- ~dev_priv->de_irq_mask[PIPE_C] |
- GEN8_PIPE_VBLANK);
- POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
- }
+ if (IS_BROADWELL(dev))
+ gen8_irq_power_well_post_enable(dev_priv);
}
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
@@ -5762,34 +6083,13 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
return true;
}
-void __vlv_set_power_well(struct drm_i915_private *dev_priv,
- enum punit_power_well power_well_id, bool enable)
+static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well, bool enable)
{
- struct drm_device *dev = dev_priv->dev;
+ enum punit_power_well power_well_id = power_well->data;
u32 mask;
u32 state;
u32 ctrl;
- enum pipe pipe;
-
- if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
- if (enable) {
- /*
- * Enable the CRI clock source so we can get at the
- * display and the reference clock for VGA
- * hotplug / manual detection.
- */
- I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
- DPLL_REFA_CLK_ENABLE_VLV |
- DPLL_INTEGRATED_CRI_CLK_VLV);
- udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
- } else {
- for_each_pipe(pipe)
- assert_pll_disabled(dev_priv, pipe);
- /* Assert common reset */
- I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) &
- ~DPIO_CMNRST);
- }
- }
mask = PUNIT_PWRGT_MASK(power_well_id);
state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
@@ -5817,28 +6117,6 @@ void __vlv_set_power_well(struct drm_i915_private *dev_priv,
out:
mutex_unlock(&dev_priv->rps.hw_lock);
-
- /*
- * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
- * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
- * a. GUnit 0x2110 bit[0] set to 1 (def 0)
- * b. The other bits such as sfr settings / modesel may all
- * be set to 0.
- *
- * This should only be done on init and resume from S3 with
- * both PLLs disabled, or we risk losing DPIO and PLL
- * synchronization.
- */
- if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC && enable)
- I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
-}
-
-static void vlv_set_power_well(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well, bool enable)
-{
- enum punit_power_well power_well_id = power_well->data;
-
- __vlv_set_power_well(dev_priv, power_well_id, enable);
}
static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -5930,6 +6208,53 @@ static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
vlv_set_power_well(dev_priv, power_well, false);
}
+static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+ /*
+ * Enable the CRI clock source so we can get at the
+ * display and the reference clock for VGA
+ * hotplug / manual detection.
+ */
+ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+ DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+ udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ /*
+ * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+ * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
+ * a. GUnit 0x2110 bit[0] set to 1 (def 0)
+ * b. The other bits such as sfr settings / modesel may all
+ * be set to 0.
+ *
+ * This should only be done on init and resume from S3 with
+ * both PLLs disabled, or we risk losing DPIO and PLL
+ * synchronization.
+ */
+ I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+}
+
+static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ struct drm_device *dev = dev_priv->dev;
+ enum pipe pipe;
+
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+ for_each_pipe(pipe)
+ assert_pll_disabled(dev_priv, pipe);
+
+ /* Assert common reset */
+ I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
static void check_power_well_state(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
@@ -6079,6 +6404,7 @@ EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
BIT(POWER_DOMAIN_PORT_CRT) | \
+ BIT(POWER_DOMAIN_PLLS) | \
BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS ( \
(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
@@ -6178,6 +6504,13 @@ static const struct i915_power_well_ops vlv_display_power_well_ops = {
.is_enabled = vlv_power_well_enabled,
};
+static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
+ .sync_hw = vlv_power_well_sync_hw,
+ .enable = vlv_dpio_cmn_power_well_enable,
+ .disable = vlv_dpio_cmn_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
.sync_hw = vlv_power_well_sync_hw,
.enable = vlv_power_well_enable,
@@ -6238,10 +6571,25 @@ static struct i915_power_well vlv_power_wells[] = {
.name = "dpio-common",
.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
- .ops = &vlv_dpio_power_well_ops,
+ .ops = &vlv_dpio_cmn_power_well_ops,
},
};
+static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
+ enum punit_power_well power_well_id)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *power_well;
+ int i;
+
+ for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+ if (power_well->data == power_well_id)
+ return power_well;
+ }
+
+ return NULL;
+}
+
#define set_power_wells(power_domains, __power_wells) ({ \
(power_domains)->power_wells = (__power_wells); \
(power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
@@ -6292,11 +6640,50 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
mutex_unlock(&power_domains->lock);
}
+static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *cmn =
+ lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+ struct i915_power_well *disp2d =
+ lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
+
+ /* nothing to do if common lane is already off */
+ if (!cmn->ops->is_enabled(dev_priv, cmn))
+ return;
+
+ /* If the display might already be active, skip this */
+ if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
+ I915_READ(DPIO_CTL) & DPIO_CMNRST)
+ return;
+
+ DRM_DEBUG_KMS("toggling display PHY side reset\n");
+
+ /* cmnlane needs DPLL registers */
+ disp2d->ops->enable(dev_priv, disp2d);
+
+ /*
+ * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
+ * Need to assert and de-assert PHY SB reset by gating the
+ * common lane power, then un-gating it.
+ * Simply ungating doesn't reset the PHY thoroughly enough to
+ * get the ports and lanes running.
+ */
+ cmn->ops->disable(dev_priv, cmn);
+}
+
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
power_domains->initializing = true;
+
+ if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+ mutex_lock(&power_domains->lock);
+ vlv_cmnlane_wa(dev_priv);
+ mutex_unlock(&power_domains->lock);
+ }
+
/* For now, we need the power well to be always enabled. */
intel_display_set_init_power(dev_priv, true);
intel_power_domains_resume(dev_priv);
@@ -6469,7 +6856,7 @@ void intel_init_pm(struct drm_device *dev)
(dev_priv->is_ddr3 == 1) ? "3" : "2",
dev_priv->fsb_freq, dev_priv->mem_freq);
/* Disable CxSR and never update its watermark again */
- pineview_disable_cxsr(dev);
+ intel_set_memory_cxsr(dev_priv, false);
dev_priv->display.update_wm = NULL;
} else
dev_priv->display.update_wm = pineview_update_wm;
@@ -6552,7 +6939,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
return 0;
}
-int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
+static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
int div;
@@ -6574,7 +6961,7 @@ int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
}
-int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
+static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
int mul;
@@ -6596,6 +6983,80 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
}
+static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
+{
+ int div, freq;
+
+ switch (dev_priv->rps.cz_freq) {
+ case 200:
+ div = 5;
+ break;
+ case 267:
+ div = 6;
+ break;
+ case 320:
+ case 333:
+ case 400:
+ div = 8;
+ break;
+ default:
+ return -1;
+ }
+
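+ /* Convert the PUNIT opcode to MHz using the CZ-clock-based divider above. */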
+ freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2);
+
+ return freq;
+}
+
+static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
+{
+ int mul, opcode;
+
+ switch (dev_priv->rps.cz_freq) {
+ case 200:
+ mul = 5;
+ break;
+ case 267:
+ mul = 6;
+ break;
+ case 320:
+ case 333:
+ case 400:
+ mul = 8;
+ break;
+ default:
+ return -1;
+ }
+
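+ /* Inverse of chv_gpu_freq(): convert MHz back to a PUNIT opcode. */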
+ opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2);
+
+ return opcode;
+}
+
+int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
+{
+ int ret = -1;
+
+ if (IS_CHERRYVIEW(dev_priv->dev))
+ ret = chv_gpu_freq(dev_priv, val);
+ else if (IS_VALLEYVIEW(dev_priv->dev))
+ ret = byt_gpu_freq(dev_priv, val);
+
+ return ret;
+}
+
+int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
+{
+ int ret = -1;
+
+ if (IS_CHERRYVIEW(dev_priv->dev))
+ ret = chv_freq_opcode(dev_priv, val);
+ else if (IS_VALLEYVIEW(dev_priv->dev))
+ ret = byt_freq_opcode(dev_priv, val);
+
+ return ret;
+}
+
void intel_pm_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6606,5 +7067,5 @@ void intel_pm_setup(struct drm_device *dev)
intel_gen6_powersave_work);
dev_priv->pm.suspended = false;
- dev_priv->pm.irqs_disabled = false;
+ dev_priv->pm._irqs_disabled = false;
}
diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/intel_renderstate.h
index a5e783a9928a..fd4f66231d30 100644
--- a/drivers/gpu/drm/i915/intel_renderstate.h
+++ b/drivers/gpu/drm/i915/intel_renderstate.h
@@ -28,7 +28,6 @@
struct intel_renderstate_rodata {
const u32 *reloc;
- const u32 reloc_items;
const u32 *batch;
const u32 batch_items;
};
@@ -40,7 +39,6 @@ extern const struct intel_renderstate_rodata gen8_null_state;
#define RO_RENDERSTATE(_g) \
const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
.reloc = gen ## _g ## _null_state_relocs, \
- .reloc_items = sizeof(gen ## _g ## _null_state_relocs)/4, \
.batch = gen ## _g ## _null_state_batch, \
.batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \
}
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen6.c b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
index 740538ad0977..56c1429d8a60 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen6.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
@@ -6,6 +6,7 @@ static const u32 gen6_null_state_relocs[] = {
0x0000002c,
0x000001e0,
0x000001e4,
+ -1,
};
static const u32 gen6_null_state_batch[] = {
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen7.c b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
index 6fa7ff2a1298..419e35a7b0ff 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen7.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
@@ -5,6 +5,7 @@ static const u32 gen7_null_state_relocs[] = {
0x00000010,
0x00000018,
0x000001ec,
+ -1,
};
static const u32 gen7_null_state_batch[] = {
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
index 5c875615d42a..75ef1b5de45c 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen8.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
@@ -5,6 +5,7 @@ static const u32 gen8_null_state_relocs[] = {
0x00000050,
0x00000060,
0x000003ec,
+ -1,
};
static const u32 gen8_null_state_batch[] = {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 279488addf3f..b3d8f766fa7f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -48,9 +48,8 @@ static inline int __ring_space(int head, int tail, int size)
return space;
}
-static inline int ring_space(struct intel_engine_cs *ring)
+static inline int ring_space(struct intel_ringbuffer *ringbuf)
{
- struct intel_ringbuffer *ringbuf = ring->buffer;
return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
}
@@ -545,7 +544,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
else {
ringbuf->head = I915_READ_HEAD(ring);
ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ringbuf->space = ring_space(ring);
+ ringbuf->space = ring_space(ringbuf);
ringbuf->last_retired_head = -1;
}
@@ -604,6 +603,8 @@ static int init_render_ring(struct intel_engine_cs *ring)
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = init_ring_common(ring);
+ if (ret)
+ return ret;
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
@@ -658,6 +659,13 @@ static int init_render_ring(struct intel_engine_cs *ring)
static void render_ring_cleanup(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->semaphore_obj) {
+ i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
+ drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
+ dev_priv->semaphore_obj = NULL;
+ }
if (ring->scratch.obj == NULL)
return;
@@ -671,29 +679,96 @@ static void render_ring_cleanup(struct intel_engine_cs *ring)
ring->scratch.obj = NULL;
}
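+/*
+ * On gen8 the render ring signals its semaphore mailboxes with
+ * PIPE_CONTROL QW writes (8 dwords per target ring); the other
+ * rings use the MI_FLUSH_DW variant in gen8_xcs_signal() below.
+ */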
+static int gen8_rcs_signal(struct intel_engine_cs *signaller,
+ unsigned int num_dwords)
+{
+#define MBOX_UPDATE_DWORDS 8
+ struct drm_device *dev = signaller->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *waiter;
+ int i, ret, num_rings;
+
+ num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
+#undef MBOX_UPDATE_DWORDS
+
+ ret = intel_ring_begin(signaller, num_dwords);
+ if (ret)
+ return ret;
+
+ for_each_ring(waiter, dev_priv, i) {
+ u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+ if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
+ continue;
+
+ intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
+ intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_FLUSH_ENABLE);
+ intel_ring_emit(signaller, lower_32_bits(gtt_offset));
+ intel_ring_emit(signaller, upper_32_bits(gtt_offset));
+ intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+ intel_ring_emit(signaller, 0);
+ intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
+ MI_SEMAPHORE_TARGET(waiter->id));
+ intel_ring_emit(signaller, 0);
+ }
+
+ return 0;
+}
+
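+/* Non-render rings signal with a 6-dword MI_FLUSH_DW store instead. */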
+static int gen8_xcs_signal(struct intel_engine_cs *signaller,
+ unsigned int num_dwords)
+{
+#define MBOX_UPDATE_DWORDS 6
+ struct drm_device *dev = signaller->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *waiter;
+ int i, ret, num_rings;
+
+ num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
+#undef MBOX_UPDATE_DWORDS
+
+ ret = intel_ring_begin(signaller, num_dwords);
+ if (ret)
+ return ret;
+
+ for_each_ring(waiter, dev_priv, i) {
+ u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+ if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
+ continue;
+
+ intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
+ MI_FLUSH_DW_OP_STOREDW);
+ intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
+ MI_FLUSH_DW_USE_GTT);
+ intel_ring_emit(signaller, upper_32_bits(gtt_offset));
+ intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+ intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
+ MI_SEMAPHORE_TARGET(waiter->id));
+ intel_ring_emit(signaller, 0);
+ }
+
+ return 0;
+}
+
static int gen6_signal(struct intel_engine_cs *signaller,
unsigned int num_dwords)
{
struct drm_device *dev = signaller->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *useless;
- int i, ret;
+ int i, ret, num_rings;
- /* NB: In order to be able to do semaphore MBOX updates for varying
- * number of rings, it's easiest if we round up each individual update
- * to a multiple of 2 (since ring updates must always be a multiple of
- * 2) even though the actual update only requires 3 dwords.
- */
-#define MBOX_UPDATE_DWORDS 4
- if (i915_semaphore_is_enabled(dev))
- num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
- else
- return intel_ring_begin(signaller, num_dwords);
+#define MBOX_UPDATE_DWORDS 3
+ num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
+#undef MBOX_UPDATE_DWORDS
ret = intel_ring_begin(signaller, num_dwords);
if (ret)
return ret;
-#undef MBOX_UPDATE_DWORDS
for_each_ring(useless, dev_priv, i) {
u32 mbox_reg = signaller->semaphore.mbox.signal[i];
@@ -701,15 +776,13 @@ static int gen6_signal(struct intel_engine_cs *signaller,
intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(signaller, mbox_reg);
intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
- intel_ring_emit(signaller, MI_NOOP);
- } else {
- intel_ring_emit(signaller, MI_NOOP);
- intel_ring_emit(signaller, MI_NOOP);
- intel_ring_emit(signaller, MI_NOOP);
- intel_ring_emit(signaller, MI_NOOP);
}
}
+ /* If num_dwords was rounded, make sure the tail pointer is correct */
+ if (num_rings % 2 == 0)
+ intel_ring_emit(signaller, MI_NOOP);
+
return 0;
}
@@ -727,7 +800,11 @@ gen6_add_request(struct intel_engine_cs *ring)
{
int ret;
- ret = ring->semaphore.signal(ring, 4);
+ if (ring->semaphore.signal)
+ ret = ring->semaphore.signal(ring, 4);
+ else
+ ret = intel_ring_begin(ring, 4);
+
if (ret)
return ret;
@@ -754,6 +831,32 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
* @signaller - ring which has signalled, or will signal
* @seqno - seqno which the waiter will block on
*/
+
+static int
+gen8_ring_sync(struct intel_engine_cs *waiter,
+ struct intel_engine_cs *signaller,
+ u32 seqno)
+{
+ struct drm_i915_private *dev_priv = waiter->dev->dev_private;
+ int ret;
+
+ ret = intel_ring_begin(waiter, 4);
+ if (ret)
+ return ret;
+
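+ /* Poll the mailbox until its value is >= seqno (SAD_GTE_SDD compare). */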
+ intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD);
+ intel_ring_emit(waiter, seqno);
+ intel_ring_emit(waiter,
+ lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
+ intel_ring_emit(waiter,
+ upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
+ intel_ring_advance(waiter);
+ return 0;
+}
+
static int
gen6_ring_sync(struct intel_engine_cs *waiter,
struct intel_engine_cs *signaller,
@@ -901,7 +1004,7 @@ gen5_ring_get_irq(struct intel_engine_cs *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0)
- ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+ gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return true;
@@ -916,7 +1019,7 @@ gen5_ring_put_irq(struct intel_engine_cs *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0)
- ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+ gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
@@ -1109,7 +1212,7 @@ gen6_ring_get_irq(struct intel_engine_cs *ring)
GT_PARITY_ERROR(dev)));
else
I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
- ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+ gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -1129,7 +1232,7 @@ gen6_ring_put_irq(struct intel_engine_cs *ring)
I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
else
I915_WRITE_IMR(ring, ~0);
- ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+ gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
@@ -1147,7 +1250,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
- snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
+ gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -1167,7 +1270,7 @@ hsw_vebox_put_irq(struct intel_engine_cs *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
I915_WRITE_IMR(ring, ~0);
- snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
+ gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
@@ -1329,6 +1432,7 @@ static int init_status_page(struct intel_engine_cs *ring)
struct drm_i915_gem_object *obj;
if ((obj = ring->status_page.obj) == NULL) {
+ unsigned flags;
int ret;
obj = i915_gem_alloc_object(ring->dev, 4096);
@@ -1341,7 +1445,20 @@ static int init_status_page(struct intel_engine_cs *ring)
if (ret)
goto err_unref;
- ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
+ flags = 0;
+ if (!HAS_LLC(ring->dev))
+ /* On g33, we cannot place HWS above 256MiB, so
+ * restrict its pinning to the low mappable arena.
+ * Though this restriction is not documented for
+ * gen4, gen5, or byt, they also behave similarly
+ * and hang if the HWS is placed at the top of the
+ * GTT. To generalise, it appears that all !llc
+ * platforms have issues with us placing the HWS
+ * above the mappable region (even though we never
+ * actually map it).
+ */
+ flags |= PIN_MAPPABLE;
+ ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
if (ret) {
err_unref:
drm_gem_object_unreference(&obj->base);
@@ -1378,15 +1495,25 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
return 0;
}
-static int allocate_ring_buffer(struct intel_engine_cs *ring)
+static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+{
+ if (!ringbuf->obj)
+ return;
+
+ iounmap(ringbuf->virtual_start);
+ i915_gem_object_ggtt_unpin(ringbuf->obj);
+ drm_gem_object_unreference(&ringbuf->obj->base);
+ ringbuf->obj = NULL;
+}
+
+static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
+ struct intel_ringbuffer *ringbuf)
{
- struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_ringbuffer *ringbuf = ring->buffer;
struct drm_i915_gem_object *obj;
int ret;
- if (intel_ring_initialized(ring))
+ if (ringbuf->obj)
return 0;
obj = NULL;
@@ -1397,6 +1524,9 @@ static int allocate_ring_buffer(struct intel_engine_cs *ring)
if (obj == NULL)
return -ENOMEM;
+ /* mark ring buffers as read-only from the GPU side by default */
+ obj->gt_ro = 1;
+
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
if (ret)
goto err_unref;
@@ -1455,7 +1585,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto error;
}
- ret = allocate_ring_buffer(ring);
+ ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
if (ret) {
DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
goto error;
@@ -1496,11 +1626,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
intel_stop_ring_buffer(ring);
WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
- iounmap(ringbuf->virtual_start);
-
- i915_gem_object_ggtt_unpin(ringbuf->obj);
- drm_gem_object_unreference(&ringbuf->obj->base);
- ringbuf->obj = NULL;
+ intel_destroy_ringbuffer_obj(ringbuf);
ring->preallocated_lazy_request = NULL;
ring->outstanding_lazy_seqno = 0;
@@ -1526,7 +1652,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
- ringbuf->space = ring_space(ring);
+ ringbuf->space = ring_space(ringbuf);
if (ringbuf->space >= n)
return 0;
}
@@ -1549,7 +1675,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
- ringbuf->space = ring_space(ring);
+ ringbuf->space = ring_space(ringbuf);
return 0;
}
@@ -1578,7 +1704,7 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
trace_i915_ring_wait_begin(ring);
do {
ringbuf->head = I915_READ_HEAD(ring);
- ringbuf->space = ring_space(ring);
+ ringbuf->space = ring_space(ringbuf);
if (ringbuf->space >= n) {
ret = 0;
break;
@@ -1630,7 +1756,7 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
iowrite32(MI_NOOP, virt++);
ringbuf->tail = 0;
- ringbuf->space = ring_space(ring);
+ ringbuf->space = ring_space(ringbuf);
return 0;
}
@@ -1746,14 +1872,15 @@ int intel_ring_cacheline_align(struct intel_engine_cs *ring)
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
BUG_ON(ring->outstanding_lazy_seqno);
- if (INTEL_INFO(ring->dev)->gen >= 6) {
+ if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
- if (HAS_VEBOX(ring->dev))
+ if (HAS_VEBOX(dev))
I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
}
@@ -1941,45 +2068,74 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct drm_i915_gem_object *obj;
+ int ret;
ring->name = "render ring";
ring->id = RCS;
ring->mmio_base = RENDER_RING_BASE;
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_INFO(dev)->gen >= 8) {
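+ /*
+ * Gen8 semaphores are signalled via GGTT addresses in a shared
+ * bo rather than per-ring MMIO mailboxes, so allocate and pin
+ * that page up front.
+ */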
+ if (i915_semaphore_is_enabled(dev)) {
+ obj = i915_gem_alloc_object(dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
+ i915.semaphores = 0;
+ } else {
+ i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
+ if (ret != 0) {
+ drm_gem_object_unreference(&obj->base);
+ DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
+ i915.semaphores = 0;
+ } else
+ dev_priv->semaphore_obj = obj;
+ }
+ }
+ ring->add_request = gen6_add_request;
+ ring->flush = gen8_render_ring_flush;
+ ring->irq_get = gen8_ring_get_irq;
+ ring->irq_put = gen8_ring_put_irq;
+ ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
+ if (i915_semaphore_is_enabled(dev)) {
+ WARN_ON(!dev_priv->semaphore_obj);
+ ring->semaphore.sync_to = gen8_ring_sync;
+ ring->semaphore.signal = gen8_rcs_signal;
+ GEN8_RING_SEMAPHORE_INIT;
+ }
+ } else if (INTEL_INFO(dev)->gen >= 6) {
ring->add_request = gen6_add_request;
ring->flush = gen7_render_ring_flush;
if (INTEL_INFO(dev)->gen == 6)
ring->flush = gen6_render_ring_flush;
- if (INTEL_INFO(dev)->gen >= 8) {
- ring->flush = gen8_render_ring_flush;
- ring->irq_get = gen8_ring_get_irq;
- ring->irq_put = gen8_ring_put_irq;
- } else {
- ring->irq_get = gen6_ring_get_irq;
- ring->irq_put = gen6_ring_put_irq;
- }
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
- /*
- * The current semaphore is only applied on pre-gen8 platform.
- * And there is no VCS2 ring on the pre-gen8 platform. So the
- * semaphore between RCS and VCS2 is initialized as INVALID.
- * Gen8 will initialize the sema between VCS2 and RCS later.
- */
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen6_ring_sync;
+ ring->semaphore.signal = gen6_signal;
+ /*
+ * The current semaphore is only applied on pre-gen8
+ * platform. And there is no VCS2 ring on the pre-gen8
+ * platform. So the semaphore between RCS and VCS2 is
+ * initialized as INVALID. Gen8 will initialize the
+ * sema between VCS2 and RCS later.
+ */
+ ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
+ ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
+ ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
+ ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
+ ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
+ ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
+ ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ }
} else if (IS_GEN5(dev)) {
ring->add_request = pc_render_add_request;
ring->flush = gen4_render_ring_flush;
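
The gen6 mbox values wired up above are consumed on the wait side; a hedged sketch of the MI_SEMAPHORE_MBOX sequence gen6_ring_sync is expected to emit, where waiter, signaller and seqno are the usual sync_to() arguments and wait_mbox comes from the table initialized here:

    u32 dw1 = MI_SEMAPHORE_MBOX |
    	      MI_SEMAPHORE_COMPARE |
    	      MI_SEMAPHORE_REGISTER;
    u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];

    WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);

    /* The hardware comparison is strictly greater-than, so the waiter
     * polls the signaller's mbox register for a value above seqno - 1. */
    intel_ring_emit(waiter, dw1 | wait_mbox);
    intel_ring_emit(waiter, seqno - 1);
    intel_ring_emit(waiter, 0);
    intel_ring_emit(waiter, MI_NOOP);
    intel_ring_advance(waiter);
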
@@ -2007,6 +2163,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->irq_enable_mask = I915_USER_INTERRUPT;
}
ring->write_tail = ring_write_tail;
+
if (IS_HASWELL(dev))
ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
else if (IS_GEN8(dev))
@@ -2024,9 +2181,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
/* Workaround batchbuffer to combat CS tlb bug. */
if (HAS_BROKEN_CS_TLB(dev)) {
- struct drm_i915_gem_object *obj;
- int ret;
-
obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
if (obj == NULL) {
DRM_ERROR("Failed to allocate batch bo\n");
@@ -2157,31 +2311,32 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
ring->irq_put = gen8_ring_put_irq;
ring->dispatch_execbuffer =
gen8_ring_dispatch_execbuffer;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen8_ring_sync;
+ ring->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT;
+ }
} else {
ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->dispatch_execbuffer =
gen6_ring_dispatch_execbuffer;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen6_ring_sync;
+ ring->semaphore.signal = gen6_signal;
+ ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
+ ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
+ ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
+ ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
+ ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
+ ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
+ ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ }
}
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
- /*
- * The current semaphore is only applied on pre-gen8 platform.
- * And there is no VCS2 ring on the pre-gen8 platform. So the
- * semaphore between VCS and VCS2 is initialized as INVALID.
- * Gen8 will initialize the sema between VCS2 and VCS later.
- */
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
} else {
ring->mmio_base = BSD_RING_BASE;
ring->flush = bsd_ring_flush;
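
On gen8 the signal side stores seqnos into the pinned semaphore bo rather than into per-ring registers; a sketch of the per-target emission gen8_xcs_signal presumably performs, using the signal_ggtt slots filled in by GEN8_RING_SEMAPHORE_INIT (the loop over rings and dword batching are elided):

    u64 gtt_offset = signaller->semaphore.signal_ggtt[waiter->id];

    if (gtt_offset != MI_SEMAPHORE_SYNC_INVALID) {
    	/* Store the new seqno into this signaller's slot for @waiter. */
    	intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
    				   MI_FLUSH_DW_OP_STOREDW);
    	intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
    				   MI_FLUSH_DW_USE_GTT);
    	intel_ring_emit(signaller, upper_32_bits(gtt_offset));
    	intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
    }
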
@@ -2218,7 +2373,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
return -EINVAL;
}
- ring->name = "bds2_ring";
+ ring->name = "bsd2 ring";
ring->id = VCS2;
ring->write_tail = ring_write_tail;
@@ -2233,25 +2388,11 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
ring->irq_put = gen8_ring_put_irq;
ring->dispatch_execbuffer =
gen8_ring_dispatch_execbuffer;
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
- /*
- * The current semaphore is only applied on the pre-gen8. And there
- * is no bsd2 ring on the pre-gen8. So now the semaphore_register
- * between VCS2 and other ring is initialized as invalid.
- * Gen8 will initialize the sema between VCS2 and other ring later.
- */
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
-
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen8_ring_sync;
+ ring->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT;
+ }
ring->init = init_ring_common;
return intel_init_ring_buffer(dev, ring);
@@ -2277,30 +2418,38 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
ring->irq_get = gen8_ring_get_irq;
ring->irq_put = gen8_ring_put_irq;
ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen8_ring_sync;
+ ring->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT;
+ }
} else {
ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.signal = gen6_signal;
+ ring->semaphore.sync_to = gen6_ring_sync;
+ /*
+ * The current semaphore is only applied on pre-gen8
+ * platform. And there is no VCS2 ring on the pre-gen8
+ * platform. So the semaphore between BCS and VCS2 is
+ * initialized as INVALID. Gen8 will initialize the
+ * sema between BCS and VCS2 later.
+ */
+ ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
+ ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
+ ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
+ ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
+ ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
+ ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
+ ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ }
}
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
- /*
- * The current semaphore is only applied on pre-gen8 platform. And
- * there is no VCS2 ring on the pre-gen8 platform. So the semaphore
- * between BCS and VCS2 is initialized as INVALID.
- * Gen8 will initialize the sema between BCS and VCS2 later.
- */
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
ring->init = init_ring_common;
return intel_init_ring_buffer(dev, ring);
@@ -2327,24 +2476,31 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
ring->irq_get = gen8_ring_get_irq;
ring->irq_put = gen8_ring_put_irq;
ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen8_ring_sync;
+ ring->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT;
+ }
} else {
ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
ring->irq_get = hsw_vebox_get_irq;
ring->irq_put = hsw_vebox_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen6_ring_sync;
+ ring->semaphore.signal = gen6_signal;
+ ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
+ ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
+ ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
+ ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
+ ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
+ ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
+ ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ }
}
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
ring->init = init_ring_common;
return intel_init_ring_buffer(dev, ring);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index e72017bdcd7f..ed5941078f92 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -40,6 +40,32 @@ struct intel_hw_status_page {
#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
+/* The seqno is actually only a u32, but since we plan to use MI_FLUSH_DW to
+ * do the writes, and MI_FLUSH_DW requires qword-aligned offsets, simply
+ * pretend each slot is 8 bytes wide.
+ */
+#define i915_semaphore_seqno_size sizeof(uint64_t)
+#define GEN8_SIGNAL_OFFSET(__ring, to) \
+ (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
+ ((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
+ (i915_semaphore_seqno_size * (to)))
+
+#define GEN8_WAIT_OFFSET(__ring, from) \
+ (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
+ ((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
+ (i915_semaphore_seqno_size * (__ring)->id))
+
+#define GEN8_RING_SEMAPHORE_INIT do { \
+ if (!dev_priv->semaphore_obj) { \
+ break; \
+ } \
+ ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
+ ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
+ ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
+ ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
+ ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
+ ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
+	} while (0)
+
enum intel_ring_hangcheck_action {
HANGCHECK_IDLE = 0,
HANGCHECK_WAIT,
@@ -127,15 +153,55 @@ struct intel_engine_cs {
#define I915_DISPATCH_PINNED 0x2
void (*cleanup)(struct intel_engine_cs *ring);
+ /* GEN8 signal/wait table - never trust comments!
+ * signal to signal to signal to signal to signal to
+ * RCS VCS BCS VECS VCS2
+ * --------------------------------------------------------------------
+ * RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
+ * |-------------------------------------------------------------------
+ * VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
+ * |-------------------------------------------------------------------
+ * BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
+ * |-------------------------------------------------------------------
+ * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP (0x90) | VCS2 (0x98) |
+ * |-------------------------------------------------------------------
+ * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP (0xc0) |
+ * |-------------------------------------------------------------------
+ *
+ * Generalization:
+ * f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
+	 * i.e. the transpose of g(x, y)
+ *
+ * sync from sync from sync from sync from sync from
+ * RCS VCS BCS VECS VCS2
+ * --------------------------------------------------------------------
+ * RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
+ * |-------------------------------------------------------------------
+ * VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
+ * |-------------------------------------------------------------------
+ * BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
+ * |-------------------------------------------------------------------
+ * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP (0x90) | VCS2 (0xb8) |
+ * |-------------------------------------------------------------------
+ * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP (0xc0) |
+ * |-------------------------------------------------------------------
+ *
+ * Generalization:
+ * g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
+	 * i.e. the transpose of f(x, y)
+ */
struct {
u32 sync_seqno[I915_NUM_RINGS-1];
- struct {
- /* our mbox written by others */
- u32 wait[I915_NUM_RINGS];
- /* mboxes this ring signals to */
- u32 signal[I915_NUM_RINGS];
- } mbox;
+ union {
+ struct {
+ /* our mbox written by others */
+ u32 wait[I915_NUM_RINGS];
+ /* mboxes this ring signals to */
+ u32 signal[I915_NUM_RINGS];
+ } mbox;
+ u64 signal_ggtt[I915_NUM_RINGS];
+ };
/* AKA wait() */
int (*sync_to)(struct intel_engine_cs *ring,
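
The offsets in both tables follow directly from f and g; a standalone snippet (plain userspace C, with the ring ids and qword stride assumed from this header) that regenerates the signal table row by row, for checking against the comment:

    #include <stdio.h>

    enum ring_id { RCS, VCS, BCS, VECS, VCS2, I915_NUM_RINGS };
    #define SEQNO_SIZE 8	/* i915_semaphore_seqno_size: one qword per slot */

    int main(void)
    {
    	/* f(x, y) = x * NUM_RINGS * 8 + y * 8, i.e. GEN8_SIGNAL_OFFSET
    	 * relative to the start of the semaphore bo. */
    	for (int x = RCS; x < I915_NUM_RINGS; x++) {
    		for (int y = RCS; y < I915_NUM_RINGS; y++)
    			printf(" 0x%02x", x * I915_NUM_RINGS * SEQNO_SIZE +
    					  y * SEQNO_SIZE);
    		printf("\n");	/* one row per signalling ring: 0x00..0xc0 */
    	}
    	return 0;
    }
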
@@ -238,9 +304,11 @@ intel_ring_sync_index(struct intel_engine_cs *ring,
int idx;
/*
- * cs -> 0 = vcs, 1 = bcs
- * vcs -> 0 = bcs, 1 = cs,
- * bcs -> 0 = cs, 1 = vcs.
+ * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
+ * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
+	 * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
+ * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
+ * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
*/
idx = (other - ring) - 1;
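
For the wrap-around the comment describes, the index reduces to the distance in the engine array modulo the ring count; a hypothetical self-check, assuming the unchanged context below adds I915_NUM_RINGS when the difference goes negative:

    /* ids mirror dev_priv->ring[] order: RCS=0, VCS=1, BCS=2, VECS=3, VCS2=4 */
    static int sync_index(int ring, int other)
    {
    	int idx = (other - ring) - 1;

    	if (idx < 0)
    		idx += 5;	/* I915_NUM_RINGS */
    	return idx;
    }

    /* sync_index(BCS, VECS) == 0 and sync_index(BCS, VCS) == 3, matching
     * the comment above. */
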
@@ -318,9 +386,9 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev);
u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
void intel_ring_setup_status_page(struct intel_engine_cs *ring);
-static inline u32 intel_ring_get_tail(struct intel_engine_cs *ring)
+static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
- return ring->buffer->tail;
+ return ringbuf->tail;
}
static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 20375cc7f82d..9350edd6728d 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2433,7 +2433,7 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
connector->base.unregister = intel_sdvo_connector_unregister;
intel_connector_attach_encoder(&connector->base, &encoder->base);
- ret = drm_sysfs_connector_add(drm_connector);
+ ret = drm_connector_register(drm_connector);
if (ret < 0)
goto err1;
@@ -2446,7 +2446,7 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
return 0;
err2:
- drm_sysfs_connector_remove(drm_connector);
+ drm_connector_unregister(drm_connector);
err1:
drm_connector_cleanup(drm_connector);
@@ -2559,7 +2559,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
return true;
err:
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
intel_sdvo_destroy(connector);
return false;
}
@@ -2638,7 +2638,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
return true;
err:
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
intel_sdvo_destroy(connector);
return false;
}
@@ -2711,7 +2711,7 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
list_for_each_entry_safe(connector, tmp,
&dev->mode_config.connector_list, head) {
if (intel_attached_encoder(connector) == &intel_sdvo->base) {
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
intel_sdvo_destroy(connector);
}
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 9a17b4e92ef4..168c6652cda1 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -218,7 +218,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
sprctl |= SP_ENABLE;
- intel_update_sprite_watermarks(dplane, crtc, src_w, pixel_size, true,
+ intel_update_sprite_watermarks(dplane, crtc, src_w, src_h,
+ pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
/* Sizes are 0 based */
@@ -283,7 +284,7 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
if (atomic_update)
intel_pipe_update_end(intel_crtc, start_vbl_count);
- intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false);
+ intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
}
static int
@@ -406,7 +407,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
sprctl |= SPRITE_PIPE_CSC_ENABLE;
- intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
+ intel_update_sprite_watermarks(plane, crtc, src_w, src_h, pixel_size,
+ true,
src_w != crtc_w || src_h != crtc_h);
/* Sizes are 0 based */
@@ -486,7 +488,7 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
*/
intel_wait_for_vblank(dev, pipe);
- intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
+ intel_update_sprite_watermarks(plane, crtc, 0, 0, 0, false, false);
}
static int
@@ -606,7 +608,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
dvscntr |= DVS_ENABLE;
- intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
+ intel_update_sprite_watermarks(plane, crtc, src_w, src_h,
+ pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
/* Sizes are 0 based */
@@ -681,7 +684,7 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
*/
intel_wait_for_vblank(dev, pipe);
- intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
+ intel_update_sprite_watermarks(plane, crtc, 0, 0, 0, false, false);
}
static void
@@ -819,6 +822,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_device *dev = plane->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
+ enum pipe pipe = intel_crtc->pipe;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct drm_i915_gem_object *old_obj = intel_plane->obj;
@@ -1006,6 +1010,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
*/
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_SPRITE(pipe));
mutex_unlock(&dev->struct_mutex);
if (ret)
@@ -1039,6 +1045,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
else
intel_plane->disable_plane(plane, crtc);
+ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_SPRITE(pipe));
+
if (!primary_was_enabled && primary_enabled)
intel_post_enable_primary(crtc);
}
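
The sprite path now takes part in frontbuffer tracking: pinning hands the pipe's sprite bit from the old object to the new one, and the flip above flushes it. A hedged sketch of the bookkeeping i915_gem_track_fb is expected to perform, with the frontbuffer_bits field assumed from this series:

    /* Move ownership of @frontbuffer_bits from @old to @new; callers hold
     * struct_mutex, as in the hunks above. */
    void i915_gem_track_fb(struct drm_i915_gem_object *old,
    		       struct drm_i915_gem_object *new,
    		       unsigned frontbuffer_bits)
    {
    	if (old) {
    		WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
    		old->frontbuffer_bits &= ~frontbuffer_bits;
    	}

    	if (new) {
    		WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
    		new->frontbuffer_bits |= frontbuffer_bits;
    	}
    }
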
@@ -1068,6 +1076,7 @@ intel_disable_plane(struct drm_plane *plane)
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc;
+ enum pipe pipe;
if (!plane->fb)
return 0;
@@ -1076,6 +1085,7 @@ intel_disable_plane(struct drm_plane *plane)
return -EINVAL;
intel_crtc = to_intel_crtc(plane->crtc);
+ pipe = intel_crtc->pipe;
if (intel_crtc->active) {
bool primary_was_enabled = intel_crtc->primary_enabled;
@@ -1094,6 +1104,8 @@ intel_disable_plane(struct drm_plane *plane)
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(intel_plane->obj);
+ i915_gem_track_fb(intel_plane->obj, NULL,
+ INTEL_FRONTBUFFER_SPRITE(pipe));
mutex_unlock(&dev->struct_mutex);
intel_plane->obj = NULL;
@@ -1114,7 +1126,6 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_intel_sprite_colorkey *set = data;
- struct drm_mode_object *obj;
struct drm_plane *plane;
struct intel_plane *intel_plane;
int ret = 0;
@@ -1128,13 +1139,12 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
- if (!obj) {
+ plane = drm_plane_find(dev, set->plane_id);
+ if (!plane) {
ret = -ENOENT;
goto out_unlock;
}
- plane = obj_to_plane(obj);
intel_plane = to_intel_plane(plane);
ret = intel_plane->update_colorkey(plane, set);
@@ -1147,7 +1157,6 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_intel_sprite_colorkey *get = data;
- struct drm_mode_object *obj;
struct drm_plane *plane;
struct intel_plane *intel_plane;
int ret = 0;
@@ -1157,13 +1166,12 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
- if (!obj) {
+ plane = drm_plane_find(dev, get->plane_id);
+ if (!plane) {
ret = -ENOENT;
goto out_unlock;
}
- plane = obj_to_plane(obj);
intel_plane = to_intel_plane(plane);
intel_plane->get_colorkey(plane, get);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 67c6c9a2eb1c..e211eef4b7e4 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1680,5 +1680,5 @@ intel_tv_init(struct drm_device *dev)
drm_object_attach_property(&connector->base,
dev->mode_config.tv_bottom_margin_property,
intel_tv->margin[TV_MARGIN_BOTTOM]);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
}
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 4f6fef7ac069..e81bc3bdc533 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -231,8 +231,8 @@ static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
}
/* WaRsForcewakeWaitTC0:vlv */
- __gen6_gt_wait_for_thread_c0(dev_priv);
-
+ if (!IS_CHERRYVIEW(dev_priv->dev))
+ __gen6_gt_wait_for_thread_c0(dev_priv);
}
static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
@@ -250,9 +250,10 @@ static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
- /* The below doubles as a POSTING_READ */
- gen6_gt_check_fifodbg(dev_priv);
-
+	/* posting read from the same cacheline, but not FORCEWAKE_VLV itself */
+ __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
+ if (!IS_CHERRYVIEW(dev_priv->dev))
+ gen6_gt_check_fifodbg(dev_priv);
}
static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
@@ -315,7 +316,7 @@ static void gen6_force_wake_timer(unsigned long arg)
intel_runtime_pm_put(dev_priv);
}
-static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
+void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
@@ -357,16 +358,12 @@ static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
dev_priv->uncore.fifo_count =
__raw_i915_read32(dev_priv, GTFIFOCTL) &
GT_FIFO_FREE_ENTRIES_MASK;
- } else {
- dev_priv->uncore.forcewake_count = 0;
- dev_priv->uncore.fw_rendercount = 0;
- dev_priv->uncore.fw_mediacount = 0;
}
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-void intel_uncore_early_sanitize(struct drm_device *dev)
+void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -389,7 +386,7 @@ void intel_uncore_early_sanitize(struct drm_device *dev)
__raw_i915_write32(dev_priv, GTFIFODBG,
__raw_i915_read32(dev_priv, GTFIFODBG));
- intel_uncore_forcewake_reset(dev, false);
+ intel_uncore_forcewake_reset(dev, restore_forcewake);
}
void intel_uncore_sanitize(struct drm_device *dev)
@@ -469,16 +466,43 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
((reg) < 0x40000 && (reg) != FORCEWAKE)
-#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
- (((reg) >= 0x2000 && (reg) < 0x4000) ||\
- ((reg) >= 0x5000 && (reg) < 0x8000) ||\
- ((reg) >= 0xB000 && (reg) < 0x12000) ||\
- ((reg) >= 0x2E000 && (reg) < 0x30000))
+#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
-#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\
- (((reg) >= 0x12000 && (reg) < 0x14000) ||\
- ((reg) >= 0x22000 && (reg) < 0x24000) ||\
- ((reg) >= 0x30000 && (reg) < 0x40000))
+#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
+ (REG_RANGE((reg), 0x2000, 0x4000) || \
+ REG_RANGE((reg), 0x5000, 0x8000) || \
+ REG_RANGE((reg), 0xB000, 0x12000) || \
+ REG_RANGE((reg), 0x2E000, 0x30000))
+
+#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
+ (REG_RANGE((reg), 0x12000, 0x14000) || \
+ REG_RANGE((reg), 0x22000, 0x24000) || \
+ REG_RANGE((reg), 0x30000, 0x40000))
+
+#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
+ (REG_RANGE((reg), 0x2000, 0x4000) || \
+ REG_RANGE((reg), 0x5000, 0x8000) || \
+ REG_RANGE((reg), 0x8300, 0x8500) || \
+ REG_RANGE((reg), 0xB000, 0xC000) || \
+ REG_RANGE((reg), 0xE000, 0xE800))
+
+#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
+ (REG_RANGE((reg), 0x8800, 0x8900) || \
+ REG_RANGE((reg), 0xD000, 0xD800) || \
+ REG_RANGE((reg), 0x12000, 0x14000) || \
+ REG_RANGE((reg), 0x1A000, 0x1C000) || \
+ REG_RANGE((reg), 0x1E800, 0x1EA00) || \
+ REG_RANGE((reg), 0x30000, 0x40000))
+
+#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
+ (REG_RANGE((reg), 0x4000, 0x5000) || \
+ REG_RANGE((reg), 0x8000, 0x8300) || \
+ REG_RANGE((reg), 0x8500, 0x8600) || \
+ REG_RANGE((reg), 0x9000, 0xB000) || \
+ REG_RANGE((reg), 0xC000, 0xC800) || \
+ REG_RANGE((reg), 0xF000, 0x10000) || \
+ REG_RANGE((reg), 0x14000, 0x14400) || \
+ REG_RANGE((reg), 0x22000, 0x24000))
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
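
With these predicates, each Cherryview MMIO offset maps to at most one forcewake well (or to the common range, which grabs both); a hypothetical standalone self-test of the classification, with the macros copied from above and register values picked only for illustration:

    #include <assert.h>

    #define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

    #define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
    	(REG_RANGE((reg), 0x2000, 0x4000) || \
    	 REG_RANGE((reg), 0x5000, 0x8000) || \
    	 REG_RANGE((reg), 0x8300, 0x8500) || \
    	 REG_RANGE((reg), 0xB000, 0xC000) || \
    	 REG_RANGE((reg), 0xE000, 0xE800))

    #define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
    	(REG_RANGE((reg), 0x4000, 0x5000) || \
    	 REG_RANGE((reg), 0x8000, 0x8300) || \
    	 REG_RANGE((reg), 0x9000, 0xB000))	/* subset, for the test */

    int main(void)
    {
    	assert(FORCEWAKE_CHV_RENDER_RANGE_OFFSET(0x2100));	/* render well */
    	assert(!FORCEWAKE_CHV_RENDER_RANGE_OFFSET(0x9400));
    	assert(FORCEWAKE_CHV_COMMON_RANGE_OFFSET(0x9400));	/* both wells */
    	return 0;
    }
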
@@ -490,20 +514,30 @@ ilk_dummy_write(struct drm_i915_private *dev_priv)
}
static void
-hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
+hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
+ bool before)
{
+ const char *op = read ? "reading" : "writing to";
+ const char *when = before ? "before" : "after";
+
+ if (!i915.mmio_debug)
+ return;
+
if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
- DRM_ERROR("Unknown unclaimed register before writing to %x\n",
- reg);
+ WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
+ when, op, reg);
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
}
static void
-hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
+hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
+ if (i915.mmio_debug)
+ return;
+
if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
- DRM_ERROR("Unclaimed write to %x\n", reg);
+		DRM_ERROR("Unclaimed register detected. Please set i915.mmio_debug=1 to debug this problem.\n");
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
}
@@ -540,6 +574,7 @@ gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
REG_READ_HEADER(x); \
+ hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
if (dev_priv->uncore.forcewake_count == 0 && \
NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
dev_priv->uncore.funcs.force_wake_get(dev_priv, \
@@ -550,6 +585,7 @@ gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
} else { \
val = __raw_i915_read##x(dev_priv, reg); \
} \
+ hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
REG_READ_FOOTER; \
}
@@ -573,7 +609,35 @@ vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
REG_READ_FOOTER; \
}
+#define __chv_read(x) \
+static u##x \
+chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+ unsigned fwengine = 0; \
+ REG_READ_HEADER(x); \
+ if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine = FORCEWAKE_RENDER; \
+ } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine = FORCEWAKE_MEDIA; \
+ } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine |= FORCEWAKE_RENDER; \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine |= FORCEWAKE_MEDIA; \
+ } \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
+ REG_READ_FOOTER; \
+}
+__chv_read(8)
+__chv_read(16)
+__chv_read(32)
+__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
@@ -591,6 +655,7 @@ __gen4_read(16)
__gen4_read(32)
__gen4_read(64)
+#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __gen5_read
@@ -647,12 +712,13 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
- hsw_unclaimed_reg_clear(dev_priv, reg); \
+ hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
__raw_i915_write##x(dev_priv, reg, val); \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
- hsw_unclaimed_reg_check(dev_priv, reg); \
+ hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
+ hsw_unclaimed_reg_detect(dev_priv); \
REG_WRITE_FOOTER; \
}
@@ -681,6 +747,7 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
REG_WRITE_HEADER; \
+ hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
if (dev_priv->uncore.forcewake_count == 0) \
dev_priv->uncore.funcs.force_wake_get(dev_priv, \
@@ -692,9 +759,43 @@ gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
} else { \
__raw_i915_write##x(dev_priv, reg, val); \
} \
+ hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
+ hsw_unclaimed_reg_detect(dev_priv); \
REG_WRITE_FOOTER; \
}
+#define __chv_write(x) \
+static void \
+chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+ unsigned fwengine = 0; \
+ bool shadowed = is_gen8_shadowed(dev_priv, reg); \
+ REG_WRITE_HEADER; \
+ if (!shadowed) { \
+ if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine = FORCEWAKE_RENDER; \
+ } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine = FORCEWAKE_MEDIA; \
+ } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine |= FORCEWAKE_RENDER; \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine |= FORCEWAKE_MEDIA; \
+ } \
+ } \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
+ REG_WRITE_FOOTER; \
+}
+
+__chv_write(8)
+__chv_write(16)
+__chv_write(32)
+__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
@@ -716,6 +817,7 @@ __gen4_write(16)
__gen4_write(32)
__gen4_write(64)
+#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
@@ -731,7 +833,7 @@ void intel_uncore_init(struct drm_device *dev)
setup_timer(&dev_priv->uncore.force_wake_timer,
gen6_force_wake_timer, (unsigned long)dev_priv);
- intel_uncore_early_sanitize(dev);
+ intel_uncore_early_sanitize(dev, false);
if (IS_VALLEYVIEW(dev)) {
dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
@@ -779,14 +881,26 @@ void intel_uncore_init(struct drm_device *dev)
switch (INTEL_INFO(dev)->gen) {
default:
- dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
- dev_priv->uncore.funcs.mmio_writew = gen8_write16;
- dev_priv->uncore.funcs.mmio_writel = gen8_write32;
- dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
- dev_priv->uncore.funcs.mmio_readb = gen6_read8;
- dev_priv->uncore.funcs.mmio_readw = gen6_read16;
- dev_priv->uncore.funcs.mmio_readl = gen6_read32;
- dev_priv->uncore.funcs.mmio_readq = gen6_read64;
+ if (IS_CHERRYVIEW(dev)) {
+ dev_priv->uncore.funcs.mmio_writeb = chv_write8;
+ dev_priv->uncore.funcs.mmio_writew = chv_write16;
+ dev_priv->uncore.funcs.mmio_writel = chv_write32;
+ dev_priv->uncore.funcs.mmio_writeq = chv_write64;
+ dev_priv->uncore.funcs.mmio_readb = chv_read8;
+ dev_priv->uncore.funcs.mmio_readw = chv_read16;
+ dev_priv->uncore.funcs.mmio_readl = chv_read32;
+ dev_priv->uncore.funcs.mmio_readq = chv_read64;
+
+ } else {
+ dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
+ dev_priv->uncore.funcs.mmio_writew = gen8_write16;
+ dev_priv->uncore.funcs.mmio_writel = gen8_write32;
+ dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
+ dev_priv->uncore.funcs.mmio_readb = gen6_read8;
+ dev_priv->uncore.funcs.mmio_readw = gen6_read16;
+ dev_priv->uncore.funcs.mmio_readl = gen6_read32;
+ dev_priv->uncore.funcs.mmio_readq = gen6_read64;
+ }
break;
case 7:
case 6:
@@ -912,7 +1026,7 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
if (args->flags || args->pad)
return -EINVAL;
- if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN))
+ if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
return -EPERM;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1053,18 +1167,16 @@ static int gen6_do_reset(struct drm_device *dev)
int intel_gpu_reset(struct drm_device *dev)
{
- switch (INTEL_INFO(dev)->gen) {
- case 8:
- case 7:
- case 6: return gen6_do_reset(dev);
- case 5: return ironlake_do_reset(dev);
- case 4:
- if (IS_G4X(dev))
- return g4x_do_reset(dev);
- else
- return i965_do_reset(dev);
- default: return -ENODEV;
- }
+ if (INTEL_INFO(dev)->gen >= 6)
+ return gen6_do_reset(dev);
+ else if (IS_GEN5(dev))
+ return ironlake_do_reset(dev);
+ else if (IS_G4X(dev))
+ return g4x_do_reset(dev);
+ else if (IS_GEN4(dev))
+ return i965_do_reset(dev);
+ else
+ return -ENODEV;
}
void intel_uncore_check_errors(struct drm_device *dev)