Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dpi.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 19
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 50
-rw-r--r--  drivers/gpu/drm/exynos/regs-mixer.h | 1
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 25
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 44
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 58
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 46
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_cmd.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 66
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 8
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.h | 1
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_connector.c | 8
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 22
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 6
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c | 23
-rw-r--r--  drivers/gpu/drm/msm/msm_mmu.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/temp.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 3
-rw-r--r--  drivers/gpu/drm/qxl/qxl_irq.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/cikd.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/cypress_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/kv_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/ni_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 23
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 211
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 91
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dpm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/trinity_dpm.c | 11
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 1
79 files changed, 765 insertions(+), 370 deletions(-)
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 03711d00aaae..8218078b6133 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -419,8 +419,9 @@ long drm_ioctl(struct file *filp,
retcode = -EFAULT;
goto err_i1;
}
- } else
+ } else if (cmd & IOC_OUT) {
memset(kdata, 0, usize);
+ }
if (ioctl->flags & DRM_UNLOCKED)
retcode = func(dev, kdata, file_priv);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 482127f633c5..9e530f205ad2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -40,7 +40,7 @@ exynos_dpi_detect(struct drm_connector *connector, bool force)
{
struct exynos_dpi *ctx = connector_to_dpi(connector);
- if (!ctx->panel->connector)
+ if (ctx->panel && !ctx->panel->connector)
drm_panel_attach(ctx->panel, &ctx->connector);
return connector_status_connected;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index d91f27777537..ab7d182063c3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -765,24 +765,24 @@ static int exynos_drm_init(void)
return 0;
-err_unregister_pd:
- platform_device_unregister(exynos_drm_pdev);
-
err_remove_vidi:
#ifdef CONFIG_DRM_EXYNOS_VIDI
exynos_drm_remove_vidi();
+
+err_unregister_pd:
#endif
+ platform_device_unregister(exynos_drm_pdev);
return ret;
}
static void exynos_drm_exit(void)
{
+ platform_driver_unregister(&exynos_drm_platform_driver);
#ifdef CONFIG_DRM_EXYNOS_VIDI
exynos_drm_remove_vidi();
#endif
platform_device_unregister(exynos_drm_pdev);
- platform_driver_unregister(&exynos_drm_platform_driver);
}
module_init(exynos_drm_init);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 36535f398848..06cde4506278 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -343,7 +343,7 @@ struct exynos_drm_display * exynos_dpi_probe(struct device *dev);
int exynos_dpi_remove(struct device *dev);
#else
static inline struct exynos_drm_display *
-exynos_dpi_probe(struct device *dev) { return 0; }
+exynos_dpi_probe(struct device *dev) { return NULL; }
static inline int exynos_dpi_remove(struct device *dev) { return 0; }
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index bb45ab2e7384..33161ad38201 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -741,6 +741,8 @@ static void fimd_apply(struct exynos_drm_manager *mgr)
win_data = &ctx->win_data[i];
if (win_data->enabled)
fimd_win_commit(mgr, i);
+ else
+ fimd_win_disable(mgr, i);
}
fimd_commit(mgr);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index c104d0c9b385..aa259b0a873a 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -2090,6 +2090,11 @@ out:
static void hdmi_dpms(struct exynos_drm_display *display, int mode)
{
+ struct hdmi_context *hdata = display->ctx;
+ struct drm_encoder *encoder = hdata->encoder;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct drm_crtc_helper_funcs *funcs = NULL;
+
DRM_DEBUG_KMS("mode %d\n", mode);
switch (mode) {
@@ -2099,6 +2104,20 @@ static void hdmi_dpms(struct exynos_drm_display *display, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
+ /*
+ * The SFRs of VP and Mixer are updated by Vertical Sync of
+ * Timing generator which is a part of HDMI so the sequence
+ * to disable TV Subsystem should be as following,
+ * VP -> Mixer -> HDMI
+ *
+ * Below codes will try to disable Mixer and VP(if used)
+ * prior to disabling HDMI.
+ */
+ if (crtc)
+ funcs = crtc->helper_private;
+ if (funcs && funcs->dpms)
+ (*funcs->dpms)(crtc, mode);
+
hdmi_poweroff(display);
break;
default:
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 4c5aed7e54c8..7529946d0a74 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -377,6 +377,20 @@ static void mixer_run(struct mixer_context *ctx)
mixer_regs_dump(ctx);
}
+static void mixer_stop(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ int timeout = 20;
+
+ mixer_reg_writemask(res, MXR_STATUS, 0, MXR_STATUS_REG_RUN);
+
+ while (!(mixer_reg_read(res, MXR_STATUS) & MXR_STATUS_REG_IDLE) &&
+ --timeout)
+ usleep_range(10000, 12000);
+
+ mixer_regs_dump(ctx);
+}
+
static void vp_video_buffer(struct mixer_context *ctx, int win)
{
struct mixer_resources *res = &ctx->mixer_res;
@@ -497,13 +511,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
static void mixer_layer_update(struct mixer_context *ctx)
{
struct mixer_resources *res = &ctx->mixer_res;
- u32 val;
-
- val = mixer_reg_read(res, MXR_CFG);
- /* allow one update per vsync only */
- if (!(val & MXR_CFG_LAYER_UPDATE_COUNT_MASK))
- mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
+ mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
}
static void mixer_graph_buffer(struct mixer_context *ctx, int win)
@@ -1010,6 +1019,8 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
}
mutex_unlock(&mixer_ctx->mixer_mutex);
+ drm_vblank_get(mgr->crtc->dev, mixer_ctx->pipe);
+
atomic_set(&mixer_ctx->wait_vsync_event, 1);
/*
@@ -1020,6 +1031,8 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
!atomic_read(&mixer_ctx->wait_vsync_event),
HZ/20))
DRM_DEBUG_KMS("vblank wait timed out.\n");
+
+ drm_vblank_put(mgr->crtc->dev, mixer_ctx->pipe);
}
static void mixer_window_suspend(struct exynos_drm_manager *mgr)
@@ -1061,7 +1074,7 @@ static void mixer_poweron(struct exynos_drm_manager *mgr)
mutex_unlock(&ctx->mixer_mutex);
return;
}
- ctx->powered = true;
+
mutex_unlock(&ctx->mixer_mutex);
pm_runtime_get_sync(ctx->dev);
@@ -1072,6 +1085,12 @@ static void mixer_poweron(struct exynos_drm_manager *mgr)
clk_prepare_enable(res->sclk_mixer);
}
+ mutex_lock(&ctx->mixer_mutex);
+ ctx->powered = true;
+ mutex_unlock(&ctx->mixer_mutex);
+
+ mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
+
mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
mixer_win_reset(ctx);
@@ -1084,14 +1103,21 @@ static void mixer_poweroff(struct exynos_drm_manager *mgr)
struct mixer_resources *res = &ctx->mixer_res;
mutex_lock(&ctx->mixer_mutex);
- if (!ctx->powered)
- goto out;
+ if (!ctx->powered) {
+ mutex_unlock(&ctx->mixer_mutex);
+ return;
+ }
mutex_unlock(&ctx->mixer_mutex);
+ mixer_stop(ctx);
mixer_window_suspend(mgr);
ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
+ mutex_lock(&ctx->mixer_mutex);
+ ctx->powered = false;
+ mutex_unlock(&ctx->mixer_mutex);
+
clk_disable_unprepare(res->mixer);
if (ctx->vp_enabled) {
clk_disable_unprepare(res->vp);
@@ -1099,12 +1125,6 @@ static void mixer_poweroff(struct exynos_drm_manager *mgr)
}
pm_runtime_put_sync(ctx->dev);
-
- mutex_lock(&ctx->mixer_mutex);
- ctx->powered = false;
-
-out:
- mutex_unlock(&ctx->mixer_mutex);
}
static void mixer_dpms(struct exynos_drm_manager *mgr, int mode)
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
index 4537026bc385..5f32e1a29411 100644
--- a/drivers/gpu/drm/exynos/regs-mixer.h
+++ b/drivers/gpu/drm/exynos/regs-mixer.h
@@ -78,6 +78,7 @@
#define MXR_STATUS_BIG_ENDIAN (1 << 3)
#define MXR_STATUS_ENDIAN_MASK (1 << 3)
#define MXR_STATUS_SYNC_ENABLE (1 << 2)
+#define MXR_STATUS_REG_IDLE (1 << 1)
#define MXR_STATUS_REG_RUN (1 << 0)
/* bits for MXR_CFG */
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 240c331405b9..ac357b02bd35 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -810,6 +810,12 @@ static int
tda998x_encoder_mode_valid(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
+ if (mode->clock > 150000)
+ return MODE_CLOCK_HIGH;
+ if (mode->htotal >= BIT(13))
+ return MODE_BAD_HVALUE;
+ if (mode->vtotal >= BIT(11))
+ return MODE_BAD_VVALUE;
return MODE_OK;
}
@@ -1048,8 +1054,8 @@ read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
return i;
}
} else {
- for (i = 10; i > 0; i--) {
- msleep(10);
+ for (i = 100; i > 0; i--) {
+ msleep(1);
ret = reg_read(priv, REG_INT_FLAGS_2);
if (ret < 0)
return ret;
@@ -1183,7 +1189,6 @@ static void
tda998x_encoder_destroy(struct drm_encoder *encoder)
{
struct tda998x_priv *priv = to_tda998x_priv(encoder);
- drm_i2c_encoder_destroy(encoder);
/* disable all IRQs and free the IRQ handler */
cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
@@ -1193,6 +1198,7 @@ tda998x_encoder_destroy(struct drm_encoder *encoder)
if (priv->cec)
i2c_unregister_device(priv->cec);
+ drm_i2c_encoder_destroy(encoder);
kfree(priv);
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 601caa88c092..b8c689202c40 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -446,7 +446,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
memset(&stats, 0, sizeof(stats));
stats.file_priv = file->driver_priv;
+ spin_lock(&file->table_lock);
idr_for_each(&file->object_idr, per_file_stats, &stats);
+ spin_unlock(&file->table_lock);
/*
* Although we have a valid reference on file->pid, that does
* not guarantee that the task_struct who called get_pid() is
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 6c656392d67d..d44344140627 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1464,12 +1464,13 @@ static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
- int ret;
+ int ret = 0;
DRM_INFO("Replacing VGA console driver\n");
console_lock();
- ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
+ if (con_is_bound(&vga_con))
+ ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
if (ret == 0) {
ret = do_unregister_con_driver(&vga_con);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 49414d30e8d4..374f964323ad 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -656,6 +656,7 @@ enum intel_sbi_destination {
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
+#define QUIRK_BACKLIGHT_PRESENT (1<<3)
struct intel_fbdev;
struct intel_fbc_work;
@@ -977,6 +978,8 @@ struct i915_power_well {
bool always_on;
/* power well enable/disable usage count */
int count;
+ /* cached hw enabled state */
+ bool hw_enabled;
unsigned long domains;
unsigned long data;
const struct i915_power_well_ops *ops;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f36126383d26..d893e4da5dce 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1616,22 +1616,6 @@ out:
return ret;
}
-void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
-{
- struct i915_vma *vma;
-
- /*
- * Only the global gtt is relevant for gtt memory mappings, so restrict
- * list traversal to objects bound into the global address space. Note
- * that the active list should be empty, but better safe than sorry.
- */
- WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
- list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
- i915_gem_release_mmap(vma->obj);
- list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
- i915_gem_release_mmap(vma->obj);
-}
-
/**
* i915_gem_release_mmap - remove physical page mappings
* @obj: obj in question
@@ -1657,6 +1641,15 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
obj->fault_mappable = false;
}
+void
+i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+{
+ struct drm_i915_gem_object *obj;
+
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
+ i915_gem_release_mmap(obj);
+}
+
uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 3ffe308d5893..a5ddf3bce9c3 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -598,6 +598,7 @@ static int do_switch(struct intel_engine_cs *ring,
struct intel_context *from = ring->last_context;
struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
u32 hw_flags = 0;
+ bool uninitialized = false;
int ret, i;
if (from != NULL && ring == &dev_priv->ring[RCS]) {
@@ -696,19 +697,20 @@ static int do_switch(struct intel_engine_cs *ring,
i915_gem_context_unreference(from);
}
+ uninitialized = !to->is_initialized && from == NULL;
+ to->is_initialized = true;
+
done:
i915_gem_context_reference(to);
ring->last_context = to;
to->last_ring = ring;
- if (ring->id == RCS && !to->is_initialized && from == NULL) {
+ if (uninitialized) {
ret = i915_gem_render_state_init(ring);
if (ret)
DRM_ERROR("init render state: %d\n", ret);
}
- to->is_initialized = true;
-
return 0;
unpin_out:
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 3521f998a178..34894b573064 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -31,7 +31,7 @@
struct i915_render_state {
struct drm_i915_gem_object *obj;
unsigned long ggtt_offset;
- void *batch;
+ u32 *batch;
u32 size;
u32 len;
};
@@ -80,7 +80,7 @@ free:
static void render_state_free(struct i915_render_state *so)
{
- kunmap(so->batch);
+ kunmap(kmap_to_page(so->batch));
i915_gem_object_ggtt_unpin(so->obj);
drm_gem_object_unreference(&so->obj->base);
kfree(so);
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 62ef55ba061c..7465ab0fd396 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -74,6 +74,50 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
if (base == 0)
return 0;
+ /* make sure we don't clobber the GTT if it's within stolen memory */
+ if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
+ struct {
+ u32 start, end;
+ } stolen[2] = {
+ { .start = base, .end = base + dev_priv->gtt.stolen_size, },
+ { .start = base, .end = base + dev_priv->gtt.stolen_size, },
+ };
+ u64 gtt_start, gtt_end;
+
+ gtt_start = I915_READ(PGTBL_CTL);
+ if (IS_GEN4(dev))
+ gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
+ (gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
+ else
+ gtt_start &= PGTBL_ADDRESS_LO_MASK;
+ gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;
+
+ if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
+ stolen[0].end = gtt_start;
+ if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
+ stolen[1].start = gtt_end;
+
+ /* pick the larger of the two chunks */
+ if (stolen[0].end - stolen[0].start >
+ stolen[1].end - stolen[1].start) {
+ base = stolen[0].start;
+ dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
+ } else {
+ base = stolen[1].start;
+ dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
+ }
+
+ if (stolen[0].start != stolen[1].start ||
+ stolen[0].end != stolen[1].end) {
+ DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
+ (unsigned long long) gtt_start,
+ (unsigned long long) gtt_end - 1);
+ DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
+ base, base + (u32) dev_priv->gtt.stolen_size - 1);
+ }
+ }
+
+
/* Verify that nothing else uses this physical address. Stolen
* memory should be reserved by the BIOS and hidden from the
* kernel. So if the region is already marked as busy, something
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 267f069765ad..c05c84f3f091 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2845,7 +2845,7 @@ static int semaphore_passed(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_engine_cs *signaller;
- u32 seqno, ctl;
+ u32 seqno;
ring->hangcheck.deadlock++;
@@ -2857,15 +2857,12 @@ static int semaphore_passed(struct intel_engine_cs *ring)
if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
return -1;
- /* cursory check for an unkickable deadlock */
- ctl = I915_READ_CTL(signaller);
- if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
- return -1;
-
if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
return 1;
- if (signaller->hangcheck.deadlock)
+ /* cursory check for an unkickable deadlock */
+ if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
+ semaphore_passed(signaller) < 0)
return -1;
return 0;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e691b30b2817..a5bab61bfc00 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -942,6 +942,9 @@ enum punit_power_well {
/*
* Instruction and interrupt control regs
*/
+#define PGTBL_CTL 0x02020
+#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */
+#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */
#define PGTBL_ER 0x02024
#define RENDER_RING_BASE 0x02000
#define BSD_RING_BASE 0x04000
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 1ee98f121a00..827498e081df 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -315,9 +315,6 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
const struct bdb_lfp_backlight_data *backlight_data;
const struct bdb_lfp_backlight_data_entry *entry;
- /* Err to enabling backlight if no backlight block. */
- dev_priv->vbt.backlight.present = true;
-
backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
if (!backlight_data)
return;
@@ -1088,6 +1085,9 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
+ /* Default to having backlight */
+ dev_priv->vbt.backlight.present = true;
+
/* LFP panel data */
dev_priv->vbt.lvds_dither = 1;
dev_priv->vbt.lvds_vbt = 0;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index efd3cf50cb0f..f0be855ddf45 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2087,6 +2087,7 @@ void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
enum plane plane, enum pipe pipe)
{
+ struct drm_device *dev = dev_priv->dev;
struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
int reg;
@@ -2106,6 +2107,14 @@ static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
intel_flush_primary_plane(dev_priv, plane);
+
+ /*
+ * BDW signals flip done immediately if the plane
+ * is disabled, even if the plane enable is already
+ * armed to occur at the next vblank :(
+ */
+ if (IS_BROADWELL(dev))
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
}
/**
@@ -4564,7 +4573,10 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->active)
return;
- vlv_prepare_pll(intel_crtc);
+ is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
+
+ if (!is_dsi && !IS_CHERRYVIEW(dev))
+ vlv_prepare_pll(intel_crtc);
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -4598,8 +4610,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
- is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
-
if (!is_dsi) {
if (IS_CHERRYVIEW(dev))
chv_enable_pll(intel_crtc);
@@ -11087,6 +11097,22 @@ const char *intel_output_name(int output)
return names[output];
}
+static bool intel_crt_present(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_ULT(dev))
+ return false;
+
+ if (IS_CHERRYVIEW(dev))
+ return false;
+
+ if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
+ return false;
+
+ return true;
+}
+
static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -11095,7 +11121,7 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_lvds_init(dev);
- if (!IS_ULT(dev) && !IS_CHERRYVIEW(dev) && dev_priv->vbt.int_crt_support)
+ if (intel_crt_present(dev))
intel_crt_init(dev);
if (HAS_DDI(dev)) {
@@ -11565,6 +11591,14 @@ static void quirk_invert_brightness(struct drm_device *dev)
DRM_INFO("applying inverted panel brightness quirk\n");
}
+/* Some VBT's incorrectly indicate no backlight is present */
+static void quirk_backlight_present(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
+ DRM_INFO("applying backlight present quirk\n");
+}
+
struct intel_quirk {
int device;
int subsystem_vendor;
@@ -11633,6 +11667,15 @@ static struct intel_quirk intel_quirks[] = {
/* Acer Aspire 5336 */
{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
+
+ /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
+ { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
+
+ /* Toshiba CB35 Chromebook (Celeron 2955U) */
+ { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
+
+ /* HP Chromebook 14 (Celeron 2955U) */
+ { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
};
static void intel_init_quirks(struct drm_device *dev)
@@ -11871,6 +11914,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
* ... */
plane = crtc->plane;
crtc->plane = !plane;
+ crtc->primary_enabled = true;
dev_priv->display.crtc_disable(&crtc->base);
crtc->plane = plane;
@@ -12411,8 +12455,8 @@ intel_display_capture_error_state(struct drm_device *dev)
for_each_pipe(i) {
error->pipe[i].power_domain_on =
- intel_display_power_enabled_sw(dev_priv,
- POWER_DOMAIN_PIPE(i));
+ intel_display_power_enabled_unlocked(dev_priv,
+ POWER_DOMAIN_PIPE(i));
if (!error->pipe[i].power_domain_on)
continue;
@@ -12447,7 +12491,7 @@ intel_display_capture_error_state(struct drm_device *dev)
enum transcoder cpu_transcoder = transcoders[i];
error->transcoder[i].power_domain_on =
- intel_display_power_enabled_sw(dev_priv,
+ intel_display_power_enabled_unlocked(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder));
if (!error->transcoder[i].power_domain_on)
continue;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 52fda950fd2a..8a1a4fbc06ac 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -28,6 +28,8 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
@@ -336,6 +338,37 @@ static u32 _pp_stat_reg(struct intel_dp *intel_dp)
return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}
+/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
+ This function only applicable when panel PM state is not to be tracked */
+static int edp_notify_handler(struct notifier_block *this, unsigned long code,
+ void *unused)
+{
+ struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
+ edp_notifier);
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp_div;
+ u32 pp_ctrl_reg, pp_div_reg;
+ enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+ if (!is_edp(intel_dp) || code != SYS_RESTART)
+ return 0;
+
+ if (IS_VALLEYVIEW(dev)) {
+ pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
+ pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
+ pp_div = I915_READ(pp_div_reg);
+ pp_div &= PP_REFERENCE_DIVIDER_MASK;
+
+ /* 0x1F write to PP_DIV_REG sets max cycle delay */
+ I915_WRITE(pp_div_reg, pp_div | 0x1F);
+ I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
+ msleep(intel_dp->panel_power_cycle_delay);
+ }
+
+ return 0;
+}
+
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -873,8 +906,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
bpp);
- for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
- for (clock = min_clock; clock <= max_clock; clock++) {
+ for (clock = min_clock; clock <= max_clock; clock++) {
+ for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
link_avail = intel_dp_max_data_rate(link_clock,
lane_count);
@@ -3707,6 +3740,10 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
edp_panel_vdd_off_sync(intel_dp);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ if (intel_dp->edp_notifier.notifier_call) {
+ unregister_reboot_notifier(&intel_dp->edp_notifier);
+ intel_dp->edp_notifier.notifier_call = NULL;
+ }
}
kfree(intel_dig_port);
}
@@ -4184,6 +4221,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
mutex_unlock(&dev->mode_config.mutex);
+ if (IS_VALLEYVIEW(dev)) {
+ intel_dp->edp_notifier.notifier_call = edp_notify_handler;
+ register_reboot_notifier(&intel_dp->edp_notifier);
+ }
+
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_panel_setup_backlight(connector);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index bda0ae3d80cc..f67340ed2c12 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -538,6 +538,8 @@ struct intel_dp {
unsigned long last_power_on;
unsigned long last_backlight_off;
bool psr_setup_done;
+ struct notifier_block edp_notifier;
+
bool use_tps3;
struct intel_connector *attached_connector;
@@ -950,8 +952,8 @@ int intel_power_domains_init(struct drm_i915_private *);
void intel_power_domains_remove(struct drm_i915_private *);
bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
-bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
+bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_put(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 02f99d768d49..3fd082933c87 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -117,17 +117,18 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
/* bandgap reset is needed after everytime we do power gate */
band_gap_reset(dev_priv);
+ I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
+ usleep_range(2500, 3000);
+
val = I915_READ(MIPI_PORT_CTRL(pipe));
I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
- I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
- usleep_range(2000, 2500);
- I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
- usleep_range(2000, 2500);
- I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
- usleep_range(2000, 2500);
+
+ I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
+ usleep_range(2500, 3000);
+
I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
- usleep_range(2000, 2500);
+ usleep_range(2500, 3000);
}
static void intel_dsi_enable(struct intel_encoder *encoder)
@@ -271,23 +272,23 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
- I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
+ I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
usleep_range(2000, 2500);
- I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
+ I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
usleep_range(2000, 2500);
- I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
+ I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
usleep_range(2000, 2500);
- val = I915_READ(MIPI_PORT_CTRL(pipe));
- I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
- usleep_range(1000, 1500);
-
if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT)
== 0x00000), 30))
DRM_ERROR("DSI LP not going Low\n");
+ val = I915_READ(MIPI_PORT_CTRL(pipe));
+ I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
+ usleep_range(1000, 1500);
+
I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
usleep_range(2000, 2500);
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c
index 3eeb21b9fddf..933c86305237 100644
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.c
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.c
@@ -404,12 +404,6 @@ int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs)
else
cmd |= DPI_LP_MODE;
- /* DPI virtual channel?! */
-
- mask = DPI_FIFO_EMPTY;
- if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
- DRM_ERROR("Timeout waiting for DPI FIFO empty.\n");
-
/* clear bit */
I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 23126023aeba..5e5a72fca5fb 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -111,6 +111,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
pipe_config->adjusted_mode.flags |= flags;
+ /* gen2/3 store dither state in pfit control, needs to match */
+ if (INTEL_INFO(dev)->gen < 4) {
+ tmp = I915_READ(PFIT_CONTROL);
+
+ pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
+ }
+
dotclock = pipe_config->port_clock;
if (HAS_PCH_SPLIT(dev_priv->dev))
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 2e2c71fcc9ed..4f6b53998d79 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -403,6 +403,15 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
+ /*
+ * If the acpi_video interface is not supposed to be used, don't
+ * bother processing backlight level change requests from firmware.
+ */
+ if (!acpi_video_verify_backlight_support()) {
+ DRM_DEBUG_KMS("opregion backlight request ignored\n");
+ return 0;
+ }
+
if (!(bclp & ASLE_BCLP_VALID))
return ASLC_BACKLIGHT_FAILED;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 38a98570d10c..12b02fe1d0ae 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -361,16 +361,16 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
PFIT_FILTER_FUZZY);
- /* Make sure pre-965 set dither correctly for 18bpp panels. */
- if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
- pfit_control |= PANEL_8TO6_DITHER_ENABLE;
-
out:
if ((pfit_control & PFIT_ENABLE) == 0) {
pfit_control = 0;
pfit_pgm_ratios = 0;
}
+ /* Make sure pre-965 set dither correctly for 18bpp panels. */
+ if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
pipe_config->gmch_pfit.control = pfit_control;
pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
pipe_config->gmch_pfit.lvds_border_bits = border;
@@ -1118,8 +1118,12 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
int ret;
if (!dev_priv->vbt.backlight.present) {
- DRM_DEBUG_KMS("native backlight control not available per VBT\n");
- return 0;
+ if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) {
+ DRM_DEBUG_KMS("no backlight present per VBT, but present per quirk\n");
+ } else {
+ DRM_DEBUG_KMS("no backlight present per VBT\n");
+ return 0;
+ }
}
/* set level and max in panel struct */
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 54242e4f6f4c..ee72807069e4 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3209,6 +3209,14 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
*/
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
+
+ /* Latest VLV doesn't need to force the gfx clock */
+ if (dev->pdev->revision >= 0xd) {
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+ return;
+ }
+
/*
* When we are idle. Drop to min voltage state.
*/
@@ -5603,8 +5611,8 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
(HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}
-bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
+bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
@@ -5615,16 +5623,19 @@ bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
return false;
power_domains = &dev_priv->power_domains;
+
is_enabled = true;
+
for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
if (power_well->always_on)
continue;
- if (!power_well->count) {
+ if (!power_well->hw_enabled) {
is_enabled = false;
break;
}
}
+
return is_enabled;
}
@@ -5632,30 +5643,15 @@ bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains;
- struct i915_power_well *power_well;
- bool is_enabled;
- int i;
-
- if (dev_priv->pm.suspended)
- return false;
+ bool ret;
power_domains = &dev_priv->power_domains;
- is_enabled = true;
-
mutex_lock(&power_domains->lock);
- for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
- if (power_well->always_on)
- continue;
-
- if (!power_well->ops->is_enabled(dev_priv, power_well)) {
- is_enabled = false;
- break;
- }
- }
+ ret = intel_display_power_enabled_unlocked(dev_priv, domain);
mutex_unlock(&power_domains->lock);
- return is_enabled;
+ return ret;
}
/*
@@ -5976,6 +5972,7 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
if (!power_well->count++) {
DRM_DEBUG_KMS("enabling %s\n", power_well->name);
power_well->ops->enable(dev_priv, power_well);
+ power_well->hw_enabled = true;
}
check_power_well_state(dev_priv, power_well);
@@ -6005,6 +6002,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
if (!--power_well->count && i915.disable_power_well) {
DRM_DEBUG_KMS("disabling %s\n", power_well->name);
+ power_well->hw_enabled = false;
power_well->ops->disable(dev_priv, power_well);
}
@@ -6048,6 +6046,27 @@ int i915_release_power_well(void)
}
EXPORT_SYMBOL_GPL(i915_release_power_well);
+/*
+ * Private interface for the audio driver to get CDCLK in kHz.
+ *
+ * Caller must request power well using i915_request_power_well() prior to
+ * making the call.
+ */
+int i915_get_cdclk_freq(void)
+{
+ struct drm_i915_private *dev_priv;
+
+ if (!hsw_pwr)
+ return -ENODEV;
+
+ dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+ power_domains);
+
+ return intel_ddi_get_cdclk_freq(dev_priv);
+}
+EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
+
+
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
@@ -6267,8 +6286,11 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
int i;
mutex_lock(&power_domains->lock);
- for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains)
+ for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
power_well->ops->sync_hw(dev_priv, power_well);
+ power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
+ power_well);
+ }
mutex_unlock(&power_domains->lock);
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 1b66ddcdfb33..9a17b4e92ef4 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -691,6 +691,14 @@ intel_post_enable_primary(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
/*
+ * BDW signals flip done immediately if the plane
+ * is disabled, even if the plane enable is already
+ * armed to occur at the next vblank :(
+ */
+ if (IS_BROADWELL(dev))
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ /*
* FIXME IPS should be fine as long as one plane is
* enabled, but in practice it seems to have problems
* when going from primary only to sprite only and vice
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index ae750f6928c1..7f7aadef8a82 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -277,6 +277,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"};
+ static unsigned long hpd_clk_freq[] = {0, 19200000, 0};
static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"};
config.phy_init = hdmi_phy_8x74_init;
@@ -286,6 +287,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
config.pwr_reg_names = pwr_reg_names;
config.pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names);
config.hpd_clk_names = hpd_clk_names;
+ config.hpd_freq = hpd_clk_freq;
config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
config.pwr_clk_names = pwr_clk_names;
config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names);
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 9fafee6a3e43..9d7723c6528a 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -87,6 +87,7 @@ struct hdmi_platform_config {
/* clks that need to be on for hpd: */
const char **hpd_clk_names;
+ const long unsigned *hpd_freq;
int hpd_clk_cnt;
/* clks that need to be on for screen pwr (ie pixel clk): */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index e56a6196867c..28f7e3ec6c28 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -127,6 +127,14 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
}
for (i = 0; i < config->hpd_clk_cnt; i++) {
+ if (config->hpd_freq && config->hpd_freq[i]) {
+ ret = clk_set_rate(hdmi->hpd_clks[i],
+ config->hpd_freq[i]);
+ if (ret)
+ dev_warn(dev->dev, "failed to set clk %s (%d)\n",
+ config->hpd_clk_names[i], ret);
+ }
+
ret = clk_prepare_enable(hdmi->hpd_clks[i]);
if (ret) {
dev_err(dev->dev, "failed to enable hpd clk: %s (%d)\n",
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 42caf7fcb0b9..71510ee26e96 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -20,6 +20,10 @@
#include "msm_mmu.h"
#include "mdp5_kms.h"
+static const char *iommu_ports[] = {
+ "mdp_0",
+};
+
static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
static int mdp5_hw_init(struct msm_kms *kms)
@@ -104,6 +108,12 @@ static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
static void mdp5_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct msm_mmu *mmu = mdp5_kms->mmu;
+
+ if (mmu) {
+ mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
+ mmu->funcs->destroy(mmu);
+ }
kfree(mdp5_kms);
}
@@ -216,10 +226,6 @@ fail:
return ret;
}
-static const char *iommu_ports[] = {
- "mdp_0",
-};
-
static int get_clk(struct platform_device *pdev, struct clk **clkp,
const char *name)
{
@@ -317,17 +323,23 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mmu = msm_iommu_new(dev, config->iommu);
if (IS_ERR(mmu)) {
ret = PTR_ERR(mmu);
+ dev_err(dev->dev, "failed to init iommu: %d\n", ret);
goto fail;
}
+
ret = mmu->funcs->attach(mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
- if (ret)
+ if (ret) {
+ dev_err(dev->dev, "failed to attach iommu: %d\n", ret);
+ mmu->funcs->destroy(mmu);
goto fail;
+ }
} else {
dev_info(dev->dev, "no iommu, fallback to phys "
"contig buffers for scanout\n");
mmu = NULL;
}
+ mdp5_kms->mmu = mmu;
mdp5_kms->id = msm_register_mmu(dev, mmu);
if (mdp5_kms->id < 0) {
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index c8b1a2522c25..6e981b692d1d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -33,6 +33,7 @@ struct mdp5_kms {
/* mapper-id used to request GEM buffer mapped for scanout: */
int id;
+ struct msm_mmu *mmu;
/* for tracking smp allocation amongst pipes: */
mdp5_smp_state_t smp_state;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 0d2562fb681e..9a5d87db5c23 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -159,7 +159,7 @@ static int msm_unload(struct drm_device *dev)
static int get_mdp_ver(struct platform_device *pdev)
{
#ifdef CONFIG_OF
- const static struct of_device_id match_types[] = { {
+ static const struct of_device_id match_types[] = { {
.compatible = "qcom,mdss_mdp",
.data = (void *)5,
}, {
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index a752ab83b810..5107fc4826bc 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -59,7 +59,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
struct drm_framebuffer *fb = NULL;
struct fb_info *fbi = NULL;
struct drm_mode_fb_cmd2 mode_cmd = {0};
- dma_addr_t paddr;
+ uint32_t paddr;
int ret, size;
sizes->surface_bpp = 32;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index bb8026daebc9..690d7e7b6d1e 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -278,6 +278,7 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
uint32_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct drm_device *dev = obj->dev;
int ret = 0;
if (!msm_obj->domain[id].iova) {
@@ -285,6 +286,11 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
struct msm_mmu *mmu = priv->mmus[id];
struct page **pages = get_pages(obj);
+ if (!mmu) {
+ dev_err(dev->dev, "null MMU pointer\n");
+ return -EINVAL;
+ }
+
if (IS_ERR(pages))
return PTR_ERR(pages);
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 92b745986231..4b2ad9181edf 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -28,7 +28,7 @@ static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
unsigned long iova, int flags, void *arg)
{
DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
- return 0;
+ return -ENOSYS;
}
static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
@@ -40,8 +40,10 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
for (i = 0; i < cnt; i++) {
struct device *msm_iommu_get_ctx(const char *ctx_name);
struct device *ctx = msm_iommu_get_ctx(names[i]);
- if (IS_ERR_OR_NULL(ctx))
+ if (IS_ERR_OR_NULL(ctx)) {
+ dev_warn(dev->dev, "couldn't get %s context", names[i]);
continue;
+ }
ret = iommu_attach_device(iommu->domain, ctx);
if (ret) {
dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
@@ -52,6 +54,20 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
return 0;
}
+static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ int i;
+
+ for (i = 0; i < cnt; i++) {
+ struct device *msm_iommu_get_ctx(const char *ctx_name);
+ struct device *ctx = msm_iommu_get_ctx(names[i]);
+ if (IS_ERR_OR_NULL(ctx))
+ continue;
+ iommu_detach_device(iommu->domain, ctx);
+ }
+}
+
static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
struct sg_table *sgt, unsigned len, int prot)
{
@@ -110,7 +126,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
- BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
+ BUG_ON(!PAGE_ALIGNED(bytes));
da += bytes;
}
@@ -127,6 +143,7 @@ static void msm_iommu_destroy(struct msm_mmu *mmu)
static const struct msm_mmu_funcs funcs = {
.attach = msm_iommu_attach,
+ .detach = msm_iommu_detach,
.map = msm_iommu_map,
.unmap = msm_iommu_unmap,
.destroy = msm_iommu_destroy,
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 030324482b4a..21da6d154f71 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -22,6 +22,7 @@
struct msm_mmu_funcs {
int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
+ void (*detach)(struct msm_mmu *mmu, const char **names, int cnt);
int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
unsigned len, int prot);
int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 26e962b7e702..2283c442a10d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -1516,11 +1516,11 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
}
switch ((ctrl & 0x000f0000) >> 16) {
- case 6: datarate = pclk * 30 / 8; break;
- case 5: datarate = pclk * 24 / 8; break;
+ case 6: datarate = pclk * 30; break;
+ case 5: datarate = pclk * 24; break;
case 2:
default:
- datarate = pclk * 18 / 8;
+ datarate = pclk * 18;
break;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 48aa38a87e3f..fa30d8196f35 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -1159,11 +1159,11 @@ nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
if (outp->info.type == DCB_OUTPUT_DP) {
u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300));
switch ((sync & 0x000003c0) >> 6) {
- case 6: pclk = pclk * 30 / 8; break;
- case 5: pclk = pclk * 24 / 8; break;
+ case 6: pclk = pclk * 30; break;
+ case 5: pclk = pclk * 24; break;
case 2:
default:
- pclk = pclk * 18 / 8;
+ pclk = pclk * 18;
break;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
index 52c299c3d300..eb2d7789555d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
@@ -34,7 +34,7 @@ nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
struct nvkm_output_dp *outp = (void *)base;
bool retrain = true;
u8 link[2], stat[3];
- u32 rate;
+ u32 linkrate;
int ret, i;
/* check that the link is trained at a high enough rate */
@@ -44,8 +44,10 @@ nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
goto done;
}
- rate = link[0] * 27000 * (link[1] & DPCD_LC01_LANE_COUNT_SET);
- if (rate < ((datarate / 8) * 10)) {
+ linkrate = link[0] * 27000 * (link[1] & DPCD_LC01_LANE_COUNT_SET);
+ linkrate = (linkrate * 8) / 10; /* 8B/10B coding overhead */
+ datarate = (datarate + 9) / 10; /* -> decakilobits */
+ if (linkrate < datarate) {
DBG("link not trained at sufficient rate\n");
goto done;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
index e1832778e8b6..7a1ebdfa9e1b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -87,6 +87,7 @@ nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
struct nvkm_output_dp *outpdp = (void *)outp;
switch (data) {
case NV94_DISP_SOR_DP_PWR_STATE_OFF:
+ nouveau_event_put(outpdp->irq);
((struct nvkm_output_dp_impl *)nv_oclass(outp))
->lnk_pwr(outpdp, 0);
atomic_set(&outpdp->lt.done, 0);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
index 0f57fcfe0bbf..2af9cfd2c60f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
@@ -26,7 +26,7 @@ ramfuc_reg2(u32 addr1, u32 addr2)
};
}
-static inline struct ramfuc_reg
+static noinline struct ramfuc_reg
ramfuc_reg(u32 addr)
{
return ramfuc_reg2(addr, addr);
@@ -107,7 +107,7 @@ ramfuc_nsec(struct ramfuc *ram, u32 nsec)
#define ram_init(s,p) ramfuc_init(&(s)->base, (p))
#define ram_exec(s,e) ramfuc_exec(&(s)->base, (e))
-#define ram_have(s,r) ((s)->r_##r.addr != 0x000000)
+#define ram_have(s,r) ((s)->r_##r.addr[0] != 0x000000)
#define ram_rd32(s,r) ramfuc_rd32(&(s)->base, &(s)->r_##r)
#define ram_wr32(s,r,d) ramfuc_wr32(&(s)->base, &(s)->r_##r, (d))
#define ram_nuke(s,r) ramfuc_nuke(&(s)->base, &(s)->r_##r)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
index 1ad3ea503133..c5b46e302319 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
@@ -200,6 +200,7 @@ r1373f4_init(struct nve0_ramfuc *fuc)
/* (re)program mempll, if required */
if (ram->mode == 2) {
ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);
+ ram_mask(fuc, 0x132000, 0x80000000, 0x80000000);
ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
ram_mask(fuc, 0x132004, 0x103fffff, mcoef);
ram_mask(fuc, 0x132000, 0x00000001, 0x00000001);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
index cfde9eb44ad0..6212537b90c5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -192,11 +192,11 @@ alarm_timer_callback(struct nouveau_alarm *alarm)
nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown,
NOUVEAU_THERM_THRS_SHUTDOWN);
+ spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+
/* schedule the next poll in one second */
if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head))
- ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm);
-
- spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+ ptimer->alarm(ptimer, 1000000000ULL, alarm);
}
void
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index ddd83756b9a2..5425ffe3931d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -652,12 +652,12 @@ int nouveau_pmops_resume(struct device *dev)
ret = nouveau_do_resume(drm_dev);
if (ret)
return ret;
- if (drm_dev->mode_config.num_crtc)
- nouveau_fbcon_set_suspend(drm_dev, 0);
- nouveau_fbcon_zfill_all(drm_dev);
- if (drm_dev->mode_config.num_crtc)
+ if (drm_dev->mode_config.num_crtc) {
nouveau_display_resume(drm_dev);
+ nouveau_fbcon_set_suspend(drm_dev, 0);
+ }
+
return 0;
}
@@ -683,11 +683,12 @@ static int nouveau_pmops_thaw(struct device *dev)
ret = nouveau_do_resume(drm_dev);
if (ret)
return ret;
- if (drm_dev->mode_config.num_crtc)
- nouveau_fbcon_set_suspend(drm_dev, 0);
- nouveau_fbcon_zfill_all(drm_dev);
- if (drm_dev->mode_config.num_crtc)
+
+ if (drm_dev->mode_config.num_crtc) {
nouveau_display_resume(drm_dev);
+ nouveau_fbcon_set_suspend(drm_dev, 0);
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 64a42cfd3717..191665ee7f52 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -531,17 +531,10 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
if (state == 1)
nouveau_fbcon_save_disable_accel(dev);
fb_set_suspend(drm->fbcon->helper.fbdev, state);
- if (state == 0)
+ if (state == 0) {
nouveau_fbcon_restore_accel(dev);
+ nouveau_fbcon_zfill(dev, drm->fbcon);
+ }
console_unlock();
}
}
-
-void
-nouveau_fbcon_zfill_all(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->fbcon) {
- nouveau_fbcon_zfill(dev, drm->fbcon);
- }
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index fdfc0c94fbcc..fcff797d2084 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -61,7 +61,6 @@ void nouveau_fbcon_gpu_lockup(struct fb_info *info);
int nouveau_fbcon_init(struct drm_device *dev);
void nouveau_fbcon_fini(struct drm_device *dev);
void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
-void nouveau_fbcon_zfill_all(struct drm_device *dev);
void nouveau_fbcon_save_disable_accel(struct drm_device *dev);
void nouveau_fbcon_restore_accel(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index afdf607df3e6..4c534b7b04da 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1741,7 +1741,8 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
}
}
- mthd = (ffs(nv_encoder->dcb->sorconf.link) - 1) << 2;
+ mthd = (ffs(nv_encoder->dcb->heads) - 1) << 3;
+ mthd |= (ffs(nv_encoder->dcb->sorconf.link) - 1) << 2;
mthd |= nv_encoder->or;
if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 34d6a85e9023..0bf1e20c6e44 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -33,6 +33,9 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
pending = xchg(&qdev->ram_header->int_pending, 0);
+ if (!pending)
+ return IRQ_NONE;
+
atomic_inc(&qdev->irq_received);
if (pending & QXL_INTERRUPT_DISPLAY) {
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a03c73411a56..30d242b25078 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1414,8 +1414,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
- /* set pageflip to happen anywhere in vblank interval */
- WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+ /* set pageflip to happen only at start of vblank interval (front porch) */
+ WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
if (!atomic && fb && fb != crtc->primary->fb) {
radeon_fb = to_radeon_framebuffer(fb);
@@ -1614,8 +1614,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
- /* set pageflip to happen anywhere in vblank interval */
- WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+ /* set pageflip to happen only at start of vblank interval (front porch) */
+ WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
if (!atomic && fb && fb != crtc->primary->fb) {
radeon_fb = to_radeon_framebuffer(fb);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index c5b1f2da3954..b1e11f8434e2 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -127,7 +127,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
/* flags not zero */
if (args.v1.ucReplyStatus == 2) {
DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
- r = -EBUSY;
+ r = -EIO;
goto done;
}
@@ -403,16 +403,18 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
u8 msg[DP_DPCD_SIZE];
- int ret, i;
+ int ret;
+
+ char dpcd_hex_dump[DP_DPCD_SIZE * 3];
ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
DP_DPCD_SIZE);
if (ret > 0) {
memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
- DRM_DEBUG_KMS("DPCD: ");
- for (i = 0; i < DP_DPCD_SIZE; i++)
- DRM_DEBUG_KMS("%02x ", msg[i]);
- DRM_DEBUG_KMS("\n");
+
+ hex_dump_to_buffer(dig_connector->dpcd, sizeof(dig_connector->dpcd),
+ 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
+ DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
radeon_dp_probe_oui(radeon_connector);
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 2b2908440644..7d68203a3737 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -183,7 +183,6 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
struct backlight_properties props;
struct radeon_backlight_privdata *pdata;
struct radeon_encoder_atom_dig *dig;
- u8 backlight_level;
char bl_name[16];
/* Mac laptops with multiple GPUs use the gmux driver for backlight
@@ -222,12 +221,17 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
pdata->encoder = radeon_encoder;
- backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
-
dig = radeon_encoder->enc_priv;
dig->bl_dev = bd;
bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
+ /* Set a reasonable default here if the level is 0 otherwise
+ * fbdev will attempt to turn the backlight on after console
+ * unblanking and it will try and restore 0 which turns the backlight
+ * off again.
+ */
+ if (bd->props.brightness == 0)
+ bd->props.brightness = RADEON_MAX_BL_LEVEL;
bd->props.power = FB_BLANK_UNBLANK;
backlight_update_status(bd);
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 10dae4106c08..584090ac3eb9 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -1179,7 +1179,7 @@ static int ci_stop_dpm(struct radeon_device *rdev)
tmp &= ~GLOBAL_PWRMGT_EN;
WREG32_SMC(GENERAL_PWRMGT, tmp);
- tmp = RREG32(SCLK_PWRMGT_CNTL);
+ tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
tmp &= ~DYNAMIC_PM_EN;
WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index dcd4518a9b08..c0ea66192fe0 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -2291,6 +2291,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0;
break;
}
+ rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else if (num_pipe_configs == 8) {
@@ -7376,6 +7377,7 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
+ wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
@@ -7676,14 +7678,16 @@ restart_ih:
addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
mc_client = RREG32(VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
+ /* reset addr and status */
+ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+ if (addr == 0x0 && status == 0x0)
+ break;
dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
addr);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
cik_vm_decode_fault(rdev, status, addr, mc_client);
- /* reset addr and status */
- WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
break;
case 167: /* VCE */
DRM_DEBUG("IH: VCE int: 0x%08x\n", src_data);
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index ae88660f34ea..0c6e1b55d968 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -1752,12 +1752,12 @@
#define EOP_TC_WB_ACTION_EN (1 << 15) /* L2 */
#define EOP_TCL1_ACTION_EN (1 << 16)
#define EOP_TC_ACTION_EN (1 << 17) /* L2 */
+#define EOP_TCL2_VOLATILE (1 << 24)
#define EOP_CACHE_POLICY(x) ((x) << 25)
/* 0 - LRU
* 1 - Stream
* 2 - Bypass
*/
-#define EOP_TCL2_VOLATILE (1 << 27)
#define DATA_SEL(x) ((x) << 29)
/* 0 - discard
* 1 - send low 32bit data
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 5a9a5f4d7888..47d31e915758 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -1551,7 +1551,7 @@ int cypress_populate_smc_voltage_tables(struct radeon_device *rdev,
table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDCI] = 0;
table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDCI] =
- cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
+ cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
}
return 0;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e2f605224e8c..15e4f28015e1 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -189,7 +189,7 @@ static const u32 evergreen_golden_registers[] =
0x8c1c, 0xffffffff, 0x00001010,
0x28350, 0xffffffff, 0x00000000,
0xa008, 0xffffffff, 0x00010000,
- 0x5cc, 0xffffffff, 0x00000001,
+ 0x5c4, 0xffffffff, 0x00000001,
0x9508, 0xffffffff, 0x00000002,
0x913c, 0x0000000f, 0x0000000a
};
@@ -476,7 +476,7 @@ static const u32 cedar_golden_registers[] =
0x8c1c, 0xffffffff, 0x00001010,
0x28350, 0xffffffff, 0x00000000,
0xa008, 0xffffffff, 0x00010000,
- 0x5cc, 0xffffffff, 0x00000001,
+ 0x5c4, 0xffffffff, 0x00000001,
0x9508, 0xffffffff, 0x00000002
};
@@ -635,7 +635,7 @@ static const u32 juniper_mgcg_init[] =
static const u32 supersumo_golden_registers[] =
{
0x5eb4, 0xffffffff, 0x00000002,
- 0x5cc, 0xffffffff, 0x00000001,
+ 0x5c4, 0xffffffff, 0x00000001,
0x7030, 0xffffffff, 0x00000011,
0x7c30, 0xffffffff, 0x00000011,
0x6104, 0x01000300, 0x00000000,
@@ -719,7 +719,7 @@ static const u32 sumo_golden_registers[] =
static const u32 wrestler_golden_registers[] =
{
0x5eb4, 0xffffffff, 0x00000002,
- 0x5cc, 0xffffffff, 0x00000001,
+ 0x5c4, 0xffffffff, 0x00000001,
0x7030, 0xffffffff, 0x00000011,
0x7c30, 0xffffffff, 0x00000011,
0x6104, 0x01000300, 0x00000000,
@@ -2642,8 +2642,9 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
for (i = 0; i < rdev->num_crtc; i++) {
if (save->crtc_enabled[i]) {
tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
- if ((tmp & 0x3) != 0) {
- tmp &= ~0x3;
+ if ((tmp & 0x7) != 3) {
+ tmp &= ~0x7;
+ tmp |= 0x3;
WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
}
tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
@@ -4755,6 +4756,7 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
+ wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
@@ -5066,14 +5068,16 @@ restart_ih:
case 147:
addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
+ /* reset addr and status */
+ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+ if (addr == 0x0 && status == 0x0)
+ break;
dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
addr);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
cayman_vm_decode_fault(rdev, status, addr);
- /* reset addr and status */
- WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 333d143fca2c..23bff590fb6e 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -239,7 +239,6 @@
# define EVERGREEN_CRTC_V_BLANK (1 << 0)
#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0
-#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4
#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 3f6e817d97ee..9ef8c38f2d66 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2726,7 +2726,7 @@ int kv_dpm_init(struct radeon_device *rdev)
pi->caps_sclk_ds = true;
pi->enable_auto_thermal_throttling = true;
pi->disable_nb_ps3_in_battery = false;
- pi->bapm_enable = false;
+ pi->bapm_enable = true;
pi->voltage_drop_t = 0;
pi->caps_sclk_throttle_low_notification = false;
pi->caps_fps = false; /* true? */
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 004c931606c4..01fc4888e6fe 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -1315,7 +1315,7 @@ static void ni_populate_smc_voltage_tables(struct radeon_device *rdev,
table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = 0;
table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] =
- cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
+ cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
}
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index c66952d4b00c..3c69f58e46ef 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3795,6 +3795,7 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
+ wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 4b0bbf88d5c0..60c47f829122 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -102,6 +102,7 @@ extern int radeon_runtime_pm;
extern int radeon_hard_reset;
extern int radeon_vm_size;
extern int radeon_vm_block_size;
+extern int radeon_deep_color;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -448,6 +449,7 @@ struct radeon_bo_va {
/* protected by vm mutex */
struct list_head vm_list;
+ struct list_head vm_status;
/* constant after initialization */
struct radeon_vm *vm;
@@ -683,10 +685,9 @@ struct radeon_flip_work {
struct work_struct unpin_work;
struct radeon_device *rdev;
int crtc_id;
- struct drm_framebuffer *fb;
+ uint64_t base;
struct drm_pending_vblank_event *event;
struct radeon_bo *old_rbo;
- struct radeon_bo *new_rbo;
struct radeon_fence *fence;
};
@@ -749,10 +750,6 @@ union radeon_irq_stat_regs {
struct cik_irq_stat_regs cik;
};
-#define RADEON_MAX_HPD_PINS 7
-#define RADEON_MAX_CRTCS 6
-#define RADEON_MAX_AFMT_BLOCKS 7
-
struct radeon_irq {
bool installed;
spinlock_t lock;
@@ -871,6 +868,9 @@ struct radeon_vm {
struct list_head va;
unsigned id;
+ /* BOs freed, but not yet updated in the PT */
+ struct list_head freed;
+
/* contains the page directory */
struct radeon_bo *page_directory;
uint64_t pd_gpu_addr;
@@ -879,6 +879,8 @@ struct radeon_vm {
/* array of page tables, one for each page directory entry */
struct radeon_vm_pt *page_tables;
+ struct radeon_bo_va *ib_bo_va;
+
struct mutex mutex;
/* last fence for cs using this vm */
struct radeon_fence *fence;
@@ -2836,9 +2838,10 @@ void radeon_vm_fence(struct radeon_device *rdev,
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
int radeon_vm_update_page_directory(struct radeon_device *rdev,
struct radeon_vm *vm);
+int radeon_vm_clear_freed(struct radeon_device *rdev,
+ struct radeon_vm *vm);
int radeon_vm_bo_update(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo,
+ struct radeon_bo_va *bo_va,
struct ttm_mem_reg *mem);
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
struct radeon_bo *bo);
@@ -2851,8 +2854,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
struct radeon_bo_va *bo_va,
uint64_t offset,
uint32_t flags);
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
- struct radeon_bo_va *bo_va);
+void radeon_vm_bo_rmv(struct radeon_device *rdev,
+ struct radeon_bo_va *bo_va);
/* audio */
void r600_audio_update_hdmi(struct work_struct *work);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 30844814c25a..173f378428a9 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1227,11 +1227,19 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
rdev->clock.default_dispclk =
le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
if (rdev->clock.default_dispclk == 0) {
- if (ASIC_IS_DCE5(rdev))
+ if (ASIC_IS_DCE6(rdev))
+ rdev->clock.default_dispclk = 60000; /* 600 Mhz */
+ else if (ASIC_IS_DCE5(rdev))
rdev->clock.default_dispclk = 54000; /* 540 Mhz */
else
rdev->clock.default_dispclk = 60000; /* 600 Mhz */
}
+ /* set a reasonable default for DP */
+ if (ASIC_IS_DCE6(rdev) && (rdev->clock.default_dispclk < 53900)) {
+ DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
+ rdev->clock.default_dispclk / 100);
+ rdev->clock.default_dispclk = 60000;
+ }
rdev->clock.dp_extclk =
le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
rdev->clock.current_dispclk = rdev->clock.default_dispclk;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 1b9177ed181f..44831197e82e 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -199,6 +199,9 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
}
}
+ if ((radeon_deep_color == 0) && (bpc > 8))
+ bpc = 8;
+
DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n",
connector->name, connector->display_info.bpc, bpc);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 71a143461478..ae763f60c8a0 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -461,13 +461,23 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
struct radeon_vm *vm)
{
struct radeon_device *rdev = p->rdev;
+ struct radeon_bo_va *bo_va;
int i, r;
r = radeon_vm_update_page_directory(rdev, vm);
if (r)
return r;
- r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo,
+ r = radeon_vm_clear_freed(rdev, vm);
+ if (r)
+ return r;
+
+ if (vm->ib_bo_va == NULL) {
+ DRM_ERROR("Tmp BO not in VM!\n");
+ return -EINVAL;
+ }
+
+ r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
&rdev->ring_tmp_bo.bo->tbo.mem);
if (r)
return r;
@@ -480,7 +490,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
continue;
bo = p->relocs[i].robj;
- r = radeon_vm_bo_update(rdev, vm, bo, &bo->tbo.mem);
+ bo_va = radeon_vm_bo_find(vm, bo);
+ if (bo_va == NULL) {
+ dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
+ return -EINVAL;
+ }
+
+ r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 03686fab842d..697add2cd4e3 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1056,36 +1056,36 @@ static void radeon_check_arguments(struct radeon_device *rdev)
if (!radeon_check_pot_argument(radeon_vm_size)) {
dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
radeon_vm_size);
- radeon_vm_size = 4096;
+ radeon_vm_size = 4;
}
- if (radeon_vm_size < 4) {
- dev_warn(rdev->dev, "VM size (%d) to small, min is 4MB\n",
+ if (radeon_vm_size < 1) {
+ dev_warn(rdev->dev, "VM size (%d) to small, min is 1GB\n",
radeon_vm_size);
- radeon_vm_size = 4096;
+ radeon_vm_size = 4;
}
/*
* Max GPUVM size for Cayman, SI and CI are 40 bits.
*/
- if (radeon_vm_size > 1024*1024) {
- dev_warn(rdev->dev, "VM size (%d) to large, max is 1TB\n",
+ if (radeon_vm_size > 1024) {
+ dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
radeon_vm_size);
- radeon_vm_size = 4096;
+ radeon_vm_size = 4;
}
/* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, minimum 9 bits in the
* page table and the remaining bits are in the page directory */
if (radeon_vm_block_size < 9) {
- dev_warn(rdev->dev, "VM page table size (%d) to small\n",
+ dev_warn(rdev->dev, "VM page table size (%d) too small\n",
radeon_vm_block_size);
radeon_vm_block_size = 9;
}
if (radeon_vm_block_size > 24 ||
- radeon_vm_size < (1ull << radeon_vm_block_size)) {
- dev_warn(rdev->dev, "VM page table size (%d) to large\n",
+ (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
+ dev_warn(rdev->dev, "VM page table size (%d) too large\n",
radeon_vm_block_size);
radeon_vm_block_size = 9;
}
@@ -1238,7 +1238,7 @@ int radeon_device_init(struct radeon_device *rdev,
/* Adjust VM size here.
* Max GPUVM size for cayman+ is 40 bits.
*/
- rdev->vm_manager.max_pfn = radeon_vm_size << 8;
+ rdev->vm_manager.max_pfn = radeon_vm_size << 18;
/* Set asic functions */
r = radeon_asic_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 8fc362aa6a1a..bf25061c8ac4 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -285,7 +285,6 @@ static void radeon_unpin_work_func(struct work_struct *__work)
void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
- struct radeon_flip_work *work;
unsigned long flags;
u32 update_pending;
int vpos, hpos;
@@ -295,8 +294,11 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
return;
spin_lock_irqsave(&rdev->ddev->event_lock, flags);
- work = radeon_crtc->flip_work;
- if (work == NULL) {
+ if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
+ DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
+ "RADEON_FLIP_SUBMITTED(%d)\n",
+ radeon_crtc->flip_status,
+ RADEON_FLIP_SUBMITTED);
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
return;
}
@@ -344,12 +346,17 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
spin_lock_irqsave(&rdev->ddev->event_lock, flags);
work = radeon_crtc->flip_work;
- if (work == NULL) {
+ if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
+ DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
+ "RADEON_FLIP_SUBMITTED(%d)\n",
+ radeon_crtc->flip_status,
+ RADEON_FLIP_SUBMITTED);
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
return;
}
/* Pageflip completed. Clean up. */
+ radeon_crtc->flip_status = RADEON_FLIP_NONE;
radeon_crtc->flip_work = NULL;
/* wakeup userspace */
@@ -359,7 +366,6 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
- radeon_fence_unref(&work->fence);
radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
queue_work(radeon_crtc->flip_queue, &work->unpin_work);
}
@@ -379,51 +385,108 @@ static void radeon_flip_work_func(struct work_struct *__work)
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];
struct drm_crtc *crtc = &radeon_crtc->base;
- struct drm_framebuffer *fb = work->fb;
-
- uint32_t tiling_flags, pitch_pixels;
- uint64_t base;
-
unsigned long flags;
int r;
down_read(&rdev->exclusive_lock);
- while (work->fence) {
+ if (work->fence) {
r = radeon_fence_wait(work->fence, false);
if (r == -EDEADLK) {
up_read(&rdev->exclusive_lock);
r = radeon_gpu_reset(rdev);
down_read(&rdev->exclusive_lock);
}
+ if (r)
+ DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
- if (r) {
- DRM_ERROR("failed to wait on page flip fence (%d)!\n",
- r);
- goto cleanup;
- } else
- radeon_fence_unref(&work->fence);
+ /* We continue with the page flip even if we failed to wait on
+ * the fence, otherwise the DRM core and userspace will be
+ * confused about which BO the CRTC is scanning out
+ */
+
+ radeon_fence_unref(&work->fence);
}
+ /* We borrow the event spin lock for protecting flip_status */
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+ /* set the proper interrupt */
+ radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
+
+ /* do the flip (mmio) */
+ radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
+
+ radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ up_read(&rdev->exclusive_lock);
+}
+
+static int radeon_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_framebuffer *old_radeon_fb;
+ struct radeon_framebuffer *new_radeon_fb;
+ struct drm_gem_object *obj;
+ struct radeon_flip_work *work;
+ struct radeon_bo *new_rbo;
+ uint32_t tiling_flags, pitch_pixels;
+ uint64_t base;
+ unsigned long flags;
+ int r;
+
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (work == NULL)
+ return -ENOMEM;
+
+ INIT_WORK(&work->flip_work, radeon_flip_work_func);
+ INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
+
+ work->rdev = rdev;
+ work->crtc_id = radeon_crtc->crtc_id;
+ work->event = event;
+
+ /* schedule unpin of the old buffer */
+ old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
+ obj = old_radeon_fb->obj;
+
+ /* take a reference to the old object */
+ drm_gem_object_reference(obj);
+ work->old_rbo = gem_to_radeon_bo(obj);
+
+ new_radeon_fb = to_radeon_framebuffer(fb);
+ obj = new_radeon_fb->obj;
+ new_rbo = gem_to_radeon_bo(obj);
+
+ spin_lock(&new_rbo->tbo.bdev->fence_lock);
+ if (new_rbo->tbo.sync_obj)
+ work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj);
+ spin_unlock(&new_rbo->tbo.bdev->fence_lock);
+
/* pin the new buffer */
- DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
- work->old_rbo, work->new_rbo);
+ DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n",
+ work->old_rbo, new_rbo);
- r = radeon_bo_reserve(work->new_rbo, false);
+ r = radeon_bo_reserve(new_rbo, false);
if (unlikely(r != 0)) {
DRM_ERROR("failed to reserve new rbo buffer before flip\n");
goto cleanup;
}
/* Only 27 bit offset for legacy CRTC */
- r = radeon_bo_pin_restricted(work->new_rbo, RADEON_GEM_DOMAIN_VRAM,
+ r = radeon_bo_pin_restricted(new_rbo, RADEON_GEM_DOMAIN_VRAM,
ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
if (unlikely(r != 0)) {
- radeon_bo_unreserve(work->new_rbo);
+ radeon_bo_unreserve(new_rbo);
r = -EINVAL;
DRM_ERROR("failed to pin new rbo buffer before flip\n");
goto cleanup;
}
- radeon_bo_get_tiling_flags(work->new_rbo, &tiling_flags, NULL);
- radeon_bo_unreserve(work->new_rbo);
+ radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
+ radeon_bo_unreserve(new_rbo);
if (!ASIC_IS_AVIVO(rdev)) {
/* crtc offset is from display base addr not FB location */
@@ -460,6 +523,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
}
base &= ~7;
}
+ work->base = base;
r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id);
if (r) {
@@ -470,98 +534,42 @@ static void radeon_flip_work_func(struct work_struct *__work)
/* We borrow the event spin lock for protecting flip_work */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
- /* set the proper interrupt */
- radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
+ if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
+ DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ r = -EBUSY;
+ goto vblank_cleanup;
+ }
+ radeon_crtc->flip_status = RADEON_FLIP_PENDING;
+ radeon_crtc->flip_work = work;
- /* do the flip (mmio) */
- radeon_page_flip(rdev, radeon_crtc->crtc_id, base);
+ /* update crtc fb */
+ crtc->primary->fb = fb;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- up_read(&rdev->exclusive_lock);
- return;
+ queue_work(radeon_crtc->flip_queue, &work->flip_work);
+ return 0;
+
+vblank_cleanup:
+ drm_vblank_put(crtc->dev, radeon_crtc->crtc_id);
pflip_cleanup:
- if (unlikely(radeon_bo_reserve(work->new_rbo, false) != 0)) {
+ if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
DRM_ERROR("failed to reserve new rbo in error path\n");
goto cleanup;
}
- if (unlikely(radeon_bo_unpin(work->new_rbo) != 0)) {
+ if (unlikely(radeon_bo_unpin(new_rbo) != 0)) {
DRM_ERROR("failed to unpin new rbo in error path\n");
}
- radeon_bo_unreserve(work->new_rbo);
+ radeon_bo_unreserve(new_rbo);
cleanup:
drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
radeon_fence_unref(&work->fence);
kfree(work);
- up_read(&rdev->exclusive_lock);
-}
-
-static int radeon_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
-{
- struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- struct radeon_framebuffer *old_radeon_fb;
- struct radeon_framebuffer *new_radeon_fb;
- struct drm_gem_object *obj;
- struct radeon_flip_work *work;
- unsigned long flags;
-
- work = kzalloc(sizeof *work, GFP_KERNEL);
- if (work == NULL)
- return -ENOMEM;
-
- INIT_WORK(&work->flip_work, radeon_flip_work_func);
- INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
-
- work->rdev = rdev;
- work->crtc_id = radeon_crtc->crtc_id;
- work->fb = fb;
- work->event = event;
- /* schedule unpin of the old buffer */
- old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
- obj = old_radeon_fb->obj;
-
- /* take a reference to the old object */
- drm_gem_object_reference(obj);
- work->old_rbo = gem_to_radeon_bo(obj);
-
- new_radeon_fb = to_radeon_framebuffer(fb);
- obj = new_radeon_fb->obj;
- work->new_rbo = gem_to_radeon_bo(obj);
-
- spin_lock(&work->new_rbo->tbo.bdev->fence_lock);
- if (work->new_rbo->tbo.sync_obj)
- work->fence = radeon_fence_ref(work->new_rbo->tbo.sync_obj);
- spin_unlock(&work->new_rbo->tbo.bdev->fence_lock);
-
- /* We borrow the event spin lock for protecting flip_work */
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
-
- if (radeon_crtc->flip_work) {
- DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
- radeon_fence_unref(&work->fence);
- kfree(work);
- return -EBUSY;
- }
- radeon_crtc->flip_work = work;
-
- /* update crtc fb */
- crtc->primary->fb = fb;
-
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-
- queue_work(radeon_crtc->flip_queue, &work->flip_work);
-
- return 0;
+ return r;
}
static int
@@ -821,6 +829,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
struct radeon_device *rdev = dev->dev_private;
int ret = 0;
+ /* don't leak the edid if we already fetched it in detect() */
+ if (radeon_connector->edid)
+ goto got_edid;
+
/* on hw with routers, select right port */
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
@@ -859,6 +871,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
}
if (radeon_connector->edid) {
+got_edid:
drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 6e3017413386..e9e361084249 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -173,8 +173,9 @@ int radeon_dpm = -1;
int radeon_aspm = -1;
int radeon_runtime_pm = -1;
int radeon_hard_reset = 0;
-int radeon_vm_size = 4096;
+int radeon_vm_size = 4;
int radeon_vm_block_size = 9;
+int radeon_deep_color = 0;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -242,12 +243,15 @@ module_param_named(runpm, radeon_runtime_pm, int, 0444);
MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
module_param_named(hard_reset, radeon_hard_reset, int, 0444);
-MODULE_PARM_DESC(vm_size, "VM address space size in megabytes (default 4GB)");
+MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)");
module_param_named(vm_size, radeon_vm_size, int, 0444);
MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");
module_param_named(vm_block_size, radeon_vm_block_size, int, 0444);
+MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
+module_param_named(deep_color, radeon_deep_color, int, 0444);
+
static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 35d931881b4b..d25ae6acfd5a 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -579,7 +579,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN) {
struct radeon_fpriv *fpriv;
- struct radeon_bo_va *bo_va;
+ struct radeon_vm *vm;
int r;
fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
@@ -587,7 +587,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
return -ENOMEM;
}
- r = radeon_vm_init(rdev, &fpriv->vm);
+ vm = &fpriv->vm;
+ r = radeon_vm_init(rdev, vm);
if (r) {
kfree(fpriv);
return r;
@@ -596,22 +597,23 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
if (rdev->accel_working) {
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (r) {
- radeon_vm_fini(rdev, &fpriv->vm);
+ radeon_vm_fini(rdev, vm);
kfree(fpriv);
return r;
}
/* map the ib pool buffer read only into
* virtual address space */
- bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
- rdev->ring_tmp_bo.bo);
- r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+ vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
+ rdev->ring_tmp_bo.bo);
+ r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
+ RADEON_VA_IB_OFFSET,
RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_SNOOPED);
radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
if (r) {
- radeon_vm_fini(rdev, &fpriv->vm);
+ radeon_vm_fini(rdev, vm);
kfree(fpriv);
return r;
}
@@ -640,21 +642,19 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
struct radeon_fpriv *fpriv = file_priv->driver_priv;
- struct radeon_bo_va *bo_va;
+ struct radeon_vm *vm = &fpriv->vm;
int r;
if (rdev->accel_working) {
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (!r) {
- bo_va = radeon_vm_bo_find(&fpriv->vm,
- rdev->ring_tmp_bo.bo);
- if (bo_va)
- radeon_vm_bo_rmv(rdev, bo_va);
+ if (vm->ib_bo_va)
+ radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
}
}
- radeon_vm_fini(rdev, &fpriv->vm);
+ radeon_vm_fini(rdev, vm);
kfree(fpriv);
file_priv->driver_priv = NULL;
}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index ad0e4b8cc7e3..0592ddb0904b 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -46,6 +46,10 @@ struct radeon_device;
#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
+#define RADEON_MAX_HPD_PINS 7
+#define RADEON_MAX_CRTCS 6
+#define RADEON_MAX_AFMT_BLOCKS 7
+
enum radeon_rmx_type {
RMX_OFF,
RMX_FULL,
@@ -233,8 +237,8 @@ struct radeon_mode_info {
struct card_info *atom_card_info;
enum radeon_connector_table connector_table;
bool mode_config_initialized;
- struct radeon_crtc *crtcs[6];
- struct radeon_afmt *afmt[7];
+ struct radeon_crtc *crtcs[RADEON_MAX_CRTCS];
+ struct radeon_afmt *afmt[RADEON_MAX_AFMT_BLOCKS];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
/* DAC enable load detect */
@@ -302,6 +306,12 @@ struct radeon_atom_ss {
uint16_t amount;
};
+enum radeon_flip_status {
+ RADEON_FLIP_NONE,
+ RADEON_FLIP_PENDING,
+ RADEON_FLIP_SUBMITTED
+};
+
struct radeon_crtc {
struct drm_crtc base;
int crtc_id;
@@ -327,6 +337,7 @@ struct radeon_crtc {
/* page flipping */
struct workqueue_struct *flip_queue;
struct radeon_flip_work *flip_work;
+ enum radeon_flip_status flip_status;
/* pll sharing */
struct radeon_atom_ss ss;
bool ss_enabled;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 12c663e86ca1..e447e390d09a 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -73,8 +73,10 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
rdev->pm.dpm.ac_power = true;
else
rdev->pm.dpm.ac_power = false;
- if (rdev->asic->dpm.enable_bapm)
- radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
+ if (rdev->family == CHIP_ARUBA) {
+ if (rdev->asic->dpm.enable_bapm)
+ radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
+ }
mutex_unlock(&rdev->pm.mutex);
} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (rdev->pm.profile == PM_PROFILE_AUTO) {
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 899d9126cad6..725d3669014f 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -332,6 +332,7 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
bo_va->ref_count = 1;
INIT_LIST_HEAD(&bo_va->bo_list);
INIT_LIST_HEAD(&bo_va->vm_list);
+ INIT_LIST_HEAD(&bo_va->vm_status);
mutex_lock(&vm->mutex);
list_add(&bo_va->vm_list, &vm->va);
@@ -468,6 +469,19 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
head = &tmp->vm_list;
}
+ if (bo_va->soffset) {
+ /* add a clone of the bo_va to clear the old address */
+ tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+ if (!tmp) {
+ mutex_unlock(&vm->mutex);
+ return -ENOMEM;
+ }
+ tmp->soffset = bo_va->soffset;
+ tmp->eoffset = bo_va->eoffset;
+ tmp->vm = vm;
+ list_add(&tmp->vm_status, &vm->freed);
+ }
+
bo_va->soffset = soffset;
bo_va->eoffset = eoffset;
bo_va->flags = flags;
@@ -495,7 +509,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
mutex_unlock(&vm->mutex);
r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
- RADEON_GPU_PAGE_SIZE, false,
+ RADEON_GPU_PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, NULL, &pt);
if (r)
return r;
@@ -823,25 +837,19 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
* Object have to be reserved and mutex must be locked!
*/
int radeon_vm_bo_update(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo,
+ struct radeon_bo_va *bo_va,
struct ttm_mem_reg *mem)
{
+ struct radeon_vm *vm = bo_va->vm;
struct radeon_ib ib;
- struct radeon_bo_va *bo_va;
unsigned nptes, ndw;
uint64_t addr;
int r;
- bo_va = radeon_vm_bo_find(vm, bo);
- if (bo_va == NULL) {
- dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
- return -EINVAL;
- }
if (!bo_va->soffset) {
dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
- bo, vm);
+ bo_va->bo, vm);
return -EINVAL;
}
@@ -868,7 +876,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
trace_radeon_vm_bo_update(bo_va);
- nptes = radeon_bo_ngpu_pages(bo);
+ nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE;
/* padding, etc. */
ndw = 64;
@@ -911,33 +919,61 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
}
/**
+ * radeon_vm_clear_freed - clear freed BOs in the PT
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Make sure all freed BOs are cleared in the PT.
+ * Returns 0 for success.
+ *
+ * PTs have to be reserved and mutex must be locked!
+ */
+int radeon_vm_clear_freed(struct radeon_device *rdev,
+ struct radeon_vm *vm)
+{
+ struct radeon_bo_va *bo_va, *tmp;
+ int r;
+
+ list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
+ list_del(&bo_va->vm_status);
+ r = radeon_vm_bo_update(rdev, bo_va, NULL);
+ kfree(bo_va);
+ if (r)
+ return r;
+ }
+ return 0;
+}
+
+/**
* radeon_vm_bo_rmv - remove a bo to a specific vm
*
* @rdev: radeon_device pointer
* @bo_va: requested bo_va
*
* Remove @bo_va->bo from the requested vm (cayman+).
- * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
- * remove the ptes for @bo_va in the page table.
- * Returns 0 for success.
*
* Object have to be reserved!
*/
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
- struct radeon_bo_va *bo_va)
+void radeon_vm_bo_rmv(struct radeon_device *rdev,
+ struct radeon_bo_va *bo_va)
{
- int r = 0;
+ struct radeon_vm *vm = bo_va->vm;
- mutex_lock(&bo_va->vm->mutex);
- if (bo_va->soffset)
- r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
+ list_del(&bo_va->bo_list);
+ mutex_lock(&vm->mutex);
list_del(&bo_va->vm_list);
- mutex_unlock(&bo_va->vm->mutex);
- list_del(&bo_va->bo_list);
- kfree(bo_va);
- return r;
+ if (bo_va->soffset) {
+ bo_va->bo = NULL;
+ list_add(&bo_va->vm_status, &vm->freed);
+ } else {
+ kfree(bo_va);
+ }
+
+ mutex_unlock(&vm->mutex);
}
/**
@@ -975,11 +1011,13 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
int r;
vm->id = 0;
+ vm->ib_bo_va = NULL;
vm->fence = NULL;
vm->last_flush = NULL;
vm->last_id_use = NULL;
mutex_init(&vm->mutex);
INIT_LIST_HEAD(&vm->va);
+ INIT_LIST_HEAD(&vm->freed);
pd_size = radeon_vm_directory_size(rdev);
pd_entries = radeon_vm_num_pdes(rdev);
@@ -992,7 +1030,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
return -ENOMEM;
}
- r = radeon_bo_create(rdev, pd_size, align, false,
+ r = radeon_bo_create(rdev, pd_size, align, true,
RADEON_GEM_DOMAIN_VRAM, NULL,
&vm->page_directory);
if (r)
@@ -1034,7 +1072,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
kfree(bo_va);
}
}
-
+ list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status)
+ kfree(bo_va);
for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
radeon_bo_unref(&vm->page_tables[i].bo);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 237dd29d9f1c..3e21e869015f 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -406,8 +406,9 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
for (i = 0; i < rdev->num_crtc; i++) {
if (save->crtc_enabled[i]) {
tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
- if ((tmp & 0x3) != 0) {
- tmp &= ~0x3;
+ if ((tmp & 0x7) != 3) {
+ tmp &= ~0x7;
+ tmp |= 0x3;
WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
}
tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index da041a43d82e..3c76e1dcdf04 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2329,12 +2329,6 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev)
pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
ASIC_INTERNAL_MEMORY_SS, 0);
- /* disable ss, causes hangs on some cayman boards */
- if (rdev->family == CHIP_CAYMAN) {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
- }
-
if (pi->sclk_ss || pi->mclk_ss)
pi->dynamic_ss = true;
else
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 730cee2c34cf..9e854fd016da 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6103,6 +6103,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
+ wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
@@ -6376,14 +6377,16 @@ restart_ih:
case 147:
addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
+ /* reset addr and status */
+ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+ if (addr == 0x0 && status == 0x0)
+ break;
dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
addr);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
si_vm_decode_fault(rdev, status, addr);
- /* reset addr and status */
- WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
break;
case 176: /* RINGID0 CP_INT */
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 2a2822c03329..32e50be9c4ac 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1874,7 +1874,16 @@ int trinity_dpm_init(struct radeon_device *rdev)
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
pi->at[i] = TRINITY_AT_DFLT;
- pi->enable_bapm = false;
+ /* There are stability issues reported with bapm
+ * enabled when switching between AC and battery
+ * power. At the same time, some MSI boards hang
+ * if bapm is not enabled while dpm is. Just enable
+ * it for MSI boards for now.
+ */
+ if (rdev->pdev->subsystem_vendor == 0x1462)
+ pi->enable_bapm = true;
+ else
+ pi->enable_bapm = false;
pi->enable_nbps_policy = true;
pi->enable_sclk_ds = true;
pi->enable_gfx_power_gating = true;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index a89ad938eacf..b031b48dbb3c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -179,7 +179,6 @@ static int vmw_fb_set_par(struct fb_info *info)
vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
- vmw_write(vmw_priv, SVGA_REG_BYTES_PER_LINE, info->fix.line_length);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
}