Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/armada/armada_drm.h | 1
-rw-r--r--  drivers/gpu/drm/armada/armada_drv.c | 7
-rw-r--r--  drivers/gpu/drm/armada/armada_fbdev.c | 20
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.c | 7
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 12
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 2
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 35
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 20
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 41
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 88
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 23
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 47
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 34
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 58
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/subdev.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv50.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nvc0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nv50.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/clock.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/fb.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/i2c.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/instmem.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/init.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c | 445
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/base.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/ic.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/dfp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/overlay.c | 42
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/tvnv04.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/Kconfig | 9
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 87
-rw-r--r--  drivers/gpu/drm/radeon/atombios_i2c.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/dce6_afmt.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/mkregtable.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/ni_dpm.c | 28
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 55
-rw-r--r--  drivers/gpu/drm/radeon/radeon_trace.h | 33
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/cayman | 4
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/evergreen | 4
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dpm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 23
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 34
-rw-r--r--  drivers/gpu/drm/tegra/drm.h | 2
-rw-r--r--  drivers/gpu/drm/tegra/fb.c | 2
-rw-r--r--  drivers/gpu/drm/tegra/rgb.c | 11
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 6
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 118
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 5
-rw-r--r--  drivers/gpu/host1x/bus.c | 5
-rw-r--r--  drivers/gpu/host1x/hw/cdma_hw.c | 4
-rw-r--r--  drivers/gpu/host1x/hw/debug_hw.c | 4
109 files changed, 1301 insertions, 463 deletions
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index eef09ec9a5ff..a72cae03b99b 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -103,6 +103,7 @@ void armada_drm_queue_unref_work(struct drm_device *,
extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
int armada_fbdev_init(struct drm_device *);
+void armada_fbdev_lastclose(struct drm_device *);
void armada_fbdev_fini(struct drm_device *);
int armada_overlay_plane_create(struct drm_device *, unsigned long);
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 4f2b28354915..62d0ff3efddf 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -321,6 +321,11 @@ static struct drm_ioctl_desc armada_ioctls[] = {
DRM_UNLOCKED),
};
+static void armada_drm_lastclose(struct drm_device *dev)
+{
+ armada_fbdev_lastclose(dev);
+}
+
static const struct file_operations armada_drm_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
@@ -337,7 +342,7 @@ static struct drm_driver armada_drm_driver = {
.open = NULL,
.preclose = NULL,
.postclose = NULL,
- .lastclose = NULL,
+ .lastclose = armada_drm_lastclose,
.unload = armada_drm_unload,
.get_vblank_counter = drm_vblank_count,
.enable_vblank = armada_drm_enable_vblank,
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index dd5ea77dac96..948cb14c561e 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -105,9 +105,9 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
- DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08x\n",
- dfb->fb.width, dfb->fb.height,
- dfb->fb.bits_per_pixel, obj->phys_addr);
+ DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08llx\n",
+ dfb->fb.width, dfb->fb.height, dfb->fb.bits_per_pixel,
+ (unsigned long long)obj->phys_addr);
return 0;
@@ -177,6 +177,16 @@ int armada_fbdev_init(struct drm_device *dev)
return ret;
}
+void armada_fbdev_lastclose(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+
+ drm_modeset_lock_all(dev);
+ if (priv->fbdev)
+ drm_fb_helper_restore_fbdev_mode(priv->fbdev);
+ drm_modeset_unlock_all(dev);
+}
+
void armada_fbdev_fini(struct drm_device *dev)
{
struct armada_private *priv = dev->dev_private;
@@ -192,11 +202,11 @@ void armada_fbdev_fini(struct drm_device *dev)
framebuffer_release(info);
}
+ drm_fb_helper_fini(fbh);
+
if (fbh->fb)
fbh->fb->funcs->destroy(fbh->fb);
- drm_fb_helper_fini(fbh);
-
priv->fbdev = NULL;
}
}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 9f2356bae7fd..887816f43476 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -172,8 +172,9 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
obj->dev_addr = obj->linear->start;
}
- DRM_DEBUG_DRIVER("obj %p phys %#x dev %#x\n",
- obj, obj->phys_addr, obj->dev_addr);
+ DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
+ (unsigned long long)obj->phys_addr,
+ (unsigned long long)obj->dev_addr);
return 0;
}
@@ -557,7 +558,6 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
* refcount on the gem object itself.
*/
drm_gem_object_reference(obj);
- dma_buf_put(buf);
return obj;
}
}
@@ -573,6 +573,7 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
}
dobj->obj.import_attach = attach;
+ get_dma_buf(buf);
/*
* Don't call dma_buf_map_attachment() here - it maps the
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index fb7cf0e796f6..8835dcddfac3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -68,6 +68,8 @@
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
/* Force reduced-blanking timings for detailed modes */
#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
+/* Force 8bpc */
+#define EDID_QUIRK_FORCE_8BPC (1 << 8)
struct detailed_mode_closure {
struct drm_connector *connector;
@@ -128,6 +130,9 @@ static struct edid_quirk {
/* Medion MD 30217 PG */
{ "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
+
+ /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
+ { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
};
/*
@@ -2674,7 +2679,7 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
int modes = 0;
u8 cea_mode;
- if (video_db == NULL || video_index > video_len)
+ if (video_db == NULL || video_index >= video_len)
return 0;
/* CEA modes are numbered 1..127 */
@@ -2701,7 +2706,7 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
if (structure & (1 << 8)) {
newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
if (newmode) {
- newmode->flags = DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
+ newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
drm_mode_probed_add(connector, newmode);
modes++;
}
@@ -3435,6 +3440,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
drm_add_display_info(edid, &connector->display_info);
+ if (quirks & EDID_QUIRK_FORCE_8BPC)
+ connector->display_info.bpc = 8;
+
return num_modes;
}
EXPORT_SYMBOL(drm_add_edid_modes);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 85071a1c4547..b0733153dfd2 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1041,7 +1041,7 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
/* if equal delete the probed mode */
mode->status = pmode->status;
/* Merge type bits together */
- mode->type = pmode->type;
+ mode->type |= pmode->type;
list_del(&pmode->head);
drm_mode_destroy(connector->dev, pmode);
break;
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index f53d5246979c..66dd3a001cf1 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -566,11 +566,11 @@ err_unload:
if (dev->driver->unload)
dev->driver->unload(dev);
err_primary_node:
- drm_put_minor(dev->primary);
+ drm_unplug_minor(dev->primary);
err_render_node:
- drm_put_minor(dev->render);
+ drm_unplug_minor(dev->render);
err_control_node:
- drm_put_minor(dev->control);
+ drm_unplug_minor(dev->control);
err_agp:
if (dev->driver->bus->agp_destroy)
dev->driver->bus->agp_destroy(dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index ebc01503d50e..6f3400f3978a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -36,9 +36,9 @@ enum exynos_crtc_mode {
* @pipe: a crtc index created at load() with a new crtc object creation
* and the crtc object would be set to private->crtc array
* to get a crtc object corresponding to this pipe from private->crtc
- * array when irq interrupt occured. the reason of using this pipe is that
+ * array when irq interrupt occurred. the reason of using this pipe is that
* drm framework doesn't support multiple irq yet.
- * we can refer to the crtc to current hardware interrupt occured through
+ * we can refer to the crtc to current hardware interrupt occurred through
* this pipe value.
* @dpms: store the crtc dpms value
* @mode: store the crtc mode value
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index b676006a95a0..22b8f5eced80 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -173,28 +173,37 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
static void exynos_drm_preclose(struct drm_device *dev,
struct drm_file *file)
{
+ exynos_drm_subdrv_close(dev, file);
+}
+
+static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
+{
struct exynos_drm_private *private = dev->dev_private;
- struct drm_pending_vblank_event *e, *t;
+ struct drm_pending_vblank_event *v, *vt;
+ struct drm_pending_event *e, *et;
unsigned long flags;
- /* release events of current file */
+ if (!file->driver_priv)
+ return;
+
+ /* Release all events not unhandled by page flip handler. */
spin_lock_irqsave(&dev->event_lock, flags);
- list_for_each_entry_safe(e, t, &private->pageflip_event_list,
+ list_for_each_entry_safe(v, vt, &private->pageflip_event_list,
base.link) {
- if (e->base.file_priv == file) {
- list_del(&e->base.link);
- e->base.destroy(&e->base);
+ if (v->base.file_priv == file) {
+ list_del(&v->base.link);
+ drm_vblank_put(dev, v->pipe);
+ v->base.destroy(&v->base);
}
}
- spin_unlock_irqrestore(&dev->event_lock, flags);
- exynos_drm_subdrv_close(dev, file);
-}
+ /* Release all events handled by page flip handler but not freed. */
+ list_for_each_entry_safe(e, et, &file->event_list, link) {
+ list_del(&e->link);
+ e->destroy(e);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
-static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
-{
- if (!file->driver_priv)
- return;
kfree(file->driver_priv);
file->driver_priv = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 8adfc8f1e08f..30d76b2ff9c2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -345,7 +345,7 @@ static bool fimc_check_ovf(struct fimc_context *ctx)
fimc_write(cfg, EXYNOS_CIWDOFST);
- dev_err(ippdrv->dev, "occured overflow at %d, status 0x%x.\n",
+ dev_err(ippdrv->dev, "occurred overflow at %d, status 0x%x.\n",
ctx->id, status);
return true;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 23da72b5eae9..a61878bf5dcd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -31,7 +31,7 @@
#include "exynos_drm_iommu.h"
/*
- * FIMD is stand for Fully Interactive Mobile Display and
+ * FIMD stands for Fully Interactive Mobile Display and
* as a display controller, it transfers contents drawn on memory
* to a LCD Panel through Display Interfaces such as RGB or
* CPU Interface.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 7bccedca487a..380aec28840b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1126,7 +1126,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
* G2D interrupt event once current command list execution is
* finished.
* Otherwise only ACF bit should be set to INTEN register so
- * that one interrupt is occured after all command lists
+ * that one interrupt is occurred after all command lists
* have been completed.
*/
if (node->event) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 1ade191d84f4..be59d50d8b16 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -652,7 +652,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
int ret;
/*
- * alocate memory to be used for framebuffer.
+ * allocate memory to be used for framebuffer.
* - this callback would be called by user application
* with DRM_IOCTL_MODE_CREATE_DUMB command.
*/
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 702ec3abe85c..b8c818ba2ff4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -60,7 +60,7 @@ struct exynos_drm_gem_buf {
* @vma: a pointer to vm_area.
* @flags: indicate memory type to allocated buffer and cache attruibute.
*
- * P.S. this object would be transfered to user as kms_bo.handle so
+ * P.S. this object would be transferred to user as kms_bo.handle so
* user can access the buffer through kms_bo.handle.
*/
struct exynos_drm_gem_obj {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index cd6aebd53bd0..fa75059a6104 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1301,13 +1301,13 @@ static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
status = gsc_read(GSC_IRQ);
if (status & GSC_IRQ_STATUS_OR_IRQ) {
- dev_err(ippdrv->dev, "occured overflow at %d, status 0x%x.\n",
+ dev_err(ippdrv->dev, "occurred overflow at %d, status 0x%x.\n",
ctx->id, status);
return IRQ_NONE;
}
if (status & GSC_IRQ_STATUS_OR_FRM_DONE) {
- dev_dbg(ippdrv->dev, "occured frame done at %d, status 0x%x.\n",
+ dev_dbg(ippdrv->dev, "occurred frame done at %d, status 0x%x.\n",
ctx->id, status);
buf_id[EXYNOS_DRM_OPS_SRC] = gsc_get_src_buf_index(ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 824e0705c8d3..d519a4e5fe40 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -335,7 +335,7 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
} else {
/*
* Getting ippdrv capability by ipp_id.
- * some deivce not supported wb, output interface.
+ * some device not supported wb, output interface.
* so, user application detect correct ipp driver
* using this ioctl.
*/
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
index 4cadbea7dbde..ab1634befc05 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -48,7 +48,7 @@ struct drm_exynos_ipp_cmd_work {
/*
* A structure of command node.
*
- * @priv: IPP private infomation.
+ * @priv: IPP private information.
* @list: list head to command queue information.
* @event_list: list head of event.
* @mem_list: list head to source,destination memory queue information.
@@ -92,7 +92,7 @@ struct drm_exynos_ipp_buf_info {
};
/*
- * A structure of wb setting infomation.
+ * A structure of wb setting information.
*
* @enable: enable flag for wb.
* @refresh: HZ of the refresh rate.
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 0cab2d045135..5c648425c1e0 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -83,6 +83,14 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
+ /*
+ * The dri breadcrumb update races against the drm master disappearing.
+ * Instead of trying to fix this (this is by far not the only ums issue)
+ * just don't do the update in kms mode.
+ */
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
if (dev->primary->master) {
master_priv = dev->primary->master->driver_priv;
if (master_priv->sarea_priv)
@@ -1490,16 +1498,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
mutex_init(&dev_priv->dpio_lock);
- mutex_init(&dev_priv->rps.hw_lock);
mutex_init(&dev_priv->modeset_restore_lock);
- mutex_init(&dev_priv->pc8.lock);
- dev_priv->pc8.requirements_met = false;
- dev_priv->pc8.gpu_idle = false;
- dev_priv->pc8.irqs_disabled = false;
- dev_priv->pc8.enabled = false;
- dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
- INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
+ intel_pm_setup(dev);
intel_display_crc_init(dev);
@@ -1603,7 +1604,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
}
intel_irq_init(dev);
- intel_pm_init(dev);
intel_uncore_sanitize(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
@@ -1848,8 +1848,10 @@ void i915_driver_lastclose(struct drm_device * dev)
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
+ mutex_lock(&dev->struct_mutex);
i915_gem_context_close(dev, file_priv);
i915_gem_release(dev, file_priv);
+ mutex_unlock(&dev->struct_mutex);
}
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 989be12cdd6e..5b7b7e06cb3a 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -534,8 +534,10 @@ static int i915_drm_freeze(struct drm_device *dev)
* Disable CRTCs directly since we want to preserve sw state
* for _thaw.
*/
+ mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
dev_priv->display.crtc_disable(crtc);
+ mutex_unlock(&dev->mode_config.mutex);
intel_modeset_suspend_hw(dev);
}
@@ -649,6 +651,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
intel_modeset_init_hw(dev);
drm_modeset_lock_all(dev);
+ drm_mode_config_reset(dev);
intel_modeset_setup_hw_state(dev, true);
drm_modeset_unlock_all(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ccdbecca070d..90fcccba17b0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1755,8 +1755,13 @@ struct drm_i915_file_private {
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
((dev)->pdev->device & 0xFF00) == 0x0C00)
-#define IS_ULT(dev) (IS_HASWELL(dev) && \
+#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
+ (((dev)->pdev->device & 0xf) == 0x2 || \
+ ((dev)->pdev->device & 0xf) == 0x6 || \
+ ((dev)->pdev->device & 0xf) == 0xe))
+#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
((dev)->pdev->device & 0xFF00) == 0x0A00)
+#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
((dev)->pdev->device & 0x00F0) == 0x0020)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
@@ -1901,9 +1906,7 @@ void i915_queue_hangcheck(struct drm_device *dev);
void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 12bbd5eac70d..76d3d1ab73c6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2343,15 +2343,24 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
kfree(request);
}
-static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
- struct intel_ring_buffer *ring)
+static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
+ struct intel_ring_buffer *ring)
{
- u32 completed_seqno;
- u32 acthd;
+ u32 completed_seqno = ring->get_seqno(ring, false);
+ u32 acthd = intel_ring_get_active_head(ring);
+ struct drm_i915_gem_request *request;
+
+ list_for_each_entry(request, &ring->request_list, list) {
+ if (i915_seqno_passed(completed_seqno, request->seqno))
+ continue;
- acthd = intel_ring_get_active_head(ring);
- completed_seqno = ring->get_seqno(ring, false);
+ i915_set_reset_status(ring, request, acthd);
+ }
+}
+static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
+ struct intel_ring_buffer *ring)
+{
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
@@ -2359,9 +2368,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
struct drm_i915_gem_request,
list);
- if (request->seqno > completed_seqno)
- i915_set_reset_status(ring, request, acthd);
-
i915_gem_free_request(request);
}
@@ -2403,8 +2409,16 @@ void i915_gem_reset(struct drm_device *dev)
struct intel_ring_buffer *ring;
int i;
+ /*
+ * Before we free the objects from the requests, we need to inspect
+ * them for finding the guilty party. As the requests only borrow
+ * their reference to the objects, the inspection must be done first.
+ */
+ for_each_ring(ring, dev_priv, i)
+ i915_gem_reset_ring_status(dev_priv, ring);
+
for_each_ring(ring, dev_priv, i)
- i915_gem_reset_ring_lists(dev_priv, ring);
+ i915_gem_reset_ring_cleanup(dev_priv, ring);
i915_gem_cleanup_ringbuffer(dev);
@@ -4442,10 +4456,9 @@ i915_gem_init_hw(struct drm_device *dev)
if (dev_priv->ellc_size)
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
- if (IS_HSW_GT3(dev))
- I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED);
- else
- I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
+ if (IS_HASWELL(dev))
+ I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
+ LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
if (HAS_PCH_NOP(dev)) {
u32 temp = I915_READ(GEN7_MSG_CTL);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 72a3df32292f..b0f42b9ca037 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -347,10 +347,8 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- mutex_lock(&dev->struct_mutex);
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
idr_destroy(&file_priv->context_idr);
- mutex_unlock(&dev->struct_mutex);
}
static struct i915_hw_context *
@@ -423,11 +421,21 @@ static int do_switch(struct i915_hw_context *to)
if (ret)
return ret;
- /* Clear this page out of any CPU caches for coherent swap-in/out. Note
+ /*
+ * Pin can switch back to the default context if we end up calling into
+ * evict_everything - as a last ditch gtt defrag effort that also
+ * switches to the default context. Hence we need to reload from here.
+ */
+ from = ring->last_context;
+
+ /*
+ * Clear this page out of any CPU caches for coherent swap-in/out. Note
* that thanks to write = false in this call and us not setting any gpu
* write domains when putting a context object onto the active list
* (when switching away from it), this won't block.
- * XXX: We need a real interface to do this instead of trickery. */
+ *
+ * XXX: We need a real interface to do this instead of trickery.
+ */
ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
if (ret) {
i915_gem_object_unpin(to->obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 7d5752fda5f1..9bb533e0d762 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -125,13 +125,15 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
ret = i915_gem_object_get_pages(obj);
if (ret)
- goto error;
+ goto err;
+
+ i915_gem_object_pin_pages(obj);
ret = -ENOMEM;
pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
if (pages == NULL)
- goto error;
+ goto err_unpin;
i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
@@ -141,15 +143,16 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
drm_free_large(pages);
if (!obj->dma_buf_vmapping)
- goto error;
+ goto err_unpin;
obj->vmapping_count = 1;
- i915_gem_object_pin_pages(obj);
out_unlock:
mutex_unlock(&dev->struct_mutex);
return obj->dma_buf_vmapping;
-error:
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+err:
mutex_unlock(&dev->struct_mutex);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index b7376533633d..8f3adc7d0dc8 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -88,6 +88,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
} else
drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
+search_again:
/* First see if there is a large enough contiguous idle region... */
list_for_each_entry(vma, &vm->inactive_list, mm_list) {
if (mark_free(vma, &unwind_list))
@@ -115,10 +116,17 @@ none:
list_del_init(&vma->exec_list);
}
- /* We expect the caller to unpin, evict all and try again, or give up.
- * So calling i915_gem_evict_vm() is unnecessary.
+ /* Can we unpin some objects such as idle hw contents,
+ * or pending flips?
*/
- return -ENOSPC;
+ ret = nonblocking ? -ENOSPC : i915_gpu_idle(dev);
+ if (ret)
+ return ret;
+
+ /* Only idle the GPU and repeat the search once */
+ i915_gem_retire_requests(dev);
+ nonblocking = true;
+ goto search_again;
found:
/* drm_mm doesn't allow any other other operations while
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 885d595e0e02..a3ba9a8cd687 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,6 +33,9 @@
#include "intel_drv.h"
#include <linux/dma_remapping.h>
+#define __EXEC_OBJECT_HAS_PIN (1<<31)
+#define __EXEC_OBJECT_HAS_FENCE (1<<30)
+
struct eb_vmas {
struct list_head vmas;
int and;
@@ -90,7 +93,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
{
struct drm_i915_gem_object *obj;
struct list_head objects;
- int i, ret = 0;
+ int i, ret;
INIT_LIST_HEAD(&objects);
spin_lock(&file->table_lock);
@@ -103,7 +106,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
ret = -ENOENT;
- goto out;
+ goto err;
}
if (!list_empty(&obj->obj_exec_link)) {
@@ -111,7 +114,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
ret = -EINVAL;
- goto out;
+ goto err;
}
drm_gem_object_reference(&obj->base);
@@ -120,9 +123,13 @@ eb_lookup_vmas(struct eb_vmas *eb,
spin_unlock(&file->table_lock);
i = 0;
- list_for_each_entry(obj, &objects, obj_exec_link) {
+ while (!list_empty(&objects)) {
struct i915_vma *vma;
+ obj = list_first_entry(&objects,
+ struct drm_i915_gem_object,
+ obj_exec_link);
+
/*
* NOTE: We can leak any vmas created here when something fails
* later on. But that's no issue since vma_unbind can deal with
@@ -135,10 +142,12 @@ eb_lookup_vmas(struct eb_vmas *eb,
if (IS_ERR(vma)) {
DRM_DEBUG("Failed to lookup VMA\n");
ret = PTR_ERR(vma);
- goto out;
+ goto err;
}
+ /* Transfer ownership from the objects list to the vmas list. */
list_add_tail(&vma->exec_list, &eb->vmas);
+ list_del_init(&obj->obj_exec_link);
vma->exec_entry = &exec[i];
if (eb->and < 0) {
@@ -152,16 +161,22 @@ eb_lookup_vmas(struct eb_vmas *eb,
++i;
}
+ return 0;
-out:
+
+err:
while (!list_empty(&objects)) {
obj = list_first_entry(&objects,
struct drm_i915_gem_object,
obj_exec_link);
list_del_init(&obj->obj_exec_link);
- if (ret)
- drm_gem_object_unreference(&obj->base);
+ drm_gem_object_unreference(&obj->base);
}
+ /*
+ * Objects already transfered to the vmas list will be unreferenced by
+ * eb_destroy.
+ */
+
return ret;
}
@@ -187,7 +202,28 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
}
}
-static void eb_destroy(struct eb_vmas *eb) {
+static void
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
+{
+ struct drm_i915_gem_exec_object2 *entry;
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ if (!drm_mm_node_allocated(&vma->node))
+ return;
+
+ entry = vma->exec_entry;
+
+ if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+ i915_gem_object_unpin_fence(obj);
+
+ if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+ i915_gem_object_unpin(obj);
+
+ entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
+}
+
+static void eb_destroy(struct eb_vmas *eb)
+{
while (!list_empty(&eb->vmas)) {
struct i915_vma *vma;
@@ -195,6 +231,7 @@ static void eb_destroy(struct eb_vmas *eb) {
struct i915_vma,
exec_list);
list_del_init(&vma->exec_list);
+ i915_gem_execbuffer_unreserve_vma(vma);
drm_gem_object_unreference(&vma->obj->base);
}
kfree(eb);
@@ -478,9 +515,6 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb,
return ret;
}
-#define __EXEC_OBJECT_HAS_PIN (1<<31)
-#define __EXEC_OBJECT_HAS_FENCE (1<<30)
-
static int
need_reloc_mappable(struct i915_vma *vma)
{
@@ -552,26 +586,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
return 0;
}
-static void
-i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
-{
- struct drm_i915_gem_exec_object2 *entry;
- struct drm_i915_gem_object *obj = vma->obj;
-
- if (!drm_mm_node_allocated(&vma->node))
- return;
-
- entry = vma->exec_entry;
-
- if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
- i915_gem_object_unpin_fence(obj);
-
- if (entry->flags & __EXEC_OBJECT_HAS_PIN)
- i915_gem_object_unpin(obj);
-
- entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
-}
-
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct list_head *vmas,
@@ -670,13 +684,14 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
goto err;
}
-err: /* Decrement pin count for bound objects */
- list_for_each_entry(vma, vmas, exec_list)
- i915_gem_execbuffer_unreserve_vma(vma);
-
+err:
if (ret != -ENOSPC || retry++)
return ret;
+ /* Decrement pin count for bound objects */
+ list_for_each_entry(vma, vmas, exec_list)
+ i915_gem_execbuffer_unreserve_vma(vma);
+
ret = i915_gem_evict_vm(vm, true);
if (ret)
return ret;
@@ -708,6 +723,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
while (!list_empty(&eb->vmas)) {
vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
list_del_init(&vma->exec_list);
+ i915_gem_execbuffer_unreserve_vma(vma);
drm_gem_object_unreference(&vma->obj->base);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 3620a1b0a73c..3540569948db 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -57,7 +57,9 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
+#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
+#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
@@ -185,10 +187,10 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
case I915_CACHE_NONE:
break;
case I915_CACHE_WT:
- pte |= HSW_WT_ELLC_LLC_AGE0;
+ pte |= HSW_WT_ELLC_LLC_AGE3;
break;
default:
- pte |= HSW_WB_ELLC_LLC_AGE0;
+ pte |= HSW_WB_ELLC_LLC_AGE3;
break;
}
@@ -335,8 +337,8 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
kfree(ppgtt->gen8_pt_dma_addr[i]);
}
- __free_pages(ppgtt->gen8_pt_pages, ppgtt->num_pt_pages << PAGE_SHIFT);
- __free_pages(ppgtt->pd_pages, ppgtt->num_pd_pages << PAGE_SHIFT);
+ __free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
+ __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
}
/**
@@ -904,14 +906,12 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
WARN_ON(readq(&gtt_entries[i-1])
!= gen8_pte_encode(addr, level, true));
-#if 0 /* TODO: Still needed on GEN8? */
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
* have finished.
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
-#endif
}
/*
@@ -1239,6 +1239,11 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
if (bdw_gmch_ctl)
bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+ if (bdw_gmch_ctl > 4) {
+ WARN_ON(!i915_preliminary_hw_support);
+ return 4<<20;
+ }
+
return bdw_gmch_ctl << 20;
}
@@ -1260,14 +1265,14 @@ static int ggtt_probe_common(struct drm_device *dev,
size_t gtt_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- phys_addr_t gtt_bus_addr;
+ phys_addr_t gtt_phys_addr;
int ret;
/* For Modern GENs the PTEs and register space are split in the BAR */
- gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
+ gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
(pci_resource_len(dev->pdev, 0) / 2);
- dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
+ dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
if (!dev_priv->gtt.gsm) {
DRM_ERROR("Failed to map the gtt page table\n");
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 5d1dedc02f15..f13d5edc39d5 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2713,6 +2713,8 @@ static void gen8_irq_preinstall(struct drm_device *dev)
#undef GEN8_IRQ_INIT_NDX
POSTING_READ(GEN8_PCU_IIR);
+
+ ibx_irq_preinstall(dev);
}
static void ibx_hpd_irq_setup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f9eafb6ed523..ee2742122a02 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -235,6 +235,7 @@
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
+#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
#define MI_FLUSH_DW_STORE_INDEX (1<<21)
#define MI_INVALIDATE_TLB (1<<18)
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 330077bcd0bd..b69dc3e66c16 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -173,7 +173,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
ddi_translations = ddi_translations_dp;
break;
case PORT_D:
- if (intel_dpd_is_edp(dev))
+ if (intel_dp_is_edp(dev, PORT_D))
ddi_translations = ddi_translations_edp;
else
ddi_translations = ddi_translations_dp;
@@ -1057,12 +1057,18 @@ void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
enum pipe pipe;
struct intel_crtc *intel_crtc;
+ dev_priv->ddi_plls.spll_refcount = 0;
+ dev_priv->ddi_plls.wrpll1_refcount = 0;
+ dev_priv->ddi_plls.wrpll2_refcount = 0;
+
for_each_pipe(pipe) {
intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
- if (!intel_crtc->active)
+ if (!intel_crtc->active) {
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
continue;
+ }
intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
pipe);
@@ -1158,9 +1164,10 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
- if (type == INTEL_OUTPUT_EDP) {
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
ironlake_edp_panel_vdd_on(intel_dp);
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
ironlake_edp_panel_off(intel_dp);
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 7ec8b488bb1d..2bde35d34eb9 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5815,7 +5815,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
uint16_t postoff = 0;
if (intel_crtc->config.limited_color_range)
- postoff = (16 * (1 << 13) / 255) & 0x1fff;
+ postoff = (16 * (1 << 12) / 255) & 0x1fff;
I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
@@ -6303,7 +6303,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
uint32_t val;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
- WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n",
+ WARN(crtc->active, "CRTC for pipe %c enabled\n",
pipe_name(crtc->pipe));
WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
@@ -6402,7 +6402,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
/* Make sure we're not on PC8 state before disabling PC8, otherwise
* we'll hang the machine! */
- dev_priv->uncore.funcs.force_wake_get(dev_priv);
+ gen6_gt_force_wake_get(dev_priv);
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -6436,7 +6436,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
DRM_ERROR("Switching back to LCPLL failed\n");
}
- dev_priv->uncore.funcs.force_wake_put(dev_priv);
+ gen6_gt_force_wake_put(dev_priv);
}
void hsw_enable_pc8_work(struct work_struct *__work)
@@ -8354,7 +8354,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
DERRMR_PIPEB_PRI_FLIP_DONE |
DERRMR_PIPEC_PRI_FLIP_DONE));
- intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1));
+ intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
+ MI_SRM_LRM_GLOBAL_GTT);
intel_ring_emit(ring, DERRMR);
intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
}
@@ -9134,7 +9135,7 @@ intel_pipe_config_compare(struct drm_device *dev,
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
- if (!IS_HASWELL(dev)) {
+ if (!HAS_DDI(dev)) {
PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
}
@@ -10049,7 +10050,7 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_ddi_init(dev, PORT_D);
} else if (HAS_PCH_SPLIT(dev)) {
int found;
- dpd_is_edp = intel_dpd_is_edp(dev);
+ dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
if (has_edp_a(dev))
intel_dp_init(dev, DP_A, PORT_A);
@@ -10086,8 +10087,7 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
PORT_C);
if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
- intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C,
- PORT_C);
+ intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
}
intel_dsi_init(dev);
@@ -10541,11 +10541,20 @@ static struct intel_quirk intel_quirks[] = {
/* Sony Vaio Y cannot use SSC on LVDS */
{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
- /*
- * All GM45 Acer (and its brands eMachines and Packard Bell) laptops
- * seem to use inverted backlight PWM.
- */
- { 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness },
+ /* Acer Aspire 5734Z must invert backlight brightness */
+ { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
+
+ /* Acer/eMachines G725 */
+ { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
+
+ /* Acer/eMachines e725 */
+ { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
+
+ /* Acer/Packard Bell NCL20 */
+ { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
+
+ /* Acer Aspire 4736Z */
+ { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
/* Dell XPS13 HD Sandy Bridge */
{ 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
@@ -11036,8 +11045,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
}
intel_modeset_check_state(dev);
-
- drm_mode_config_reset(dev);
}
void intel_modeset_gem_init(struct drm_device *dev)
@@ -11046,7 +11053,10 @@ void intel_modeset_gem_init(struct drm_device *dev)
intel_setup_overlay(dev);
+ mutex_lock(&dev->mode_config.mutex);
+ drm_mode_config_reset(dev);
intel_modeset_setup_hw_state(dev, false);
+ mutex_unlock(&dev->mode_config.mutex);
}
void intel_modeset_cleanup(struct drm_device *dev)
@@ -11125,14 +11135,15 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
u16 gmch_ctrl;
- pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
+ pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
if (state)
gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
else
gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
- pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
+ pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 0b2e842fef01..30c627c7b7ba 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -3326,11 +3326,19 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
}
/* check the VBT to see whether the eDP is on DP-D port */
-bool intel_dpd_is_edp(struct drm_device *dev)
+bool intel_dp_is_edp(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
union child_device_config *p_child;
int i;
+ static const short port_mapping[] = {
+ [PORT_B] = PORT_IDPB,
+ [PORT_C] = PORT_IDPC,
+ [PORT_D] = PORT_IDPD,
+ };
+
+ if (port == PORT_A)
+ return true;
if (!dev_priv->vbt.child_dev_num)
return false;
@@ -3338,7 +3346,7 @@ bool intel_dpd_is_edp(struct drm_device *dev)
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
p_child = dev_priv->vbt.child_dev + i;
- if (p_child->common.dvo_port == PORT_IDPD &&
+ if (p_child->common.dvo_port == port_mapping[port] &&
(p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
(DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
return true;
@@ -3616,26 +3624,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
- type = DRM_MODE_CONNECTOR_DisplayPort;
- /*
- * FIXME : We need to initialize built-in panels before external panels.
- * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
- */
- switch (port) {
- case PORT_A:
+ if (intel_dp_is_edp(dev, port))
type = DRM_MODE_CONNECTOR_eDP;
- break;
- case PORT_C:
- if (IS_VALLEYVIEW(dev))
- type = DRM_MODE_CONNECTOR_eDP;
- break;
- case PORT_D:
- if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev))
- type = DRM_MODE_CONNECTOR_eDP;
- break;
- default: /* silence GCC warning */
- break;
- }
+ else
+ type = DRM_MODE_CONNECTOR_DisplayPort;
/*
* For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1e49aa8f5377..79f91f26e288 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -708,7 +708,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder);
void intel_dp_check_link_status(struct intel_dp *intel_dp);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
-bool intel_dpd_is_edp(struct drm_device *dev);
+bool intel_dp_is_edp(struct drm_device *dev, enum port port);
void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
void ironlake_edp_panel_on(struct intel_dp *intel_dp);
@@ -821,6 +821,7 @@ void intel_update_sprite_watermarks(struct drm_plane *plane,
uint32_t sprite_width, int pixel_size,
bool enabled, bool scaled);
void intel_init_pm(struct drm_device *dev);
+void intel_pm_setup(struct drm_device *dev);
bool intel_fbc_enabled(struct drm_device *dev);
void intel_update_fbc(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index f161ac02c4f6..e6f782d1c669 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -451,7 +451,9 @@ static u32 intel_panel_get_backlight(struct drm_device *dev,
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_BROADWELL(dev)) {
+ val = I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
+ } else if (HAS_PCH_SPLIT(dev)) {
val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
} else {
if (IS_VALLEYVIEW(dev))
@@ -479,6 +481,13 @@ static u32 intel_panel_get_backlight(struct drm_device *dev,
return val;
}
+static void intel_bdw_panel_set_backlight(struct drm_device *dev, u32 level)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
+}
+
static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -496,7 +505,9 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev,
DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
level = intel_panel_compute_brightness(dev, pipe, level);
- if (HAS_PCH_SPLIT(dev))
+ if (IS_BROADWELL(dev))
+ return intel_bdw_panel_set_backlight(dev, level);
+ else if (HAS_PCH_SPLIT(dev))
return intel_pch_panel_set_backlight(dev, level);
if (is_backlight_combination_mode(dev)) {
@@ -666,7 +677,16 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
POSTING_READ(reg);
I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
- if (HAS_PCH_SPLIT(dev) &&
+ if (IS_BROADWELL(dev)) {
+ /*
+ * Broadwell requires PCH override to drive the PCH
+ * backlight pin. The above will configure the CPU
+ * backlight pin, which we don't plan to use.
+ */
+ tmp = I915_READ(BLC_PWM_PCH_CTL1);
+ tmp |= BLM_PCH_OVERRIDE_ENABLE | BLM_PCH_PWM_ENABLE;
+ I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
+ } else if (HAS_PCH_SPLIT(dev) &&
!(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
tmp = I915_READ(BLC_PWM_PCH_CTL1);
tmp |= BLM_PCH_PWM_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index caf2ee4e5441..26c29c173221 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1180,7 +1180,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
clock = adjusted_mode->crtc_clock;
- htotal = adjusted_mode->htotal;
+ htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
pixel_size = crtc->fb->bits_per_pixel / 8;
@@ -1267,7 +1267,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
crtc = intel_get_crtc_for_plane(dev, plane);
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
clock = adjusted_mode->crtc_clock;
- htotal = adjusted_mode->htotal;
+ htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
pixel_size = crtc->fb->bits_per_pixel / 8;
@@ -1498,7 +1498,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
const struct drm_display_mode *adjusted_mode =
&to_intel_crtc(crtc)->config.adjusted_mode;
int clock = adjusted_mode->crtc_clock;
- int htotal = adjusted_mode->htotal;
+ int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
int pixel_size = crtc->fb->bits_per_pixel / 8;
unsigned long line_time_us;
@@ -1624,7 +1624,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
const struct drm_display_mode *adjusted_mode =
&to_intel_crtc(enabled)->config.adjusted_mode;
int clock = adjusted_mode->crtc_clock;
- int htotal = adjusted_mode->htotal;
+ int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
int pixel_size = enabled->fb->bits_per_pixel / 8;
unsigned long line_time_us;
@@ -1776,7 +1776,7 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
crtc = intel_get_crtc_for_plane(dev, plane);
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
clock = adjusted_mode->crtc_clock;
- htotal = adjusted_mode->htotal;
+ htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
pixel_size = crtc->fb->bits_per_pixel / 8;
@@ -2469,8 +2469,9 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
/* The WM are computed with base on how long it takes to fill a single
* row at the given clock rate, multiplied by 8.
* */
- linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->clock);
- ips_linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8,
+ linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
+ mode->crtc_clock);
+ ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
intel_ddi_get_cdclk_freq(dev_priv));
return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
@@ -5684,8 +5685,11 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
bool is_enabled, enable_requested;
+ unsigned long irqflags;
uint32_t tmp;
+ WARN_ON(dev_priv->pc8.enabled);
+
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
@@ -5701,9 +5705,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
HSW_PWR_WELL_STATE_ENABLED), 20))
DRM_ERROR("Timeout enabling power well\n");
}
+
+ if (IS_BROADWELL(dev)) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
+ dev_priv->de_irq_mask[PIPE_B]);
+ I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
+ ~dev_priv->de_irq_mask[PIPE_B] |
+ GEN8_PIPE_VBLANK);
+ I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
+ dev_priv->de_irq_mask[PIPE_C]);
+ I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
+ ~dev_priv->de_irq_mask[PIPE_C] |
+ GEN8_PIPE_VBLANK);
+ POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
} else {
if (enable_requested) {
- unsigned long irqflags;
enum pipe p;
I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
@@ -5730,16 +5749,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
static void __intel_power_well_get(struct drm_device *dev,
struct i915_power_well *power_well)
{
- if (!power_well->count++)
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!power_well->count++) {
+ hsw_disable_package_c8(dev_priv);
__intel_set_power_well(dev, true);
+ }
}
static void __intel_power_well_put(struct drm_device *dev,
struct i915_power_well *power_well)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
WARN_ON(!power_well->count);
- if (!--power_well->count && i915_disable_power_well)
+ if (!--power_well->count && i915_disable_power_well) {
__intel_set_power_well(dev, false);
+ hsw_enable_package_c8(dev_priv);
+ }
}
void intel_display_power_get(struct drm_device *dev,
@@ -6129,10 +6156,19 @@ int vlv_freq_opcode(int ddr_freq, int val)
return val;
}
-void intel_pm_init(struct drm_device *dev)
+void intel_pm_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ mutex_init(&dev_priv->rps.hw_lock);
+
+ mutex_init(&dev_priv->pc8.lock);
+ dev_priv->pc8.requirements_met = false;
+ dev_priv->pc8.gpu_idle = false;
+ dev_priv->pc8.irqs_disabled = false;
+ dev_priv->pc8.enabled = false;
+ dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
+ INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
intel_gen6_powersave_work);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b620337e6d67..c2f09d456300 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -965,6 +965,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
} else if (IS_GEN6(ring->dev)) {
mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
} else {
+ /* XXX: gen8 returns to sanity */
mmio = RING_HWS_PGA(ring->mmio_base);
}
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 0b02078a0b84..25cbe073c388 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -784,6 +784,7 @@ static int gen6_do_reset(struct drm_device *dev)
int intel_gpu_reset(struct drm_device *dev)
{
switch (INTEL_INFO(dev)->gen) {
+ case 8:
case 7:
case 6: return gen6_do_reset(dev);
case 5: return ironlake_do_reset(dev);
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index edcf801613e6..b3fa1ba191b7 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -59,6 +59,7 @@ nouveau-y += core/subdev/clock/nv40.o
nouveau-y += core/subdev/clock/nv50.o
nouveau-y += core/subdev/clock/nv84.o
nouveau-y += core/subdev/clock/nva3.o
+nouveau-y += core/subdev/clock/nvaa.o
nouveau-y += core/subdev/clock/nvc0.o
nouveau-y += core/subdev/clock/nve0.o
nouveau-y += core/subdev/clock/pllnv04.o
diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/core/core/subdev.c
index 48f06378d3f9..2ea5568b6cf5 100644
--- a/drivers/gpu/drm/nouveau/core/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/core/core/subdev.c
@@ -104,11 +104,8 @@ nouveau_subdev_create_(struct nouveau_object *parent,
if (parent) {
struct nouveau_device *device = nv_device(parent);
- int subidx = nv_hclass(subdev) & 0xff;
-
subdev->debug = nouveau_dbgopt(device->dbgopt, subname);
subdev->mmio = nv_subdev(device)->mmio;
- device->subdev[subidx] = *pobject;
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index 9135b25a29d0..dd01c6c435d6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -268,6 +268,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
if (ret)
return ret;
+ device->subdev[i] = devobj->subdev[i];
+
/* note: can't init *any* subdevs until devinit has been run
* due to not knowing exactly what the vbios init tables will
* mess with. devinit also can't be run until all of its
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
index db139827047c..db3fc7be856a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -283,7 +283,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
@@ -311,7 +311,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index 8d06eef2b9ee..dbc5e33de94f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -161,7 +161,7 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 5f555788121c..e6352bd5b4ff 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -33,6 +33,7 @@
#include <engine/dmaobj.h>
#include <engine/fifo.h>
+#include "nv04.h"
#include "nv50.h"
/*******************************************************************************
@@ -460,6 +461,8 @@ nv50_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_subdev(priv)->intr = nv04_fifo_intr;
nv_engine(priv)->cclass = &nv50_fifo_cclass;
nv_engine(priv)->sclass = nv50_fifo_sclass;
+ priv->base.pause = nv04_fifo_pause;
+ priv->base.start = nv04_fifo_start;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index 0908dc834c84..fe0f41e65d9b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -35,6 +35,7 @@
#include <engine/dmaobj.h>
#include <engine/fifo.h>
+#include "nv04.h"
#include "nv50.h"
/*******************************************************************************
@@ -432,6 +433,8 @@ nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_subdev(priv)->intr = nv04_fifo_intr;
nv_engine(priv)->cclass = &nv84_fifo_cclass;
nv_engine(priv)->sclass = nv84_fifo_sclass;
+ priv->base.pause = nv04_fifo_pause;
+ priv->base.start = nv04_fifo_start;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 434bb4b0fa2e..5c8a63dc506a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -334,7 +334,7 @@ nvc0_graph_mthd(struct nvc0_graph_priv *priv, struct nvc0_graph_mthd *mthds)
while ((mthd = &mthds[i++]) && (init = mthd->init)) {
u32 addr = 0x80000000 | mthd->oclass;
for (data = 0; init->count; init++) {
- if (data != init->data) {
+ if (init == mthd->init || data != init->data) {
nv_wr32(priv, 0x40448c, init->data);
data = init->data;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index b574dd4bb828..5ce686ee729e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -176,7 +176,7 @@ nv50_software_context_ctor(struct nouveau_object *parent,
if (ret)
return ret;
- chan->vblank.nr_event = pdisp->vblank->index_nr;
+ chan->vblank.nr_event = pdisp ? pdisp->vblank->index_nr : 0;
chan->vblank.event = kzalloc(chan->vblank.nr_event *
sizeof(*chan->vblank.event), GFP_KERNEL);
if (!chan->vblank.event)
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
index e2675bc0edba..8f4ced75444a 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
@@ -14,6 +14,9 @@ enum nv_clk_src {
nv_clk_src_hclk,
nv_clk_src_hclkm3,
nv_clk_src_hclkm3d2,
+ nv_clk_src_hclkm2d3, /* NVAA */
+ nv_clk_src_hclkm4, /* NVAA */
+ nv_clk_src_cclk, /* NVAA */
nv_clk_src_host,
@@ -127,6 +130,7 @@ extern struct nouveau_oclass nv04_clock_oclass;
extern struct nouveau_oclass nv40_clock_oclass;
extern struct nouveau_oclass *nv50_clock_oclass;
extern struct nouveau_oclass *nv84_clock_oclass;
+extern struct nouveau_oclass *nvaa_clock_oclass;
extern struct nouveau_oclass nva3_clock_oclass;
extern struct nouveau_oclass nvc0_clock_oclass;
extern struct nouveau_oclass nve0_clock_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index 8541aa382ff2..d89dbdf39b0d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -75,6 +75,11 @@ struct nouveau_fb {
static inline struct nouveau_fb *
nouveau_fb(void *obj)
{
+ /* fbram uses this before device subdev pointer is valid */
+ if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
+ nv_subidx(obj) == NVDEV_SUBDEV_FB)
+ return obj;
+
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB];
}
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index 9fa5da723871..7f50a858b16f 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -73,7 +73,7 @@ struct nouveau_i2c {
int (*identify)(struct nouveau_i2c *, int index,
const char *what, struct nouveau_i2c_board_info *,
bool (*match)(struct nouveau_i2c_port *,
- struct i2c_board_info *));
+ struct i2c_board_info *, void *), void *);
struct list_head ports;
};
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
index ec7a54e91a08..4aca33887aaa 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
@@ -50,6 +50,13 @@ struct nouveau_instmem {
static inline struct nouveau_instmem *
nouveau_instmem(void *obj)
{
+ /* nv04/nv40 impls need to create objects in their constructor,
+ * which is before the subdev pointer is valid
+ */
+ if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
+ nv_subidx(obj) == NVDEV_SUBDEV_INSTMEM)
+ return obj;
+
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM];
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 420908cb82b6..df1b1b423093 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -365,13 +365,13 @@ static u16
init_script(struct nouveau_bios *bios, int index)
{
struct nvbios_init init = { .bios = bios };
- u16 data;
+ u16 bmp_ver = bmp_version(bios), data;
- if (bmp_version(bios) && bmp_version(bios) < 0x0510) {
- if (index > 1)
+ if (bmp_ver && bmp_ver < 0x0510) {
+ if (index > 1 || bmp_ver < 0x0100)
return 0x0000;
- data = bios->bmp_offset + (bios->version.major < 2 ? 14 : 18);
+ data = bios->bmp_offset + (bmp_ver < 0x0200 ? 14 : 18);
return nv_ro16(bios, data + (index * 2));
}
@@ -1294,7 +1294,11 @@ init_jump(struct nvbios_init *init)
u16 offset = nv_ro16(bios, init->offset + 1);
trace("JUMP\t0x%04x\n", offset);
- init->offset = offset;
+
+ if (init_exec(init))
+ init->offset = offset;
+ else
+ init->offset += 3;
}
/**
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
index da50c1b12928..30c1f3a4158e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
@@ -69,6 +69,11 @@ nv04_clock_pll_prog(struct nouveau_clock *clk, u32 reg1,
return 0;
}
+static struct nouveau_clocks
+nv04_domain[] = {
+ { nv_clk_src_max }
+};
+
static int
nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -77,7 +82,7 @@ nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv04_clock_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, NULL, &priv);
+ ret = nouveau_clock_create(parent, engine, oclass, nv04_domain, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
new file mode 100644
index 000000000000..7a723b4f564d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
@@ -0,0 +1,445 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/fifo.h>
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+#include <subdev/timer.h>
+#include <subdev/clock.h>
+
+#include "pll.h"
+
+struct nvaa_clock_priv {
+ struct nouveau_clock base;
+ enum nv_clk_src csrc, ssrc, vsrc;
+ u32 cctrl, sctrl;
+ u32 ccoef, scoef;
+ u32 cpost, spost;
+ u32 vdiv;
+};
+
+static u32
+read_div(struct nouveau_clock *clk)
+{
+ return nv_rd32(clk, 0x004600);
+}
+
+static u32
+read_pll(struct nouveau_clock *clk, u32 base)
+{
+ u32 ctrl = nv_rd32(clk, base + 0);
+ u32 coef = nv_rd32(clk, base + 4);
+ u32 ref = clk->read(clk, nv_clk_src_href);
+ u32 post_div = 0;
+ u32 clock = 0;
+ int N1, M1;
+
+ switch (base) {
+ case 0x4020:
+ post_div = 1 << ((nv_rd32(clk, 0x4070) & 0x000f0000) >> 16);
+ break;
+ case 0x4028:
+ post_div = (nv_rd32(clk, 0x4040) & 0x000f0000) >> 16;
+ break;
+ default:
+ break;
+ }
+
+ N1 = (coef & 0x0000ff00) >> 8;
+ M1 = (coef & 0x000000ff);
+ if ((ctrl & 0x80000000) && M1) {
+ clock = ref * N1 / M1;
+ clock = clock / post_div;
+ }
+
+ return clock;
+}
+
+static int
+nvaa_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
+{
+ struct nvaa_clock_priv *priv = (void *)clk;
+ u32 mast = nv_rd32(clk, 0x00c054);
+ u32 P = 0;
+
+ switch (src) {
+ case nv_clk_src_crystal:
+ return nv_device(priv)->crystal;
+ case nv_clk_src_href:
+ return 100000; /* PCIE reference clock */
+ case nv_clk_src_hclkm4:
+ return clk->read(clk, nv_clk_src_href) * 4;
+ case nv_clk_src_hclkm2d3:
+ return clk->read(clk, nv_clk_src_href) * 2 / 3;
+ case nv_clk_src_host:
+ switch (mast & 0x000c0000) {
+ case 0x00000000: return clk->read(clk, nv_clk_src_hclkm2d3);
+ case 0x00040000: break;
+ case 0x00080000: return clk->read(clk, nv_clk_src_hclkm4);
+ case 0x000c0000: return clk->read(clk, nv_clk_src_cclk);
+ }
+ break;
+ case nv_clk_src_core:
+ P = (nv_rd32(clk, 0x004028) & 0x00070000) >> 16;
+
+ switch (mast & 0x00000003) {
+ case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P;
+ case 0x00000001: return 0;
+ case 0x00000002: return clk->read(clk, nv_clk_src_hclkm4) >> P;
+ case 0x00000003: return read_pll(clk, 0x004028) >> P;
+ }
+ break;
+ case nv_clk_src_cclk:
+ if ((mast & 0x03000000) != 0x03000000)
+ return clk->read(clk, nv_clk_src_core);
+
+ if ((mast & 0x00000200) == 0x00000000)
+ return clk->read(clk, nv_clk_src_core);
+
+ switch (mast & 0x00000c00) {
+ case 0x00000000: return clk->read(clk, nv_clk_src_href);
+ case 0x00000400: return clk->read(clk, nv_clk_src_hclkm4);
+ case 0x00000800: return clk->read(clk, nv_clk_src_hclkm2d3);
+ default: return 0;
+ }
+ case nv_clk_src_shader:
+ P = (nv_rd32(clk, 0x004020) & 0x00070000) >> 16;
+ switch (mast & 0x00000030) {
+ case 0x00000000:
+ if (mast & 0x00000040)
+ return clk->read(clk, nv_clk_src_href) >> P;
+ return clk->read(clk, nv_clk_src_crystal) >> P;
+ case 0x00000010: break;
+ case 0x00000020: return read_pll(clk, 0x004028) >> P;
+ case 0x00000030: return read_pll(clk, 0x004020) >> P;
+ }
+ break;
+ case nv_clk_src_mem:
+ return 0;
+ case nv_clk_src_vdec:
+ P = (read_div(clk) & 0x00000700) >> 8;
+
+ switch (mast & 0x00400000) {
+ case 0x00400000:
+ return clk->read(clk, nv_clk_src_core) >> P;
+ default:
+ return 500000 >> P;
+ }
+ break;
+ default:
+ break;
+ }
+
+ nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
+ return 0;
+}
+
+static u32
+calc_pll(struct nvaa_clock_priv *priv, u32 reg,
+ u32 clock, int *N, int *M, int *P)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_pll pll;
+ struct nouveau_clock *clk = &priv->base;
+ int ret;
+
+ ret = nvbios_pll_parse(bios, reg, &pll);
+ if (ret)
+ return 0;
+
+ pll.vco2.max_freq = 0;
+ pll.refclk = clk->read(clk, nv_clk_src_href);
+ if (!pll.refclk)
+ return 0;
+
+ return nv04_pll_calc(nv_subdev(priv), &pll, clock, N, M, NULL, NULL, P);
+}
+
+static inline u32
+calc_P(u32 src, u32 target, int *div)
+{
+ u32 clk0 = src, clk1 = src;
+ for (*div = 0; *div <= 7; (*div)++) {
+ if (clk0 <= target) {
+ clk1 = clk0 << (*div ? 1 : 0);
+ break;
+ }
+ clk0 >>= 1;
+ }
+
+ if (target - clk0 <= clk1 - target)
+ return clk0;
+ (*div)--;
+ return clk1;
+}
+
+static int
+nvaa_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
+{
+ struct nvaa_clock_priv *priv = (void *)clk;
+ const int shader = cstate->domain[nv_clk_src_shader];
+ const int core = cstate->domain[nv_clk_src_core];
+ const int vdec = cstate->domain[nv_clk_src_vdec];
+ u32 out = 0, clock = 0;
+ int N, M, P1, P2 = 0;
+ int divs = 0;
+
+ /* cclk: find suitable source, disable PLL if we can */
+ if (core < clk->read(clk, nv_clk_src_hclkm4))
+ out = calc_P(clk->read(clk, nv_clk_src_hclkm4), core, &divs);
+
+ /* Calculate clock * 2, so shader clock can use it too */
+ clock = calc_pll(priv, 0x4028, (core << 1), &N, &M, &P1);
+
+ if (abs(core - out) <=
+ abs(core - (clock >> 1))) {
+ priv->csrc = nv_clk_src_hclkm4;
+ priv->cctrl = divs << 16;
+ } else {
+ /* NVCTRL is actually used _after_ NVPOST, and after what we
+ * call NVPLL. To make matters worse, NVPOST is an integer
+ * divider instead of a right-shift number. */
+ if (P1 > 2) {
+ P2 = P1 - 2;
+ P1 = 2;
+ }
+
+ priv->csrc = nv_clk_src_core;
+ priv->ccoef = (N << 8) | M;
+
+ priv->cctrl = (P2 + 1) << 16;
+ priv->cpost = (1 << P1) << 16;
+ }
+
+ /* sclk: nvpll + divisor, href or spll */
+ out = 0;
+ if (shader == clk->read(clk, nv_clk_src_href)) {
+ priv->ssrc = nv_clk_src_href;
+ } else {
+ clock = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
+ if (priv->csrc == nv_clk_src_core) {
+ out = calc_P((core << 1), shader, &divs);
+ }
+
+ if (abs(shader - out) <=
+ abs(shader - clock) &&
+ (divs + P2) <= 7) {
+ priv->ssrc = nv_clk_src_core;
+ priv->sctrl = (divs + P2) << 16;
+ } else {
+ priv->ssrc = nv_clk_src_shader;
+ priv->scoef = (N << 8) | M;
+ priv->sctrl = P1 << 16;
+ }
+ }
+
+ /* vclk */
+ out = calc_P(core, vdec, &divs);
+ clock = calc_P(500000, vdec, &P1);
+ if (abs(vdec - out) <=
+ abs(vdec - clock)) {
+ priv->vsrc = nv_clk_src_cclk;
+ priv->vdiv = divs << 16;
+ } else {
+ priv->vsrc = nv_clk_src_vdec;
+ priv->vdiv = P1 << 16;
+ }
+
+ /* Print strategy! */
+ nv_debug(priv, "nvpll: %08x %08x %08x\n",
+ priv->ccoef, priv->cpost, priv->cctrl);
+ nv_debug(priv, " spll: %08x %08x %08x\n",
+ priv->scoef, priv->spost, priv->sctrl);
+ nv_debug(priv, " vdiv: %08x\n", priv->vdiv);
+ if (priv->csrc == nv_clk_src_hclkm4)
+ nv_debug(priv, "core: hrefm4\n");
+ else
+ nv_debug(priv, "core: nvpll\n");
+
+ if (priv->ssrc == nv_clk_src_href)
+ nv_debug(priv, "shader: href\n");
+ else if (priv->ssrc == nv_clk_src_core)
+ nv_debug(priv, "shader: nvpll\n");
+ else
+ nv_debug(priv, "shader: spll\n");
+
+ if (priv->vsrc == nv_clk_src_vdec)
+ nv_debug(priv, "vdec: 500MHz\n");
+ else
+ nv_debug(priv, "vdec: core\n");
+
+ return 0;
+}
+
+static int
+nvaa_clock_prog(struct nouveau_clock *clk)
+{
+ struct nvaa_clock_priv *priv = (void *)clk;
+ struct nouveau_fifo *pfifo = nouveau_fifo(clk);
+ unsigned long flags;
+ u32 pllmask = 0, mast, ptherm_gate;
+ int ret = -EBUSY;
+
+ /* halt and idle execution engines */
+ ptherm_gate = nv_mask(clk, 0x020060, 0x00070000, 0x00000000);
+ nv_mask(clk, 0x002504, 0x00000001, 0x00000001);
+ /* Wait until the interrupt handler is finished */
+ if (!nv_wait(clk, 0x000100, 0xffffffff, 0x00000000))
+ goto resume;
+
+ if (pfifo)
+ pfifo->pause(pfifo, &flags);
+
+ if (!nv_wait(clk, 0x002504, 0x00000010, 0x00000010))
+ goto resume;
+ if (!nv_wait(clk, 0x00251c, 0x0000003f, 0x0000003f))
+ goto resume;
+
+ /* First switch to safe clocks: href */
+ mast = nv_mask(clk, 0xc054, 0x03400e70, 0x03400640);
+ mast &= ~0x00400e73;
+ mast |= 0x03000000;
+
+ switch (priv->csrc) {
+ case nv_clk_src_hclkm4:
+ nv_mask(clk, 0x4028, 0x00070000, priv->cctrl);
+ mast |= 0x00000002;
+ break;
+ case nv_clk_src_core:
+ nv_wr32(clk, 0x402c, priv->ccoef);
+ nv_wr32(clk, 0x4028, 0x80000000 | priv->cctrl);
+ nv_wr32(clk, 0x4040, priv->cpost);
+ pllmask |= (0x3 << 8);
+ mast |= 0x00000003;
+ break;
+ default:
+ nv_warn(priv,"Reclocking failed: unknown core clock\n");
+ goto resume;
+ }
+
+ switch (priv->ssrc) {
+ case nv_clk_src_href:
+ nv_mask(clk, 0x4020, 0x00070000, 0x00000000);
+ /* mast |= 0x00000000; */
+ break;
+ case nv_clk_src_core:
+ nv_mask(clk, 0x4020, 0x00070000, priv->sctrl);
+ mast |= 0x00000020;
+ break;
+ case nv_clk_src_shader:
+ nv_wr32(clk, 0x4024, priv->scoef);
+ nv_wr32(clk, 0x4020, 0x80000000 | priv->sctrl);
+ nv_wr32(clk, 0x4070, priv->spost);
+ pllmask |= (0x3 << 12);
+ mast |= 0x00000030;
+ break;
+ default:
+ nv_warn(priv,"Reclocking failed: unknown sclk clock\n");
+ goto resume;
+ }
+
+ if (!nv_wait(clk, 0x004080, pllmask, pllmask)) {
+ nv_warn(priv,"Reclocking failed: unstable PLLs\n");
+ goto resume;
+ }
+
+ switch (priv->vsrc) {
+ case nv_clk_src_cclk:
+ mast |= 0x00400000;
+ /* fall through: cclk also needs the divider programmed */
+ default:
+ nv_wr32(clk, 0x4600, priv->vdiv);
+ }
+
+ nv_wr32(clk, 0xc054, mast);
+ ret = 0;
+
+resume:
+ if (pfifo)
+ pfifo->start(pfifo, &flags);
+
+ nv_mask(clk, 0x002504, 0x00000001, 0x00000000);
+ nv_wr32(clk, 0x020060, ptherm_gate);
+
+ /* Disable some PLLs and dividers when unused */
+ if (priv->csrc != nv_clk_src_core) {
+ nv_wr32(clk, 0x4040, 0x00000000);
+ nv_mask(clk, 0x4028, 0x80000000, 0x00000000);
+ }
+
+ if (priv->ssrc != nv_clk_src_shader) {
+ nv_wr32(clk, 0x4070, 0x00000000);
+ nv_mask(clk, 0x4020, 0x80000000, 0x00000000);
+ }
+
+ return ret;
+}
+
+static void
+nvaa_clock_tidy(struct nouveau_clock *clk)
+{
+}
+
+static struct nouveau_clocks
+nvaa_domains[] = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_href , 0xff },
+ { nv_clk_src_core , 0xff, 0, "core", 1000 },
+ { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+ { nv_clk_src_vdec , 0xff, 0, "vdec", 1000 },
+ { nv_clk_src_max }
+};
+
+static int
+nvaa_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvaa_clock_priv *priv;
+ int ret;
+
+ ret = nouveau_clock_create(parent, engine, oclass, nvaa_domains, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.read = nvaa_clock_read;
+ priv->base.calc = nvaa_clock_calc;
+ priv->base.prog = nvaa_clock_prog;
+ priv->base.tidy = nvaa_clock_tidy;
+ return 0;
+}
+
+struct nouveau_oclass *
+nvaa_clock_oclass = &(struct nouveau_oclass) {
+ .handle = NV_SUBDEV(CLOCK, 0xaa),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvaa_clock_ctor,
+ .dtor = _nouveau_clock_dtor,
+ .init = _nouveau_clock_init,
+ .fini = _nouveau_clock_fini,
+ },
+};
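
For reference, the post-divider selection in the new nvaa.c (calc_P) just walks power-of-two dividers and keeps whichever of the two nearest results is closer to the target. A minimal user-space sketch of that helper is shown below; the frequencies are made-up kHz values, not chip data.

#include <stdio.h>

typedef unsigned int u32;

/* Same divider search as calc_P() in the hunk above: shift the source
 * clock right until it drops to or below the target, then keep whichever
 * of the last two candidates is closer.
 */
static u32 calc_P(u32 src, u32 target, int *div)
{
	u32 clk0 = src, clk1 = src;

	for (*div = 0; *div <= 7; (*div)++) {
		if (clk0 <= target) {
			clk1 = clk0 << (*div ? 1 : 0);
			break;
		}
		clk0 >>= 1;
	}

	if (target - clk0 <= clk1 - target)
		return clk0;
	(*div)--;
	return clk1;
}

int main(void)
{
	int div;
	/* e.g. derive a ~190 MHz vdec clock from a 400 MHz core clock */
	u32 out = calc_P(400000, 190000, &div);

	printf("got %u kHz with divider 2^%d\n", out, div);
	return 0;
}

With these inputs the helper settles on 200000 kHz at divider 2^1, since that is closer to the request than 100000 kHz at 2^2.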
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 041fd5edaebf..c33c03d2f4af 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -197,7 +197,7 @@ static int
nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
struct nouveau_i2c_board_info *info,
bool (*match)(struct nouveau_i2c_port *,
- struct i2c_board_info *))
+ struct i2c_board_info *, void *), void *data)
{
struct nouveau_i2c_port *port = nouveau_i2c_find(i2c, index);
int i;
@@ -221,7 +221,7 @@ nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
}
if (nv_probe_i2c(port, info[i].dev.addr) &&
- (!match || match(port, &info[i].dev))) {
+ (!match || match(port, &info[i].dev, data))) {
nv_info(i2c, "detected %s: %s\n", what,
info[i].dev.type);
return i;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
index af129c2e8113..64f8b4702bf7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
@@ -100,7 +100,7 @@ mxm_match_dcb(struct nouveau_mxm *mxm, u8 *data, void *info)
static int
mxm_dcb_sanitise_entry(struct nouveau_bios *bios, void *data, int idx, u16 pdcb)
{
- struct nouveau_mxm *mxm = nouveau_mxm(bios);
+ struct nouveau_mxm *mxm = data;
struct context ctx = { .outp = (u32 *)(bios->data + pdcb) };
u8 type, i2cidx, link, ver, len;
u8 *conn;
@@ -199,7 +199,7 @@ mxm_dcb_sanitise(struct nouveau_mxm *mxm)
return;
}
- dcb_outp_foreach(bios, NULL, mxm_dcb_sanitise_entry);
+ dcb_outp_foreach(bios, mxm, mxm_dcb_sanitise_entry);
mxms_foreach(mxm, 0x01, mxm_show_unmatched, NULL);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
index e44ed7b93c6d..7610fc5f8fa2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
@@ -29,9 +29,9 @@
static bool
probe_monitoring_device(struct nouveau_i2c_port *i2c,
- struct i2c_board_info *info)
+ struct i2c_board_info *info, void *data)
{
- struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c);
+ struct nouveau_therm_priv *priv = data;
struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
struct i2c_client *client;
@@ -96,7 +96,7 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
};
i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
- board, probe_monitoring_device);
+ board, probe_monitoring_device, therm);
if (priv->ic)
return;
}
@@ -108,7 +108,7 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
};
i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
- board, probe_monitoring_device);
+ board, probe_monitoring_device, therm);
if (priv->ic)
return;
}
@@ -117,5 +117,5 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
device. Let's try our static list.
*/
i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
- nv_board_infos, probe_monitoring_device);
+ nv_board_infos, probe_monitoring_device, therm);
}
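
The i2c/therm hunks above change identify() to thread an opaque pointer through to the match callback instead of having the callback re-derive its owner from the port (the old nouveau_therm(i2c) lookup). The toy C sketch below shows the same pattern with invented names; it is only meant to illustrate why the extra void * argument appears in every caller.

#include <stdbool.h>
#include <stdio.h>

struct probe_ctx {
	const char *owner;   /* stands in for the therm subdev */
};

/* The callback now receives the caller-supplied context directly. */
typedef bool (*match_fn)(int addr, void *data);

static bool match_monitoring_device(int addr, void *data)
{
	struct probe_ctx *ctx = data;

	printf("%s probing address 0x%02x\n", ctx->owner, addr);
	return addr == 0x2d;   /* pretend this address answered */
}

/* identify() just forwards the opaque pointer, as the patched
 * nouveau_i2c_identify() does.
 */
static int identify(const int *addrs, int n, match_fn match, void *data)
{
	for (int i = 0; i < n; i++)
		if (match(addrs[i], data))
			return i;
	return -1;
}

int main(void)
{
	const int addrs[] = { 0x2c, 0x2d, 0x2e };
	struct probe_ctx ctx = { .owner = "therm" };

	printf("matched index %d\n",
	       identify(addrs, 3, match_monitoring_device, &ctx));
	return 0;
}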
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 936a71c59080..7fdc51e2a571 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -643,7 +643,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
get_tmds_slave(encoder))
return;
- type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL);
+ type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL, NULL);
if (type < 0)
return;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index 3618ac6b6316..32e7064b819b 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -58,8 +58,8 @@ struct nouveau_plane {
};
static uint32_t formats[] = {
- DRM_FORMAT_NV12,
DRM_FORMAT_UYVY,
+ DRM_FORMAT_NV12,
};
/* Sine can be approximated with
@@ -99,13 +99,28 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nouveau_bo *cur = nv_plane->cur;
bool flip = nv_plane->flip;
- int format = ALIGN(src_w * 4, 0x100);
int soff = NV_PCRTC0_SIZE * nv_crtc->index;
int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index;
- int ret;
+ int format, ret;
+
+ /* Source parameters given in 16.16 fixed point, ignore fractional. */
+ src_x >>= 16;
+ src_y >>= 16;
+ src_w >>= 16;
+ src_h >>= 16;
+
+ format = ALIGN(src_w * 4, 0x100);
if (format > 0xffff)
- return -EINVAL;
+ return -ERANGE;
+
+ if (dev->chipset >= 0x30) {
+ if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1))
+ return -ERANGE;
+ } else {
+ if (crtc_w < (src_w >> 3) || crtc_h < (src_h >> 3))
+ return -ERANGE;
+ }
ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM);
if (ret)
@@ -113,12 +128,6 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
nv_plane->cur = nv_fb->nvbo;
- /* Source parameters given in 16.16 fixed point, ignore fractional. */
- src_x = src_x >> 16;
- src_y = src_y >> 16;
- src_w = src_w >> 16;
- src_h = src_h >> 16;
-
nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY);
nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0);
@@ -245,14 +254,25 @@ nv10_overlay_init(struct drm_device *device)
{
struct nouveau_device *dev = nouveau_dev(device);
struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
+ int num_formats = ARRAY_SIZE(formats);
int ret;
if (!plane)
return;
+ switch (dev->chipset) {
+ case 0x10:
+ case 0x11:
+ case 0x15:
+ case 0x1a:
+ case 0x20:
+ num_formats = 1;
+ break;
+ }
+
ret = drm_plane_init(device, &plane->base, 3 /* both crtc's */,
&nv10_plane_funcs,
- formats, ARRAY_SIZE(formats), false);
+ formats, num_formats, false);
if (ret)
goto err;
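
nv10_update_plane() now converts the 16.16 fixed-point source rectangle before using it to size the overlay and rejects downscales the hardware cannot do. The fragment below restates just the fixed-point conversion and the chipset-dependent limit check as plain C with invented values; the real function operates on the drm_plane arguments.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Drop the fractional part of a 16.16 fixed-point plane coordinate. */
static uint32_t fixed16_to_int(uint32_t v)
{
	return v >> 16;
}

/* Overlay downscale limit as in the patched nv10_update_plane():
 * newer chipsets (>= 0x30) allow at most a 2x downscale, older ones 8x.
 */
static bool overlay_scale_ok(int chipset, uint32_t crtc_w, uint32_t crtc_h,
			     uint32_t src_w, uint32_t src_h)
{
	int shift = (chipset >= 0x30) ? 1 : 3;

	return crtc_w >= (src_w >> shift) && crtc_h >= (src_h >> shift);
}

int main(void)
{
	uint32_t src_w = 1280u << 16;    /* 16.16 fixed point, as DRM passes it */
	uint32_t src_h = 720u << 16;

	printf("src %ux%u, 640x360 on nv30+: %s\n",
	       fixed16_to_int(src_w), fixed16_to_int(src_h),
	       overlay_scale_ok(0x30, 640, 360,
				fixed16_to_int(src_w),
				fixed16_to_int(src_h)) ? "ok" : "rejected");
	return 0;
}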
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index cc4b208ce546..244822df8ffc 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -59,7 +59,7 @@ int nv04_tv_identify(struct drm_device *dev, int i2c_index)
struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
return i2c->identify(i2c, i2c_index, "TV encoder",
- nv04_tv_encoder_info, NULL);
+ nv04_tv_encoder_info, NULL, NULL);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 6828d81ed7b9..900fae01793e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -447,6 +447,8 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
if (ret)
goto done;
+ info->offset = ntfy->node->offset;
+
done:
if (ret)
nouveau_abi16_ntfy_fini(chan, ntfy);
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 95c740454049..ba0183fb84f3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -51,6 +51,7 @@ static struct nouveau_dsm_priv {
bool dsm_detected;
bool optimus_detected;
acpi_handle dhandle;
+ acpi_handle other_handle;
acpi_handle rom_handle;
} nouveau_dsm_priv;
@@ -260,9 +261,10 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
if (!dhandle)
return false;
- if (!acpi_has_method(dhandle, "_DSM"))
+ if (!acpi_has_method(dhandle, "_DSM")) {
+ nouveau_dsm_priv.other_handle = dhandle;
return false;
-
+ }
if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
retval |= NOUVEAU_DSM_HAS_MUX;
@@ -338,6 +340,16 @@ static bool nouveau_dsm_detect(void)
printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
acpi_method_name);
nouveau_dsm_priv.dsm_detected = true;
+ /*
+ * On some systems hotplug events are generated for the device
+ * being switched off when _DSM is executed. They cause ACPI
+ * hotplug to trigger and attempt to remove the device from
+ * the system, which causes it to break down. Prevent that from
+ * happening by setting the no_hotplug flag for the involved
+ * ACPI device objects.
+ */
+ acpi_bus_no_hotplug(nouveau_dsm_priv.dhandle);
+ acpi_bus_no_hotplug(nouveau_dsm_priv.other_handle);
ret = true;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 7809d92183c4..25ea82f8def3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -608,8 +608,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
fence = nouveau_fence_ref(new_bo->bo.sync_obj);
spin_unlock(&new_bo->bo.bdev->fence_lock);
ret = nouveau_fence_sync(fence, chan);
+ nouveau_fence_unref(&fence);
if (ret)
- return ret;
+ goto fail_free;
if (new_bo != old_bo) {
ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
@@ -701,7 +702,7 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
if (s->event)
- drm_send_vblank_event(dev, -1, s->event);
+ drm_send_vblank_event(dev, s->crtc, s->event);
list_del(&s->head);
if (ps)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 7a3759f1c41a..98a22e6e27a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -858,6 +858,12 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
if (nouveau_runtime_pm == 0)
return -EINVAL;
+ /* are we optimus enabled? */
+ if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
+ DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+ return -EINVAL;
+ }
+
nv_debug_level(SILENT);
drm_kms_helper_poll_disable(drm_dev);
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index f8e66c08b11a..4e384a2f99c3 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1265,7 +1265,7 @@ nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
uint32_t start, uint32_t size)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 end = max(start + size, (u32)256);
+ u32 end = min_t(u32, start + size, 256);
u32 i;
for (i = start; i < end; i++) {
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index 037d324bf58f..38c2bb72e456 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -5,8 +5,11 @@ config DRM_QXL
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
select FB_DEFERRED_IO
- select DRM_KMS_HELPER
+ select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
- select DRM_TTM
+ select DRM_TTM
+ select CRC32
help
- QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting.
+ QXL virtual GPU for Spice virtualization desktop integration.
+ Do not enable this driver unless your distro ships a corresponding
+ X.org QXL driver that can handle kernel modesetting.
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 5e827c29d194..d70aafb83307 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -24,7 +24,7 @@
*/
-#include "linux/crc32.h"
+#include <linux/crc32.h>
#include "qxl_drv.h"
#include "qxl_object.h"
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 80a20120e625..0b9621c9aeea 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1143,31 +1143,53 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
}
if (tiling_flags & RADEON_TILING_MACRO) {
- if (rdev->family >= CHIP_BONAIRE)
- tmp = rdev->config.cik.tile_config;
- else if (rdev->family >= CHIP_TAHITI)
- tmp = rdev->config.si.tile_config;
- else if (rdev->family >= CHIP_CAYMAN)
- tmp = rdev->config.cayman.tile_config;
- else
- tmp = rdev->config.evergreen.tile_config;
+ evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
- switch ((tmp & 0xf0) >> 4) {
- case 0: /* 4 banks */
- fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
- break;
- case 1: /* 8 banks */
- default:
- fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
- break;
- case 2: /* 16 banks */
- fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
- break;
+ /* Set NUM_BANKS. */
+ if (rdev->family >= CHIP_BONAIRE) {
+ unsigned tileb, index, num_banks, tile_split_bytes;
+
+ /* Calculate the macrotile mode index. */
+ tile_split_bytes = 64 << tile_split;
+ tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
+ tileb = min(tile_split_bytes, tileb);
+
+ for (index = 0; tileb > 64; index++) {
+ tileb >>= 1;
+ }
+
+ if (index >= 16) {
+ DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
+ target_fb->bits_per_pixel, tile_split);
+ return -EINVAL;
+ }
+
+ num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
+ } else {
+ /* SI and older. */
+ if (rdev->family >= CHIP_TAHITI)
+ tmp = rdev->config.si.tile_config;
+ else if (rdev->family >= CHIP_CAYMAN)
+ tmp = rdev->config.cayman.tile_config;
+ else
+ tmp = rdev->config.evergreen.tile_config;
+
+ switch ((tmp & 0xf0) >> 4) {
+ case 0: /* 4 banks */
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
+ break;
+ case 1: /* 8 banks */
+ default:
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
+ break;
+ case 2: /* 16 banks */
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
+ break;
+ }
}
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
-
- evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
@@ -1180,23 +1202,18 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
if (rdev->family >= CHIP_BONAIRE) {
- u32 num_pipe_configs = rdev->config.cik.max_tile_pipes;
- u32 num_rb = rdev->config.cik.max_backends_per_se;
- if (num_pipe_configs > 8)
- num_pipe_configs = 8;
- if (num_pipe_configs == 8)
- fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P8_32x32_16x16);
- else if (num_pipe_configs == 4) {
- if (num_rb == 4)
- fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_16x16);
- else if (num_rb < 4)
- fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_8x16);
- } else if (num_pipe_configs == 2)
- fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P2);
+ /* Read the pipe config from the 2D TILED SCANOUT mode.
+ * It should be the same for the other modes too, but not all
+ * modes set the pipe config field. */
+ u32 pipe_config = (rdev->config.cik.tile_mode_array[10] >> 6) & 0x1f;
+
+ fb_format |= CIK_GRPH_PIPE_CONFIG(pipe_config);
} else if ((rdev->family == CHIP_TAHITI) ||
(rdev->family == CHIP_PITCAIRN))
fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
- else if (rdev->family == CHIP_VERDE)
+ else if ((rdev->family == CHIP_VERDE) ||
+ (rdev->family == CHIP_OLAND) ||
+ (rdev->family == CHIP_HAINAN)) /* for completeness. HAINAN has no display hw */
fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
switch (radeon_crtc->crtc_id) {
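
The Bonaire branch above derives a macrotile-mode index from the framebuffer depth and tile split rather than from tile_config; the index is essentially log2 of the clamped tile size in units of 64 bytes. A small stand-alone sketch of that arithmetic (with made-up inputs, not real CIK register data) follows.

#include <stdio.h>

/* Mirror of the index calculation in dce4_crtc_do_set_base() for CIK:
 * an 8x8 tile of the given bpp, clamped to the tile split size, is
 * reduced to an index by halving until it fits in 64 bytes.
 */
static int macrotile_index(unsigned bpp, unsigned tile_split)
{
	unsigned tile_split_bytes = 64u << tile_split;
	unsigned tileb = 8 * 8 * bpp / 8;
	int index;

	if (tileb > tile_split_bytes)
		tileb = tile_split_bytes;

	for (index = 0; tileb > 64; index++)
		tileb >>= 1;

	return index < 16 ? index : -1;   /* -1 mirrors the -EINVAL path */
}

int main(void)
{
	/* 32 bpp scanout with a 256-byte tile split: 256 -> 128 -> 64, index 2 */
	printf("index = %d\n", macrotile_index(32, 2));
	return 0;
}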
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index 0652ee0a2098..f685035dbe39 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -44,7 +44,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
unsigned char *base;
- u16 out;
+ u16 out = cpu_to_le16(0);
memset(&args, 0, sizeof(args));
@@ -55,11 +55,14 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num);
return -EINVAL;
}
- args.ucRegIndex = buf[0];
- if (num > 1) {
+ if (buf == NULL)
+ args.ucRegIndex = 0;
+ else
+ args.ucRegIndex = buf[0];
+ if (num)
num--;
+ if (num)
memcpy(&out, &buf[1], num);
- }
args.lpI2CDataOut = cpu_to_le16(out);
} else {
if (num > ATOM_MAX_HW_I2C_READ) {
@@ -96,14 +99,14 @@ int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
struct i2c_msg *p;
int i, remaining, current_count, buffer_offset, max_bytes, ret;
- u8 buf = 0, flags;
+ u8 flags;
/* check for bus probe */
p = &msgs[0];
if ((num == 1) && (p->len == 0)) {
ret = radeon_process_i2c_ch(i2c,
p->addr, HW_I2C_WRITE,
- &buf, 1);
+ NULL, 0);
if (ret)
return ret;
else
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index b43a3a3c9067..e950fabd7f5e 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3057,7 +3057,7 @@ static u32 cik_create_bitmask(u32 bit_width)
* Returns the disabled RB bitmask.
*/
static u32 cik_get_rb_disabled(struct radeon_device *rdev,
- u32 max_rb_num, u32 se_num,
+ u32 max_rb_num_per_se,
u32 sh_per_se)
{
u32 data, mask;
@@ -3071,7 +3071,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
data >>= BACKEND_DISABLE_SHIFT;
- mask = cik_create_bitmask(max_rb_num / se_num / sh_per_se);
+ mask = cik_create_bitmask(max_rb_num_per_se / sh_per_se);
return data & mask;
}
@@ -3088,7 +3088,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
*/
static void cik_setup_rb(struct radeon_device *rdev,
u32 se_num, u32 sh_per_se,
- u32 max_rb_num)
+ u32 max_rb_num_per_se)
{
int i, j;
u32 data, mask;
@@ -3098,7 +3098,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
for (i = 0; i < se_num; i++) {
for (j = 0; j < sh_per_se; j++) {
cik_select_se_sh(rdev, i, j);
- data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
+ data = cik_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
if (rdev->family == CHIP_HAWAII)
disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
else
@@ -3108,12 +3108,14 @@ static void cik_setup_rb(struct radeon_device *rdev,
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
mask = 1;
- for (i = 0; i < max_rb_num; i++) {
+ for (i = 0; i < max_rb_num_per_se * se_num; i++) {
if (!(disabled_rbs & mask))
enabled_rbs |= mask;
mask <<= 1;
}
+ rdev->config.cik.backend_enable_mask = enabled_rbs;
+
for (i = 0; i < se_num; i++) {
cik_select_se_sh(rdev, i, 0xffffffff);
data = 0;
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 0300727a4f70..d08b83c6267b 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -458,7 +458,7 @@ int cik_copy_dma(struct radeon_device *rdev,
radeon_ring_write(ring, 0); /* src/dst endian swap */
radeon_ring_write(ring, src_offset & 0xffffffff);
radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
- radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, dst_offset & 0xffffffff);
radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
src_offset += cur_size_in_bytes;
dst_offset += cur_size_in_bytes;
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 009f46e0ce72..713a5d359901 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -93,11 +93,13 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- u32 offset = dig->afmt->offset;
+ u32 offset;
- if (!dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->afmt->pin)
return;
+ offset = dig->afmt->offset;
+
WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
}
@@ -112,7 +114,7 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
struct radeon_connector *radeon_connector = NULL;
u32 tmp = 0, offset;
- if (!dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->afmt->pin)
return;
offset = dig->afmt->pin->offset;
@@ -156,7 +158,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
u8 *sadb;
int sad_count;
- if (!dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->afmt->pin)
return;
offset = dig->afmt->pin->offset;
@@ -172,7 +174,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
- if (sad_count < 0) {
+ if (sad_count <= 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
return;
}
@@ -217,7 +219,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
};
- if (!dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->afmt->pin)
return;
offset = dig->afmt->pin->offset;
@@ -233,7 +235,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
- if (sad_count < 0) {
+ if (sad_count <= 0) {
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
return;
}
@@ -306,7 +308,9 @@ int dce6_audio_init(struct radeon_device *rdev)
rdev->audio.enabled = true;
if (ASIC_IS_DCE8(rdev))
- rdev->audio.num_pins = 7;
+ rdev->audio.num_pins = 6;
+ else if (ASIC_IS_DCE61(rdev))
+ rdev->audio.num_pins = 4;
else
rdev->audio.num_pins = 6;
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index aa695c4feb3d..0c6d5cef4cf1 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -118,7 +118,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
- if (sad_count < 0) {
+ if (sad_count <= 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
return;
}
@@ -173,7 +173,7 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
- if (sad_count < 0) {
+ if (sad_count <= 0) {
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
return;
}
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
index af85299f2126..4a85bb644e24 100644
--- a/drivers/gpu/drm/radeon/mkregtable.c
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -655,7 +655,7 @@ static int parser_auth(struct table *t, const char *filename)
/* first line will contain the last register
* and gpu name */
- sscanf(buf, "%s %s", gpu_name, last_reg_s);
+ sscanf(buf, "%9s %9s", gpu_name, last_reg_s);
t->gpu_prefix = gpu_name;
last_reg = strtol(last_reg_s, NULL, 16);
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 11aab2ab54ce..f59a9e9fccf8 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -895,6 +895,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x999C)) {
rdev->config.cayman.max_simds_per_se = 6;
rdev->config.cayman.max_backends_per_se = 2;
+ rdev->config.cayman.max_hw_contexts = 8;
+ rdev->config.cayman.sx_max_export_size = 256;
+ rdev->config.cayman.sx_max_export_pos_size = 64;
+ rdev->config.cayman.sx_max_export_smx_size = 192;
} else if ((rdev->pdev->device == 0x9903) ||
(rdev->pdev->device == 0x9904) ||
(rdev->pdev->device == 0x990A) ||
@@ -905,6 +909,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x999D)) {
rdev->config.cayman.max_simds_per_se = 4;
rdev->config.cayman.max_backends_per_se = 2;
+ rdev->config.cayman.max_hw_contexts = 8;
+ rdev->config.cayman.sx_max_export_size = 256;
+ rdev->config.cayman.sx_max_export_pos_size = 64;
+ rdev->config.cayman.sx_max_export_smx_size = 192;
} else if ((rdev->pdev->device == 0x9919) ||
(rdev->pdev->device == 0x9990) ||
(rdev->pdev->device == 0x9991) ||
@@ -915,9 +923,17 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x99A0)) {
rdev->config.cayman.max_simds_per_se = 3;
rdev->config.cayman.max_backends_per_se = 1;
+ rdev->config.cayman.max_hw_contexts = 4;
+ rdev->config.cayman.sx_max_export_size = 128;
+ rdev->config.cayman.sx_max_export_pos_size = 32;
+ rdev->config.cayman.sx_max_export_smx_size = 96;
} else {
rdev->config.cayman.max_simds_per_se = 2;
rdev->config.cayman.max_backends_per_se = 1;
+ rdev->config.cayman.max_hw_contexts = 4;
+ rdev->config.cayman.sx_max_export_size = 128;
+ rdev->config.cayman.sx_max_export_pos_size = 32;
+ rdev->config.cayman.sx_max_export_smx_size = 96;
}
rdev->config.cayman.max_texture_channel_caches = 2;
rdev->config.cayman.max_gprs = 256;
@@ -925,10 +941,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
rdev->config.cayman.max_gs_threads = 32;
rdev->config.cayman.max_stack_entries = 512;
rdev->config.cayman.sx_num_of_sets = 8;
- rdev->config.cayman.sx_max_export_size = 256;
- rdev->config.cayman.sx_max_export_pos_size = 64;
- rdev->config.cayman.sx_max_export_smx_size = 192;
- rdev->config.cayman.max_hw_contexts = 8;
rdev->config.cayman.sq_num_cf_insts = 2;
rdev->config.cayman.sc_prim_fifo_size = 0x40;
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index cdc003085a76..49c4d48f54d6 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -785,8 +785,8 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
struct ni_ps *ps = ni_get_ps(rps);
struct radeon_clock_and_voltage_limits *max_limits;
bool disable_mclk_switching;
- u32 mclk, sclk;
- u16 vddc, vddci;
+ u32 mclk;
+ u16 vddci;
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
int i;
@@ -839,24 +839,14 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
/* XXX validate the min clocks required for display */
+ /* adjust low state */
if (disable_mclk_switching) {
- mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
- sclk = ps->performance_levels[0].sclk;
- vddc = ps->performance_levels[0].vddc;
- vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
- } else {
- sclk = ps->performance_levels[0].sclk;
- mclk = ps->performance_levels[0].mclk;
- vddc = ps->performance_levels[0].vddc;
- vddci = ps->performance_levels[0].vddci;
+ ps->performance_levels[0].mclk =
+ ps->performance_levels[ps->performance_level_count - 1].mclk;
+ ps->performance_levels[0].vddci =
+ ps->performance_levels[ps->performance_level_count - 1].vddci;
}
- /* adjusted low state */
- ps->performance_levels[0].sclk = sclk;
- ps->performance_levels[0].mclk = mclk;
- ps->performance_levels[0].vddc = vddc;
- ps->performance_levels[0].vddci = vddci;
-
btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
&ps->performance_levels[0].sclk,
&ps->performance_levels[0].mclk);
@@ -868,11 +858,15 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
}
+ /* adjust remaining states */
if (disable_mclk_switching) {
mclk = ps->performance_levels[0].mclk;
+ vddci = ps->performance_levels[0].vddci;
for (i = 1; i < ps->performance_level_count; i++) {
if (mclk < ps->performance_levels[i].mclk)
mclk = ps->performance_levels[i].mclk;
+ if (vddci < ps->performance_levels[i].vddci)
+ vddci = ps->performance_levels[i].vddci;
}
for (i = 0; i < ps->performance_level_count; i++) {
ps->performance_levels[i].mclk = mclk;
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 4b89262f3f0e..b7d3ecba43e3 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -304,9 +304,9 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
}
- } else if (ASIC_IS_DCE3(rdev)) {
+ } else {
/* according to the reg specs, this should be DCE3.2 only, but in
- * practice it seems to cover DCE3.0/3.1 as well.
+ * practice it seems to cover DCE2.0/3.0/3.1 as well.
*/
if (dig->dig_encoder == 0) {
WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
@@ -317,10 +317,6 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
}
- } else {
- /* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */
- WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
- AUDIO_DTO_MODULE(clock / 10));
}
}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index ecf2a3960c07..45e1f447bc79 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1940,7 +1940,7 @@ struct si_asic {
unsigned sc_earlyz_tile_fifo_size;
unsigned num_tile_pipes;
- unsigned num_backends_per_se;
+ unsigned backend_enable_mask;
unsigned backend_disable_mask_per_asic;
unsigned backend_map;
unsigned num_texture_channel_caches;
@@ -1970,7 +1970,7 @@ struct cik_asic {
unsigned sc_earlyz_tile_fifo_size;
unsigned num_tile_pipes;
- unsigned num_backends_per_se;
+ unsigned backend_enable_mask;
unsigned backend_disable_mask_per_asic;
unsigned backend_map;
unsigned num_texture_channel_caches;
@@ -2710,10 +2710,10 @@ void radeon_vm_fence(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_fence *fence);
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
-int radeon_vm_bo_update_pte(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo,
- struct ttm_mem_reg *mem);
+int radeon_vm_bo_update(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo,
+ struct ttm_mem_reg *mem);
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
struct radeon_bo *bo);
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index e354ce94cdd1..c0425bb6223a 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2021,7 +2021,7 @@ static struct radeon_asic ci_asic = {
.hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
- .blit = NULL,
+ .blit = &cik_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &cik_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -2122,7 +2122,7 @@ static struct radeon_asic kv_asic = {
.hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
- .blit = NULL,
+ .blit = &cik_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &cik_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index f79ee184ffd5..5c39bf7c3d88 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2918,7 +2918,7 @@ int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
mpll_param->dll_speed = args.ucDllSpeed;
mpll_param->bwcntl = args.ucBWCntl;
mpll_param->vco_mode =
- (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK) ? 1 : 0;
+ (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK);
mpll_param->yclk_sel =
(args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0;
mpll_param->qdr =
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 9d302eaeea15..485848f889f5 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -33,6 +33,7 @@ static struct radeon_atpx_priv {
bool atpx_detected;
/* handle for device - and atpx */
acpi_handle dhandle;
+ acpi_handle other_handle;
struct radeon_atpx atpx;
} radeon_atpx_priv;
@@ -451,9 +452,10 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
return false;
status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
+ radeon_atpx_priv.other_handle = dhandle;
return false;
-
+ }
radeon_atpx_priv.dhandle = dhandle;
radeon_atpx_priv.atpx.handle = atpx_handle;
return true;
@@ -530,6 +532,16 @@ static bool radeon_atpx_detect(void)
printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
acpi_method_name);
radeon_atpx_priv.atpx_detected = true;
+ /*
+ * On some systems hotplug events are generated for the device
+ * being switched off when ATPX is executed. They cause ACPI
+ * hotplug to trigger and attempt to remove the device from
+ * the system, which causes it to break down. Prevent that from
+ * happening by setting the no_hotplug flag for the involved
+ * ACPI device objects.
+ */
+ acpi_bus_no_hotplug(radeon_atpx_priv.dhandle);
+ acpi_bus_no_hotplug(radeon_atpx_priv.other_handle);
return true;
}
return false;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index f41594b2eeac..0b366169d64d 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -360,13 +360,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
struct radeon_bo *bo;
int r;
- r = radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
+ r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
if (r) {
return r;
}
list_for_each_entry(lobj, &parser->validated, tv.head) {
bo = lobj->bo;
- r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
+ r = radeon_vm_bo_update(parser->rdev, vm, bo, &bo->tbo.mem);
if (r) {
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 9f5ff28864f6..db39ea36bf22 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -77,9 +77,10 @@
* 2.33.0 - Add SI tiling mode array query
* 2.34.0 - Add CIK tiling mode array query
* 2.35.0 - Add CIK macrotile mode array query
+ * 2.36.0 - Fix CIK DCE tiling setup
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 35
+#define KMS_DRIVER_MINOR 36
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -508,15 +509,6 @@ static const struct file_operations radeon_driver_kms_fops = {
#endif
};
-
-static void
-radeon_pci_shutdown(struct pci_dev *pdev)
-{
- struct drm_device *dev = pci_get_drvdata(pdev);
-
- radeon_driver_unload_kms(dev);
-}
-
static struct drm_driver kms_driver = {
.driver_features =
DRIVER_USE_AGP |
@@ -586,7 +578,6 @@ static struct pci_driver radeon_kms_pci_driver = {
.probe = radeon_pci_probe,
.remove = radeon_pci_remove,
.driver.pm = &radeon_pm_ops,
- .shutdown = radeon_pci_shutdown,
};
static int __init radeon_init(void)
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 543dcfae7e6f..00e0d449021c 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -108,9 +108,10 @@
* 1.31- Add support for num Z pipes from GET_PARAM
* 1.32- fixes for rv740 setup
* 1.33- Add r6xx/r7xx const buffer support
+ * 1.34- fix evergreen/cayman GS register
*/
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 33
+#define DRIVER_MINOR 34
#define DRIVER_PATCHLEVEL 0
long radeon_drm_ioctl(struct file *filp,
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 3044e504f4ec..96e440061bdb 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -29,6 +29,7 @@
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_reg.h"
+#include "radeon_trace.h"
/*
* GART
@@ -737,6 +738,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
for (i = 0; i < 2; ++i) {
if (choices[i]) {
vm->id = choices[i];
+ trace_radeon_vm_grab_id(vm->id, ring);
return rdev->vm_manager.active[choices[i]];
}
}
@@ -1116,7 +1118,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
}
/**
- * radeon_vm_bo_update_pte - map a bo into the vm page table
+ * radeon_vm_bo_update - map a bo into the vm page table
*
* @rdev: radeon_device pointer
* @vm: requested vm
@@ -1128,10 +1130,10 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
*
* Object have to be reserved & global and local mutex must be locked!
*/
-int radeon_vm_bo_update_pte(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo,
- struct ttm_mem_reg *mem)
+int radeon_vm_bo_update(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo,
+ struct ttm_mem_reg *mem)
{
struct radeon_ib ib;
struct radeon_bo_va *bo_va;
@@ -1176,6 +1178,8 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
bo_va->valid = false;
}
+ trace_radeon_vm_bo_update(bo_va);
+
nptes = radeon_bo_ngpu_pages(bo);
/* assume two extra pdes in case the mapping overlaps the borders */
@@ -1257,7 +1261,7 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&bo_va->vm->mutex);
if (bo_va->soffset) {
- r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
+ r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
}
mutex_unlock(&rdev->vm_manager.lock);
list_del(&bo_va->vm_list);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 55d0b474bd37..21d593c0ecaf 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -461,6 +461,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
case RADEON_INFO_SI_CP_DMA_COMPUTE:
*value = 1;
break;
+ case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
+ if (rdev->family >= CHIP_BONAIRE) {
+ *value = rdev->config.cik.backend_enable_mask;
+ } else if (rdev->family >= CHIP_TAHITI) {
+ *value = rdev->config.si.backend_enable_mask;
+ } else {
+ DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
+ }
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index d1385ccc672c..984097b907ef 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -537,8 +537,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct radeon_device *rdev = ddev->dev_private;
+ struct radeon_device *rdev = dev_get_drvdata(dev);
int temp;
if (rdev->asic->pm.get_temperature)
@@ -553,8 +552,7 @@ static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct radeon_device *rdev = ddev->dev_private;
+ struct radeon_device *rdev = dev_get_drvdata(dev);
int hyst = to_sensor_dev_attr(attr)->index;
int temp;
@@ -566,23 +564,14 @@ static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
-static ssize_t radeon_hwmon_show_name(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "radeon\n");
-}
-
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
-static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_crit.dev_attr.attr,
&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_name.dev_attr.attr,
NULL
};
@@ -590,8 +579,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct radeon_device *rdev = ddev->dev_private;
+ struct radeon_device *rdev = dev_get_drvdata(dev);
/* Skip limit attributes if DPM is not enabled */
if (rdev->pm.pm_method != PM_METHOD_DPM &&
@@ -607,11 +595,15 @@ static const struct attribute_group hwmon_attrgroup = {
.is_visible = hwmon_attributes_visible,
};
+static const struct attribute_group *hwmon_groups[] = {
+ &hwmon_attrgroup,
+ NULL
+};
+
static int radeon_hwmon_init(struct radeon_device *rdev)
{
int err = 0;
-
- rdev->pm.int_hwmon_dev = NULL;
+ struct device *hwmon_dev;
switch (rdev->pm.int_thermal_type) {
case THERMAL_TYPE_RV6XX:
@@ -624,20 +616,13 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
case THERMAL_TYPE_KV:
if (rdev->asic->pm.get_temperature == NULL)
return err;
- rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
- if (IS_ERR(rdev->pm.int_hwmon_dev)) {
- err = PTR_ERR(rdev->pm.int_hwmon_dev);
+ hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
+ "radeon", rdev,
+ hwmon_groups);
+ if (IS_ERR(hwmon_dev)) {
+ err = PTR_ERR(hwmon_dev);
dev_err(rdev->dev,
"Unable to register hwmon device: %d\n", err);
- break;
- }
- dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev);
- err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj,
- &hwmon_attrgroup);
- if (err) {
- dev_err(rdev->dev,
- "Unable to create hwmon sysfs file: %d\n", err);
- hwmon_device_unregister(rdev->dev);
}
break;
default:
@@ -647,14 +632,6 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
return err;
}
-static void radeon_hwmon_fini(struct radeon_device *rdev)
-{
- if (rdev->pm.int_hwmon_dev) {
- sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup);
- hwmon_device_unregister(rdev->pm.int_hwmon_dev);
- }
-}
-
static void radeon_dpm_thermal_work_handler(struct work_struct *work)
{
struct radeon_device *rdev =
@@ -1337,8 +1314,6 @@ static void radeon_pm_fini_old(struct radeon_device *rdev)
if (rdev->pm.power_state)
kfree(rdev->pm.power_state);
-
- radeon_hwmon_fini(rdev);
}
static void radeon_pm_fini_dpm(struct radeon_device *rdev)
@@ -1358,8 +1333,6 @@ static void radeon_pm_fini_dpm(struct radeon_device *rdev)
if (rdev->pm.power_state)
kfree(rdev->pm.power_state);
-
- radeon_hwmon_fini(rdev);
}
void radeon_pm_fini(struct radeon_device *rdev)
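
The radeon_pm.c hunks above replace the open-coded hwmon_device_register() + sysfs_create_group() + manual "name" attribute sequence with a single hwmon_device_register_with_groups() call, with the radeon_device itself as drvdata. Below is a minimal sketch of that registration pattern under the same API; the "demo" names and the fixed temperature value are placeholders for illustration, not radeon code.

/* Minimal sketch of the hwmon_device_register_with_groups() pattern;
 * "demo" identifiers are hypothetical, not part of the radeon driver. */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

static ssize_t demo_show_temp(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	/* drvdata is whatever was passed as the third argument below */
	void *drvdata = dev_get_drvdata(dev);

	(void)drvdata;
	return sprintf(buf, "%d\n", 42000);	/* millidegrees C */
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, demo_show_temp, NULL, 0);

static struct attribute *demo_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(demo);

static int demo_hwmon_init(struct device *parent, void *drvdata)
{
	struct device *hwmon_dev;

	/* name + drvdata + attribute groups in one call: no manual "name"
	 * attribute and no separate sysfs_create_group()/remove pair */
	hwmon_dev = hwmon_device_register_with_groups(parent, "demo",
						      drvdata, demo_groups);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}
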
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index 9f0e18172b6e..0473257d4078 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -47,6 +47,39 @@ TRACE_EVENT(radeon_cs,
__entry->fences)
);
+TRACE_EVENT(radeon_vm_grab_id,
+ TP_PROTO(unsigned vmid, int ring),
+ TP_ARGS(vmid, ring),
+ TP_STRUCT__entry(
+ __field(u32, vmid)
+ __field(u32, ring)
+ ),
+
+ TP_fast_assign(
+ __entry->vmid = vmid;
+ __entry->ring = ring;
+ ),
+ TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring)
+);
+
+TRACE_EVENT(radeon_vm_bo_update,
+ TP_PROTO(struct radeon_bo_va *bo_va),
+ TP_ARGS(bo_va),
+ TP_STRUCT__entry(
+ __field(u64, soffset)
+ __field(u64, eoffset)
+ __field(u32, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->soffset = bo_va->soffset;
+ __entry->eoffset = bo_va->eoffset;
+ __entry->flags = bo_va->flags;
+ ),
+ TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
+ __entry->soffset, __entry->eoffset, __entry->flags)
+);
+
TRACE_EVENT(radeon_vm_set_page,
TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags),
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 373d088bac66..b9c0529b4a2e 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -473,7 +473,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
return -EINVAL;
}
- if ((start >> 28) != (end >> 28)) {
+ if ((start >> 28) != ((end - 1) >> 28)) {
DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
start, end);
return -EINVAL;
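
The one-line radeon_uvd.c change above treats `end` as an exclusive bound: a relocation whose last byte is `end - 1` may legitimately end exactly on a 256MB boundary without crossing it. A standalone demonstration of the arithmetic (plain C, independent of the driver):

/* [0x0ff00000, 0x10000000) ends exactly on a 256MB boundary and does
 * not cross it; only the decremented exclusive end sees that. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t start = 0x0ff00000, end = 0x10000000;	/* exclusive end */

	int old_check = (start >> 28) != (end >> 28);	    /* 1: false positive */
	int new_check = (start >> 28) != ((end - 1) >> 28); /* 0: accepted */

	printf("old=%d new=%d\n", old_check, new_check);
	return 0;
}
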
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
index a072fa8c46b0..d46b58d078aa 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/cayman
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -21,7 +21,7 @@ cayman 0x9400
0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
0x000089B0 VGT_HS_OFFCHIP_PARAM
0x00008A14 PA_CL_ENHANCE
-0x00008A60 PA_SC_LINE_STIPPLE_VALUE
+0x00008A60 PA_SU_LINE_STIPPLE_VALUE
0x00008B10 PA_SC_LINE_STIPPLE_STATE
0x00008BF0 PA_SC_ENHANCE
0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
@@ -532,7 +532,7 @@ cayman 0x9400
0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
-0x00028B74 VGT_GS_INSTANCE_CNT
+0x00028B90 VGT_GS_INSTANCE_CNT
0x00028BD4 PA_SC_CENTROID_PRIORITY_0
0x00028BD8 PA_SC_CENTROID_PRIORITY_1
0x00028BDC PA_SC_LINE_CNTL
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index b912a37689bf..57745c8761c8 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -22,7 +22,7 @@ evergreen 0x9400
0x000089A4 VGT_COMPUTE_START_Z
0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
0x00008A14 PA_CL_ENHANCE
-0x00008A60 PA_SC_LINE_STIPPLE_VALUE
+0x00008A60 PA_SU_LINE_STIPPLE_VALUE
0x00008B10 PA_SC_LINE_STIPPLE_STATE
0x00008BF0 PA_SC_ENHANCE
0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
@@ -545,7 +545,7 @@ evergreen 0x9400
0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
-0x00028B74 VGT_GS_INSTANCE_CNT
+0x00028B90 VGT_GS_INSTANCE_CNT
0x00028C00 PA_SC_LINE_CNTL
0x00028C08 PA_SU_VTX_CNTL
0x00028C0C PA_CL_GB_VERT_CLIP_ADJ
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 1c560629575a..e7dab069cccf 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -162,6 +162,16 @@ static void rs690_mc_init(struct radeon_device *rdev)
base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
base = G_000100_MC_FB_START(base) << 16;
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+ /* Some boards seem to be configured for 128MB of sideport memory,
+ * but really only have 64MB. Just skip the sideport and use
+ * UMA memory.
+ */
+ if (rdev->mc.igp_sideport_enabled &&
+ (rdev->mc.real_vram_size == (384 * 1024 * 1024))) {
+ base += 128 * 1024 * 1024;
+ rdev->mc.real_vram_size -= 128 * 1024 * 1024;
+ rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+ }
/* Use K8 direct mapping for fast fb access. */
rdev->fastfb_working = false;
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 913b025ae9b3..374499db20c7 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2328,6 +2328,12 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev)
pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
ASIC_INTERNAL_MEMORY_SS, 0);
+ /* disable ss, causes hangs on some cayman boards */
+ if (rdev->family == CHIP_CAYMAN) {
+ pi->sclk_ss = false;
+ pi->mclk_ss = false;
+ }
+
if (pi->sclk_ss || pi->mclk_ss)
pi->dynamic_ss = true;
else
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 6a64ccaa0695..85e1edfaa3be 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2811,7 +2811,7 @@ static void si_setup_spi(struct radeon_device *rdev,
}
static u32 si_get_rb_disabled(struct radeon_device *rdev,
- u32 max_rb_num, u32 se_num,
+ u32 max_rb_num_per_se,
u32 sh_per_se)
{
u32 data, mask;
@@ -2825,14 +2825,14 @@ static u32 si_get_rb_disabled(struct radeon_device *rdev,
data >>= BACKEND_DISABLE_SHIFT;
- mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);
+ mask = si_create_bitmask(max_rb_num_per_se / sh_per_se);
return data & mask;
}
static void si_setup_rb(struct radeon_device *rdev,
u32 se_num, u32 sh_per_se,
- u32 max_rb_num)
+ u32 max_rb_num_per_se)
{
int i, j;
u32 data, mask;
@@ -2842,19 +2842,21 @@ static void si_setup_rb(struct radeon_device *rdev,
for (i = 0; i < se_num; i++) {
for (j = 0; j < sh_per_se; j++) {
si_select_se_sh(rdev, i, j);
- data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
+ data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
}
}
si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
mask = 1;
- for (i = 0; i < max_rb_num; i++) {
+ for (i = 0; i < max_rb_num_per_se * se_num; i++) {
if (!(disabled_rbs & mask))
enabled_rbs |= mask;
mask <<= 1;
}
+ rdev->config.si.backend_enable_mask = enabled_rbs;
+
for (i = 0; i < se_num; i++) {
si_select_se_sh(rdev, i, 0xffffffff);
data = 0;
@@ -3882,8 +3884,15 @@ static int si_mc_init(struct radeon_device *rdev)
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* size in MB on si */
- rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
- rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ tmp = RREG32(CONFIG_MEMSIZE);
+ /* some boards may have garbage in the upper 16 bits */
+ if (tmp & 0xffff0000) {
+ DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
+ if (tmp & 0xffff)
+ tmp &= 0xffff;
+ }
+ rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL;
+ rdev->mc.real_vram_size = rdev->mc.mc_vram_size;
rdev->mc.visible_vram_size = rdev->mc.aper_size;
si_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
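
The si_mc_init() hunk above reads CONFIG_MEMSIZE (size in MB) and masks off the upper 16 bits only when they are non-zero and the low 16 bits still hold a usable size; if the low half is zero, the raw value is kept rather than reporting 0MB of VRAM. The same sanitization logic as a standalone check:

/* Standalone illustration of the CONFIG_MEMSIZE sanitization above:
 * drop garbage in the upper 16 bits only if a usable low half remains. */
#include <stdio.h>
#include <stdint.h>

static uint32_t sanitize_vram_mb(uint32_t tmp)
{
	if (tmp & 0xffff0000) {
		printf("Probable bad vram size: 0x%08x\n", tmp);
		if (tmp & 0xffff)
			tmp &= 0xffff;	/* keep the plausible low half */
	}
	return tmp;		/* size in MB */
}

int main(void)
{
	printf("%u\n", (unsigned)sanitize_vram_mb(0x00000800)); /* 2048 MB, untouched */
	printf("%u\n", (unsigned)sanitize_vram_mb(0xdead0800)); /* garbage high half -> 2048 */
	printf("%u\n", (unsigned)sanitize_vram_mb(0x08000000)); /* low half zero -> kept as-is */
	return 0;
}
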
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 28e178137718..07eba596d458 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -135,11 +135,11 @@ int tegra_drm_submit(struct tegra_drm_context *context,
unsigned int num_relocs = args->num_relocs;
unsigned int num_waitchks = args->num_waitchks;
struct drm_tegra_cmdbuf __user *cmdbufs =
- (void * __user)(uintptr_t)args->cmdbufs;
+ (void __user *)(uintptr_t)args->cmdbufs;
struct drm_tegra_reloc __user *relocs =
- (void * __user)(uintptr_t)args->relocs;
+ (void __user *)(uintptr_t)args->relocs;
struct drm_tegra_waitchk __user *waitchks =
- (void * __user)(uintptr_t)args->waitchks;
+ (void __user *)(uintptr_t)args->waitchks;
struct drm_tegra_syncpt syncpt;
struct host1x_job *job;
int err;
@@ -163,9 +163,10 @@ int tegra_drm_submit(struct tegra_drm_context *context,
struct drm_tegra_cmdbuf cmdbuf;
struct host1x_bo *bo;
- err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
- if (err)
+ if (copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf))) {
+ err = -EFAULT;
goto fail;
+ }
bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
if (!bo) {
@@ -178,10 +179,11 @@ int tegra_drm_submit(struct tegra_drm_context *context,
cmdbufs++;
}
- err = copy_from_user(job->relocarray, relocs,
- sizeof(*relocs) * num_relocs);
- if (err)
+ if (copy_from_user(job->relocarray, relocs,
+ sizeof(*relocs) * num_relocs)) {
+ err = -EFAULT;
goto fail;
+ }
while (num_relocs--) {
struct host1x_reloc *reloc = &job->relocarray[num_relocs];
@@ -199,15 +201,17 @@ int tegra_drm_submit(struct tegra_drm_context *context,
}
}
- err = copy_from_user(job->waitchk, waitchks,
- sizeof(*waitchks) * num_waitchks);
- if (err)
+ if (copy_from_user(job->waitchk, waitchks,
+ sizeof(*waitchks) * num_waitchks)) {
+ err = -EFAULT;
goto fail;
+ }
- err = copy_from_user(&syncpt, (void * __user)(uintptr_t)args->syncpts,
- sizeof(syncpt));
- if (err)
+ if (copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts,
+ sizeof(syncpt))) {
+ err = -EFAULT;
goto fail;
+ }
job->is_addr_reg = context->client->ops->is_addr_reg;
job->syncpt_incrs = syncpt.incrs;
@@ -573,7 +577,7 @@ static void tegra_debugfs_cleanup(struct drm_minor *minor)
}
#endif
-struct drm_driver tegra_drm_driver = {
+static struct drm_driver tegra_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM,
.load = tegra_drm_load,
.unload = tegra_drm_unload,
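
The tegra_drm_submit() hunks above fix a common pattern error: copy_from_user() returns the number of bytes it failed to copy, not a negative errno, so forwarding its return value would leak a positive count to user space. The corrected idiom, sketched in isolation (the struct and helper names are placeholders, not tegra code):

/* Sketch of the corrected copy_from_user() idiom; "demo_args" and
 * demo_copy_args() are hypothetical. */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

struct demo_args {
	u32 handle;
	u32 flags;
};

static int demo_copy_args(struct demo_args *dst, const void __user *src)
{
	/* copy_from_user() returns bytes NOT copied (0 on success),
	 * so translate any failure into -EFAULT explicitly. */
	if (copy_from_user(dst, src, sizeof(*dst)))
		return -EFAULT;

	return 0;
}
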
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index fdfe259ed7f8..7da0b923131f 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -116,7 +116,7 @@ host1x_client_to_dc(struct host1x_client *client)
static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc)
{
- return container_of(crtc, struct tegra_dc, base);
+ return crtc ? container_of(crtc, struct tegra_dc, base) : NULL;
}
static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value,
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 490f7719e317..a3835e7de184 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -247,7 +247,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
info->var.yoffset * fb->pitches[0];
drm->mode_config.fb_base = (resource_size_t)bo->paddr;
- info->screen_base = bo->vaddr + offset;
+ info->screen_base = (void __iomem *)bo->vaddr + offset;
info->screen_size = size;
info->fix.smem_start = (unsigned long)(bo->paddr + offset);
info->fix.smem_len = size;
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index ba47ca4fb880..3b29018913a5 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -14,6 +14,8 @@
struct tegra_rgb {
struct tegra_output output;
+ struct tegra_dc *dc;
+
struct clk *clk_parent;
struct clk *clk;
};
@@ -84,18 +86,18 @@ static void tegra_dc_write_regs(struct tegra_dc *dc,
static int tegra_output_rgb_enable(struct tegra_output *output)
{
- struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+ struct tegra_rgb *rgb = to_rgb(output);
- tegra_dc_write_regs(dc, rgb_enable, ARRAY_SIZE(rgb_enable));
+ tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable));
return 0;
}
static int tegra_output_rgb_disable(struct tegra_output *output)
{
- struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+ struct tegra_rgb *rgb = to_rgb(output);
- tegra_dc_write_regs(dc, rgb_disable, ARRAY_SIZE(rgb_disable));
+ tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
return 0;
}
@@ -146,6 +148,7 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
rgb->output.dev = dc->dev;
rgb->output.of_node = np;
+ rgb->dc = dc;
err = tegra_output_probe(&rgb->output);
if (err < 0)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 15b86a94949d..406152152315 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -353,7 +353,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
* Don't move nonexistent data. Clear destination instead.
*/
if (old_iomap == NULL &&
- (ttm == NULL || ttm->state == tt_unpopulated)) {
+ (ttm == NULL || (ttm->state == tt_unpopulated &&
+ !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
goto out2;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index b249ab9b1eb2..6440eeac22d2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -169,9 +169,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
- drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
- page_last = vma_pages(vma) +
- drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
+ vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
+ page_last = vma_pages(vma) + vma->vm_pgoff -
+ drm_vma_node_start(&bo->vma_node);
if (unlikely(page_offset >= bo->num_pages)) {
retval = VM_FAULT_SIGBUS;
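
The ttm_bo_vm_fault() hunk above fixes the sign of the offset calculation: vm_pgoff is where the VMA was mapped within the drm_vma address space and drm_vma_node_start() is where the BO begins there, so the BO-relative page index of a fault is the faulting page plus (vm_pgoff - node_start), not the reverse. A standalone arithmetic demonstration with made-up numbers:

/* Page-index arithmetic behind the fault-offset fix above: a VMA that
 * starts part-way into the BO needs fault_page + (vm_pgoff - node_start). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t node_start = 0x100000;	/* BO start in the vma space (pages) */
	uint64_t vm_pgoff   = 0x100004;	/* VMA mapped 4 pages into the BO */
	uint64_t fault_page = 2;	/* (address - vm_start) >> PAGE_SHIFT */

	uint64_t old_off = fault_page + node_start - vm_pgoff; /* underflows (wraps huge) */
	uint64_t new_off = fault_page + vm_pgoff - node_start; /* 6: correct BO page */

	printf("old=%llu new=%llu\n",
	       (unsigned long long)old_off, (unsigned long long)new_off);
	return 0;
}
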
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 24ffbe990736..8d67b943ac05 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -125,6 +125,12 @@ static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
static void udl_gem_put_pages(struct udl_gem_object *obj)
{
+ if (obj->base.import_attach) {
+ drm_free_large(obj->pages);
+ obj->pages = NULL;
+ return;
+ }
+
drm_gem_put_pages(&obj->base, obj->pages, false, false);
obj->pages = NULL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 7776e6f0aef6..0489c6152482 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -150,6 +150,8 @@ struct vmw_ttm_tt {
bool mapped;
};
+const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
+
/**
* Helper functions to advance a struct vmw_piter iterator.
*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index db85985c7086..20890ad8408b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -615,6 +615,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
* TTM buffer object driver - vmwgfx_buffer.c
*/
+extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index a51f48e3e917..45d5b5ab6ca9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -68,6 +68,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
SVGA_FIFO_3D_HWVERSION));
break;
}
+ case DRM_VMW_PARAM_MAX_SURF_MEMORY:
+ param->value = dev_priv->memory_size;
+ break;
default:
DRM_ERROR("Illegal vmwgfx get param request: %d\n",
param->param);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index ecb3d867b426..03f1c2038631 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -75,6 +75,7 @@ void vmw_display_unit_cleanup(struct vmw_display_unit *du)
vmw_surface_unreference(&du->cursor_surface);
if (du->cursor_dmabuf)
vmw_dmabuf_unreference(&du->cursor_dmabuf);
+ drm_sysfs_connector_remove(&du->connector);
drm_crtc_cleanup(&du->crtc);
drm_encoder_cleanup(&du->encoder);
drm_connector_cleanup(&du->connector);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 79f7e8e60529..a055a26819c2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -260,6 +260,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
connector->encoder = NULL;
encoder->crtc = NULL;
crtc->fb = NULL;
+ crtc->enabled = false;
vmw_ldu_del_active(dev_priv, ldu);
@@ -285,6 +286,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
crtc->x = set->x;
crtc->y = set->y;
crtc->mode = *mode;
+ crtc->enabled = true;
vmw_ldu_add_active(dev_priv, ldu, vfb);
@@ -369,6 +371,8 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
+ (void) drm_sysfs_connector_add(connector);
+
drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
drm_mode_crtc_set_gamma_size(crtc, 256);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index efe2b74c5eb1..9b5ea2ac7ddf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -352,6 +352,38 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
/**
* Buffer management.
*/
+
+/**
+ * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
+ *
+ * @dev_priv: Pointer to a struct vmw_private identifying the device.
+ * @size: The requested buffer size.
+ * @user: Whether this is an ordinary dma buffer or a user dma buffer.
+ */
+static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
+ bool user)
+{
+ static size_t struct_size, user_struct_size;
+ size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
+
+ if (unlikely(struct_size == 0)) {
+ size_t backend_size = ttm_round_pot(vmw_tt_size);
+
+ struct_size = backend_size +
+ ttm_round_pot(sizeof(struct vmw_dma_buffer));
+ user_struct_size = backend_size +
+ ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
+ }
+
+ if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+ page_array_size +=
+ ttm_round_pot(num_pages * sizeof(dma_addr_t));
+
+ return ((user) ? user_struct_size : struct_size) +
+ page_array_size;
+}
+
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
@@ -359,6 +391,13 @@ void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
kfree(vmw_bo);
}
+static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
+{
+ struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
+
+ ttm_prime_object_kfree(vmw_user_bo, prime);
+}
+
int vmw_dmabuf_init(struct vmw_private *dev_priv,
struct vmw_dma_buffer *vmw_bo,
size_t size, struct ttm_placement *placement,
@@ -368,28 +407,23 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
struct ttm_bo_device *bdev = &dev_priv->bdev;
size_t acc_size;
int ret;
+ bool user = (bo_free == &vmw_user_dmabuf_destroy);
- BUG_ON(!bo_free);
+ BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
- acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
+ acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
memset(vmw_bo, 0, sizeof(*vmw_bo));
INIT_LIST_HEAD(&vmw_bo->res_list);
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
- ttm_bo_type_device, placement,
+ (user) ? ttm_bo_type_device :
+ ttm_bo_type_kernel, placement,
0, interruptible,
NULL, acc_size, NULL, bo_free);
return ret;
}
-static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
-{
- struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
-
- ttm_prime_object_kfree(vmw_user_bo, prime);
-}
-
static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
struct vmw_user_dma_buffer *vmw_user_bo;
@@ -781,54 +815,55 @@ err_ref:
}
+/**
+ * vmw_dumb_create - Create a dumb kms buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @args: Pointer to a struct drm_mode_create_dumb structure
+ *
+ * This is a driver callback for the core drm create_dumb functionality.
+ * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
+ * that the arguments have a different format.
+ */
int vmw_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_master *vmaster = vmw_master(file_priv->master);
- struct vmw_user_dma_buffer *vmw_user_bo;
- struct ttm_buffer_object *tmp;
+ struct vmw_dma_buffer *dma_buf;
int ret;
args->pitch = args->width * ((args->bpp + 7) / 8);
args->size = args->pitch * args->height;
- vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
- if (vmw_user_bo == NULL)
- return -ENOMEM;
-
ret = ttm_read_lock(&vmaster->lock, true);
- if (ret != 0) {
- kfree(vmw_user_bo);
+ if (unlikely(ret != 0))
return ret;
- }
- ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
- &vmw_vram_sys_placement, true,
- &vmw_user_dmabuf_destroy);
- if (ret != 0)
- goto out_no_dmabuf;
-
- tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
- ret = ttm_prime_object_init(vmw_fpriv(file_priv)->tfile,
- args->size,
- &vmw_user_bo->prime,
- false,
- ttm_buffer_type,
- &vmw_user_dmabuf_release, NULL);
+ ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+ args->size, false, &args->handle,
+ &dma_buf);
if (unlikely(ret != 0))
- goto out_no_base_object;
-
- args->handle = vmw_user_bo->prime.base.hash.key;
+ goto out_no_dmabuf;
-out_no_base_object:
- ttm_bo_unref(&tmp);
+ vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
ttm_read_unlock(&vmaster->lock);
return ret;
}
+/**
+ * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ * @offset: The address space offset returned.
+ *
+ * This is a driver callback for the core drm dumb_map_offset functionality.
+ */
int vmw_dumb_map_offset(struct drm_file *file_priv,
struct drm_device *dev, uint32_t handle,
uint64_t *offset)
@@ -846,6 +881,15 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
return 0;
}
+/**
+ * vmw_dumb_destroy - Destroy a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ *
+ * This is a driver callback for the core drm dumb_destroy functionality.
+ */
int vmw_dumb_destroy(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 26387c3d5a21..22406c8651ea 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -310,6 +310,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
crtc->fb = NULL;
crtc->x = 0;
crtc->y = 0;
+ crtc->enabled = false;
vmw_sou_del_active(dev_priv, sou);
@@ -370,6 +371,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
crtc->fb = NULL;
crtc->x = 0;
crtc->y = 0;
+ crtc->enabled = false;
return ret;
}
@@ -382,6 +384,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
crtc->fb = fb;
crtc->x = set->x;
crtc->y = set->y;
+ crtc->enabled = true;
return 0;
}
@@ -464,6 +467,8 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
+ (void) drm_sysfs_connector_add(connector);
+
drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);
drm_mode_crtc_set_gamma_size(crtc, 256);
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 509383f8be03..6a929591aa73 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -19,6 +19,7 @@
#include <linux/of.h>
#include <linux/slab.h>
+#include "bus.h"
#include "dev.h"
static DEFINE_MUTEX(clients_lock);
@@ -257,7 +258,7 @@ static int host1x_unregister_client(struct host1x *host1x,
return -ENODEV;
}
-struct bus_type host1x_bus_type = {
+static struct bus_type host1x_bus_type = {
.name = "host1x",
};
@@ -301,7 +302,7 @@ static int host1x_device_add(struct host1x *host1x,
device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
device->dev.dma_mask = &device->dev.coherent_dma_mask;
device->dev.release = host1x_device_release;
- dev_set_name(&device->dev, driver->name);
+ dev_set_name(&device->dev, "%s", driver->name);
device->dev.bus = &host1x_bus_type;
device->dev.parent = host1x->dev;
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
index 37e2a63241a9..6b09b71940c2 100644
--- a/drivers/gpu/host1x/hw/cdma_hw.c
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -54,8 +54,8 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
u32 *p = (u32 *)((u32)pb->mapped + getptr);
*(p++) = HOST1X_OPCODE_NOP;
*(p++) = HOST1X_OPCODE_NOP;
- dev_dbg(host1x->dev, "%s: NOP at 0x%x\n", __func__,
- pb->phys + getptr);
+ dev_dbg(host1x->dev, "%s: NOP at %#llx\n", __func__,
+ (u64)pb->phys + getptr);
getptr = (getptr + 8) & (pb->size_bytes - 1);
}
wmb();
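
The dev_dbg() change above avoids a format/argument mismatch when the pushbuffer's physical address is wider than 32 bits: the value is widened to u64 and printed with %#llx. The same portable-printing pattern in standalone C, using the matching fixed-width format macro:

/* Printing a possibly-64-bit address portably: widen to a known 64-bit
 * type and use the matching format. Standalone C, not host1x code. */
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	unsigned long phys = 0xfe000000UL;	/* stand-in for pb->phys */
	unsigned int getptr = 0x40;

	printf("NOP at %#" PRIx64 "\n", (uint64_t)phys + getptr);
	return 0;
}
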
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
index 640c75ca5a8b..f72c873eff81 100644
--- a/drivers/gpu/host1x/hw/debug_hw.c
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -163,8 +163,8 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
continue;
}
- host1x_debug_output(o, " GATHER at %08x+%04x, %d words\n",
- g->base, g->offset, g->words);
+ host1x_debug_output(o, " GATHER at %#llx+%04x, %d words\n",
+ (u64)g->base, g->offset, g->words);
show_gather(o, g->base + g->offset, g->words, cdma,
g->base, mapped);