author     Thomas Hellstrom <thellstrom@vmware.com>    2015-09-14 11:17:43 +0300
committer  Thomas Hellstrom <thellstrom@vmware.com>    2015-09-14 11:17:43 +0300
commit     2e586a7e017a502410ba1c1a7411179e1d3fbb2b (patch)
tree       44f01f5ccf6fe03bc2c081583e49b1eb5519612c /drivers/gpu/drm/vmwgfx
parent     54c12bc374408faddbff75dbf1a6167c19af39c4 (diff)
download   linux-2e586a7e017a502410ba1c1a7411179e1d3fbb2b.tar.xz
drm/vmwgfx: Map the fifo as cached
On the guest kernel side, the FIFO has previously been mapped write-combined.
This has worked because, until now, VMs have not honored the mapping type and
have mapped the FIFO cached anyway. Since the FIFO is accessed cached by the
CPU on the virtual device side, the write-combined mapping becomes
inconsistent once the guest starts to honor the mapping types.

So ask for cached mappings when we map the FIFO. We do this by using
ioremap_cache() instead of ioremap_wc(), and by removing the MTRR setup.
On the TTM side, MOBs, GMRs and VRAM buffers already request cached mappings
for kernel- and user-space.

Cc: <stable@vger.kernel.org>
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
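For context only (not part of the patch), a minimal sketch of the mapping
pattern the message describes follows. The function and parameter names
(vmw_example_map_fifo, fifo_start, fifo_size) are placeholders, not driver API:

	#include <linux/io.h>

	/*
	 * Map the FIFO/MMIO aperture cached, matching how the virtual device
	 * accesses it. Unlike the old ioremap_wc() path, no
	 * arch_phys_wc_add()/arch_phys_wc_del() MTRR bookkeeping is needed.
	 */
	static void __iomem *vmw_example_map_fifo(resource_size_t fifo_start,
						  unsigned long fifo_size)
	{
		return ioremap_cache(fifo_start, fifo_size);
	}

Teardown is then symmetric: a plain iounmap() of the returned pointer, with
no MTRR handle to release.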
Diffstat (limited to 'drivers/gpu/drm/vmwgfx')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c  10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h   1
2 files changed, 2 insertions, 9 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index e13b20bd9908..2c7a25c71af2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -752,12 +752,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
dev_priv->active_master = &dev_priv->fbdev_master;
-
- dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
- dev_priv->mmio_size);
-
- dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
- dev_priv->mmio_size);
+ dev_priv->mmio_virt = ioremap_cache(dev_priv->mmio_start,
+ dev_priv->mmio_size);
if (unlikely(dev_priv->mmio_virt == NULL)) {
ret = -ENOMEM;
@@ -913,7 +909,6 @@ out_no_device:
out_err4:
iounmap(dev_priv->mmio_virt);
out_err3:
- arch_phys_wc_del(dev_priv->mmio_mtrr);
vmw_ttm_global_release(dev_priv);
out_err0:
for (i = vmw_res_context; i < vmw_res_max; ++i)
@@ -964,7 +959,6 @@ static int vmw_driver_unload(struct drm_device *dev)
ttm_object_device_release(&dev_priv->tdev);
iounmap(dev_priv->mmio_virt);
- arch_phys_wc_del(dev_priv->mmio_mtrr);
if (dev_priv->ctx.staged_bindings)
vmw_binding_state_free(dev_priv->ctx.staged_bindings);
vmw_ttm_global_release(dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index b60b41f95e74..a20f482848ee 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -376,7 +376,6 @@ struct vmw_private {
uint32_t initial_width;
uint32_t initial_height;
u32 __iomem *mmio_virt;
- int mmio_mtrr;
uint32_t capabilities;
uint32_t max_gmr_ids;
uint32_t max_gmr_pages;