Diffstat (limited to 'drivers/gpu/drm/vmwgfx')
-rw-r--r-- drivers/gpu/drm/vmwgfx/Kconfig | 10
-rw-r--r-- drivers/gpu/drm/vmwgfx/Makefile | 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h | 8
-rw-r--r-- drivers/gpu/drm/vmwgfx/device_include/svga_reg.h | 55
-rw-r--r-- drivers/gpu/drm/vmwgfx/ttm_lock.c | 194
-rw-r--r-- drivers/gpu/drm/vmwgfx/ttm_lock.h | 218
-rw-r--r-- drivers/gpu/drm/vmwgfx/ttm_object.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_binding.c | 8
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_blit.c | 10
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 85
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c | 134
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c | 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 25
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 12
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 169
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 147
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 53
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 8
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 18
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_fence.h | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 30
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 60
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 77
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 36
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 36
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_msg.c | 31
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_msg.h | 214
-rwxr-xr-x drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h | 130
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h | 219
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c | 8
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_reg.h | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 10
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 18
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c | 5
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_so.c | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c | 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 46
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_thp.c | 53
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 30
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | 53
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_validation.c | 4
45 files changed, 1037 insertions(+), 1219 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index 15acdf2a7c0f..0060ef842b5a 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -1,13 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
config DRM_VMWGFX
tristate "DRM driver for VMware Virtual GPU"
- depends on DRM && PCI && X86 && MMU
- select FB_DEFERRED_IO
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ depends on DRM && PCI && MMU
+ depends on X86 || ARM64
select DRM_TTM
- select FB
select MAPPING_DIRTY_HELPERS
# Only needed for the transitional use of drm_crtc_init - can be removed
# again once vmwgfx sets up the primary plane itself.
@@ -20,7 +16,7 @@ config DRM_VMWGFX
The compiled module will be called "vmwgfx.ko".
config DRM_VMWGFX_FBCON
- depends on DRM_VMWGFX && FB
+ depends on DRM_VMWGFX && DRM_FBDEV_EMULATION
bool "Enable framebuffer console under vmwgfx by default"
help
Choose this option if you are shipping a new vmwgfx
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 8c02fa5852e7..09f6dcac768b 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
- vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
+ vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
vmwgfx_cmd.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o vmwgfx_gmrid_manager.o vmwgfx_fence.o \
vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \
@@ -9,7 +9,9 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \
- ttm_object.o ttm_lock.o ttm_memory.o
+ ttm_object.o ttm_memory.o
+vmwgfx-$(CONFIG_DRM_FBDEV_EMULATION) += vmwgfx_fb.o
vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o
+
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
index 4db25bd9fa22..127eaf0a0a58 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
@@ -1467,6 +1467,7 @@ struct svga3dsurface_cache {
/**
* struct svga3dsurface_loc - Surface location
+ * @sheet: The multisample sheet.
* @sub_resource: Surface subresource. Defined as layer * num_mip_levels +
* mip_level.
* @x: X coordinate.
@@ -1474,6 +1475,7 @@ struct svga3dsurface_cache {
* @z: Z coordinate.
*/
struct svga3dsurface_loc {
+ u32 sheet;
u32 sub_resource;
u32 x, y, z;
};
@@ -1566,8 +1568,8 @@ svga3dsurface_get_loc(const struct svga3dsurface_cache *cache,
u32 layer;
int i;
- if (offset >= cache->sheet_bytes)
- offset %= cache->sheet_bytes;
+ loc->sheet = offset / cache->sheet_bytes;
+ offset -= loc->sheet * cache->sheet_bytes;
layer = offset / cache->mip_chain_bytes;
offset -= layer * cache->mip_chain_bytes;
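
Note on the hunk above: the new multisample-sheet handling is plain integer
decomposition of a linear byte offset. A minimal standalone sketch; the
sheet_bytes, mip_chain_bytes and offset values below are illustrative, not
real surface parameters:

    #include <stdio.h>

    int main(void)
    {
        unsigned int sheet_bytes = 0x100000;    /* bytes per multisample sheet */
        unsigned int mip_chain_bytes = 0x40000; /* bytes per layer's mip chain */
        unsigned int offset = 0x187000;

        unsigned int sheet = offset / sheet_bytes;     /* -> 1 */
        offset -= sheet * sheet_bytes;                 /* 0x87000 left */
        unsigned int layer = offset / mip_chain_bytes; /* -> 2 */
        offset -= layer * mip_chain_bytes;             /* 0x7000 left */

        printf("sheet=%u layer=%u rest=0x%x\n", sheet, layer, offset);
        return 0;
    }
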
@@ -1631,6 +1633,7 @@ svga3dsurface_min_loc(const struct svga3dsurface_cache *cache,
u32 sub_resource,
struct svga3dsurface_loc *loc)
{
+ loc->sheet = 0;
loc->sub_resource = sub_resource;
loc->x = loc->y = loc->z = 0;
}
@@ -1652,6 +1655,7 @@ svga3dsurface_max_loc(const struct svga3dsurface_cache *cache,
const struct drm_vmw_size *size;
u32 mip;
+ loc->sheet = 0;
loc->sub_resource = sub_resource + 1;
mip = sub_resource % cache->num_mip_levels;
size = &cache->mip[mip].size;
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
index 19fb9e3299e7..193a57f6aae5 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 1998-2015 VMware, Inc.
+ * Copyright 1998-2021 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -98,6 +98,10 @@ typedef uint32 SVGAMobId;
#define SVGA_MAGIC 0x900000UL
#define SVGA_MAKE_ID(ver) (SVGA_MAGIC << 8 | (ver))
+/* Version 3 has the control bar instead of the FIFO */
+#define SVGA_VERSION_3 3
+#define SVGA_ID_3 SVGA_MAKE_ID(SVGA_VERSION_3)
+
/* Version 2 let the address of the frame buffer be unsigned on Win32 */
#define SVGA_VERSION_2 2
#define SVGA_ID_2 SVGA_MAKE_ID(SVGA_VERSION_2)
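
The IDs encode SVGA_MAGIC in the upper bits and the version number in the low
byte. A quick compile-time sanity check of the encoding (standalone C11
sketch, not part of the header):

    #include <assert.h>

    #define SVGA_MAGIC        0x900000UL
    #define SVGA_MAKE_ID(ver) (SVGA_MAGIC << 8 | (ver))

    static_assert(SVGA_MAKE_ID(2) == 0x90000002UL, "SVGA_ID_2 encoding");
    static_assert(SVGA_MAKE_ID(3) == 0x90000003UL, "SVGA_ID_3 encoding");
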
@@ -129,11 +133,12 @@ typedef uint32 SVGAMobId;
* Interrupts are only supported when the
* SVGA_CAP_IRQMASK capability is present.
*/
-#define SVGA_IRQFLAG_ANY_FENCE 0x1 /* Any fence was passed */
-#define SVGA_IRQFLAG_FIFO_PROGRESS 0x2 /* Made forward progress in the FIFO */
-#define SVGA_IRQFLAG_FENCE_GOAL 0x4 /* SVGA_FIFO_FENCE_GOAL reached */
-#define SVGA_IRQFLAG_COMMAND_BUFFER 0x8 /* Command buffer completed */
-#define SVGA_IRQFLAG_ERROR 0x10 /* Error while processing commands */
+#define SVGA_IRQFLAG_ANY_FENCE (1 << 0) /* Any fence was passed */
+#define SVGA_IRQFLAG_FIFO_PROGRESS (1 << 1) /* Made forward progress in the FIFO */
+#define SVGA_IRQFLAG_FENCE_GOAL (1 << 2) /* SVGA_FIFO_FENCE_GOAL reached */
+#define SVGA_IRQFLAG_COMMAND_BUFFER (1 << 3) /* Command buffer completed */
+#define SVGA_IRQFLAG_ERROR (1 << 4) /* Error while processing commands */
+#define SVGA_IRQFLAG_MAX (1 << 5)
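
Rewriting the flags as (1 << n) makes the bit positions explicit, and the new
SVGA_IRQFLAG_MAX sits one past the last valid bit, so a mask of every defined
flag can be derived from it. A sketch; SVGA_IRQFLAG_ALL and the helper are our
names, not part of the header:

    #define SVGA_IRQFLAG_ALL (SVGA_IRQFLAG_MAX - 1)    /* == 0x1f */

    static inline bool svga_irq_status_is_valid(u32 status)
    {
        return (status & ~SVGA_IRQFLAG_ALL) == 0;  /* no unknown bits set */
    }
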
/*
* The byte-size is the size of the actual cursor data,
@@ -286,7 +291,32 @@ enum {
*/
SVGA_REG_GBOBJECT_MEM_SIZE_KB = 76,
- SVGA_REG_TOP = 77, /* Must be 1 more than the last register */
+ /*
+ * These registers are for the addresses of the memory BARs for SVGA3
+ */
+ SVGA_REG_REGS_START_HIGH32 = 77,
+ SVGA_REG_REGS_START_LOW32 = 78,
+ SVGA_REG_FB_START_HIGH32 = 79,
+ SVGA_REG_FB_START_LOW32 = 80,
+
+ /*
+ * A hint register that recommends which quality level the guest should
+ * currently use to define multisample surfaces.
+ *
+ * If the register is SVGA_REG_MSHINT_DISABLED,
+ * the guest is only allowed to use SVGA3D_MS_QUALITY_FULL.
+ *
+ * Otherwise, this is a live value that can change while the VM is
+ * powered on with the hint suggestion for which quality level the guest
+ * should be using. Guests are free to ignore the hint and use either
+ * RESOLVE or FULL quality.
+ */
+ SVGA_REG_MSHINT = 81,
+
+ SVGA_REG_IRQ_STATUS = 82,
+ SVGA_REG_DIRTY_TRACKING = 83,
+
+ SVGA_REG_TOP = 84, /* Must be 1 more than the last register */
SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
/* Next 768 (== 256*3) registers exist for colormap */
@@ -310,6 +340,17 @@ typedef enum SVGARegGuestDriverId {
SVGA_REG_GUEST_DRIVER_ID_SUBMIT = MAX_UINT32,
} SVGARegGuestDriverId;
+typedef enum SVGARegMSHint {
+ SVGA_REG_MSHINT_DISABLED = 0,
+ SVGA_REG_MSHINT_FULL = 1,
+ SVGA_REG_MSHINT_RESOLVED = 2,
+} SVGARegMSHint;
+
+typedef enum SVGARegDirtyTracking {
+ SVGA_REG_DIRTY_TRACKING_PER_IMAGE = 0,
+ SVGA_REG_DIRTY_TRACKING_PER_SURFACE = 1,
+} SVGARegDirtyTracking;
+
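
Per the register comment above, the MSHINT value is advisory except in the
DISABLED case. A sketch of how a guest might consume it; only
SVGA3D_MS_QUALITY_FULL is named in this header, so SVGA3D_MS_QUALITY_RESOLVED
below is a hypothetical stand-in for the "RESOLVE" quality level:

    static u32 example_pick_ms_quality(struct vmw_private *dev_priv)
    {
        u32 hint = vmw_read(dev_priv, SVGA_REG_MSHINT);

        if (hint == SVGA_REG_MSHINT_DISABLED)
            return SVGA3D_MS_QUALITY_FULL;  /* the only allowed value */

        /* The hint is advisory; FULL remains a legal choice. */
        return (hint == SVGA_REG_MSHINT_RESOLVED) ?
               SVGA3D_MS_QUALITY_RESOLVED /* hypothetical name */ :
               SVGA3D_MS_QUALITY_FULL;
    }
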
/*
* Guest memory regions (GMRs):
diff --git a/drivers/gpu/drm/vmwgfx/ttm_lock.c b/drivers/gpu/drm/vmwgfx/ttm_lock.c
deleted file mode 100644
index 5971c72e6d10..000000000000
--- a/drivers/gpu/drm/vmwgfx/ttm_lock.c
+++ /dev/null
@@ -1,194 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
-/**************************************************************************
- *
- * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-
-#include <linux/atomic.h>
-#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/sched/signal.h>
-#include "ttm_lock.h"
-#include "ttm_object.h"
-
-#define TTM_WRITE_LOCK_PENDING (1 << 0)
-#define TTM_VT_LOCK_PENDING (1 << 1)
-#define TTM_SUSPEND_LOCK_PENDING (1 << 2)
-#define TTM_VT_LOCK (1 << 3)
-#define TTM_SUSPEND_LOCK (1 << 4)
-
-void ttm_lock_init(struct ttm_lock *lock)
-{
- spin_lock_init(&lock->lock);
- init_waitqueue_head(&lock->queue);
- lock->rw = 0;
- lock->flags = 0;
-}
-
-void ttm_read_unlock(struct ttm_lock *lock)
-{
- spin_lock(&lock->lock);
- if (--lock->rw == 0)
- wake_up_all(&lock->queue);
- spin_unlock(&lock->lock);
-}
-
-static bool __ttm_read_lock(struct ttm_lock *lock)
-{
- bool locked = false;
-
- spin_lock(&lock->lock);
- if (lock->rw >= 0 && lock->flags == 0) {
- ++lock->rw;
- locked = true;
- }
- spin_unlock(&lock->lock);
- return locked;
-}
-
-int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
-{
- int ret = 0;
-
- if (interruptible)
- ret = wait_event_interruptible(lock->queue,
- __ttm_read_lock(lock));
- else
- wait_event(lock->queue, __ttm_read_lock(lock));
- return ret;
-}
-
-static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
-{
- bool block = true;
-
- *locked = false;
-
- spin_lock(&lock->lock);
- if (lock->rw >= 0 && lock->flags == 0) {
- ++lock->rw;
- block = false;
- *locked = true;
- } else if (lock->flags == 0) {
- block = false;
- }
- spin_unlock(&lock->lock);
-
- return !block;
-}
-
-int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
-{
- int ret = 0;
- bool locked;
-
- if (interruptible)
- ret = wait_event_interruptible
- (lock->queue, __ttm_read_trylock(lock, &locked));
- else
- wait_event(lock->queue, __ttm_read_trylock(lock, &locked));
-
- if (unlikely(ret != 0)) {
- BUG_ON(locked);
- return ret;
- }
-
- return (locked) ? 0 : -EBUSY;
-}
-
-void ttm_write_unlock(struct ttm_lock *lock)
-{
- spin_lock(&lock->lock);
- lock->rw = 0;
- wake_up_all(&lock->queue);
- spin_unlock(&lock->lock);
-}
-
-static bool __ttm_write_lock(struct ttm_lock *lock)
-{
- bool locked = false;
-
- spin_lock(&lock->lock);
- if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
- lock->rw = -1;
- lock->flags &= ~TTM_WRITE_LOCK_PENDING;
- locked = true;
- } else {
- lock->flags |= TTM_WRITE_LOCK_PENDING;
- }
- spin_unlock(&lock->lock);
- return locked;
-}
-
-int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
-{
- int ret = 0;
-
- if (interruptible) {
- ret = wait_event_interruptible(lock->queue,
- __ttm_write_lock(lock));
- if (unlikely(ret != 0)) {
- spin_lock(&lock->lock);
- lock->flags &= ~TTM_WRITE_LOCK_PENDING;
- wake_up_all(&lock->queue);
- spin_unlock(&lock->lock);
- }
- } else
- wait_event(lock->queue, __ttm_write_lock(lock));
-
- return ret;
-}
-
-void ttm_suspend_unlock(struct ttm_lock *lock)
-{
- spin_lock(&lock->lock);
- lock->flags &= ~TTM_SUSPEND_LOCK;
- wake_up_all(&lock->queue);
- spin_unlock(&lock->lock);
-}
-
-static bool __ttm_suspend_lock(struct ttm_lock *lock)
-{
- bool locked = false;
-
- spin_lock(&lock->lock);
- if (lock->rw == 0) {
- lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
- lock->flags |= TTM_SUSPEND_LOCK;
- locked = true;
- } else {
- lock->flags |= TTM_SUSPEND_LOCK_PENDING;
- }
- spin_unlock(&lock->lock);
- return locked;
-}
-
-void ttm_suspend_lock(struct ttm_lock *lock)
-{
- wait_event(lock->queue, __ttm_suspend_lock(lock));
-}
diff --git a/drivers/gpu/drm/vmwgfx/ttm_lock.h b/drivers/gpu/drm/vmwgfx/ttm_lock.h
deleted file mode 100644
index af8b28ca546f..000000000000
--- a/drivers/gpu/drm/vmwgfx/ttm_lock.h
+++ /dev/null
@@ -1,218 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-
-/** @file ttm_lock.h
- * This file implements a simple replacement for the buffer manager use
- * of the DRM heavyweight hardware lock.
- * The lock is a read-write lock. Taking it in read mode and write mode
- * is relatively fast, and intended for in-kernel use only.
- *
- * The vt mode is used only when there is a need to block all
- * user-space processes from validating buffers.
- * It's allowed to leave kernel space with the vt lock held.
- * If a user-space process dies while having the vt-lock,
- * it will be released during the file descriptor release. The vt lock
- * excludes write lock and read lock.
- *
- * The suspend mode is used to lock out all TTM users when preparing for
- * and executing suspend operations.
- *
- */
-
-#ifndef _TTM_LOCK_H_
-#define _TTM_LOCK_H_
-
-#include <linux/atomic.h>
-#include <linux/wait.h>
-
-#include "ttm_object.h"
-
-/**
- * struct ttm_lock
- *
- * @base: ttm base object used solely to release the lock if the client
- * holding the lock dies.
- * @queue: Queue for processes waiting for lock change-of-status.
- * @lock: Spinlock protecting some lock members.
- * @rw: Read-write lock counter. Protected by @lock.
- * @flags: Lock state. Protected by @lock.
- */
-
-struct ttm_lock {
- struct ttm_base_object base;
- wait_queue_head_t queue;
- spinlock_t lock;
- int32_t rw;
- uint32_t flags;
-};
-
-
-/**
- * ttm_lock_init
- *
- * @lock: Pointer to a struct ttm_lock
- * Initializes the lock.
- */
-extern void ttm_lock_init(struct ttm_lock *lock);
-
-/**
- * ttm_read_unlock
- *
- * @lock: Pointer to a struct ttm_lock
- *
- * Releases a read lock.
- */
-extern void ttm_read_unlock(struct ttm_lock *lock);
-
-/**
- * ttm_read_lock
- *
- * @lock: Pointer to a struct ttm_lock
- * @interruptible: Interruptible sleeping while waiting for a lock.
- *
- * Takes the lock in read mode.
- * Returns:
- * -ERESTARTSYS If interrupted by a signal and interruptible is true.
- */
-extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
-
-/**
- * ttm_read_trylock
- *
- * @lock: Pointer to a struct ttm_lock
- * @interruptible: Interruptible sleeping while waiting for a lock.
- *
- * Tries to take the lock in read mode. If the lock is already held
- * in write mode, the function will return -EBUSY. If the lock is held
- * in vt or suspend mode, the function will sleep until these modes
- * are unlocked.
- *
- * Returns:
- * -EBUSY The lock was already held in write mode.
- * -ERESTARTSYS If interrupted by a signal and interruptible is true.
- */
-extern int ttm_read_trylock(struct ttm_lock *lock, bool interruptible);
-
-/**
- * ttm_write_unlock
- *
- * @lock: Pointer to a struct ttm_lock
- *
- * Releases a write lock.
- */
-extern void ttm_write_unlock(struct ttm_lock *lock);
-
-/**
- * ttm_write_lock
- *
- * @lock: Pointer to a struct ttm_lock
- * @interruptible: Interruptible sleeping while waiting for a lock.
- *
- * Takes the lock in write mode.
- * Returns:
- * -ERESTARTSYS If interrupted by a signal and interruptible is true.
- */
-extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
-
-/**
- * ttm_lock_downgrade
- *
- * @lock: Pointer to a struct ttm_lock
- *
- * Downgrades a write lock to a read lock.
- */
-extern void ttm_lock_downgrade(struct ttm_lock *lock);
-
-/**
- * ttm_suspend_lock
- *
- * @lock: Pointer to a struct ttm_lock
- *
- * Takes the lock in suspend mode. Excludes read and write mode.
- */
-extern void ttm_suspend_lock(struct ttm_lock *lock);
-
-/**
- * ttm_suspend_unlock
- *
- * @lock: Pointer to a struct ttm_lock
- *
- * Releases a suspend lock
- */
-extern void ttm_suspend_unlock(struct ttm_lock *lock);
-
-/**
- * ttm_vt_lock
- *
- * @lock: Pointer to a struct ttm_lock
- * @interruptible: Interruptible sleeping while waiting for a lock.
- * @tfile: Pointer to a struct ttm_object_file to register the lock with.
- *
- * Takes the lock in vt mode.
- * Returns:
- * -ERESTARTSYS If interrupted by a signal and interruptible is true.
- * -ENOMEM: Out of memory when locking.
- */
-extern int ttm_vt_lock(struct ttm_lock *lock, bool interruptible,
- struct ttm_object_file *tfile);
-
-/**
- * ttm_vt_unlock
- *
- * @lock: Pointer to a struct ttm_lock
- *
- * Releases a vt lock.
- * Returns:
- * -EINVAL If the lock was not held.
- */
-extern int ttm_vt_unlock(struct ttm_lock *lock);
-
-/**
- * ttm_write_unlock
- *
- * @lock: Pointer to a struct ttm_lock
- *
- * Releases a write lock.
- */
-extern void ttm_write_unlock(struct ttm_lock *lock);
-
-/**
- * ttm_write_lock
- *
- * @lock: Pointer to a struct ttm_lock
- * @interruptible: Interruptible sleeping while waiting for a lock.
- *
- * Takes the lock in write mode.
- * Returns:
- * -ERESTARTSYS If interrupted by a signal and interruptible is true.
- */
-extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
-
-#endif
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c
index 112394dd0ab6..04789b2bb2a2 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.c
@@ -540,7 +540,7 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob,
if (ret != 0)
goto out_no_object_hash;
- idr_init(&tdev->idr);
+ idr_init_base(&tdev->idr, 1);
tdev->ops = *ops;
tdev->dmabuf_release = tdev->ops.release;
tdev->ops.release = ttm_prime_dmabuf_release;
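
Switching from idr_init() to idr_init_base(&tdev->idr, 1) records that no id
below 1 will ever be allocated, which documents the "handle 0 is invalid"
convention and avoids wasting the radix-tree slot for id 0. A sketch of the
matching allocation side (not this file's code):

    static int example_alloc_handle(struct idr *idr, void *obj)
    {
        /* start == 1 matches idr_init_base(idr, 1); end == 0 means
         * "no upper limit". The first handle returned is 1. */
        return idr_alloc(idr, obj, 1, 0, GFP_KERNEL);
    }
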
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
index 81f525a82b77..05b324825900 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -788,7 +788,7 @@ static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs,
}
/**
- * vmw_binding_emit_set_sr - Issue delayed DX shader resource binding commands
+ * vmw_emit_set_sr - Issue delayed DX shader resource binding commands
*
* @cbs: Pointer to the context's struct vmw_ctx_binding_state
* @shader_slot: The shader slot of the binding.
@@ -832,7 +832,7 @@ static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
}
/**
- * vmw_binding_emit_set_rt - Issue delayed DX rendertarget binding commands
+ * vmw_emit_set_rt - Issue delayed DX rendertarget binding commands
*
* @cbs: Pointer to the context's struct vmw_ctx_binding_state
*/
@@ -1024,7 +1024,7 @@ static void vmw_collect_dirty_vbs(struct vmw_ctx_binding_state *cbs,
}
/**
- * vmw_binding_emit_set_vb - Issue delayed vertex buffer binding commands
+ * vmw_emit_set_vb - Issue delayed vertex buffer binding commands
*
* @cbs: Pointer to the context's struct vmw_ctx_binding_state
*
@@ -1394,7 +1394,7 @@ struct list_head *vmw_binding_state_list(struct vmw_ctx_binding_state *cbs)
}
/**
- * vmwgfx_binding_state_reset - clear a struct vmw_ctx_binding_state
+ * vmw_binding_state_reset - clear a struct vmw_ctx_binding_state
*
* @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared
*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index 3a438ae4d3f4..09fe20e918f9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -421,7 +421,7 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
}
/**
- * ttm_bo_cpu_blit - in-kernel cpu blit.
+ * vmw_bo_cpu_blit - in-kernel cpu blit.
*
* @dst: Destination buffer object.
* @dst_offset: Destination offset of blit start in bytes.
@@ -483,10 +483,10 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
d.src_addr = NULL;
d.dst_pages = dst->ttm->pages;
d.src_pages = src->ttm->pages;
- d.dst_num_pages = dst->mem.num_pages;
- d.src_num_pages = src->mem.num_pages;
- d.dst_prot = ttm_io_prot(dst, &dst->mem, PAGE_KERNEL);
- d.src_prot = ttm_io_prot(src, &src->mem, PAGE_KERNEL);
+ d.dst_num_pages = dst->resource->num_pages;
+ d.src_num_pages = src->resource->num_pages;
+ d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
+ d.src_prot = ttm_io_prot(src, src->resource, PAGE_KERNEL);
d.diff = diff;
for (j = 0; j < h; ++j) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 50e529a01677..362f56d5b12b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -96,10 +96,6 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
int ret;
uint32_t new_flags;
- ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
- if (unlikely(ret != 0))
- return ret;
-
vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, NULL);
@@ -107,7 +103,7 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
goto err;
if (buf->base.pin_count > 0)
- ret = ttm_bo_mem_compat(placement, &bo->mem,
+ ret = ttm_bo_mem_compat(placement, bo->resource,
&new_flags) == true ? 0 : -EINVAL;
else
ret = ttm_bo_validate(bo, placement, &ctx);
@@ -116,9 +112,7 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
vmw_bo_pin_reserved(buf, true);
ttm_bo_unreserve(bo);
-
err:
- ttm_write_unlock(&dev_priv->reservation_sem);
return ret;
}
@@ -144,10 +138,6 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
int ret;
uint32_t new_flags;
- ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
- if (unlikely(ret != 0))
- return ret;
-
vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, NULL);
@@ -155,7 +145,7 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
goto err;
if (buf->base.pin_count > 0) {
- ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
+ ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, bo->resource,
&new_flags) == true ? 0 : -EINVAL;
goto out_unreserve;
}
@@ -172,7 +162,6 @@ out_unreserve:
ttm_bo_unreserve(bo);
err:
- ttm_write_unlock(&dev_priv->reservation_sem);
return ret;
}
@@ -222,16 +211,12 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
uint32_t new_flags;
place = vmw_vram_placement.placement[0];
- place.lpfn = bo->mem.num_pages;
+ place.lpfn = bo->resource->num_pages;
placement.num_placement = 1;
placement.placement = &place;
placement.num_busy_placement = 1;
placement.busy_placement = &place;
- ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
- if (unlikely(ret != 0))
- return ret;
-
vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0))
@@ -242,28 +227,27 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
* In that case, evict it first because TTM isn't good at handling
* that situation.
*/
- if (bo->mem.mem_type == TTM_PL_VRAM &&
- bo->mem.start < bo->mem.num_pages &&
- bo->mem.start > 0 &&
+ if (bo->resource->mem_type == TTM_PL_VRAM &&
+ bo->resource->start < bo->resource->num_pages &&
+ bo->resource->start > 0 &&
buf->base.pin_count == 0) {
ctx.interruptible = false;
(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
}
if (buf->base.pin_count > 0)
- ret = ttm_bo_mem_compat(&placement, &bo->mem,
+ ret = ttm_bo_mem_compat(&placement, bo->resource,
&new_flags) == true ? 0 : -EINVAL;
else
ret = ttm_bo_validate(bo, &placement, &ctx);
/* For some reason we didn't end up at the start of vram */
- WARN_ON(ret == 0 && bo->mem.start != 0);
+ WARN_ON(ret == 0 && bo->resource->start != 0);
if (!ret)
vmw_bo_pin_reserved(buf, true);
ttm_bo_unreserve(bo);
err_unlock:
- ttm_write_unlock(&dev_priv->reservation_sem);
return ret;
}
@@ -287,10 +271,6 @@ int vmw_bo_unpin(struct vmw_private *dev_priv,
struct ttm_buffer_object *bo = &buf->base;
int ret;
- ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
- if (unlikely(ret != 0))
- return ret;
-
ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0))
goto err;
@@ -300,7 +280,6 @@ int vmw_bo_unpin(struct vmw_private *dev_priv,
ttm_bo_unreserve(bo);
err:
- ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
@@ -314,11 +293,11 @@ err:
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
SVGAGuestPtr *ptr)
{
- if (bo->mem.mem_type == TTM_PL_VRAM) {
+ if (bo->resource->mem_type == TTM_PL_VRAM) {
ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
- ptr->offset = bo->mem.start << PAGE_SHIFT;
+ ptr->offset = bo->resource->start << PAGE_SHIFT;
} else {
- ptr->gmrId = bo->mem.start;
+ ptr->gmrId = bo->resource->start;
ptr->offset = 0;
}
}
@@ -337,7 +316,7 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
struct ttm_place pl;
struct ttm_placement placement;
struct ttm_buffer_object *bo = &vbo->base;
- uint32_t old_mem_type = bo->mem.mem_type;
+ uint32_t old_mem_type = bo->resource->mem_type;
int ret;
dma_resv_assert_held(bo->base.resv);
@@ -347,8 +326,8 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
pl.fpfn = 0;
pl.lpfn = 0;
- pl.mem_type = bo->mem.mem_type;
- pl.flags = bo->mem.placement;
+ pl.mem_type = bo->resource->mem_type;
+ pl.flags = bo->resource->placement;
memset(&placement, 0, sizeof(placement));
placement.num_placement = 1;
@@ -356,7 +335,7 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
ret = ttm_bo_validate(bo, &placement, &ctx);
- BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
+ BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type);
if (pin)
ttm_bo_pin(bo);
@@ -390,7 +369,7 @@ void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
if (virtual)
return virtual;
- ret = ttm_bo_kmap(bo, 0, bo->mem.num_pages, &vbo->map);
+ ret = ttm_bo_kmap(bo, 0, bo->resource->num_pages, &vbo->map);
if (ret)
DRM_ERROR("Buffer object map failed: %d.\n", ret);
@@ -460,6 +439,7 @@ void vmw_bo_bo_free(struct ttm_buffer_object *bo)
WARN_ON(vmw_bo->dirty);
WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
vmw_bo_unmap(vmw_bo);
+ dma_resv_fini(&bo->base._resv);
kfree(vmw_bo);
}
@@ -512,6 +492,11 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
if (unlikely(ret))
goto error_free;
+
+ bo->base.size = size;
+ dma_resv_init(&bo->base._resv);
+ drm_vma_node_reset(&bo->base.vma_node);
+
ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
ttm_bo_type_device, placement, 0,
&ctx, NULL, NULL, NULL);
@@ -570,6 +555,10 @@ int vmw_bo_init(struct vmw_private *dev_priv,
if (unlikely(ret))
return ret;
+ vmw_bo->base.base.size = size;
+ dma_resv_init(&vmw_bo->base.base._resv);
+ drm_vma_node_reset(&vmw_bo->base.base.vma_node);
+
ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
ttm_bo_type_device, placement,
0, &ctx, NULL, NULL, bo_free);
@@ -611,7 +600,7 @@ static void vmw_user_bo_release(struct ttm_base_object **p_base)
/**
- * vmw_user_bo_ref_obj-release - TTM synccpu reference object release callback
+ * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
* for vmw user buffer objects
*
* @base: Pointer to the TTM base object
@@ -754,9 +743,9 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
if (flags & drm_vmw_synccpu_allow_cs) {
long lret;
- lret = dma_resv_wait_timeout_rcu
- (bo->base.resv, true, true,
- nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
+ lret = dma_resv_wait_timeout(bo->base.resv, true, true,
+ nonblock ? 0 :
+ MAX_SCHEDULE_TIMEOUT);
if (!lret)
return -EBUSY;
else if (lret < 0)
@@ -896,10 +885,6 @@ int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
uint32_t handle;
int ret;
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- return ret;
-
ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
req->size, false, &handle, &vbo,
NULL);
@@ -914,7 +899,6 @@ int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
vmw_bo_unreference(&vbo);
out_no_bo:
- ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
@@ -1109,10 +1093,6 @@ int vmw_dumb_create(struct drm_file *file_priv,
args->pitch = args->width * ((args->bpp + 7) / 8);
args->size = args->pitch * args->height;
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- return ret;
-
ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
args->size, false, &args->handle,
&vbo, NULL);
@@ -1121,7 +1101,6 @@ int vmw_dumb_create(struct drm_file *file_priv,
vmw_bo_unreference(&vbo);
out_no_bo:
- ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
@@ -1218,7 +1197,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
* With other types of moves, the underlying pages stay the same,
* and the map can be kept.
*/
- if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
+ if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM)
vmw_bo_unmap(vbo);
/*
@@ -1226,6 +1205,6 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
* read back all resource content first, and unbind the MOB from
* the resource.
*/
- if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
+ if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
vmw_resource_unbind_list(vbo);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index 20246a7c97c9..956b85e35cef 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -31,15 +31,10 @@
#include "vmwgfx_drv.h"
-struct vmw_temp_set_context {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXTempSetContext body;
-};
-
bool vmw_supports_3d(struct vmw_private *dev_priv)
{
uint32_t fifo_min, hwversion;
- const struct vmw_fifo_state *fifo = &dev_priv->fifo;
+ const struct vmw_fifo_state *fifo = dev_priv->fifo;
if (!(dev_priv->capabilities & SVGA_CAP_3D))
return false;
@@ -61,6 +56,8 @@ bool vmw_supports_3d(struct vmw_private *dev_priv)
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
+ BUG_ON(vmw_is_svga_v3(dev_priv));
+
fifo_min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
return false;
@@ -98,16 +95,24 @@ bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
return false;
}
-int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
+struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv)
{
+ struct vmw_fifo_state *fifo;
uint32_t max;
uint32_t min;
- fifo->dx = false;
+ if (!dev_priv->fifo_mem)
+ return NULL;
+
+ fifo = kzalloc(sizeof(*fifo), GFP_KERNEL);
+ if (!fifo)
+ return ERR_PTR(-ENOMEM);
fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
fifo->static_buffer = vmalloc(fifo->static_buffer_size);
- if (unlikely(fifo->static_buffer == NULL))
- return -ENOMEM;
+ if (unlikely(fifo->static_buffer == NULL)) {
+ kfree(fifo);
+ return ERR_PTR(-ENOMEM);
+ }
fifo->dynamic_buffer = NULL;
fifo->reserved_size = 0;
@@ -115,20 +120,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
mutex_init(&fifo->fifo_mutex);
init_rwsem(&fifo->rwsem);
-
- DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
- DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
- DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
-
- dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
- dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
- dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
-
- vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
- SVGA_REG_ENABLE_HIDE);
-
- vmw_write(dev_priv, SVGA_REG_TRACES, 0);
-
min = 4;
if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
@@ -155,35 +146,23 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
(unsigned int) max,
(unsigned int) min,
(unsigned int) fifo->capabilities);
-
- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, dev_priv->last_read_seqno);
-
- return 0;
+ return fifo;
}
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
u32 *fifo_mem = dev_priv->fifo_mem;
-
- if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
+ if (fifo_mem && cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
vmw_write(dev_priv, SVGA_REG_SYNC, reason);
+
}
-void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
+void vmw_fifo_destroy(struct vmw_private *dev_priv)
{
- vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
- while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
- ;
-
- dev_priv->last_read_seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
+ struct vmw_fifo_state *fifo = dev_priv->fifo;
- vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
- dev_priv->config_done_state);
- vmw_write(dev_priv, SVGA_REG_ENABLE,
- dev_priv->enable_state);
- vmw_write(dev_priv, SVGA_REG_TRACES,
- dev_priv->traces_state);
+ if (!fifo)
+ return;
if (likely(fifo->static_buffer != NULL)) {
vfree(fifo->static_buffer);
@@ -194,6 +173,8 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
vfree(fifo->dynamic_buffer);
fifo->dynamic_buffer = NULL;
}
+ kfree(fifo);
+ dev_priv->fifo = NULL;
}
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
@@ -289,7 +270,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
uint32_t bytes)
{
- struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+ struct vmw_fifo_state *fifo_state = dev_priv->fifo;
u32 *fifo_mem = dev_priv->fifo_mem;
uint32_t max;
uint32_t min;
@@ -438,16 +419,12 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
- struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+ struct vmw_fifo_state *fifo_state = dev_priv->fifo;
uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
- if (fifo_state->dx)
- bytes += sizeof(struct vmw_temp_set_context);
-
- fifo_state->dx = false;
BUG_ON((bytes & 3) != 0);
BUG_ON(bytes > fifo_state->reserved_size);
@@ -495,7 +472,7 @@ void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes)
/**
- * vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands.
+ * vmw_cmd_commit_flush - Commit fifo space and flush any buffered commands.
*
* @dev_priv: Pointer to device private structure.
* @bytes: Number of bytes to commit.
@@ -509,7 +486,7 @@ void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
}
/**
- * vmw_fifo_flush - Flush any buffered commands and make sure command processing
+ * vmw_cmd_flush - Flush any buffered commands and make sure command processing
* starts.
*
* @dev_priv: Pointer to device private structure.
@@ -527,7 +504,6 @@ int vmw_cmd_flush(struct vmw_private *dev_priv, bool interruptible)
int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
- struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
struct svga_fifo_cmd_fence *cmd_fence;
u32 *fm;
int ret = 0;
@@ -546,7 +522,7 @@ int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
*seqno = atomic_add_return(1, &dev_priv->marker_seq);
} while (*seqno == 0);
- if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
+ if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE)) {
/*
* Don't request hardware to send a fence. The
@@ -561,22 +537,22 @@ int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
cmd_fence = (struct svga_fifo_cmd_fence *) fm;
cmd_fence->fence = *seqno;
vmw_cmd_commit_flush(dev_priv, bytes);
- vmw_update_seqno(dev_priv, fifo_state);
+ vmw_update_seqno(dev_priv);
out_err:
return ret;
}
/**
- * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
+ * vmw_cmd_emit_dummy_legacy_query - emits a dummy query to the fifo using
* legacy query commands.
*
* @dev_priv: The device private structure.
* @cid: The hardware context id used for the query.
*
- * See the vmw_fifo_emit_dummy_query documentation.
+ * See the vmw_cmd_emit_dummy_query documentation.
*/
-static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
+static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv,
uint32_t cid)
{
/*
@@ -600,11 +576,11 @@ static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
cmd->body.cid = cid;
cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
- if (bo->mem.mem_type == TTM_PL_VRAM) {
+ if (bo->resource->mem_type == TTM_PL_VRAM) {
cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
- cmd->body.guestResult.offset = bo->mem.start << PAGE_SHIFT;
+ cmd->body.guestResult.offset = bo->resource->start << PAGE_SHIFT;
} else {
- cmd->body.guestResult.gmrId = bo->mem.start;
+ cmd->body.guestResult.gmrId = bo->resource->start;
cmd->body.guestResult.offset = 0;
}
@@ -614,16 +590,16 @@ static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
}
/**
- * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
+ * vmw_cmd_emit_dummy_gb_query - emits a dummy query to the fifo using
* guest-backed resource query commands.
*
* @dev_priv: The device private structure.
* @cid: The hardware context id used for the query.
*
- * See the vmw_fifo_emit_dummy_query documentation.
+ * See the vmw_cmd_emit_dummy_query documentation.
*/
-static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
- uint32_t cid)
+static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv,
+ uint32_t cid)
{
/*
* A query wait without a preceding query end will
@@ -645,8 +621,8 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = cid;
cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
- BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
- cmd->body.mobid = bo->mem.start;
+ BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
+ cmd->body.mobid = bo->resource->start;
cmd->body.offset = 0;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
@@ -656,7 +632,7 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
/**
- * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
+ * vmw_cmd_emit_dummy_query - emits a dummy query to the fifo using
* appropriate resource query commands.
*
* @dev_priv: The device private structure.
@@ -677,7 +653,27 @@ int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
uint32_t cid)
{
if (dev_priv->has_mob)
- return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);
+ return vmw_cmd_emit_dummy_gb_query(dev_priv, cid);
- return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
+ return vmw_cmd_emit_dummy_legacy_query(dev_priv, cid);
+}
+
+
+/**
+ * vmw_cmd_supported - returns true if the given device supports
+ * command queues.
+ *
+ * @vmw: The device private structure.
+ *
+ * Returns true if we can issue commands.
+ */
+bool vmw_cmd_supported(struct vmw_private *vmw)
+{
+ if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
+ SVGA_CAP_CMD_BUFFERS_2)) != 0)
+ return true;
+ /*
+ * We have FIFO commands.
+ */
+ return vmw->fifo_mem != NULL;
}
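
A hypothetical caller sketch, showing how the helper would gate driver setup
when the device exposes neither command buffers nor FIFO memory:

    if (!vmw_cmd_supported(dev_priv)) {
        DRM_ERROR("Device lacks a command submission mechanism.\n");
        return -ENOSYS;
    }
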
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 2e23e537cdf5..6bb4961e64a5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -295,7 +295,7 @@ void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
/**
- * vmw_cmbuf_header_submit: Submit a command buffer to hardware.
+ * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
*
* @header: The header of the buffer to submit.
*/
@@ -620,7 +620,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
}
/**
- * vmw_cmdbuf_man idle - Check whether the command buffer manager is idle.
+ * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
*
* @man: The command buffer manager.
* @check_preempted: Check also the preempted queue for pending command buffers.
@@ -889,7 +889,7 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
header->cmd = man->map + offset;
if (man->using_mob) {
cb_hdr->flags = SVGA_CB_FLAG_MOB;
- cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
+ cb_hdr->ptr.mob.mobid = man->cmd_space->resource->start;
cb_hdr->ptr.mob.mobOffset = offset;
} else {
cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 4a5a3e246216..dffe3804ad3e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -346,7 +346,7 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
} *cmd;
struct ttm_buffer_object *bo = val_buf->bo;
- BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
@@ -355,7 +355,7 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
- cmd->body.mobid = bo->mem.start;
+ cmd->body.mobid = bo->resource->start;
cmd->body.validContents = res->backup_dirty;
res->backup_dirty = false;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
@@ -385,7 +385,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
uint8_t *cmd;
- BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
mutex_lock(&dev_priv->binding_mutex);
vmw_binding_state_scrub(uctx->cbs);
@@ -513,7 +513,7 @@ static int vmw_dx_context_bind(struct vmw_resource *res,
} *cmd;
struct ttm_buffer_object *bo = val_buf->bo;
- BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
@@ -522,7 +522,7 @@ static int vmw_dx_context_bind(struct vmw_resource *res,
cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
- cmd->body.mobid = bo->mem.start;
+ cmd->body.mobid = bo->resource->start;
cmd->body.validContents = res->backup_dirty;
res->backup_dirty = false;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
@@ -594,7 +594,7 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
uint8_t *cmd;
- BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
mutex_lock(&dev_priv->binding_mutex);
vmw_dx_context_scrub_cotables(res, readback);
@@ -748,10 +748,6 @@ static int vmw_context_define(struct drm_device *dev, void *data,
((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) +
+ VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- return ret;
-
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_user_context_size,
&ttm_opt_ctx);
@@ -759,7 +755,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for context"
" creation.\n");
- goto out_unlock;
+ goto out_ret;
}
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -767,7 +763,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_context_size);
ret = -ENOMEM;
- goto out_unlock;
+ goto out_ret;
}
res = &ctx->res;
@@ -780,7 +776,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
if (unlikely(ret != 0))
- goto out_unlock;
+ goto out_ret;
tmp = vmw_resource_reference(&ctx->res);
ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
@@ -794,8 +790,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
arg->cid = ctx->base.handle;
out_err:
vmw_resource_unreference(&res);
-out_unlock:
- ttm_read_unlock(&dev_priv->reservation_sem);
+out_ret:
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index d782b49c7236..c84a16c1def0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -173,7 +173,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
SVGA3dCmdDXSetCOTable body;
} *cmd;
- WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
+ WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB);
dma_resv_assert_held(bo->base.resv);
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
@@ -181,12 +181,12 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
return -ENOMEM;
WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
- WARN_ON(bo->mem.mem_type != VMW_PL_MOB);
+ WARN_ON(bo->resource->mem_type != VMW_PL_MOB);
cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = vcotbl->ctx->id;
cmd->body.type = vcotbl->type;
- cmd->body.mobid = bo->mem.start;
+ cmd->body.mobid = bo->resource->start;
cmd->body.validSizeInBytes = vcotbl->size_read_back;
vmw_cmd_commit_flush(dev_priv, sizeof(*cmd));
@@ -315,7 +315,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
if (!vmw_resource_mob_attached(res))
return 0;
- WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
+ WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB);
dma_resv_assert_held(bo->base.resv);
mutex_lock(&dev_priv->binding_mutex);
@@ -431,7 +431,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
* Do a page by page copy of COTables. This eliminates slow vmap()s.
* This should really be a TTM utility.
*/
- for (i = 0; i < old_bo->mem.num_pages; ++i) {
+ for (i = 0; i < old_bo->resource->num_pages; ++i) {
bool dummy;
ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
@@ -653,7 +653,7 @@ int vmw_cotable_notify(struct vmw_resource *res, int id)
}
/**
- * vmw_cotable_add_view - add a view to the cotable's list of active views.
+ * vmw_cotable_add_resource - add a view to the cotable's list of active views.
*
* @res: pointer struct vmw_resource representing the cotable.
* @head: pointer to the struct list_head member of the resource, dedicated
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 399f70d340eb..6f5ea00973e0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -31,12 +31,13 @@
#include <linux/pci.h>
#include <linux/mem_encrypt.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <generated/utsrelease.h>
#include "ttm_object.h"
#include "vmwgfx_binding.h"
@@ -50,7 +51,7 @@
#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
-/**
+/*
* Fully encoded drm commands. Might move to vmw_drm.h
*/
@@ -246,6 +247,7 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
static const struct pci_device_id vmw_pci_id_list[] = {
{ PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA2) },
+ { PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA3) },
{ }
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
@@ -393,6 +395,60 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
return ret;
}
+static int vmw_device_init(struct vmw_private *dev_priv)
+{
+ bool uses_fb_traces = false;
+
+ DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
+ DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
+ DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
+
+ dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
+ dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
+ dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
+
+ vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
+ SVGA_REG_ENABLE_HIDE);
+
+ uses_fb_traces = !vmw_cmd_supported(dev_priv) &&
+ (dev_priv->capabilities & SVGA_CAP_TRACES) != 0;
+
+ vmw_write(dev_priv, SVGA_REG_TRACES, uses_fb_traces);
+ dev_priv->fifo = vmw_fifo_create(dev_priv);
+ if (IS_ERR(dev_priv->fifo)) {
+ int err = PTR_ERR(dev_priv->fifo);
+ dev_priv->fifo = NULL;
+ return err;
+ } else if (!dev_priv->fifo) {
+ vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
+ }
+
+ dev_priv->last_read_seqno = vmw_fence_read(dev_priv);
+ atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
+ return 0;
+}
+
+static void vmw_device_fini(struct vmw_private *vmw)
+{
+ /*
+ * Legacy sync
+ */
+ vmw_write(vmw, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
+ while (vmw_read(vmw, SVGA_REG_BUSY) != 0)
+ ;
+
+ vmw->last_read_seqno = vmw_fence_read(vmw);
+
+ vmw_write(vmw, SVGA_REG_CONFIG_DONE,
+ vmw->config_done_state);
+ vmw_write(vmw, SVGA_REG_ENABLE,
+ vmw->enable_state);
+ vmw_write(vmw, SVGA_REG_TRACES,
+ vmw->traces_state);
+
+ vmw_fifo_destroy(vmw);
+}
+
/**
* vmw_request_device_late - Perform late device setup
*
@@ -433,9 +489,9 @@ static int vmw_request_device(struct vmw_private *dev_priv)
{
int ret;
- ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
+ ret = vmw_device_init(dev_priv);
if (unlikely(ret != 0)) {
- DRM_ERROR("Unable to initialize FIFO.\n");
+ DRM_ERROR("Unable to initialize the device.\n");
return ret;
}
vmw_fence_fifo_up(dev_priv->fman);
@@ -469,7 +525,7 @@ out_no_query_bo:
vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
vmw_fence_fifo_down(dev_priv->fman);
- vmw_fifo_release(dev_priv, &dev_priv->fifo);
+ vmw_device_fini(dev_priv);
return ret;
}
@@ -517,7 +573,7 @@ static void vmw_release_device_late(struct vmw_private *dev_priv)
if (dev_priv->cman)
vmw_cmdbuf_man_destroy(dev_priv->cman);
- vmw_fifo_release(dev_priv, &dev_priv->fifo);
+ vmw_device_fini(dev_priv);
}
/*
@@ -638,6 +694,8 @@ static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
static int vmw_setup_pci_resources(struct vmw_private *dev,
unsigned long pci_id)
{
+ resource_size_t rmmio_start;
+ resource_size_t rmmio_size;
resource_size_t fifo_start;
resource_size_t fifo_size;
int ret;
@@ -649,23 +707,45 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
if (ret)
return ret;
- dev->io_start = pci_resource_start(pdev, 0);
- dev->vram_start = pci_resource_start(pdev, 1);
- dev->vram_size = pci_resource_len(pdev, 1);
- fifo_start = pci_resource_start(pdev, 2);
- fifo_size = pci_resource_len(pdev, 2);
-
- DRM_INFO("FIFO at %pa size is %llu kiB\n",
- &fifo_start, (uint64_t)fifo_size / 1024);
- dev->fifo_mem = devm_memremap(dev->drm.dev,
- fifo_start,
- fifo_size,
- MEMREMAP_WB);
-
- if (IS_ERR(dev->fifo_mem)) {
- DRM_ERROR("Failed mapping FIFO memory.\n");
+ dev->pci_id = pci_id;
+ if (pci_id == VMWGFX_PCI_ID_SVGA3) {
+ rmmio_start = pci_resource_start(pdev, 0);
+ rmmio_size = pci_resource_len(pdev, 0);
+ dev->vram_start = pci_resource_start(pdev, 2);
+ dev->vram_size = pci_resource_len(pdev, 2);
+
+ DRM_INFO("Register MMIO at 0x%pa size is %llu kiB\n",
+ &rmmio_start, (uint64_t)rmmio_size / 1024);
+ dev->rmmio = devm_ioremap(dev->drm.dev,
+ rmmio_start,
+ rmmio_size);
+ if (!dev->rmmio) {
+ DRM_ERROR("Failed mapping registers mmio memory.\n");
+ pci_release_regions(pdev);
+ return -ENOMEM;
+ }
+ } else if (pci_id == VMWGFX_PCI_ID_SVGA2) {
+ dev->io_start = pci_resource_start(pdev, 0);
+ dev->vram_start = pci_resource_start(pdev, 1);
+ dev->vram_size = pci_resource_len(pdev, 1);
+ fifo_start = pci_resource_start(pdev, 2);
+ fifo_size = pci_resource_len(pdev, 2);
+
+ DRM_INFO("FIFO at %pa size is %llu kiB\n",
+ &fifo_start, (uint64_t)fifo_size / 1024);
+ dev->fifo_mem = devm_memremap(dev->drm.dev,
+ fifo_start,
+ fifo_size,
+ MEMREMAP_WB);
+
+ if (IS_ERR(dev->fifo_mem)) {
+ DRM_ERROR("Failed mapping FIFO memory.\n");
+ pci_release_regions(pdev);
+ return PTR_ERR(dev->fifo_mem);
+ }
+ } else {
pci_release_regions(pdev);
- return PTR_ERR(dev->fifo_mem);
+ return -EINVAL;
}
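
For reference, the PCI BAR layout the two branches above assume, summarized
from the code:

            BAR0             BAR1    BAR2
    SVGA2   I/O ports        VRAM    FIFO memory (memremap'd write-back)
    SVGA3   register MMIO    -       VRAM (no FIFO BAR)
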
/*
@@ -684,13 +764,16 @@ static int vmw_detect_version(struct vmw_private *dev)
{
uint32_t svga_id;
- vmw_write(dev, SVGA_REG_ID, SVGA_ID_2);
+ vmw_write(dev, SVGA_REG_ID, vmw_is_svga_v3(dev) ?
+ SVGA_ID_3 : SVGA_ID_2);
svga_id = vmw_read(dev, SVGA_REG_ID);
- if (svga_id != SVGA_ID_2) {
+ if (svga_id != SVGA_ID_2 && svga_id != SVGA_ID_3) {
DRM_ERROR("Unsupported SVGA ID 0x%x on chipset 0x%x\n",
svga_id, dev->vmw_chipset);
return -ENOSYS;
}
+ BUG_ON(vmw_is_svga_v3(dev) && (svga_id != SVGA_ID_3));
+ DRM_INFO("Running on SVGA version %d.\n", (svga_id & 0xff));
return 0;
}
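
The classic SVGA negotiation loop writes the highest ID it knows and steps
down until the device echoes it back. A sketch of that loop for context; it
is not the code above, which now accepts only SVGA_ID_2 or SVGA_ID_3:

    static u32 example_negotiate_id(struct vmw_private *dev, u32 max_ver)
    {
        u32 ver;

        for (ver = max_ver; ver >= SVGA_VERSION_2; --ver) {
            vmw_write(dev, SVGA_REG_ID, SVGA_MAKE_ID(ver));
            if (vmw_read(dev, SVGA_REG_ID) == SVGA_MAKE_ID(ver))
                return ver;  /* device accepted this version */
        }
        return 0;            /* no supported version */
    }
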
@@ -699,16 +782,13 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
int ret;
enum vmw_res_type i;
bool refuse_dma = false;
- char host_log[100] = {0};
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
dev_priv->vmw_chipset = pci_id;
- dev_priv->last_read_seqno = (uint32_t) -100;
dev_priv->drm.dev_private = dev_priv;
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->binding_mutex);
- ttm_lock_init(&dev_priv->reservation_sem);
spin_lock_init(&dev_priv->resource_lock);
spin_lock_init(&dev_priv->hw_lock);
spin_lock_init(&dev_priv->waiter_lock);
@@ -724,7 +804,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
for (i = vmw_res_context; i < vmw_res_max; ++i) {
- idr_init(&dev_priv->res_idr[i]);
+ idr_init_base(&dev_priv->res_idr[i], 1);
INIT_LIST_HEAD(&dev_priv->res_lru[i]);
}
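
idr_init_base(&idr, 1) tells the allocator that ids below 1 are never
handed out, so handle 0 can serve as the "no resource" sentinel without
wasting a slot. The pattern in isolation (a sketch, assuming that is the
intent here):

    struct idr idr;
    static int obj;   /* stand-in for a real resource */
    int id;

    idr_init_base(&idr, 1);                        /* never hands out id 0 */
    id = idr_alloc(&idr, &obj, 1, 0, GFP_KERNEL);  /* first id will be 1 */
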
@@ -825,6 +905,8 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
vmw_print_capabilities(dev_priv->capabilities);
if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
vmw_print_capabilities2(dev_priv->capabilities2);
+ DRM_INFO("Supports command queues = %d\n",
+ vmw_cmd_supported(dev_priv));
ret = vmw_dma_masks(dev_priv);
if (unlikely(ret != 0))
@@ -966,11 +1048,11 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
DRM_INFO("SM4_1 support available.\n");
if (dev_priv->sm_type == VMW_SM_4)
DRM_INFO("SM4 support available.\n");
+ DRM_INFO("Running without reservation semaphore\n");
- snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
- VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
- VMWGFX_DRIVER_PATCHLEVEL);
- vmw_host_log(host_log);
+ vmw_host_printf("vmwgfx: Module Version: %d.%d.%d (kernel: %s)",
+ VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
+ VMWGFX_DRIVER_PATCHLEVEL, UTS_RELEASE);
if (dev_priv->enable_fb) {
vmw_fifo_resource_inc(dev_priv);
@@ -1179,7 +1261,7 @@ static void __vmw_svga_enable(struct vmw_private *dev_priv)
struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
if (!ttm_resource_manager_used(man)) {
- vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
+ vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE);
ttm_resource_manager_set_used(man, true);
}
}
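
Writing the enum SVGA_REG_ENABLE as the register value only worked because
it happens to encode the same number as the enable bit; the named bit is
what the device actually defines. The values this series writes:

    /*
     * SVGA_REG_ENABLE_ENABLE                         - device runs normally
     * SVGA_REG_ENABLE_HIDE | SVGA_REG_ENABLE_ENABLE  - device stays enabled
     *                                                  but output is hidden
     *                                                  (see vmw_svga_disable()
     *                                                  below)
     */
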
@@ -1191,9 +1273,7 @@ static void __vmw_svga_enable(struct vmw_private *dev_priv)
*/
void vmw_svga_enable(struct vmw_private *dev_priv)
{
- (void) ttm_read_lock(&dev_priv->reservation_sem, false);
__vmw_svga_enable(dev_priv);
- ttm_read_unlock(&dev_priv->reservation_sem);
}
/**
@@ -1238,7 +1318,6 @@ void vmw_svga_disable(struct vmw_private *dev_priv)
*
*/
vmw_kms_lost_device(&dev_priv->drm);
- ttm_write_lock(&dev_priv->reservation_sem, false);
if (ttm_resource_manager_used(man)) {
if (ttm_resource_manager_evict_all(&dev_priv->bdev, man))
DRM_ERROR("Failed evicting VRAM buffers.\n");
@@ -1247,7 +1326,6 @@ void vmw_svga_disable(struct vmw_private *dev_priv)
SVGA_REG_ENABLE_HIDE |
SVGA_REG_ENABLE_ENABLE);
}
- ttm_write_unlock(&dev_priv->reservation_sem);
}
static void vmw_remove(struct pci_dev *pdev)
@@ -1287,14 +1365,12 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
* Once user-space processes have been frozen, we can release
* the lock again.
*/
- ttm_suspend_lock(&dev_priv->reservation_sem);
dev_priv->suspend_locked = true;
break;
case PM_POST_HIBERNATION:
case PM_POST_RESTORE:
if (READ_ONCE(dev_priv->suspend_locked)) {
dev_priv->suspend_locked = false;
- ttm_suspend_unlock(&dev_priv->reservation_sem);
}
break;
default:
@@ -1353,20 +1429,16 @@ static int vmw_pm_freeze(struct device *kdev)
int ret;
/*
- * Unlock for vmw_kms_suspend.
* No user-space processes should be running now.
*/
- ttm_suspend_unlock(&dev_priv->reservation_sem);
ret = vmw_kms_suspend(&dev_priv->drm);
if (ret) {
- ttm_suspend_lock(&dev_priv->reservation_sem);
DRM_ERROR("Failed to freeze modesetting.\n");
return ret;
}
if (dev_priv->enable_fb)
vmw_fb_off(dev_priv);
- ttm_suspend_lock(&dev_priv->reservation_sem);
vmw_execbuf_release_pinned_bo(dev_priv);
vmw_resource_evict_all(dev_priv);
vmw_release_device_early(dev_priv);
@@ -1379,7 +1451,6 @@ static int vmw_pm_freeze(struct device *kdev)
vmw_fifo_resource_inc(dev_priv);
WARN_ON(vmw_request_device_late(dev_priv));
dev_priv->suspend_locked = false;
- ttm_suspend_unlock(&dev_priv->reservation_sem);
if (dev_priv->suspend_state)
vmw_kms_resume(dev);
if (dev_priv->enable_fb)
@@ -1401,8 +1472,7 @@ static int vmw_pm_restore(struct device *kdev)
struct vmw_private *dev_priv = vmw_priv(dev);
int ret;
- vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
- (void) vmw_read(dev_priv, SVGA_REG_ID);
+ vmw_detect_version(dev_priv);
if (dev_priv->enable_fb)
vmw_fifo_resource_inc(dev_priv);
@@ -1416,7 +1486,6 @@ static int vmw_pm_restore(struct device *kdev)
vmw_fence_fifo_up(dev_priv->fman);
dev_priv->suspend_locked = false;
- ttm_suspend_unlock(&dev_priv->reservation_sem);
if (dev_priv->suspend_state)
vmw_kms_resume(&dev_priv->drm);
@@ -1440,8 +1509,8 @@ static const struct file_operations vmwgfx_driver_fops = {
.release = drm_release,
.unlocked_ioctl = vmw_unlocked_ioctl,
.mmap = vmw_mmap,
- .poll = vmw_fops_poll,
- .read = vmw_fops_read,
+ .poll = drm_poll,
+ .read = drm_read,
#if defined(CONFIG_COMPAT)
.compat_ioctl = vmw_compat_ioctl,
#endif
@@ -1490,7 +1559,7 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct vmw_private *vmw;
int ret;
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "svgadrmfb");
+ ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "svgadrmfb");
if (ret)
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index c6b1eb5952bc..d1cef3b69e9d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,7 +40,6 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_execbuf_util.h>
-#include "ttm_lock.h"
#include "ttm_object.h"
#include "vmwgfx_fence.h"
@@ -67,6 +66,7 @@
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1
#define VMWGFX_PCI_ID_SVGA2 0x0405
+#define VMWGFX_PCI_ID_SVGA3 0x0406
/*
* Perhaps we should have sysfs entries for these.
@@ -285,7 +285,6 @@ struct vmw_fifo_state {
uint32_t capabilities;
struct mutex fifo_mutex;
struct rw_semaphore rwsem;
- bool dx;
};
/**
@@ -486,14 +485,14 @@ struct vmw_private {
struct drm_device drm;
struct ttm_device bdev;
- struct vmw_fifo_state fifo;
-
struct drm_vma_offset_manager vma_manager;
+ unsigned long pci_id;
u32 vmw_chipset;
resource_size_t io_start;
resource_size_t vram_start;
resource_size_t vram_size;
resource_size_t prim_bb_mem;
+ void __iomem *rmmio;
u32 *fifo_mem;
resource_size_t fifo_mem_size;
uint32_t fb_max_width;
@@ -594,11 +593,6 @@ struct vmw_private {
atomic_t num_fifo_resources;
/*
- * Replace this with an rwsem as soon as we have down_xx_interruptible()
- */
- struct ttm_lock reservation_sem;
-
- /*
* Query processing. These members
* are protected by the cmdbuf mutex.
*/
@@ -629,6 +623,7 @@ struct vmw_private {
*/
struct vmw_otable_batch otable_batch;
+ struct vmw_fifo_state *fifo;
struct vmw_cmdbuf_man *cman;
DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
@@ -652,6 +647,14 @@ static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
}
/*
+ * SVGA v3 exposes its registers via MMIO and has no FIFO command queue.
+ */
+static inline bool vmw_is_svga_v3(const struct vmw_private *dev)
+{
+ return dev->pci_id == VMWGFX_PCI_ID_SVGA3;
+}
+
+/*
* The locking here is fine-grained, so that it is performed once
* for every read- and write operation. This is of course costly, but we
* don't perform much register access in the timing critical paths anyway.
@@ -661,10 +664,14 @@ static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
static inline void vmw_write(struct vmw_private *dev_priv,
unsigned int offset, uint32_t value)
{
- spin_lock(&dev_priv->hw_lock);
- outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
- outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
- spin_unlock(&dev_priv->hw_lock);
+ if (vmw_is_svga_v3(dev_priv)) {
+ iowrite32(value, dev_priv->rmmio + offset);
+ } else {
+ spin_lock(&dev_priv->hw_lock);
+ outl(offset, dev_priv->io_start + SVGA_INDEX_PORT);
+ outl(value, dev_priv->io_start + SVGA_VALUE_PORT);
+ spin_unlock(&dev_priv->hw_lock);
+ }
}
static inline uint32_t vmw_read(struct vmw_private *dev_priv,
@@ -672,10 +679,14 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv,
{
u32 val;
- spin_lock(&dev_priv->hw_lock);
- outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
- val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
- spin_unlock(&dev_priv->hw_lock);
+ if (vmw_is_svga_v3(dev_priv)) {
+ val = ioread32(dev_priv->rmmio + offset);
+ } else {
+ spin_lock(&dev_priv->hw_lock);
+ outl(offset, dev_priv->io_start + SVGA_INDEX_PORT);
+ val = inl(dev_priv->io_start + SVGA_VALUE_PORT);
+ spin_unlock(&dev_priv->hw_lock);
+ }
return val;
}
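
Note the locking asymmetry: the SVGA2 path takes hw_lock because the index
and value ports are two separate I/O operations that must not interleave
between CPUs, whereas a single 32-bit MMIO access on SVGA3 is already
atomic. Call sites are identical on both generations:

    u32 width = vmw_read(dev_priv, SVGA_REG_WIDTH); /* MMIO on v3, ports on v2 */
    vmw_write(dev_priv, SVGA_REG_WIDTH, width);
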
@@ -938,19 +949,14 @@ extern int vmw_present_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern __poll_t vmw_fops_poll(struct file *filp,
- struct poll_table_struct *wait);
-extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *offset);
/**
* Fifo utilities - vmwgfx_fifo.c
*/
-extern int vmw_fifo_init(struct vmw_private *dev_priv,
- struct vmw_fifo_state *fifo);
-extern void vmw_fifo_release(struct vmw_private *dev_priv,
- struct vmw_fifo_state *fifo);
+extern struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv);
+extern void vmw_fifo_destroy(struct vmw_private *dev_priv);
+extern bool vmw_cmd_supported(struct vmw_private *vmw);
extern void *
vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
extern void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes);
@@ -976,6 +982,31 @@ extern int vmw_cmd_flush(struct vmw_private *dev_priv,
#define VMW_CMD_RESERVE(__priv, __bytes) \
VMW_CMD_CTX_RESERVE(__priv, __bytes, SVGA3D_INVALID_ID)
+
+/**
+ * vmw_fifo_caps - Returns the capabilities of the FIFO command
+ * queue or 0 if fifo memory isn't present.
+ * @dev_priv: The device private context
+ */
+static inline uint32_t vmw_fifo_caps(const struct vmw_private *dev_priv)
+{
+ if (!dev_priv->fifo_mem || !dev_priv->fifo)
+ return 0;
+ return dev_priv->fifo->capabilities;
+}
+
+
+/**
+ * vmw_is_cursor_bypass3_enabled - Returns TRUE iff Cursor Bypass 3
+ * is enabled in the FIFO.
+ * @dev_priv: The device private context
+ */
+static inline bool
+vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv)
+{
+ return (vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_CURSOR_BYPASS_3) != 0;
+}
+
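
Since vmw_fifo_caps() returns 0 whenever FIFO memory or FIFO state is
absent, every capability test automatically fails on SVGA3 and callers
fall through to the register paths. The cursor code later in this patch
is the canonical example:

    if (vmw_is_cursor_bypass3_enabled(dev_priv))
            vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x); /* FIFO path */
    else
            vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);           /* register path */
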
/**
* TTM glue - vmwgfx_ttm_glue.c
*/
@@ -1085,9 +1116,6 @@ bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
* IRQs and waiting - vmwgfx_irq.c
*/
-extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
- uint32_t seqno, bool interruptible,
- unsigned long timeout);
extern int vmw_irq_install(struct drm_device *dev, int irq);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
@@ -1098,8 +1126,7 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv,
uint32_t seqno,
bool interruptible,
unsigned long timeout);
-extern void vmw_update_seqno(struct vmw_private *dev_priv,
- struct vmw_fifo_state *fifo_state);
+extern void vmw_update_seqno(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
@@ -1114,10 +1141,29 @@ extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
* Kernel framebuffer - vmwgfx_fb.c
*/
+#ifdef CONFIG_DRM_FBDEV_EMULATION
int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);
+#else
+static inline int vmw_fb_init(struct vmw_private *vmw_priv)
+{
+ return 0;
+}
+static inline int vmw_fb_close(struct vmw_private *dev_priv)
+{
+ return 0;
+}
+static inline int vmw_fb_off(struct vmw_private *vmw_priv)
+{
+ return 0;
+}
+static inline int vmw_fb_on(struct vmw_private *vmw_priv)
+{
+ return 0;
+}
+#endif
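
The stubs keep call sites unconditional: with CONFIG_DRM_FBDEV_EMULATION
unset, code such as the freeze path later in this patch compiles unchanged
and the calls collapse to the inline "return 0" bodies:

    if (dev_priv->enable_fb)
            vmw_fb_off(dev_priv);   /* no-op stub when fbdev emulation is off */
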
/**
* Kernel modesetting - vmwgfx_kms.c
@@ -1452,7 +1498,7 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
/* Host messaging -vmwgfx_msg.c: */
int vmw_host_get_guestinfo(const char *guest_info_param,
char *buffer, size_t *length);
-int vmw_host_log(const char *log);
+__printf(1, 2) int vmw_host_printf(const char *fmt, ...);
int vmw_msg_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -1559,6 +1605,7 @@ static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
*/
static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, uint32 fifo_reg)
{
+ BUG_ON(vmw_is_svga_v3(vmw));
return READ_ONCE(*(vmw->fifo_mem + fifo_reg));
}
@@ -1573,6 +1620,44 @@ static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, uint32 fifo_reg)
static inline void vmw_fifo_mem_write(struct vmw_private *vmw, u32 fifo_reg,
u32 value)
{
+ BUG_ON(vmw_is_svga_v3(vmw));
WRITE_ONCE(*(vmw->fifo_mem + fifo_reg), value);
}
+
+static inline u32 vmw_fence_read(struct vmw_private *dev_priv)
+{
+ u32 fence;
+ if (vmw_is_svga_v3(dev_priv))
+ fence = vmw_read(dev_priv, SVGA_REG_FENCE);
+ else
+ fence = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
+ return fence;
+}
+
+static inline void vmw_fence_write(struct vmw_private *dev_priv,
+ u32 fence)
+{
+ BUG_ON(vmw_is_svga_v3(dev_priv));
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, fence);
+}
+
+static inline u32 vmw_irq_status_read(struct vmw_private *vmw)
+{
+ u32 status;
+ if (vmw_is_svga_v3(vmw))
+ status = vmw_read(vmw, SVGA_REG_IRQ_STATUS);
+ else
+ status = inl(vmw->io_start + SVGA_IRQSTATUS_PORT);
+ return status;
+}
+
+static inline void vmw_irq_status_write(struct vmw_private *vmw,
+ uint32 status)
+{
+ if (vmw_is_svga_v3(vmw))
+ vmw_write(vmw, SVGA_REG_IRQ_STATUS, status);
+ else
+ outl(status, vmw->io_start + SVGA_IRQSTATUS_PORT);
+}
+
#endif
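
vmw_fence_read() and the IRQ-status helpers complete the abstraction:
fences live in a plain register on SVGA3 and in FIFO memory on SVGA2, and
IRQ status moves from an I/O port to a register. Generation-independent
code then needs no test at all; a sketch of the comparison
vmw_seqno_passed() performs:

    static bool sketch_seqno_passed(struct vmw_private *vmw, u32 seqno)
    {
            /* unsigned wrap-safe compare, as in vmw_seqno_passed() */
            return vmw_fence_read(vmw) - seqno < VMW_FENCE_WRAP;
    }
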
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 7a24196f92c3..a2b8464b3f56 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -711,7 +711,7 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
}
/**
- * vmw_rebind_dx_query - Rebind DX query associated with the context
+ * vmw_rebind_all_dx_query - Rebind DX query associated with the context
*
* @ctx_res: context the query belongs to
*
@@ -735,7 +735,7 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = ctx_res->id;
- cmd->body.mobid = dx_query_mob->base.mem.start;
+ cmd->body.mobid = dx_query_mob->base.resource->start;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_context_bind_dx_query(ctx_res, dx_query_mob);
@@ -1046,7 +1046,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
- if (unlikely(new_query_bo->base.mem.num_pages > 4)) {
+ if (unlikely(new_query_bo->base.resource->num_pages > 4)) {
VMW_DEBUG_USER("Query buffer too large.\n");
return -EINVAL;
}
@@ -1140,7 +1140,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
}
/**
- * vmw_translate_mob_pointer - Prepare to translate a user-space buffer handle
+ * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
* to a MOB id.
*
* @dev_priv: Pointer to a device private structure.
@@ -1195,7 +1195,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
}
/**
- * vmw_translate_guest_pointer - Prepare to translate a user-space buffer handle
+ * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
* to a valid SVGAGuestPtr
*
* @dev_priv: Pointer to a device private structure.
@@ -2308,7 +2308,7 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_dx_ia_set_vertex_buffers - Validate
+ * vmw_cmd_dx_set_index_buffer - Validate
* SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
*
* @dev_priv: Pointer to a device private struct.
@@ -2347,7 +2347,7 @@ static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_dx_set_rendertarget - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
+ * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
* command
*
* @dev_priv: Pointer to a device private struct.
@@ -2402,7 +2402,7 @@ static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_dx_clear_rendertarget_view - Validate
+ * vmw_cmd_dx_clear_depthstencil_view - Validate
* SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
*
* @dev_priv: Pointer to a device private struct.
@@ -2513,7 +2513,7 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
binding.bi.ctx = ctx_node->ctx;
binding.bi.res = res;
- binding.bi.bt = vmw_ctx_binding_so_target,
+ binding.bi.bt = vmw_ctx_binding_so_target;
binding.offset = cmd->targets[i].offset;
binding.size = cmd->targets[i].sizeInBytes;
binding.slot = i;
@@ -2763,12 +2763,24 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
container_of(header, typeof(*cmd), header);
- struct vmw_resource *ret;
+ struct vmw_resource *view;
+ struct vmw_res_cache_entry *rcache;
- ret = vmw_view_id_val_add(sw_context, vmw_view_sr,
- cmd->body.shaderResourceViewId);
+ view = vmw_view_id_val_add(sw_context, vmw_view_sr,
+ cmd->body.shaderResourceViewId);
+ if (IS_ERR(view))
+ return PTR_ERR(view);
- return PTR_ERR_OR_ZERO(ret);
+ /*
+ * Normally the shader-resource view is not gpu-dirtying, but for
+ * this particular command it is...
+ * So mark the last looked-up surface, which is the surface
+ * the view points to, gpu-dirty.
+ */
+ rcache = &sw_context->res_cache[vmw_res_surface];
+ vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
+ VMW_RES_DIRTY_SET);
+ return 0;
}
/**
@@ -3698,16 +3710,16 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
bo = &reloc->vbo->base;
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
- reloc->location->offset += bo->mem.start << PAGE_SHIFT;
+ reloc->location->offset += bo->resource->start << PAGE_SHIFT;
reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
break;
case VMW_PL_GMR:
- reloc->location->gmrId = bo->mem.start;
+ reloc->location->gmrId = bo->resource->start;
break;
case VMW_PL_MOB:
- *reloc->mob_loc = bo->mem.start;
+ *reloc->mob_loc = bo->resource->start;
break;
default:
BUG();
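
The mem-to-resource churn in this hunk tracks a TTM core change rather
than anything vmwgfx-specific: placement data moved out of the buffer
object into a separately allocated struct ttm_resource. In short:

    /*
     * old (embedded struct)      new (allocated, pointed to)
     *   bo->mem.mem_type           bo->resource->mem_type
     *   bo->mem.start              bo->resource->start
     *   bo->mem.num_pages          bo->resource->num_pages
     */
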
@@ -3829,7 +3841,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
fence_rep.handle = fence_handle;
fence_rep.seqno = fence->base.seqno;
- vmw_update_seqno(dev_priv, &dev_priv->fifo);
+ vmw_update_seqno(dev_priv);
fence_rep.passed_seqno = dev_priv->last_read_seqno;
}
@@ -4431,10 +4443,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
goto out;
}
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- return ret;
-
ret = vmw_execbuf_process(file_priv, dev_priv,
(void __user *)(unsigned long)arg->commands,
NULL, arg->command_size, arg->throttle_us,
@@ -4442,7 +4450,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
(void __user *)(unsigned long)arg->fence_rep,
NULL, arg->flags);
- ttm_read_unlock(&dev_priv->reservation_sem);
if (unlikely(ret != 0))
goto out;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 33f07abfc3ae..d18c6a56e3dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -195,7 +195,6 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
if (!cur_fb)
goto out_unlock;
- (void) ttm_read_lock(&vmw_priv->reservation_sem, false);
(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
virtual = vmw_bo_map_and_cache(vbo);
if (!virtual)
@@ -254,7 +253,6 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
out_unreserve:
ttm_bo_unreserve(&vbo->base);
- ttm_read_unlock(&vmw_priv->reservation_sem);
if (w && h) {
WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
&clip, 1));
@@ -396,8 +394,6 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
struct vmw_buffer_object *vmw_bo;
int ret;
- (void) ttm_write_lock(&vmw_priv->reservation_sem, false);
-
vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
if (!vmw_bo) {
ret = -ENOMEM;
@@ -412,12 +408,8 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
goto err_unlock; /* init frees the buffer on failure */
*out = vmw_bo;
- ttm_write_unlock(&vmw_priv->reservation_sem);
-
- return 0;
err_unlock:
- ttm_write_unlock(&vmw_priv->reservation_sem);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 23523eb3cac2..9fe12329a4d5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -139,12 +139,10 @@ static bool vmw_fence_enable_signaling(struct dma_fence *f)
struct vmw_fence_manager *fman = fman_from_fence(fence);
struct vmw_private *dev_priv = fman->dev_priv;
- u32 seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
+ u32 seqno = vmw_fence_read(dev_priv);
if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
return false;
- vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
-
return true;
}
@@ -177,7 +175,6 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
if (likely(vmw_fence_obj_signaled(fence)))
return timeout;
- vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
vmw_seqno_waiter_add(dev_priv);
spin_lock(f->lock);
@@ -464,7 +461,7 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman)
bool needs_rerun;
uint32_t seqno, new_seqno;
- seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE);
+ seqno = vmw_fence_read(fman->dev_priv);
rerun:
list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
@@ -486,7 +483,7 @@ rerun:
needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
if (unlikely(needs_rerun)) {
- new_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE);
+ new_seqno = vmw_fence_read(fman->dev_priv);
if (new_seqno != seqno) {
seqno = new_seqno;
goto rerun;
@@ -529,13 +526,6 @@ int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
return ret;
}
-void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
-{
- struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;
-
- vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
-}
-
static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
dma_fence_free(&fence->base);
@@ -992,7 +982,7 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
}
/**
- * vmw_event_fence_action_create - Post an event for sending when a fence
+ * vmw_event_fence_action_queue - Post an event for sending when a fence
* object seqno has passed.
*
* @file_priv: The file connection on which the event should be posted.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index 50e9fdd7acf1..079ab4f3ba51 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -94,8 +94,6 @@ extern int vmw_fence_obj_wait(struct vmw_fence_obj *fence,
bool lazy,
bool interruptible, unsigned long timeout);
-extern void vmw_fence_obj_flush(struct vmw_fence_obj *fence);
-
extern int vmw_fence_create(struct vmw_fence_manager *fman,
uint32_t seqno,
struct vmw_fence_obj **p_fence);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 964ddf1ca57a..c482e5298e11 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -72,7 +72,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;
while (num_pages > 0) {
- unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP);
+ unsigned long nr = min_t(unsigned long, num_pages, VMW_PPN_PER_REMAP);
remap_cmd.offsetPages = remap_pos;
remap_cmd.numPages = nr;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index 1774960d1b89..28ceb749a733 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -52,11 +52,17 @@ static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_resource_manager *ma
static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *mem)
+ struct ttm_resource **res)
{
struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
int id;
+ *res = kmalloc(sizeof(**res), GFP_KERNEL);
+ if (!*res)
+ return -ENOMEM;
+
+ ttm_resource_init(bo, place, *res);
+
id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
if (id < 0)
return id;
@@ -64,36 +70,34 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
spin_lock(&gman->lock);
if (gman->max_gmr_pages > 0) {
- gman->used_gmr_pages += mem->num_pages;
+ gman->used_gmr_pages += (*res)->num_pages;
if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
goto nospace;
}
- mem->mm_node = gman;
- mem->start = id;
+ (*res)->start = id;
spin_unlock(&gman->lock);
return 0;
nospace:
- gman->used_gmr_pages -= mem->num_pages;
+ gman->used_gmr_pages -= (*res)->num_pages;
spin_unlock(&gman->lock);
ida_free(&gman->gmr_ida, id);
+ kfree(*res);
return -ENOSPC;
}
static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
- struct ttm_resource *mem)
+ struct ttm_resource *res)
{
struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
- if (mem->mm_node) {
- ida_free(&gman->gmr_ida, mem->start);
- spin_lock(&gman->lock);
- gman->used_gmr_pages -= mem->num_pages;
- spin_unlock(&gman->lock);
- mem->mm_node = NULL;
- }
+ ida_free(&gman->gmr_ida, res->start);
+ spin_lock(&gman->lock);
+ gman->used_gmr_pages -= res->num_pages;
+ spin_unlock(&gman->lock);
+ kfree(res);
}
static const struct ttm_resource_manager_func vmw_gmrid_manager_func;
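
With that TTM change, a resource manager now allocates and initializes the
struct ttm_resource itself in get_node and frees it in put_node; the old
mm_node marker is gone. The symmetric pattern in isolation (a minimal
sketch with the backend bookkeeping and error handling elided):

    static int sketch_get_node(struct ttm_resource_manager *man,
                               struct ttm_buffer_object *bo,
                               const struct ttm_place *place,
                               struct ttm_resource **res)
    {
            *res = kmalloc(sizeof(**res), GFP_KERNEL);
            if (!*res)
                    return -ENOMEM;
            ttm_resource_init(bo, place, *res); /* fills size, placement, ... */
            (*res)->start = 0;                  /* backend-assigned offset/id */
            return 0;
    }

    static void sketch_put_node(struct ttm_resource_manager *man,
                                struct ttm_resource *res)
    {
            kfree(res);     /* the manager owns the allocation now */
    }
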
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index b36032964b2f..4fdacf9924e6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -60,15 +60,13 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
param->value = dev_priv->capabilities2;
break;
case DRM_VMW_PARAM_FIFO_CAPS:
- param->value = dev_priv->fifo.capabilities;
+ param->value = vmw_fifo_caps(dev_priv);
break;
case DRM_VMW_PARAM_MAX_FB_SIZE:
param->value = dev_priv->prim_bb_mem;
break;
case DRM_VMW_PARAM_FIFO_HW_VERSION:
{
- const struct vmw_fifo_state *fifo = &dev_priv->fifo;
-
if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
param->value = SVGA3D_HWVERSION_WS8_B1;
break;
@@ -76,7 +74,7 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
param->value =
vmw_fifo_mem_read(dev_priv,
- ((fifo->capabilities &
+ ((vmw_fifo_caps(dev_priv) &
SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
SVGA_FIFO_3D_HWVERSION_REVISED :
SVGA_FIFO_3D_HWVERSION));
@@ -302,10 +300,6 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
}
vfb = vmw_framebuffer_to_vfb(fb);
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- goto out_no_ttm_lock;
-
ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
user_surface_converter,
&res);
@@ -322,8 +316,6 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
vmw_surface_unreference(&surface);
out_no_surface:
- ttm_read_unlock(&dev_priv->reservation_sem);
-out_no_ttm_lock:
drm_framebuffer_put(fb);
out_no_fb:
drm_modeset_unlock_all(dev);
@@ -391,15 +383,10 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
goto out_no_ttm_lock;
}
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- goto out_no_ttm_lock;
-
ret = vmw_kms_readback(dev_priv, file_priv,
vfb, user_fence_rep,
clips, num_clips);
- ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock:
drm_framebuffer_put(fb);
out_no_fb:
@@ -409,46 +396,3 @@ out_no_copy:
out_clips:
return ret;
}
-
-
-/**
- * vmw_fops_poll - wrapper around the drm_poll function
- *
- * @filp: See the linux fops poll documentation.
- * @wait: See the linux fops poll documentation.
- *
- * Wrapper around the drm_poll function that makes sure the device is
- * processing the fifo if drm_poll decides to wait.
- */
-__poll_t vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
-{
- struct drm_file *file_priv = filp->private_data;
- struct vmw_private *dev_priv =
- vmw_priv(file_priv->minor->dev);
-
- vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
- return drm_poll(filp, wait);
-}
-
-
-/**
- * vmw_fops_read - wrapper around the drm_read function
- *
- * @filp: See the linux fops read documentation.
- * @buffer: See the linux fops read documentation.
- * @count: See the linux fops read documentation.
- * @offset: See the linux fops read documentation.
- *
- * Wrapper around the drm_read function that makes sure the device is
- * processing the fifo if drm_read decides to wait.
- */
-ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *offset)
-{
- struct drm_file *file_priv = filp->private_data;
- struct vmw_private *dev_priv =
- vmw_priv(file_priv->minor->dev);
-
- vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
- return drm_read(filp, buffer, count, offset);
-}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 6c2a569f1fcb..b9a9b7ddadbd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -65,7 +65,7 @@ static irqreturn_t vmw_thread_fn(int irq, void *arg)
}
/**
- * vmw_irq_handler irq handler
+ * vmw_irq_handler: irq handler
*
* @irq: irq number
* @arg: Closure argument. Pointer to a struct drm_device cast to void *
@@ -82,11 +82,11 @@ static irqreturn_t vmw_irq_handler(int irq, void *arg)
uint32_t status, masked_status;
irqreturn_t ret = IRQ_HANDLED;
- status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ status = vmw_irq_status_read(dev_priv);
masked_status = status & READ_ONCE(dev_priv->irq_mask);
if (likely(status))
- outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ vmw_irq_status_write(dev_priv, status);
if (!status)
return IRQ_NONE;
@@ -114,10 +114,9 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}
-void vmw_update_seqno(struct vmw_private *dev_priv,
- struct vmw_fifo_state *fifo_state)
+void vmw_update_seqno(struct vmw_private *dev_priv)
{
- uint32_t seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
+ uint32_t seqno = vmw_fence_read(dev_priv);
if (dev_priv->last_read_seqno != seqno) {
dev_priv->last_read_seqno = seqno;
@@ -128,18 +127,16 @@ void vmw_update_seqno(struct vmw_private *dev_priv,
bool vmw_seqno_passed(struct vmw_private *dev_priv,
uint32_t seqno)
{
- struct vmw_fifo_state *fifo_state;
bool ret;
if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
return true;
- fifo_state = &dev_priv->fifo;
- vmw_update_seqno(dev_priv, fifo_state);
+ vmw_update_seqno(dev_priv);
if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
return true;
- if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
+ if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE) &&
vmw_fifo_idle(dev_priv, seqno))
return true;
@@ -161,7 +158,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
bool interruptible,
unsigned long timeout)
{
- struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+ struct vmw_fifo_state *fifo_state = dev_priv->fifo;
uint32_t count = 0;
uint32_t signal_seq;
@@ -221,7 +218,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
}
finish_wait(&dev_priv->fence_queue, &__wait);
if (ret == 0 && fifo_idle)
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, signal_seq);
+ vmw_fence_write(dev_priv, signal_seq);
wake_up_all(&dev_priv->fence_queue);
out_err:
@@ -236,7 +233,7 @@ void vmw_generic_waiter_add(struct vmw_private *dev_priv,
{
spin_lock_bh(&dev_priv->waiter_lock);
if ((*waiter_count)++ == 0) {
- outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ vmw_irq_status_write(dev_priv, flag);
dev_priv->irq_mask |= flag;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
}
@@ -278,59 +275,13 @@ void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
&dev_priv->goal_queue_waiters);
}
-int vmw_wait_seqno(struct vmw_private *dev_priv,
- bool lazy, uint32_t seqno,
- bool interruptible, unsigned long timeout)
-{
- long ret;
- struct vmw_fifo_state *fifo = &dev_priv->fifo;
-
- if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
- return 0;
-
- if (likely(vmw_seqno_passed(dev_priv, seqno)))
- return 0;
-
- vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
-
- if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
- return vmw_fallback_wait(dev_priv, lazy, true, seqno,
- interruptible, timeout);
-
- if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
- return vmw_fallback_wait(dev_priv, lazy, false, seqno,
- interruptible, timeout);
-
- vmw_seqno_waiter_add(dev_priv);
-
- if (interruptible)
- ret = wait_event_interruptible_timeout
- (dev_priv->fence_queue,
- vmw_seqno_passed(dev_priv, seqno),
- timeout);
- else
- ret = wait_event_timeout
- (dev_priv->fence_queue,
- vmw_seqno_passed(dev_priv, seqno),
- timeout);
-
- vmw_seqno_waiter_remove(dev_priv);
-
- if (unlikely(ret == 0))
- ret = -EBUSY;
- else if (likely(ret > 0))
- ret = 0;
-
- return ret;
-}
-
static void vmw_irq_preinstall(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
uint32_t status;
- status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
- outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ status = vmw_irq_status_read(dev_priv);
+ vmw_irq_status_write(dev_priv, status);
}
void vmw_irq_uninstall(struct drm_device *dev)
@@ -346,8 +297,8 @@ void vmw_irq_uninstall(struct drm_device *dev)
vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
- status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
- outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ status = vmw_irq_status_read(dev_priv);
+ vmw_irq_status_write(dev_priv, status);
dev->irq_enabled = false;
free_irq(dev->irq, dev);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index abbca8b0b3c5..220f9fd0d420 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -38,8 +38,10 @@
void vmw_du_cleanup(struct vmw_display_unit *du)
{
+ struct vmw_private *dev_priv = vmw_priv(du->primary.dev);
drm_plane_cleanup(&du->primary);
- drm_plane_cleanup(&du->cursor);
+ if (vmw_cmd_supported(dev_priv))
+ drm_plane_cleanup(&du->cursor);
drm_connector_unregister(&du->connector);
drm_crtc_cleanup(&du->crtc);
@@ -128,11 +130,17 @@ static void vmw_cursor_update_position(struct vmw_private *dev_priv,
uint32_t count;
spin_lock(&dev_priv->cursor_lock);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, show ? 1 : 0);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
- count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
+ if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, show ? 1 : 0);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
+ count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
+ } else {
+ vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
+ vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
+ vmw_write(dev_priv, SVGA_REG_CURSOR_ON, show ? 1 : 0);
+ }
spin_unlock(&dev_priv->cursor_lock);
}
@@ -289,7 +297,7 @@ void vmw_du_primary_plane_destroy(struct drm_plane *plane)
/**
- * vmw_du_vps_unpin_surf - unpins resource associated with a framebuffer surface
+ * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
*
* @vps: plane state associated with the display surface
* @unreference: true if we also want to unreference the display.
@@ -474,7 +482,7 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
* vmw_du_cursor_plane_atomic_check - check if the new state is okay
*
* @plane: cursor plane
- * @new_state: info on the new plane state
+ * @state: info on the new plane state
*
* This is a chance to fail if the new cursor state does not fit
* our requirements.
@@ -1008,12 +1016,6 @@ static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
drm_modeset_lock_all(&dev_priv->drm);
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0)) {
- drm_modeset_unlock_all(&dev_priv->drm);
- return ret;
- }
-
if (!num_clips) {
num_clips = 1;
clips = &norect;
@@ -1037,7 +1039,6 @@ static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
}
vmw_cmd_flush(dev_priv, false);
- ttm_read_unlock(&dev_priv->reservation_sem);
drm_modeset_unlock_all(&dev_priv->drm);
@@ -1052,7 +1053,8 @@ static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
{
struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
- if (dev_priv->active_display_unit == vmw_du_legacy)
+ if (dev_priv->active_display_unit == vmw_du_legacy &&
+ vmw_cmd_supported(dev_priv))
return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags,
color, clips, num_clips);
@@ -2640,7 +2642,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
}
/**
- * vmw_kms_create_implicit_placement_proparty - Set up the implicit placement
+ * vmw_kms_create_implicit_placement_property - Set up the implicit placement
* property.
*
* @dev_priv: Pointer to a device private struct.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 87e0b303d900..d85c7eab9469 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -404,19 +404,24 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
drm_plane_helper_add(primary, &vmw_ldu_primary_plane_helper_funcs);
- /* Initialize cursor plane */
- ret = drm_universal_plane_init(dev, &ldu->base.cursor,
- 0, &vmw_ldu_cursor_funcs,
- vmw_cursor_plane_formats,
- ARRAY_SIZE(vmw_cursor_plane_formats),
- NULL, DRM_PLANE_TYPE_CURSOR, NULL);
- if (ret) {
- DRM_ERROR("Failed to initialize cursor plane");
- drm_plane_cleanup(&ldu->base.primary);
- goto err_free;
- }
+ /*
+ * A cursor plane is only useful when the device can process
+ * commands; otherwise we fall back to traces and a software cursor.
+ */
+ if (vmw_cmd_supported(dev_priv)) {
+ /* Initialize cursor plane */
+ ret = drm_universal_plane_init(dev, &ldu->base.cursor,
+ 0, &vmw_ldu_cursor_funcs,
+ vmw_cursor_plane_formats,
+ ARRAY_SIZE(vmw_cursor_plane_formats),
+ NULL, DRM_PLANE_TYPE_CURSOR, NULL);
+ if (ret) {
+ DRM_ERROR("Failed to initialize cursor plane");
+ drm_plane_cleanup(&ldu->base.primary);
+ goto err_free;
+ }
- drm_plane_helper_add(cursor, &vmw_ldu_cursor_plane_helper_funcs);
+ drm_plane_helper_add(cursor, &vmw_ldu_cursor_plane_helper_funcs);
+ }
ret = drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
@@ -445,9 +450,10 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
goto err_free_encoder;
}
- ret = drm_crtc_init_with_planes(dev, crtc, &ldu->base.primary,
- &ldu->base.cursor,
- &vmw_legacy_crtc_funcs, NULL);
+ ret = drm_crtc_init_with_planes(
+ dev, crtc, &ldu->base.primary,
+ vmw_cmd_supported(dev_priv) ? &ldu->base.cursor : NULL,
+ &vmw_legacy_crtc_funcs, NULL);
if (ret) {
DRM_ERROR("Failed to initialize CRTC\n");
goto err_free_unregister;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 609269625468..3d08f5700bdb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -33,7 +33,8 @@
#include <asm/hypervisor.h>
#include "vmwgfx_drv.h"
-#include "vmwgfx_msg.h"
+#include "vmwgfx_msg_x86.h"
+#include "vmwgfx_msg_arm64.h"
#define MESSAGE_STATUS_SUCCESS 0x0001
#define MESSAGE_STATUS_DORECV 0x0002
@@ -473,30 +474,40 @@ out_open:
}
-
/**
- * vmw_host_log: Sends a log message to the host
+ * vmw_host_printf: Sends a log message to the host
*
- * @log: NULL terminated string
+ * @fmt: Regular printf format string and arguments
*
* Returns: 0 on success
*/
-int vmw_host_log(const char *log)
+__printf(1, 2)
+int vmw_host_printf(const char *fmt, ...)
{
+ va_list ap;
struct rpc_channel channel;
char *msg;
+ char *log;
int ret = 0;
-
if (!vmw_msg_enabled)
return -ENODEV;
- if (!log)
+ if (!fmt)
return ret;
+ va_start(ap, fmt);
+ log = kvasprintf(GFP_KERNEL, fmt, ap);
+ va_end(ap);
+ if (!log) {
+ DRM_ERROR("Cannot allocate memory for the log message.\n");
+ return -ENOMEM;
+ }
+
msg = kasprintf(GFP_KERNEL, "log %s", log);
if (!msg) {
DRM_ERROR("Cannot allocate memory for host log message.\n");
+ kfree(log);
return -ENOMEM;
}
@@ -508,6 +519,7 @@ int vmw_host_log(const char *log)
vmw_close_channel(&channel);
kfree(msg);
+ kfree(log);
return 0;
@@ -515,6 +527,7 @@ out_msg:
vmw_close_channel(&channel);
out_open:
kfree(msg);
+ kfree(log);
DRM_ERROR("Failed to send host log message.\n");
return -EINVAL;
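
With the printf-style wrapper, callers format in place instead of
assembling a buffer first, as the driver-load message earlier in this
patch shows. A hypothetical call site (names illustrative only):

    vmw_host_printf("vmwgfx: mode set to %ux%u", width, height);
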
@@ -537,7 +550,7 @@ int vmw_msg_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_msg_arg *arg =
- (struct drm_vmw_msg_arg *) data;
+ (struct drm_vmw_msg_arg *)data;
struct rpc_channel channel;
char *msg;
int length;
@@ -577,7 +590,7 @@ int vmw_msg_ioctl(struct drm_device *dev, void *data,
}
if (reply && reply_len > 0) {
if (copy_to_user((void __user *)((unsigned long)arg->receive),
- reply, reply_len)) {
+ reply, reply_len)) {
DRM_ERROR("Failed to copy message to userspace.\n");
kfree(reply);
goto out_msg;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
deleted file mode 100644
index f685c7071dec..000000000000
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
+++ /dev/null
@@ -1,214 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ OR MIT */
-/**************************************************************************
- *
- * Copyright 2016 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************
- *
- * Based on code from vmware.c and vmmouse.c.
- * Author:
- * Sinclair Yeh <syeh@vmware.com>
- */
-#ifndef _VMWGFX_MSG_H
-#define _VMWGFX_MSG_H
-
-#include <asm/vmware.h>
-
-/**
- * Hypervisor-specific bi-directional communication channel. Should never
- * execute on bare metal hardware. The caller must make sure to check for
- * supported hypervisor before using these macros.
- *
- * The last two parameters are both input and output and must be initialized.
- *
- * @cmd: [IN] Message Cmd
- * @in_ebx: [IN] Message Len, through EBX
- * @in_si: [IN] Input argument through SI, set to 0 if not used
- * @in_di: [IN] Input argument through DI, set ot 0 if not used
- * @flags: [IN] hypercall flags + [channel id]
- * @magic: [IN] hypervisor magic value
- * @eax: [OUT] value of EAX register
- * @ebx: [OUT] e.g. status from an HB message status command
- * @ecx: [OUT] e.g. status from a non-HB message status command
- * @edx: [OUT] e.g. channel id
- * @si: [OUT]
- * @di: [OUT]
- */
-#define VMW_PORT(cmd, in_ebx, in_si, in_di, \
- flags, magic, \
- eax, ebx, ecx, edx, si, di) \
-({ \
- asm volatile (VMWARE_HYPERCALL : \
- "=a"(eax), \
- "=b"(ebx), \
- "=c"(ecx), \
- "=d"(edx), \
- "=S"(si), \
- "=D"(di) : \
- "a"(magic), \
- "b"(in_ebx), \
- "c"(cmd), \
- "d"(flags), \
- "S"(in_si), \
- "D"(in_di) : \
- "memory"); \
-})
-
-
-/**
- * Hypervisor-specific bi-directional communication channel. Should never
- * execute on bare metal hardware. The caller must make sure to check for
- * supported hypervisor before using these macros.
- *
- * The last 3 parameters are both input and output and must be initialized.
- *
- * @cmd: [IN] Message Cmd
- * @in_ecx: [IN] Message Len, through ECX
- * @in_si: [IN] Input argument through SI, set to 0 if not used
- * @in_di: [IN] Input argument through DI, set to 0 if not used
- * @flags: [IN] hypercall flags + [channel id]
- * @magic: [IN] hypervisor magic value
- * @bp: [IN]
- * @eax: [OUT] value of EAX register
- * @ebx: [OUT] e.g. status from an HB message status command
- * @ecx: [OUT] e.g. status from a non-HB message status command
- * @edx: [OUT] e.g. channel id
- * @si: [OUT]
- * @di: [OUT]
- */
-#ifdef __x86_64__
-
-#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
- flags, magic, bp, \
- eax, ebx, ecx, edx, si, di) \
-({ \
- asm volatile ("push %%rbp;" \
- "mov %12, %%rbp;" \
- VMWARE_HYPERCALL_HB_OUT \
- "pop %%rbp;" : \
- "=a"(eax), \
- "=b"(ebx), \
- "=c"(ecx), \
- "=d"(edx), \
- "=S"(si), \
- "=D"(di) : \
- "a"(magic), \
- "b"(cmd), \
- "c"(in_ecx), \
- "d"(flags), \
- "S"(in_si), \
- "D"(in_di), \
- "r"(bp) : \
- "memory", "cc"); \
-})
-
-
-#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, \
- flags, magic, bp, \
- eax, ebx, ecx, edx, si, di) \
-({ \
- asm volatile ("push %%rbp;" \
- "mov %12, %%rbp;" \
- VMWARE_HYPERCALL_HB_IN \
- "pop %%rbp" : \
- "=a"(eax), \
- "=b"(ebx), \
- "=c"(ecx), \
- "=d"(edx), \
- "=S"(si), \
- "=D"(di) : \
- "a"(magic), \
- "b"(cmd), \
- "c"(in_ecx), \
- "d"(flags), \
- "S"(in_si), \
- "D"(in_di), \
- "r"(bp) : \
- "memory", "cc"); \
-})
-
-#else
-
-/*
- * In the 32-bit version of this macro, we store bp in a memory location
- * because we've ran out of registers.
- * Now we can't reference that memory location while we've modified
- * %esp or %ebp, so we first push it on the stack, just before we push
- * %ebp, and then when we need it we read it from the stack where we
- * just pushed it.
- */
-#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
- flags, magic, bp, \
- eax, ebx, ecx, edx, si, di) \
-({ \
- asm volatile ("push %12;" \
- "push %%ebp;" \
- "mov 0x04(%%esp), %%ebp;" \
- VMWARE_HYPERCALL_HB_OUT \
- "pop %%ebp;" \
- "add $0x04, %%esp;" : \
- "=a"(eax), \
- "=b"(ebx), \
- "=c"(ecx), \
- "=d"(edx), \
- "=S"(si), \
- "=D"(di) : \
- "a"(magic), \
- "b"(cmd), \
- "c"(in_ecx), \
- "d"(flags), \
- "S"(in_si), \
- "D"(in_di), \
- "m"(bp) : \
- "memory", "cc"); \
-})
-
-
-#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, \
- flags, magic, bp, \
- eax, ebx, ecx, edx, si, di) \
-({ \
- asm volatile ("push %12;" \
- "push %%ebp;" \
- "mov 0x04(%%esp), %%ebp;" \
- VMWARE_HYPERCALL_HB_IN \
- "pop %%ebp;" \
- "add $0x04, %%esp;" : \
- "=a"(eax), \
- "=b"(ebx), \
- "=c"(ecx), \
- "=d"(edx), \
- "=S"(si), \
- "=D"(di) : \
- "a"(magic), \
- "b"(cmd), \
- "c"(in_ecx), \
- "d"(flags), \
- "S"(in_si), \
- "D"(in_di), \
- "m"(bp) : \
- "memory", "cc"); \
-})
-#endif /* #if __x86_64__ */
-
-#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h
new file mode 100755
index 000000000000..4f40167ad61f
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2021 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _VMWGFX_MSG_ARM64_H
+#define _VMWGFX_MSG_ARM64_H
+
+#if defined(__aarch64__)
+
+#define VMWARE_HYPERVISOR_PORT 0x5658
+#define VMWARE_HYPERVISOR_PORT_HB 0x5659
+
+#define VMWARE_HYPERVISOR_HB BIT(0)
+#define VMWARE_HYPERVISOR_OUT BIT(1)
+
+#define X86_IO_MAGIC 0x86
+
+#define X86_IO_W7_SIZE_SHIFT 0
+#define X86_IO_W7_SIZE_MASK (0x3 << X86_IO_W7_SIZE_SHIFT)
+#define X86_IO_W7_DIR (1 << 2)
+#define X86_IO_W7_WITH (1 << 3)
+#define X86_IO_W7_STR (1 << 4)
+#define X86_IO_W7_DF (1 << 5)
+#define X86_IO_W7_IMM_SHIFT 5
+#define X86_IO_W7_IMM_MASK (0xff << X86_IO_W7_IMM_SHIFT)
+
+static inline void vmw_port(unsigned long cmd, unsigned long in_ebx,
+ unsigned long in_si, unsigned long in_di,
+ unsigned long flags, unsigned long magic,
+ unsigned long *eax, unsigned long *ebx,
+ unsigned long *ecx, unsigned long *edx,
+ unsigned long *si, unsigned long *di)
+{
+ register u64 x0 asm("x0") = magic;
+ register u64 x1 asm("x1") = in_ebx;
+ register u64 x2 asm("x2") = cmd;
+ register u64 x3 asm("x3") = flags | VMWARE_HYPERVISOR_PORT;
+ register u64 x4 asm("x4") = in_si;
+ register u64 x5 asm("x5") = in_di;
+
+ register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) |
+ X86_IO_W7_WITH |
+ X86_IO_W7_DIR |
+ (2 << X86_IO_W7_SIZE_SHIFT);
+
+ asm volatile("mrs xzr, mdccsr_el0 \n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2),
+ "+r"(x3), "+r"(x4), "+r"(x5)
+ : "r"(x7)
+ :);
+ *eax = x0;
+ *ebx = x1;
+ *ecx = x2;
+ *edx = x3;
+ *si = x4;
+ *di = x5;
+}
+
+static inline void vmw_port_hb(unsigned long cmd, unsigned long in_ecx,
+ unsigned long in_si, unsigned long in_di,
+ unsigned long flags, unsigned long magic,
+ unsigned long bp, u32 w7dir,
+ unsigned long *eax, unsigned long *ebx,
+ unsigned long *ecx, unsigned long *edx,
+ unsigned long *si, unsigned long *di)
+{
+ register u64 x0 asm("x0") = magic;
+ register u64 x1 asm("x1") = cmd;
+ register u64 x2 asm("x2") = in_ecx;
+ register u64 x3 asm("x3") = flags | VMWARE_HYPERVISOR_PORT_HB;
+ register u64 x4 asm("x4") = in_si;
+ register u64 x5 asm("x5") = in_di;
+ register u64 x6 asm("x6") = bp;
+ register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) |
+ X86_IO_W7_STR |
+ X86_IO_W7_WITH |
+ w7dir;
+
+ asm volatile("mrs xzr, mdccsr_el0 \n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2),
+ "+r"(x3), "+r"(x4), "+r"(x5)
+ : "r"(x6), "r"(x7)
+ :);
+ *eax = x0;
+ *ebx = x1;
+ *ecx = x2;
+ *edx = x3;
+ *si = x4;
+ *di = x5;
+}
+
+#define VMW_PORT(cmd, in_ebx, in_si, in_di, flags, magic, eax, ebx, ecx, edx, \
+ si, di) \
+ vmw_port(cmd, in_ebx, in_si, in_di, flags, magic, &eax, &ebx, &ecx, \
+ &edx, &si, &di)
+
+#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, flags, magic, bp, eax, ebx, \
+ ecx, edx, si, di) \
+ vmw_port_hb(cmd, in_ecx, in_si, in_di, flags, magic, bp, \
+ 0, &eax, &ebx, &ecx, &edx, &si, &di)
+
+#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, flags, magic, bp, eax, ebx, \
+ ecx, edx, si, di) \
+ vmw_port_hb(cmd, in_ecx, in_si, in_di, flags, magic, bp, \
+ X86_IO_W7_DIR, &eax, &ebx, &ecx, &edx, &si, &di)
+
+#endif
+
+#endif /* _VMWGFX_MSG_ARM64_H */
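
arm64 has no in/out instructions, so the backdoor is entered differently:
"mrs xzr, mdccsr_el0" is a benign debug-register read that the VMware
hypervisor traps; x0-x5 then stand in for the x86 register file and x7
carries an encoding of the emulated I/O operation (direction, size,
string/rep semantics) through the X86_IO_W7_* fields. Because the macros
preserve the x86 calling convention, vmwgfx_msg.c stays
architecture-neutral. Schematic call shape (cmd, msg_len and channel_flags
are illustrative names; the magic constant is defined in vmwgfx_msg.c):

    unsigned long eax, ebx, ecx, edx, si, di;

    VMW_PORT(cmd, msg_len, 0, 0, channel_flags,
             VMW_HYPERVISOR_MAGIC,   /* 0x564D5868, "VMXh" */
             eax, ebx, ecx, edx, si, di);
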
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h
new file mode 100644
index 000000000000..0b74ca2dfb7b
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR MIT */
+/**************************************************************************
+ *
+ * Copyright 2016-2021 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************
+ *
+ * Based on code from vmware.c and vmmouse.c.
+ * Author:
+ * Sinclair Yeh <syeh@vmware.com>
+ */
+#ifndef _VMWGFX_MSG_X86_H
+#define _VMWGFX_MSG_X86_H
+
+
+#if defined(__i386__) || defined(__x86_64__)
+
+#include <asm/vmware.h>
+
+/**
+ * Hypervisor-specific bi-directional communication channel. Should never
+ * execute on bare metal hardware. The caller must make sure to check for
+ * supported hypervisor before using these macros.
+ *
+ * The last two parameters are both input and output and must be initialized.
+ *
+ * @cmd: [IN] Message Cmd
+ * @in_ebx: [IN] Message Len, through EBX
+ * @in_si: [IN] Input argument through SI, set to 0 if not used
+ * @in_di: [IN] Input argument through DI, set to 0 if not used
+ * @flags: [IN] hypercall flags + [channel id]
+ * @magic: [IN] hypervisor magic value
+ * @eax: [OUT] value of EAX register
+ * @ebx: [OUT] e.g. status from an HB message status command
+ * @ecx: [OUT] e.g. status from a non-HB message status command
+ * @edx: [OUT] e.g. channel id
+ * @si: [OUT]
+ * @di: [OUT]
+ */
+#define VMW_PORT(cmd, in_ebx, in_si, in_di, \
+ flags, magic, \
+ eax, ebx, ecx, edx, si, di) \
+({ \
+ asm volatile (VMWARE_HYPERCALL : \
+ "=a"(eax), \
+ "=b"(ebx), \
+ "=c"(ecx), \
+ "=d"(edx), \
+ "=S"(si), \
+ "=D"(di) : \
+ "a"(magic), \
+ "b"(in_ebx), \
+ "c"(cmd), \
+ "d"(flags), \
+ "S"(in_si), \
+ "D"(in_di) : \
+ "memory"); \
+})
+
+
+/**
+ * Hypervisor-specific bi-directional communication channel. Should never
+ * execute on bare metal hardware. The caller must make sure to check for
+ * supported hypervisor before using these macros.
+ *
+ * The last 3 parameters are both input and output and must be initialized.
+ *
+ * @cmd: [IN] Message Cmd
+ * @in_ecx: [IN] Message Len, through ECX
+ * @in_si: [IN] Input argument through SI, set to 0 if not used
+ * @in_di: [IN] Input argument through DI, set to 0 if not used
+ * @flags: [IN] hypercall flags + [channel id]
+ * @magic: [IN] hypervisor magic value
+ * @bp: [IN]
+ * @eax: [OUT] value of EAX register
+ * @ebx: [OUT] e.g. status from an HB message status command
+ * @ecx: [OUT] e.g. status from a non-HB message status command
+ * @edx: [OUT] e.g. channel id
+ * @si: [OUT]
+ * @di: [OUT]
+ */
+#ifdef __x86_64__
+
+#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
+ flags, magic, bp, \
+ eax, ebx, ecx, edx, si, di) \
+({ \
+ asm volatile ("push %%rbp;" \
+ "mov %12, %%rbp;" \
+ VMWARE_HYPERCALL_HB_OUT \
+ "pop %%rbp;" : \
+ "=a"(eax), \
+ "=b"(ebx), \
+ "=c"(ecx), \
+ "=d"(edx), \
+ "=S"(si), \
+ "=D"(di) : \
+ "a"(magic), \
+ "b"(cmd), \
+ "c"(in_ecx), \
+ "d"(flags), \
+ "S"(in_si), \
+ "D"(in_di), \
+ "r"(bp) : \
+ "memory", "cc"); \
+})
+
+
+#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, \
+ flags, magic, bp, \
+ eax, ebx, ecx, edx, si, di) \
+({ \
+ asm volatile ("push %%rbp;" \
+ "mov %12, %%rbp;" \
+ VMWARE_HYPERCALL_HB_IN \
+ "pop %%rbp" : \
+ "=a"(eax), \
+ "=b"(ebx), \
+ "=c"(ecx), \
+ "=d"(edx), \
+ "=S"(si), \
+ "=D"(di) : \
+ "a"(magic), \
+ "b"(cmd), \
+ "c"(in_ecx), \
+ "d"(flags), \
+ "S"(in_si), \
+ "D"(in_di), \
+ "r"(bp) : \
+ "memory", "cc"); \
+})
+
+#elif defined(__i386__)
+
+/*
+ * In the 32-bit version of this macro, we store bp in a memory location
+ * because we've run out of registers.
+ * Now we can't reference that memory location while we've modified
+ * %esp or %ebp, so we first push it on the stack, just before we push
+ * %ebp, and then when we need it we read it from the stack where we
+ * just pushed it.
+ */
+#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
+ flags, magic, bp, \
+ eax, ebx, ecx, edx, si, di) \
+({ \
+ asm volatile ("push %12;" \
+ "push %%ebp;" \
+ "mov 0x04(%%esp), %%ebp;" \
+ VMWARE_HYPERCALL_HB_OUT \
+ "pop %%ebp;" \
+ "add $0x04, %%esp;" : \
+ "=a"(eax), \
+ "=b"(ebx), \
+ "=c"(ecx), \
+ "=d"(edx), \
+ "=S"(si), \
+ "=D"(di) : \
+ "a"(magic), \
+ "b"(cmd), \
+ "c"(in_ecx), \
+ "d"(flags), \
+ "S"(in_si), \
+ "D"(in_di), \
+ "m"(bp) : \
+ "memory", "cc"); \
+})
+
+
+#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, \
+ flags, magic, bp, \
+ eax, ebx, ecx, edx, si, di) \
+({ \
+ asm volatile ("push %12;" \
+ "push %%ebp;" \
+ "mov 0x04(%%esp), %%ebp;" \
+ VMWARE_HYPERCALL_HB_IN \
+ "pop %%ebp;" \
+ "add $0x04, %%esp;" : \
+ "=a"(eax), \
+ "=b"(ebx), \
+ "=c"(ecx), \
+ "=d"(edx), \
+ "=S"(si), \
+ "=D"(di) : \
+ "a"(magic), \
+ "b"(cmd), \
+ "c"(in_ecx), \
+ "d"(flags), \
+ "S"(in_si), \
+ "D"(in_di), \
+ "m"(bp) : \
+ "memory", "cc"); \
+})
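+
+/*
+ * Editorial annotation: the stack discipline the two macros above rely
+ * on, spelled out step by step (a sketch, not additional patch content):
+ *
+ *	push %12		bp (the "m" operand) copied onto the stack
+ *	push %ebp		caller's frame pointer saved below it
+ *	mov  0x04(%esp), %ebp	reload bp; it sits one slot above the
+ *				saved %ebp
+ *	<hypercall>		%ebp now carries bp for the device
+ *	pop  %ebp		restore the frame pointer
+ *	add  $0x04, %esp	drop the stashed copy of bp
+ */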
+#endif /* defined(__i386__) */
+
+#endif /* defined(__i386__) || defined(__x86_64__) */
+
+#endif /* _VMWGFX_MSG_X86_H */
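Editorial note: the high-bandwidth macros above are driven from
vmwgfx_msg.c. A hedged sketch of the send side, modeled on
vmw_port_hb_out(); the VMW_PORT_CMD_HB_MSG, VMWARE_HYPERVISOR_HB and
VMWARE_HYPERVISOR_OUT constants and the rpc_channel cookie fields are
assumptions taken from that file, not guaranteed by this header:

	unsigned long si, di, eax, ebx, ecx, edx;
	unsigned long bp = channel->cookie_high;
	unsigned long msg_len = strlen(msg);

	si = (uintptr_t) msg;		/* source buffer through SI */
	di = channel->cookie_low;

	VMW_PORT_HB_OUT(
		(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
		msg_len, si, di,
		VMWARE_HYPERVISOR_HB | (channel->channel_id << 16) |
			VMWARE_HYPERVISOR_OUT,
		VMW_HYPERVISOR_MAGIC, bp,
		eax, ebx, ecx, edx, si, di);

	/* On success the device reports the bytes transferred in EBX. */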
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index ac4a9b722279..54c5d16eb3b7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -421,7 +421,7 @@ int vmw_overlay_pause_all(struct vmw_private *dev_priv)
static bool vmw_overlay_available(const struct vmw_private *dev_priv)
{
return (dev_priv->overlay_priv != NULL &&
- ((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
+ ((vmw_fifo_caps(dev_priv) & VMW_OVERLAY_CAP_MASK) ==
VMW_OVERLAY_CAP_MASK));
}
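Editorial note: the hunk above swaps a direct dev_priv->fifo.capabilities
read for the vmw_fifo_caps() helper introduced earlier in this series,
which has to cope with the FIFO being absent. A plausible shape for the
helper, given only as an assumption for illustration (the real definition
is in vmwgfx_cmd.c):

	u32 vmw_fifo_caps(const struct vmw_private *dev_priv)
	{
		/* Without a FIFO there are no FIFO capabilities. */
		if (!dev_priv->fifo_mem || !dev_priv->fifo)
			return 0;
		return dev_priv->fifo->capabilities;
	}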
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 45c9c6a7f1d6..e5a9a5cbd01a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -232,7 +232,7 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
- pgoff_t num_pages = vbo->base.mem.num_pages;
+ pgoff_t num_pages = vbo->base.resource->num_pages;
size_t size, acc_size;
int ret;
static struct ttm_operation_ctx ctx = {
@@ -413,7 +413,7 @@ vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
return ret;
page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
- if (unlikely(page_offset >= bo->mem.num_pages)) {
+ if (unlikely(page_offset >= bo->resource->num_pages)) {
ret = VM_FAULT_SIGBUS;
goto out_unlock;
}
@@ -456,7 +456,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
page_offset = vmf->pgoff -
drm_vma_node_start(&bo->base.vma_node);
- if (page_offset >= bo->mem.num_pages ||
+ if (page_offset >= bo->resource->num_pages ||
vmw_resources_clean(vbo, page_offset,
page_offset + PAGE_SIZE,
&allowed_prefault)) {
@@ -529,7 +529,7 @@ vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
page_offset = vmf->pgoff -
drm_vma_node_start(&bo->base.vma_node);
- if (page_offset >= bo->mem.num_pages ||
+ if (page_offset >= bo->resource->num_pages ||
vmw_resources_clean(vbo, page_offset,
page_offset + PAGE_SIZE,
&allowed_prefault)) {
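Editorial note: this hunk, like the matching ones in vmwgfx_shader.c,
vmwgfx_streamoutput.c and vmwgfx_surface.c further down, tracks a TTM
core change: placement data is no longer embedded in the buffer object
but reached through a pointer. A sketch of the relevant fields only
(the real structures carry many more members):

	/* before */
	struct ttm_buffer_object {
		struct ttm_resource mem;	/* embedded value */
		/* ... */
	};

	/* after */
	struct ttm_buffer_object {
		struct ttm_resource *resource;	/* separately allocated */
		/* ... */
	};

Every bo->mem.X access therefore becomes bo->resource->X.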
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
index e99f6cdbb091..cf585dfe5669 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
@@ -34,10 +34,6 @@
#include <linux/types.h>
-#define VMWGFX_INDEX_PORT 0x0
-#define VMWGFX_VALUE_PORT 0x1
-#define VMWGFX_IRQSTATUS_PORT 0x8
-
struct svga_guest_mem_descriptor {
u32 ppn;
u32 num_pages;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 35f02958ee2c..7b45393ad98e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -280,7 +280,7 @@ out_bad_resource:
}
/**
- * vmw_user_resource_lookup_handle - lookup a struct resource from a
+ * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
* TTM user-space handle and perform basic type checks
*
* @dev_priv: Pointer to a device private struct
@@ -990,7 +990,6 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
struct vmw_private *dev_priv = res->dev_priv;
int ret;
- ttm_write_lock(&dev_priv->reservation_sem, interruptible);
mutex_lock(&dev_priv->cmdbuf_mutex);
ret = vmw_resource_reserve(res, interruptible, false);
if (ret)
@@ -1029,7 +1028,6 @@ out_no_validate:
vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
mutex_unlock(&dev_priv->cmdbuf_mutex);
- ttm_write_unlock(&dev_priv->reservation_sem);
return ret;
}
@@ -1047,7 +1045,6 @@ void vmw_resource_unpin(struct vmw_resource *res)
struct vmw_private *dev_priv = res->dev_priv;
int ret;
- (void) ttm_read_lock(&dev_priv->reservation_sem, false);
mutex_lock(&dev_priv->cmdbuf_mutex);
ret = vmw_resource_reserve(res, false, true);
@@ -1065,7 +1062,6 @@ void vmw_resource_unpin(struct vmw_resource *res)
vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
mutex_unlock(&dev_priv->cmdbuf_mutex);
- ttm_read_unlock(&dev_priv->reservation_sem);
}
/**
@@ -1079,7 +1075,7 @@ enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
}
/**
- * vmw_resource_update_dirty - Update a resource's dirty tracker with a
+ * vmw_resource_dirty_update - Update a resource's dirty tracker with a
* sequential range of touched backing store memory.
* @res: The resource.
* @start: The first page touched.
@@ -1170,7 +1166,7 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
if (bo->moving)
dma_fence_put(bo->moving);
bo->moving = dma_fence_get
- (dma_resv_get_excl(bo->base.resv));
+ (dma_resv_excl_fence(bo->base.resv));
}
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 9bc9a0714664..145430d14219 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -1222,7 +1222,7 @@ static void vmw_sou_bo_clip(struct vmw_kms_dirty *dirty)
}
/**
- * vmw_kms_do_bo_dirty - Dirty part of a buffer-object backed framebuffer
+ * vmw_kms_sou_do_bo_dirty - Dirty part of a buffer-object backed framebuffer
*
* @dev_priv: Pointer to the device private structure.
* @framebuffer: Pointer to the buffer-object backed framebuffer.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index a0db06564013..b3c8d2da6f1a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -254,7 +254,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
} *cmd;
struct ttm_buffer_object *bo = val_buf->bo;
- BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
@@ -263,7 +263,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
cmd->header.size = sizeof(cmd->body);
cmd->body.shid = res->id;
- cmd->body.mobid = bo->mem.start;
+ cmd->body.mobid = bo->resource->start;
cmd->body.offsetInBytes = res->backup_offset;
res->backup_dirty = false;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
@@ -282,7 +282,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
} *cmd;
struct vmw_fence_obj *fence;
- BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
+ BUG_ON(res->backup->base.resource->mem_type != VMW_PL_MOB);
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
@@ -402,7 +402,7 @@ static int vmw_dx_shader_unscrub(struct vmw_resource *res)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = shader->ctx->id;
cmd->body.shid = shader->id;
- cmd->body.mobid = res->backup->base.mem.start;
+ cmd->body.mobid = res->backup->base.resource->start;
cmd->body.offsetInBytes = res->backup_offset;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
@@ -450,7 +450,7 @@ static int vmw_dx_shader_bind(struct vmw_resource *res,
struct vmw_private *dev_priv = res->dev_priv;
struct ttm_buffer_object *bo = val_buf->bo;
- BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
mutex_lock(&dev_priv->binding_mutex);
vmw_dx_shader_unscrub(res);
mutex_unlock(&dev_priv->binding_mutex);
@@ -513,7 +513,7 @@ static int vmw_dx_shader_unbind(struct vmw_resource *res,
struct vmw_fence_obj *fence;
int ret;
- BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
+ BUG_ON(res->backup->base.resource->mem_type != VMW_PL_MOB);
mutex_lock(&dev_priv->binding_mutex);
ret = vmw_dx_shader_scrub(res);
@@ -876,15 +876,9 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
goto out_bad_arg;
}
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- goto out_bad_arg;
-
ret = vmw_user_shader_alloc(dev_priv, buffer, size, offset,
shader_type, num_input_sig,
num_output_sig, tfile, shader_handle);
-
- ttm_read_unlock(&dev_priv->reservation_sem);
out_bad_arg:
vmw_bo_unreference(&buffer);
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
index 73e9a487e659..33b69a70cfe3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
@@ -162,13 +162,8 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE +
TTM_OBJ_EXTRA_SIZE;
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (ret)
- return ret;
-
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), account_size,
&ctx);
- ttm_read_unlock(&dev_priv->reservation_sem);
if (ret) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for %s"
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
index 2877c7b43bd7..c3a8d6e8380e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -33,7 +33,7 @@
* The currently only reason we need to keep track of views is that if we
* destroy a hardware surface, all views pointing to it must also be destroyed,
* otherwise the device will error.
- * So in particuar if a surface is evicted, we must destroy all views pointing
+ * So in particular if a surface is evicted, we must destroy all views pointing
* to it, and all context bindings of that view. Similarly we must restore
* the view bindings, views and surfaces pointed to by the views when a
* context is referenced in the command stream.
@@ -90,7 +90,7 @@ static const struct vmw_res_func vmw_view_func = {
};
/**
- * struct vmw_view - view define command body stub
+ * struct vmw_view_define - view define command body stub
*
* @view_id: The device id of the view being defined
* @sid: The surface id of the view being defined
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 7b11f0285786..9e236f9f8a8a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -742,7 +742,7 @@ out_unref:
}
/**
- * vmw_stdu_surface_clip - Callback to encode a surface copy command cliprect
+ * vmw_kms_stdu_surface_clip - Callback to encode a surface copy command cliprect
*
* @dirty: The closure structure.
*
@@ -780,7 +780,7 @@ static void vmw_kms_stdu_surface_clip(struct vmw_kms_dirty *dirty)
}
/**
- * vmw_stdu_surface_fifo_commit - Callback to fill in and submit a surface
+ * vmw_kms_stdu_surface_fifo_commit - Callback to fill in and submit a surface
* copy command.
*
* @dirty: The closure structure.
@@ -1571,7 +1571,7 @@ static int vmw_stdu_plane_update_surface(struct vmw_private *dev_priv,
/**
* vmw_stdu_primary_plane_atomic_update - formally switches STDU to new plane
* @plane: display plane
- * @old_state: Only used to get crtc info
+ * @state: Only used to get crtc info
*
* Formally update stdu->display_srf to the new plane, and bind the new
* plane STDU. This function is called during the commit phase when
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
index 1dd042a20a66..c8efa4a6c995 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
@@ -106,7 +106,7 @@ static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res)
cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT;
cmd->header.size = sizeof(cmd->body);
cmd->body.soid = so->id;
- cmd->body.mobid = res->backup->base.mem.start;
+ cmd->body.mobid = res->backup->base.resource->start;
cmd->body.offsetInBytes = res->backup_offset;
cmd->body.sizeInBytes = so->size;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
@@ -142,7 +142,7 @@ static int vmw_dx_streamoutput_bind(struct vmw_resource *res,
struct ttm_buffer_object *bo = val_buf->bo;
int ret;
- if (WARN_ON(bo->mem.mem_type != VMW_PL_MOB))
+ if (WARN_ON(bo->resource->mem_type != VMW_PL_MOB))
return -EINVAL;
mutex_lock(&dev_priv->binding_mutex);
@@ -197,7 +197,7 @@ static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback,
struct vmw_fence_obj *fence;
int ret;
- if (WARN_ON(res->backup->base.mem.mem_type != VMW_PL_MOB))
+ if (WARN_ON(res->backup->base.resource->mem_type != VMW_PL_MOB))
return -EINVAL;
mutex_lock(&dev_priv->binding_mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index c3e55c1376eb..0835468bb2ee 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -680,7 +680,7 @@ static void vmw_user_surface_free(struct vmw_resource *res)
}
/**
- * vmw_user_surface_free - User visible surface TTM base object destructor
+ * vmw_user_surface_base_release - User visible surface TTM base object destructor
*
* @p_base: Pointer to a pointer to a TTM base object
* embedded in a struct vmw_user_surface.
@@ -702,7 +702,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
}
/**
- * vmw_user_surface_destroy_ioctl - Ioctl function implementing
+ * vmw_surface_destroy_ioctl - Ioctl function implementing
* the user surface destroy functionality.
*
* @dev: Pointer to a struct drm_device.
@@ -719,7 +719,7 @@ int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
}
/**
- * vmw_user_surface_define_ioctl - Ioctl function implementing
+ * vmw_surface_define_ioctl - Ioctl function implementing
* the user surface define functionality.
*
* @dev: Pointer to a struct drm_device.
@@ -779,10 +779,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- return ret;
-
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
size, &ctx);
if (unlikely(ret != 0)) {
@@ -913,7 +909,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
rep->sid = user_srf->prime.base.handle;
vmw_resource_unreference(&res);
- ttm_read_unlock(&dev_priv->reservation_sem);
return 0;
out_no_copy:
kfree(srf->offsets);
@@ -924,7 +919,6 @@ out_no_sizes:
out_no_user_srf:
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
- ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
@@ -1007,7 +1001,7 @@ out_no_lookup:
}
/**
- * vmw_user_surface_define_ioctl - Ioctl function implementing
+ * vmw_surface_reference_ioctl - Ioctl function implementing
* the user surface reference functionality.
*
* @dev: Pointer to a struct drm_device.
@@ -1061,7 +1055,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
}
/**
- * vmw_surface_define_encode - Encode a surface_define command.
+ * vmw_gb_surface_create - Encode a surface_define command.
*
* @res: Pointer to a struct vmw_resource embedded in a struct
* vmw_surface.
@@ -1218,7 +1212,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
uint32_t submit_size;
struct ttm_buffer_object *bo = val_buf->bo;
- BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
@@ -1229,7 +1223,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
cmd1->header.size = sizeof(cmd1->body);
cmd1->body.sid = res->id;
- cmd1->body.mobid = bo->mem.start;
+ cmd1->body.mobid = bo->resource->start;
if (res->backup_dirty) {
cmd2 = (void *) &cmd1[1];
cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
@@ -1272,7 +1266,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
uint8_t *cmd;
- BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
@@ -1542,10 +1536,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
if (drm_is_primary_client(file_priv))
user_srf->master = drm_master_get(file_priv->master);
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- return ret;
-
res = &user_srf->srf.res;
if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
@@ -1627,7 +1617,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
vmw_resource_unreference(&res);
out_unlock:
- ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
@@ -1804,6 +1793,19 @@ static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res,
svga3dsurface_get_loc(cache, &loc2, end - 1);
svga3dsurface_inc_loc(cache, &loc2);
+ if (loc1.sheet != loc2.sheet) {
+ u32 sub_res;
+
+ /*
+	 * The dirty range spans multiple multisample sheets. Computing
+	 * the dirty region of each sheet and taking the union would be
+	 * optimal, but since this is not a common case, just dirty the
+	 * whole surface.
+ */
+ for (sub_res = 0; sub_res < dirty->num_subres; ++sub_res)
+ vmw_subres_dirty_full(dirty, sub_res);
+ return;
+ }
if (loc1.sub_resource + 1 == loc2.sub_resource) {
/* Dirty range covers a single sub-resource */
vmw_subres_dirty_add(dirty, &loc1, &loc2);
@@ -2112,10 +2114,6 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv,
if (req->sizes != NULL)
return -EINVAL;
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- return ret;
-
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
user_accounting_size, &ctx);
if (ret != 0) {
@@ -2179,13 +2177,11 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv,
*/
ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
- ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
out_no_user_srf:
ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
out_unlock:
- ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
index eb63cbe64909..2a3d3468e4e0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
@@ -7,6 +7,7 @@
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
/**
* struct vmw_thp_manager - Range manager implementing huge page alignment
@@ -28,15 +29,16 @@ static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man)
static const struct ttm_resource_manager_func vmw_thp_func;
-static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node,
+static int vmw_thp_insert_aligned(struct ttm_buffer_object *bo,
+ struct drm_mm *mm, struct drm_mm_node *node,
unsigned long align_pages,
const struct ttm_place *place,
struct ttm_resource *mem,
unsigned long lpfn,
enum drm_mm_insert_mode mode)
{
- if (align_pages >= mem->page_alignment &&
- (!mem->page_alignment || align_pages % mem->page_alignment == 0)) {
+ if (align_pages >= bo->page_alignment &&
+ (!bo->page_alignment || align_pages % bo->page_alignment == 0)) {
return drm_mm_insert_node_in_range(mm, node,
mem->num_pages,
align_pages, 0,
@@ -49,20 +51,22 @@ static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node,
static int vmw_thp_get_node(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *mem)
+ struct ttm_resource **res)
{
struct vmw_thp_manager *rman = to_thp_manager(man);
struct drm_mm *mm = &rman->mm;
- struct drm_mm_node *node;
+ struct ttm_range_mgr_node *node;
unsigned long align_pages;
unsigned long lpfn;
enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
int ret;
- node = kzalloc(sizeof(*node), GFP_KERNEL);
+ node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
if (!node)
return -ENOMEM;
+ ttm_resource_init(bo, place, &node->base);
+
lpfn = place->lpfn;
if (!lpfn)
lpfn = man->size;
@@ -74,24 +78,27 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
spin_lock(&rman->lock);
if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
- if (mem->num_pages >= align_pages) {
- ret = vmw_thp_insert_aligned(mm, node, align_pages,
- place, mem, lpfn, mode);
+ if (node->base.num_pages >= align_pages) {
+ ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
+ align_pages, place,
+ &node->base, lpfn, mode);
if (!ret)
goto found_unlock;
}
}
align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
- if (mem->num_pages >= align_pages) {
- ret = vmw_thp_insert_aligned(mm, node, align_pages, place, mem,
+ if (node->base.num_pages >= align_pages) {
+ ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
+ align_pages, place, &node->base,
lpfn, mode);
if (!ret)
goto found_unlock;
}
- ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
- mem->page_alignment, 0,
+ ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
+ node->base.num_pages,
+ bo->page_alignment, 0,
place->fpfn, lpfn, mode);
found_unlock:
spin_unlock(&rman->lock);
@@ -99,28 +106,24 @@ found_unlock:
if (unlikely(ret)) {
kfree(node);
} else {
- mem->mm_node = node;
- mem->start = node->start;
+ node->base.start = node->mm_nodes[0].start;
+ *res = &node->base;
}
return ret;
}
-
-
static void vmw_thp_put_node(struct ttm_resource_manager *man,
- struct ttm_resource *mem)
+ struct ttm_resource *res)
{
+ struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
struct vmw_thp_manager *rman = to_thp_manager(man);
- if (mem->mm_node) {
- spin_lock(&rman->lock);
- drm_mm_remove_node(mem->mm_node);
- spin_unlock(&rman->lock);
+ spin_lock(&rman->lock);
+ drm_mm_remove_node(&node->mm_nodes[0]);
+ spin_unlock(&rman->lock);
- kfree(mem->mm_node);
- mem->mm_node = NULL;
- }
+ kfree(node);
}
int vmw_thp_init(struct vmw_private *dev_priv)
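Editorial note: the THP manager now allocates the combined node type
used by TTM's stock range manager, which explains both the
struct_size() allocation and the to_ttm_range_mgr_node() downcast
above. As declared in <drm/ttm/ttm_range_manager.h> (quoted from
memory, so treat it as a sketch):

	struct ttm_range_mgr_node {
		struct ttm_resource base;	/* handed back via *res */
		struct drm_mm_node mm_nodes[];	/* flexible array, hence
						 * struct_size(node, mm_nodes, 1) */
	};

	static inline struct ttm_range_mgr_node *
	to_ttm_range_mgr_node(struct ttm_resource *res)
	{
		return container_of(res, struct ttm_range_mgr_node, base);
	}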
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 2dc031fe4a90..0488042fb287 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -200,7 +200,8 @@ struct vmw_ttm_tt {
const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
/**
- * Helper functions to advance a struct vmw_piter iterator.
+ * __vmw_piter_non_sg_next: Helper function to advance
+ * a struct vmw_piter iterator.
*
* @viter: Pointer to the iterator.
*
@@ -222,7 +223,8 @@ static bool __vmw_piter_sg_next(struct vmw_piter *viter)
/**
- * Helper functions to return a pointer to the current page.
+ * __vmw_piter_non_sg_page: Helper function to return a pointer
+ * to the current page.
*
* @viter: Pointer to the iterator
*
@@ -236,7 +238,8 @@ static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
}
/**
- * Helper functions to return the DMA address of the current page.
+ * __vmw_piter_phys_addr: Helper function to return the DMA
+ * address of the current page.
*
* @viter: Pointer to the iterator
*
@@ -658,14 +661,6 @@ static void vmw_evict_flags(struct ttm_buffer_object *bo,
*placement = vmw_sys_placement;
}
-static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
-{
- struct ttm_object_file *tfile =
- vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
-
- return vmw_user_bo_verify_access(bo, tfile);
-}
-
static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
@@ -724,7 +719,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
struct ttm_resource *new_mem,
struct ttm_place *hop)
{
- struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->mem.mem_type);
+ struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->resource->mem_type);
struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
int ret;
@@ -734,11 +729,11 @@ static int vmw_move(struct ttm_buffer_object *bo,
return ret;
}
- vmw_move_notify(bo, &bo->mem, new_mem);
+ vmw_move_notify(bo, bo->resource, new_mem);
if (old_man->use_tt && new_man->use_tt) {
- if (bo->mem.mem_type == TTM_PL_SYSTEM) {
- ttm_bo_assign_mem(bo, new_mem);
+ if (bo->resource->mem_type == TTM_PL_SYSTEM) {
+ ttm_bo_move_null(bo, new_mem);
return 0;
}
ret = ttm_bo_wait_ctx(bo, ctx);
@@ -746,7 +741,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
goto fail;
vmw_ttm_unbind(bo->bdev, bo->ttm);
- ttm_resource_free(bo, &bo->mem);
+ ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_mem);
return 0;
} else {
@@ -756,7 +751,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
}
return 0;
fail:
- vmw_move_notify(bo, new_mem, &bo->mem);
+ vmw_move_notify(bo, new_mem, bo->resource);
return ret;
}
@@ -768,7 +763,6 @@ struct ttm_device_funcs vmw_bo_driver = {
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = vmw_evict_flags,
.move = vmw_move,
- .verify_access = vmw_verify_access,
.swap_notify = vmw_swap_notify,
.io_mem_reserve = &vmw_ttm_io_mem_reserve,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index cb9975889e2f..e6b1f98ec99f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -27,6 +27,32 @@
#include "vmwgfx_drv.h"
+static struct ttm_buffer_object *vmw_bo_vm_lookup(struct ttm_device *bdev,
+ unsigned long offset,
+ unsigned long pages)
+{
+ struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
+ struct drm_device *drm = &dev_priv->drm;
+ struct drm_vma_offset_node *node;
+ struct ttm_buffer_object *bo = NULL;
+
+ drm_vma_offset_lock_lookup(bdev->vma_manager);
+
+ node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
+ if (likely(node)) {
+ bo = container_of(node, struct ttm_buffer_object,
+ base.vma_node);
+ bo = ttm_bo_get_unless_zero(bo);
+ }
+
+ drm_vma_offset_unlock_lookup(bdev->vma_manager);
+
+ if (!bo)
+ drm_err(drm, "Could not find buffer object to map\n");
+
+ return bo;
+}
+
int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
{
static const struct vm_operations_struct vmw_vm_ops = {
@@ -41,10 +67,25 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
};
struct drm_file *file_priv = filp->private_data;
struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
- int ret = ttm_bo_mmap(filp, vma, &dev_priv->bdev);
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct ttm_device *bdev = &dev_priv->bdev;
+ struct ttm_buffer_object *bo;
+ int ret;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
+ return -EINVAL;
+
+ bo = vmw_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
+ if (unlikely(!bo))
+ return -EINVAL;
- if (ret)
- return ret;
+ ret = vmw_user_bo_verify_access(bo, tfile);
+ if (unlikely(ret != 0))
+ goto out_unref;
+
+ ret = ttm_bo_mmap_obj(vma, bo);
+ if (unlikely(ret != 0))
+ goto out_unref;
vma->vm_ops = &vmw_vm_ops;
@@ -52,7 +93,13 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
if (!is_cow_mapping(vma->vm_flags))
vma->vm_flags = (vma->vm_flags & ~VM_MIXEDMAP) | VM_PFNMAP;
+ ttm_bo_put(bo); /* release extra ref taken by ttm_bo_mmap_obj() */
+
return 0;
+
+out_unref:
+ ttm_bo_put(bo);
+ return ret;
}
/* struct vmw_validation_mem callback */
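Editorial note: the rewritten vmw_mmap() open-codes what the removed
ttm_bo_mmap()/verify_access pair used to do, and the reference counting
is the subtle part. The flow of the hunk above, spelled out as a sketch:

	bo = vmw_bo_vm_lookup(...);	/* +1 ref, ttm_bo_get_unless_zero() */
	vmw_user_bo_verify_access(...);	/* on error: ttm_bo_put() and bail */
	ttm_bo_mmap_obj(vma, bo);	/* takes its own ref for the VMA */
	ttm_bo_put(bo);			/* drop the lookup ref; the VMA's
					 * ref keeps the BO alive */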
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index e7570f422400..8338b1d20f2a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -82,7 +82,7 @@ struct vmw_validation_res_node {
u32 reserved : 1;
u32 dirty : 1;
u32 dirty_set : 1;
- unsigned long private[0];
+ unsigned long private[];
};
/**
@@ -809,7 +809,7 @@ void vmw_validation_revert(struct vmw_validation_context *ctx)
}
/**
- * vmw_validation_cone - Commit validation actions after command submission
+ * vmw_validation_done - Commit validation actions after command submission
* success.
* @ctx: The validation context.
* @fence: Fence with which to fence all buffer objects taking part in the