author     Maxime Ripard <maxime@cerno.tech>    2023-05-12 10:47:12 +0300
committer  Maxime Ripard <maxime@cerno.tech>    2023-05-12 10:47:12 +0300
commit     50282fd57bcd3525c9d81eef58df8718e4337c6d (patch)
tree       9795dfdb1a9b66904d9320f33c9216683d10576a /drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
parent     ad81e23426a651eb89a4b306e1c4169e6308c124 (diff)
parent     ac9a78681b921877518763ba0e89202254349d1b (diff)
download   linux-50282fd57bcd3525c9d81eef58df8718e4337c6d.tar.xz
Merge drm/drm-fixes into drm-misc-fixes
Let's bring 6.4-rc1 in drm-misc-fixes to start the new fix cycle.

Signed-off-by: Maxime Ripard <maxime@cerno.tech>
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_surface.c')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c  107
1 file changed, 55 insertions(+), 52 deletions(-)
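This merge pulls the vmwgfx buffer-object rework into drm-misc-fixes: the resource "backup" members are renamed to "guest memory" members (res->backup becomes res->guest_memory_bo, res->backup_size becomes res->guest_memory_size, and so on), and struct vmw_res_func drops its backup_placement pointer in favour of explicit domain/busy_domain placement flags. Below is a minimal, userspace-compilable sketch of that field change; the struct and the domain bit values are trimmed stand-ins for the kernel definitions (presumably vmwgfx_bo.h, which this file now includes, and vmwgfx_resource_priv.h), kept only to make the example self-contained, while the field names and initializer values match the hunks that follow.

#include <stdbool.h>

/* Stand-in domain bits; the real values live in the kernel headers. */
enum {
	VMW_BO_DOMAIN_GMR  = 1u << 0,
	VMW_BO_DOMAIN_VRAM = 1u << 1,
	VMW_BO_DOMAIN_MOB  = 1u << 2,
};

/* Trimmed stand-in for struct vmw_res_func: only the fields touched here. */
struct vmw_res_func_sketch {
	bool         needs_guest_memory; /* was: needs_backup */
	unsigned int domain;             /* was: const struct ttm_placement *backup_placement */
	unsigned int busy_domain;        /* new: domains acceptable while the buffer is busy */
};

/* Same values as the two initializers changed in the diff below. */
static const struct vmw_res_func_sketch legacy_surface_sketch = {
	.needs_guest_memory = false,
	.domain             = VMW_BO_DOMAIN_GMR,
	.busy_domain        = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
};

static const struct vmw_res_func_sketch gb_surface_sketch = {
	.needs_guest_memory = true,
	.domain             = VMW_BO_DOMAIN_MOB,
	.busy_domain        = VMW_BO_DOMAIN_MOB,
};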
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index dcfb003841b3..5db403ee8261 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -25,8 +25,7 @@
*
**************************************************************************/
-#include <drm/ttm/ttm_placement.h>
-
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
@@ -34,6 +33,8 @@
#include "vmw_surface_cache.h"
#include "device_include/svga3d_surfacedefs.h"
+#include <drm/ttm/ttm_placement.h>
+
#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32)
#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
@@ -125,12 +126,13 @@ const struct vmw_user_resource_conv *user_surface_converter =
static const struct vmw_res_func vmw_legacy_surface_func = {
.res_type = vmw_res_surface,
- .needs_backup = false,
+ .needs_guest_memory = false,
.may_evict = true,
.prio = 1,
.dirty_prio = 1,
.type_name = "legacy surfaces",
- .backup_placement = &vmw_srf_placement,
+ .domain = VMW_BO_DOMAIN_GMR,
+ .busy_domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
.create = &vmw_legacy_srf_create,
.destroy = &vmw_legacy_srf_destroy,
.bind = &vmw_legacy_srf_bind,
@@ -139,12 +141,13 @@ static const struct vmw_res_func vmw_legacy_surface_func = {
static const struct vmw_res_func vmw_gb_surface_func = {
.res_type = vmw_res_surface,
- .needs_backup = true,
+ .needs_guest_memory = true,
.may_evict = true,
.prio = 1,
.dirty_prio = 2,
.type_name = "guest backed surfaces",
- .backup_placement = &vmw_mob_placement,
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_gb_surface_create,
.destroy = vmw_gb_surface_destroy,
.bind = vmw_gb_surface_bind,
@@ -379,7 +382,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
*/
mutex_lock(&dev_priv->cmdbuf_mutex);
- dev_priv->used_memory_size -= res->backup_size;
+ dev_priv->used_memory_size -= res->guest_memory_size;
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
}
@@ -409,7 +412,7 @@ static int vmw_legacy_srf_create(struct vmw_resource *res)
return 0;
srf = vmw_res_to_srf(res);
- if (unlikely(dev_priv->used_memory_size + res->backup_size >=
+ if (unlikely(dev_priv->used_memory_size + res->guest_memory_size >=
dev_priv->memory_size))
return -EBUSY;
@@ -447,7 +450,7 @@ static int vmw_legacy_srf_create(struct vmw_resource *res)
* Surface memory usage accounting.
*/
- dev_priv->used_memory_size += res->backup_size;
+ dev_priv->used_memory_size += res->guest_memory_size;
return 0;
out_no_fifo:
@@ -524,7 +527,7 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res,
static int vmw_legacy_srf_bind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf)
{
- if (!res->backup_dirty)
+ if (!res->guest_memory_dirty)
return 0;
return vmw_legacy_srf_dma(res, val_buf, true);
@@ -583,7 +586,7 @@ static int vmw_legacy_srf_destroy(struct vmw_resource *res)
* Surface memory usage accounting.
*/
- dev_priv->used_memory_size -= res->backup_size;
+ dev_priv->used_memory_size -= res->guest_memory_size;
/*
* Release the surface ID.
@@ -683,8 +686,8 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
container_of(base, struct vmw_user_surface, prime.base);
struct vmw_resource *res = &user_srf->srf.res;
- if (res && res->backup)
- drm_gem_object_put(&res->backup->base.base);
+ if (res->guest_memory_bo)
+ drm_gem_object_put(&res->guest_memory_bo->tbo.base);
*p_base = NULL;
vmw_resource_unreference(&res);
@@ -812,7 +815,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
++cur_size;
}
}
- res->backup_size = cur_bo_offset;
+ res->guest_memory_size = cur_bo_offset;
if (metadata->scanout &&
metadata->num_sizes == 1 &&
metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH &&
@@ -856,14 +859,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
ret = vmw_gem_object_create_with_handle(dev_priv,
file_priv,
- res->backup_size,
+ res->guest_memory_size,
&backup_handle,
- &res->backup);
+ &res->guest_memory_bo);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
goto out_unlock;
}
- vmw_bo_reference(res->backup);
+ vmw_bo_reference(res->guest_memory_bo);
/*
* We don't expose the handle to the userspace and surface
* already holds a gem reference
@@ -872,7 +875,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
tmp = vmw_resource_reference(&srf->res);
- ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
+ ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime,
req->shareable, VMW_RES_SURFACE,
&vmw_user_surface_base_release);
@@ -1186,7 +1189,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
- submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
+ submit_size = sizeof(*cmd1) + (res->guest_memory_dirty ? sizeof(*cmd2) : 0);
cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(!cmd1))
@@ -1196,7 +1199,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
cmd1->header.size = sizeof(cmd1->body);
cmd1->body.sid = res->id;
cmd1->body.mobid = bo->resource->start;
- if (res->backup_dirty) {
+ if (res->guest_memory_dirty) {
cmd2 = (void *) &cmd1[1];
cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
cmd2->header.size = sizeof(cmd2->body);
@@ -1204,12 +1207,12 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
}
vmw_cmd_commit(dev_priv, submit_size);
- if (res->backup->dirty && res->backup_dirty) {
+ if (res->guest_memory_bo->dirty && res->guest_memory_dirty) {
/* We've just made a full upload. Clear dirty regions. */
vmw_bo_dirty_clear_res(res);
}
- res->backup_dirty = false;
+ res->guest_memory_dirty = false;
return 0;
}
@@ -1505,11 +1508,11 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle,
- &res->backup);
+ &res->guest_memory_bo);
if (ret == 0) {
- if (res->backup->base.base.size < res->backup_size) {
+ if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) {
VMW_DEBUG_USER("Surface backup buffer too small.\n");
- vmw_bo_unreference(&res->backup);
+ vmw_bo_unreference(&res->guest_memory_bo);
ret = -EINVAL;
goto out_unlock;
} else {
@@ -1520,11 +1523,11 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
(drm_vmw_surface_flag_create_buffer |
drm_vmw_surface_flag_coherent)) {
ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
- res->backup_size,
+ res->guest_memory_size,
&backup_handle,
- &res->backup);
+ &res->guest_memory_bo);
if (ret == 0)
- vmw_bo_reference(res->backup);
+ vmw_bo_reference(res->guest_memory_bo);
}
if (unlikely(ret != 0)) {
@@ -1533,9 +1536,9 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
}
if (req->base.drm_surface_flags & drm_vmw_surface_flag_coherent) {
- struct vmw_buffer_object *backup = res->backup;
+ struct vmw_bo *backup = res->guest_memory_bo;
- ttm_bo_reserve(&backup->base, false, false, NULL);
+ ttm_bo_reserve(&backup->tbo, false, false, NULL);
if (!res->func->dirty_alloc)
ret = -EINVAL;
if (!ret)
@@ -1544,7 +1547,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
res->coherent = true;
ret = res->func->dirty_alloc(res);
}
- ttm_bo_unreserve(&backup->base);
+ ttm_bo_unreserve(&backup->tbo);
if (ret) {
vmw_resource_unreference(&res);
goto out_unlock;
@@ -1553,7 +1556,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
}
tmp = vmw_resource_reference(res);
- ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
+ ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime,
req->base.drm_surface_flags &
drm_vmw_surface_flag_shareable,
VMW_RES_SURFACE,
@@ -1566,11 +1569,11 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
}
rep->handle = user_srf->prime.base.handle;
- rep->backup_size = res->backup_size;
- if (res->backup) {
+ rep->backup_size = res->guest_memory_size;
+ if (res->guest_memory_bo) {
rep->buffer_map_handle =
- drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
- rep->buffer_size = res->backup->base.base.size;
+ drm_vma_node_offset_addr(&res->guest_memory_bo->tbo.base.vma_node);
+ rep->buffer_size = res->guest_memory_bo->tbo.base.size;
rep->buffer_handle = backup_handle;
} else {
rep->buffer_map_handle = 0;
@@ -1613,14 +1616,14 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
user_srf = container_of(base, struct vmw_user_surface, prime.base);
srf = &user_srf->srf;
- if (!srf->res.backup) {
+ if (!srf->res.guest_memory_bo) {
DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
goto out_bad_resource;
}
metadata = &srf->metadata;
mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
- ret = drm_gem_handle_create(file_priv, &srf->res.backup->base.base,
+ ret = drm_gem_handle_create(file_priv, &srf->res.guest_memory_bo->tbo.base,
&backup_handle);
mutex_unlock(&dev_priv->cmdbuf_mutex);
if (ret != 0) {
@@ -1639,11 +1642,11 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
rep->creq.base.buffer_handle = backup_handle;
rep->creq.base.base_size = metadata->base_size;
rep->crep.handle = user_srf->prime.base.handle;
- rep->crep.backup_size = srf->res.backup_size;
+ rep->crep.backup_size = srf->res.guest_memory_size;
rep->crep.buffer_handle = backup_handle;
rep->crep.buffer_map_handle =
- drm_vma_node_offset_addr(&srf->res.backup->base.base.vma_node);
- rep->crep.buffer_size = srf->res.backup->base.base.size;
+ drm_vma_node_offset_addr(&srf->res.guest_memory_bo->tbo.base.vma_node);
+ rep->crep.buffer_size = srf->res.guest_memory_bo->tbo.base.size;
rep->creq.version = drm_vmw_gb_surface_v1;
rep->creq.svga3d_flags_upper_32_bits =
@@ -1742,12 +1745,12 @@ static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res,
{
struct vmw_surface_dirty *dirty =
(struct vmw_surface_dirty *) res->dirty;
- size_t backup_end = res->backup_offset + res->backup_size;
+ size_t backup_end = res->guest_memory_offset + res->guest_memory_size;
struct vmw_surface_loc loc1, loc2;
const struct vmw_surface_cache *cache;
- start = max_t(size_t, start, res->backup_offset) - res->backup_offset;
- end = min(end, backup_end) - res->backup_offset;
+ start = max_t(size_t, start, res->guest_memory_offset) - res->guest_memory_offset;
+ end = min(end, backup_end) - res->guest_memory_offset;
cache = &dirty->cache;
vmw_surface_get_loc(cache, &loc1, start);
vmw_surface_get_loc(cache, &loc2, end - 1);
@@ -1794,13 +1797,13 @@ static void vmw_surface_buf_dirty_range_add(struct vmw_resource *res,
struct vmw_surface_dirty *dirty =
(struct vmw_surface_dirty *) res->dirty;
const struct vmw_surface_cache *cache = &dirty->cache;
- size_t backup_end = res->backup_offset + cache->mip_chain_bytes;
+ size_t backup_end = res->guest_memory_offset + cache->mip_chain_bytes;
SVGA3dBox *box = &dirty->boxes[0];
u32 box_c2;
box->h = box->d = 1;
- start = max_t(size_t, start, res->backup_offset) - res->backup_offset;
- end = min(end, backup_end) - res->backup_offset;
+ start = max_t(size_t, start, res->guest_memory_offset) - res->guest_memory_offset;
+ end = min(end, backup_end) - res->guest_memory_offset;
box_c2 = box->x + box->w;
if (box->w == 0 || box->x > start)
box->x = start;
@@ -1816,8 +1819,8 @@ static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
{
struct vmw_surface *srf = vmw_res_to_srf(res);
- if (WARN_ON(end <= res->backup_offset ||
- start >= res->backup_offset + res->backup_size))
+ if (WARN_ON(end <= res->guest_memory_offset ||
+ start >= res->guest_memory_offset + res->guest_memory_size))
return;
if (srf->metadata.format == SVGA3D_BUFFER)
@@ -2074,7 +2077,7 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv,
if (metadata->flags & SVGA3D_SURFACE_MULTISAMPLE)
sample_count = metadata->multisample_count;
- srf->res.backup_size =
+ srf->res.guest_memory_size =
vmw_surface_get_serialized_size_extended(
metadata->format,
metadata->base_size,
@@ -2083,7 +2086,7 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv,
sample_count);
if (metadata->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
- srf->res.backup_size += sizeof(SVGA3dDXSOState);
+ srf->res.guest_memory_size += sizeof(SVGA3dDXSOState);
/*
* Don't set SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface with