Diffstat (limited to 'drivers/gpu/drm/virtio')
-rw-r--r--  drivers/gpu/drm/virtio/Makefile            |   2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_debugfs.c   |  26
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_display.c   |  12
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c       |   6
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h       |  79
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ioctl.c     | 185
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_kms.c       |  34
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_object.c    |  42
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_plane.c     |  23
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_prime.c     |  48
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c        | 154
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vram.c      | 164

12 files changed, 705 insertions, 70 deletions
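In short, this series wires up the virtio-gpu blob-resource protocol (VIRTIO_GPU_F_RESOURCE_BLOB): a new VIRTGPU_RESOURCE_CREATE_BLOB ioctl creates resources whose backing can live in guest memory, host memory, or both, and host-only blobs are placed in a virtio shared-memory window (the "host visible region") and exposed as mappable VRAM-style GEM objects. As a rough orientation, a hypothetical userspace caller might drive the new ioctl as below; this is a minimal sketch against the uapi the series adds in drm/virtgpu_drm.h, not code from the patch, and the fd setup and error reporting are elided.

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/virtgpu_drm.h>

    static int create_guest_blob(int fd, uint64_t size, uint32_t *bo_handle)
    {
            /* VIRTGPU_BLOB_MEM_GUEST: backing pages allocated in the guest;
             * VIRTGPU_BLOB_FLAG_USE_MAPPABLE: we intend to mmap it later. */
            struct drm_virtgpu_resource_create_blob blob = {
                    .blob_mem   = VIRTGPU_BLOB_MEM_GUEST,
                    .blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE,
                    .size       = size,
            };

            if (ioctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &blob))
                    return -1;

            *bo_handle = blob.bo_handle;    /* GEM handle for later use */
            return 0;
    }

HOST3D blobs additionally carry a blob_id plus an optional command buffer, which verify_blob() below validates before the object is created.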
diff --git a/drivers/gpu/drm/virtio/Makefile b/drivers/gpu/drm/virtio/Makefile
index 92aa2b3d349d..b99fa4a73b68 100644
--- a/drivers/gpu/drm/virtio/Makefile
+++ b/drivers/gpu/drm/virtio/Makefile
@@ -3,7 +3,7 @@
 # Makefile for the drm device driver. This driver provides support for the
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
-virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_gem.o \
+virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_gem.o virtgpu_vram.o \
 	virtgpu_display.o virtgpu_vq.o \
 	virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \
 	virtgpu_ioctl.o virtgpu_prime.o virtgpu_trace_points.o
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index d5b0c543bd6d..f336a8fa6666 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -42,15 +42,21 @@ static void virtio_add_int(struct seq_file *m, const char *name,
 
 static int virtio_gpu_features(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
 	struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
 
 	virtio_add_bool(m, "virgl", vgdev->has_virgl_3d);
 	virtio_add_bool(m, "edid", vgdev->has_edid);
 	virtio_add_bool(m, "indirect", vgdev->has_indirect);
 	virtio_add_bool(m, "resource uuid", vgdev->has_resource_assign_uuid);
+	virtio_add_bool(m, "blob resources", vgdev->has_resource_blob);
 	virtio_add_int(m, "cap sets", vgdev->num_capsets);
 	virtio_add_int(m, "scanouts", vgdev->num_scanouts);
+	if (vgdev->host_visible_region.len) {
+		seq_printf(m, "%-16s : 0x%lx +0x%lx\n", "host visible region",
+			   (unsigned long)vgdev->host_visible_region.addr,
+			   (unsigned long)vgdev->host_visible_region.len);
+	}
 	return 0;
 }
 
@@ -66,9 +72,27 @@ virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data)
 	return 0;
 }
 
+static int
+virtio_gpu_debugfs_host_visible_mm(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
+	struct drm_printer p;
+
+	if (!vgdev->has_host_visible) {
+		seq_puts(m, "Host allocations not visible to guest\n");
+		return 0;
+	}
+
+	p = drm_seq_file_printer(m);
+	drm_mm_print(&vgdev->host_visible_mm, &p);
+	return 0;
+}
+
 static struct drm_info_list virtio_gpu_debugfs_list[] = {
 	{ "virtio-gpu-features", virtio_gpu_features },
 	{ "virtio-gpu-irq-fence", virtio_gpu_debugfs_irq_info, 0, NULL },
+	{ "virtio-gpu-host-visible-mm", virtio_gpu_debugfs_host_visible_mm },
 };
 
 #define VIRTIO_GPU_DEBUGFS_ENTRIES ARRAY_SIZE(virtio_gpu_debugfs_list)
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index f84b7e61311b..a6caebd4a0dd 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -95,12 +95,12 @@ static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
 }
 
 static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc,
-					   struct drm_crtc_state *old_state)
+					   struct drm_atomic_state *state)
 {
 }
 
 static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
-					    struct drm_crtc_state *old_state)
+					    struct drm_atomic_state *state)
 {
 	struct drm_device *dev = crtc->dev;
 	struct virtio_gpu_device *vgdev = dev->dev_private;
@@ -111,14 +111,16 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
 }
 
 static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
-					struct drm_crtc_state *state)
+					struct drm_atomic_state *state)
 {
 	return 0;
 }
 
 static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
-					 struct drm_crtc_state *old_state)
+					 struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+									   crtc);
 	struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
 
 	/*
@@ -127,7 +129,7 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
 	 * in the plane update callback, and here we just check
 	 * whenever we must force the modeset.
 	 */
-	if (drm_atomic_crtc_needs_modeset(crtc->state)) {
+	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
 		output->needs_modeset = true;
 	}
 }
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index b039f493bda9..27f13bd29c13 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -37,7 +37,7 @@
 
 #include "virtgpu_drv.h"
 
-static struct drm_driver driver;
+static const struct drm_driver driver;
 
 static int virtio_gpu_modeset = -1;
 
@@ -166,6 +166,7 @@ static unsigned int features[] = {
 #endif
 	VIRTIO_GPU_F_EDID,
 	VIRTIO_GPU_F_RESOURCE_UUID,
+	VIRTIO_GPU_F_RESOURCE_BLOB,
 };
 static struct virtio_driver virtio_gpu_driver = {
 	.feature_table = features,
@@ -189,7 +190,7 @@ MODULE_AUTHOR("Alon Levy");
 
 DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
 
-static struct drm_driver driver = {
+static const struct drm_driver driver = {
 	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
 	.open = virtio_gpu_driver_open,
 	.postclose = virtio_gpu_driver_postclose,
@@ -203,7 +204,6 @@ static struct drm_driver driver = {
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_mmap = drm_gem_prime_mmap,
-	.gem_prime_export = virtgpu_gem_prime_export,
 	.gem_prime_import = virtgpu_gem_prime_import,
 	.gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 55c34b4fc3e9..3c0e17212c33 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -35,6 +35,7 @@
 #include <drm/drm_drv.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_gem_shmem_helper.h>
 #include <drm/drm_ioctl.h>
@@ -49,18 +50,21 @@
 #define DRIVER_MINOR 1
 #define DRIVER_PATCHLEVEL 0
 
-#define UUID_INITIALIZING 0
-#define UUID_INITIALIZED 1
-#define UUID_INITIALIZATION_FAILED 2
+#define STATE_INITIALIZING 0
+#define STATE_OK 1
+#define STATE_ERR 2
 
 struct virtio_gpu_object_params {
-	uint32_t format;
-	uint32_t width;
-	uint32_t height;
 	unsigned long size;
 	bool dumb;
 	/* 3d */
 	bool virgl;
+	bool blob;
+
+	/* classic resources only */
+	uint32_t format;
+	uint32_t width;
+	uint32_t height;
 	uint32_t target;
 	uint32_t bind;
 	uint32_t depth;
@@ -68,6 +72,12 @@ struct virtio_gpu_object_params {
 	uint32_t last_level;
 	uint32_t nr_samples;
 	uint32_t flags;
+
+	/* blob resources only */
+	uint32_t ctx_id;
+	uint32_t blob_mem;
+	uint32_t blob_flags;
+	uint64_t blob_id;
 };
 
 struct virtio_gpu_object {
@@ -75,6 +85,8 @@ struct virtio_gpu_object {
 	uint32_t hw_res_handle;
 	bool dumb;
 	bool created;
+	bool host3d_blob, guest_blob;
+	uint32_t blob_mem, blob_flags;
 
 	int uuid_state;
 	uuid_t uuid;
@@ -88,9 +100,19 @@ struct virtio_gpu_object_shmem {
 	uint32_t mapped;
 };
 
+struct virtio_gpu_object_vram {
+	struct virtio_gpu_object base;
+	uint32_t map_state;
+	uint32_t map_info;
+	struct drm_mm_node vram_node;
+};
+
 #define to_virtio_gpu_shmem(virtio_gpu_object) \
 	container_of((virtio_gpu_object), struct virtio_gpu_object_shmem, base)
 
+#define to_virtio_gpu_vram(virtio_gpu_object) \
+	container_of((virtio_gpu_object), struct virtio_gpu_object_vram, base)
+
 struct virtio_gpu_object_array {
 	struct ww_acquire_ctx ticket;
 	struct list_head next;
@@ -208,6 +230,10 @@ struct virtio_gpu_device {
 	bool has_edid;
 	bool has_indirect;
 	bool has_resource_assign_uuid;
+	bool has_resource_blob;
+	bool has_host_visible;
+	struct virtio_shm_region host_visible_region;
+	struct drm_mm host_visible_mm;
 
 	struct work_struct config_changed_work;
 
@@ -219,8 +245,10 @@ struct virtio_gpu_device {
 	uint32_t num_capsets;
 	struct list_head cap_cache;
 
-	/* protects resource state when exporting */
+	/* protects uuid state when exporting */
 	spinlock_t resource_export_lock;
+	/* protects map state and host_visible_mm */
+	spinlock_t host_visible_lock;
 };
 
 struct virtio_gpu_fpriv {
@@ -229,8 +257,8 @@ struct virtio_gpu_fpriv {
 	struct mutex context_lock;
 };
 
-/* virtgpu_ioctl.c */
-#define DRM_VIRTIO_NUM_IOCTLS 10
+/* virtio_ioctl.c */
+#define DRM_VIRTIO_NUM_IOCTLS 11
 extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
 
 void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);
@@ -323,12 +351,16 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 					  uint32_t ctx_id,
 					  uint64_t offset, uint32_t level,
+					  uint32_t stride,
+					  uint32_t layer_stride,
 					  struct drm_virtgpu_3d_box *box,
 					  struct virtio_gpu_object_array *objs,
 					  struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 					uint32_t ctx_id,
 					uint64_t offset, uint32_t level,
+					uint32_t stride,
+					uint32_t layer_stride,
 					struct drm_virtgpu_3d_box *box,
 					struct virtio_gpu_object_array *objs,
 					struct virtio_gpu_fence *fence);
@@ -351,6 +383,26 @@ int virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
 					struct virtio_gpu_object_array *objs);
 
+int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
+		       struct virtio_gpu_object_array *objs, uint64_t offset);
+
+void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
+			  struct virtio_gpu_object *bo);
+
+void
+virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
+				    struct virtio_gpu_object *bo,
+				    struct virtio_gpu_object_params *params,
+				    struct virtio_gpu_mem_entry *ents,
+				    uint32_t nents);
+void
+virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
+				uint32_t scanout_id,
+				struct virtio_gpu_object *bo,
+				struct drm_framebuffer *fb,
+				uint32_t width, uint32_t height,
+				uint32_t x, uint32_t y);
+
 /* virtgpu_display.c */
 int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
 void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
@@ -381,7 +433,11 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 
 bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo);
 
+int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
+			       uint32_t *resid);
 /* virtgpu_prime.c */
+int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
+				    struct virtio_gpu_object *bo);
 struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
 					 int flags);
 struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
@@ -395,4 +451,9 @@ struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
 /* virtgpu_debugfs.c */
 void virtio_gpu_debugfs_init(struct drm_minor *minor);
 
+/* virtgpu_vram.c */
+bool virtio_gpu_is_vram(struct virtio_gpu_object *bo);
+int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
+			   struct virtio_gpu_object_params *params,
+			   struct virtio_gpu_object **bo_ptr);
 #endif
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index c8da7adc6b30..5417f365d1a3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -34,6 +34,10 @@
 
 #include "virtgpu_drv.h"
 
+#define VIRTGPU_BLOB_FLAG_USE_MASK (VIRTGPU_BLOB_FLAG_USE_MAPPABLE | \
+				    VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
+				    VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)
+
 void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
 {
 	struct virtio_gpu_device *vgdev = dev->dev_private;
@@ -208,11 +212,20 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
 
 	switch (param->param) {
 	case VIRTGPU_PARAM_3D_FEATURES:
-		value = vgdev->has_virgl_3d == true ? 1 : 0;
+		value = vgdev->has_virgl_3d ? 1 : 0;
 		break;
 	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
 		value = 1;
 		break;
+	case VIRTGPU_PARAM_RESOURCE_BLOB:
+		value = vgdev->has_resource_blob ? 1 : 0;
+		break;
+	case VIRTGPU_PARAM_HOST_VISIBLE:
+		value = vgdev->has_host_visible ? 1 : 0;
+		break;
+	case VIRTGPU_PARAM_CROSS_DEVICE:
+		value = vgdev->has_resource_assign_uuid ? 1 : 0;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -301,6 +314,9 @@ static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
 	ri->size = qobj->base.base.size;
 	ri->res_handle = qobj->hw_res_handle;
+	if (qobj->host3d_blob || qobj->guest_blob)
+		ri->blob_mem = qobj->blob_mem;
+
 	drm_gem_object_put(gobj);
 	return 0;
 }
@@ -312,6 +328,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
 	struct drm_virtgpu_3d_transfer_from_host *args = data;
+	struct virtio_gpu_object *bo;
 	struct virtio_gpu_object_array *objs;
 	struct virtio_gpu_fence *fence;
 	int ret;
@@ -325,6 +342,17 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 	if (objs == NULL)
 		return -ENOENT;
 
+	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+	if (bo->guest_blob && !bo->host3d_blob) {
+		ret = -EINVAL;
+		goto err_put_free;
+	}
+
+	if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
+		ret = -EINVAL;
+		goto err_put_free;
+	}
+
 	ret = virtio_gpu_array_lock_resv(objs);
 	if (ret != 0)
 		goto err_put_free;
@@ -334,9 +362,10 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 		ret = -ENOMEM;
 		goto err_unlock;
 	}
+
 	virtio_gpu_cmd_transfer_from_host_3d
-		(vgdev, vfpriv->ctx_id, offset, args->level,
-		 &args->box, objs, fence);
+		(vgdev, vfpriv->ctx_id, offset, args->level, args->stride,
+		 args->layer_stride, &args->box, objs, fence);
 	dma_fence_put(&fence->f);
 	virtio_gpu_notify(vgdev);
 	return 0;
@@ -354,6 +383,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
 	struct drm_virtgpu_3d_transfer_to_host *args = data;
+	struct virtio_gpu_object *bo;
 	struct virtio_gpu_object_array *objs;
 	struct virtio_gpu_fence *fence;
 	int ret;
@@ -363,6 +393,12 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 	if (objs == NULL)
 		return -ENOENT;
 
+	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+	if (bo->guest_blob && !bo->host3d_blob) {
+		ret = -EINVAL;
+		goto err_put_free;
+	}
+
 	if (!vgdev->has_virgl_3d) {
 		virtio_gpu_cmd_transfer_to_host_2d
 			(vgdev, offset,
@@ -370,6 +406,12 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 			 objs, NULL);
 	} else {
 		virtio_gpu_create_context(dev, file);
+
+		if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
+			ret = -EINVAL;
+			goto err_put_free;
+		}
+
 		ret = virtio_gpu_array_lock_resv(objs);
 		if (ret != 0)
 			goto err_put_free;
@@ -381,8 +423,9 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 
 		virtio_gpu_cmd_transfer_to_host_3d
 			(vgdev,
-			 vfpriv ? vfpriv->ctx_id : 0, offset,
-			 args->level, &args->box, objs, fence);
+			 vfpriv ? vfpriv->ctx_id : 0, offset, args->level,
+			 args->stride, args->layer_stride, &args->box, objs,
+			 fence);
 		dma_fence_put(&fence->f);
 	}
 	virtio_gpu_notify(vgdev);
@@ -491,6 +534,134 @@ copy_exit:
 	return 0;
 }
 
+static int verify_blob(struct virtio_gpu_device *vgdev,
+		       struct virtio_gpu_fpriv *vfpriv,
+		       struct virtio_gpu_object_params *params,
+		       struct drm_virtgpu_resource_create_blob *rc_blob,
+		       bool *guest_blob, bool *host3d_blob)
+{
+	if (!vgdev->has_resource_blob)
+		return -EINVAL;
+
+	if ((rc_blob->blob_flags & ~VIRTGPU_BLOB_FLAG_USE_MASK) ||
+	    !rc_blob->blob_flags)
+		return -EINVAL;
+
+	if (rc_blob->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
+		if (!vgdev->has_resource_assign_uuid)
+			return -EINVAL;
+	}
+
+	switch (rc_blob->blob_mem) {
+	case VIRTGPU_BLOB_MEM_GUEST:
+		*guest_blob = true;
+		break;
+	case VIRTGPU_BLOB_MEM_HOST3D_GUEST:
+		*guest_blob = true;
+		fallthrough;
+	case VIRTGPU_BLOB_MEM_HOST3D:
+		*host3d_blob = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (*host3d_blob) {
+		if (!vgdev->has_virgl_3d)
+			return -EINVAL;
+
+		/* Must be dword aligned. */
+		if (rc_blob->cmd_size % 4 != 0)
+			return -EINVAL;
+
+		params->ctx_id = vfpriv->ctx_id;
+		params->blob_id = rc_blob->blob_id;
+	} else {
+		if (rc_blob->blob_id != 0)
+			return -EINVAL;
+
+		if (rc_blob->cmd_size != 0)
+			return -EINVAL;
+	}
+
+	params->blob_mem = rc_blob->blob_mem;
+	params->size = rc_blob->size;
+	params->blob = true;
+	params->blob_flags = rc_blob->blob_flags;
+	return 0;
+}
+
+static int virtio_gpu_resource_create_blob(struct drm_device *dev,
+					   void *data, struct drm_file *file)
+{
+	int ret = 0;
+	uint32_t handle = 0;
+	bool guest_blob = false;
+	bool host3d_blob = false;
+	struct drm_gem_object *obj;
+	struct virtio_gpu_object *bo;
+	struct virtio_gpu_object_params params = { 0 };
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+	struct drm_virtgpu_resource_create_blob *rc_blob = data;
+
+	if (verify_blob(vgdev, vfpriv, &params, rc_blob,
+			&guest_blob, &host3d_blob))
+		return -EINVAL;
+
+	if (vgdev->has_virgl_3d)
+		virtio_gpu_create_context(dev, file);
+
+	if (rc_blob->cmd_size) {
+		void *buf;
+
+		buf = memdup_user(u64_to_user_ptr(rc_blob->cmd),
+				  rc_blob->cmd_size);
+
+		if (IS_ERR(buf))
+			return PTR_ERR(buf);
+
+		virtio_gpu_cmd_submit(vgdev, buf, rc_blob->cmd_size,
+				      vfpriv->ctx_id, NULL, NULL);
+	}
+
+	if (guest_blob)
+		ret = virtio_gpu_object_create(vgdev, &params, &bo, NULL);
+	else if (!guest_blob && host3d_blob)
+		ret = virtio_gpu_vram_create(vgdev, &params, &bo);
+	else
+		return -EINVAL;
+
+	if (ret < 0)
+		return ret;
+
+	bo->guest_blob = guest_blob;
+	bo->host3d_blob = host3d_blob;
+	bo->blob_mem = rc_blob->blob_mem;
+	bo->blob_flags = rc_blob->blob_flags;
+
+	obj = &bo->base.base;
+	if (params.blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
+		ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
+		if (ret) {
+			drm_gem_object_release(obj);
+			return ret;
+		}
+	}
+
+	ret = drm_gem_handle_create(file, obj, &handle);
+	if (ret) {
+		drm_gem_object_release(obj);
+		return ret;
+	}
+	drm_gem_object_put(obj);
+
+	rc_blob->res_handle = bo->hw_res_handle;
+	rc_blob->bo_handle = handle;
+
+	return 0;
+}
+
 struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
 	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
 			  DRM_RENDER_ALLOW),
@@ -523,4 +694,8 @@ struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
 
 	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
 			  DRM_RENDER_ALLOW),
+
+	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
+			  virtio_gpu_resource_create_blob,
+			  DRM_RENDER_ALLOW),
 };
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index eed57a931309..b4ec479c32cd 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -121,6 +121,7 @@ int virtio_gpu_init(struct drm_device *dev)
 
 	spin_lock_init(&vgdev->display_info_lock);
 	spin_lock_init(&vgdev->resource_export_lock);
+	spin_lock_init(&vgdev->host_visible_lock);
 	ida_init(&vgdev->ctx_id_ida);
 	ida_init(&vgdev->resource_ida);
 	init_waitqueue_head(&vgdev->resp_wq);
@@ -152,10 +153,33 @@ int virtio_gpu_init(struct drm_device *dev)
 	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) {
 		vgdev->has_resource_assign_uuid = true;
 	}
+	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB)) {
+		vgdev->has_resource_blob = true;
+	}
+	if (virtio_get_shm_region(vgdev->vdev, &vgdev->host_visible_region,
+				  VIRTIO_GPU_SHM_ID_HOST_VISIBLE)) {
+		if (!devm_request_mem_region(&vgdev->vdev->dev,
+					     vgdev->host_visible_region.addr,
+					     vgdev->host_visible_region.len,
+					     dev_name(&vgdev->vdev->dev))) {
+			DRM_ERROR("Could not reserve host visible region\n");
+			goto err_vqs;
+		}
+
+		DRM_INFO("Host memory window: 0x%lx +0x%lx\n",
+			 (unsigned long)vgdev->host_visible_region.addr,
+			 (unsigned long)vgdev->host_visible_region.len);
+		vgdev->has_host_visible = true;
+		drm_mm_init(&vgdev->host_visible_mm,
+			    (unsigned long)vgdev->host_visible_region.addr,
+			    (unsigned long)vgdev->host_visible_region.len);
+	}
 
-	DRM_INFO("features: %cvirgl %cedid\n",
-		 vgdev->has_virgl_3d ? '+' : '-',
-		 vgdev->has_edid ? '+' : '-');
+	DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible\n",
+		 vgdev->has_virgl_3d ? '+' : '-',
+		 vgdev->has_edid ? '+' : '-',
+		 vgdev->has_resource_blob ? '+' : '-',
+		 vgdev->has_host_visible ? '+' : '-');
 
 	ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
 	if (ret) {
@@ -242,6 +266,10 @@ void virtio_gpu_release(struct drm_device *dev)
 	virtio_gpu_modeset_fini(vgdev);
 	virtio_gpu_free_vbufs(vgdev);
 	virtio_gpu_cleanup_cap_cache(vgdev);
+
+	if (vgdev->has_host_visible)
+		drm_mm_takedown(&vgdev->host_visible_mm);
+
 	kfree(vgdev->capsets);
 	kfree(vgdev);
 }
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 00d6b95e259d..d9ad27e00905 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -31,8 +31,7 @@
 static int virtio_gpu_virglrenderer_workaround = 1;
 module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);
 
-static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
-				       uint32_t *resid)
+int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
 {
 	if (virtio_gpu_virglrenderer_workaround) {
 		/*
@@ -84,6 +83,18 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
 		}
 
 		drm_gem_shmem_free_object(&bo->base.base);
+	} else if (virtio_gpu_is_vram(bo)) {
+		struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+
+		spin_lock(&vgdev->host_visible_lock);
+		if (drm_mm_node_allocated(&vram->vram_node))
+			drm_mm_remove_node(&vram->vram_node);
+
+		spin_unlock(&vgdev->host_visible_lock);
+
+		drm_gem_free_mmap_offset(&vram->base.base.base);
+		drm_gem_object_release(&vram->base.base.base);
+		kfree(vram);
 	}
 }
 
@@ -107,6 +118,7 @@ static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
 	.close = virtio_gpu_gem_object_close,
 	.print_info = drm_gem_shmem_print_info,
+	.export = virtgpu_gem_prime_export,
 	.pin = drm_gem_shmem_pin,
 	.unpin = drm_gem_shmem_unpin,
 	.get_sg_table = drm_gem_shmem_get_sg_table,
@@ -172,8 +184,9 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
 		*nents = shmem->pages->orig_nents;
 	}
 
-	*ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
-			      GFP_KERNEL);
+	*ents = kvmalloc_array(*nents,
+			       sizeof(struct virtio_gpu_mem_entry),
+			       GFP_KERNEL);
 	if (!(*ents)) {
 		DRM_ERROR("failed to allocate ent list\n");
 		return -ENOMEM;
@@ -234,21 +247,24 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 		goto err_put_objs;
 	}
 
-	if (params->virgl) {
-		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
-						  objs, fence);
-	} else {
-		virtio_gpu_cmd_create_resource(vgdev, bo, params,
-					       objs, fence);
-	}
-
 	ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
 	if (ret != 0) {
 		virtio_gpu_free_object(&shmem_obj->base);
 		return ret;
 	}
 
-	virtio_gpu_object_attach(vgdev, bo, ents, nents);
+	if (params->blob) {
+		virtio_gpu_cmd_resource_create_blob(vgdev, bo, params,
+						    ents, nents);
+	} else if (params->virgl) {
+		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
+						  objs, fence);
+		virtio_gpu_object_attach(vgdev, bo, ents, nents);
+	} else {
+		virtio_gpu_cmd_create_resource(vgdev, bo, params,
+					       objs, fence);
+		virtio_gpu_object_attach(vgdev, bo, ents, nents);
+	}
 
 	*bo_ptr = bo;
 	return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 6a311cd93440..42ac08ed1442 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -174,12 +174,23 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
 			  plane->state->src_h >> 16,
 			  plane->state->src_x >> 16,
 			  plane->state->src_y >> 16);
-		virtio_gpu_cmd_set_scanout(vgdev, output->index,
-					   bo->hw_res_handle,
-					   plane->state->src_w >> 16,
-					   plane->state->src_h >> 16,
-					   plane->state->src_x >> 16,
-					   plane->state->src_y >> 16);
+
+		if (bo->host3d_blob || bo->guest_blob) {
+			virtio_gpu_cmd_set_scanout_blob
+						(vgdev, output->index, bo,
+						 plane->state->fb,
+						 plane->state->src_w >> 16,
+						 plane->state->src_h >> 16,
+						 plane->state->src_x >> 16,
+						 plane->state->src_y >> 16);
+		} else {
+			virtio_gpu_cmd_set_scanout(vgdev, output->index,
+						   bo->hw_res_handle,
+						   plane->state->src_w >> 16,
+						   plane->state->src_h >> 16,
+						   plane->state->src_x >> 16,
+						   plane->state->src_y >> 16);
+		}
 	}
 
 	virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle,
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index acd14ef73d56..807a27a16365 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -34,8 +34,8 @@ static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
 	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
 	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
 
-	wait_event(vgdev->resp_wq, bo->uuid_state != UUID_INITIALIZING);
-	if (bo->uuid_state != UUID_INITIALIZED)
+	wait_event(vgdev->resp_wq, bo->uuid_state != STATE_INITIALIZING);
+	if (bo->uuid_state != STATE_OK)
 		return -ENODEV;
 
 	uuid_copy(uuid, &bo->uuid);
@@ -43,7 +43,7 @@ static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
 	return 0;
 }
 
-const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
+static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
 	.ops = {
 		.cache_sgt_mapping = true,
 		.attach = virtio_dma_buf_attach,
@@ -59,6 +59,24 @@ const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
 	.get_uuid = virtgpu_virtio_get_uuid,
 };
 
+int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
+				    struct virtio_gpu_object *bo)
+{
+	int ret;
+	struct virtio_gpu_object_array *objs;
+
+	objs = virtio_gpu_array_alloc(1);
+	if (!objs)
+		return -ENOMEM;
+
+	virtio_gpu_array_add_obj(objs, &bo->base.base);
+	ret = virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
 struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
 					 int flags)
 {
@@ -66,22 +84,20 @@ struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
 	struct drm_device *dev = obj->dev;
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
-	struct virtio_gpu_object_array *objs;
 	int ret = 0;
+	bool blob = bo->host3d_blob || bo->guest_blob;
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 
-	if (vgdev->has_resource_assign_uuid) {
-		objs = virtio_gpu_array_alloc(1);
-		if (!objs)
-			return ERR_PTR(-ENOMEM);
-		virtio_gpu_array_add_obj(objs, &bo->base.base);
-
-		ret = virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
-		if (ret)
-			return ERR_PTR(ret);
-		virtio_gpu_notify(vgdev);
-	} else {
-		bo->uuid_state = UUID_INITIALIZATION_FAILED;
+	if (!blob) {
+		if (vgdev->has_resource_assign_uuid) {
+			ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
+			if (ret)
+				return ERR_PTR(ret);
+
+			virtio_gpu_notify(vgdev);
+		} else {
+			bo->uuid_state = STATE_ERR;
+		}
 	}
 
 	exp_info.ops = &virtgpu_dmabuf_ops.ops;
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 07945ca238e2..cf84d382dd41 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -1016,6 +1016,8 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 					uint32_t ctx_id,
 					uint64_t offset, uint32_t level,
+					uint32_t stride,
+					uint32_t layer_stride,
 					struct drm_virtgpu_3d_box *box,
 					struct virtio_gpu_object_array *objs,
 					struct virtio_gpu_fence *fence)
@@ -1024,11 +1026,12 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 	struct virtio_gpu_transfer_host_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
 	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
-	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 
-	if (use_dma_api)
+	if (virtio_gpu_is_shmem(bo) && use_dma_api) {
+		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
 					    shmem->pages, DMA_TO_DEVICE);
+	}
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 	memset(cmd_p, 0, sizeof(*cmd_p));
@@ -1041,6 +1044,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 	convert_to_hw_box(&cmd_p->box, box);
 	cmd_p->offset = cpu_to_le64(offset);
 	cmd_p->level = cpu_to_le32(level);
+	cmd_p->stride = cpu_to_le32(stride);
+	cmd_p->layer_stride = cpu_to_le32(layer_stride);
 
 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
@@ -1048,6 +1053,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 					  uint32_t ctx_id,
 					  uint64_t offset, uint32_t level,
+					  uint32_t stride,
+					  uint32_t layer_stride,
 					  struct drm_virtgpu_3d_box *box,
 					  struct virtio_gpu_object_array *objs,
 					  struct virtio_gpu_fence *fence)
@@ -1067,6 +1074,8 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 	convert_to_hw_box(&cmd_p->box, box);
 	cmd_p->offset = cpu_to_le64(offset);
 	cmd_p->level = cpu_to_le32(level);
+	cmd_p->stride = cpu_to_le32(stride);
+	cmd_p->layer_stride = cpu_to_le32(layer_stride);
 
 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
@@ -1125,14 +1134,14 @@ static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
 	uint32_t resp_type = le32_to_cpu(resp->hdr.type);
 
 	spin_lock(&vgdev->resource_export_lock);
-	WARN_ON(obj->uuid_state != UUID_INITIALIZING);
+	WARN_ON(obj->uuid_state != STATE_INITIALIZING);
 
 	if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
-	    obj->uuid_state == UUID_INITIALIZING) {
-		memcpy(&obj->uuid.b, resp->uuid, sizeof(obj->uuid.b));
-		obj->uuid_state = UUID_INITIALIZED;
+	    obj->uuid_state == STATE_INITIALIZING) {
+		import_uuid(&obj->uuid, resp->uuid);
+		obj->uuid_state = STATE_OK;
 	} else {
-		obj->uuid_state = UUID_INITIALIZATION_FAILED;
+		obj->uuid_state = STATE_ERR;
 	}
 	spin_unlock(&vgdev->resource_export_lock);
 
@@ -1151,7 +1160,7 @@ virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
 	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
 	if (!resp_buf) {
 		spin_lock(&vgdev->resource_export_lock);
-		bo->uuid_state = UUID_INITIALIZATION_FAILED;
+		bo->uuid_state = STATE_ERR;
 		spin_unlock(&vgdev->resource_export_lock);
 		virtio_gpu_array_put_free(objs);
 		return -ENOMEM;
@@ -1169,3 +1178,132 @@ virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 	return 0;
 }
+
+static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
+					   struct virtio_gpu_vbuffer *vbuf)
+{
+	struct virtio_gpu_object *bo =
+		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
+	struct virtio_gpu_resp_map_info *resp =
+		(struct virtio_gpu_resp_map_info *)vbuf->resp_buf;
+	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+	uint32_t resp_type = le32_to_cpu(resp->hdr.type);
+
+	spin_lock(&vgdev->host_visible_lock);
+
+	if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) {
+		vram->map_info = resp->map_info;
+		vram->map_state = STATE_OK;
+	} else {
+		vram->map_state = STATE_ERR;
+	}
+
+	spin_unlock(&vgdev->host_visible_lock);
+	wake_up_all(&vgdev->resp_wq);
+}
+
+int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
+		       struct virtio_gpu_object_array *objs, uint64_t offset)
+{
+	struct virtio_gpu_resource_map_blob *cmd_p;
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+	struct virtio_gpu_vbuffer *vbuf;
+	struct virtio_gpu_resp_map_info *resp_buf;
+
+	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
+	if (!resp_buf)
+		return -ENOMEM;
+
+	cmd_p = virtio_gpu_alloc_cmd_resp
+		(vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
+		 sizeof(struct virtio_gpu_resp_map_info), resp_buf);
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+	cmd_p->offset = cpu_to_le64(offset);
+	vbuf->objs = objs;
+
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+	return 0;
+}
+
+void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
+			  struct virtio_gpu_object *bo)
+{
+	struct virtio_gpu_resource_unmap_blob *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
+
+void
+virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
+				    struct virtio_gpu_object *bo,
+				    struct virtio_gpu_object_params *params,
+				    struct virtio_gpu_mem_entry *ents,
+				    uint32_t nents)
+{
+	struct virtio_gpu_resource_create_blob *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB);
+	cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+	cmd_p->blob_mem = cpu_to_le32(params->blob_mem);
+	cmd_p->blob_flags = cpu_to_le32(params->blob_flags);
+	cmd_p->blob_id = cpu_to_le64(params->blob_id);
+	cmd_p->size = cpu_to_le64(params->size);
+	cmd_p->nr_entries = cpu_to_le32(nents);
+
+	vbuf->data_buf = ents;
+	vbuf->data_size = sizeof(*ents) * nents;
+
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+	bo->created = true;
+}
+
+void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
+				     uint32_t scanout_id,
+				     struct virtio_gpu_object *bo,
+				     struct drm_framebuffer *fb,
+				     uint32_t width, uint32_t height,
+				     uint32_t x, uint32_t y)
+{
+	uint32_t i;
+	struct virtio_gpu_set_scanout_blob *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+	uint32_t format = virtio_gpu_translate_format(fb->format->format);
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+	cmd_p->scanout_id = cpu_to_le32(scanout_id);
+
+	cmd_p->format = cpu_to_le32(format);
+	cmd_p->width = cpu_to_le32(fb->width);
+	cmd_p->height = cpu_to_le32(fb->height);
+
+	for (i = 0; i < 4; i++) {
+		cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]);
+		cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]);
+	}
+
+	cmd_p->r.width = cpu_to_le32(width);
+	cmd_p->r.height = cpu_to_le32(height);
+	cmd_p->r.x = cpu_to_le32(x);
+	cmd_p->r.y = cpu_to_le32(y);
+
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
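The transfer paths above now thread stride and layer_stride from the ioctl arguments into virtio_gpu_transfer_host_3d, so userspace can describe layouts that are not tightly packed (the ioctl handlers reject non-zero strides for non-blob resources). A hypothetical caller might fill the extended drm_virtgpu_3d_transfer_to_host like this; a sketch only, with made-up handle and geometry:

    /* Upload a 256x64 sub-rectangle of a host3d blob with explicit stride. */
    struct drm_virtgpu_3d_transfer_to_host xfer = {
            .bo_handle = bo_handle,
            .box = { .w = 256, .h = 64, .d = 1 },
            .level = 0,
            .offset = 0,
            .stride = 256 * 4,      /* bytes per row; 0 means tightly packed */
    };

    if (ioctl(fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer))
            perror("VIRTGPU_TRANSFER_TO_HOST");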
diff --git a/drivers/gpu/drm/virtio/virtgpu_vram.c b/drivers/gpu/drm/virtio/virtgpu_vram.c
new file mode 100644
index 000000000000..23c21bc4d01e
--- /dev/null
+++ b/drivers/gpu/drm/virtio/virtgpu_vram.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "virtgpu_drv.h"
+
+static void virtio_gpu_vram_free(struct drm_gem_object *obj)
+{
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+	bool unmap;
+
+	if (bo->created) {
+		spin_lock(&vgdev->host_visible_lock);
+		unmap = drm_mm_node_allocated(&vram->vram_node);
+		spin_unlock(&vgdev->host_visible_lock);
+
+		if (unmap)
+			virtio_gpu_cmd_unmap(vgdev, bo);
+
+		virtio_gpu_cmd_unref_resource(vgdev, bo);
+		virtio_gpu_notify(vgdev);
+		return;
+	}
+}
+
+static const struct vm_operations_struct virtio_gpu_vram_vm_ops = {
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
+};
+
+static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
+				struct vm_area_struct *vma)
+{
+	int ret;
+	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+	unsigned long vm_size = vma->vm_end - vma->vm_start;
+
+	if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
+		return -EINVAL;
+
+	wait_event(vgdev->resp_wq, vram->map_state != STATE_INITIALIZING);
+	if (vram->map_state != STATE_OK)
+		return -EINVAL;
+
+	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
+	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+	vma->vm_ops = &virtio_gpu_vram_vm_ops;
+
+	if (vram->map_info == VIRTIO_GPU_MAP_CACHE_WC)
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	else if (vram->map_info == VIRTIO_GPU_MAP_CACHE_UNCACHED)
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	/* Partial mappings of GEM buffers don't happen much in practice. */
+	if (vm_size != vram->vram_node.size)
+		return -EINVAL;
+
+	ret = io_remap_pfn_range(vma, vma->vm_start,
+				 vram->vram_node.start >> PAGE_SHIFT,
+				 vm_size, vma->vm_page_prot);
+	return ret;
+}
+
+static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
+	.open = virtio_gpu_gem_object_open,
+	.close = virtio_gpu_gem_object_close,
+	.free = virtio_gpu_vram_free,
+	.mmap = virtio_gpu_vram_mmap,
+};
+
+bool virtio_gpu_is_vram(struct virtio_gpu_object *bo)
+{
+	return bo->base.base.funcs == &virtio_gpu_vram_funcs;
+}
+
+static int virtio_gpu_vram_map(struct virtio_gpu_object *bo)
+{
+	int ret;
+	uint64_t offset;
+	struct virtio_gpu_object_array *objs;
+	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+
+	if (!vgdev->has_host_visible)
+		return -EINVAL;
+
+	spin_lock(&vgdev->host_visible_lock);
+	ret = drm_mm_insert_node(&vgdev->host_visible_mm, &vram->vram_node,
+				 bo->base.base.size);
+	spin_unlock(&vgdev->host_visible_lock);
+
+	if (ret)
+		return ret;
+
+	objs = virtio_gpu_array_alloc(1);
+	if (!objs) {
+		ret = -ENOMEM;
+		goto err_remove_node;
+	}
+
+	virtio_gpu_array_add_obj(objs, &bo->base.base);
+	/*TODO: Add an error checking helper function in drm_mm.h */
+	offset = vram->vram_node.start - vgdev->host_visible_region.addr;
+
+	ret = virtio_gpu_cmd_map(vgdev, objs, offset);
+	if (ret) {
+		virtio_gpu_array_put_free(objs);
+		goto err_remove_node;
+	}
+
+	return 0;
+
+err_remove_node:
+	spin_lock(&vgdev->host_visible_lock);
+	drm_mm_remove_node(&vram->vram_node);
+	spin_unlock(&vgdev->host_visible_lock);
+	return ret;
+}
+
+int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
+			   struct virtio_gpu_object_params *params,
+			   struct virtio_gpu_object **bo_ptr)
+{
+	struct drm_gem_object *obj;
+	struct virtio_gpu_object_vram *vram;
+	int ret;
+
+	vram = kzalloc(sizeof(*vram), GFP_KERNEL);
+	if (!vram)
+		return -ENOMEM;
+
+	obj = &vram->base.base.base;
+	obj->funcs = &virtio_gpu_vram_funcs;
+	drm_gem_private_object_init(vgdev->ddev, obj, params->size);
+
+	/* Create fake offset */
+	ret = drm_gem_create_mmap_offset(obj);
+	if (ret) {
+		kfree(vram);
+		return ret;
+	}
+
+	ret = virtio_gpu_resource_id_get(vgdev, &vram->base.hw_res_handle);
+	if (ret) {
+		kfree(vram);
+		return ret;
+	}
+
+	virtio_gpu_cmd_resource_create_blob(vgdev, &vram->base, params, NULL,
+					    0);
+	if (params->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE) {
+		ret = virtio_gpu_vram_map(&vram->base);
+		if (ret) {
+			virtio_gpu_vram_free(obj);
+			return ret;
+		}
+	}
+
+	*bo_ptr = &vram->base;
+	return 0;
+}
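To tie the pieces together: for a HOST3D blob created with VIRTGPU_BLOB_FLAG_USE_MAPPABLE, virtio_gpu_vram_create() above reserves a range in host_visible_mm and sends VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB; once the MAP_INFO response arrives, virtio_gpu_vram_mmap() io-remaps that range into the process with the cacheability the host reported. From userspace the flow is the usual DRM one; a hypothetical sketch, assuming fd, bo_handle, and size from the create step:

    #include <sys/mman.h>

    struct drm_virtgpu_map map = { .handle = bo_handle };
    void *ptr;

    if (ioctl(fd, DRM_IOCTL_VIRTGPU_MAP, &map))
            return -1;

    /* map.offset is the fake offset from drm_gem_create_mmap_offset() */
    ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
               fd, map.offset);
    if (ptr == MAP_FAILED)
            return -1;

Note that the mmap must cover the whole object; virtio_gpu_vram_mmap() rejects partial mappings.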