Diffstat (limited to 'drivers/gpu/drm/virtio/virtgpu_plane.c')
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_plane.c  125
1 file changed, 106 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a72a2dbda031..42aa554eca9f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -26,6 +26,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <linux/virtio_dma_buf.h>
#include "virtgpu_drv.h"
@@ -66,11 +68,28 @@ uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
return format;
}
+static struct
+drm_plane_state *virtio_gpu_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct virtio_gpu_plane_state *new;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &new->base);
+
+ return &new->base;
+}
+
static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_duplicate_state = virtio_gpu_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
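
Note: the new atomic_duplicate_state hook allocates the driver's own plane-state
wrapper, which the rest of this patch reaches through to_virtio_gpu_plane_state().
Both come from a companion change to virtgpu_drv.h; the sketch below is only an
assumption of what that wrapper looks like, inferred from how it is used in this
file (an embedded drm_plane_state as "base" plus a per-state fence pointer):

struct virtio_gpu_plane_state {
	struct drm_plane_state base;
	struct virtio_gpu_fence *fence;
};

static inline struct virtio_gpu_plane_state *
to_virtio_gpu_plane_state(struct drm_plane_state *state)
{
	return container_of(state, struct virtio_gpu_plane_state, base);
}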
@@ -138,11 +157,13 @@ static void virtio_gpu_resource_flush(struct drm_plane *plane,
struct drm_device *dev = plane->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_plane_state *vgplane_st;
struct virtio_gpu_object *bo;
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+ vgplane_st = to_virtio_gpu_plane_state(plane->state);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
- if (vgfb->fence) {
+ if (vgplane_st->fence) {
struct virtio_gpu_object_array *objs;
objs = virtio_gpu_array_alloc(1);
@@ -151,13 +172,11 @@ static void virtio_gpu_resource_flush(struct drm_plane *plane,
virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
virtio_gpu_array_lock_resv(objs);
virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
- width, height, objs, vgfb->fence);
+ width, height, objs,
+ vgplane_st->fence);
virtio_gpu_notify(vgdev);
-
- dma_fence_wait_timeout(&vgfb->fence->f, true,
+ dma_fence_wait_timeout(&vgplane_st->fence->f, true,
msecs_to_jiffies(50));
- dma_fence_put(&vgfb->fence->f);
- vgfb->fence = NULL;
} else {
virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
width, height, NULL, NULL);
@@ -241,45 +260,113 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
rect.y2 - rect.y1);
}
+static int virtio_gpu_prepare_imported_obj(struct drm_plane *plane,
+ struct drm_plane_state *new_state,
+ struct drm_gem_object *obj)
+{
+ struct virtio_gpu_device *vgdev = plane->dev->dev_private;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct dma_buf_attachment *attach = obj->import_attach;
+ struct dma_resv *resv = attach->dmabuf->resv;
+ struct virtio_gpu_mem_entry *ents = NULL;
+ unsigned int nents;
+ int ret;
+
+ dma_resv_lock(resv, NULL);
+
+ ret = dma_buf_pin(attach);
+ if (ret) {
+ dma_resv_unlock(resv);
+ return ret;
+ }
+
+ if (!bo->sgt) {
+ ret = virtgpu_dma_buf_import_sgt(&ents, &nents,
+ bo, attach);
+ if (ret)
+ goto err;
+
+ virtio_gpu_object_attach(vgdev, bo, ents, nents);
+ }
+
+ dma_resv_unlock(resv);
+ return 0;
+
+err:
+ dma_buf_unpin(attach);
+ dma_resv_unlock(resv);
+ return ret;
+}
+
static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
struct drm_device *dev = plane->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_plane_state *vgplane_st;
struct virtio_gpu_object *bo;
+ struct drm_gem_object *obj;
+ int ret;
if (!new_state->fb)
return 0;
vgfb = to_virtio_gpu_framebuffer(new_state->fb);
+ vgplane_st = to_virtio_gpu_plane_state(new_state);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+
+ drm_gem_plane_helper_prepare_fb(plane, new_state);
+
if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
return 0;
- if (bo->dumb && (plane->state->fb != new_state->fb)) {
- vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
+ obj = new_state->fb->obj[0];
+ if (obj->import_attach) {
+ ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj);
+ if (ret)
+ return ret;
+ }
+
+ if (bo->dumb || obj->import_attach) {
+ vgplane_st->fence = virtio_gpu_fence_alloc(vgdev,
+ vgdev->fence_drv.context,
0);
- if (!vgfb->fence)
+ if (!vgplane_st->fence)
return -ENOMEM;
}
return 0;
}
+static void virtio_gpu_cleanup_imported_obj(struct drm_gem_object *obj)
+{
+ struct dma_buf_attachment *attach = obj->import_attach;
+ struct dma_resv *resv = attach->dmabuf->resv;
+
+ dma_resv_lock(resv, NULL);
+ dma_buf_unpin(attach);
+ dma_resv_unlock(resv);
+}
+
static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_plane_state *vgplane_st;
+ struct drm_gem_object *obj;
if (!state->fb)
return;
- vgfb = to_virtio_gpu_framebuffer(state->fb);
- if (vgfb->fence) {
- dma_fence_put(&vgfb->fence->f);
- vgfb->fence = NULL;
+ vgplane_st = to_virtio_gpu_plane_state(state);
+ if (vgplane_st->fence) {
+ dma_fence_put(&vgplane_st->fence->f);
+ vgplane_st->fence = NULL;
}
+
+ obj = state->fb->obj[0];
+ if (obj->import_attach)
+ virtio_gpu_cleanup_imported_obj(obj);
}
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
@@ -291,6 +378,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_output *output = NULL;
struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_plane_state *vgplane_st;
struct virtio_gpu_object *bo = NULL;
uint32_t handle;
@@ -303,6 +391,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
if (plane->state->fb) {
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+ vgplane_st = to_virtio_gpu_plane_state(plane->state);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
handle = bo->hw_res_handle;
} else {
@@ -322,11 +411,9 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
(vgdev, 0,
plane->state->crtc_w,
plane->state->crtc_h,
- 0, 0, objs, vgfb->fence);
+ 0, 0, objs, vgplane_st->fence);
virtio_gpu_notify(vgdev);
- dma_fence_wait(&vgfb->fence->f, true);
- dma_fence_put(&vgfb->fence->f);
- vgfb->fence = NULL;
+ dma_fence_wait(&vgplane_st->fence->f, true);
}
if (plane->state->fb != old_state->fb) {