author     Linus Torvalds <torvalds@linux-foundation.org>  2019-11-28 04:45:48 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-11-28 04:45:48 +0300
commit     a6ed68d6468bd5a3da78a103344ded1435fed57a (patch)
tree       be42a3609d7e9a2581806aab5bc1ace42f9ca992 /drivers/gpu/drm/virtio/virtgpu_drv.h
parent     8c39f71ee2019e77ee14f88b1321b2348db51820 (diff)
parent     acc61b8929365e63a3e8c8c8913177795aa45594 (diff)
download   linux-a6ed68d6468bd5a3da78a103344ded1435fed57a.tar.xz
Merge tag 'drm-next-2019-11-27' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
"Lots of stuff in here, though it hasn't been too insane this merge
apart from dealing with the security fun.
uapi:
- export different colorspace properties on DP vs HDMI
- new fourcc for ARM 16x16 block format
- syncobj: allow querying last submitted timeline value
- DRM_FORMAT_BIG_ENDIAN defined as unsigned
core:
- allow using gem vma manager in ttm
- connector/encoder/bridge doc fixes
- allow more than 3 encoders for a connector
- displayport mst suspend/resume reprobing support
- vram lazy unmapping, uniform vram mm and gem vram
- edid cleanups + AVI infoframe bar info
- displayport helpers - dpcd parser added
dp_cec:
- Allow a connector to be associated with a cec device
ttm:
- pipelining with no_gpu_wait fix
- always keep BOs on the LRU
sched:
- allow free_job routine to sleep
i915:
- Block userptr from mappable GTT
- i915 perf uapi versioning
- OA stream dynamic reconfiguration
- make context persistence optional
- introduce DRM_I915_UNSTABLE Kconfig
- add fake lmem testing under unstable
- BT.2020 support for DP MSA
- struct mutex elimination
- Tigerlake display/PLL/power management improvements
- Jasper Lake PCH support
- refactor PMU for multiple GPUs
- Icelake firmware update
- Split out vga + switcheroo code
amdgpu:
- implement dma-buf import/export without helpers
- vega20 RAS enablement
- DC i2c over aux fixes
- renoir GPU reset
- DC HDCP support
- BACO support for CI/VI asics
- MSI-X support
- Arcturus EEPROM support
- Arcturus VCN encode support
- VCN dynamic powergating on RV/RV2
amdkfd:
- add navi12/14/renoir support to kfd
radeon:
- SI dpm fix ported from amdgpu
- fix bad DMA on ppc platforms
gma500:
- memory leak fixes
qxl:
- convert to new gem mmap
exynos:
- build warning fix
komeda:
- add aclk sysfs attribute
v3d:
- userspace cleanup uapi change
i810:
- fix for underflow in dispatch ioctls
ast:
- refactor show_cursor
mgag200:
- refactor show_cursor
arcpgu:
- encoder finding improvements
mediatek:
- mipi_tx, dsi and partial crtc support for MT8183 SoC
- rotation support
meson:
- add suspend/resume support
omap:
- misc refactors
tegra:
- DisplayPort support for Tegra 210, 186 and 194.
- IOMMU-backed DMA API fixes
panfrost:
- fix lockdep issue
- simplify devfreq integration
rcar-du:
- R8A774B1 SoC support
- fixes for H2 ES2.0
sun4i:
- vcc-dsi regulator support
virtio-gpu:
- vmexit vs spinlock fix
- move to gem shmem helpers
- handle large command buffers with cma"
* tag 'drm-next-2019-11-27' of git://anongit.freedesktop.org/drm/drm: (1855 commits)
drm/amdgpu: invalidate mmhub semaphore workaround in gmc9/gmc10
drm/amdgpu: initialize vm_inv_eng0_sem for gfxhub and mmhub
drm/amd/amdgpu/sriov skip RLCG s/r list for arcturus VF.
drm/amd/amdgpu/sriov temporarily skip ras,dtm,hdcp for arcturus VF
drm/amdgpu/gfx10: re-init clear state buffer after gpu reset
merge fix for "ftrace: Rework event_create_dir()"
drm/amdgpu: Update Arcturus golden registers
drm/amdgpu/gfx10: fix out-of-bound mqd_backup array access
drm/amdgpu/gfx10: explicitly wait for cp idle after halt/unhalt
Revert "drm/amd/display: enable S/G for RAVEN chip"
drm/amdgpu: disable gfxoff on original raven
drm/amdgpu: remove experimental flag for Navi14
drm/amdgpu: disable gfxoff when using register read interface
drm/amdgpu/powerplay: properly set PP_GFXOFF_MASK (v2)
drm/amdgpu: fix bad DMA from INTERRUPT_CNTL2
drm/radeon: fix bad DMA from INTERRUPT_CNTL2
drm/amd/display: Fix debugfs on MST connectors
drm/amdgpu/nv: add asic func for fetching vbios from rom directly
drm/amdgpu: put flush_delayed_work at first
drm/amdgpu/vcn2.5: fix the enc loop with hw fini
...
Diffstat (limited to 'drivers/gpu/drm/virtio/virtgpu_drv.h')
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h  135
1 file changed, 45 insertions, 90 deletions
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index e28829661724..0b56ba005e25 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -35,12 +35,9 @@
 #include <drm/drm_encoder.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
 #include <drm/drm_ioctl.h>
 #include <drm/drm_probe_helper.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_placement.h>
 
 #define DRIVER_NAME "virtio_gpu"
 #define DRIVER_DESC "virtio GPU"
@@ -68,21 +65,23 @@ struct virtio_gpu_object_params {
 };
 
 struct virtio_gpu_object {
-	struct drm_gem_object gem_base;
+	struct drm_gem_shmem_object base;
 	uint32_t hw_res_handle;
 
 	struct sg_table *pages;
 	uint32_t mapped;
-	void *vmap;
 	bool dumb;
-	struct ttm_place placement_code;
-	struct ttm_placement placement;
-	struct ttm_buffer_object tbo;
-	struct ttm_bo_kmap_obj kmap;
 	bool created;
 };
 #define gem_to_virtio_gpu_obj(gobj) \
-	container_of((gobj), struct virtio_gpu_object, gem_base)
+	container_of((gobj), struct virtio_gpu_object, base.base)
+
+struct virtio_gpu_object_array {
+	struct ww_acquire_ctx ticket;
+	struct list_head next;
+	u32 nents, total;
+	struct drm_gem_object *objs[];
+};
 
 struct virtio_gpu_vbuffer;
 struct virtio_gpu_device;
@@ -115,9 +114,9 @@ struct virtio_gpu_vbuffer {
 
 	char *resp_buf;
 	int resp_size;
-
 	virtio_gpu_resp_cb resp_cb;
 
+	struct virtio_gpu_object_array *objs;
 	struct list_head list;
 };
 
@@ -147,10 +146,6 @@ struct virtio_gpu_framebuffer {
 #define to_virtio_gpu_framebuffer(x) \
 	container_of(x, struct virtio_gpu_framebuffer, base)
 
-struct virtio_gpu_mman {
-	struct ttm_bo_device bdev;
-};
-
 struct virtio_gpu_queue {
 	struct virtqueue *vq;
 	spinlock_t qlock;
@@ -179,8 +174,6 @@ struct virtio_gpu_device {
 
 	struct virtio_device *vdev;
 
-	struct virtio_gpu_mman mman;
-
 	struct virtio_gpu_output outputs[VIRTIO_GPU_MAX_SCANOUTS];
 	uint32_t num_scanouts;
 
@@ -205,6 +198,10 @@ struct virtio_gpu_device {
 
 	struct work_struct config_changed_work;
 
+	struct work_struct obj_free_work;
+	spinlock_t obj_free_lock;
+	struct list_head obj_free_list;
+
 	struct virtio_gpu_drv_capset *capsets;
 	uint32_t num_capsets;
 	struct list_head cap_cache;
@@ -217,9 +214,6 @@ struct virtio_gpu_fpriv {
 /* virtio_ioctl.c */
 #define DRM_VIRTIO_NUM_IOCTLS 10
 extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
-int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
-				    struct list_head *head);
-void virtio_gpu_unref_list(struct list_head *head);
 
 /* virtio_kms.c */
 int virtio_gpu_init(struct drm_device *dev);
@@ -240,10 +234,6 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
 			       struct drm_file *file);
 void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
 				 struct drm_file *file);
-struct virtio_gpu_object*
-virtio_gpu_alloc_object(struct drm_device *dev,
-			struct virtio_gpu_object_params *params,
-			struct virtio_gpu_fence *fence);
 int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
 				struct drm_device *dev,
 				struct drm_mode_create_dumb *args);
@@ -251,20 +241,35 @@ int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
 			      struct drm_device *dev,
 			      uint32_t handle, uint64_t *offset_p);
 
+struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents);
+struct virtio_gpu_object_array*
+virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents);
+void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
+			      struct drm_gem_object *obj);
+int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
+				struct dma_fence *fence);
+void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
+				       struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_put_free_work(struct work_struct *work);
+
 /* virtio vg */
 int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
 void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
 void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
 				    struct virtio_gpu_object *bo,
 				    struct virtio_gpu_object_params *params,
+				    struct virtio_gpu_object_array *objs,
 				    struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
 				   uint32_t resource_id);
 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
-					struct virtio_gpu_object *bo,
 					uint64_t offset,
-					__le32 width, __le32 height,
-					__le32 x, __le32 y,
+					uint32_t width, uint32_t height,
+					uint32_t x, uint32_t y,
+					struct virtio_gpu_object_array *objs,
 					struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
 				   uint32_t resource_id,
@@ -295,28 +300,32 @@ void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
 				    uint32_t id);
 void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
 					    uint32_t ctx_id,
-					    uint32_t resource_id);
+					    struct virtio_gpu_object_array *objs);
 void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
 					    uint32_t ctx_id,
-					    uint32_t resource_id);
+					    struct virtio_gpu_object_array *objs);
 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 			   void *data, uint32_t data_size,
-			   uint32_t ctx_id, struct virtio_gpu_fence *fence);
+			   uint32_t ctx_id,
+			   struct virtio_gpu_object_array *objs,
+			   struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
-					  uint32_t resource_id, uint32_t ctx_id,
+					  uint32_t ctx_id,
 					  uint64_t offset, uint32_t level,
 					  struct virtio_gpu_box *box,
+					  struct virtio_gpu_object_array *objs,
 					  struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
-					struct virtio_gpu_object *bo,
 					uint32_t ctx_id,
 					uint64_t offset, uint32_t level,
 					struct virtio_gpu_box *box,
+					struct virtio_gpu_object_array *objs,
 					struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 				       struct virtio_gpu_object *bo,
 				       struct virtio_gpu_object_params *params,
+				       struct virtio_gpu_object_array *objs,
 				       struct virtio_gpu_fence *fence);
 void virtio_gpu_ctrl_ack(struct virtqueue *vq);
 void virtio_gpu_cursor_ack(struct virtqueue *vq);
@@ -339,11 +348,6 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
 					enum drm_plane_type type,
 					int index);
 
-/* virtio_gpu_ttm.c */
-int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev);
-void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev);
-int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
-
 /* virtio_gpu_fence.c */
 bool virtio_fence_signaled(struct dma_fence *f);
 struct virtio_gpu_fence *virtio_gpu_fence_alloc(
@@ -355,70 +359,21 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
 				    u64 last_seq);
 
 /* virtio_gpu_object */
+struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
+						size_t size);
 int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object_params *params,
 			     struct virtio_gpu_object **bo_ptr,
 			     struct virtio_gpu_fence *fence);
-void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo);
-int virtio_gpu_object_kmap(struct virtio_gpu_object *bo);
-int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
-				   struct virtio_gpu_object *bo);
-void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo);
-int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
 
 /* virtgpu_prime.c */
-struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
 struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
 	struct drm_device *dev, struct dma_buf_attachment *attach,
 	struct sg_table *sgt);
-void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj);
-void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
-int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
-			   struct vm_area_struct *vma);
-
-static inline struct virtio_gpu_object*
-virtio_gpu_object_ref(struct virtio_gpu_object *bo)
-{
-	ttm_bo_get(&bo->tbo);
-	return bo;
-}
-
-static inline void virtio_gpu_object_unref(struct virtio_gpu_object **bo)
-{
-	struct ttm_buffer_object *tbo;
-
-	if ((*bo) == NULL)
-		return;
-	tbo = &((*bo)->tbo);
-	ttm_bo_put(tbo);
-	*bo = NULL;
-}
 
 static inline u64 virtio_gpu_object_mmap_offset(struct virtio_gpu_object *bo)
 {
-	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
-}
-
-static inline int virtio_gpu_object_reserve(struct virtio_gpu_object *bo,
-					    bool no_wait)
-{
-	int r;
-
-	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
-	if (unlikely(r != 0)) {
-		if (r != -ERESTARTSYS) {
-			struct virtio_gpu_device *qdev =
-				bo->gem_base.dev->dev_private;
-			dev_err(qdev->dev, "%p reserve failed\n", bo);
-		}
-		return r;
-	}
-	return 0;
-}
-
-static inline void virtio_gpu_object_unreserve(struct virtio_gpu_object *bo)
-{
-	ttm_bo_unreserve(&bo->tbo);
+	return drm_vma_node_offset_addr(&bo->base.base.vma_node);
 }
 
 /* virgl debufs */
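
Note: the header changes above replace the TTM-era reserve/unreserve and validate-list helpers with the new virtio_gpu_object_array. The following is a rough illustration only, not code from this tree: a sketch of how a submission path could drive these helpers, based purely on the declarations in the diff. "example_submit" and its arguments are hypothetical placeholders, and fence/ownership handling inside virtio_gpu_cmd_submit is assumed.

/*
 * Sketch only (not part of this patch): a hypothetical submission helper
 * showing the virtio_gpu_object_array lifecycle implied by the declarations
 * above.  Command building and most error handling are elided; it is assumed
 * that virtio_gpu_cmd_submit takes ownership of @objs, attaching the fence
 * and releasing the objects once the host has processed the command.
 */
static int example_submit(struct virtio_gpu_device *vgdev,
			  struct drm_file *file,
			  u32 *handles, u32 nents,
			  void *cmd, u32 cmd_size, u32 ctx_id,
			  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object_array *objs;
	int ret;

	/* Look up all GEM handles and take a reference on each object. */
	objs = virtio_gpu_array_from_handles(file, handles, nents);
	if (!objs)
		return -ENOENT;

	/* Lock every reservation object; the embedded ww_acquire_ctx
	 * handles lock ordering across the array. */
	ret = virtio_gpu_array_lock_resv(objs);
	if (ret) {
		virtio_gpu_array_put_free(objs);
		return ret;
	}

	/* Queue the command; the array travels with the request (see the
	 * new objs member of struct virtio_gpu_vbuffer). */
	virtio_gpu_cmd_submit(vgdev, cmd, cmd_size, ctx_id, objs, fence);
	return 0;
}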