From 32a4eb04d59ae8d5bb5baa5a8528e31094ae8e84 Mon Sep 17 00:00:00 2001
From: Thierry Reding
Date: Thu, 10 Jun 2021 13:12:34 +0200
Subject: drm/fourcc: Add macros to determine the modifier vendor

When working with framebuffer modifiers, it can be useful to extract
the vendor identifier or check a modifier against a given vendor
identifier. Add one macro that extracts the vendor identifier and a
helper to check a modifier against a given vendor identifier.

Reviewed-by: Daniel Vetter
Acked-by: Daniel Stone
Signed-off-by: Thierry Reding
Link: https://patchwork.freedesktop.org/patch/msgid/20210610111236.3814211-1-thierry.reding@gmail.com
---
 include/uapi/drm/drm_fourcc.h | 6 ++++++
 1 file changed, 6 insertions(+)
(limited to 'include/uapi')

diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 9f4bb4a6f358..45a914850be0 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -373,6 +373,12 @@ extern "C" {
 #define DRM_FORMAT_RESERVED	      ((1ULL << 56) - 1)
 
+#define fourcc_mod_get_vendor(modifier) \
+	(((modifier) >> 56) & 0xff)
+
+#define fourcc_mod_is_vendor(modifier, vendor) \
+	(fourcc_mod_get_vendor(modifier) == DRM_FORMAT_MOD_VENDOR_## vendor)
+
 #define fourcc_mod_code(vendor, val) \
 	((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | ((val) & 0x00ffffffffffffffULL))
--
cgit v1.2.3

From 353be7c2328ccba0add424d015ef51ddf423e202 Mon Sep 17 00:00:00 2001
From: Simon Ser
Date: Fri, 3 Sep 2021 13:00:16 +0000
Subject: drm: document drm_mode_create_lease object requirements

validate_lease expects one CRTC, one connector and one plane.

Signed-off-by: Simon Ser
Cc: Daniel Vetter
Cc: Pekka Paalanen
Cc: Keith Packard
Reviewed-by: Daniel Vetter
Link: https://patchwork.freedesktop.org/patch/msgid/20210903130000.1590-1-contact@emersion.fr
---
 include/uapi/drm/drm_mode.h | 3 +++
 1 file changed, 3 insertions(+)
(limited to 'include/uapi')

diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 90c55383f1ee..e4a2570a6058 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -1110,6 +1110,9 @@ struct drm_mode_destroy_blob {
  * struct drm_mode_create_lease - Create lease
  *
  * Lease mode resources, creating another drm_master.
+ *
+ * The @object_ids array must reference at least one CRTC, one connector and
+ * one plane if &DRM_CLIENT_CAP_UNIVERSAL_PLANES is enabled.
  */
 struct drm_mode_create_lease {
 	/** @object_ids: Pointer to array of object ids (__u32) */
--
cgit v1.2.3

From 34268c9dde4cbae0b701b66c44497da068f418ee Mon Sep 17 00:00:00 2001
From: Gurchetan Singh
Date: Tue, 21 Sep 2021 16:20:13 -0700
Subject: virtio-gpu api: multiple context types with explicit initialization

This feature allows each virtio-gpu 3D context to be created with a
"context_init" variable. This variable can specify:

- the type of protocol used by the context via the capset id. This is
  useful for differentiating virgl, gfxstream, and venus protocols by
  host userspace.

- other things in the future, such as the version of the context.

In addition, each different context needs one or more timelines, so
that, for example, a virgl context's waiting can be independent of a
gfxstream context's waiting.

VIRTIO_GPU_FLAG_INFO_RING_IDX is introduced to tell the host which
per-context command ring (or "hardware queue", distinct from the
virtio-queue) the fence should be associated with.

The new capability sets (gfxstream, venus etc.) are only defined in the
virtio-gpu spec and not defined in the header.
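As a quick illustration of the drm_fourcc.h helpers added at the top of this section, userspace could classify a modifier as in the sketch below. This is hypothetical code, not part of any patch; the include path follows libdrm conventions and the vendor names are the existing DRM_FORMAT_MOD_VENDOR_* definitions.

#include <stdint.h>
#include <stdio.h>
#include <drm_fourcc.h>	/* libdrm's copy of include/uapi/drm/drm_fourcc.h */

static void describe_modifier(uint64_t modifier)
{
	/* fourcc_mod_get_vendor() extracts bits 56-63 of the modifier. */
	printf("vendor id: 0x%02x\n",
	       (unsigned int)fourcc_mod_get_vendor(modifier));

	/* fourcc_mod_is_vendor() token-pastes DRM_FORMAT_MOD_VENDOR_<name>,
	 * so the second argument must be a bare vendor suffix.
	 */
	if (fourcc_mod_is_vendor(modifier, NONE))
		printf("vendor-less modifier (e.g. linear)\n");
	else if (fourcc_mod_is_vendor(modifier, NVIDIA))
		printf("NVIDIA modifier\n");
}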
Signed-off-by: Gurchetan Singh
Acked-by: Lingfeng Yang
Link: http://patchwork.freedesktop.org/patch/msgid/20210921232024.817-2-gurchetansingh@chromium.org
Signed-off-by: Gerd Hoffmann
---
 include/uapi/linux/virtio_gpu.h | 18 +++++++++++++++---
 1 file changed, 15 insertions(+), 3 deletions(-)
(limited to 'include/uapi')

diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index 97523a95781d..f556fde07b76 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -59,6 +59,11 @@
  * VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB
  */
 #define VIRTIO_GPU_F_RESOURCE_BLOB 3
+/*
+ * VIRTIO_GPU_CMD_CREATE_CONTEXT with
+ * context_init and multiple timelines
+ */
+#define VIRTIO_GPU_F_CONTEXT_INIT 4
 
 enum virtio_gpu_ctrl_type {
 	VIRTIO_GPU_UNDEFINED = 0,
@@ -122,14 +127,20 @@ enum virtio_gpu_shm_id {
 	VIRTIO_GPU_SHM_ID_HOST_VISIBLE = 1
 };
 
-#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
+#define VIRTIO_GPU_FLAG_FENCE         (1 << 0)
+/*
+ * If the following flag is set, then ring_idx contains the index
+ * of the command ring that needs to used when creating the fence
+ */
+#define VIRTIO_GPU_FLAG_INFO_RING_IDX (1 << 1)
 
 struct virtio_gpu_ctrl_hdr {
 	__le32 type;
 	__le32 flags;
 	__le64 fence_id;
 	__le32 ctx_id;
-	__le32 padding;
+	__u8 ring_idx;
+	__u8 padding[3];
 };
 
 /* data passed in the cursor vq */
@@ -269,10 +280,11 @@ struct virtio_gpu_resource_create_3d {
 };
 
 /* VIRTIO_GPU_CMD_CTX_CREATE */
+#define VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK 0x000000ff
 struct virtio_gpu_ctx_create {
 	struct virtio_gpu_ctrl_hdr hdr;
 	__le32 nlen;
-	__le32 padding;
+	__le32 context_init;
 	char debug_name[64];
 };
--
cgit v1.2.3

From b10790434cf2a40017bd796a99d5c4a6e949d616 Mon Sep 17 00:00:00 2001
From: Gurchetan Singh
Date: Tue, 21 Sep 2021 16:20:14 -0700
Subject: drm/virtgpu api: create context init feature

This change allows creating contexts depending on a set of context
parameters. The meaning of each of the parameters is listed below:

1) VIRTGPU_CONTEXT_PARAM_CAPSET_ID

This determines the type of a context based on the capability set ID.
For example, the current capsets:

VIRTIO_GPU_CAPSET_VIRGL
VIRTIO_GPU_CAPSET_VIRGL2

define a Gallium, TGSI based "virgl" context. We only need 1 capset ID
per context type, though virgl has two due to a bug that has since been
fixed.

The use case is the "gfxstream" rendering library and "venus" renderer.

gfxstream doesn't do Gallium/TGSI translation and mostly relies on
auto-generated API streaming. Certain users prefer gfxstream over virgl
for GLES on GLES emulation. {gfxstream vk}/{venus} are also required
for Vulkan emulation. The maximum capset ID is 63.

The goal is for guest userspace to choose the optimal context type
depending on the situation/hardware.

2) VIRTGPU_CONTEXT_PARAM_NUM_RINGS

This tells how many independent command rings the context will use.
This value may be zero and is inferred to be zero if
VIRTGPU_CONTEXT_PARAM_NUM_RINGS is not passed in. This is for backwards
compatibility with virgl, which has one big giant command ring for all
commands.

The maximum number of rings is 64. In practice, multi-queue or
multi-ring submission is used for powerful dGPUs, and virtio-gpu may
not be the best option in that case (see PCI passthrough or rendernode
forwarding).

3) VIRTGPU_CONTEXT_PARAM_POLL_RING_IDX_MASK

This is a mask of ring indices for which the DRM fd is pollable.
For example, if VIRTGPU_CONTEXT_PARAM_NUM_RINGS is 2, then the mask may
be:

 [ring idx] | [1 << ring_idx] | final mask
 ------------------------------------------
      0     |        1        |      1
      1     |        2        |      3

The "Sommelier" guest Wayland proxy uses this to poll for events from
the host compositor.

Signed-off-by: Gurchetan Singh
Acked-by: Lingfeng Yang
Acked-by: Nicholas Verne
Link: http://patchwork.freedesktop.org/patch/msgid/20210921232024.817-3-gurchetansingh@chromium.org
Signed-off-by: Gerd Hoffmann
---
 include/uapi/drm/virtgpu_drm.h | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)
(limited to 'include/uapi')

diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index b9ec26e9c646..a13e20cc66b4 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -47,12 +47,15 @@ extern "C" {
 #define DRM_VIRTGPU_WAIT 0x08
 #define DRM_VIRTGPU_GET_CAPS 0x09
 #define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
+#define DRM_VIRTGPU_CONTEXT_INIT 0x0b
 
 #define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
 #define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
+#define VIRTGPU_EXECBUF_RING_IDX 0x04
 #define VIRTGPU_EXECBUF_FLAGS  (\
 		VIRTGPU_EXECBUF_FENCE_FD_IN |\
 		VIRTGPU_EXECBUF_FENCE_FD_OUT |\
+		VIRTGPU_EXECBUF_RING_IDX |\
 		0)
 
 struct drm_virtgpu_map {
@@ -68,6 +71,8 @@ struct drm_virtgpu_execbuffer {
 	__u64 bo_handles;
 	__u32 num_bo_handles;
 	__s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
+	__u32 ring_idx; /* command ring index (see VIRTGPU_EXECBUF_RING_IDX) */
+	__u32 pad;
 };
 
 #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
@@ -75,6 +80,8 @@ struct drm_virtgpu_execbuffer {
 #define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
 #define VIRTGPU_PARAM_HOST_VISIBLE 4 /* Host blob resources are mappable */
 #define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */
+#define VIRTGPU_PARAM_CONTEXT_INIT 6 /* DRM_VIRTGPU_CONTEXT_INIT */
+#define VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs 7 /* Bitmask of supported capability set ids */
 
 struct drm_virtgpu_getparam {
 	__u64 param;
@@ -173,6 +180,22 @@ struct drm_virtgpu_resource_create_blob {
 	__u64 blob_id;
 };
 
+#define VIRTGPU_CONTEXT_PARAM_CAPSET_ID       0x0001
+#define VIRTGPU_CONTEXT_PARAM_NUM_RINGS       0x0002
+#define VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK 0x0003
+struct drm_virtgpu_context_set_param {
+	__u64 param;
+	__u64 value;
+};
+
+struct drm_virtgpu_context_init {
+	__u32 num_params;
+	__u32 pad;
+
+	/* pointer to drm_virtgpu_context_set_param array */
+	__u64 ctx_set_params;
+};
+
 #define DRM_IOCTL_VIRTGPU_MAP \
 	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
 
@@ -212,6 +235,10 @@ struct drm_virtgpu_resource_create_blob {
 	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_BLOB,	\
 		struct drm_virtgpu_resource_create_blob)
 
+#define DRM_IOCTL_VIRTGPU_CONTEXT_INIT \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_CONTEXT_INIT,		\
+		struct drm_virtgpu_context_init)
+
 #if defined(__cplusplus)
 }
 #endif
--
cgit v1.2.3

From 4bb2d367a5a2807185a04949ae922d247f650576 Mon Sep 17 00:00:00 2001
From: Simon Ser
Date: Fri, 3 Sep 2021 13:00:32 +0000
Subject: drm/lease: allow empty leases
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This can be used to create a separate DRM file description, thus
creating a new GEM handle namespace. My use-case is wlroots.
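Stepping back to the virtgpu uAPI just shown, a guest-side sketch of the context-init flow added by the two virtio patches above might look like this. It is illustrative only: the capset value for venus comes from the virtio-gpu spec rather than the header (as noted earlier), the include path is an assumption, and error handling is elided.

#include <stdint.h>
#include <xf86drm.h>
#include <drm/virtgpu_drm.h>

static int create_venus_context(int fd)
{
	struct drm_virtgpu_context_set_param params[] = {
		{ .param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID, .value = 4 /* venus, per spec */ },
		{ .param = VIRTGPU_CONTEXT_PARAM_NUM_RINGS, .value = 2 },
		{ .param = VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK, .value = 0x2 /* poll ring 1 */ },
	};
	struct drm_virtgpu_context_init init = {
		.num_params = 3,
		.ctx_set_params = (uintptr_t)params,
	};

	return drmIoctl(fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
}

/* Fences from this submission attach to per-context ring 1. */
static int submit_on_ring1(int fd, void *cmds, uint32_t size)
{
	struct drm_virtgpu_execbuffer eb = {
		.flags = VIRTGPU_EXECBUF_RING_IDX,
		.size = size,
		.command = (uintptr_t)cmds,
		.ring_idx = 1,
	};

	return drmIoctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
}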
The library splits responsibilities between separate components: the
GBM allocator creates buffers, the GLES2 renderer uses EGL to import
them and render to them, the DRM backend imports the buffers and
displays them. wlroots has a modular architecture, and any of these
components can be swapped and replaced with something else. For
instance, the pipeline can be set up so that the DRM dumb buffer
allocator is used instead of GBM and the Pixman renderer is used
instead of GLES2. Library users can also replace any of these
components with their own custom one.

DMA-BUFs are used to pass buffer references across components. We could
use GEM handles instead, but this would result in pain if multiple GPUs
are in use: wlroots copies buffers across GPUs as needed. Importing a
GEM handle created on one GPU into a completely different GPU will blow
up (fail at best, mix unrelated buffers otherwise).

Everything is fine if all components use Mesa. However, this isn't
always desirable. For instance when running with DRM dumb buffers and
the Pixman software renderer it's unfortunate to depend on GBM in the
DRM backend just to turn DMA-BUFs into FB IDs. GBM loads Mesa drivers
to perform an action which has nothing driver-specific. Additionally,
drivers will fail the import if the 3D engine can't use the imported
buffer, for instance amdgpu will refuse to import DRM dumb buffers [1].
We might also want to be running with a Vulkan renderer and a Vulkan
allocator in the future, and GBM wouldn't be welcome in this setup.

To address this, GBM can be side-stepped in the DRM backend, and can be
replaced with drmPrimeFDToHandle calls. However because of GEM handle
reference counting issues, care must be taken to avoid double-closing
the same GEM handle. In particular, it's not possible to share a DRM FD
with GBM or EGL and perform some drmPrimeFDToHandle calls manually. So
wlroots needs to re-open the DRM FD to create a new GEM handle
namespace.

However there's no guarantee that the file-system permissions will be
set up so that the primary FD can be opened by the compositor. On
modern systems seatd or logind is a privileged process responsible for
doing this, and other processes aren't expected to do it. For
historical reasons systemd still allows physically logged in users to
open primary DRM nodes, but this doesn't work on non-systemd setups and
it's desirable to lock them down at some point.

Some might suggest to open the render node instead of re-opening the
primary node. However some systems don't have a render node at all
(e.g. no GPU, or a split render/display SoC).

Solutions to this issue have been discussed in [2]. One solution would
be to open the magic /proc/self/fd/ file, but it's a Linux-specific
hack (wlroots supports BSDs too). Another solution is to add support
for re-opening a DRM primary node to seatd/logind, but they don't
support it now and really haven't been designed for this (logind would
need to grow a completely new API, because it assumes unique dev_t
IDs). Also this seems like pushing down a kernel limitation to
user-space a bit too hard.

Another solution is to allow creating empty DRM leases. The lessee FD
would have its own GEM handle namespace, so wouldn't conflict with
GBM/EGL. It would have the master bit set, but would be able to manage
zero resources. wlroots doesn't intend to share this FD with any other
process.

All in all IMHO that seems like a pretty reasonable solution to the
issue at hand.
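For reference, this is roughly what the re-opening trick looks like once the patch below is applied, using libdrm's drmModeCreateLease() wrapper. A minimal sketch; error handling is elided.

#include <fcntl.h>
#include <stdint.h>
#include <xf86drmMode.h>

/* Create an empty lease: a second DRM file description for the same
 * device, with its own GEM handle namespace and zero leased objects.
 */
static int reopen_drm_fd(int drm_fd)
{
	uint32_t lessee_id;

	/* Kernels without this patch reject object_count == 0. */
	return drmModeCreateLease(drm_fd, NULL /* object_ids */,
				  0 /* object_count */, O_CLOEXEC,
				  &lessee_id);
}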
Note, I've discussed with Jonas Ådahl and Mutter plans to adopt a similar design in the future. Example usage in wlroots is available at [3]. IGT test available at [4]. [1]: https://github.com/swaywm/wlroots/issues/2916 [2]: https://gitlab.freedesktop.org/mesa/drm/-/merge_requests/110 [3]: https://github.com/swaywm/wlroots/pull/3158 [4]: https://patchwork.freedesktop.org/series/94323/ Signed-off-by: Simon Ser Cc: Daniel Vetter Cc: Daniel Stone Cc: Pekka Paalanen Cc: Michel Dänzer Cc: Emil Velikov Cc: Keith Packard Cc: Boris Brezillon Cc: Dave Airlie Acked-by: Pekka Paalanen Reviewed-by: Daniel Stone Link: https://patchwork.freedesktop.org/patch/msgid/20210903130000.1590-2-contact@emersion.fr --- drivers/gpu/drm/drm_lease.c | 39 ++++++++++++++++++--------------------- include/uapi/drm/drm_mode.h | 3 ++- 2 files changed, 20 insertions(+), 22 deletions(-) (limited to 'include/uapi') diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c index dee4f24a1808..d72c2fac0ff1 100644 --- a/drivers/gpu/drm/drm_lease.c +++ b/drivers/gpu/drm/drm_lease.c @@ -489,12 +489,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; - /* need some objects */ - if (cl->object_count == 0) { - DRM_DEBUG_LEASE("no objects in lease\n"); - return -EINVAL; - } - if (cl->flags && (cl->flags & ~(O_CLOEXEC | O_NONBLOCK))) { DRM_DEBUG_LEASE("invalid flags\n"); return -EINVAL; @@ -510,23 +504,26 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, object_count = cl->object_count; - object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), - array_size(object_count, sizeof(__u32))); - if (IS_ERR(object_ids)) { - ret = PTR_ERR(object_ids); - goto out_lessor; - } - + /* Handle leased objects, if any */ idr_init(&leases); + if (object_count != 0) { + object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), + array_size(object_count, sizeof(__u32))); + if (IS_ERR(object_ids)) { + ret = PTR_ERR(object_ids); + idr_destroy(&leases); + goto out_lessor; + } - /* fill and validate the object idr */ - ret = fill_object_idr(dev, lessor_priv, &leases, - object_count, object_ids); - kfree(object_ids); - if (ret) { - DRM_DEBUG_LEASE("lease object lookup failed: %i\n", ret); - idr_destroy(&leases); - goto out_lessor; + /* fill and validate the object idr */ + ret = fill_object_idr(dev, lessor_priv, &leases, + object_count, object_ids); + kfree(object_ids); + if (ret) { + DRM_DEBUG_LEASE("lease object lookup failed: %i\n", ret); + idr_destroy(&leases); + goto out_lessor; + } } /* Allocate a file descriptor for the lease */ diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index e4a2570a6058..e1e351682872 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -1112,7 +1112,8 @@ struct drm_mode_destroy_blob { * Lease mode resources, creating another drm_master. * * The @object_ids array must reference at least one CRTC, one connector and - * one plane if &DRM_CLIENT_CAP_UNIVERSAL_PLANES is enabled. + * one plane if &DRM_CLIENT_CAP_UNIVERSAL_PLANES is enabled. Alternatively, + * the lease can be completely empty. */ struct drm_mode_create_lease { /** @object_ids: Pointer to array of object ids (__u32) */ -- cgit v1.2.3 From bb3425efdcd99f2b4e608e850226f7107b2f993e Mon Sep 17 00:00:00 2001 From: Melissa Wen Date: Thu, 30 Sep 2021 17:18:50 +0100 Subject: drm/v3d: add generic ioctl extension Add support to attach generic extensions on job submission. 
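As background for the v3d patches that follow, this is roughly how userspace chains such extensions onto a submission once the uapi below is in place. A sketch only: struct and flag names are taken from the hunks in this and the next patch, and the include path is an assumption.

#include <stdint.h>
#include <drm/v3d_drm.h>

/* Attach a multisync extension (added in the next patch) to a CL
 * submission. The kernel walks base.next until it reads a zero
 * pointer, so further extensions could be linked from there.
 */
static void attach_multisync(struct drm_v3d_submit_cl *submit,
			     struct drm_v3d_multi_sync *ms,
			     struct drm_v3d_sem *waits, uint32_t n_waits,
			     struct drm_v3d_sem *signals, uint32_t n_signals)
{
	ms->base.id = DRM_V3D_EXT_ID_MULTI_SYNC;
	ms->base.next = 0;		/* end of the extension chain */
	ms->base.flags = 0;		/* mbz */
	ms->in_syncs = (uintptr_t)waits;
	ms->in_sync_count = n_waits;
	ms->out_syncs = (uintptr_t)signals;
	ms->out_sync_count = n_signals;
	ms->wait_stage = V3D_BIN;	/* wait deps before the binning stage */

	submit->flags |= DRM_V3D_SUBMIT_EXTENSION;
	submit->extensions = (uintptr_t)&ms->base;
}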
This patch is the third prep work to enable multiple syncobjs on job
submission. With this work, when the job submission interface needs to
be extended to accommodate a new feature, we will use a generic
extension struct where an id determines the data type pointed to. The
first application is to enable multiple in/out syncobjs (next patch),
but the base is already done for future features. Therefore, to attach
a new feature, a specific extension struct should subclass
drm_v3d_extension and update the list of extensions in a job
submission.

v2:
- remove redundant elements to subclass struct (Daniel)

v3:
- add comment for v3d_get_extensions

Signed-off-by: Melissa Wen
Reviewed-by: Iago Toral Quiroga
Signed-off-by: Melissa Wen
Link: https://patchwork.freedesktop.org/patch/msgid/ed53b1cd7e3125b76f18fe3fb995a04393639bc6.1633016479.git.mwen@igalia.com
---
 drivers/gpu/drm/v3d/v3d_drv.c |  4 +--
 drivers/gpu/drm/v3d/v3d_gem.c | 74 +++++++++++++++++++++++++++++++++++++++++--
 include/uapi/drm/v3d_drm.h    | 31 ++++++++++++++++++
 3 files changed, 103 insertions(+), 6 deletions(-)
(limited to 'include/uapi')

diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index c1deab2cf38d..3d6b9bcce2f7 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -83,7 +83,6 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
 		return 0;
 	}
-
 	switch (args->param) {
 	case DRM_V3D_PARAM_SUPPORTS_TFU:
 		args->value = 1;
@@ -147,7 +146,7 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file)
 DEFINE_DRM_GEM_FOPS(v3d_drm_fops);
 
 /* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP
- * protection between clients.  Note that render nodes would be be
+ * protection between clients.  Note that render nodes would be
  * able to submit CLs that could access BOs from clients authenticated
  * with the master node.  The TFU doesn't use the GMP, so it would
  * need to stay DRM_AUTH until we do buffer size/offset validation.
@@ -219,7 +218,6 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
 	u32 mmu_debug;
 	u32 ident1;
-
 	v3d = devm_drm_dev_alloc(dev, &v3d_drm_driver, struct v3d_dev, drm);
 	if (IS_ERR(v3d))
 		return PTR_ERR(v3d);

diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index f932e73fb5c6..93f130fb3a13 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -537,6 +537,36 @@ v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
 	}
 }
 
+/* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data
+ * according to the extension id (name).
+ */
+static int
+v3d_get_extensions(struct drm_file *file_priv, u64 ext_handles)
+{
+	struct drm_v3d_extension __user *user_ext;
+
+	user_ext = u64_to_user_ptr(ext_handles);
+	while (user_ext) {
+		struct drm_v3d_extension ext;
+
+		if (copy_from_user(&ext, user_ext, sizeof(ext))) {
+			DRM_DEBUG("Failed to copy submit extension\n");
+			return -EFAULT;
+		}
+
+		switch (ext.id) {
+		case 0:
+		default:
+			DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id);
+			return -EINVAL;
+		}
+
+		user_ext = u64_to_user_ptr(ext.next);
+	}
+
+	return 0;
+}
+
 /**
  * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
* @dev: DRM device @@ -565,15 +595,24 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end); - if (args->pad != 0) + if (args->pad) return -EINVAL; - if (args->flags != 0 && - args->flags != DRM_V3D_SUBMIT_CL_FLUSH_CACHE) { + if (args->flags && + args->flags & ~(DRM_V3D_SUBMIT_CL_FLUSH_CACHE | + DRM_V3D_SUBMIT_EXTENSION)) { DRM_INFO("invalid flags: %d\n", args->flags); return -EINVAL; } + if (args->flags & DRM_V3D_SUBMIT_EXTENSION) { + ret = v3d_get_extensions(file_priv, args->extensions); + if (ret) { + DRM_DEBUG("Failed to get extensions.\n"); + return ret; + } + } + ret = v3d_job_init(v3d, file_priv, (void *)&render, sizeof(*render), v3d_render_job_free, args->in_sync_rcl, V3D_RENDER); if (ret) @@ -702,6 +741,19 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia); + if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) { + DRM_DEBUG("invalid flags: %d\n", args->flags); + return -EINVAL; + } + + if (args->flags & DRM_V3D_SUBMIT_EXTENSION) { + ret = v3d_get_extensions(file_priv, args->extensions); + if (ret) { + DRM_DEBUG("Failed to get extensions.\n"); + return ret; + } + } + ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job), v3d_job_free, args->in_sync, V3D_TFU); if (ret) @@ -786,11 +838,27 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data, trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]); + if (args->pad) + return -EINVAL; + if (!v3d_has_csd(v3d)) { DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n"); return -EINVAL; } + if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) { + DRM_INFO("invalid flags: %d\n", args->flags); + return -EINVAL; + } + + if (args->flags & DRM_V3D_SUBMIT_EXTENSION) { + ret = v3d_get_extensions(file_priv, args->extensions); + if (ret) { + DRM_DEBUG("Failed to get extensions.\n"); + return ret; + } + } + ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job), v3d_job_free, args->in_sync, V3D_CSD); if (ret) diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h index 4104f22fb3d3..55b443ca6c0b 100644 --- a/include/uapi/drm/v3d_drm.h +++ b/include/uapi/drm/v3d_drm.h @@ -58,6 +58,20 @@ extern "C" { struct drm_v3d_perfmon_get_values) #define DRM_V3D_SUBMIT_CL_FLUSH_CACHE 0x01 +#define DRM_V3D_SUBMIT_EXTENSION 0x02 + +/* struct drm_v3d_extension - ioctl extensions + * + * Linked-list of generic extensions where the id identify which struct is + * pointed by ext_data. Therefore, DRM_V3D_EXT_ID_* is used on id to identify + * the extension type. + */ +struct drm_v3d_extension { + __u64 next; + __u32 id; +#define DRM_V3D_EXT_ID_MULTI_SYNC 0x01 + __u32 flags; /* mbz */ +}; /** * struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D @@ -135,12 +149,16 @@ struct drm_v3d_submit_cl { /* Number of BO handles passed in (size is that times 4). */ __u32 bo_handle_count; + /* DRM_V3D_SUBMIT_* properties */ __u32 flags; /* ID of the perfmon to attach to this job. 0 means no perfmon. */ __u32 perfmon_id; __u32 pad; + + /* Pointer to an array of ioctl extensions*/ + __u64 extensions; }; /** @@ -248,6 +266,12 @@ struct drm_v3d_submit_tfu { __u32 in_sync; /* Sync object to signal when the TFU job is done. */ __u32 out_sync; + + __u32 flags; + + /* Pointer to an array of ioctl extensions*/ + __u64 extensions; + }; /* Submits a compute shader for dispatch. 
This job will block on any
@@ -276,6 +300,13 @@ struct drm_v3d_submit_csd {
 
 	/* ID of the perfmon to attach to this job. 0 means no perfmon. */
 	__u32 perfmon_id;
+
+	/* Pointer to an array of ioctl extensions*/
+	__u64 extensions;
+
+	__u32 flags;
+
+	__u32 pad;
 };
 
 enum {
--
cgit v1.2.3

From e4165ae8304e5ea822fbe5909dd3be5445c058b7 Mon Sep 17 00:00:00 2001
From: Melissa Wen
Date: Thu, 30 Sep 2021 17:19:56 +0100
Subject: drm/v3d: add multiple syncobjs support

Using the generic extension from the previous patch, a specific
multisync extension enables more than one in/out binary syncobj per job
submission. Arrays of syncobjs are set in struct drm_v3d_multisync,
which also determines the stage for synchronization (wait dependencies)
according to the job queue.

v2:
- subclass the generic extension struct (Daniel)
- simplify adding dependency conditions to make them understandable (Iago)

v3:
- fix conditions to consider single or multiple in/out_syncs (Iago)
- remove irrelevant comment (Iago)

Signed-off-by: Melissa Wen
Reviewed-by: Iago Toral Quiroga
Signed-off-by: Melissa Wen
Link: https://patchwork.freedesktop.org/patch/msgid/ffd8b2e3dd2e0c686db441a0c0a4a0181ff85328.1633016479.git.mwen@igalia.com
---
 drivers/gpu/drm/v3d/v3d_drv.c |   6 +-
 drivers/gpu/drm/v3d/v3d_drv.h |  24 ++++--
 drivers/gpu/drm/v3d/v3d_gem.c | 185 +++++++++++++++++++++++++++++++++++++-----
 include/uapi/drm/v3d_drm.h    |  49 ++++++++++-
 4 files changed, 232 insertions(+), 32 deletions(-)
(limited to 'include/uapi')

diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 3d6b9bcce2f7..bd46396a1ae0 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -96,6 +96,9 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
 	case DRM_V3D_PARAM_SUPPORTS_PERFMON:
 		args->value = (v3d->ver >= 40);
 		return 0;
+	case DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT:
+		args->value = 1;
+		return 0;
 	default:
 		DRM_DEBUG("Unknown parameter %d\n", args->param);
 		return -EINVAL;
@@ -135,9 +138,8 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file)
 	struct v3d_file_priv *v3d_priv = file->driver_priv;
 	enum v3d_queue q;
 
-	for (q = 0; q < V3D_MAX_QUEUES; q++) {
+	for (q = 0; q < V3D_MAX_QUEUES; q++)
 		drm_sched_entity_destroy(&v3d_priv->sched_entity[q]);
-	}
 
 	v3d_perfmon_close_file(v3d_priv);
 	kfree(v3d_priv);

diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index b900a050d5e2..b74b1351bfc8 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -19,15 +19,6 @@ struct reset_control;
 
 #define GMP_GRANULARITY (128 * 1024)
 
-/* Enum for each of the V3D queues.
*/ -enum v3d_queue { - V3D_BIN, - V3D_RENDER, - V3D_TFU, - V3D_CSD, - V3D_CACHE_CLEAN, -}; - #define V3D_MAX_QUEUES (V3D_CACHE_CLEAN + 1) struct v3d_queue_state { @@ -294,6 +285,21 @@ struct v3d_csd_job { struct drm_v3d_submit_csd args; }; +struct v3d_submit_outsync { + struct drm_syncobj *syncobj; +}; + +struct v3d_submit_ext { + u32 flags; + u32 wait_stage; + + u32 in_sync_count; + u64 in_syncs; + + u32 out_sync_count; + struct v3d_submit_outsync *out_syncs; +}; + /** * __wait_for - magic wait macro * diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 93f130fb3a13..6a000d77c568 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -454,11 +454,12 @@ v3d_job_add_deps(struct drm_file *file_priv, struct v3d_job *job, static int v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv, void **container, size_t size, void (*free)(struct kref *ref), - u32 in_sync, enum v3d_queue queue) + u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue) { struct v3d_file_priv *v3d_priv = file_priv->driver_priv; struct v3d_job *job; - int ret; + bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC); + int ret, i; *container = kcalloc(1, size, GFP_KERNEL); if (!*container) { @@ -479,9 +480,28 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv, if (ret) goto fail_job; - ret = v3d_job_add_deps(file_priv, job, in_sync, 0); - if (ret) - goto fail_deps; + if (has_multisync) { + if (se->in_sync_count && se->wait_stage == queue) { + struct drm_v3d_sem __user *handle = u64_to_user_ptr(se->in_syncs); + + for (i = 0; i < se->in_sync_count; i++) { + struct drm_v3d_sem in; + + ret = copy_from_user(&in, handle++, sizeof(in)); + if (ret) { + DRM_DEBUG("Failed to copy wait dep handle.\n"); + goto fail_deps; + } + ret = v3d_job_add_deps(file_priv, job, in.handle, 0); + if (ret) + goto fail_deps; + } + } + } else { + ret = v3d_job_add_deps(file_priv, job, in_sync, 0); + if (ret) + goto fail_deps; + } kref_init(&job->refcount); @@ -516,9 +536,11 @@ v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv, struct v3d_job *job, struct ww_acquire_ctx *acquire_ctx, u32 out_sync, + struct v3d_submit_ext *se, struct dma_fence *done_fence) { struct drm_syncobj *sync_out; + bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC); int i; for (i = 0; i < job->bo_count; i++) { @@ -530,20 +552,130 @@ v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv, drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx); /* Update the return sync object for the job */ - sync_out = drm_syncobj_find(file_priv, out_sync); - if (sync_out) { - drm_syncobj_replace_fence(sync_out, done_fence); - drm_syncobj_put(sync_out); + /* If it only supports a single signal semaphore*/ + if (!has_multisync) { + sync_out = drm_syncobj_find(file_priv, out_sync); + if (sync_out) { + drm_syncobj_replace_fence(sync_out, done_fence); + drm_syncobj_put(sync_out); + } + return; + } + + /* If multiple semaphores extension is supported */ + if (se->out_sync_count) { + for (i = 0; i < se->out_sync_count; i++) { + drm_syncobj_replace_fence(se->out_syncs[i].syncobj, + done_fence); + drm_syncobj_put(se->out_syncs[i].syncobj); + } + kvfree(se->out_syncs); + } +} + +static void +v3d_put_multisync_post_deps(struct v3d_submit_ext *se) +{ + unsigned int i; + + if (!(se && se->out_sync_count)) + return; + + for (i = 0; i < se->out_sync_count; i++) + drm_syncobj_put(se->out_syncs[i].syncobj); + kvfree(se->out_syncs); +} + +static int 
+v3d_get_multisync_post_deps(struct drm_file *file_priv, + struct v3d_submit_ext *se, + u32 count, u64 handles) +{ + struct drm_v3d_sem __user *post_deps; + int i, ret; + + if (!count) + return 0; + + se->out_syncs = (struct v3d_submit_outsync *) + kvmalloc_array(count, + sizeof(struct v3d_submit_outsync), + GFP_KERNEL); + if (!se->out_syncs) + return -ENOMEM; + + post_deps = u64_to_user_ptr(handles); + + for (i = 0; i < count; i++) { + struct drm_v3d_sem out; + + ret = copy_from_user(&out, post_deps++, sizeof(out)); + if (ret) { + DRM_DEBUG("Failed to copy post dep handles\n"); + goto fail; + } + + se->out_syncs[i].syncobj = drm_syncobj_find(file_priv, + out.handle); + if (!se->out_syncs[i].syncobj) { + ret = -EINVAL; + goto fail; + } } + se->out_sync_count = count; + + return 0; + +fail: + for (i--; i >= 0; i--) + drm_syncobj_put(se->out_syncs[i].syncobj); + kvfree(se->out_syncs); + + return ret; +} + +/* Get data for multiple binary semaphores synchronization. Parse syncobj + * to be signaled when job completes (out_sync). + */ +static int +v3d_get_multisync_submit_deps(struct drm_file *file_priv, + struct drm_v3d_extension __user *ext, + void *data) +{ + struct drm_v3d_multi_sync multisync; + struct v3d_submit_ext *se = data; + int ret; + + ret = copy_from_user(&multisync, ext, sizeof(multisync)); + if (ret) + return ret; + + if (multisync.pad) + return -EINVAL; + + ret = v3d_get_multisync_post_deps(file_priv, data, multisync.out_sync_count, + multisync.out_syncs); + if (ret) + return ret; + + se->in_sync_count = multisync.in_sync_count; + se->in_syncs = multisync.in_syncs; + se->flags |= DRM_V3D_EXT_ID_MULTI_SYNC; + se->wait_stage = multisync.wait_stage; + + return 0; } /* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data * according to the extension id (name). 
*/ static int -v3d_get_extensions(struct drm_file *file_priv, u64 ext_handles) +v3d_get_extensions(struct drm_file *file_priv, + u64 ext_handles, + void *data) { struct drm_v3d_extension __user *user_ext; + int ret; user_ext = u64_to_user_ptr(ext_handles); while (user_ext) { @@ -555,7 +687,11 @@ v3d_get_extensions(struct drm_file *file_priv, u64 ext_handles) } switch (ext.id) { - case 0: + case DRM_V3D_EXT_ID_MULTI_SYNC: + ret = v3d_get_multisync_submit_deps(file_priv, user_ext, data); + if (ret) + return ret; + break; default: DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id); return -EINVAL; @@ -586,6 +722,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, struct v3d_dev *v3d = to_v3d_dev(dev); struct v3d_file_priv *v3d_priv = file_priv->driver_priv; struct drm_v3d_submit_cl *args = data; + struct v3d_submit_ext se = {0}; struct v3d_bin_job *bin = NULL; struct v3d_render_job *render = NULL; struct v3d_job *clean_job = NULL; @@ -606,7 +743,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, } if (args->flags & DRM_V3D_SUBMIT_EXTENSION) { - ret = v3d_get_extensions(file_priv, args->extensions); + ret = v3d_get_extensions(file_priv, args->extensions, &se); if (ret) { DRM_DEBUG("Failed to get extensions.\n"); return ret; @@ -614,7 +751,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, } ret = v3d_job_init(v3d, file_priv, (void *)&render, sizeof(*render), - v3d_render_job_free, args->in_sync_rcl, V3D_RENDER); + v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER); if (ret) goto fail; @@ -624,7 +761,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, if (args->bcl_start != args->bcl_end) { ret = v3d_job_init(v3d, file_priv, (void *)&bin, sizeof(*bin), - v3d_job_free, args->in_sync_bcl, V3D_BIN); + v3d_job_free, args->in_sync_bcl, &se, V3D_BIN); if (ret) goto fail; @@ -638,7 +775,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) { ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job), - v3d_job_free, 0, V3D_CACHE_CLEAN); + v3d_job_free, 0, 0, V3D_CACHE_CLEAN); if (ret) goto fail; @@ -698,6 +835,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, last_job, &acquire_ctx, args->out_sync, + &se, last_job->done_fence); if (bin) @@ -716,6 +854,7 @@ fail: v3d_job_cleanup((void *)bin); v3d_job_cleanup((void *)render); v3d_job_cleanup(clean_job); + v3d_put_multisync_post_deps(&se); return ret; } @@ -735,6 +874,7 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, { struct v3d_dev *v3d = to_v3d_dev(dev); struct drm_v3d_submit_tfu *args = data; + struct v3d_submit_ext se = {0}; struct v3d_tfu_job *job = NULL; struct ww_acquire_ctx acquire_ctx; int ret = 0; @@ -747,7 +887,7 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, } if (args->flags & DRM_V3D_SUBMIT_EXTENSION) { - ret = v3d_get_extensions(file_priv, args->extensions); + ret = v3d_get_extensions(file_priv, args->extensions, &se); if (ret) { DRM_DEBUG("Failed to get extensions.\n"); return ret; @@ -755,7 +895,7 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, } ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job), - v3d_job_free, args->in_sync, V3D_TFU); + v3d_job_free, args->in_sync, &se, V3D_TFU); if (ret) goto fail; @@ -803,6 +943,7 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, v3d_attach_fences_and_unlock_reservation(file_priv, &job->base, &acquire_ctx, args->out_sync, + &se, job->base.done_fence); v3d_job_put(&job->base); @@ -811,6 +952,7 @@ 
v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, fail: v3d_job_cleanup((void *)job); + v3d_put_multisync_post_deps(&se); return ret; } @@ -831,6 +973,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data, struct v3d_dev *v3d = to_v3d_dev(dev); struct v3d_file_priv *v3d_priv = file_priv->driver_priv; struct drm_v3d_submit_csd *args = data; + struct v3d_submit_ext se = {0}; struct v3d_csd_job *job = NULL; struct v3d_job *clean_job = NULL; struct ww_acquire_ctx acquire_ctx; @@ -852,7 +995,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data, } if (args->flags & DRM_V3D_SUBMIT_EXTENSION) { - ret = v3d_get_extensions(file_priv, args->extensions); + ret = v3d_get_extensions(file_priv, args->extensions, &se); if (ret) { DRM_DEBUG("Failed to get extensions.\n"); return ret; @@ -860,12 +1003,12 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data, } ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job), - v3d_job_free, args->in_sync, V3D_CSD); + v3d_job_free, args->in_sync, &se, V3D_CSD); if (ret) goto fail; ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job), - v3d_job_free, 0, V3D_CACHE_CLEAN); + v3d_job_free, 0, 0, V3D_CACHE_CLEAN); if (ret) goto fail; @@ -904,6 +1047,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data, clean_job, &acquire_ctx, args->out_sync, + &se, clean_job->done_fence); v3d_job_put(&job->base); @@ -918,6 +1062,7 @@ fail_unreserve: fail: v3d_job_cleanup((void *)job); v3d_job_cleanup(clean_job); + v3d_put_multisync_post_deps(&se); return ret; } diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h index 55b443ca6c0b..3dfc0af8756a 100644 --- a/include/uapi/drm/v3d_drm.h +++ b/include/uapi/drm/v3d_drm.h @@ -73,6 +73,53 @@ struct drm_v3d_extension { __u32 flags; /* mbz */ }; +/* struct drm_v3d_sem - wait/signal semaphore + * + * If binary semaphore, it only takes syncobj handle and ignores flags and + * point fields. Point is defined for timeline syncobj feature. + */ +struct drm_v3d_sem { + __u32 handle; /* syncobj */ + /* rsv below, for future uses */ + __u32 flags; + __u64 point; /* for timeline sem support */ + __u64 mbz[2]; /* must be zero, rsv */ +}; + +/* Enum for each of the V3D queues. */ +enum v3d_queue { + V3D_BIN, + V3D_RENDER, + V3D_TFU, + V3D_CSD, + V3D_CACHE_CLEAN, +}; + +/** + * struct drm_v3d_multi_sync - ioctl extension to add support multiples + * syncobjs for commands submission. + * + * When an extension of DRM_V3D_EXT_ID_MULTI_SYNC id is defined, it points to + * this extension to define wait and signal dependencies, instead of single + * in/out sync entries on submitting commands. The field flags is used to + * determine the stage to set wait dependencies. + */ +struct drm_v3d_multi_sync { + struct drm_v3d_extension base; + /* Array of wait and signal semaphores */ + __u64 in_syncs; + __u64 out_syncs; + + /* Number of entries */ + __u32 in_sync_count; + __u32 out_sync_count; + + /* set the stage (v3d_queue) to sync */ + __u32 wait_stage; + + __u32 pad; /* mbz */ +}; + /** * struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D * engine. @@ -228,6 +275,7 @@ enum drm_v3d_param { DRM_V3D_PARAM_SUPPORTS_CSD, DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH, DRM_V3D_PARAM_SUPPORTS_PERFMON, + DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT, }; struct drm_v3d_get_param { @@ -271,7 +319,6 @@ struct drm_v3d_submit_tfu { /* Pointer to an array of ioctl extensions*/ __u64 extensions; - }; /* Submits a compute shader for dispatch. 
This job will block on any -- cgit v1.2.3 From cbbd3764b2399ad882cda98435b25144e9ea2124 Mon Sep 17 00:00:00 2001 From: "Huang, Sean Z" Date: Fri, 24 Sep 2021 12:14:42 -0700 Subject: drm/i915/pxp: Create the arbitrary session after boot Create the arbitrary session, with the fixed session id 0xf, after system boot, for the case that application allocates the protected buffer without establishing any protection session. Because the hardware requires at least one alive session for protected buffer creation. This arbitrary session will need to be re-created after teardown or power event because hardware encryption key won't be valid after such cases. The session ID is exposed as part of the uapi so it can be used as part of userspace commands. v2: use gt->uncore->rpm (Chris) v3: s/arb_is_in_play/arb_is_valid (Chris), move set-up to the new init_hw function v4: move interface defs to separate header, set arb_is valid to false on fini (Rodrigo) v5: handle async component binding Signed-off-by: Huang, Sean Z Signed-off-by: Daniele Ceraolo Spurio Cc: Chris Wilson Cc: Rodrigo Vivi Reviewed-by: Rodrigo Vivi Signed-off-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20210924191452.1539378-8-alan.previn.teres.alexis@intel.com --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/pxp/intel_pxp.c | 12 +++ drivers/gpu/drm/i915/pxp/intel_pxp.h | 2 + drivers/gpu/drm/i915/pxp/intel_pxp_session.c | 74 ++++++++++++++++++ drivers/gpu/drm/i915/pxp/intel_pxp_session.h | 15 ++++ drivers/gpu/drm/i915/pxp/intel_pxp_tee.c | 87 ++++++++++++++++++++++ drivers/gpu/drm/i915/pxp/intel_pxp_tee.h | 3 + drivers/gpu/drm/i915/pxp/intel_pxp_tee_interface.h | 36 +++++++++ drivers/gpu/drm/i915/pxp/intel_pxp_types.h | 10 +++ include/uapi/drm/i915_drm.h | 3 + 10 files changed, 243 insertions(+) create mode 100644 drivers/gpu/drm/i915/pxp/intel_pxp_session.c create mode 100644 drivers/gpu/drm/i915/pxp/intel_pxp_session.h create mode 100644 drivers/gpu/drm/i915/pxp/intel_pxp_tee_interface.h (limited to 'include/uapi') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index d790b603fbdd..1152348c04a9 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -281,6 +281,7 @@ i915-y += i915_perf.o # Protected execution platform (PXP) support i915-$(CONFIG_DRM_I915_PXP) += \ pxp/intel_pxp.o \ + pxp/intel_pxp_session.o \ pxp/intel_pxp_tee.o # Post-mortem debug and GPU hang state capture diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c index f7166dd71d8f..65598487a7b2 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c @@ -3,6 +3,7 @@ * Copyright(c) 2020 Intel Corporation. 
*/ #include "intel_pxp.h" +#include "intel_pxp_session.h" #include "intel_pxp_tee.h" #include "gt/intel_context.h" #include "i915_drv.h" @@ -12,6 +13,11 @@ struct intel_gt *pxp_to_gt(const struct intel_pxp *pxp) return container_of(pxp, struct intel_gt, pxp); } +bool intel_pxp_is_active(const struct intel_pxp *pxp) +{ + return pxp->arb_is_valid; +} + /* KCR register definitions */ #define KCR_INIT _MMIO(0x320f0) /* Setting KCR Init bit is required after system boot */ @@ -72,6 +78,8 @@ void intel_pxp_init(struct intel_pxp *pxp) if (!HAS_PXP(gt->i915)) return; + mutex_init(&pxp->tee_mutex); + ret = create_vcs_context(pxp); if (ret) return; @@ -93,6 +101,8 @@ void intel_pxp_fini(struct intel_pxp *pxp) if (!intel_pxp_is_enabled(pxp)) return; + pxp->arb_is_valid = false; + intel_pxp_tee_component_fini(pxp); destroy_vcs_context(pxp); @@ -101,6 +111,8 @@ void intel_pxp_fini(struct intel_pxp *pxp) void intel_pxp_init_hw(struct intel_pxp *pxp) { kcr_pxp_enable(pxp_to_gt(pxp)); + + intel_pxp_create_arb_session(pxp); } void intel_pxp_fini_hw(struct intel_pxp *pxp) diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.h b/drivers/gpu/drm/i915/pxp/intel_pxp.h index cd6560b2605c..2960e8338c82 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.h @@ -15,6 +15,8 @@ static inline bool intel_pxp_is_enabled(const struct intel_pxp *pxp) #ifdef CONFIG_DRM_I915_PXP struct intel_gt *pxp_to_gt(const struct intel_pxp *pxp); +bool intel_pxp_is_active(const struct intel_pxp *pxp); + void intel_pxp_init(struct intel_pxp *pxp); void intel_pxp_fini(struct intel_pxp *pxp); diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c new file mode 100644 index 000000000000..3331868f354c --- /dev/null +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright(c) 2020, Intel Corporation. All rights reserved. + */ + +#include "drm/i915_drm.h" +#include "i915_drv.h" + +#include "intel_pxp.h" +#include "intel_pxp_session.h" +#include "intel_pxp_tee.h" +#include "intel_pxp_types.h" + +#define ARB_SESSION I915_PROTECTED_CONTENT_DEFAULT_SESSION /* shorter define */ + +#define GEN12_KCR_SIP _MMIO(0x32260) /* KCR hwdrm session in play 0-31 */ + +static bool intel_pxp_session_is_in_play(struct intel_pxp *pxp, u32 id) +{ + struct intel_gt *gt = pxp_to_gt(pxp); + intel_wakeref_t wakeref; + u32 sip = 0; + + with_intel_runtime_pm(gt->uncore->rpm, wakeref) + sip = intel_uncore_read(gt->uncore, GEN12_KCR_SIP); + + return sip & BIT(id); +} + +static int pxp_wait_for_session_state(struct intel_pxp *pxp, u32 id, bool in_play) +{ + struct intel_gt *gt = pxp_to_gt(pxp); + intel_wakeref_t wakeref; + u32 mask = BIT(id); + int ret; + + with_intel_runtime_pm(gt->uncore->rpm, wakeref) + ret = intel_wait_for_register(gt->uncore, + GEN12_KCR_SIP, + mask, + in_play ? 
mask : 0, + 100); + + return ret; +} + +int intel_pxp_create_arb_session(struct intel_pxp *pxp) +{ + struct intel_gt *gt = pxp_to_gt(pxp); + int ret; + + pxp->arb_is_valid = false; + + if (intel_pxp_session_is_in_play(pxp, ARB_SESSION)) { + drm_err(>->i915->drm, "arb session already in play at creation time\n"); + return -EEXIST; + } + + ret = intel_pxp_tee_cmd_create_arb_session(pxp, ARB_SESSION); + if (ret) { + drm_err(>->i915->drm, "tee cmd for arb session creation failed\n"); + return ret; + } + + ret = pxp_wait_for_session_state(pxp, ARB_SESSION, true); + if (ret) { + drm_err(>->i915->drm, "arb session failed to go in play\n"); + return ret; + } + + pxp->arb_is_valid = true; + + return 0; +} diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.h b/drivers/gpu/drm/i915/pxp/intel_pxp_session.h new file mode 100644 index 000000000000..316c3bebed9c --- /dev/null +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright(c) 2020, Intel Corporation. All rights reserved. + */ + +#ifndef __INTEL_PXP_SESSION_H__ +#define __INTEL_PXP_SESSION_H__ + +#include + +struct intel_pxp; + +int intel_pxp_create_arb_session(struct intel_pxp *pxp); + +#endif /* __INTEL_PXP_SESSION_H__ */ diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c index 0c0c7946e6a0..accbbf9aa6d1 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c @@ -8,13 +8,63 @@ #include "drm/i915_component.h" #include "i915_drv.h" #include "intel_pxp.h" +#include "intel_pxp_session.h" #include "intel_pxp_tee.h" +#include "intel_pxp_tee_interface.h" static inline struct intel_pxp *i915_dev_to_pxp(struct device *i915_kdev) { return &kdev_to_i915(i915_kdev)->gt.pxp; } +static int intel_pxp_tee_io_message(struct intel_pxp *pxp, + void *msg_in, u32 msg_in_size, + void *msg_out, u32 msg_out_max_size, + u32 *msg_out_rcv_size) +{ + struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915; + struct i915_pxp_component *pxp_component = pxp->pxp_component; + int ret = 0; + + mutex_lock(&pxp->tee_mutex); + + /* + * The binding of the component is asynchronous from i915 probe, so we + * can't be sure it has happened. 
+ */ + if (!pxp_component) { + ret = -ENODEV; + goto unlock; + } + + ret = pxp_component->ops->send(pxp_component->tee_dev, msg_in, msg_in_size); + if (ret) { + drm_err(&i915->drm, "Failed to send PXP TEE message\n"); + goto unlock; + } + + ret = pxp_component->ops->recv(pxp_component->tee_dev, msg_out, msg_out_max_size); + if (ret < 0) { + drm_err(&i915->drm, "Failed to receive PXP TEE message\n"); + goto unlock; + } + + if (ret > msg_out_max_size) { + drm_err(&i915->drm, + "Failed to receive PXP TEE message due to unexpected output size\n"); + ret = -ENOSPC; + goto unlock; + } + + if (msg_out_rcv_size) + *msg_out_rcv_size = ret; + + ret = 0; +unlock: + mutex_unlock(&pxp->tee_mutex); + return ret; +} + /** * i915_pxp_tee_component_bind - bind function to pass the function pointers to pxp_tee * @i915_kdev: pointer to i915 kernel device @@ -28,14 +78,24 @@ static inline struct intel_pxp *i915_dev_to_pxp(struct device *i915_kdev) static int i915_pxp_tee_component_bind(struct device *i915_kdev, struct device *tee_kdev, void *data) { + struct drm_i915_private *i915 = kdev_to_i915(i915_kdev); struct intel_pxp *pxp = i915_dev_to_pxp(i915_kdev); + mutex_lock(&pxp->tee_mutex); pxp->pxp_component = data; pxp->pxp_component->tee_dev = tee_kdev; + mutex_unlock(&pxp->tee_mutex); /* the component is required to fully start the PXP HW */ intel_pxp_init_hw(pxp); + if (!pxp->arb_is_valid) { + drm_err(&i915->drm, "Failed to create arb session during bind\n"); + intel_pxp_fini_hw(pxp); + pxp->pxp_component = NULL; + return -EIO; + } + return 0; } @@ -46,7 +106,9 @@ static void i915_pxp_tee_component_unbind(struct device *i915_kdev, intel_pxp_fini_hw(pxp); + mutex_lock(&pxp->tee_mutex); pxp->pxp_component = NULL; + mutex_unlock(&pxp->tee_mutex); } static const struct component_ops i915_pxp_tee_component_ops = { @@ -82,3 +144,28 @@ void intel_pxp_tee_component_fini(struct intel_pxp *pxp) component_del(i915->drm.dev, &i915_pxp_tee_component_ops); pxp->pxp_component_added = false; } + +int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp, + int arb_session_id) +{ + struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915; + struct pxp_tee_create_arb_in msg_in = {0}; + struct pxp_tee_create_arb_out msg_out = {0}; + int ret; + + msg_in.header.api_version = PXP_TEE_APIVER; + msg_in.header.command_id = PXP_TEE_ARB_CMDID; + msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header); + msg_in.protection_mode = PXP_TEE_ARB_PROTECTION_MODE; + msg_in.session_id = arb_session_id; + + ret = intel_pxp_tee_io_message(pxp, + &msg_in, sizeof(msg_in), + &msg_out, sizeof(msg_out), + NULL); + + if (ret) + drm_err(&i915->drm, "Failed to send tee msg ret=[%d]\n", ret); + + return ret; +} diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h index 23d050a5d3e7..c136053ce340 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h @@ -11,4 +11,7 @@ int intel_pxp_tee_component_init(struct intel_pxp *pxp); void intel_pxp_tee_component_fini(struct intel_pxp *pxp); +int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp, + int arb_session_id); + #endif /* __INTEL_PXP_TEE_H__ */ diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee_interface.h b/drivers/gpu/drm/i915/pxp/intel_pxp_tee_interface.h new file mode 100644 index 000000000000..36e9b0868f5c --- /dev/null +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee_interface.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright(c) 2020, Intel Corporation. 
All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_TEE_INTERFACE_H__
+#define __INTEL_PXP_TEE_INTERFACE_H__
+
+#include <linux/types.h>
+
+#define PXP_TEE_APIVER 0x40002
+#define PXP_TEE_ARB_CMDID 0x1e
+#define PXP_TEE_ARB_PROTECTION_MODE 0x2
+
+/* PXP TEE message header */
+struct pxp_tee_cmd_header {
+	u32 api_version;
+	u32 command_id;
+	u32 status;
+	/* Length of the message (excluding the header) */
+	u32 buffer_len;
+} __packed;
+
+/* PXP TEE message input to create a arbitrary session */
+struct pxp_tee_create_arb_in {
+	struct pxp_tee_cmd_header header;
+	u32 protection_mode;
+	u32 session_id;
+} __packed;
+
+/* PXP TEE message output to create a arbitrary session */
+struct pxp_tee_create_arb_out {
+	struct pxp_tee_cmd_header header;
+} __packed;
+
+#endif /* __INTEL_PXP_TEE_INTERFACE_H__ */

diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
index 3a8e17e591bd..d393e46a7d98 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
@@ -6,6 +6,7 @@
 #ifndef __INTEL_PXP_TYPES_H__
 #define __INTEL_PXP_TYPES_H__
 
+#include <linux/mutex.h>
 #include <linux/types.h>
 
 struct intel_context;
@@ -16,6 +17,15 @@ struct intel_pxp {
 	bool pxp_component_added;
 
 	struct intel_context *ce;
+
+	/*
+	 * After a teardown, the arb session can still be in play on the HW
+	 * even if the keys are gone, so we can't rely on the HW state of the
+	 * session to know if it's valid and need to track the status in SW.
+	 */
+	bool arb_is_valid;
+
+	struct mutex tee_mutex; /* protects the tee channel binding */
 };
 
 #endif /* __INTEL_PXP_TYPES_H__ */

diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index bde5860b3686..920e9e852e5a 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -3038,6 +3038,9 @@ struct drm_i915_gem_create_ext_memory_regions {
 	__u64 regions;
 };
 
+/* ID of the protected content session managed by i915 when PXP is active */
+#define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf
+
 #if defined(__cplusplus)
 }
 #endif
--
cgit v1.2.3

From d3ac8d42168a9be7380be8035df8b6d3780ec2a1 Mon Sep 17 00:00:00 2001
From: Daniele Ceraolo Spurio
Date: Fri, 24 Sep 2021 12:14:45 -0700
Subject: drm/i915/pxp: interfaces for using protected objects

This API allows user mode to create protected buffers and to mark
contexts as making use of such objects. Only when using contexts marked
in such a way is execution guaranteed to work as expected.

Contexts can only be marked as using protected content at creation time
(i.e. the parameter is immutable) and they must be both bannable and
not recoverable.

Given that the protected session gets invalidated on suspend, contexts
created this way hold a runtime pm wakeref until they're either
destroyed or invalidated.

All protected objects and contexts will be considered invalid when the
PXP session is destroyed and all new submissions using them will be
rejected. All intel contexts within the invalidated gem contexts will
be marked banned. Userspace can detect that an invalidation has
occurred via the RESET_STATS ioctl, where we report it the same way as
a ban due to a hang.
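A sketch of the context-creation side of this uAPI, mirroring the usage the patch documents in i915_drm.h. Bannable is the default, so only the recoverable flag needs clearing; the create-ext plumbing appears in the diff below, and error handling is elided here.

#include <stdint.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

/* Create a protected context. The PROTECTED_CONTENT param is immutable
 * after creation and requires bannable (default) + not recoverable.
 */
static int create_protected_context(int fd, uint32_t *ctx_id)
{
	struct drm_i915_gem_context_create_ext_setparam p_protected = {
		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
		.param = {
			.param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
			.value = 1,
		},
	};
	struct drm_i915_gem_context_create_ext_setparam p_norecover = {
		.base = {
			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
			.next_extension = (uintptr_t)&p_protected,
		},
		.param = {
			.param = I915_CONTEXT_PARAM_RECOVERABLE,
			.value = 0,
		},
	};
	struct drm_i915_gem_context_create_ext create = {
		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
		.extensions = (uintptr_t)&p_norecover,
	};
	int ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);

	if (ret == 0)
		*ctx_id = create.ctx_id;
	return ret;
}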
v5: squash patches, rebase on proto_ctx, update kerneldoc v6: rebase on obj create_ext changes v7: Use session counter to check if an object it valid, hold wakeref in context, don't add a new flag to RESET_STATS (Daniel) v8: don't increase guilty count for contexts banned during pxp invalidation (Rodrigo) v9: better comments, avoid wakeref put race between pxp_inval and context_close, add usage examples (Rodrigo) v10: modify internal set/get-protected-context functions to not return -ENODEV when setting PXP param to false or getting param when running on pxp-unsupported hw or getting param when i915 was built with CONFIG_PXP off Signed-off-by: Alan Previn Signed-off-by: Daniele Ceraolo Spurio Signed-off-by: Bommu Krishnaiah Cc: Rodrigo Vivi Cc: Chris Wilson Cc: Lionel Landwerlin Cc: Jason Ekstrand Cc: Daniel Vetter Reviewed-by: Rodrigo Vivi Signed-off-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20210924191452.1539378-11-alan.previn.teres.alexis@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 95 +++++++++++++++++++---- drivers/gpu/drm/i915/gem/i915_gem_context.h | 6 ++ drivers/gpu/drm/i915/gem/i915_gem_context_types.h | 28 +++++++ drivers/gpu/drm/i915/gem/i915_gem_create.c | 72 ++++++++++++----- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 18 +++++ drivers/gpu/drm/i915/gem/i915_gem_object.c | 1 + drivers/gpu/drm/i915/gem/i915_gem_object.h | 6 ++ drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 9 ++- drivers/gpu/drm/i915/gem/selftests/mock_context.c | 4 +- drivers/gpu/drm/i915/pxp/intel_pxp.c | 77 ++++++++++++++++++ drivers/gpu/drm/i915/pxp/intel_pxp.h | 12 +++ drivers/gpu/drm/i915/pxp/intel_pxp_session.c | 6 ++ drivers/gpu/drm/i915/pxp/intel_pxp_types.h | 9 +++ include/uapi/drm/i915_drm.h | 94 ++++++++++++++++++++++ 14 files changed, 402 insertions(+), 35 deletions(-) (limited to 'include/uapi') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 8208fd5b72c3..08360d6bbd1e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -77,6 +77,8 @@ #include "gt/intel_gpu_commands.h" #include "gt/intel_ring.h" +#include "pxp/intel_pxp.h" + #include "i915_gem_context.h" #include "i915_trace.h" #include "i915_user_extensions.h" @@ -186,10 +188,13 @@ static int validate_priority(struct drm_i915_private *i915, return 0; } -static void proto_context_close(struct i915_gem_proto_context *pc) +static void proto_context_close(struct drm_i915_private *i915, + struct i915_gem_proto_context *pc) { int i; + if (pc->pxp_wakeref) + intel_runtime_pm_put(&i915->runtime_pm, pc->pxp_wakeref); if (pc->vm) i915_vm_put(pc->vm); if (pc->user_engines) { @@ -241,6 +246,33 @@ static int proto_context_set_persistence(struct drm_i915_private *i915, return 0; } +static int proto_context_set_protected(struct drm_i915_private *i915, + struct i915_gem_proto_context *pc, + bool protected) +{ + int ret = 0; + + if (!protected) { + pc->uses_protected_content = false; + } else if (!intel_pxp_is_enabled(&i915->gt.pxp)) { + ret = -ENODEV; + } else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) || + !(pc->user_flags & BIT(UCONTEXT_BANNABLE))) { + ret = -EPERM; + } else { + pc->uses_protected_content = true; + + /* + * protected context usage requires the PXP session to be up, + * which in turn requires the device to be active. 
+ */ + pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm); + ret = intel_pxp_wait_for_arb_start(&i915->gt.pxp); + } + + return ret; +} + static struct i915_gem_proto_context * proto_context_create(struct drm_i915_private *i915, unsigned int flags) { @@ -269,7 +301,7 @@ proto_context_create(struct drm_i915_private *i915, unsigned int flags) return pc; proto_close: - proto_context_close(pc); + proto_context_close(i915, pc); return err; } @@ -693,6 +725,8 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv, ret = -EPERM; else if (args->value) pc->user_flags |= BIT(UCONTEXT_BANNABLE); + else if (pc->uses_protected_content) + ret = -EPERM; else pc->user_flags &= ~BIT(UCONTEXT_BANNABLE); break; @@ -700,10 +734,12 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv, case I915_CONTEXT_PARAM_RECOVERABLE: if (args->size) ret = -EINVAL; - else if (args->value) - pc->user_flags |= BIT(UCONTEXT_RECOVERABLE); - else + else if (!args->value) pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE); + else if (pc->uses_protected_content) + ret = -EPERM; + else + pc->user_flags |= BIT(UCONTEXT_RECOVERABLE); break; case I915_CONTEXT_PARAM_PRIORITY: @@ -731,6 +767,11 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv, args->value); break; + case I915_CONTEXT_PARAM_PROTECTED_CONTENT: + ret = proto_context_set_protected(fpriv->dev_priv, pc, + args->value); + break; + case I915_CONTEXT_PARAM_NO_ZEROMAP: case I915_CONTEXT_PARAM_BAN_PERIOD: case I915_CONTEXT_PARAM_RINGSIZE: @@ -956,6 +997,9 @@ static void i915_gem_context_release_work(struct work_struct *work) if (vm) i915_vm_put(vm); + if (ctx->pxp_wakeref) + intel_runtime_pm_put(&ctx->i915->runtime_pm, ctx->pxp_wakeref); + mutex_destroy(&ctx->engines_mutex); mutex_destroy(&ctx->lut_mutex); @@ -1338,6 +1382,11 @@ i915_gem_create_context(struct drm_i915_private *i915, goto err_engines; } + if (pc->uses_protected_content) { + ctx->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm); + ctx->uses_protected_content = true; + } + trace_i915_context_create(ctx); return ctx; @@ -1409,7 +1458,7 @@ int i915_gem_context_open(struct drm_i915_private *i915, } ctx = i915_gem_create_context(i915, pc); - proto_context_close(pc); + proto_context_close(i915, pc); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto err; @@ -1436,7 +1485,7 @@ void i915_gem_context_close(struct drm_file *file) unsigned long idx; xa_for_each(&file_priv->proto_context_xa, idx, pc) - proto_context_close(pc); + proto_context_close(file_priv->dev_priv, pc); xa_destroy(&file_priv->proto_context_xa); mutex_destroy(&file_priv->proto_context_lock); @@ -1731,6 +1780,15 @@ static int set_priority(struct i915_gem_context *ctx, return 0; } +static int get_protected(struct i915_gem_context *ctx, + struct drm_i915_gem_context_param *args) +{ + args->size = 0; + args->value = i915_gem_context_uses_protected_content(ctx); + + return 0; +} + static int ctx_setparam(struct drm_i915_file_private *fpriv, struct i915_gem_context *ctx, struct drm_i915_gem_context_param *args) @@ -1754,6 +1812,8 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv, ret = -EPERM; else if (args->value) i915_gem_context_set_bannable(ctx); + else if (i915_gem_context_uses_protected_content(ctx)) + ret = -EPERM; /* can't clear this for protected contexts */ else i915_gem_context_clear_bannable(ctx); break; @@ -1761,10 +1821,12 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv, case I915_CONTEXT_PARAM_RECOVERABLE: if (args->size) ret = -EINVAL; - else if (args->value) - 
i915_gem_context_set_recoverable(ctx); - else + else if (!args->value) i915_gem_context_clear_recoverable(ctx); + else if (i915_gem_context_uses_protected_content(ctx)) + ret = -EPERM; /* can't set this for protected contexts */ + else + i915_gem_context_set_recoverable(ctx); break; case I915_CONTEXT_PARAM_PRIORITY: @@ -1779,6 +1841,7 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv, ret = set_persistence(ctx, args); break; + case I915_CONTEXT_PARAM_PROTECTED_CONTENT: case I915_CONTEXT_PARAM_NO_ZEROMAP: case I915_CONTEXT_PARAM_BAN_PERIOD: case I915_CONTEXT_PARAM_RINGSIZE: @@ -1857,7 +1920,7 @@ finalize_create_context_locked(struct drm_i915_file_private *file_priv, old = xa_erase(&file_priv->proto_context_xa, id); GEM_BUG_ON(old != pc); - proto_context_close(pc); + proto_context_close(file_priv->dev_priv, pc); /* One for the xarray and one for the caller */ return i915_gem_context_get(ctx); @@ -1943,7 +2006,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, goto err_pc; } - proto_context_close(ext_data.pc); + proto_context_close(i915, ext_data.pc); gem_context_register(ctx, ext_data.fpriv, id); } else { ret = proto_context_register(ext_data.fpriv, ext_data.pc, &id); @@ -1957,7 +2020,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, return 0; err_pc: - proto_context_close(ext_data.pc); + proto_context_close(i915, ext_data.pc); return ret; } @@ -1988,7 +2051,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, GEM_WARN_ON(ctx && pc); if (pc) - proto_context_close(pc); + proto_context_close(file_priv->dev_priv, pc); if (ctx) context_close(ctx); @@ -2106,6 +2169,10 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, args->value = i915_gem_context_is_persistent(ctx); break; + case I915_CONTEXT_PARAM_PROTECTED_CONTENT: + ret = get_protected(ctx, args); + break; + case I915_CONTEXT_PARAM_NO_ZEROMAP: case I915_CONTEXT_PARAM_BAN_PERIOD: case I915_CONTEXT_PARAM_ENGINES: diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h index d3279086a5e7..babfecb17ad1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h @@ -108,6 +108,12 @@ i915_gem_context_clear_user_engines(struct i915_gem_context *ctx) clear_bit(CONTEXT_USER_ENGINES, &ctx->flags); } +static inline bool +i915_gem_context_uses_protected_content(const struct i915_gem_context *ctx) +{ + return ctx->uses_protected_content; +} + /* i915_gem_context.c */ void i915_gem_init__contexts(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h index c4617e4d9fa9..a627b09c4680 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h @@ -198,6 +198,12 @@ struct i915_gem_proto_context { /** @single_timeline: See See &i915_gem_context.syncobj */ bool single_timeline; + + /** @uses_protected_content: See &i915_gem_context.uses_protected_content */ + bool uses_protected_content; + + /** @pxp_wakeref: See &i915_gem_context.pxp_wakeref */ + intel_wakeref_t pxp_wakeref; }; /** @@ -321,6 +327,28 @@ struct i915_gem_context { #define CONTEXT_CLOSED 0 #define CONTEXT_USER_ENGINES 1 + /** + * @uses_protected_content: context uses PXP-encrypted objects. + * + * This flag can only be set at ctx creation time and it's immutable for + * the lifetime of the context. 
See I915_CONTEXT_PARAM_PROTECTED_CONTENT + * in uapi/drm/i915_drm.h for more info on setting restrictions and + * expected behaviour of marked contexts. + */ + bool uses_protected_content; + + /** + * @pxp_wakeref: wakeref to keep the device awake when PXP is in use + * + * PXP sessions are invalidated when the device is suspended, which in + * turns invalidates all contexts and objects using it. To keep the + * flow simple, we keep the device awake when contexts using PXP objects + * are in use. It is expected that the userspace application only uses + * PXP when the display is on, so taking a wakeref here shouldn't worsen + * our power metrics. + */ + intel_wakeref_t pxp_wakeref; + /** @mutex: guards everything that isn't engines or handles_vma */ struct mutex mutex; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c index 1d341b8c47c0..8955d6abcef1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_create.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c @@ -6,6 +6,7 @@ #include "gem/i915_gem_ioctls.h" #include "gem/i915_gem_lmem.h" #include "gem/i915_gem_region.h" +#include "pxp/intel_pxp.h" #include "i915_drv.h" #include "i915_trace.h" @@ -82,21 +83,11 @@ static int i915_gem_publish(struct drm_i915_gem_object *obj, return 0; } -/** - * Creates a new object using the same path as DRM_I915_GEM_CREATE_EXT - * @i915: i915 private - * @size: size of the buffer, in bytes - * @placements: possible placement regions, in priority order - * @n_placements: number of possible placement regions - * - * This function is exposed primarily for selftests and does very little - * error checking. It is assumed that the set of placement regions has - * already been verified to be valid. - */ -struct drm_i915_gem_object * -__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size, - struct intel_memory_region **placements, - unsigned int n_placements) +static struct drm_i915_gem_object * +__i915_gem_object_create_user_ext(struct drm_i915_private *i915, u64 size, + struct intel_memory_region **placements, + unsigned int n_placements, + unsigned int ext_flags) { struct intel_memory_region *mr = placements[0]; struct drm_i915_gem_object *obj; @@ -135,6 +126,9 @@ __i915_gem_object_create_user(struct drm_i915_private *i915, u64 size, GEM_BUG_ON(size != obj->base.size); + /* Add any flag set by create_ext options */ + obj->flags |= ext_flags; + trace_i915_gem_object_create(obj); return obj; @@ -145,6 +139,26 @@ object_free: return ERR_PTR(ret); } +/** + * Creates a new object using the same path as DRM_I915_GEM_CREATE_EXT + * @i915: i915 private + * @size: size of the buffer, in bytes + * @placements: possible placement regions, in priority order + * @n_placements: number of possible placement regions + * + * This function is exposed primarily for selftests and does very little + * error checking. It is assumed that the set of placement regions has + * already been verified to be valid. 
+ */ +struct drm_i915_gem_object * +__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size, + struct intel_memory_region **placements, + unsigned int n_placements) +{ + return __i915_gem_object_create_user_ext(i915, size, placements, + n_placements, 0); +} + int i915_gem_dumb_create(struct drm_file *file, struct drm_device *dev, @@ -224,6 +238,7 @@ struct create_ext { struct drm_i915_private *i915; struct intel_memory_region *placements[INTEL_REGION_UNKNOWN]; unsigned int n_placements; + unsigned long flags; }; static void repr_placements(char *buf, size_t size, @@ -353,8 +368,28 @@ static int ext_set_placements(struct i915_user_extension __user *base, return set_placements(&ext, data); } +static int ext_set_protected(struct i915_user_extension __user *base, void *data) +{ + struct drm_i915_gem_create_ext_protected_content ext; + struct create_ext *ext_data = data; + + if (copy_from_user(&ext, base, sizeof(ext))) + return -EFAULT; + + if (ext.flags) + return -EINVAL; + + if (!intel_pxp_is_enabled(&ext_data->i915->gt.pxp)) + return -ENODEV; + + ext_data->flags |= I915_BO_PROTECTED; + + return 0; +} + static const i915_user_extension_fn create_extensions[] = { [I915_GEM_CREATE_EXT_MEMORY_REGIONS] = ext_set_placements, + [I915_GEM_CREATE_EXT_PROTECTED_CONTENT] = ext_set_protected, }; /** @@ -389,9 +424,10 @@ i915_gem_create_ext_ioctl(struct drm_device *dev, void *data, ext_data.n_placements = 1; } - obj = __i915_gem_object_create_user(i915, args->size, - ext_data.placements, - ext_data.n_placements); + obj = __i915_gem_object_create_user_ext(i915, args->size, + ext_data.placements, + ext_data.n_placements, + ext_data.flags); if (IS_ERR(obj)) return PTR_ERR(obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index c2f74dba0557..9447ce6ea500 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -21,6 +21,8 @@ #include "gt/intel_gt_pm.h" #include "gt/intel_ring.h" +#include "pxp/intel_pxp.h" + #include "i915_drv.h" #include "i915_gem_clflush.h" #include "i915_gem_context.h" @@ -810,6 +812,22 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle) if (unlikely(!obj)) return ERR_PTR(-ENOENT); + /* + * If the user has opted-in for protected-object tracking, make + * sure the object encryption can be used. + * We only need to do this when the object is first used with + * this context, because the context itself will be banned when + * the protected objects become invalid. 
+ */ + if (i915_gem_context_uses_protected_content(eb->gem_context) && + i915_gem_object_is_protected(obj)) { + err = intel_pxp_key_check(&vm->gt->pxp, obj); + if (err) { + i915_gem_object_put(obj); + return ERR_PTR(err); + } + } + vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { i915_gem_object_put(obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index b88b121e244a..76ce6a1500bc 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -25,6 +25,7 @@ #include #include "display/intel_frontbuffer.h" +#include "pxp/intel_pxp.h" #include "i915_drv.h" #include "i915_gem_clflush.h" #include "i915_gem_context.h" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 7f9f2e5ba0ec..9df3ee60604e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -272,6 +272,12 @@ i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj) clear_bit(I915_TILING_QUIRK_BIT, &obj->flags); } +static inline bool +i915_gem_object_is_protected(const struct drm_i915_gem_object *obj) +{ + return obj->flags & I915_BO_PROTECTED; +} + static inline bool i915_gem_object_type_has(const struct drm_i915_gem_object *obj, unsigned long flags) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index fa2ba9e2a4d0..7c3da4e3e737 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -304,7 +304,7 @@ struct drm_i915_gem_object { I915_BO_ALLOC_PM_EARLY) #define I915_BO_READONLY BIT(6) #define I915_TILING_QUIRK_BIT 7 /* unknown swizzling; do not release! */ - +#define I915_BO_PROTECTED BIT(8) /** * @mem_flags - Mutable placement-related flags * @@ -544,6 +544,13 @@ struct drm_i915_gem_object { bool created:1; } ttm; + /* + * Record which PXP key instance this object was created against (if + * any), so we can use it to determine if the encryption is valid by + * comparing against the current key instance. + */ + u32 pxp_key_instance; + /** Record of address bit 17 of each page at last unbind. 
*/ unsigned long *bit_17; diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c index 067d68a6fe4c..c0a8ef368044 100644 --- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c @@ -88,7 +88,7 @@ live_context(struct drm_i915_private *i915, struct file *file) return ERR_CAST(pc); ctx = i915_gem_create_context(i915, pc); - proto_context_close(pc); + proto_context_close(i915, pc); if (IS_ERR(ctx)) return ctx; @@ -163,7 +163,7 @@ kernel_context(struct drm_i915_private *i915, } ctx = i915_gem_create_context(i915, pc); - proto_context_close(pc); + proto_context_close(i915, pc); if (IS_ERR(ctx)) return ctx; diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c index 2e188217c3b7..ee507fd8ae19 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c @@ -7,6 +7,7 @@ #include "intel_pxp_irq.h" #include "intel_pxp_session.h" #include "intel_pxp_tee.h" +#include "gem/i915_gem_context.h" #include "gt/intel_context.h" #include "i915_drv.h" @@ -178,3 +179,79 @@ void intel_pxp_fini_hw(struct intel_pxp *pxp) intel_pxp_irq_disable(pxp); } + +int intel_pxp_key_check(struct intel_pxp *pxp, struct drm_i915_gem_object *obj) +{ + if (!intel_pxp_is_active(pxp)) + return -ENODEV; + + if (!i915_gem_object_is_protected(obj)) + return -EINVAL; + + GEM_BUG_ON(!pxp->key_instance); + + /* + * If this is the first time we're using this object, it's not + * encrypted yet; it will be encrypted with the current key, so mark it + * as such. If the object is already encrypted, check instead if the + * used key is still valid. + */ + if (!obj->pxp_key_instance) + obj->pxp_key_instance = pxp->key_instance; + else if (obj->pxp_key_instance != pxp->key_instance) + return -ENOEXEC; + + return 0; +} + +void intel_pxp_invalidate(struct intel_pxp *pxp) +{ + struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915; + struct i915_gem_context *ctx, *cn; + + /* ban all contexts marked as protected */ + spin_lock_irq(&i915->gem.contexts.lock); + list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) { + struct i915_gem_engines_iter it; + struct intel_context *ce; + + if (!kref_get_unless_zero(&ctx->ref)) + continue; + + if (likely(!i915_gem_context_uses_protected_content(ctx))) { + i915_gem_context_put(ctx); + continue; + } + + spin_unlock_irq(&i915->gem.contexts.lock); + + /* + * By the time we get here we are either going to suspend with + * quiesced execution or the HW keys are already long gone and + * in this case it is worthless to attempt to close the context + * and wait for its execution. It will hang the GPU if it has + * not already. So, as a fast mitigation, we can ban the + * context as quick as we can. That might race with the + * execbuffer, but currently this is the best that can be done. + */ + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) + intel_context_ban(ce, NULL); + i915_gem_context_unlock_engines(ctx); + + /* + * The context has been banned, no need to keep the wakeref. 
+ * This is safe from races because the only other place this + * is touched is context_release and we're holding a ctx ref + */ + if (ctx->pxp_wakeref) { + intel_runtime_pm_put(&i915->runtime_pm, + ctx->pxp_wakeref); + ctx->pxp_wakeref = 0; + } + + spin_lock_irq(&i915->gem.contexts.lock); + list_safe_reset_next(ctx, cn, link); + i915_gem_context_put(ctx); + } + spin_unlock_irq(&i915->gem.contexts.lock); +} diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.h b/drivers/gpu/drm/i915/pxp/intel_pxp.h index 54b268280379..430f01ea9860 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.h @@ -8,6 +8,8 @@ #include "intel_pxp_types.h" +struct drm_i915_gem_object; + static inline bool intel_pxp_is_enabled(const struct intel_pxp *pxp) { return pxp->ce; @@ -25,6 +27,10 @@ void intel_pxp_fini_hw(struct intel_pxp *pxp); void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp); int intel_pxp_wait_for_arb_start(struct intel_pxp *pxp); + +int intel_pxp_key_check(struct intel_pxp *pxp, struct drm_i915_gem_object *obj); + +void intel_pxp_invalidate(struct intel_pxp *pxp); #else static inline void intel_pxp_init(struct intel_pxp *pxp) { @@ -38,6 +44,12 @@ static inline int intel_pxp_wait_for_arb_start(struct intel_pxp *pxp) { return -ENODEV; } + +static inline int intel_pxp_key_check(struct intel_pxp *pxp, + struct drm_i915_gem_object *obj) +{ + return -ENODEV; +} #endif #endif /* __INTEL_PXP_H__ */ diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c index 67c30e534d50..c6a5e4197e40 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c @@ -72,6 +72,9 @@ static int pxp_create_arb_session(struct intel_pxp *pxp) return ret; } + if (!++pxp->key_instance) + ++pxp->key_instance; + pxp->arb_is_valid = true; return 0; @@ -85,6 +88,9 @@ static int pxp_terminate_arb_session_and_global(struct intel_pxp *pxp) /* must mark termination in progress calling this function */ GEM_WARN_ON(pxp->arb_is_valid); + /* invalidate protected objects */ + intel_pxp_invalidate(pxp); + /* terminate the hw sessions */ ret = intel_pxp_terminate_session(pxp, ARB_SESSION); if (ret) { diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h index 5a170e43c959..c394ab2e452b 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h @@ -7,7 +7,9 @@ #define __INTEL_PXP_TYPES_H__ #include +#include #include +#include #include #include @@ -27,6 +29,13 @@ struct intel_pxp { */ bool arb_is_valid; + /* + * Keep track of which key instance we're on, so we can use it to + * determine if an object was created using the current key or a + * previous one. + */ + u32 key_instance; + struct mutex tee_mutex; /* protects the tee channel binding */ /* diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 920e9e852e5a..aa2a7eccfb94 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -1846,6 +1846,55 @@ struct drm_i915_gem_context_param { * attempted to use it, never re-use this context param number. */ #define I915_CONTEXT_PARAM_RINGSIZE 0xc + +/* + * I915_CONTEXT_PARAM_PROTECTED_CONTENT: + * + * Mark that the context makes use of protected content, which will result + * in the context being invalidated when the protected content session is. 
+ * Given that the protected content session is killed on suspend, the device + * is kept awake for the lifetime of a protected context, so the user should + * make sure to dispose of them once done. + * This flag can only be set at context creation time and, when set to true, + * must be preceded by an explicit setting of I915_CONTEXT_PARAM_RECOVERABLE + * to false. This flag can't be set to true in conjunction with setting the + * I915_CONTEXT_PARAM_BANNABLE flag to false. Creation example: + * + * .. code-block:: C + * + * struct drm_i915_gem_context_create_ext_setparam p_protected = { + * .base = { + * .name = I915_CONTEXT_CREATE_EXT_SETPARAM, + * }, + * .param = { + * .param = I915_CONTEXT_PARAM_PROTECTED_CONTENT, + * .value = 1, + * } + * }; + * struct drm_i915_gem_context_create_ext_setparam p_norecover = { + * .base = { + * .name = I915_CONTEXT_CREATE_EXT_SETPARAM, + * .next_extension = to_user_pointer(&p_protected), + * }, + * .param = { + * .param = I915_CONTEXT_PARAM_RECOVERABLE, + * .value = 0, + * } + * }; + * struct drm_i915_gem_context_create_ext create = { + * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS, + * .extensions = to_user_pointer(&p_norecover); + * }; + * + * ctx_id = gem_context_create_ext(drm_fd, &create); + * + * In addition to the normal failure cases, setting this flag during context + * creation can result in the following errors: + * + * -ENODEV: feature not available + * -EPERM: trying to mark a recoverable or not bannable context as protected + */ +#define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd /* Must be kept compact -- no holes and well documented */ __u64 value; @@ -2979,8 +3028,12 @@ struct drm_i915_gem_create_ext { * * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see * struct drm_i915_gem_create_ext_memory_regions. + * + * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see + * struct drm_i915_gem_create_ext_protected_content. */ #define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0 +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1 __u64 extensions; }; @@ -3038,6 +3091,47 @@ struct drm_i915_gem_create_ext_memory_regions { __u64 regions; }; +/** + * struct drm_i915_gem_create_ext_protected_content - The + * I915_OBJECT_PARAM_PROTECTED_CONTENT extension. + * + * If this extension is provided, buffer contents are expected to be protected + * by PXP encryption and require decryption for scan out and processing. This + * is only possible on platforms that have PXP enabled, on all other scenarios + * using this extension will cause the ioctl to fail and return -ENODEV. The + * flags parameter is reserved for future expansion and must currently be set + * to zero. + * + * The buffer contents are considered invalid after a PXP session teardown. + * + * The encryption is guaranteed to be processed correctly only if the object + * is submitted with a context created using the + * I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. This will also enable extra checks + * at submission time on the validity of the objects involved. + * + * Below is an example on how to create a protected object: + * + * .. code-block:: C + * + * struct drm_i915_gem_create_ext_protected_content protected_ext = { + * .base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT }, + * .flags = 0, + * }; + * struct drm_i915_gem_create_ext create_ext = { + * .size = PAGE_SIZE, + * .extensions = (uintptr_t)&protected_ext, + * }; + * + * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext); + * if (err) ... + */ +struct drm_i915_gem_create_ext_protected_content { + /** @base: Extension link. 
See struct i915_user_extension. */ + struct i915_user_extension base; + /** @flags: reserved for future usage, currently MBZ */ + __u32 flags; +}; + /* ID of the protected content session managed by i915 when PXP is active */ #define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf -- cgit v1.2.3 From ea673f17ab7638793a8b9e7fe04b4cb758fa01f1 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Tue, 12 Oct 2021 15:12:45 -0700 Subject: drm/i915/uapi: Add comment clarifying purpose of I915_TILING_* values MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The I915_TILING_* values in our uapi header are intended solely for use with the old get_tiling/set_tiling ioctls that operate on hardware de-tiling fences; all other uapi communication about tiling types is done via framebuffer modifiers rather than with these old values. On newer Intel platforms de-tiling fences no longer exist, so the old get_tiling/set_tiling ioctls are no longer usable and will always return -EOPNOTSUPP. This means there's no reason to add new tiling types (such as the Tile4 format introduced by Xe_HP) to the uapi header here. Any kernel-internal code that needs to represent a tiling format should either rely on framebuffer modifiers (as the display code does) or use some kind of non-uapi enum (as the GEM blt selftest now does). References: https://patchwork.freedesktop.org/patch/456656/?series=95308 Cc: Ville Syrjälä Signed-off-by: Matt Roper Reviewed-by: Caz Yokoyama Link: https://patchwork.freedesktop.org/patch/msgid/20211012221245.2609670-1-matthew.d.roper@intel.com --- include/uapi/drm/i915_drm.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include/uapi') diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index aa2a7eccfb94..9b8e61163c39 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -1522,6 +1522,12 @@ struct drm_i915_gem_caching { #define I915_TILING_NONE 0 #define I915_TILING_X 1 #define I915_TILING_Y 2 +/* + * Do not add new tiling types here. The I915_TILING_* values are for + * de-tiling fence registers that no longer exist on modern platforms. Although + * the hardware may support new types of tiling in general (e.g., Tile4), we + * do not need to add them to the uapi that is specific to now-defunct ioctls. + */ #define I915_TILING_LAST I915_TILING_Y #define I915_BIT_6_SWIZZLE_NONE 0 -- cgit v1.2.3 From 9409eb35942713d0cdd471e5ff99c93929d6a749 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Thu, 14 Oct 2021 10:19:46 -0700 Subject: drm/i915: Expose logical engine instance to user Expose logical engine instance to user via the query engine info IOCTL. This is required for split-frame workloads, as these need to be placed on engines in a logically contiguous order. The logical mapping can change based on fusing. Rather than requiring the user to have knowledge of the fusing, we simply expose the logical mapping with the existing query engine info IOCTL.
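For illustration, a minimal userspace sketch of consuming the new field might look as follows. This is hedged, not part of the patch: it assumes an open DRM fd, uses the standard two-call size-then-fill pattern of DRM_I915_QUERY, and trims error handling.

.. code-block:: C

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Sketch only: dump the logical mapping of all user-visible engines. */
    static void dump_logical_map(int fd)
    {
        struct drm_i915_query_item item = {
            .query_id = DRM_I915_QUERY_ENGINE_INFO,
        };
        struct drm_i915_query query = {
            .num_items = 1,
            .items_ptr = (uintptr_t)&item,
        };
        struct drm_i915_query_engine_info *info;
        uint32_t i;

        /* First call: the kernel fills item.length with the buffer size. */
        if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0)
            return;

        info = calloc(1, item.length);
        if (!info)
            return;
        item.data_ptr = (uintptr_t)info;

        /* Second call: the kernel copies out the engine array. */
        if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query) == 0) {
            for (i = 0; i < info->num_engines; i++) {
                const struct drm_i915_engine_info *e = &info->engines[i];

                /* Only trust logical_instance when the kernel says it's set. */
                if (e->flags & I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE)
                    printf("class %u, instance %u -> logical %u\n",
                           e->engine.engine_class,
                           e->engine.engine_instance,
                           e->logical_instance);
            }
        }
        free(info);
    }

Reusing the existing flags/rsvd1 space keeps the query ABI unchanged for old userspace while letting new userspace detect the field's presence at runtime.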
IGT: https://patchwork.freedesktop.org/patch/445637/?series=92854&rev=1 media UMD: https://github.com/intel/media-driver/pull/1252 v2: (Daniel Vetter) - Add IGT link, placeholder for media UMD Cc: Tvrtko Ursulin Signed-off-by: Matthew Brost Reviewed-by: John Harrison Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-7-matthew.brost@intel.com --- drivers/gpu/drm/i915/i915_query.c | 2 ++ include/uapi/drm/i915_drm.h | 8 +++++++- 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c index 5e2b909827f4..51b368be0fc4 100644 --- a/drivers/gpu/drm/i915/i915_query.c +++ b/drivers/gpu/drm/i915/i915_query.c @@ -124,7 +124,9 @@ query_engine_info(struct drm_i915_private *i915, for_each_uabi_engine(engine, i915) { info.engine.engine_class = engine->uabi_class; info.engine.engine_instance = engine->uabi_instance; + info.flags = I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE; info.capabilities = engine->uabi_capabilities; + info.logical_instance = ilog2(engine->logical_mask); if (copy_to_user(info_ptr, &info, sizeof(info))) return -EFAULT; diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index aa2a7eccfb94..0179f92e0916 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -2775,14 +2775,20 @@ struct drm_i915_engine_info { /** @flags: Engine flags. */ __u64 flags; +#define I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE (1 << 0) /** @capabilities: Capabilities of this engine. */ __u64 capabilities; #define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0) #define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1) + /** @logical_instance: Logical instance of engine */ + __u16 logical_instance; + /** @rsvd1: Reserved fields. */ - __u64 rsvd1[4]; + __u16 rsvd1[3]; + /** @rsvd2: Reserved fields. */ + __u64 rsvd2[3]; }; /** -- cgit v1.2.3 From e5e32171a2cf1e434d4f88e12467f3e47d0ec618 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Thu, 14 Oct 2021 10:19:56 -0700 Subject: drm/i915/guc: Connect UAPI to GuC multi-lrc interface Introduce 'set parallel submit' extension to connect UAPI to GuC multi-lrc interface. Kernel doc in new uAPI should explain it all. 
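To make the intended flow concrete, here is a hedged userspace sketch of configuring one parallel slot via the new extension. Note that at this point in the series the kernel still rejects the extension with -ENODEV ("Disabling for now" in the diff below), so this shows intended usage only; the choice of two video-class engines is an assumption, and error handling is trimmed.

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /*
     * Sketch only: one parallel slot with width=2 and num_siblings=1,
     * i.e. two batches per execbuf with a fixed placement (Example 1 in
     * the kernel doc below).
     */
    static int create_parallel_context(int fd, uint32_t *ctx_id)
    {
        I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(parallel, 2) = {
            .base.name = I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT,
            .engine_index = 0,
            .width = 2,
            .num_siblings = 1,
            .engines = {
                { I915_ENGINE_CLASS_VIDEO, 0 },  /* assumed placement */
                { I915_ENGINE_CLASS_VIDEO, 1 },
            },
        };
        I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
            .extensions = (uintptr_t)&parallel,
            /* Slot 0 is filled in by the parallel extension above. */
            .engines = {
                { I915_ENGINE_CLASS_INVALID,
                  I915_ENGINE_CLASS_INVALID_NONE },
            },
        };
        struct drm_i915_gem_context_create_ext_setparam p_engines = {
            .base.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
            .param = {
                .param = I915_CONTEXT_PARAM_ENGINES,
                .value = (uintptr_t)&engines,
                .size = sizeof(engines),
            },
        };
        struct drm_i915_gem_context_create_ext create = {
            .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
            .extensions = (uintptr_t)&p_engines,
        };
        int ret = ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);

        if (!ret)
            *ctx_id = create.ctx_id;
        return ret;
    }

Once the slot is configured for two batches, every execbuf on that slot implicitly submits two batch buffers, as the kernel doc in this patch explains.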
IGT: https://patchwork.freedesktop.org/patch/447008/?series=93071&rev=1 media UMD: https://github.com/intel/media-driver/pull/1252 v2: (Daniel Vetter) - Add IGT link and placeholder for media UMD link v3: (Kernel test robot) - Fix warning in unpin engines call (John Harrison) - Reword a bunch of the kernel doc v4: (John Harrison) - Add comment why perma-pin is done after setting gem context - Update some comments / docs for proto contexts v5: (John Harrison) - Rework perma-pin comment - Add BUG_IN if context is pinned when setting gem context Cc: Tvrtko Ursulin Signed-off-by: Matthew Brost Reviewed-by: John Harrison Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-17-matthew.brost@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 230 ++++++++++++++++++++- drivers/gpu/drm/i915/gem/i915_gem_context_types.h | 16 +- drivers/gpu/drm/i915/gt/intel_context_types.h | 9 +- drivers/gpu/drm/i915/gt/intel_engine.h | 12 +- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 6 +- .../gpu/drm/i915/gt/intel_execlists_submission.c | 6 +- drivers/gpu/drm/i915/gt/selftest_execlists.c | 12 +- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 114 ++++++++-- include/uapi/drm/i915_drm.h | 131 ++++++++++++ 9 files changed, 505 insertions(+), 31 deletions(-) (limited to 'include/uapi') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index d225d3dd0b40..9a00f11fef46 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -556,9 +556,150 @@ set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data) return 0; } +static int +set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base, + void *data) +{ + struct i915_context_engines_parallel_submit __user *ext = + container_of_user(base, typeof(*ext), base); + const struct set_proto_ctx_engines *set = data; + struct drm_i915_private *i915 = set->i915; + u64 flags; + int err = 0, n, i, j; + u16 slot, width, num_siblings; + struct intel_engine_cs **siblings = NULL; + intel_engine_mask_t prev_mask; + + /* Disabling for now */ + return -ENODEV; + + /* FIXME: This is NIY for execlists */ + if (!(intel_uc_uses_guc_submission(&i915->gt.uc))) + return -ENODEV; + + if (get_user(slot, &ext->engine_index)) + return -EFAULT; + + if (get_user(width, &ext->width)) + return -EFAULT; + + if (get_user(num_siblings, &ext->num_siblings)) + return -EFAULT; + + if (slot >= set->num_engines) { + drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n", + slot, set->num_engines); + return -EINVAL; + } + + if (set->engines[slot].type != I915_GEM_ENGINE_TYPE_INVALID) { + drm_dbg(&i915->drm, + "Invalid placement[%d], already occupied\n", slot); + return -EINVAL; + } + + if (get_user(flags, &ext->flags)) + return -EFAULT; + + if (flags) { + drm_dbg(&i915->drm, "Unknown flags 0x%02llx", flags); + return -EINVAL; + } + + for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) { + err = check_user_mbz(&ext->mbz64[n]); + if (err) + return err; + } + + if (width < 2) { + drm_dbg(&i915->drm, "Width (%d) < 2\n", width); + return -EINVAL; + } + + if (num_siblings < 1) { + drm_dbg(&i915->drm, "Number siblings (%d) < 1\n", + num_siblings); + return -EINVAL; + } + + siblings = kmalloc_array(num_siblings * width, + sizeof(*siblings), + GFP_KERNEL); + if (!siblings) + return -ENOMEM; + + /* Create contexts / engines */ + for (i = 0; i < width; ++i) { + intel_engine_mask_t current_mask = 0; + struct 
i915_engine_class_instance prev_engine; + + for (j = 0; j < num_siblings; ++j) { + struct i915_engine_class_instance ci; + + n = i * num_siblings + j; + if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) { + err = -EFAULT; + goto out_err; + } + + siblings[n] = + intel_engine_lookup_user(i915, ci.engine_class, + ci.engine_instance); + if (!siblings[n]) { + drm_dbg(&i915->drm, + "Invalid sibling[%d]: { class:%d, inst:%d }\n", + n, ci.engine_class, ci.engine_instance); + err = -EINVAL; + goto out_err; + } + + if (n) { + if (prev_engine.engine_class != + ci.engine_class) { + drm_dbg(&i915->drm, + "Mismatched class %d, %d\n", + prev_engine.engine_class, + ci.engine_class); + err = -EINVAL; + goto out_err; + } + } + + prev_engine = ci; + current_mask |= siblings[n]->logical_mask; + } + + if (i > 0) { + if (current_mask != prev_mask << 1) { + drm_dbg(&i915->drm, + "Non contiguous logical mask 0x%x, 0x%x\n", + prev_mask, current_mask); + err = -EINVAL; + goto out_err; + } + } + prev_mask = current_mask; + } + + set->engines[slot].type = I915_GEM_ENGINE_TYPE_PARALLEL; + set->engines[slot].num_siblings = num_siblings; + set->engines[slot].width = width; + set->engines[slot].siblings = siblings; + + return 0; + +out_err: + kfree(siblings); + + return err; +} + static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = { [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance, [I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond, + [I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT] = + set_proto_ctx_engines_parallel_submit, }; static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv, @@ -794,6 +935,7 @@ static int intel_context_set_gem(struct intel_context *ce, GEM_BUG_ON(rcu_access_pointer(ce->gem_context)); RCU_INIT_POINTER(ce->gem_context, ctx); + GEM_BUG_ON(intel_context_is_pinned(ce)); ce->ring_size = SZ_16K; i915_vm_put(ce->vm); @@ -818,6 +960,25 @@ static int intel_context_set_gem(struct intel_context *ce, return ret; } +static void __unpin_engines(struct i915_gem_engines *e, unsigned int count) +{ + while (count--) { + struct intel_context *ce = e->engines[count], *child; + + if (!ce || !test_bit(CONTEXT_PERMA_PIN, &ce->flags)) + continue; + + for_each_child(ce, child) + intel_context_unpin(child); + intel_context_unpin(ce); + } +} + +static void unpin_engines(struct i915_gem_engines *e) +{ + __unpin_engines(e, e->num_engines); +} + static void __free_engines(struct i915_gem_engines *e, unsigned int count) { while (count--) { @@ -933,6 +1094,40 @@ free_engines: return err; } +static int perma_pin_contexts(struct intel_context *ce) +{ + struct intel_context *child; + int i = 0, j = 0, ret; + + GEM_BUG_ON(!intel_context_is_parent(ce)); + + ret = intel_context_pin(ce); + if (unlikely(ret)) + return ret; + + for_each_child(ce, child) { + ret = intel_context_pin(child); + if (unlikely(ret)) + goto unwind; + ++i; + } + + set_bit(CONTEXT_PERMA_PIN, &ce->flags); + + return 0; + +unwind: + intel_context_unpin(ce); + for_each_child(ce, child) { + if (j++ < i) + intel_context_unpin(child); + else + break; + } + + return ret; +} + static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx, unsigned int num_engines, struct i915_gem_proto_engine *pe) @@ -946,7 +1141,7 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx, e->num_engines = num_engines; for (n = 0; n < num_engines; n++) { - struct intel_context *ce; + struct intel_context *ce, *child; int ret; switch (pe[n].type) { @@ -956,7 +1151,13 @@ static struct 
i915_gem_engines *user_engines(struct i915_gem_context *ctx, case I915_GEM_ENGINE_TYPE_BALANCED: ce = intel_engine_create_virtual(pe[n].siblings, - pe[n].num_siblings); + pe[n].num_siblings, 0); + break; + + case I915_GEM_ENGINE_TYPE_PARALLEL: + ce = intel_engine_create_parallel(pe[n].siblings, + pe[n].num_siblings, + pe[n].width); break; case I915_GEM_ENGINE_TYPE_INVALID: @@ -977,6 +1178,30 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx, err = ERR_PTR(ret); goto free_engines; } + for_each_child(ce, child) { + ret = intel_context_set_gem(child, ctx, pe->sseu); + if (ret) { + err = ERR_PTR(ret); + goto free_engines; + } + } + + /* + * XXX: Must be done after calling intel_context_set_gem as that + * function changes the ring size. The ring is allocated when + * the context is pinned. If the ring size is changed after + * allocation we have a mismatch of the ring size and will cause + * the context to hang. Presumably with a bit of reordering we + * could move the perma-pin step to the backend function + * intel_engine_create_parallel. + */ + if (pe[n].type == I915_GEM_ENGINE_TYPE_PARALLEL) { + ret = perma_pin_contexts(ce); + if (ret) { + err = ERR_PTR(ret); + goto free_engines; + } + } } return e; @@ -1219,6 +1444,7 @@ static void context_close(struct i915_gem_context *ctx) /* Flush any concurrent set_engines() */ mutex_lock(&ctx->engines_mutex); + unpin_engines(__context_engines_static(ctx)); engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1)); i915_gem_context_set_closed(ctx); mutex_unlock(&ctx->engines_mutex); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h index a627b09c4680..282cdb8a5c5a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h @@ -78,13 +78,16 @@ enum i915_gem_engine_type { /** @I915_GEM_ENGINE_TYPE_BALANCED: A load-balanced engine set */ I915_GEM_ENGINE_TYPE_BALANCED, + + /** @I915_GEM_ENGINE_TYPE_PARALLEL: A parallel engine set */ + I915_GEM_ENGINE_TYPE_PARALLEL, }; /** * struct i915_gem_proto_engine - prototype engine * * This struct describes an engine that a context may contain. Engines - * have three types: + * have four types: * * - I915_GEM_ENGINE_TYPE_INVALID: Invalid engines can be created but they * show up as a NULL in i915_gem_engines::engines[i] and any attempt to @@ -97,6 +100,10 @@ enum i915_gem_engine_type { * * - I915_GEM_ENGINE_TYPE_BALANCED: A load-balanced engine set, described * i915_gem_proto_engine::num_siblings and i915_gem_proto_engine::siblings. + * + * - I915_GEM_ENGINE_TYPE_PARALLEL: A parallel submission engine set, described + * i915_gem_proto_engine::width, i915_gem_proto_engine::num_siblings, and + * i915_gem_proto_engine::siblings. 
*/ struct i915_gem_proto_engine { /** @type: Type of this engine */ @@ -105,10 +112,13 @@ struct i915_gem_proto_engine { /** @engine: Engine, for physical */ struct intel_engine_cs *engine; - /** @num_siblings: Number of balanced siblings */ + /** @num_siblings: Number of balanced or parallel siblings */ unsigned int num_siblings; - /** @siblings: Balanced siblings */ + /** @width: Width of each sibling */ + unsigned int width; + + /** @siblings: Balanced siblings or num_siblings * width for parallel */ struct intel_engine_cs **siblings; /** @sseu: Client-set SSEU parameters */ diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 8309d1141d0a..1d880303a7e4 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -55,9 +55,13 @@ struct intel_context_ops { void (*reset)(struct intel_context *ce); void (*destroy)(struct kref *kref); - /* virtual engine/context interface */ + /* virtual/parallel engine/context interface */ struct intel_context *(*create_virtual)(struct intel_engine_cs **engine, - unsigned int count); + unsigned int count, + unsigned long flags); + struct intel_context *(*create_parallel)(struct intel_engine_cs **engines, + unsigned int num_siblings, + unsigned int width); struct intel_engine_cs *(*get_sibling)(struct intel_engine_cs *engine, unsigned int sibling); }; @@ -113,6 +117,7 @@ struct intel_context { #define CONTEXT_NOPREEMPT 8 #define CONTEXT_LRCA_DIRTY 9 #define CONTEXT_GUC_INIT 10 +#define CONTEXT_PERMA_PIN 11 struct { u64 timeout_us; diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index d5ac49c0691e..08559ace0ada 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -282,9 +282,19 @@ intel_engine_has_preempt_reset(const struct intel_engine_cs *engine) return intel_engine_has_preemption(engine); } +#define FORCE_VIRTUAL BIT(0) struct intel_context * intel_engine_create_virtual(struct intel_engine_cs **siblings, - unsigned int count); + unsigned int count, unsigned long flags); + +static inline struct intel_context * +intel_engine_create_parallel(struct intel_engine_cs **engines, + unsigned int num_engines, + unsigned int width) +{ + GEM_BUG_ON(!engines[0]->cops->create_parallel); + return engines[0]->cops->create_parallel(engines, num_engines, width); +} static inline bool intel_virtual_engine_has_heartbeat(const struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 2eb798ad068b..ff6753ccb129 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -1953,16 +1953,16 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now) struct intel_context * intel_engine_create_virtual(struct intel_engine_cs **siblings, - unsigned int count) + unsigned int count, unsigned long flags) { if (count == 0) return ERR_PTR(-EINVAL); - if (count == 1) + if (count == 1 && !(flags & FORCE_VIRTUAL)) return intel_context_create(siblings[0]); GEM_BUG_ON(!siblings[0]->cops->create_virtual); - return siblings[0]->cops->create_virtual(siblings, count); + return siblings[0]->cops->create_virtual(siblings, count, flags); } struct i915_request * diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c index 43a74b216efb..bedb80057046 100644 --- 
a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c @@ -201,7 +201,8 @@ static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine) } static struct intel_context * -execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count); +execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count, + unsigned long flags); static struct i915_request * __active_request(const struct intel_timeline * const tl, @@ -3784,7 +3785,8 @@ unlock: } static struct intel_context * -execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count) +execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count, + unsigned long flags) { struct virtual_engine *ve; unsigned int n; diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c index 25a8c4f62b0d..b367ecfa42de 100644 --- a/drivers/gpu/drm/i915/gt/selftest_execlists.c +++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c @@ -3733,7 +3733,7 @@ static int nop_virtual_engine(struct intel_gt *gt, GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve)); for (n = 0; n < nctx; n++) { - ve[n] = intel_engine_create_virtual(siblings, nsibling); + ve[n] = intel_engine_create_virtual(siblings, nsibling, 0); if (IS_ERR(ve[n])) { err = PTR_ERR(ve[n]); nctx = n; @@ -3929,7 +3929,7 @@ static int mask_virtual_engine(struct intel_gt *gt, * restrict it to our desired engine within the virtual engine. */ - ve = intel_engine_create_virtual(siblings, nsibling); + ve = intel_engine_create_virtual(siblings, nsibling, 0); if (IS_ERR(ve)) { err = PTR_ERR(ve); goto out_close; @@ -4060,7 +4060,7 @@ static int slicein_virtual_engine(struct intel_gt *gt, i915_request_add(rq); } - ce = intel_engine_create_virtual(siblings, nsibling); + ce = intel_engine_create_virtual(siblings, nsibling, 0); if (IS_ERR(ce)) { err = PTR_ERR(ce); goto out; @@ -4112,7 +4112,7 @@ static int sliceout_virtual_engine(struct intel_gt *gt, /* XXX We do not handle oversubscription and fairness with normal rq */ for (n = 0; n < nsibling; n++) { - ce = intel_engine_create_virtual(siblings, nsibling); + ce = intel_engine_create_virtual(siblings, nsibling, 0); if (IS_ERR(ce)) { err = PTR_ERR(ce); goto out; @@ -4214,7 +4214,7 @@ static int preserved_virtual_engine(struct intel_gt *gt, if (err) goto out_scratch; - ve = intel_engine_create_virtual(siblings, nsibling); + ve = intel_engine_create_virtual(siblings, nsibling, 0); if (IS_ERR(ve)) { err = PTR_ERR(ve); goto out_scratch; @@ -4354,7 +4354,7 @@ static int reset_virtual_engine(struct intel_gt *gt, if (igt_spinner_init(&spin, gt)) return -ENOMEM; - ve = intel_engine_create_virtual(siblings, nsibling); + ve = intel_engine_create_virtual(siblings, nsibling, 0); if (IS_ERR(ve)) { err = PTR_ERR(ve); goto out_spin; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 938dc34e8d3a..7c12364a017a 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -124,7 +124,13 @@ struct guc_virtual_engine { }; static struct intel_context * -guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count); +guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count, + unsigned long flags); + +static struct intel_context * +guc_create_parallel(struct intel_engine_cs **engines, + unsigned int num_siblings, + unsigned int width); #define GUC_REQUEST_SIZE 64 
/* bytes */ @@ -2609,6 +2615,7 @@ static const struct intel_context_ops guc_context_ops = { .destroy = guc_context_destroy, .create_virtual = guc_create_virtual, + .create_parallel = guc_create_parallel, }; static void submit_work_cb(struct irq_work *wrk) @@ -2858,8 +2865,6 @@ static const struct intel_context_ops virtual_guc_context_ops = { .get_sibling = guc_virtual_get_sibling, }; -/* Future patches will use this function */ -__maybe_unused static int guc_parent_context_pin(struct intel_context *ce, void *vaddr) { struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0); @@ -2876,8 +2881,6 @@ static int guc_parent_context_pin(struct intel_context *ce, void *vaddr) return __guc_context_pin(ce, engine, vaddr); } -/* Future patches will use this function */ -__maybe_unused static int guc_child_context_pin(struct intel_context *ce, void *vaddr) { struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0); @@ -2889,8 +2892,6 @@ static int guc_child_context_pin(struct intel_context *ce, void *vaddr) return __guc_context_pin(ce, engine, vaddr); } -/* Future patches will use this function */ -__maybe_unused static void guc_parent_context_unpin(struct intel_context *ce) { struct intel_guc *guc = ce_to_guc(ce); @@ -2906,8 +2907,6 @@ static void guc_parent_context_unpin(struct intel_context *ce) lrc_unpin(ce); } -/* Future patches will use this function */ -__maybe_unused static void guc_child_context_unpin(struct intel_context *ce) { GEM_BUG_ON(context_enabled(ce)); @@ -2918,8 +2917,6 @@ static void guc_child_context_unpin(struct intel_context *ce) lrc_unpin(ce); } -/* Future patches will use this function */ -__maybe_unused static void guc_child_context_post_unpin(struct intel_context *ce) { GEM_BUG_ON(!intel_context_is_child(ce)); @@ -2930,6 +2927,98 @@ static void guc_child_context_post_unpin(struct intel_context *ce) intel_context_unpin(ce->parallel.parent); } +static void guc_child_context_destroy(struct kref *kref) +{ + struct intel_context *ce = container_of(kref, typeof(*ce), ref); + + __guc_context_destroy(ce); +} + +static const struct intel_context_ops virtual_parent_context_ops = { + .alloc = guc_virtual_context_alloc, + + .pre_pin = guc_context_pre_pin, + .pin = guc_parent_context_pin, + .unpin = guc_parent_context_unpin, + .post_unpin = guc_context_post_unpin, + + .ban = guc_context_ban, + + .cancel_request = guc_context_cancel_request, + + .enter = guc_virtual_context_enter, + .exit = guc_virtual_context_exit, + + .sched_disable = guc_context_sched_disable, + + .destroy = guc_context_destroy, + + .get_sibling = guc_virtual_get_sibling, +}; + +static const struct intel_context_ops virtual_child_context_ops = { + .alloc = guc_virtual_context_alloc, + + .pre_pin = guc_context_pre_pin, + .pin = guc_child_context_pin, + .unpin = guc_child_context_unpin, + .post_unpin = guc_child_context_post_unpin, + + .cancel_request = guc_context_cancel_request, + + .enter = guc_virtual_context_enter, + .exit = guc_virtual_context_exit, + + .destroy = guc_child_context_destroy, + + .get_sibling = guc_virtual_get_sibling, +}; + +static struct intel_context * +guc_create_parallel(struct intel_engine_cs **engines, + unsigned int num_siblings, + unsigned int width) +{ + struct intel_engine_cs **siblings = NULL; + struct intel_context *parent = NULL, *ce, *err; + int i, j; + + siblings = kmalloc_array(num_siblings, + sizeof(*siblings), + GFP_KERNEL); + if (!siblings) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < width; ++i) { + for (j = 0; j < num_siblings; ++j) + siblings[j] = 
engines[i * num_siblings + j]; + + ce = intel_engine_create_virtual(siblings, num_siblings, + FORCE_VIRTUAL); + if (!ce) { + err = ERR_PTR(-ENOMEM); + goto unwind; + } + + if (i == 0) { + parent = ce; + parent->ops = &virtual_parent_context_ops; + } else { + ce->ops = &virtual_child_context_ops; + intel_context_bind_parent_child(parent, ce); + } + } + + kfree(siblings); + return parent; + +unwind: + if (parent) + intel_context_put(parent); + kfree(siblings); + return err; +} + static bool guc_irq_enable_breadcrumbs(struct intel_breadcrumbs *b) { @@ -3756,7 +3845,8 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc, } static struct intel_context * -guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count) +guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count, + unsigned long flags) { struct guc_virtual_engine *ve; struct intel_guc *guc; diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 0179f92e0916..c2a63e1584cb 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -1824,6 +1824,7 @@ struct drm_i915_gem_context_param { * Extensions: * i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE) * i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND) + * i915_context_engines_parallel_submit (I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT) */ #define I915_CONTEXT_PARAM_ENGINES 0xa @@ -2098,6 +2099,135 @@ struct i915_context_engines_bond { struct i915_engine_class_instance engines[N__]; \ } __attribute__((packed)) name__ +/** + * struct i915_context_engines_parallel_submit - Configure engine for + * parallel submission. + * + * Setup a slot in the context engine map to allow multiple BBs to be submitted + * in a single execbuf IOCTL. Those BBs will then be scheduled to run on the GPU + * in parallel. Multiple hardware contexts are created internally in the i915 to + * run these BBs. Once a slot is configured for N BBs only N BBs can be + * submitted in each execbuf IOCTL and this is implicit behavior e.g. The user + * doesn't tell the execbuf IOCTL there are N BBs, the execbuf IOCTL knows how + * many BBs there are based on the slot's configuration. The N BBs are the last + * N buffer objects or first N if I915_EXEC_BATCH_FIRST is set. + * + * The default placement behavior is to create implicit bonds between each + * context if each context maps to more than 1 physical engine (e.g. context is + * a virtual engine). Also we only allow contexts of same engine class and these + * contexts must be in logically contiguous order. Examples of the placement + * behavior are described below. Lastly, the default is to not allow BBs to be + * preempted mid-batch. Rather insert coordinated preemption points on all + * hardware contexts between each set of BBs. Flags could be added in the future + * to change both of these default behaviors. + * + * Returns -EINVAL if hardware context placement configuration is invalid or if + * the placement configuration isn't supported on the platform / submission + * interface. + * Returns -ENODEV if extension isn't supported on the platform / submission + * interface. + * + * .. 
code-block:: none + * + * Examples syntax: + * CS[X] = generic engine of same class, logical instance X + * INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE + * + * Example 1 pseudo code: + * set_engines(INVALID) + * set_parallel(engine_index=0, width=2, num_siblings=1, + * engines=CS[0],CS[1]) + * + * Results in the following valid placement: + * CS[0], CS[1] + * + * Example 2 pseudo code: + * set_engines(INVALID) + * set_parallel(engine_index=0, width=2, num_siblings=2, + * engines=CS[0],CS[2],CS[1],CS[3]) + * + * Results in the following valid placements: + * CS[0], CS[1] + * CS[2], CS[3] + * + * This can be thought of as two virtual engines, each containing two + * engines thereby making a 2D array. However, there are bonds tying the + * entries together and placing restrictions on how they can be scheduled. + * Specifically, the scheduler can choose only vertical columns from the 2D + * array. That is, CS[0] is bonded to CS[1] and CS[2] to CS[3]. So if the + * scheduler wants to submit to CS[0], it must also choose CS[1] and vice + * versa. Same for CS[2] requires also using CS[3]. + * VE[0] = CS[0], CS[2] + * VE[1] = CS[1], CS[3] + * + * Example 3 pseudo code: + * set_engines(INVALID) + * set_parallel(engine_index=0, width=2, num_siblings=2, + * engines=CS[0],CS[1],CS[1],CS[3]) + * + * Results in the following valid and invalid placements: + * CS[0], CS[1] + * CS[1], CS[3] - Not logically contiguous, return -EINVAL + */ +struct i915_context_engines_parallel_submit { + /** + * @base: base user extension. + */ + struct i915_user_extension base; + + /** + * @engine_index: slot for parallel engine + */ + __u16 engine_index; + + /** + * @width: number of contexts per parallel engine or in other words the + * number of batches in each submission + */ + __u16 width; + + /** + * @num_siblings: number of siblings per context or in other words the + * number of possible placements for each submission + */ + __u16 num_siblings; + + /** + * @mbz16: reserved for future use; must be zero + */ + __u16 mbz16; + + /** + * @flags: all undefined flags must be zero, currently not defined flags + */ + __u64 flags; + + /** + * @mbz64: reserved for future use; must be zero + */ + __u64 mbz64[3]; + + /** + * @engines: 2-d array of engine instances to configure parallel engine + * + * length = width (i) * num_siblings (j) + * index = j + i * num_siblings + */ + struct i915_engine_class_instance engines[0]; + +} __packed; + +#define I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(name__, N__) struct { \ + struct i915_user_extension base; \ + __u16 engine_index; \ + __u16 width; \ + __u16 num_siblings; \ + __u16 mbz16; \ + __u64 flags; \ + __u64 mbz64[3]; \ + struct i915_engine_class_instance engines[N__]; \ +} __attribute__((packed)) name__ + /** * DOC: Context Engine Map uAPI * @@ -2157,6 +2287,7 @@ struct i915_context_param_engines { __u64 extensions; /* linked chain of extension blocks, 0 terminates */ #define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */ #define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */ +#define I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT 2 /* see i915_context_engines_parallel_submit */ struct i915_engine_class_instance engines[0]; } __attribute__((packed)); -- cgit v1.2.3 From 31fa8cbce4664946a1688898410fee41ad05364d Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Thu, 28 Oct 2021 02:31:40 +0300 Subject: drm: Add R10 and R12 FourCC Add FourCCs for 10- and 12-bit red formats with padding to 16 bits. 
They correspond to the V4L2 10- and 12-bit greyscale (V4L2_PIX_FMT_Y10 and V4L2_PIX_FMT_Y12) formats, as well as the Bayer formats with the same bit depth (V4L2_PIX_FMT_SBGGR{10,12} and all other Bayer pattern permutations). These formats are not used by any kernel driver at this point, but need to be exposed to applications by libcamera, which uses DRM FourCCs for pixel formats. Signed-off-by: Laurent Pinchart Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20211027233140.12268-1-laurent.pinchart@ideasonboard.com --- drivers/gpu/drm/drm_fourcc.c | 2 ++ include/uapi/drm/drm_fourcc.h | 6 ++++++ 2 files changed, 8 insertions(+) (limited to 'include/uapi') diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c index 783844bfecc1..25837b1d6639 100644 --- a/drivers/gpu/drm/drm_fourcc.c +++ b/drivers/gpu/drm/drm_fourcc.c @@ -134,6 +134,8 @@ const struct drm_format_info *__drm_format_info(u32 format) static const struct drm_format_info formats[] = { { .format = DRM_FORMAT_C8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 }, { .format = DRM_FORMAT_R8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_R10, .depth = 10, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_R12, .depth = 12, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 }, { .format = DRM_FORMAT_RGB332, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 }, { .format = DRM_FORMAT_BGR233, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 }, { .format = DRM_FORMAT_XRGB4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 }, diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index 45a914850be0..7f652c96845b 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -104,6 +104,12 @@ extern "C" { /* 8 bpp Red */ #define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */ +/* 10 bpp Red */ +#define DRM_FORMAT_R10 fourcc_code('R', '1', '0', ' ') /* [15:0] x:R 6:10 little endian */ + +/* 12 bpp Red */ +#define DRM_FORMAT_R12 fourcc_code('R', '1', '2', ' ') /* [15:0] x:R 4:12 little endian */ + /* 16 bpp Red */ #define DRM_FORMAT_R16 fourcc_code('R', '1', '6', ' ') /* [15:0] R little endian */ -- cgit v1.2.3 From 72f4c9d57082cdd4054b599b3387220efd944095 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 25 Oct 2021 15:06:34 -0400 Subject: drm/amdgpu/UAPI: rearrange header to better align related items MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move the RAS query parameters to align with the INFO query where they are used. No functional change. 
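For context on where these parameters are consumed, a hedged sketch of the INFO query follows (not part of the patch; assumes an open DRM fd, error handling trimmed):

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/amdgpu_drm.h>

    /* Sketch only: fetch the RAS mask of enabled features and test one bit. */
    static int umc_ras_enabled(int fd)
    {
        uint64_t mask = 0;
        struct drm_amdgpu_info request = {
            .return_pointer = (uintptr_t)&mask,
            .return_size = sizeof(mask),
            .query = AMDGPU_INFO_RAS_ENABLED_FEATURES,
        };

        if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request))
            return 0;

        return !!(mask & AMDGPU_INFO_RAS_ENABLED_UMC);
    }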
Reviewed-by: Christian König Reviewed-by: Luben Tuikov Signed-off-by: Alex Deucher --- include/uapi/drm/amdgpu_drm.h | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'include/uapi') diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 0cbd1540aeac..26e45fc5eb1a 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -786,13 +786,6 @@ struct drm_amdgpu_cs_chunk_data { #define AMDGPU_INFO_VRAM_LOST_COUNTER 0x1F /* query ras mask of enabled features*/ #define AMDGPU_INFO_RAS_ENABLED_FEATURES 0x20 -/* query video encode/decode caps */ -#define AMDGPU_INFO_VIDEO_CAPS 0x21 - /* Subquery id: Decode */ - #define AMDGPU_INFO_VIDEO_CAPS_DECODE 0 - /* Subquery id: Encode */ - #define AMDGPU_INFO_VIDEO_CAPS_ENCODE 1 - /* RAS MASK: UMC (VRAM) */ #define AMDGPU_INFO_RAS_ENABLED_UMC (1 << 0) /* RAS MASK: SDMA */ @@ -821,6 +814,12 @@ struct drm_amdgpu_cs_chunk_data { #define AMDGPU_INFO_RAS_ENABLED_MP1 (1 << 12) /* RAS MASK: FUSE */ #define AMDGPU_INFO_RAS_ENABLED_FUSE (1 << 13) +/* query video encode/decode caps */ +#define AMDGPU_INFO_VIDEO_CAPS 0x21 + /* Subquery id: Decode */ + #define AMDGPU_INFO_VIDEO_CAPS_DECODE 0 + /* Subquery id: Encode */ + #define AMDGPU_INFO_VIDEO_CAPS_ENCODE 1 #define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0 #define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff -- cgit v1.2.3
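Tying back to the R10/R12 patch above, here is a hedged sketch of the V4L2 correspondence its commit message describes. The helper name is hypothetical; the format constants are the standard kernel ones, with R8/R16 included for context.

.. code-block:: C

    #include <stdint.h>
    #include <drm/drm_fourcc.h>
    #include <linux/videodev2.h>

    /* Sketch only: map V4L2 greyscale formats to their DRM red-plane FourCCs. */
    static uint32_t v4l2_grey_to_drm(uint32_t v4l2_fmt)
    {
        switch (v4l2_fmt) {
        case V4L2_PIX_FMT_GREY: return DRM_FORMAT_R8;
        case V4L2_PIX_FMT_Y10:  return DRM_FORMAT_R10; /* [15:0] x:R 6:10 */
        case V4L2_PIX_FMT_Y12:  return DRM_FORMAT_R12; /* [15:0] x:R 4:12 */
        case V4L2_PIX_FMT_Y16:  return DRM_FORMAT_R16;
        default:                return DRM_FORMAT_INVALID;
        }
    }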