Diffstat (limited to 'include/uapi/drm')
-rw-r--r-- | include/uapi/drm/amdgpu_drm.h | 327
-rw-r--r-- | include/uapi/drm/asahi_drm.h | 1194
-rw-r--r-- | include/uapi/drm/drm.h | 4
-rw-r--r-- | include/uapi/drm/drm_fourcc.h | 86
-rw-r--r-- | include/uapi/drm/ivpu_accel.h | 88
-rw-r--r-- | include/uapi/drm/nova_drm.h | 101
-rw-r--r-- | include/uapi/drm/panthor_drm.h | 109
-rw-r--r-- | include/uapi/drm/virtgpu_drm.h | 6
-rw-r--r-- | include/uapi/drm/xe_drm.h | 251
9 files changed, 2117 insertions(+), 49 deletions(-)
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index aaa4f3bc688b..45c4fa13499c 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -54,6 +54,9 @@ extern "C" { #define DRM_AMDGPU_VM 0x13 #define DRM_AMDGPU_FENCE_TO_HANDLE 0x14 #define DRM_AMDGPU_SCHED 0x15 +#define DRM_AMDGPU_USERQ 0x16 +#define DRM_AMDGPU_USERQ_SIGNAL 0x17 +#define DRM_AMDGPU_USERQ_WAIT 0x18 #define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create) #define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap) @@ -71,6 +74,9 @@ extern "C" { #define DRM_IOCTL_AMDGPU_VM DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm) #define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle) #define DRM_IOCTL_AMDGPU_SCHED DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched) +#define DRM_IOCTL_AMDGPU_USERQ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ, union drm_amdgpu_userq) +#define DRM_IOCTL_AMDGPU_USERQ_SIGNAL DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_SIGNAL, struct drm_amdgpu_userq_signal) +#define DRM_IOCTL_AMDGPU_USERQ_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_WAIT, struct drm_amdgpu_userq_wait) /** * DOC: memory domains @@ -319,6 +325,260 @@ union drm_amdgpu_ctx { union drm_amdgpu_ctx_out out; }; +/* user queue IOCTL operations */ +#define AMDGPU_USERQ_OP_CREATE 1 +#define AMDGPU_USERQ_OP_FREE 2 + +/* queue priority levels */ +/* low < normal low < normal high < high */ +#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK 0x3 +#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT 0 +#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_LOW 0 +#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_LOW 1 +#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_HIGH 2 +#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH 3 /* admin only */ +/* for queues that need access to protected content */ +#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE (1 << 2) + +/* + * This structure is a container to pass input configuration + * info for all supported userqueue related operations. + * For operation AMDGPU_USERQ_OP_CREATE: user is expected + * to set all fields, except the parameter 'queue_id'. + * For operation AMDGPU_USERQ_OP_FREE: the only input parameter expected + * to be set is 'queue_id', everything else is ignored. + */ +struct drm_amdgpu_userq_in { + /** AMDGPU_USERQ_OP_* */ + __u32 op; + /** Queue id passed for operation USERQ_OP_FREE */ + __u32 queue_id; + /** the target GPU engine to execute workload (AMDGPU_HW_IP_*) */ + __u32 ip_type; + /** + * @doorbell_handle: the handle of doorbell GEM object + * associated with this userqueue client. + */ + __u32 doorbell_handle; + /** + * @doorbell_offset: 32-bit offset of the doorbell in the doorbell bo. + * Kernel will generate absolute doorbell offset using doorbell_handle + * and doorbell_offset in the doorbell bo. + */ + __u32 doorbell_offset; + /** + * @flags: flags used for queue parameters + */ + __u32 flags; + /** + * @queue_va: Virtual address of the GPU memory which holds the queue + * object. The queue holds the workload packets. + */ + __u64 queue_va; + /** + * @queue_size: Size of the queue in bytes, this needs to be 256-byte + * aligned. + */ + __u64 queue_size; + /** + * @rptr_va: Virtual address of the GPU memory which holds the ring RPTR.
+ * This object must be at least 8 bytes in size and aligned to an 8-byte offset. + */ + __u64 rptr_va; + /** + * @wptr_va: Virtual address of the GPU memory which holds the ring WPTR. + * This object must be at least 8 bytes in size and aligned to an 8-byte offset. + * + * Queue, RPTR and WPTR can come from the same object, as long as the size + * and alignment related requirements are met. + */ + __u64 wptr_va; + /** + * @mqd: MQD (memory queue descriptor) is a set of parameters which allow + * the GPU to uniquely define and identify a usermode queue. + * + * MQD data can be of different size for different GPU IP/engine and + * their respective versions/revisions, so this points to a __u64 * + * which holds IP specific MQD of this usermode queue. + */ + __u64 mqd; + /** + * @mqd_size: size of MQD data in bytes; it must match the MQD structure + * size of the respective engine/revision defined in UAPI; for example, for + * gfx11 workloads, size = sizeof(drm_amdgpu_userq_mqd_gfx11). + */ + __u64 mqd_size; +}; + +/* The structure to carry output of userqueue ops */ +struct drm_amdgpu_userq_out { + /** + * For operation AMDGPU_USERQ_OP_CREATE: This field contains a unique + * queue ID to represent the newly created userqueue in the system, otherwise + * it should be ignored. + */ + __u32 queue_id; + __u32 _pad; +}; + +union drm_amdgpu_userq { + struct drm_amdgpu_userq_in in; + struct drm_amdgpu_userq_out out; +}; + +/* GFX V11 IP specific MQD parameters */ +struct drm_amdgpu_userq_mqd_gfx11 { + /** + * @shadow_va: Virtual address of the GPU memory to hold the shadow buffer. + * Use the AMDGPU_INFO IOCTL to find the exact size of the object. + */ + __u64 shadow_va; + /** + * @csa_va: Virtual address of the GPU memory to hold the CSA buffer. + * Use the AMDGPU_INFO IOCTL to find the exact size of the object. + */ + __u64 csa_va; +}; + +/* GFX V11 SDMA IP specific MQD parameters */ +struct drm_amdgpu_userq_mqd_sdma_gfx11 { + /** + * @csa_va: Virtual address of the GPU memory to hold the CSA buffer. + * This must be from a separate GPU object, and use the AMDGPU_INFO IOCTL + * to get the size. + */ + __u64 csa_va; +}; + +/* GFX V11 Compute IP specific MQD parameters */ +struct drm_amdgpu_userq_mqd_compute_gfx11 { + /** + * @eop_va: Virtual address of the GPU memory to hold the EOP buffer. + * This must be from a separate GPU object, and use the AMDGPU_INFO IOCTL + * to get the size. + */ + __u64 eop_va; +}; + +/* userq signal/wait ioctl */ +struct drm_amdgpu_userq_signal { + /** + * @queue_id: Queue handle used by the userq fence creation function + * to retrieve the WPTR. + */ + __u32 queue_id; + __u32 pad; + /** + * @syncobj_handles: The list of syncobj handles submitted by the user queue + * job to be signaled. + */ + __u64 syncobj_handles; + /** + * @num_syncobj_handles: A count that represents the number of syncobj handles in + * @syncobj_handles. + */ + __u64 num_syncobj_handles; + /** + * @bo_read_handles: The list of BO handles that the submitted user queue job + * is using for read only. This will update BO fences in the kernel. + */ + __u64 bo_read_handles; + /** + * @bo_write_handles: The list of BO handles that the submitted user queue job + * is using for write only. This will update BO fences in the kernel. + */ + __u64 bo_write_handles; + /** + * @num_bo_read_handles: A count that represents the number of read BO handles in + * @bo_read_handles. + */ + __u32 num_bo_read_handles; + /** + * @num_bo_write_handles: A count that represents the number of write BO handles in + * @bo_write_handles.
+ */ + __u32 num_bo_write_handles; +}; + +struct drm_amdgpu_userq_fence_info { + /** + * @va: A gpu address allocated for each queue which stores the + * read pointer (RPTR) value. + */ + __u64 va; + /** + * @value: A 64-bit value representing the write pointer (WPTR) of the + * queue commands, which is compared with the RPTR value to signal the + * fences. + */ + __u64 value; +}; + +struct drm_amdgpu_userq_wait { + /** + * @waitq_id: Queue handle used by the userq wait IOCTL to retrieve the + * wait queue and maintain the fence driver references in it. + */ + __u32 waitq_id; + __u32 pad; + /** + * @syncobj_handles: The list of syncobj handles submitted by the user queue + * job to get the va/value pairs. + */ + __u64 syncobj_handles; + /** + * @syncobj_timeline_handles: The list of timeline syncobj handles submitted by + * the user queue job to get the va/value pairs at given @syncobj_timeline_points. + */ + __u64 syncobj_timeline_handles; + /** + * @syncobj_timeline_points: The list of timeline syncobj points submitted by the + * user queue job for the corresponding @syncobj_timeline_handles. + */ + __u64 syncobj_timeline_points; + /** + * @bo_read_handles: The list of read BO handles submitted by the user queue + * job to get the va/value pairs. + */ + __u64 bo_read_handles; + /** + * @bo_write_handles: The list of write BO handles submitted by the user queue + * job to get the va/value pairs. + */ + __u64 bo_write_handles; + /** + * @num_syncobj_timeline_handles: A count that represents the number of timeline + * syncobj handles in @syncobj_timeline_handles. + */ + __u16 num_syncobj_timeline_handles; + /** + * @num_fences: This field can be used both as input and output. As input it defines + * the maximum number of fences that can be returned and as output it will specify + * how many fences were actually returned from the ioctl. + */ + __u16 num_fences; + /** + * @num_syncobj_handles: A count that represents the number of syncobj handles in + * @syncobj_handles. + */ + __u32 num_syncobj_handles; + /** + * @num_bo_read_handles: A count that represents the number of read BO handles in + * @bo_read_handles. + */ + __u32 num_bo_read_handles; + /** + * @num_bo_write_handles: A count that represents the number of write BO handles in + * @bo_write_handles. + */ + __u32 num_bo_write_handles; + /** + * @out_fences: The field is a return value from the ioctl containing the list of + * address/value pairs to wait for. + */ + __u64 out_fences; +}; + /* vm ioctl */ #define AMDGPU_VM_OP_RESERVE_VMID 1 #define AMDGPU_VM_OP_UNRESERVE_VMID 2 @@ -599,6 +859,19 @@ struct drm_amdgpu_gem_va { __u64 offset_in_bo; /** Specify mapping size. Must be correctly aligned. */ __u64 map_size; + /** + * vm_timeline_point is a sequence number used to add a new timeline point. + */ + __u64 vm_timeline_point; + /** + * The vm page table update fence is installed in the given vm_timeline_syncobj_out + * at vm_timeline_point.
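To make the create path concrete, a user-queue creation call might look like the following sketch (illustrative only, not part of the patch: fd is an open render node, and the doorbell GEM handle plus the queue/RPTR/WPTR/shadow/CSA GPU VAs are assumed to have been set up by the caller beforehand):

	struct drm_amdgpu_userq_mqd_gfx11 mqd = {
		.shadow_va = shadow_va,	/* sized per AMDGPU_INFO_UQ_FW_AREAS */
		.csa_va = csa_va,
	};
	union drm_amdgpu_userq args = {
		.in = {
			.op = AMDGPU_USERQ_OP_CREATE,
			.ip_type = AMDGPU_HW_IP_GFX,
			.doorbell_handle = doorbell_handle,
			.doorbell_offset = 0,
			.flags = AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_LOW,
			.queue_va = queue_va,	/* 256-byte aligned ring */
			.queue_size = 0x1000,
			.rptr_va = rptr_va,	/* 8 bytes, 8-byte aligned */
			.wptr_va = wptr_va,
			.mqd = (__u64)(uintptr_t)&mqd,
			.mqd_size = sizeof(mqd),
		},
	};

	if (ioctl(fd, DRM_IOCTL_AMDGPU_USERQ, &args) == 0)
		queue_id = args.out.queue_id;	/* later passed to the signal/wait
						 * ioctls; freed with
						 * AMDGPU_USERQ_OP_FREE */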
+ */ + __u32 vm_timeline_syncobj_out; + /** the number of syncobj handles in @input_fence_syncobj_handles */ + __u32 num_syncobj_handles; + /** Array of sync object handles to wait on for the given input fences */ + __u64 input_fence_syncobj_handles; }; #define AMDGPU_HW_IP_GFX 0 @@ -763,6 +1036,16 @@ struct drm_amdgpu_cs_chunk_cp_gfx_shadow { #define AMDGPU_IDS_FLAGS_TMZ 0x4 #define AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD 0x8 +/* + * Query h/w info: Flag identifying VF/PF/PT mode + */ +#define AMDGPU_IDS_FLAGS_MODE_MASK 0x300 +#define AMDGPU_IDS_FLAGS_MODE_SHIFT 0x8 +#define AMDGPU_IDS_FLAGS_MODE_PF 0x0 +#define AMDGPU_IDS_FLAGS_MODE_VF 0x1 +#define AMDGPU_IDS_FLAGS_MODE_PT 0x2 + /* indicate if acceleration can be working */ #define AMDGPU_INFO_ACCEL_WORKING 0x00 /* get the crtc_id from the mode object id? */ @@ -930,6 +1213,8 @@ struct drm_amdgpu_cs_chunk_cp_gfx_shadow { #define AMDGPU_INFO_MAX_IBS 0x22 /* query last page fault info */ #define AMDGPU_INFO_GPUVM_FAULT 0x23 +/* query FW object size and alignment */ +#define AMDGPU_INFO_UQ_FW_AREAS 0x24 #define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0 #define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff @@ -1083,6 +1368,7 @@ struct drm_amdgpu_info_vbios { #define AMDGPU_VRAM_TYPE_DDR5 10 #define AMDGPU_VRAM_TYPE_LPDDR4 11 #define AMDGPU_VRAM_TYPE_LPDDR5 12 +#define AMDGPU_VRAM_TYPE_HBM3E 13 struct drm_amdgpu_info_device { /** PCI Device ID */ @@ -1188,6 +1474,9 @@ struct drm_amdgpu_info_device { __u32 csa_size; /* context save area base virtual alignment for gfx11 */ __u32 csa_alignment; + /* Userq IP mask (1 << AMDGPU_HW_IP_*) */ + __u32 userq_ip_mask; + __u32 pad; }; struct drm_amdgpu_info_hw_ip { @@ -1206,6 +1495,27 @@ struct drm_amdgpu_info_hw_ip { __u32 ip_discovery_version; }; +/* GFX metadata BO sizes and alignment info (in bytes) */ +struct drm_amdgpu_info_uq_fw_areas_gfx { + /* shadow area size */ + __u32 shadow_size; + /* shadow area base virtual mem alignment */ + __u32 shadow_alignment; + /* context save area size */ + __u32 csa_size; + /* context save area base virtual mem alignment */ + __u32 csa_alignment; +}; + +/* IP specific fw related information used in the + * subquery AMDGPU_INFO_UQ_FW_AREAS + */ +struct drm_amdgpu_info_uq_fw_areas { + union { + struct drm_amdgpu_info_uq_fw_areas_gfx gfx; + }; +}; + struct drm_amdgpu_info_num_handles { /** Max handles as supported by firmware for UVD */ __u32 uvd_max_handles; @@ -1269,6 +1579,23 @@ struct drm_amdgpu_info_gpuvm_fault { __u32 vmhub; }; +struct drm_amdgpu_info_uq_metadata_gfx { + /* shadow area size for gfx11 */ + __u32 shadow_size; + /* shadow area base virtual alignment for gfx11 */ + __u32 shadow_alignment; + /* context save area size for gfx11 */ + __u32 csa_size; + /* context save area base virtual alignment for gfx11 */ + __u32 csa_alignment; +}; + +struct drm_amdgpu_info_uq_metadata { + union { + struct drm_amdgpu_info_uq_metadata_gfx gfx; + }; +}; + /* * Supported GPU families */ diff --git a/include/uapi/drm/asahi_drm.h b/include/uapi/drm/asahi_drm.h new file mode 100644 index 000000000000..de67f1c603af --- /dev/null +++ b/include/uapi/drm/asahi_drm.h @@ -0,0 +1,1194 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) The Asahi Linux Contributors + * Copyright (C) 2018-2023 Collabora Ltd. + * Copyright (C) 2014-2018 Broadcom + */ +#ifndef _ASAHI_DRM_H_ +#define _ASAHI_DRM_H_ + +#include "drm.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +/** + * DOC: Introduction to the Asahi UAPI + * + * This documentation describes the Asahi IOCTLs.
+ * + * Just a few generic rules about the data passed to the Asahi IOCTLs (cribbed + * from Panthor): + * + * - Structures must be aligned on 64-bit/8-byte. If the object is not + * naturally aligned, a padding field must be added. + * - Fields must be explicitly aligned to their natural type alignment with + * pad[0..N] fields. + * - All padding fields will be checked by the driver to make sure they are + * zeroed. + * - Flags can be added, but not removed/replaced. + * - New fields can be added to the main structures (the structures + * directly passed to the ioctl). Those fields can be added at the end of + * the structure, or replace existing padding fields. Any new field being + * added must preserve the behavior that existed before those fields were + * added when a value of zero is passed. + * - New fields can be added to indirect objects (objects pointed by the + * main structure), iff those objects are passed a size to reflect the + * size known by the userspace driver (see + * drm_asahi_cmd_header::size). + * - If the kernel driver is too old to know some fields, those will be + * ignored if zero, and otherwise rejected (and so will be zero on output). + * - If userspace is too old to know some fields, those will be zeroed + * (input) before the structure is parsed by the kernel driver. + * - Each new flag/field addition must come with a driver version update so + * the userspace driver doesn't have to guess which flags are supported. + * - Structures should not contain unions, as this would defeat the + * extensibility of such structures. + * - IOCTLs can't be removed or replaced. New IOCTL IDs should be placed + * at the end of the drm_asahi_ioctl_id enum. + */ + +/** + * enum drm_asahi_ioctl_id - IOCTL IDs + * + * Place new ioctls at the end, don't re-order, don't replace or remove entries. + * + * These IDs are not meant to be used directly. Use the DRM_IOCTL_ASAHI_xxx + * definitions instead. + */ +enum drm_asahi_ioctl_id { + /** @DRM_ASAHI_GET_PARAMS: Query device properties. */ + DRM_ASAHI_GET_PARAMS = 0, + + /** @DRM_ASAHI_GET_TIME: Query device time. */ + DRM_ASAHI_GET_TIME, + + /** @DRM_ASAHI_VM_CREATE: Create a GPU VM address space. */ + DRM_ASAHI_VM_CREATE, + + /** @DRM_ASAHI_VM_DESTROY: Destroy a VM. */ + DRM_ASAHI_VM_DESTROY, + + /** @DRM_ASAHI_VM_BIND: Bind/unbind memory to a VM. */ + DRM_ASAHI_VM_BIND, + + /** @DRM_ASAHI_GEM_CREATE: Create a buffer object. */ + DRM_ASAHI_GEM_CREATE, + + /** + * @DRM_ASAHI_GEM_MMAP_OFFSET: Get offset to pass to mmap() to map a + * given GEM handle. + */ + DRM_ASAHI_GEM_MMAP_OFFSET, + + /** @DRM_ASAHI_GEM_BIND_OBJECT: Bind memory as a special object */ + DRM_ASAHI_GEM_BIND_OBJECT, + + /** @DRM_ASAHI_QUEUE_CREATE: Create a scheduling queue. */ + DRM_ASAHI_QUEUE_CREATE, + + /** @DRM_ASAHI_QUEUE_DESTROY: Destroy a scheduling queue. */ + DRM_ASAHI_QUEUE_DESTROY, + + /** @DRM_ASAHI_SUBMIT: Submit commands to a queue. */ + DRM_ASAHI_SUBMIT, +}; + +#define DRM_ASAHI_MAX_CLUSTERS 64 + +/** + * struct drm_asahi_params_global - Global parameters. + * + * This struct may be queried by drm_asahi_get_params. + */ +struct drm_asahi_params_global { + /** @features: Feature bits from drm_asahi_feature */ + __u64 features; + + /** @gpu_generation: GPU generation, e.g. 13 for G13G */ + __u32 gpu_generation; + + /** @gpu_variant: GPU variant as a character, e.g. 'C' for G13C */ + __u32 gpu_variant; + + /** + * @gpu_revision: GPU revision in BCD, e.g. 0x00 for 'A0' or + * 0x21 for 'C1' + */ + __u32 gpu_revision; + + /** @chip_id: Chip ID in BCD, e.g. 
0x8103 for T8103 */ + __u32 chip_id; + + /** @num_dies: Number of dies in the SoC */ + __u32 num_dies; + + /** @num_clusters_total: Number of GPU clusters (across all dies) */ + __u32 num_clusters_total; + + /** + * @num_cores_per_cluster: Number of logical cores per cluster + * (including inactive/nonexistent) + */ + __u32 num_cores_per_cluster; + + /** @max_frequency_khz: Maximum GPU core clock frequency */ + __u32 max_frequency_khz; + + /** @core_masks: Bitmask of present/enabled cores per cluster */ + __u64 core_masks[DRM_ASAHI_MAX_CLUSTERS]; + + /** + * @vm_start: VM range start VMA. Together with @vm_end, this defines + * the window of valid GPU VAs. Userspace is expected to subdivide VAs + * out of this window. + * + * This window contains all virtual addresses that userspace needs to + * know about. There may be kernel-internal GPU VAs outside this range, + * but that detail is not relevant here. + */ + __u64 vm_start; + + /** @vm_end: VM range end VMA */ + __u64 vm_end; + + /** + * @vm_kernel_min_size: Minimum kernel VMA window size. + * + * When creating a VM, userspace is required to carve out a section of + * virtual addresses (within the range given by @vm_start and + * @vm_end). The kernel will allocate various internal structures + * within the specified VA range. + * + * Allowing userspace to choose the VA range for the kernel, rather than + * the kernel reserving VAs and requiring userspace to cope, can assist + * in implementing SVM. + */ + __u64 vm_kernel_min_size; + + /** + * @max_commands_per_submission: Maximum number of supported commands + * per submission. This mirrors firmware limits. Userspace must split up + * larger command buffers, which may require inserting additional + * synchronization. + */ + __u32 max_commands_per_submission; + + /** + * @max_attachments: Maximum number of drm_asahi_attachment's per + * command + */ + __u32 max_attachments; + + /** + * @command_timestamp_frequency_hz: Timebase frequency for timestamps + * written during command execution, specified via drm_asahi_timestamp + * structures. As this rate is controlled by the firmware, it is a + * queryable parameter. + * + * Userspace must divide by this frequency to convert timestamps to + * seconds, rather than hardcoding a particular firmware's rate. + */ + __u64 command_timestamp_frequency_hz; +}; + +/** + * enum drm_asahi_feature - Feature bits + * + * This covers only features that userspace cannot infer from the architecture + * version. Most features don't need to be here. + */ +enum drm_asahi_feature { + /** + * @DRM_ASAHI_FEATURE_SOFT_FAULTS: GPU has "soft fault" enabled. Shader + * loads of unmapped memory will return zero. Shader stores to unmapped + * memory will be silently discarded. Note that only shader load/store + * is affected. Other hardware units are not affected, notably including + * texture sampling. + * + * Soft fault is set when initializing the GPU and cannot be runtime + * toggled. Therefore, it is exposed as a feature bit and not a + * userspace-settable flag on the VM. When soft fault is enabled, + * userspace can speculate memory accesses more aggressively. + */ + DRM_ASAHI_FEATURE_SOFT_FAULTS = (1UL) << 0, +}; + +/** + * struct drm_asahi_get_params - Arguments passed to DRM_IOCTL_ASAHI_GET_PARAMS + */ +struct drm_asahi_get_params { + /** @param_group: Parameter group to fetch (MBZ) */ + __u32 param_group; + + /** @pad: MBZ */ + __u32 pad; + + /** @pointer: User pointer to write parameter struct */ + __u64 pointer; + + /** + * @size: Size of the user buffer. 
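The @size handshake above is what makes the query forward-compatible; a minimal sketch (assuming an open DRM fd and the usual <stdint.h>/<sys/ioctl.h> includes):

	struct drm_asahi_params_global params = { 0 };
	struct drm_asahi_get_params gp = {
		.param_group = 0,			/* MBZ */
		.pointer = (__u64)(uintptr_t)&params,
		.size = sizeof(params),	/* kernel writes at most this much */
	};

	if (ioctl(fd, DRM_IOCTL_ASAHI_GET_PARAMS, &gp) == 0 &&
	    (params.features & DRM_ASAHI_FEATURE_SOFT_FAULTS)) {
		/* shader loads of unmapped memory read back zero here,
		 * so memory accesses may be speculated more aggressively */
	}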
In case of older userspace, this may + * be less than sizeof(struct drm_asahi_params_global). The kernel will + * not write past the length specified here, allowing extensibility. + */ + __u64 size; +}; + +/** + * struct drm_asahi_vm_create - Arguments passed to DRM_IOCTL_ASAHI_VM_CREATE + */ +struct drm_asahi_vm_create { + /** + * @kernel_start: Start of the kernel-reserved address range. See + * drm_asahi_params_global::vm_kernel_min_size. + * + * Both @kernel_start and @kernel_end must be within the range of + * valid VAs given by drm_asahi_params_global::vm_start and + * drm_asahi_params_global::vm_end. The size of the kernel range + * (@kernel_end - @kernel_start) must be at least + * drm_asahi_params_global::vm_kernel_min_size. + * + * Userspace must not bind any memory on this VM into this reserved + * range, it is for kernel use only. + */ + __u64 kernel_start; + + /** + * @kernel_end: End of the kernel-reserved address range. See + * @kernel_start. + */ + __u64 kernel_end; + + /** @vm_id: Returned VM ID */ + __u32 vm_id; + + /** @pad: MBZ */ + __u32 pad; +}; + +/** + * struct drm_asahi_vm_destroy - Arguments passed to DRM_IOCTL_ASAHI_VM_DESTROY + */ +struct drm_asahi_vm_destroy { + /** @vm_id: VM ID to be destroyed */ + __u32 vm_id; + + /** @pad: MBZ */ + __u32 pad; +}; + +/** + * enum drm_asahi_gem_flags - Flags for GEM creation + */ +enum drm_asahi_gem_flags { + /** + * @DRM_ASAHI_GEM_WRITEBACK: BO should be CPU-mapped as writeback. + * + * Map as writeback instead of write-combine. This optimizes for CPU + * reads. + */ + DRM_ASAHI_GEM_WRITEBACK = (1L << 0), + + /** + * @DRM_ASAHI_GEM_VM_PRIVATE: BO is private to this GPU VM (no exports). + */ + DRM_ASAHI_GEM_VM_PRIVATE = (1L << 1), +}; + +/** + * struct drm_asahi_gem_create - Arguments passed to DRM_IOCTL_ASAHI_GEM_CREATE + */ +struct drm_asahi_gem_create { + /** @size: Size of the BO */ + __u64 size; + + /** @flags: Combination of drm_asahi_gem_flags flags. */ + __u32 flags; + + /** + * @vm_id: VM ID to assign to the BO, if DRM_ASAHI_GEM_VM_PRIVATE is set + */ + __u32 vm_id; + + /** @handle: Returned GEM handle for the BO */ + __u32 handle; + + /** @pad: MBZ */ + __u32 pad; +}; + +/** + * struct drm_asahi_gem_mmap_offset - Arguments passed to + * DRM_IOCTL_ASAHI_GEM_MMAP_OFFSET + */ +struct drm_asahi_gem_mmap_offset { + /** @handle: Handle for the object being mapped. */ + __u32 handle; + + /** @flags: Must be zero */ + __u32 flags; + + /** @offset: The fake offset to use for subsequent mmap call */ + __u64 offset; +}; + +/** + * enum drm_asahi_bind_flags - Flags for GEM binding + */ +enum drm_asahi_bind_flags { + /** + * @DRM_ASAHI_BIND_UNBIND: Instead of binding a GEM object to the range, + * simply unbind the GPU VMA range. + */ + DRM_ASAHI_BIND_UNBIND = (1L << 0), + + /** @DRM_ASAHI_BIND_READ: Map BO with GPU read permission */ + DRM_ASAHI_BIND_READ = (1L << 1), + + /** @DRM_ASAHI_BIND_WRITE: Map BO with GPU write permission */ + DRM_ASAHI_BIND_WRITE = (1L << 2), + + /** + * @DRM_ASAHI_BIND_SINGLE_PAGE: Map a single page of the BO repeatedly + * across the VA range. + * + * This is useful to fill a VA range with scratch pages or zero pages. + * It is intended as a mechanism to accelerate sparse. + */ + DRM_ASAHI_BIND_SINGLE_PAGE = (1L << 3), +}; + +/** + * struct drm_asahi_gem_bind_op - Description of a single GEM bind operation. + */ +struct drm_asahi_gem_bind_op { + /** @flags: Combination of drm_asahi_bind_flags flags. 
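A typical create-then-map sequence with the two GEM ioctls, as a sketch (error handling omitted; fd and the <sys/mman.h> include are assumed):

	struct drm_asahi_gem_create create = {
		.size = 0x10000,
		.flags = DRM_ASAHI_GEM_WRITEBACK,	/* optimize for CPU reads */
	};
	ioctl(fd, DRM_IOCTL_ASAHI_GEM_CREATE, &create);

	struct drm_asahi_gem_mmap_offset mo = { .handle = create.handle };
	ioctl(fd, DRM_IOCTL_ASAHI_GEM_MMAP_OFFSET, &mo);

	void *cpu = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, mo.offset);	/* fake offset from above */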
*/ + __u32 flags; + + /** @handle: GEM object to bind (except for UNBIND) */ + __u32 handle; + + /** + * @offset: Offset into the object (except for UNBIND). + * + * For a regular bind, this is the beginning of the region of the GEM + * object to bind. + * + * For a single-page bind, this is the offset to the single page that + * will be repeatedly bound. + * + * Must be page-size aligned. + */ + __u64 offset; + + /** + * @range: Number of bytes to bind/unbind to @addr. + * + * Must be page-size aligned. + */ + __u64 range; + + /** + * @addr: Address to bind to. + * + * Must be page-size aligned. + */ + __u64 addr; +}; + +/** + * struct drm_asahi_vm_bind - Arguments passed to + * DRM_IOCTL_ASAHI_VM_BIND + */ +struct drm_asahi_vm_bind { + /** @vm_id: The ID of the VM to bind to */ + __u32 vm_id; + + /** @num_binds: number of binds in this IOCTL. */ + __u32 num_binds; + + /** + * @stride: Stride in bytes between consecutive binds. This allows + * extensibility of drm_asahi_gem_bind_op. + */ + __u32 stride; + + /** @pad: MBZ */ + __u32 pad; + + /** + * @userptr: User pointer to an array of @num_binds structures of type + * @drm_asahi_gem_bind_op and size @stride bytes. + */ + __u64 userptr; +}; + +/** + * enum drm_asahi_bind_object_op - Special object bind operation + */ +enum drm_asahi_bind_object_op { + /** @DRM_ASAHI_BIND_OBJECT_OP_BIND: Bind a BO as a special GPU object */ + DRM_ASAHI_BIND_OBJECT_OP_BIND = 0, + + /** @DRM_ASAHI_BIND_OBJECT_OP_UNBIND: Unbind a special GPU object */ + DRM_ASAHI_BIND_OBJECT_OP_UNBIND = 1, +}; + +/** + * enum drm_asahi_bind_object_flags - Special object bind flags + */ +enum drm_asahi_bind_object_flags { + /** + * @DRM_ASAHI_BIND_OBJECT_USAGE_TIMESTAMPS: Map a BO as a timestamp + * buffer. + */ + DRM_ASAHI_BIND_OBJECT_USAGE_TIMESTAMPS = (1L << 0), +}; + +/** + * struct drm_asahi_gem_bind_object - Arguments passed to + * DRM_IOCTL_ASAHI_GEM_BIND_OBJECT + */ +struct drm_asahi_gem_bind_object { + /** @op: Bind operation (enum drm_asahi_bind_object_op) */ + __u32 op; + + /** @flags: Combination of drm_asahi_bind_object_flags flags. */ + __u32 flags; + + /** @handle: GEM object to bind/unbind (BIND) */ + __u32 handle; + + /** @vm_id: The ID of the VM to operate on (MBZ currently) */ + __u32 vm_id; + + /** @offset: Offset into the object (BIND only) */ + __u64 offset; + + /** @range: Number of bytes to bind/unbind (BIND only) */ + __u64 range; + + /** @object_handle: Object handle (out for BIND, in for UNBIND) */ + __u32 object_handle; + + /** @pad: MBZ */ + __u32 pad; +}; + +/** + * enum drm_asahi_cmd_type - Command type + */ +enum drm_asahi_cmd_type { + /** + * @DRM_ASAHI_CMD_RENDER: Render command, executing on the render + * subqueue. Combined vertex and fragment operation. + * + * Followed by a @drm_asahi_cmd_render payload. + */ + DRM_ASAHI_CMD_RENDER = 0, + + /** + * @DRM_ASAHI_CMD_COMPUTE: Compute command on the compute subqueue. + * + * Followed by a @drm_asahi_cmd_compute payload. + */ + DRM_ASAHI_CMD_COMPUTE = 1, + + /** + * @DRM_ASAHI_SET_VERTEX_ATTACHMENTS: Software command to set + * attachments for subsequent vertex shaders in the same submit. + * + * Followed by (possibly multiple) @drm_asahi_attachment payloads. + */ + DRM_ASAHI_SET_VERTEX_ATTACHMENTS = 2, + + /** + * @DRM_ASAHI_SET_FRAGMENT_ATTACHMENTS: Software command to set + * attachments for subsequent fragment shaders in the same submit. + * + * Followed by (possibly multiple) @drm_asahi_attachment payloads. 
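A single-bind invocation could then be assembled as in this sketch (bo_handle, vm_id, and the target VA are placeholders; note how @stride carries sizeof(op) so the array layout stays extensible):

	struct drm_asahi_gem_bind_op op = {
		.flags = DRM_ASAHI_BIND_READ | DRM_ASAHI_BIND_WRITE,
		.handle = bo_handle,
		.offset = 0,
		.range = 0x10000,		/* page aligned */
		.addr = 0x1000000000ull,	/* hypothetical VA outside the kernel carveout */
	};
	struct drm_asahi_vm_bind bind = {
		.vm_id = vm_id,
		.num_binds = 1,
		.stride = sizeof(op),
		.userptr = (__u64)(uintptr_t)&op,
	};
	ioctl(fd, DRM_IOCTL_ASAHI_VM_BIND, &bind);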
+ */ + DRM_ASAHI_SET_FRAGMENT_ATTACHMENTS = 3, + + /** + * @DRM_ASAHI_SET_COMPUTE_ATTACHMENTS: Software command to set + * attachments for subsequent compute shaders in the same submit. + * + * Followed by (possibly multiple) @drm_asahi_attachment payloads. + */ + DRM_ASAHI_SET_COMPUTE_ATTACHMENTS = 4, +}; + +/** + * enum drm_asahi_priority - Scheduling queue priority. + * + * These priorities are forwarded to the firmware to influence firmware + * scheduling. The exact policy is ultimately decided by firmware, but + * these enums allow userspace to communicate the intentions. + */ +enum drm_asahi_priority { + /** @DRM_ASAHI_PRIORITY_LOW: Low priority queue. */ + DRM_ASAHI_PRIORITY_LOW = 0, + + /** @DRM_ASAHI_PRIORITY_MEDIUM: Medium priority queue. */ + DRM_ASAHI_PRIORITY_MEDIUM = 1, + + /** + * @DRM_ASAHI_PRIORITY_HIGH: High priority queue. + * + * Reserved for future extension. + */ + DRM_ASAHI_PRIORITY_HIGH = 2, + + /** + * @DRM_ASAHI_PRIORITY_REALTIME: Real-time priority queue. + * + * Reserved for future extension. + */ + DRM_ASAHI_PRIORITY_REALTIME = 3, +}; + +/** + * struct drm_asahi_queue_create - Arguments passed to + * DRM_IOCTL_ASAHI_QUEUE_CREATE + */ +struct drm_asahi_queue_create { + /** @flags: MBZ */ + __u32 flags; + + /** @vm_id: The ID of the VM this queue is bound to */ + __u32 vm_id; + + /** @priority: One of drm_asahi_priority */ + __u32 priority; + + /** @queue_id: The returned queue ID */ + __u32 queue_id; + + /** + * @usc_exec_base: GPU base address for all USC binaries (shaders) on + * this queue. USC addresses are 32-bit relative to this 64-bit base. + * + * This sets the following registers on all queue commands: + * + * USC_EXEC_BASE_TA (vertex) + * USC_EXEC_BASE_ISP (fragment) + * USC_EXEC_BASE_CP (compute) + * + * While the hardware lets us configure these independently per command, + * we do not have a use case for this. Instead, we expect userspace to + * fix a 4GiB VA carveout for USC memory and pass its base address here. + */ + __u64 usc_exec_base; +}; + +/** + * struct drm_asahi_queue_destroy - Arguments passed to + * DRM_IOCTL_ASAHI_QUEUE_DESTROY + */ +struct drm_asahi_queue_destroy { + /** @queue_id: The queue ID to be destroyed */ + __u32 queue_id; + + /** @pad: MBZ */ + __u32 pad; +}; + +/** + * enum drm_asahi_sync_type - Sync item type + */ +enum drm_asahi_sync_type { + /** @DRM_ASAHI_SYNC_SYNCOBJ: Binary sync object */ + DRM_ASAHI_SYNC_SYNCOBJ = 0, + + /** @DRM_ASAHI_SYNC_TIMELINE_SYNCOBJ: Timeline sync object */ + DRM_ASAHI_SYNC_TIMELINE_SYNCOBJ = 1, +}; + +/** + * struct drm_asahi_sync - Sync item + */ +struct drm_asahi_sync { + /** @sync_type: One of drm_asahi_sync_type */ + __u32 sync_type; + + /** @handle: The sync object handle */ + __u32 handle; + + /** @timeline_value: Timeline value for timeline sync objects */ + __u64 timeline_value; +}; + +/** + * define DRM_ASAHI_BARRIER_NONE - Command index for no barrier + * + * This special value may be passed in to drm_asahi_command::vdm_barrier or + * drm_asahi_command::cdm_barrier to indicate that the respective subqueue + * should not wait on any previous work. + */ +#define DRM_ASAHI_BARRIER_NONE (0xFFFFu) + +/** + * struct drm_asahi_cmd_header - Top level command structure + * + * This struct is core to the command buffer definition and therefore is not + * extensible. + */ +struct drm_asahi_cmd_header { + /** @cmd_type: One of drm_asahi_cmd_type */ + __u16 cmd_type; + + /** + * @size: Size of this command, not including this header. 
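Creating a queue with the structure above is a single call, as sketched here (vm_id and usc_base are placeholders; usc_base would be the base of a userspace-chosen 4 GiB shader-VA carveout):

	struct drm_asahi_queue_create qc = {
		.vm_id = vm_id,
		.priority = DRM_ASAHI_PRIORITY_MEDIUM,
		.usc_exec_base = usc_base,
	};
	ioctl(fd, DRM_IOCTL_ASAHI_QUEUE_CREATE, &qc);
	/* qc.queue_id is then used for DRM_IOCTL_ASAHI_SUBMIT and
	 * DRM_IOCTL_ASAHI_QUEUE_DESTROY */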
+ * + * For hardware commands, this enables extensibility of commands without + * requiring extra command types. Passing a command that is shorter + * than expected is explicitly allowed for backwards-compatibility. + * Truncated fields will be zeroed. + * + * For the synthetic attachment setting commands, this implicitly + * encodes the number of attachments. These commands take multiple + * fixed-size @drm_asahi_attachment structures as their payload, so size + * equals number of attachments * sizeof(struct drm_asahi_attachment). + */ + __u16 size; + + /** + * @vdm_barrier: VDM (render) command index to wait on. + * + * Barriers are indices relative to the beginning of a given submit. A + * barrier of 0 waits on commands submitted to the respective subqueue + * in previous submit ioctls. A barrier of N waits on N previous + * commands on the subqueue within the current submit ioctl. As a + * special case, passing @DRM_ASAHI_BARRIER_NONE avoids waiting on any + * commands in the subqueue. + * + * Examples: + * + * 0: This waits on all previous work. + * + * NONE: This does not wait for anything on this subqueue. + * + * 1: This waits on the first render command in the submit. + * This is valid only if there are multiple render commands in the + * same submit. + * + * Barriers are valid only for hardware commands. Synthetic software + * commands to set attachments must pass NONE here. + */ + __u16 vdm_barrier; + + /** + * @cdm_barrier: CDM (compute) command index to wait on. + * + * See @vdm_barrier, and replace VDM/render with CDM/compute. + */ + __u16 cdm_barrier; +}; + +/** + * struct drm_asahi_submit - Arguments passed to DRM_IOCTL_ASAHI_SUBMIT + */ +struct drm_asahi_submit { + /** + * @syncs: An optional pointer to an array of drm_asahi_sync. The first + * @in_sync_count elements are in-syncs, then the remaining + * @out_sync_count elements are out-syncs. Using a single array with + * explicit partitioning simplifies handling. + */ + __u64 syncs; + + /** + * @cmdbuf: Pointer to the command buffer to submit. + * + * This is a flat command buffer. By design, it contains no CPU + * pointers, which makes it suitable for a virtgpu wire protocol without + * requiring any serializing/deserializing step. + * + * It consists of a series of commands. Each command begins with a + * fixed-size @drm_asahi_cmd_header header and is followed by a + * variable-length payload according to the type and size in the header. + * + * The combined count of "real" hardware commands must be nonzero and at + * most drm_asahi_params_global::max_commands_per_submission. + */ + __u64 cmdbuf; + + /** @flags: Flags for command submission (MBZ) */ + __u32 flags; + + /** @queue_id: The queue ID to be submitted to */ + __u32 queue_id; + + /** + * @in_sync_count: Number of sync objects to wait on before starting + * this job. + */ + __u32 in_sync_count; + + /** + * @out_sync_count: Number of sync objects to signal upon completion of + * this job. + */ + __u32 out_sync_count; + + /** @cmdbuf_size: Command buffer size in bytes */ + __u32 cmdbuf_size; + + /** @pad: MBZ */ + __u32 pad; +}; + +/** + * struct drm_asahi_attachment - Describe an "attachment". + * + * Attachments are any memory written by shaders, notably including render + * target attachments written by the end-of-tile program. This is purely a hint + * about the accessed memory regions. It is optional to specify, which is + * fortunate as it cannot be specified precisely with bindless access anyway. 
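Putting the header, barrier, and submit pieces together, a one-command submission could look like this sketch (the render payload is left zeroed for brevity; qc is the queue-create struct from the earlier sketch):

	struct {
		struct drm_asahi_cmd_header hdr;
		struct drm_asahi_cmd_render render;
	} cmd = {
		.hdr = {
			.cmd_type = DRM_ASAHI_CMD_RENDER,
			.size = sizeof(cmd.render),
			.vdm_barrier = 0,			/* wait on all prior render work */
			.cdm_barrier = DRM_ASAHI_BARRIER_NONE,	/* no compute dependency */
		},
		/* .render would carry the control-register values described below */
	};
	struct drm_asahi_submit submit = {
		.cmdbuf = (__u64)(uintptr_t)&cmd,
		.cmdbuf_size = sizeof(cmd),
		.queue_id = qc.queue_id,
	};
	ioctl(fd, DRM_IOCTL_ASAHI_SUBMIT, &submit);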
+ * But where possible, it's probably a good idea for userspace to include these + * hints, forwarded to the firmware. + * + * This struct is implicitly sized and therefore is not extensible. + */ +struct drm_asahi_attachment { + /** @pointer: Base address of the attachment */ + __u64 pointer; + + /** @size: Size of the attachment in bytes */ + __u64 size; + + /** @pad: MBZ */ + __u32 pad; + + /** @flags: MBZ */ + __u32 flags; +}; + +enum drm_asahi_render_flags { + /** + * @DRM_ASAHI_RENDER_VERTEX_SCRATCH: A vertex stage shader uses scratch + * memory. + */ + DRM_ASAHI_RENDER_VERTEX_SCRATCH = (1U << 0), + + /** + * @DRM_ASAHI_RENDER_PROCESS_EMPTY_TILES: Process even empty tiles. + * This must be set when clearing render targets. + */ + DRM_ASAHI_RENDER_PROCESS_EMPTY_TILES = (1U << 1), + + /** + * @DRM_ASAHI_RENDER_NO_VERTEX_CLUSTERING: Run vertex stage on a single + * cluster (on multi-cluster GPUs) + * + * This harms performance but can work around certain sync/coherency + * bugs, and therefore is useful for debugging. + */ + DRM_ASAHI_RENDER_NO_VERTEX_CLUSTERING = (1U << 2), + + /** + * @DRM_ASAHI_RENDER_DBIAS_IS_INT: Use integer depth bias formula. + * + * Graphics specifications contain two alternate formulas for depth + * bias, a float formula used with floating-point depth buffers and an + * integer formula used with unorm depth buffers. This flag specifies + * that the integer formula should be used. If omitted, the float + * formula is used instead. + * + * This corresponds to bit 18 of the relevant hardware control register, + * so we match that here for efficiency. + */ + DRM_ASAHI_RENDER_DBIAS_IS_INT = (1U << 18), +}; + +/** + * struct drm_asahi_zls_buffer - Describe a depth or stencil buffer. + * + * These fields correspond to hardware registers in the ZLS (Z Load/Store) unit. + * There are three hardware registers for each field respectively for loads, + * stores, and partial renders. In practice, it makes sense to set all to the + * same values, except in exceptional cases not yet implemented in userspace, so + * we do not duplicate here for simplicity/efficiency. + * + * This struct is embedded in other structs and therefore is not extensible. + */ +struct drm_asahi_zls_buffer { + /** @base: Base address of the buffer */ + __u64 base; + + /** + * @comp_base: If the load buffer is compressed, address of the + * compression metadata section. + */ + __u64 comp_base; + + /** + * @stride: If layered rendering is enabled, the number of bytes + * between each layer of the buffer. + */ + __u32 stride; + + /** + * @comp_stride: If layered rendering is enabled, the number of bytes + * between each layer of the compression metadata. + */ + __u32 comp_stride; +}; + +/** + * struct drm_asahi_timestamp - Describe a timestamp write. + * + * The firmware can optionally write the GPU timestamp at render pass + * granularities, but it needs to be mapped specially via + * DRM_IOCTL_ASAHI_GEM_BIND_OBJECT. This structure therefore describes where to + * write as a handle-offset pair, rather than a GPU address like normal. + * + * This struct is embedded in other structs and therefore is not extensible. + */ +struct drm_asahi_timestamp { + /** + * @handle: Handle of the timestamp buffer, or 0 to skip this + * timestamp. If nonzero, this must equal the value returned in + * drm_asahi_gem_bind_object::object_handle. + */ + __u32 handle; + + /** @offset: Offset to write into the timestamp buffer */ + __u32 offset; +}; + +/** + * struct drm_asahi_timestamps - Describe timestamp writes.
+ * + * Each operation that can be timestamped can be timestamped at the start + * and end. Therefore, drm_asahi_timestamp structs always come in pairs, bundled + * together into drm_asahi_timestamps. + * + * This struct is embedded in other structs and therefore is not extensible. + */ +struct drm_asahi_timestamps { + /** @start: Timestamp recorded at the start of the operation */ + struct drm_asahi_timestamp start; + + /** @end: Timestamp recorded at the end of the operation */ + struct drm_asahi_timestamp end; +}; + +/** + * struct drm_asahi_helper_program - Describe helper program configuration. + * + * The helper program is a compute-like kernel required for various hardware + * functionality. Its most important role is dynamically allocating + * scratch/stack memory for individual subgroups, by partitioning a static + * allocation shared for the whole device. It is supplied by userspace via + * drm_asahi_helper_program and internally dispatched by the hardware as needed. + * + * This struct is embedded in other structs and therefore is not extensible. + */ +struct drm_asahi_helper_program { + /** + * @binary: USC address to the helper program binary. This is a tagged + * pointer with configuration in the bottom bits. + */ + __u32 binary; + + /** @cfg: Additional configuration bits for the helper program. */ + __u32 cfg; + + /** + * @data: Data passed to the helper program. This value is not + * interpreted by the kernel, firmware, or hardware in any way. It is + * simply a sideband for userspace, set with the submit ioctl and read + * via special registers inside the helper program. + * + * In practice, userspace will pass a 64-bit GPU VA here pointing to the + * actual arguments, which presumably don't fit in 64-bits. + */ + __u64 data; +}; + +/** + * struct drm_asahi_bg_eot - Describe a background or end-of-tile program. + * + * The background and end-of-tile programs are dispatched by the hardware at the + * beginning and end of rendering. As the hardware "tilebuffer" is simply local + * memory, these programs are necessary to implement API-level render targets. + * The fragment-like background program is responsible for loading either the + * clear colour or the existing render target contents, while the compute-like + * end-of-tile program stores the tilebuffer contents to memory. + * + * This struct is embedded in other structs and therefore is not extensible. + */ +struct drm_asahi_bg_eot { + /** + * @usc: USC address of the hardware USC words binding resources + * (including images and uniforms) and the program itself. Note this is + * an additional layer of indirection compared to the helper program, + * avoiding the need for a sideband for data. This is a tagged pointer + * with additional configuration in the bottom bits. + */ + __u32 usc; + + /** + * @rsrc_spec: Resource specifier for the program. This is a packed + * hardware data structure describing the required number of registers, + * uniforms, bound textures, and bound samplers. + */ + __u32 rsrc_spec; +}; + +/** + * struct drm_asahi_cmd_render - Command to submit 3D + * + * This command submits a single render pass. The hardware control stream may + * include many draws and subpasses, but within the command, the framebuffer + * dimensions and attachments are fixed. + * + * The hardware requires the firmware to set a large number of Control Registers + * setting up state at render pass granularity before each command rendering 3D. + * The firmware bundles this state into data structures.
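Since the timestamps tick at the queried drm_asahi_params_global::command_timestamp_frequency_hz rather than at a fixed rate, converting them is userspace's job; a sketch:

	/* ticks -> nanoseconds; the multiply can overflow __u64 for very
	 * large tick counts, so production code may prefer 128-bit
	 * intermediates */
	static inline __u64 asahi_ticks_to_ns(__u64 ticks,
					      const struct drm_asahi_params_global *p)
	{
		return ticks * 1000000000ull / p->command_timestamp_frequency_hz;
	}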
Unfortunately, we + * cannot expose any of that directly to userspace, because the + * kernel-firmware ABI is not stable. Although we can guarantee the firmware + * updates in tandem with the kernel, we cannot break old userspace when + * upgrading the firmware and kernel. Therefore, we need to abstract the + * data structures well to avoid tying our hands with future firmware. + * + * The bulk of drm_asahi_cmd_render therefore consists of values of hardware + * control registers, marshalled via the firmware interface. + * + * The framebuffer/tilebuffer dimensions are also specified here. In addition to + * being passed to the firmware/hardware, the kernel requires these dimensions + * to calculate various essential tiling-related data structures. It is + * unfortunate that our submits are heavier than on vendors with saner + * hardware-software interfaces. The upshot is all of this information is + * readily available to userspace with all current APIs. + * + * It looks odd, but it's not overly burdensome and it ensures we can remain + * compatible with old userspace. + */ +struct drm_asahi_cmd_render { + /** @flags: Combination of drm_asahi_render_flags flags. */ + __u32 flags; + + /** + * @isp_zls_pixels: ISP_ZLS_PIXELS register value. This contains the + * depth/stencil width/height, which may differ from the framebuffer + * width/height. + */ + __u32 isp_zls_pixels; + + /** + * @vdm_ctrl_stream_base: VDM_CTRL_STREAM_BASE register value. GPU + * address to the beginning of the VDM control stream. + */ + __u64 vdm_ctrl_stream_base; + + /** @vertex_helper: Helper program used for the vertex shader */ + struct drm_asahi_helper_program vertex_helper; + + /** @fragment_helper: Helper program used for the fragment shader */ + struct drm_asahi_helper_program fragment_helper; + + /** + * @isp_scissor_base: ISP_SCISSOR_BASE register value. GPU address of an + * array of scissor descriptors indexed in the render pass. + */ + __u64 isp_scissor_base; + + /** + * @isp_dbias_base: ISP_DBIAS_BASE register value. GPU address of an + * array of depth bias values indexed in the render pass. + */ + __u64 isp_dbias_base; + + /** + * @isp_oclqry_base: ISP_OCLQRY_BASE register value. GPU address of an + * array of occlusion query results written by the render pass. + */ + __u64 isp_oclqry_base; + + /** @depth: Depth buffer */ + struct drm_asahi_zls_buffer depth; + + /** @stencil: Stencil buffer */ + struct drm_asahi_zls_buffer stencil; + + /** @zls_ctrl: ZLS_CTRL register value */ + __u64 zls_ctrl; + + /** @ppp_multisamplectl: PPP_MULTISAMPLECTL register value */ + __u64 ppp_multisamplectl; + + /** + * @sampler_heap: Base address of the sampler heap. This heap is used + * for both vertex shaders and fragment shaders. The registers are + * per-stage, but there is no known use case for separate heaps. + */ + __u64 sampler_heap; + + /** @ppp_ctrl: PPP_CTRL register value */ + __u32 ppp_ctrl; + + /** @width_px: Framebuffer width in pixels */ + __u16 width_px; + + /** @height_px: Framebuffer height in pixels */ + __u16 height_px; + + /** @layers: Number of layers in the framebuffer */ + __u16 layers; + + /** @sampler_count: Number of samplers in the sampler heap. */ + __u16 sampler_count; + + /** @utile_width_px: Width of a logical tilebuffer tile in pixels */ + __u8 utile_width_px; + + /** @utile_height_px: Height of a logical tilebuffer tile in pixels */ + __u8 utile_height_px; + + /** @samples: # of samples in the framebuffer. Must be 1, 2, or 4.
*/ + __u8 samples; + + /** @sample_size_B: # of bytes in the tilebuffer required per sample. */ + __u8 sample_size_B; + + /** + * @isp_merge_upper_x: 32-bit float used in the hardware triangle + * merging. Calculate as: tan(60 deg) * width. + * + * Making these values UAPI avoids requiring floating-point calculations + * in the kernel in the hot path. + */ + __u32 isp_merge_upper_x; + + /** + * @isp_merge_upper_y: 32-bit float. Calculate as: tan(60 deg) * height. + * See @isp_merge_upper_x. + */ + __u32 isp_merge_upper_y; + + /** @bg: Background program run for each tile at the start */ + struct drm_asahi_bg_eot bg; + + /** @eot: End-of-tile program run for each tile at the end */ + struct drm_asahi_bg_eot eot; + + /** + * @partial_bg: Background program run at the start of each tile when + * resuming the render pass during a partial render. + */ + struct drm_asahi_bg_eot partial_bg; + + /** + * @partial_eot: End-of-tile program run at the end of each tile when + * pausing the render pass during a partial render. + */ + struct drm_asahi_bg_eot partial_eot; + + /** + * @isp_bgobjdepth: ISP_BGOBJDEPTH register value. This is the depth + * buffer clear value, encoded in the depth buffer's format: either a + * 32-bit float or a 16-bit unorm (with upper bits zeroed). + */ + __u32 isp_bgobjdepth; + + /** + * @isp_bgobjvals: ISP_BGOBJVALS register value. The bottom 8-bits + * contain the stencil buffer clear value. + */ + __u32 isp_bgobjvals; + + /** @ts_vtx: Timestamps for the vertex portion of the render */ + struct drm_asahi_timestamps ts_vtx; + + /** @ts_frag: Timestamps for the fragment portion of the render */ + struct drm_asahi_timestamps ts_frag; +}; + +/** + * struct drm_asahi_cmd_compute - Command to submit compute + * + * This command submits a control stream consisting of compute dispatches. There + * is essentially no limit on how many compute dispatches may be included in a + * single compute command, although timestamps are at command granularity. + */ +struct drm_asahi_cmd_compute { + /** @flags: MBZ */ + __u32 flags; + + /** @sampler_count: Number of samplers in the sampler heap. */ + __u32 sampler_count; + + /** + * @cdm_ctrl_stream_base: CDM_CTRL_STREAM_BASE register value. GPU + * address to the beginning of the CDM control stream. + */ + __u64 cdm_ctrl_stream_base; + + /** + * @cdm_ctrl_stream_end: GPU base address to the end of the hardware + * control stream. Note this only considers the first contiguous segment + * of the control stream, as the stream might jump elsewhere. + */ + __u64 cdm_ctrl_stream_end; + + /** @sampler_heap: Base address of the sampler heap. */ + __u64 sampler_heap; + + /** @helper: Helper program used for this compute command */ + struct drm_asahi_helper_program helper; + + /** @ts: Timestamps for the compute command */ + struct drm_asahi_timestamps ts; +}; + +/** + * struct drm_asahi_get_time - Arguments passed to DRM_IOCTL_ASAHI_GET_TIME + */ +struct drm_asahi_get_time { + /** @flags: MBZ. */ + __u64 flags; + + /** @gpu_timestamp: On return, the GPU timestamp in nanoseconds. */ + __u64 gpu_timestamp; +}; + +/** + * DRM_IOCTL_ASAHI() - Build an Asahi IOCTL number + * @__access: Access type. Must be R, W or RW. + * @__id: One of the DRM_ASAHI_xxx id. + * @__type: Suffix of the type being passed to the IOCTL. + * + * Don't use this macro directly, use the DRM_IOCTL_ASAHI_xxx + * values instead. + * + * Return: An IOCTL number to be passed to ioctl() from userspace.
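The isp_merge_upper values above are raw IEEE-754 single-precision bit patterns, so userspace computes them once per framebuffer size; a sketch (cmd is a struct drm_asahi_cmd_render being filled):

	#include <string.h>

	static __u32 f32_bits(float f)
	{
		__u32 bits;

		memcpy(&bits, &f, sizeof(bits));	/* type-pun without UB */
		return bits;
	}

	/* tan(60 deg) == sqrt(3) */
	cmd.isp_merge_upper_x = f32_bits(1.7320508f * cmd.width_px);
	cmd.isp_merge_upper_y = f32_bits(1.7320508f * cmd.height_px);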
+ */ +#define DRM_IOCTL_ASAHI(__access, __id, __type) \ + DRM_IO ## __access(DRM_COMMAND_BASE + DRM_ASAHI_ ## __id, \ + struct drm_asahi_ ## __type) + +/* Note: this is an enum so that it can be resolved by Rust bindgen. */ +enum { + DRM_IOCTL_ASAHI_GET_PARAMS = DRM_IOCTL_ASAHI(W, GET_PARAMS, get_params), + DRM_IOCTL_ASAHI_GET_TIME = DRM_IOCTL_ASAHI(WR, GET_TIME, get_time), + DRM_IOCTL_ASAHI_VM_CREATE = DRM_IOCTL_ASAHI(WR, VM_CREATE, vm_create), + DRM_IOCTL_ASAHI_VM_DESTROY = DRM_IOCTL_ASAHI(W, VM_DESTROY, vm_destroy), + DRM_IOCTL_ASAHI_VM_BIND = DRM_IOCTL_ASAHI(W, VM_BIND, vm_bind), + DRM_IOCTL_ASAHI_GEM_CREATE = DRM_IOCTL_ASAHI(WR, GEM_CREATE, gem_create), + DRM_IOCTL_ASAHI_GEM_MMAP_OFFSET = DRM_IOCTL_ASAHI(WR, GEM_MMAP_OFFSET, gem_mmap_offset), + DRM_IOCTL_ASAHI_GEM_BIND_OBJECT = DRM_IOCTL_ASAHI(WR, GEM_BIND_OBJECT, gem_bind_object), + DRM_IOCTL_ASAHI_QUEUE_CREATE = DRM_IOCTL_ASAHI(WR, QUEUE_CREATE, queue_create), + DRM_IOCTL_ASAHI_QUEUE_DESTROY = DRM_IOCTL_ASAHI(W, QUEUE_DESTROY, queue_destroy), + DRM_IOCTL_ASAHI_SUBMIT = DRM_IOCTL_ASAHI(W, SUBMIT, submit), +}; + +#if defined(__cplusplus) +} +#endif + +#endif /* _ASAHI_DRM_H_ */ diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 7fba37b94401..e63a71d3c607 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -905,13 +905,17 @@ struct drm_syncobj_destroy { }; #define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE (1 << 0) +#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE (1 << 1) #define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE (1 << 0) +#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE (1 << 1) struct drm_syncobj_handle { __u32 handle; __u32 flags; __s32 fd; __u32 pad; + + __u64 point; }; struct drm_syncobj_transfer { diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index 70f3b00b0681..81202a50dc9e 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -421,6 +421,8 @@ extern "C" { #define DRM_FORMAT_MOD_VENDOR_ARM 0x08 #define DRM_FORMAT_MOD_VENDOR_ALLWINNER 0x09 #define DRM_FORMAT_MOD_VENDOR_AMLOGIC 0x0a +#define DRM_FORMAT_MOD_VENDOR_MTK 0x0b +#define DRM_FORMAT_MOD_VENDOR_APPLE 0x0c /* add more to the end as needed */ @@ -1453,6 +1455,90 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier) */ #define AMLOGIC_FBC_OPTION_MEM_SAVING (1ULL << 0) +/* MediaTek modifiers + * Bits Parameter Notes + * ----- ------------------------ --------------------------------------------- + * 7: 0 TILE LAYOUT Values are MTK_FMT_MOD_TILE_* + * 15: 8 COMPRESSION Values are MTK_FMT_MOD_COMPRESS_* + * 23:16 10 BIT LAYOUT Values are MTK_FMT_MOD_10BIT_LAYOUT_* + * + */ + +#define DRM_FORMAT_MOD_MTK(__flags) fourcc_mod_code(MTK, __flags) + +/* + * MediaTek Tiled Modifier + * The lowest 8 bits of the modifier are used to specify the tiling + * layout. Only the 16L_32S tiling is used for now, but we define an + * "untiled" version and leave room for future expansion.
+ */ +#define MTK_FMT_MOD_TILE_MASK 0xf +#define MTK_FMT_MOD_TILE_NONE 0x0 +#define MTK_FMT_MOD_TILE_16L32S 0x1 + +/* + * Bits 8-15 specify compression options + */ +#define MTK_FMT_MOD_COMPRESS_MASK (0xf << 8) +#define MTK_FMT_MOD_COMPRESS_NONE (0x0 << 8) +#define MTK_FMT_MOD_COMPRESS_V1 (0x1 << 8) + +/* + * Bits 16-23 specify how the bits of 10 bit formats are + * laid out in memory + */ +#define MTK_FMT_MOD_10BIT_LAYOUT_MASK (0xf << 16) +#define MTK_FMT_MOD_10BIT_LAYOUT_PACKED (0x0 << 16) +#define MTK_FMT_MOD_10BIT_LAYOUT_LSBTILED (0x1 << 16) +#define MTK_FMT_MOD_10BIT_LAYOUT_LSBRASTER (0x2 << 16) + +/* alias for the most common tiling format */ +#define DRM_FORMAT_MOD_MTK_16L_32S_TILE DRM_FORMAT_MOD_MTK(MTK_FMT_MOD_TILE_16L32S) + +/* + * Apple GPU-tiled layouts. + * + * Apple GPUs support nonlinear tilings with optional lossless compression. + * + * GPU-tiled images are divided into 16KiB tiles: + * + * Bytes per pixel Tile size + * --------------- --------- + * 1 128x128 + * 2 128x64 + * 4 64x64 + * 8 64x32 + * 16 32x32 + * + * Tiles are raster-order. Pixels within a tile are interleaved (Morton order). + * + * Compressed images pad the body to 128-bytes and are immediately followed by a + * metadata section. The metadata section rounds the image dimensions to + * powers-of-two and contains 8 bytes for each 16x16 compression subtile. + * Subtiles are interleaved (Morton order). + * + * All images are 128-byte aligned. + * + * These layouts fundamentally do not have meaningful strides. No matter how we + * specify strides for these layouts, userspace unaware of Apple image layouts + * will be unable to correctly use the specified stride for any purpose. + * Userspace aware of the image layouts does not use strides. The most "correct" + * convention would be setting the image stride to 0. Unfortunately, some + * software assumes the stride is at least (width * bytes per pixel). We + * therefore require that stride equals (width * bytes per pixel). Since the + * stride is arbitrary here, we pick the simplest convention. + * + * Although containing two sections, compressed image layouts are treated in + * software as a single plane. This is modelled after AFBC, a similar + * scheme. Attempting to separate the sections to be "explicit" in DRM would + * only generate more confusion, as software does not treat the image this way.
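Composing and decoding the MediaTek fields defined above is plain mask-and-shift work, as in this sketch:

	__u64 mod = DRM_FORMAT_MOD_MTK(MTK_FMT_MOD_TILE_16L32S |
				       MTK_FMT_MOD_COMPRESS_V1 |
				       MTK_FMT_MOD_10BIT_LAYOUT_LSBRASTER);

	unsigned int tile = mod & MTK_FMT_MOD_TILE_MASK;			/* 0x1 */
	unsigned int comp = (mod & MTK_FMT_MOD_COMPRESS_MASK) >> 8;		/* 0x1 */
	unsigned int ten = (mod & MTK_FMT_MOD_10BIT_LAYOUT_MASK) >> 16;	/* 0x2 */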
+ * + * For detailed information on the hardware image layouts, see + * https://docs.mesa3d.org/drivers/asahi.html#image-layouts + */ +#define DRM_FORMAT_MOD_APPLE_GPU_TILED fourcc_mod_code(APPLE, 1) +#define DRM_FORMAT_MOD_APPLE_GPU_TILED_COMPRESSED fourcc_mod_code(APPLE, 2) + /* * AMD modifiers * diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h index a35b97b097bf..2f24103f4533 100644 --- a/include/uapi/drm/ivpu_accel.h +++ b/include/uapi/drm/ivpu_accel.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* - * Copyright (C) 2020-2024 Intel Corporation + * Copyright (C) 2020-2025 Intel Corporation */ #ifndef __UAPI_IVPU_DRM_H__ @@ -22,6 +22,9 @@ extern "C" { #define DRM_IVPU_METRIC_STREAMER_STOP 0x08 #define DRM_IVPU_METRIC_STREAMER_GET_DATA 0x09 #define DRM_IVPU_METRIC_STREAMER_GET_INFO 0x0a +#define DRM_IVPU_CMDQ_CREATE 0x0b +#define DRM_IVPU_CMDQ_DESTROY 0x0c +#define DRM_IVPU_CMDQ_SUBMIT 0x0d #define DRM_IOCTL_IVPU_GET_PARAM \ DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_GET_PARAM, struct drm_ivpu_param) @@ -57,6 +60,15 @@ extern "C" { DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_METRIC_STREAMER_GET_INFO, \ struct drm_ivpu_metric_streamer_get_data) +#define DRM_IOCTL_IVPU_CMDQ_CREATE \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_CMDQ_CREATE, struct drm_ivpu_cmdq_create) + +#define DRM_IOCTL_IVPU_CMDQ_DESTROY \ + DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_CMDQ_DESTROY, struct drm_ivpu_cmdq_destroy) + +#define DRM_IOCTL_IVPU_CMDQ_SUBMIT \ + DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_CMDQ_SUBMIT, struct drm_ivpu_cmdq_submit) + /** * DOC: contexts * @@ -107,6 +119,13 @@ extern "C" { * accessible by hardware DMA. */ #define DRM_IVPU_CAP_DMA_MEMORY_RANGE 2 +/** + * DRM_IVPU_CAP_MANAGE_CMDQ + * + * Driver supports explicit command queue operations: command queue create, + * command queue destroy, and job submission on a specific command queue. + */ +#define DRM_IVPU_CAP_MANAGE_CMDQ 3 /** * struct drm_ivpu_param - Get/Set VPU parameters @@ -128,7 +147,7 @@ struct drm_ivpu_param { * platform type when executing on a simulator or emulator (read-only) * * %DRM_IVPU_PARAM_CORE_CLOCK_RATE: - * Current PLL frequency (read-only) + * Maximum frequency of the NPU data processing unit clock (read-only) * * %DRM_IVPU_PARAM_NUM_CONTEXTS: * Maximum number of simultaneously existing contexts (read-only) @@ -316,6 +335,44 @@ struct drm_ivpu_submit { __u32 priority; }; +/** + * struct drm_ivpu_cmdq_submit - Submit commands to the VPU using explicit command queue + * + * Execute a single command buffer on a given command queue. + * Handles to all referenced buffer objects have to be provided in @buffers_ptr. + * + * User space may wait on job completion using the %DRM_IVPU_BO_WAIT ioctl. + */ +struct drm_ivpu_cmdq_submit { + /** + * @buffers_ptr: + * + * A pointer to an u32 array of GEM handles of the BOs required for this job. + * The number of elements in the array must be equal to the value given by @buffer_count. + * + * The first BO is the command buffer. The rest of the array has to contain all + * BOs referenced from the command buffer. + */ + __u64 buffers_ptr; + + /** @buffer_count: Number of elements in the @buffers_ptr */ + __u32 buffer_count; + + /** @cmdq_id: ID for the command queue where job will be submitted */ + __u32 cmdq_id; + + /** @flags: Reserved for future use - must be zero */ + __u32 flags; + + /** + * @commands_offset: + * + * Offset inside the first buffer in @buffers_ptr containing commands + * to be executed. The offset has to be 8-byte aligned.
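Taken together with the create/destroy structs further down, the explicit command-queue lifecycle composes as in this sketch (gate on DRM_IVPU_CAP_MANAGE_CMDQ first; cmd_bo and data_bo are placeholder GEM handles, with the command buffer first):

	struct drm_ivpu_cmdq_create cq = { .priority = DRM_IVPU_JOB_PRIORITY_DEFAULT };
	ioctl(fd, DRM_IOCTL_IVPU_CMDQ_CREATE, &cq);

	__u32 handles[] = { cmd_bo, data_bo };
	struct drm_ivpu_cmdq_submit submit = {
		.buffers_ptr = (__u64)(uintptr_t)handles,
		.buffer_count = 2,
		.cmdq_id = cq.cmdq_id,
		.commands_offset = 0,		/* 8-byte aligned */
	};
	ioctl(fd, DRM_IOCTL_IVPU_CMDQ_SUBMIT, &submit);
	/* completion is observed via the BO_WAIT ioctl on the command buffer */

	struct drm_ivpu_cmdq_destroy dq = { .cmdq_id = cq.cmdq_id };
	ioctl(fd, DRM_IOCTL_IVPU_CMDQ_DESTROY, &dq);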
 /* drm_ivpu_bo_wait job status codes */
 #define DRM_IVPU_JOB_STATUS_SUCCESS 0
 #define DRM_IVPU_JOB_STATUS_ABORTED 256
@@ -389,6 +446,33 @@ struct drm_ivpu_metric_streamer_get_data {
 };
 
 /**
+ * struct drm_ivpu_cmdq_create - Create a command queue for job submission
+ */
+struct drm_ivpu_cmdq_create {
+ /** @cmdq_id: Returned ID of the created command queue */
+ __u32 cmdq_id;
+ /**
+ * @priority:
+ *
+ * Priority of the command queue; can be one of the following:
+ * %DRM_IVPU_JOB_PRIORITY_DEFAULT
+ * %DRM_IVPU_JOB_PRIORITY_IDLE
+ * %DRM_IVPU_JOB_PRIORITY_NORMAL
+ * %DRM_IVPU_JOB_PRIORITY_FOCUS
+ * %DRM_IVPU_JOB_PRIORITY_REALTIME
+ */
+ __u32 priority;
+};
+
+/**
+ * struct drm_ivpu_cmdq_destroy - Destroy a command queue
+ */
+struct drm_ivpu_cmdq_destroy {
+ /** @cmdq_id: ID of the command queue to destroy */
+ __u32 cmdq_id;
+};
+
+/**
 * struct drm_ivpu_metric_streamer_stop - Stop collecting metric data
 */
struct drm_ivpu_metric_streamer_stop {
diff --git a/include/uapi/drm/nova_drm.h b/include/uapi/drm/nova_drm.h
new file mode 100644
index 000000000000..3ca90ed9d2bb
--- /dev/null
+++ b/include/uapi/drm/nova_drm.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef __NOVA_DRM_H__
+#define __NOVA_DRM_H__
+
+#include "drm.h"
+
+/* DISCLAIMER: Do not use, this is not a stable uAPI.
+ *
+ * This uAPI serves only testing purposes as long as this driver is still in
+ * development. It is required to implement and test infrastructure which is
+ * upstreamed in the context of this driver. See also [1].
+ *
+ * [1] https://lore.kernel.org/dri-devel/Zfsj0_tb-0-tNrJy@cassiopeiae/T/#u
+ */
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * NOVA_GETPARAM_VRAM_BAR_SIZE
+ *
+ * Query the VRAM BAR size in bytes.
+ */
+#define NOVA_GETPARAM_VRAM_BAR_SIZE 0x1
+
+/**
+ * struct drm_nova_getparam - query GPU and driver metadata
+ */
+struct drm_nova_getparam {
+ /**
+ * @param: The identifier of the parameter to query.
+ */
+ __u64 param;
+
+ /**
+ * @value: The value for the specified parameter.
+ */
+ __u64 value;
+};
+
+/**
+ * struct drm_nova_gem_create - create a new DRM GEM object
+ */
+struct drm_nova_gem_create {
+ /**
+ * @handle: The handle of the new DRM GEM object.
+ */
+ __u32 handle;
+
+ /**
+ * @pad: 32 bit padding, should be 0.
+ */
+ __u32 pad;
+
+ /**
+ * @size: The size of the new DRM GEM object.
+ */
+ __u64 size;
+};
+
+/**
+ * struct drm_nova_gem_info - query DRM GEM object metadata
+ */
+struct drm_nova_gem_info {
+ /**
+ * @handle: The handle of the DRM GEM object to query.
+ */
+ __u32 handle;
+
+ /**
+ * @pad: 32 bit padding, should be 0.
+ */
+ __u32 pad;
+
+ /**
+ * @size: The size of the DRM GEM object.
+ */
+ __u64 size;
+};
+
+#define DRM_NOVA_GETPARAM 0x00
+#define DRM_NOVA_GEM_CREATE 0x01
+#define DRM_NOVA_GEM_INFO 0x02
+
+/* Note: this is an enum so that it can be resolved by Rust bindgen.
+ */
+enum {
+ DRM_IOCTL_NOVA_GETPARAM = DRM_IOWR(DRM_COMMAND_BASE + DRM_NOVA_GETPARAM,
+ struct drm_nova_getparam),
+ DRM_IOCTL_NOVA_GEM_CREATE = DRM_IOWR(DRM_COMMAND_BASE + DRM_NOVA_GEM_CREATE,
+ struct drm_nova_gem_create),
+ DRM_IOCTL_NOVA_GEM_INFO = DRM_IOWR(DRM_COMMAND_BASE + DRM_NOVA_GEM_INFO,
+ struct drm_nova_gem_info),
+};
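Test-only or not, the calling convention is the usual DRM one. A minimal sketch querying the VRAM BAR size and creating a 1 MiB object (assuming `fd` is an open nova device node):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/nova_drm.h>	/* kernel uapi header, assumed on include path */

	int nova_smoke_test(int fd, uint64_t *bar_size, uint32_t *handle)
	{
		struct drm_nova_getparam gp = {
			.param = NOVA_GETPARAM_VRAM_BAR_SIZE,
		};
		if (ioctl(fd, DRM_IOCTL_NOVA_GETPARAM, &gp))
			return -1;
		*bar_size = gp.value;

		struct drm_nova_gem_create gc = { .size = 1 << 20 };
		if (ioctl(fd, DRM_IOCTL_NOVA_GEM_CREATE, &gc))
			return -1;
		*handle = gc.handle;	/* handle is written back by the kernel */
		return 0;
	}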
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __NOVA_DRM_H__ */
diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h
index b99763cbae48..ad9a70afea6c 100644
--- a/include/uapi/drm/panthor_drm.h
+++ b/include/uapi/drm/panthor_drm.h
@@ -127,49 +127,10 @@ enum drm_panthor_ioctl_id {
 /** @DRM_PANTHOR_TILER_HEAP_DESTROY: Destroy a tiler heap. */
 DRM_PANTHOR_TILER_HEAP_DESTROY,
-};
-/**
- * DRM_IOCTL_PANTHOR() - Build a Panthor IOCTL number
- * @__access: Access type. Must be R, W or RW.
- * @__id: One of the DRM_PANTHOR_xxx id.
- * @__type: Suffix of the type being passed to the IOCTL.
- *
- * Don't use this macro directly, use the DRM_IOCTL_PANTHOR_xxx
- * values instead.
- *
- * Return: An IOCTL number to be passed to ioctl() from userspace.
- */
-#define DRM_IOCTL_PANTHOR(__access, __id, __type) \
- DRM_IO ## __access(DRM_COMMAND_BASE + DRM_PANTHOR_ ## __id, \
- struct drm_panthor_ ## __type)
-
-#define DRM_IOCTL_PANTHOR_DEV_QUERY \
- DRM_IOCTL_PANTHOR(WR, DEV_QUERY, dev_query)
-#define DRM_IOCTL_PANTHOR_VM_CREATE \
- DRM_IOCTL_PANTHOR(WR, VM_CREATE, vm_create)
-#define DRM_IOCTL_PANTHOR_VM_DESTROY \
- DRM_IOCTL_PANTHOR(WR, VM_DESTROY, vm_destroy)
-#define DRM_IOCTL_PANTHOR_VM_BIND \
- DRM_IOCTL_PANTHOR(WR, VM_BIND, vm_bind)
-#define DRM_IOCTL_PANTHOR_VM_GET_STATE \
- DRM_IOCTL_PANTHOR(WR, VM_GET_STATE, vm_get_state)
-#define DRM_IOCTL_PANTHOR_BO_CREATE \
- DRM_IOCTL_PANTHOR(WR, BO_CREATE, bo_create)
-#define DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET \
- DRM_IOCTL_PANTHOR(WR, BO_MMAP_OFFSET, bo_mmap_offset)
-#define DRM_IOCTL_PANTHOR_GROUP_CREATE \
- DRM_IOCTL_PANTHOR(WR, GROUP_CREATE, group_create)
-#define DRM_IOCTL_PANTHOR_GROUP_DESTROY \
- DRM_IOCTL_PANTHOR(WR, GROUP_DESTROY, group_destroy)
-#define DRM_IOCTL_PANTHOR_GROUP_SUBMIT \
- DRM_IOCTL_PANTHOR(WR, GROUP_SUBMIT, group_submit)
-#define DRM_IOCTL_PANTHOR_GROUP_GET_STATE \
- DRM_IOCTL_PANTHOR(WR, GROUP_GET_STATE, group_get_state)
-#define DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE \
- DRM_IOCTL_PANTHOR(WR, TILER_HEAP_CREATE, tiler_heap_create)
-#define DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY \
- DRM_IOCTL_PANTHOR(WR, TILER_HEAP_DESTROY, tiler_heap_destroy)
+ /** @DRM_PANTHOR_BO_SET_LABEL: Label a BO. */
+ DRM_PANTHOR_BO_SET_LABEL,
+};
 
 /**
  * DOC: IOCTL arguments
@@ -1019,6 +980,70 @@ struct drm_panthor_tiler_heap_destroy {
 __u32 pad;
 };
 
+/**
+ * struct drm_panthor_bo_set_label - Arguments passed to DRM_IOCTL_PANTHOR_BO_SET_LABEL
+ */
+struct drm_panthor_bo_set_label {
+ /** @handle: Handle of the buffer object to label. */
+ __u32 handle;
+
+ /** @pad: MBZ. */
+ __u32 pad;
+
+ /**
+ * @label: User pointer to a NUL-terminated string.
+ *
+ * Length cannot be greater than 4096.
+ */
+ __u64 label;
+};
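Usage is a single self-contained ioctl; the label pointer only has to stay valid for the duration of the call. A minimal sketch (assuming `fd` is an open Panthor node and `bo` a valid BO handle):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/panthor_drm.h>	/* kernel uapi header, assumed on include path */

	int panthor_label_bo(int fd, uint32_t bo, const char *name)
	{
		struct drm_panthor_bo_set_label set_label = {
			.handle = bo,
			.pad = 0,			/* MBZ */
			.label = (uintptr_t)name,	/* NUL-terminated, <= 4096 bytes */
		};

		return ioctl(fd, DRM_IOCTL_PANTHOR_BO_SET_LABEL, &set_label);
	}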
+
+/**
+ * DRM_IOCTL_PANTHOR() - Build a Panthor IOCTL number
+ * @__access: Access type. Must be R, W or RW.
+ * @__id: One of the DRM_PANTHOR_xxx ids.
+ * @__type: Suffix of the type being passed to the IOCTL.
+ *
+ * Don't use this macro directly, use the DRM_IOCTL_PANTHOR_xxx
+ * values instead.
+ *
+ * Return: An IOCTL number to be passed to ioctl() from userspace.
+ */
+#define DRM_IOCTL_PANTHOR(__access, __id, __type) \
+ DRM_IO ## __access(DRM_COMMAND_BASE + DRM_PANTHOR_ ## __id, \
+ struct drm_panthor_ ## __type)
+
+enum {
+ DRM_IOCTL_PANTHOR_DEV_QUERY =
+ DRM_IOCTL_PANTHOR(WR, DEV_QUERY, dev_query),
+ DRM_IOCTL_PANTHOR_VM_CREATE =
+ DRM_IOCTL_PANTHOR(WR, VM_CREATE, vm_create),
+ DRM_IOCTL_PANTHOR_VM_DESTROY =
+ DRM_IOCTL_PANTHOR(WR, VM_DESTROY, vm_destroy),
+ DRM_IOCTL_PANTHOR_VM_BIND =
+ DRM_IOCTL_PANTHOR(WR, VM_BIND, vm_bind),
+ DRM_IOCTL_PANTHOR_VM_GET_STATE =
+ DRM_IOCTL_PANTHOR(WR, VM_GET_STATE, vm_get_state),
+ DRM_IOCTL_PANTHOR_BO_CREATE =
+ DRM_IOCTL_PANTHOR(WR, BO_CREATE, bo_create),
+ DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET =
+ DRM_IOCTL_PANTHOR(WR, BO_MMAP_OFFSET, bo_mmap_offset),
+ DRM_IOCTL_PANTHOR_GROUP_CREATE =
+ DRM_IOCTL_PANTHOR(WR, GROUP_CREATE, group_create),
+ DRM_IOCTL_PANTHOR_GROUP_DESTROY =
+ DRM_IOCTL_PANTHOR(WR, GROUP_DESTROY, group_destroy),
+ DRM_IOCTL_PANTHOR_GROUP_SUBMIT =
+ DRM_IOCTL_PANTHOR(WR, GROUP_SUBMIT, group_submit),
+ DRM_IOCTL_PANTHOR_GROUP_GET_STATE =
+ DRM_IOCTL_PANTHOR(WR, GROUP_GET_STATE, group_get_state),
+ DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE =
+ DRM_IOCTL_PANTHOR(WR, TILER_HEAP_CREATE, tiler_heap_create),
+ DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY =
+ DRM_IOCTL_PANTHOR(WR, TILER_HEAP_DESTROY, tiler_heap_destroy),
+ DRM_IOCTL_PANTHOR_BO_SET_LABEL =
+ DRM_IOCTL_PANTHOR(WR, BO_SET_LABEL, bo_set_label),
+};
+
 #if defined(__cplusplus)
 }
 #endif
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index c2ce71987e9b..9debb320c34b 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -163,6 +163,12 @@ struct drm_virtgpu_3d_wait {
 __u32 flags;
 };
 
+#define VIRTGPU_DRM_CAPSET_VIRGL 1
+#define VIRTGPU_DRM_CAPSET_VIRGL2 2
+#define VIRTGPU_DRM_CAPSET_GFXSTREAM_VULKAN 3
+#define VIRTGPU_DRM_CAPSET_VENUS 4
+#define VIRTGPU_DRM_CAPSET_CROSS_DOMAIN 5
+#define VIRTGPU_DRM_CAPSET_DRM 6
 struct drm_virtgpu_get_caps {
 __u32 cap_set_id;
 __u32 cap_set_ver;
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index f62689ca861a..6a702ba7817c 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -393,6 +393,10 @@ struct drm_xe_query_mem_regions {
 *
 * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device
 * has usable VRAM
+ * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY - Flag is set if the device
+ * has low latency hint support
+ * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR - Flag is set if the
+ * device has CPU address mirroring support
 * - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
 * required by this device, typically SZ_4K or SZ_64K
 * - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
@@ -409,6 +413,8 @@ struct drm_xe_query_config {
 #define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0
 #define DRM_XE_QUERY_CONFIG_FLAGS 1
 #define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0)
+ #define DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY (1 << 1)
+ #define DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR (1 << 2)
 #define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2
 #define DRM_XE_QUERY_CONFIG_VA_BITS 3
 #define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
@@ -630,6 +636,39 @@ struct drm_xe_query_uc_fw_version {
 };
 
 /**
+ * struct drm_xe_query_pxp_status - query if PXP is ready
+ *
+ * If PXP is enabled and no fatal error has occurred, the status will be set to
+ * one of the following values:
+ * 0: PXP init still in progress
+ * 1: PXP init complete
+ *
+ * If PXP is not enabled or something has gone wrong, the query will fail
+ * with one of the following error codes:
+ * -ENODEV: PXP not supported or disabled;
+ * -EIO: fatal error occurred during init, so PXP will never be enabled;
+ * -EINVAL: incorrect value provided as part of the query;
+ * -EFAULT: error copying the memory between kernel and userspace.
+ *
+ * The status can only be 0 in the first few seconds after driver load. If
+ * everything works as expected, the status will transition to init complete in
+ * less than 1 second, while in case of errors the driver might take longer to
+ * start returning an error code, but it should still take less than 10 seconds.
+ *
+ * The supported session type bitmask is based on the values in
+ * enum drm_xe_pxp_session_type. TYPE_NONE is always supported and therefore
+ * is not reported in the bitmask.
+ */
+struct drm_xe_query_pxp_status {
+ /** @status: current PXP status */
+ __u32 status;
+
+ /** @supported_session_types: bitmask of supported PXP session types */
+ __u32 supported_session_types;
+};
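Since the status legitimately reads 0 for a short window after driver load, userspace is expected to poll. A sketch of such a poll loop using &struct drm_xe_device_query (described below); the retry interval is arbitrary:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <drm/xe_drm.h>	/* kernel uapi header, assumed on include path */

	/* Returns 1 once PXP init completed, 0 on timeout, -1 on error (-ENODEV, -EIO, ...). */
	int xe_wait_pxp_ready(int fd, int tries)
	{
		struct drm_xe_query_pxp_status pxp = { 0 };
		struct drm_xe_device_query query = {
			.query = DRM_XE_DEVICE_QUERY_PXP_STATUS,
			.size = sizeof(pxp),
			.data = (uintptr_t)&pxp,
		};

		while (tries--) {
			if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query))
				return -1;
			if (pxp.status == 1)
				return 1;	/* init complete */
			usleep(100 * 1000);	/* status 0: init still in progress */
		}
		return 0;
	}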
+
+/**
 * struct drm_xe_device_query - Input of &DRM_IOCTL_XE_DEVICE_QUERY - main
 * structure to query device information
 *
@@ -648,6 +687,7 @@ struct drm_xe_query_uc_fw_version {
 * attributes.
 * - %DRM_XE_DEVICE_QUERY_GT_TOPOLOGY
 * - %DRM_XE_DEVICE_QUERY_ENGINE_CYCLES
+ * - %DRM_XE_DEVICE_QUERY_PXP_STATUS
 *
 * If size is set to 0, the driver fills it with the required size for
 * the requested type of data to query. If size is equal to the required
@@ -700,6 +740,8 @@ struct drm_xe_device_query {
 #define DRM_XE_DEVICE_QUERY_ENGINE_CYCLES 6
 #define DRM_XE_DEVICE_QUERY_UC_FW_VERSION 7
 #define DRM_XE_DEVICE_QUERY_OA_UNITS 8
+#define DRM_XE_DEVICE_QUERY_PXP_STATUS 9
+#define DRM_XE_DEVICE_QUERY_EU_STALL 10
 /** @query: The type of data to query */
 __u32 query;
@@ -743,8 +785,23 @@ struct drm_xe_device_query {
 * - %DRM_XE_GEM_CPU_CACHING_WC - Allocate the pages as write-combined. This
 * is uncached. Scanout surfaces should likely use this. All objects
 * that can be placed in VRAM must use this.
+ *
+ * This ioctl supports setting the following properties via the
+ * %DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY extension, which uses the
+ * generic @drm_xe_ext_set_property struct:
+ *
+ * - %DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE - set the type of PXP session
+ * this object will be used with. Valid values are listed in enum
+ * drm_xe_pxp_session_type. %DRM_XE_PXP_TYPE_NONE is the default behavior, so
+ * there is no need to explicitly set that. Objects used with a session of type
+ * %DRM_XE_PXP_TYPE_HWDRM will be marked as invalid if a PXP invalidation
+ * event occurs after their creation. Attempting to flip an invalid object
+ * will cause a black frame to be displayed instead. Submissions with invalid
+ * objects mapped in the VM will be rejected.
 */
struct drm_xe_gem_create {
+#define DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY 0
+#define DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE 0
 /** @extensions: Pointer to the first extension struct, if any */
 __u64 extensions;
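A protected buffer would then chain a single set_property extension at creation time. A sketch, assuming the @drm_xe_ext_set_property layout defined earlier in this header and leaving placement selection to the caller:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/xe_drm.h>

	/* Returns the new GEM handle, or 0 on failure. */
	uint32_t xe_gem_create_hwdrm(int fd, uint64_t size, uint32_t placement)
	{
		struct drm_xe_ext_set_property pxp = {
			.base.name = DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY,
			.property = DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE,
			.value = DRM_XE_PXP_TYPE_HWDRM,	/* see enum drm_xe_pxp_session_type */
		};
		struct drm_xe_gem_create create = {
			.extensions = (uintptr_t)&pxp,
			.size = size,
			.placement = placement,
			.cpu_caching = DRM_XE_GEM_CPU_CACHING_WC,
		};

		if (ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create))
			return 0;
		return create.handle;
	}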
@@ -811,6 +868,32 @@ struct drm_xe_gem_create {
 /**
 * struct drm_xe_gem_mmap_offset - Input of &DRM_IOCTL_XE_GEM_MMAP_OFFSET
+ *
+ * The @flags can be:
+ * - %DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER - For the user to query a special
+ * offset for use in the mmap ioctl. Writing to the returned mmap address will
+ * generate a PCI memory barrier with low overhead (avoiding an IOCTL call as
+ * well as a write to VRAM, both of which would add overhead), acting like an
+ * MI_MEM_FENCE instruction.
+ *
+ * Note: The mmap size can be at most 4K, due to HW limitations. As a result
+ * this interface is only supported on CPU architectures that support 4K page
+ * size. The mmap_offset ioctl will detect this and gracefully return an
+ * error, where userspace is expected to have a different fallback method for
+ * triggering a barrier.
+ *
+ * Roughly the usage would be as follows:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_gem_mmap_offset mmo = {
+ * .handle = 0, // must be set to 0
+ * .flags = DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER,
+ * };
+ *
+ * err = ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo);
+ * map = mmap(NULL, size, PROT_WRITE, MAP_SHARED, fd, mmo.offset);
+ * map[i] = 0xdeadbeef; // issue barrier
 */
struct drm_xe_gem_mmap_offset {
 /** @extensions: Pointer to the first extension struct, if any */
 __u64 extensions;
@@ -819,7 +902,8 @@ struct drm_xe_gem_mmap_offset {
 /** @handle: Handle for the object being mapped. */
 __u32 handle;
- /** @flags: Must be zero */
+#define DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER (1 << 0)
+ /** @flags: Flags */
 __u32 flags;
 /** @offset: The fake offset to use for subsequent mmap call */
@@ -833,7 +917,11 @@ struct drm_xe_gem_mmap_offset {
 * struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
 *
 * The @flags can be:
- * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE
+ * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE - Map the whole virtual address
+ * space of the VM to the scratch page. A vm_bind would overwrite the scratch
+ * page mapping. This flag is mutually exclusive with the
+ * %DRM_XE_VM_CREATE_FLAG_FAULT_MODE flag, except on xe2 and xe3 platforms.
 * - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts
 * exec submissions to its exec_queues that don't have an upper time
 * limit on the job execution time. But exec submissions to these
@@ -906,6 +994,15 @@ struct drm_xe_vm_destroy {
 * will only be valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
 * handle MBZ, and the BO offset MBZ. This flag is intended to
 * implement VK sparse bindings.
+ * - %DRM_XE_VM_BIND_FLAG_CHECK_PXP - If the object is encrypted via PXP,
+ * reject the binding if the encryption key is no longer valid. This
+ * flag has no effect on BOs that are not marked as using PXP.
+ * - %DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR - When the CPU address mirror flag is
+ * set, no mappings are created; rather, the range is reserved for CPU address
+ * mirroring, which will be populated on GPU page faults or prefetches. Only
+ * valid on VMs with DRM_XE_VM_CREATE_FLAG_FAULT_MODE set. The CPU address
+ * mirror flag is only valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
+ * handle MBZ, and the BO offset MBZ.
 */
struct drm_xe_vm_bind_op {
 /** @extensions: Pointer to the first extension struct, if any */
@@ -958,7 +1055,9 @@ struct drm_xe_vm_bind_op {
 * on the @pat_index. For such mappings there is no actual memory being
 * mapped (the address in the PTE is invalid), so the various PAT memory
 * attributes likely do not apply. Simply leaving as zero is one
- * option (still a valid pat_index).
+ * option (still a valid pat_index). The same applies to
+ * DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR bindings, as for such mappings
+ * there is no actual memory being mapped.
 */
 __u16 pat_index;
@@ -974,6 +1073,14 @@ struct drm_xe_vm_bind_op {
 /** @userptr: user pointer to bind on */
 __u64 userptr;
+
+ /**
+ * @cpu_addr_mirror_offset: Offset from GPU @addr to create
+ * CPU address mirror mappings. MBZ with the current level of
+ * support (i.e. only 1:1 mappings between GPU and CPU addresses
+ * are supported).
+ */
+ __s64 cpu_addr_mirror_offset;
 };
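A CPU address mirror reservation therefore looks much like a NULL binding: no BO, no offset, just a range and the flag. A sketch of a single-op bind on a fault-mode VM, assuming the &struct drm_xe_vm_bind container layout (@num_binds, @bind) defined in this header:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/xe_drm.h>

	/* Reserve [addr, addr + range) for CPU address mirroring. */
	int xe_vm_reserve_mirror(int fd, uint32_t vm_id, uint64_t addr, uint64_t range)
	{
		struct drm_xe_vm_bind bind = {
			.vm_id = vm_id,
			.num_binds = 1,
			.bind = {
				.op = DRM_XE_VM_BIND_OP_MAP,
				.flags = DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR,
				.obj = 0,		/* BO handle MBZ */
				.obj_offset = 0,	/* BO offset MBZ; aliases @cpu_addr_mirror_offset */
				.addr = addr,
				.range = range,
				.pat_index = 0,		/* nothing is mapped, see above */
			},
		};

		return ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
	}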
 
 /**
@@ -996,6 +1103,8 @@ struct drm_xe_vm_bind_op {
 #define DRM_XE_VM_BIND_FLAG_IMMEDIATE (1 << 1)
 #define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)
 #define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
+#define DRM_XE_VM_BIND_FLAG_CHECK_PXP (1 << 4)
+#define DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR (1 << 5)
 /** @flags: Bind flags */
 __u32 flags;
@@ -1087,6 +1196,29 @@ struct drm_xe_vm_bind {
 /**
 * struct drm_xe_exec_queue_create - Input of &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
 *
+ * This ioctl supports setting the following properties via the
+ * %DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY extension, which uses the
+ * generic @drm_xe_ext_set_property struct:
+ *
+ * - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY - set the queue priority.
+ * CAP_SYS_NICE is required to set a value above normal.
+ * - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE - set the queue timeslice
+ * duration in microseconds.
+ * - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE - set the type of PXP session
+ * this queue will be used with. Valid values are listed in enum
+ * drm_xe_pxp_session_type. %DRM_XE_PXP_TYPE_NONE is the default behavior, so
+ * there is no need to explicitly set that. When a queue of type
+ * %DRM_XE_PXP_TYPE_HWDRM is created, the PXP default HWDRM session
+ * (%DRM_XE_PXP_HWDRM_DEFAULT_SESSION) will be started, if it isn't already
+ * running. The user is expected to query the PXP status via the query ioctl
+ * (see %DRM_XE_DEVICE_QUERY_PXP_STATUS) and to wait for PXP to be ready before
+ * attempting to create a queue with this property. When a queue is created
+ * before PXP is ready, the ioctl will return -EBUSY if init is still in
+ * progress or -EIO if init failed.
+ * Given that going into a power-saving state kills PXP HWDRM sessions,
+ * runtime PM will be blocked while queues of this type are alive.
+ * All PXP queues will be killed if a PXP invalidation event occurs.
+ *
 * The example below shows how to use @drm_xe_exec_queue_create to create
 * a simple exec_queue (no parallel submission) of class
 * &DRM_XE_ENGINE_CLASS_RENDER.
@@ -1105,12 +1237,27 @@ struct drm_xe_vm_bind {
 * };
 * ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
 *
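+ * A PXP-protected queue would chain a set_property extension instead (a
+ * sketch; it assumes the @drm_xe_ext_set_property layout defined earlier in
+ * this file and reuses the exec_queue_create from the example above):
+ *
+ * struct drm_xe_ext_set_property pxp_ext = {
+ * .base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
+ * .property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE,
+ * .value = DRM_XE_PXP_TYPE_HWDRM,
+ * };
+ * exec_queue_create.extensions = (uintptr_t)&pxp_ext;
+ * ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
+ *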
+ * Users can also provide a hint to the kernel for cases demanding a low
+ * latency profile. Please note that it may have an impact on power
+ * consumption. The low latency hint is indicated with a flag while creating
+ * the exec queue, as shown below:
+ *
+ * struct drm_xe_exec_queue_create exec_queue_create = {
+ * .flags = DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT,
+ * .extensions = 0,
+ * .vm_id = vm,
+ * .num_bb_per_exec = 1,
+ * .num_eng_per_bb = 1,
+ * .instances = to_user_pointer(&instance),
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
+ *
 */
struct drm_xe_exec_queue_create {
#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
-
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE 2
 /** @extensions: Pointer to the first extension struct, if any */
 __u64 extensions;
@@ -1123,7 +1270,8 @@ struct drm_xe_exec_queue_create {
 /** @vm_id: VM to use for this exec queue */
 __u32 vm_id;
- /** @flags: MBZ */
+#define DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT (1 << 0)
+ /** @flags: flags to use for this exec queue */
 __u32 flags;
 /** @exec_queue_id: Returned exec queue ID */
@@ -1397,6 +1545,8 @@ struct drm_xe_wait_user_fence {
 enum drm_xe_observation_type {
 /** @DRM_XE_OBSERVATION_TYPE_OA: OA observation stream type */
 DRM_XE_OBSERVATION_TYPE_OA,
+ /** @DRM_XE_OBSERVATION_TYPE_EU_STALL: EU stall sampling observation stream type */
+ DRM_XE_OBSERVATION_TYPE_EU_STALL,
 };
 
 /**
@@ -1729,6 +1879,97 @@ struct drm_xe_oa_stream_info {
 __u64 reserved[3];
 };
 
+/**
+ * enum drm_xe_pxp_session_type - Supported PXP session types.
+ *
+ * We currently only support HWDRM sessions, which are used for protected
+ * content that ends up being displayed, but the HW supports multiple types, so
+ * we might extend support in the future.
+ */
+enum drm_xe_pxp_session_type {
+ /** @DRM_XE_PXP_TYPE_NONE: PXP not used */
+ DRM_XE_PXP_TYPE_NONE = 0,
+ /**
+ * @DRM_XE_PXP_TYPE_HWDRM: HWDRM sessions are used for content that ends
+ * up on the display.
+ */
+ DRM_XE_PXP_TYPE_HWDRM = 1,
+};
+
+/* ID of the protected content session managed by Xe when PXP is active */
+#define DRM_XE_PXP_HWDRM_DEFAULT_SESSION 0xf
+
+/**
+ * enum drm_xe_eu_stall_property_id - EU stall sampling input property ids.
+ *
+ * These properties are passed to the driver at open as a chain of
+ * @drm_xe_ext_set_property structures with @property set to these
+ * properties' enums and @value set to the corresponding values of these
+ * properties. @drm_xe_user_extension base.name should be set to
+ * @DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY.
+ *
+ * With the file descriptor obtained from open, user space must enable
+ * the EU stall stream fd with @DRM_XE_OBSERVATION_IOCTL_ENABLE before
+ * calling read(). An EIO errno from read() indicates that the HW dropped
+ * data due to a full buffer.
+ */
+enum drm_xe_eu_stall_property_id {
+#define DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY 0
+ /**
+ * @DRM_XE_EU_STALL_PROP_GT_ID: @gt_id of the GT on which
+ * EU stall data will be captured.
+ */
+ DRM_XE_EU_STALL_PROP_GT_ID = 1,
+
+ /**
+ * @DRM_XE_EU_STALL_PROP_SAMPLE_RATE: Sampling rate in
+ * GPU cycles from @sampling_rates in struct @drm_xe_query_eu_stall
+ */
+ DRM_XE_EU_STALL_PROP_SAMPLE_RATE,
+
+ /**
+ * @DRM_XE_EU_STALL_PROP_WAIT_NUM_REPORTS: Minimum number of
+ * EU stall data reports to be present in the kernel buffer
+ * before unblocking a blocked poll or read.
+ */
+ DRM_XE_EU_STALL_PROP_WAIT_NUM_REPORTS,
+};
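An EU stall session then looks roughly as follows. This is a sketch: it assumes the observation stream open path (&struct drm_xe_observation_param with %DRM_XE_OBSERVATION_OP_STREAM_OPEN via %DRM_IOCTL_XE_OBSERVATION) defined elsewhere in this header, which this diff does not show:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <drm/xe_drm.h>

	/* Open and enable an EU stall stream on one GT; returns the stream fd. */
	int xe_eu_stall_open(int fd, uint64_t gt_id, uint64_t rate)
	{
		struct drm_xe_ext_set_property props[2] = {
			{
				.base.name = DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY,
				.base.next_extension = (uintptr_t)&props[1],
				.property = DRM_XE_EU_STALL_PROP_GT_ID,
				.value = gt_id,
			},
			{
				.base.name = DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY,
				.property = DRM_XE_EU_STALL_PROP_SAMPLE_RATE,
				.value = rate,	/* one of drm_xe_query_eu_stall.sampling_rates */
			},
		};
		struct drm_xe_observation_param param = {
			.observation_type = DRM_XE_OBSERVATION_TYPE_EU_STALL,
			.observation_op = DRM_XE_OBSERVATION_OP_STREAM_OPEN,
			.param = (uintptr_t)props,
		};
		int stream_fd = ioctl(fd, DRM_IOCTL_XE_OBSERVATION, &param);

		if (stream_fd < 0)
			return -1;
		/* The stream starts disabled; enable it before read(). */
		if (ioctl(stream_fd, DRM_XE_OBSERVATION_IOCTL_ENABLE, 0) < 0) {
			close(stream_fd);
			return -1;
		}
		return stream_fd;	/* read() yields whole records; -EIO signals drops */
	}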
+/**
+ * struct drm_xe_query_eu_stall - Information about EU stall sampling.
+ *
+ * If a query is made with a struct @drm_xe_device_query where .query
+ * is equal to @DRM_XE_DEVICE_QUERY_EU_STALL, then the reply uses
+ * struct @drm_xe_query_eu_stall in .data.
+ */
+struct drm_xe_query_eu_stall {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @capabilities: EU stall capabilities bit-mask */
+ __u64 capabilities;
+#define DRM_XE_EU_STALL_CAPS_BASE (1 << 0)
+
+ /** @record_size: size of each EU stall data record */
+ __u64 record_size;
+
+ /** @per_xecore_buf_size: internal per-XeCore buffer size */
+ __u64 per_xecore_buf_size;
+
+ /** @reserved: Reserved */
+ __u64 reserved[5];
+
+ /** @num_sampling_rates: Number of sampling rates in @sampling_rates array */
+ __u64 num_sampling_rates;
+
+ /**
+ * @sampling_rates: Flexible array of sampling rates
+ * sorted from fastest to slowest.
+ * Sampling rates are specified in GPU clock cycles.
+ */
+ __u64 sampling_rates[];
+};
+
 #if defined(__cplusplus)
 }
 #endif
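Because of the flexible @sampling_rates array, this is one query where the two-call pattern described for &struct drm_xe_device_query is mandatory: probe the size first, then fetch. A sketch (error handling trimmed):

	#include <stdint.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <drm/xe_drm.h>

	/* Caller frees the returned buffer. */
	struct drm_xe_query_eu_stall *xe_query_eu_stall(int fd)
	{
		struct drm_xe_device_query query = {
			.query = DRM_XE_DEVICE_QUERY_EU_STALL,
			.size = 0,	/* first call: ask for the required size */
		};
		struct drm_xe_query_eu_stall *info;

		if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query))
			return NULL;

		info = calloc(1, query.size);
		if (!info)
			return NULL;

		query.data = (uintptr_t)info;
		if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query)) {
			free(info);
			return NULL;
		}
		/* info->sampling_rates[0] is the fastest supported rate. */
		return info;
	}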