summaryrefslogtreecommitdiff
path: root/include
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2026-03-02 09:57:26 +0300
committerDave Airlie <airlied@redhat.com>2026-03-02 09:58:07 +0300
commit21f6bcdf2be72e750dbe430b7227a66763a58466 (patch)
treebdf313b0fc97b968f650f490bd1d667fbd20eb08 /include
parent11439c4635edd669ae435eec308f4ab8a0804808 (diff)
parent2622649ad6cdbb3e77bfafc8c0fe686090b77f70 (diff)
downloadlinux-21f6bcdf2be72e750dbe430b7227a66763a58466.tar.xz
Merge tag 'drm-misc-next-2026-02-26' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next
drm-misc-next for v7.1: UAPI Changes: connector: - Add panel_type property fourcc: - Add ARM interleaved 64k modifier nouveau: - Query Z-Cull info with DRM_IOCTL_NOUVEAU_GET_ZCULL_INFO Cross-subsystem Changes: coreboot: - Clean up coreboot framebuffer support dma-buf: - Provide revoke mechanism for shared buffers - Rename move_notify callback to invalidate_mappings and update users. - Always enable move_notify - Support dma_fence_was_initialized() test - Protect dma_fence_ops by RCU and improve locking - Fix sparse warnings Core Changes: atomic: - Allocate drm_private_state via callback and convert drivers atomic-helper: - Use system_percpu_wq buddy: - Make buddy allocator available to all DRM drivers - Document flags and structures colorop: - Add destroy helper and convert drivers fbdev-emulation: - Clean up gem: - Fix drm_gem_objects_lookup() error cleanup Driver Changes: amdgpu: - Set panel_type to OLED for eDP atmel-hlcdc: - Support sama5d65 LCD controller bridge: - anx7625: Support USB-C plus DT bindings - connector: Fix EDID detection - dw-hdmi-qp: Support Vendor-Specific and SDP Infoframes; improve others - fsl-ldb: Fix visual artifacts plus related DT property 'enable-termination-resistor' - imx8qxp-pixel-link: Improve bridge reference handling - lt9611: Support Port-B-only input plus DT bindings - tda998x: Support DRM_BRIDGE_ATTACH_NO_CONNECTOR; Clean up - Support TH1520 HDMI plus DT bindings - Clean up imagination: - Clean up komeda: - Fix integer overflow in AFBC checks mcde: - Improve bridge handling nouveau: - Provide Z-cull info to user space - gsp: Support GA100 - Shutdown on PCI device shutdown - Clean up panel: - panel-jdi-lt070me05000: Use mipi-dsi multi functions - panel-edp: Support Add AUO B116XAT04.1 (HW: 1A); Support CMN N116BCL-EAK (C2); Support FriendlyELEC plus DT changes - Fix Kconfig dependencies panthor: - Add tracepoints for power and IRQs rcar-du: - dsi: fix VCLK calculation rockchip: - vop2: Use drm_ logging functions - Support 
DisplayPort on RK3576 sysfb: - corebootdrm: Support system framebuffer on coreboot firmware; detect orientation - Clean up pixel-format lookup sun4i: - Clean up tilcdc: - Use DT bindings scheme - Use managed DRM interfaces - Support DRM_BRIDGE_ATTACH_NO_CONNECTOR - Clean up a lot of obsolete code v3d: - Clean up vc4: - Use system_percpu_wq - Clean up verisilicon: - Support DC8200 plus DT bindings virtgpu: - Support PRIME imports with enabled 3D Signed-off-by: Dave Airlie <airlied@redhat.com> From: Thomas Zimmermann <tzimmermann@suse.de> Link: https://patch.msgid.link/20260226143615.GA47200@linux.fritz.box
Diffstat (limited to 'include')
-rw-r--r--include/drm/bridge/dw_dp.h7
-rw-r--r--include/drm/drm_atomic.h21
-rw-r--r--include/drm/drm_atomic_state_helper.h3
-rw-r--r--include/drm/drm_buddy.h163
-rw-r--r--include/drm/drm_colorop.h32
-rw-r--r--include/drm/drm_connector.h1
-rw-r--r--include/drm/drm_fb_helper.h105
-rw-r--r--include/drm/drm_mode_config.h4
-rw-r--r--include/linux/coreboot.h90
-rw-r--r--include/linux/dma-buf.h17
-rw-r--r--include/linux/dma-fence-array.h1
-rw-r--r--include/linux/dma-fence-chain.h1
-rw-r--r--include/linux/dma-fence.h97
-rw-r--r--include/linux/gpu_buddy.h239
-rw-r--r--include/trace/events/dma_fence.h35
-rw-r--r--include/uapi/drm/drm_fourcc.h16
-rw-r--r--include/uapi/drm/drm_mode.h4
-rw-r--r--include/uapi/drm/nouveau_drm.h66
18 files changed, 581 insertions, 321 deletions
diff --git a/include/drm/bridge/dw_dp.h b/include/drm/bridge/dw_dp.h
index d05df49fd884..25363541e69d 100644
--- a/include/drm/bridge/dw_dp.h
+++ b/include/drm/bridge/dw_dp.h
@@ -11,8 +11,15 @@
struct drm_encoder;
struct dw_dp;
+enum {
+ DW_DP_MP_SINGLE_PIXEL,
+ DW_DP_MP_DUAL_PIXEL,
+ DW_DP_MP_QUAD_PIXEL,
+};
+
struct dw_dp_plat_data {
u32 max_link_rate;
+ u8 pixel_mode;
};
struct dw_dp *dw_dp_bind(struct device *dev, struct drm_encoder *encoder,
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 178f8f62c80f..0b1b32bcd2bd 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -262,6 +262,19 @@ struct drm_private_state;
*/
struct drm_private_state_funcs {
/**
+ * @atomic_create_state:
+ *
+ * Allocates a pristine, initialized, state for the private
+ * object and returns it.
+ *
+ * RETURNS:
+ *
+ * A new, pristine, private state instance or an error pointer
+ * on failure.
+ */
+ struct drm_private_state *(*atomic_create_state)(struct drm_private_obj *obj);
+
+ /**
* @atomic_duplicate_state:
*
* Duplicate the current state of the private object and return it. It
@@ -723,10 +736,10 @@ struct drm_connector_state * __must_check
drm_atomic_get_connector_state(struct drm_atomic_state *state,
struct drm_connector *connector);
-void drm_atomic_private_obj_init(struct drm_device *dev,
- struct drm_private_obj *obj,
- struct drm_private_state *state,
- const struct drm_private_state_funcs *funcs);
+int drm_atomic_private_obj_init(struct drm_device *dev,
+ struct drm_private_obj *obj,
+ struct drm_private_state *state,
+ const struct drm_private_state_funcs *funcs);
void drm_atomic_private_obj_fini(struct drm_private_obj *obj);
struct drm_private_state * __must_check
diff --git a/include/drm/drm_atomic_state_helper.h b/include/drm/drm_atomic_state_helper.h
index b9740edb2658..900672c6ea90 100644
--- a/include/drm/drm_atomic_state_helper.h
+++ b/include/drm/drm_atomic_state_helper.h
@@ -84,6 +84,9 @@ void
__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state);
void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state);
+
+void __drm_atomic_helper_private_obj_create_state(struct drm_private_obj *obj,
+ struct drm_private_state *state);
void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
struct drm_private_state *state);
diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
index b909fa8f810a..3054369bebff 100644
--- a/include/drm/drm_buddy.h
+++ b/include/drm/drm_buddy.h
@@ -6,166 +6,13 @@
#ifndef __DRM_BUDDY_H__
#define __DRM_BUDDY_H__
-#include <linux/bitops.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/rbtree.h>
+#include <linux/gpu_buddy.h>
struct drm_printer;
-#define DRM_BUDDY_RANGE_ALLOCATION BIT(0)
-#define DRM_BUDDY_TOPDOWN_ALLOCATION BIT(1)
-#define DRM_BUDDY_CONTIGUOUS_ALLOCATION BIT(2)
-#define DRM_BUDDY_CLEAR_ALLOCATION BIT(3)
-#define DRM_BUDDY_CLEARED BIT(4)
-#define DRM_BUDDY_TRIM_DISABLE BIT(5)
-
-struct drm_buddy_block {
-#define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
-#define DRM_BUDDY_HEADER_STATE GENMASK_ULL(11, 10)
-#define DRM_BUDDY_ALLOCATED (1 << 10)
-#define DRM_BUDDY_FREE (2 << 10)
-#define DRM_BUDDY_SPLIT (3 << 10)
-#define DRM_BUDDY_HEADER_CLEAR GENMASK_ULL(9, 9)
-/* Free to be used, if needed in the future */
-#define DRM_BUDDY_HEADER_UNUSED GENMASK_ULL(8, 6)
-#define DRM_BUDDY_HEADER_ORDER GENMASK_ULL(5, 0)
- u64 header;
-
- struct drm_buddy_block *left;
- struct drm_buddy_block *right;
- struct drm_buddy_block *parent;
-
- void *private; /* owned by creator */
-
- /*
- * While the block is allocated by the user through drm_buddy_alloc*,
- * the user has ownership of the link, for example to maintain within
- * a list, if so desired. As soon as the block is freed with
- * drm_buddy_free* ownership is given back to the mm.
- */
- union {
- struct rb_node rb;
- struct list_head link;
- };
-
- struct list_head tmp_link;
-};
-
-/* Order-zero must be at least SZ_4K */
-#define DRM_BUDDY_MAX_ORDER (63 - 12)
-
-/*
- * Binary Buddy System.
- *
- * Locking should be handled by the user, a simple mutex around
- * drm_buddy_alloc* and drm_buddy_free* should suffice.
- */
-struct drm_buddy {
- /* Maintain a free list for each order. */
- struct rb_root **free_trees;
-
- /*
- * Maintain explicit binary tree(s) to track the allocation of the
- * address space. This gives us a simple way of finding a buddy block
- * and performing the potentially recursive merge step when freeing a
- * block. Nodes are either allocated or free, in which case they will
- * also exist on the respective free list.
- */
- struct drm_buddy_block **roots;
-
- /*
- * Anything from here is public, and remains static for the lifetime of
- * the mm. Everything above is considered do-not-touch.
- */
- unsigned int n_roots;
- unsigned int max_order;
-
- /* Must be at least SZ_4K */
- u64 chunk_size;
- u64 size;
- u64 avail;
- u64 clear_avail;
-};
-
-static inline u64
-drm_buddy_block_offset(const struct drm_buddy_block *block)
-{
- return block->header & DRM_BUDDY_HEADER_OFFSET;
-}
-
-static inline unsigned int
-drm_buddy_block_order(struct drm_buddy_block *block)
-{
- return block->header & DRM_BUDDY_HEADER_ORDER;
-}
-
-static inline unsigned int
-drm_buddy_block_state(struct drm_buddy_block *block)
-{
- return block->header & DRM_BUDDY_HEADER_STATE;
-}
-
-static inline bool
-drm_buddy_block_is_allocated(struct drm_buddy_block *block)
-{
- return drm_buddy_block_state(block) == DRM_BUDDY_ALLOCATED;
-}
-
-static inline bool
-drm_buddy_block_is_clear(struct drm_buddy_block *block)
-{
- return block->header & DRM_BUDDY_HEADER_CLEAR;
-}
-
-static inline bool
-drm_buddy_block_is_free(struct drm_buddy_block *block)
-{
- return drm_buddy_block_state(block) == DRM_BUDDY_FREE;
-}
-
-static inline bool
-drm_buddy_block_is_split(struct drm_buddy_block *block)
-{
- return drm_buddy_block_state(block) == DRM_BUDDY_SPLIT;
-}
-
-static inline u64
-drm_buddy_block_size(struct drm_buddy *mm,
- struct drm_buddy_block *block)
-{
- return mm->chunk_size << drm_buddy_block_order(block);
-}
-
-int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size);
-
-void drm_buddy_fini(struct drm_buddy *mm);
-
-struct drm_buddy_block *
-drm_get_buddy(struct drm_buddy_block *block);
-
-int drm_buddy_alloc_blocks(struct drm_buddy *mm,
- u64 start, u64 end, u64 size,
- u64 min_page_size,
- struct list_head *blocks,
- unsigned long flags);
-
-int drm_buddy_block_trim(struct drm_buddy *mm,
- u64 *start,
- u64 new_size,
- struct list_head *blocks);
-
-void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear);
-
-void drm_buddy_free_block(struct drm_buddy *mm, struct drm_buddy_block *block);
-
-void drm_buddy_free_list(struct drm_buddy *mm,
- struct list_head *objects,
- unsigned int flags);
-
-void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p);
-void drm_buddy_block_print(struct drm_buddy *mm,
- struct drm_buddy_block *block,
+/* DRM-specific GPU Buddy Allocator print helpers */
+void drm_buddy_print(struct gpu_buddy *mm, struct drm_printer *p);
+void drm_buddy_block_print(struct gpu_buddy *mm,
+ struct gpu_buddy_block *block,
struct drm_printer *p);
#endif
diff --git a/include/drm/drm_colorop.h b/include/drm/drm_colorop.h
index a3a32f9f918c..bd082854ca74 100644
--- a/include/drm/drm_colorop.h
+++ b/include/drm/drm_colorop.h
@@ -188,6 +188,19 @@ struct drm_colorop_state {
};
/**
+ * struct drm_colorop_funcs - driver colorop control functions
+ */
+struct drm_colorop_funcs {
+ /**
+ * @destroy:
+ *
+ * Clean up colorop resources. This is called at driver unload time
+ * through drm_mode_config_cleanup()
+ */
+ void (*destroy)(struct drm_colorop *colorop);
+};
+
+/**
* struct drm_colorop - DRM color operation control structure
*
* A colorop represents one color operation. They can be chained via
@@ -362,6 +375,8 @@ struct drm_colorop {
*/
struct drm_property *next_property;
+ /** @funcs: colorop control functions */
+ const struct drm_colorop_funcs *funcs;
};
#define obj_to_colorop(x) container_of(x, struct drm_colorop, base)
@@ -390,17 +405,22 @@ void drm_colorop_pipeline_destroy(struct drm_device *dev);
void drm_colorop_cleanup(struct drm_colorop *colorop);
int drm_plane_colorop_curve_1d_init(struct drm_device *dev, struct drm_colorop *colorop,
- struct drm_plane *plane, u64 supported_tfs, uint32_t flags);
+ struct drm_plane *plane, const struct drm_colorop_funcs *funcs,
+ u64 supported_tfs, uint32_t flags);
int drm_plane_colorop_curve_1d_lut_init(struct drm_device *dev, struct drm_colorop *colorop,
- struct drm_plane *plane, uint32_t lut_size,
+ struct drm_plane *plane,
+ const struct drm_colorop_funcs *funcs,
+ uint32_t lut_size,
enum drm_colorop_lut1d_interpolation_type interpolation,
uint32_t flags);
int drm_plane_colorop_ctm_3x4_init(struct drm_device *dev, struct drm_colorop *colorop,
- struct drm_plane *plane, uint32_t flags);
+ struct drm_plane *plane, const struct drm_colorop_funcs *funcs,
+ uint32_t flags);
int drm_plane_colorop_mult_init(struct drm_device *dev, struct drm_colorop *colorop,
- struct drm_plane *plane, uint32_t flags);
+ struct drm_plane *plane, const struct drm_colorop_funcs *funcs,
+ uint32_t flags);
int drm_plane_colorop_3dlut_init(struct drm_device *dev, struct drm_colorop *colorop,
- struct drm_plane *plane,
+ struct drm_plane *plane, const struct drm_colorop_funcs *funcs,
uint32_t lut_size,
enum drm_colorop_lut3d_interpolation_type interpolation,
uint32_t flags);
@@ -420,6 +440,8 @@ void drm_colorop_atomic_destroy_state(struct drm_colorop *colorop,
*/
void drm_colorop_reset(struct drm_colorop *colorop);
+void drm_colorop_destroy(struct drm_colorop *colorop);
+
/**
* drm_colorop_index - find the index of a registered colorop
* @colorop: colorop to find index for
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 7eaec37ae1c7..c18be8c19de0 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -2493,6 +2493,7 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
u32 scaling_mode_mask);
int drm_connector_attach_vrr_capable_property(
struct drm_connector *connector);
+void drm_connector_attach_panel_type_property(struct drm_connector *connector);
int drm_connector_attach_broadcast_rgb_property(struct drm_connector *connector);
int drm_connector_attach_colorspace_property(struct drm_connector *connector);
int drm_connector_attach_hdr_output_metadata_property(struct drm_connector *connector);
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 05cca77b7249..15274b8a1d97 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -271,111 +271,6 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper);
-#else
-static inline void drm_fb_helper_prepare(struct drm_device *dev,
- struct drm_fb_helper *helper,
- unsigned int preferred_bpp,
- const struct drm_fb_helper_funcs *funcs)
-{
-}
-
-static inline void drm_fb_helper_unprepare(struct drm_fb_helper *fb_helper)
-{
-}
-
-static inline int drm_fb_helper_init(struct drm_device *dev,
- struct drm_fb_helper *helper)
-{
- /* So drivers can use it to free the struct */
- helper->dev = dev;
- dev->fb_helper = helper;
-
- return 0;
-}
-
-static inline void drm_fb_helper_fini(struct drm_fb_helper *helper)
-{
- if (helper && helper->dev)
- helper->dev->fb_helper = NULL;
-}
-
-static inline int drm_fb_helper_blank(int blank, struct fb_info *info)
-{
- return 0;
-}
-
-static inline int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- return 0;
-}
-
-static inline int drm_fb_helper_set_par(struct fb_info *info)
-{
- return 0;
-}
-
-static inline int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- return 0;
-}
-
-static inline int
-drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
-{
- return 0;
-}
-
-static inline void drm_fb_helper_unregister_info(struct drm_fb_helper *fb_helper)
-{
-}
-
-static inline void
-drm_fb_helper_fill_info(struct fb_info *info,
- struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_surface_size *sizes)
-{
-}
-
-static inline int drm_fb_helper_setcmap(struct fb_cmap *cmap,
- struct fb_info *info)
-{
- return 0;
-}
-
-static inline int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
- unsigned long arg)
-{
- return 0;
-}
-
-#ifdef CONFIG_FB_DEFERRED_IO
-static inline void drm_fb_helper_deferred_io(struct fb_info *info,
- struct list_head *pagelist)
-{
-}
-#endif
-
-static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper,
- bool suspend)
-{
-}
-
-static inline void
-drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper, bool suspend)
-{
-}
-
-static inline int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
-{
- return 0;
-}
-
-static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper)
-{
- return 0;
-}
#endif
#endif
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 895fb820dba0..5e1dd0cfccde 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -601,6 +601,10 @@ struct drm_mode_config {
*/
struct drm_property *tile_property;
/**
+ * @panel_type_property: Default connector property for panel type
+ */
+ struct drm_property *panel_type_property;
+ /**
* @link_status_property: Default connector property for link status
* of a connector
*/
diff --git a/include/linux/coreboot.h b/include/linux/coreboot.h
new file mode 100644
index 000000000000..5d40ca7a1d89
--- /dev/null
+++ b/include/linux/coreboot.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * coreboot.h
+ *
+ * Coreboot device and driver interfaces.
+ *
+ * Copyright 2014 Gerd Hoffmann <kraxel@redhat.com>
+ * Copyright 2017 Google Inc.
+ * Copyright 2017 Samuel Holland <samuel@sholland.org>
+ */
+
+#ifndef _LINUX_COREBOOT_H
+#define _LINUX_COREBOOT_H
+
+#include <linux/compiler_attributes.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+typedef __aligned(4) u64 cb_u64;
+
+/* List of coreboot entry structures that is used */
+
+#define CB_TAG_FRAMEBUFFER 0x12
+#define LB_TAG_CBMEM_ENTRY 0x31
+
+/* Generic */
+struct coreboot_table_entry {
+ u32 tag;
+ u32 size;
+};
+
+/* Points to a CBMEM entry */
+struct lb_cbmem_ref {
+ u32 tag;
+ u32 size;
+
+ cb_u64 cbmem_addr;
+};
+
+/* Corresponds to LB_TAG_CBMEM_ENTRY */
+struct lb_cbmem_entry {
+ u32 tag;
+ u32 size;
+
+ cb_u64 address;
+ u32 entry_size;
+ u32 id;
+};
+
+#define LB_FRAMEBUFFER_ORIENTATION_NORMAL 0
+#define LB_FRAMEBUFFER_ORIENTATION_BOTTOM_UP 1
+#define LB_FRAMEBUFFER_ORIENTATION_LEFT_UP 2
+#define LB_FRAMEBUFFER_ORIENTATION_RIGHT_UP 3
+
+/* Describes framebuffer setup by coreboot */
+struct lb_framebuffer {
+ u32 tag;
+ u32 size;
+
+ cb_u64 physical_address;
+ u32 x_resolution;
+ u32 y_resolution;
+ u32 bytes_per_line;
+ u8 bits_per_pixel;
+ u8 red_mask_pos;
+ u8 red_mask_size;
+ u8 green_mask_pos;
+ u8 green_mask_size;
+ u8 blue_mask_pos;
+ u8 blue_mask_size;
+ u8 reserved_mask_pos;
+ u8 reserved_mask_size;
+ u8 orientation;
+};
+
+/*
+ * True if the coreboot-provided data is large enough to hold information
+ * on the linear framebuffer. False otherwise.
+ */
+#define LB_FRAMEBUFFER_HAS_LFB(__fb) \
+ ((__fb)->size >= offsetofend(struct lb_framebuffer, reserved_mask_size))
+
+/*
+ * True if the coreboot-provided data is large enough to hold information
+ * on the display orientation. False otherwise.
+ */
+#define LB_FRAMEBUFFER_HAS_ORIENTATION(__fb) \
+ ((__fb)->size >= offsetofend(struct lb_framebuffer, orientation))
+
+#endif /* _LINUX_COREBOOT_H */
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 133b9e637b55..166933b82e27 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -407,7 +407,7 @@ struct dma_buf {
* through the device.
*
* - Dynamic importers should set fences for any access that they can't
- * disable immediately from their &dma_buf_attach_ops.move_notify
+ * disable immediately from their &dma_buf_attach_ops.invalidate_mappings
* callback.
*
* IMPORTANT:
@@ -446,7 +446,7 @@ struct dma_buf_attach_ops {
bool allow_peer2peer;
/**
- * @move_notify: [optional] notification that the DMA-buf is moving
+ * @invalidate_mappings: [optional] notification that the DMA-buf is moving
*
* If this callback is provided the framework can avoid pinning the
* backing store while mappings exists.
@@ -456,14 +456,10 @@ struct dma_buf_attach_ops {
* called with this lock held as well. This makes sure that no mapping
* is created concurrently with an ongoing move operation.
*
- * Mappings stay valid and are not directly affected by this callback.
- * But the DMA-buf can now be in a different physical location, so all
- * mappings should be destroyed and re-created as soon as possible.
- *
- * New mappings can be created after this callback returns, and will
- * point to the new location of the DMA-buf.
+ * See the kdoc for dma_buf_invalidate_mappings() for details on the
+ * required behavior.
*/
- void (*move_notify)(struct dma_buf_attachment *attach);
+ void (*invalidate_mappings)(struct dma_buf_attachment *attach);
};
/**
@@ -578,7 +574,8 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
enum dma_data_direction);
-void dma_buf_move_notify(struct dma_buf *dma_buf);
+void dma_buf_invalidate_mappings(struct dma_buf *dma_buf);
+bool dma_buf_attach_revocable(struct dma_buf_attachment *attach);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h
index 079b3dec0a16..370b3d2bba37 100644
--- a/include/linux/dma-fence-array.h
+++ b/include/linux/dma-fence-array.h
@@ -38,7 +38,6 @@ struct dma_fence_array_cb {
struct dma_fence_array {
struct dma_fence base;
- spinlock_t lock;
unsigned num_fences;
atomic_t num_pending;
struct dma_fence **fences;
diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h
index 5cd3ba53b4a1..df3beadf1515 100644
--- a/include/linux/dma-fence-chain.h
+++ b/include/linux/dma-fence-chain.h
@@ -46,7 +46,6 @@ struct dma_fence_chain {
*/
struct irq_work work;
};
- spinlock_t lock;
};
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index d4c92fd35092..3dc93f068bf6 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -34,7 +34,8 @@ struct seq_file;
* @ops: dma_fence_ops associated with this fence
* @rcu: used for releasing fence with kfree_rcu
* @cb_list: list of all callbacks to call
- * @lock: spin_lock_irqsave used for locking
+ * @extern_lock: external spin_lock_irqsave used for locking (deprecated)
+ * @inline_lock: alternative internal spin_lock_irqsave used for locking
* @context: execution context this fence belongs to, returned by
* dma_fence_context_alloc()
* @seqno: the sequence number of this fence inside the execution context,
@@ -48,6 +49,8 @@ struct seq_file;
* atomic ops (bit_*), so taking the spinlock will not be needed most
* of the time.
*
+ * DMA_FENCE_FLAG_INITIALIZED_BIT - fence was initialized
+ * DMA_FENCE_FLAG_INLINE_LOCK_BIT - use inline spinlock instead of external one
* DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled
* DMA_FENCE_FLAG_TIMESTAMP_BIT - timestamp recorded for fence signaling
* DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called
@@ -65,8 +68,11 @@ struct seq_file;
* been completed, or never called at all.
*/
struct dma_fence {
- spinlock_t *lock;
- const struct dma_fence_ops *ops;
+ union {
+ spinlock_t *extern_lock;
+ spinlock_t inline_lock;
+ };
+ const struct dma_fence_ops __rcu *ops;
/*
* We clear the callback list on kref_put so that by the time we
* release the fence it is unused. No one should be adding to the
@@ -98,6 +104,8 @@ struct dma_fence {
};
enum dma_fence_flag_bits {
+ DMA_FENCE_FLAG_INITIALIZED_BIT,
+ DMA_FENCE_FLAG_INLINE_LOCK_BIT,
DMA_FENCE_FLAG_SEQNO64_BIT,
DMA_FENCE_FLAG_SIGNALED_BIT,
DMA_FENCE_FLAG_TIMESTAMP_BIT,
@@ -218,6 +226,10 @@ struct dma_fence_ops {
* timed out. Can also return other error values on custom implementations,
* which should be treated as if the fence is signaled. For example a hardware
* lockup could be reported like that.
+ *
+ * Implementing this callback prevents the fence from detaching after
+ * signaling and so it is necessary for the module providing the
+ * dma_fence_ops to stay loaded as long as the dma_fence exists.
*/
signed long (*wait)(struct dma_fence *fence,
bool intr, signed long timeout);
@@ -229,6 +241,13 @@ struct dma_fence_ops {
* Can be called from irq context. This callback is optional. If it is
* NULL, then dma_fence_free() is instead called as the default
* implementation.
+ *
+ * Implementing this callback prevents the fence from detaching after
+ * signaling and so it is necessary for the module providing the
+ * dma_fence_ops to stay loaded as long as the dma_fence exists.
+ *
+ * If the callback is implemented the memory backing the dma_fence
+ * object must be freed RCU safe.
*/
void (*release)(struct dma_fence *fence);
@@ -264,6 +283,19 @@ void dma_fence_free(struct dma_fence *fence);
void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq);
/**
+ * dma_fence_was_initialized - test if fence was initialized
+ * @fence: fence to test
+ *
+ * Return: True if fence was ever initialized, false otherwise. Works correctly
+ * only when memory backing the fence structure is zero initialized on
+ * allocation.
+ */
+static inline bool dma_fence_was_initialized(struct dma_fence *fence)
+{
+ return fence && test_bit(DMA_FENCE_FLAG_INITIALIZED_BIT, &fence->flags);
+}
+
+/**
* dma_fence_put - decreases refcount of the fence
* @fence: fence to reduce refcount of
*/
@@ -351,6 +383,45 @@ dma_fence_get_rcu_safe(struct dma_fence __rcu **fencep)
} while (1);
}
+/**
+ * dma_fence_spinlock - return pointer to the spinlock protecting the fence
+ * @fence: the fence to get the lock from
+ *
+ * Return either the pointer to the embedded or the external spin lock.
+ */
+static inline spinlock_t *dma_fence_spinlock(struct dma_fence *fence)
+{
+ return test_bit(DMA_FENCE_FLAG_INLINE_LOCK_BIT, &fence->flags) ?
+ &fence->inline_lock : fence->extern_lock;
+}
+
+/**
+ * dma_fence_lock_irqsave - irqsave lock the fence
+ * @fence: the fence to lock
+ * @flags: where to store the CPU flags.
+ *
+ * Lock the fence, preventing it from changing to the signaled state.
+ */
+#define dma_fence_lock_irqsave(fence, flags) \
+ spin_lock_irqsave(dma_fence_spinlock(fence), flags)
+
+/**
+ * dma_fence_unlock_irqrestore - unlock the fence and irqrestore
+ * @fence: the fence to unlock
+ * @flags: the CPU flags to restore
+ *
+ * Unlock the fence, allowing it to change its state to signaled again.
+ */
+#define dma_fence_unlock_irqrestore(fence, flags) \
+ spin_unlock_irqrestore(dma_fence_spinlock(fence), flags)
+
+/**
+ * dma_fence_assert_held - lockdep assertion that fence is locked
+ * @fence: the fence which should be locked
+ */
+#define dma_fence_assert_held(fence) \
+ lockdep_assert_held(dma_fence_spinlock(fence));
+
#ifdef CONFIG_LOCKDEP
bool dma_fence_begin_signalling(void);
void dma_fence_end_signalling(bool cookie);
@@ -439,13 +510,19 @@ dma_fence_test_signaled_flag(struct dma_fence *fence)
static inline bool
dma_fence_is_signaled_locked(struct dma_fence *fence)
{
+ const struct dma_fence_ops *ops;
+
if (dma_fence_test_signaled_flag(fence))
return true;
- if (fence->ops->signaled && fence->ops->signaled(fence)) {
+ rcu_read_lock();
+ ops = rcu_dereference(fence->ops);
+ if (ops && ops->signaled && ops->signaled(fence)) {
+ rcu_read_unlock();
dma_fence_signal_locked(fence);
return true;
}
+ rcu_read_unlock();
return false;
}
@@ -469,13 +546,19 @@ dma_fence_is_signaled_locked(struct dma_fence *fence)
static inline bool
dma_fence_is_signaled(struct dma_fence *fence)
{
+ const struct dma_fence_ops *ops;
+
if (dma_fence_test_signaled_flag(fence))
return true;
- if (fence->ops->signaled && fence->ops->signaled(fence)) {
+ rcu_read_lock();
+ ops = rcu_dereference(fence->ops);
+ if (ops && ops->signaled && ops->signaled(fence)) {
+ rcu_read_unlock();
dma_fence_signal(fence);
return true;
}
+ rcu_read_unlock();
return false;
}
@@ -680,7 +763,7 @@ extern const struct dma_fence_ops dma_fence_chain_ops;
*/
static inline bool dma_fence_is_array(struct dma_fence *fence)
{
- return fence->ops == &dma_fence_array_ops;
+ return rcu_access_pointer(fence->ops) == &dma_fence_array_ops;
}
/**
@@ -691,7 +774,7 @@ static inline bool dma_fence_is_array(struct dma_fence *fence)
*/
static inline bool dma_fence_is_chain(struct dma_fence *fence)
{
- return fence->ops == &dma_fence_chain_ops;
+ return rcu_access_pointer(fence->ops) == &dma_fence_chain_ops;
}
/**
diff --git a/include/linux/gpu_buddy.h b/include/linux/gpu_buddy.h
new file mode 100644
index 000000000000..f1fb6eff604a
--- /dev/null
+++ b/include/linux/gpu_buddy.h
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __GPU_BUDDY_H__
+#define __GPU_BUDDY_H__
+
+#include <linux/bitops.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/rbtree.h>
+
+/**
+ * GPU_BUDDY_RANGE_ALLOCATION - Allocate within a specific address range
+ *
+ * When set, allocation is restricted to the range [start, end) specified
+ * in gpu_buddy_alloc_blocks(). Without this flag, start/end are ignored
+ * and allocation can use any free space.
+ */
+#define GPU_BUDDY_RANGE_ALLOCATION BIT(0)
+
+/**
+ * GPU_BUDDY_TOPDOWN_ALLOCATION - Allocate from top of address space
+ *
+ * Allocate starting from high addresses and working down. Useful for
+ * separating different allocation types (e.g., kernel vs userspace)
+ * to reduce fragmentation.
+ */
+#define GPU_BUDDY_TOPDOWN_ALLOCATION BIT(1)
+
+/**
+ * GPU_BUDDY_CONTIGUOUS_ALLOCATION - Require physically contiguous blocks
+ *
+ * The allocation must be satisfied with a single contiguous block.
+ * If the requested size cannot be allocated contiguously, the
+ * allocation fails with -ENOSPC.
+ */
+#define GPU_BUDDY_CONTIGUOUS_ALLOCATION BIT(2)
+
+/**
+ * GPU_BUDDY_CLEAR_ALLOCATION - Prefer pre-cleared (zeroed) memory
+ *
+ * Attempt to allocate from the clear tree first. If insufficient clear
+ * memory is available, falls back to dirty memory. Useful when the
+ * caller needs zeroed memory and wants to avoid GPU clear operations.
+ */
+#define GPU_BUDDY_CLEAR_ALLOCATION BIT(3)
+
+/**
+ * GPU_BUDDY_CLEARED - Mark returned blocks as cleared
+ *
+ * Used with gpu_buddy_free_list() to indicate that the memory being
+ * freed has been cleared (zeroed). The blocks will be placed in the
+ * clear tree for future GPU_BUDDY_CLEAR_ALLOCATION requests.
+ */
+#define GPU_BUDDY_CLEARED BIT(4)
+
+/**
+ * GPU_BUDDY_TRIM_DISABLE - Disable automatic block trimming
+ *
+ * By default, if an allocation is smaller than the allocated block,
+ * excess memory is trimmed and returned to the free pool. This flag
+ * disables trimming, keeping the full power-of-two block size.
+ */
+#define GPU_BUDDY_TRIM_DISABLE BIT(5)
+
+enum gpu_buddy_free_tree {
+ GPU_BUDDY_CLEAR_TREE = 0,
+ GPU_BUDDY_DIRTY_TREE,
+ GPU_BUDDY_MAX_FREE_TREES,
+};
+
+#define for_each_free_tree(tree) \
+ for ((tree) = 0; (tree) < GPU_BUDDY_MAX_FREE_TREES; (tree)++)
+
+/**
+ * struct gpu_buddy_block - Block within a buddy allocator
+ *
+ * Each block in the buddy allocator is represented by this structure.
+ * Blocks are organized in a binary tree where each parent block can be
+ * split into two children (left and right buddies). The allocator manages
+ * blocks at various orders (power-of-2 sizes) from chunk_size up to the
+ * largest contiguous region.
+ *
+ * @private: Private data owned by the allocator user (e.g., driver-specific data)
+ * @link: List node for user ownership while block is allocated
+ */
+struct gpu_buddy_block {
+/* private: */
+ /*
+ * Header bit layout:
+ * - Bits 63:12: block offset within the address space
+ * - Bits 11:10: state (ALLOCATED, FREE, or SPLIT)
+ * - Bit 9: clear bit (1 if memory is zeroed)
+ * - Bits 8:6: reserved
+ * - Bits 5:0: order (log2 of size relative to chunk_size)
+ */
+#define GPU_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
+#define GPU_BUDDY_HEADER_STATE GENMASK_ULL(11, 10)
+#define GPU_BUDDY_ALLOCATED (1 << 10)
+#define GPU_BUDDY_FREE (2 << 10)
+#define GPU_BUDDY_SPLIT (3 << 10)
+#define GPU_BUDDY_HEADER_CLEAR GENMASK_ULL(9, 9)
+/* Free to be used, if needed in the future */
+#define GPU_BUDDY_HEADER_UNUSED GENMASK_ULL(8, 6)
+#define GPU_BUDDY_HEADER_ORDER GENMASK_ULL(5, 0)
+ u64 header;
+
+ struct gpu_buddy_block *left;
+ struct gpu_buddy_block *right;
+ struct gpu_buddy_block *parent;
+/* public: */
+ void *private; /* owned by creator */
+
+ /*
+ * While the block is allocated by the user through gpu_buddy_alloc*,
+ * the user has ownership of the link, for example to maintain within
+ * a list, if so desired. As soon as the block is freed with
+ * gpu_buddy_free*, ownership is given back to the mm.
+ */
+ union {
+/* private: */
+ struct rb_node rb;
+/* public: */
+ struct list_head link;
+ };
+/* private: */
+ struct list_head tmp_link;
+};
+
+/* Order-zero must be at least SZ_4K */
+#define GPU_BUDDY_MAX_ORDER (63 - 12)
+
+/**
+ * struct gpu_buddy - GPU binary buddy allocator
+ *
+ * The buddy allocator provides efficient power-of-two memory allocation
+ * with fast allocation and free operations. It is commonly used for GPU
+ * memory management where allocations can be split into power-of-two
+ * block sizes.
+ *
+ * Locking should be handled by the user; a simple mutex around
+ * gpu_buddy_alloc_blocks() and gpu_buddy_free_block()/gpu_buddy_free_list()
+ * should suffice.
+ *
+ * @n_roots: Number of root blocks in the roots array.
+ * @max_order: Maximum block order (log2 of largest block size / chunk_size).
+ * @chunk_size: Minimum allocation granularity in bytes. Must be at least SZ_4K.
+ * @size: Total size of the address space managed by this allocator in bytes.
+ * @avail: Total free space currently available for allocation in bytes.
+ * @clear_avail: Free space available in the clear tree (zeroed memory) in bytes.
+ * This is a subset of @avail.
+ */
+struct gpu_buddy {
+/* private: */
+ /*
+ * Array of red-black trees for free block management.
+ * Indexed as free_trees[clear/dirty][order] where:
+ * - Index 0 (GPU_BUDDY_CLEAR_TREE): blocks with zeroed content
+ * - Index 1 (GPU_BUDDY_DIRTY_TREE): blocks with unknown content
+ * Each tree holds free blocks of the corresponding order.
+ */
+ struct rb_root **free_trees;
+ /*
+ * Array of root blocks representing the top-level blocks of the
+ * binary tree(s). Multiple roots exist when the total size is not
+ * a power of two, with each root being the largest power-of-two
+ * that fits in the remaining space.
+ */
+ struct gpu_buddy_block **roots;
+/* public: */
+ unsigned int n_roots;
+ unsigned int max_order;
+ u64 chunk_size;
+ u64 size;
+ u64 avail;
+ u64 clear_avail;
+};
+
+static inline u64
+gpu_buddy_block_offset(const struct gpu_buddy_block *block)
+{
+ return block->header & GPU_BUDDY_HEADER_OFFSET;
+}
+
+static inline unsigned int
+gpu_buddy_block_order(struct gpu_buddy_block *block)
+{
+ return block->header & GPU_BUDDY_HEADER_ORDER;
+}
+
+static inline bool
+gpu_buddy_block_is_free(struct gpu_buddy_block *block)
+{
+ return (block->header & GPU_BUDDY_HEADER_STATE) == GPU_BUDDY_FREE;
+}
+
+static inline bool
+gpu_buddy_block_is_clear(struct gpu_buddy_block *block)
+{
+ return block->header & GPU_BUDDY_HEADER_CLEAR;
+}
+
+static inline u64
+gpu_buddy_block_size(struct gpu_buddy *mm,
+ struct gpu_buddy_block *block)
+{
+ return mm->chunk_size << gpu_buddy_block_order(block);
+}
+
+int gpu_buddy_init(struct gpu_buddy *mm, u64 size, u64 chunk_size);
+
+void gpu_buddy_fini(struct gpu_buddy *mm);
+
+int gpu_buddy_alloc_blocks(struct gpu_buddy *mm,
+ u64 start, u64 end, u64 size,
+ u64 min_page_size,
+ struct list_head *blocks,
+ unsigned long flags);
+
+int gpu_buddy_block_trim(struct gpu_buddy *mm,
+ u64 *start,
+ u64 new_size,
+ struct list_head *blocks);
+
+void gpu_buddy_reset_clear(struct gpu_buddy *mm, bool is_clear);
+
+void gpu_buddy_free_block(struct gpu_buddy *mm, struct gpu_buddy_block *block);
+
+void gpu_buddy_free_list(struct gpu_buddy *mm,
+ struct list_head *objects,
+ unsigned int flags);
+
+void gpu_buddy_print(struct gpu_buddy *mm);
+void gpu_buddy_block_print(struct gpu_buddy *mm,
+ struct gpu_buddy_block *block);
+#endif
diff --git a/include/trace/events/dma_fence.h b/include/trace/events/dma_fence.h
index 4814a65b68dc..3abba45c0601 100644
--- a/include/trace/events/dma_fence.h
+++ b/include/trace/events/dma_fence.h
@@ -9,37 +9,12 @@
struct dma_fence;
-DECLARE_EVENT_CLASS(dma_fence,
-
- TP_PROTO(struct dma_fence *fence),
-
- TP_ARGS(fence),
-
- TP_STRUCT__entry(
- __string(driver, dma_fence_driver_name(fence))
- __string(timeline, dma_fence_timeline_name(fence))
- __field(unsigned int, context)
- __field(unsigned int, seqno)
- ),
-
- TP_fast_assign(
- __assign_str(driver);
- __assign_str(timeline);
- __entry->context = fence->context;
- __entry->seqno = fence->seqno;
- ),
-
- TP_printk("driver=%s timeline=%s context=%u seqno=%u",
- __get_str(driver), __get_str(timeline), __entry->context,
- __entry->seqno)
-);
-
/*
* Safe only for call sites which are guaranteed to not race with fence
* signaling,holding the fence->lock and having checked for not signaled, or the
* signaling path itself.
*/
-DECLARE_EVENT_CLASS(dma_fence_unsignaled,
+DECLARE_EVENT_CLASS(dma_fence,
TP_PROTO(struct dma_fence *fence),
@@ -64,14 +39,14 @@ DECLARE_EVENT_CLASS(dma_fence_unsignaled,
__entry->seqno)
);
-DEFINE_EVENT(dma_fence_unsignaled, dma_fence_emit,
+DEFINE_EVENT(dma_fence, dma_fence_emit,
TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(dma_fence_unsignaled, dma_fence_init,
+DEFINE_EVENT(dma_fence, dma_fence_init,
TP_PROTO(struct dma_fence *fence),
@@ -85,14 +60,14 @@ DEFINE_EVENT(dma_fence, dma_fence_destroy,
TP_ARGS(fence)
);
-DEFINE_EVENT(dma_fence_unsignaled, dma_fence_enable_signal,
+DEFINE_EVENT(dma_fence, dma_fence_enable_signal,
TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(dma_fence_unsignaled, dma_fence_signaled,
+DEFINE_EVENT(dma_fence, dma_fence_signaled,
TP_PROTO(struct dma_fence *fence),
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index c89aede3cb12..ac66fa93b5a3 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -1423,6 +1423,22 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_MISC, 1ULL)
/*
+ * ARM 64k interleaved modifier
+ *
+ * This is used by ARM Mali v10+ GPUs. With this modifier, the plane is divided
+ * into 64k byte tiles with a 1:1 or 2:1 aspect ratio. The 64k tiles are laid out linearly.
+ * Each 64k tile is divided into blocks of 16x16 texel blocks, which are
+ * themselves laid out linearly within a 64k tile. Then within each 16x16
+ * block, texel blocks are laid out according to U order, similar to
+ * 16X16_BLOCK_U_INTERLEAVED.
+ *
+ * Note that unlike 16X16_BLOCK_U_INTERLEAVED, the layout does not change
+ * depending on whether a format is compressed or not.
+ */
+#define DRM_FORMAT_MOD_ARM_INTERLEAVED_64K \
+ DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_MISC, 2ULL)
+
+/*
* Allwinner tiled modifier
*
* This tiling mode is implemented by the VPU found on all Allwinner platforms,
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index cbbbfc1dfe2b..3693d82b5279 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -166,6 +166,10 @@ extern "C" {
#define DRM_MODE_LINK_STATUS_GOOD 0
#define DRM_MODE_LINK_STATUS_BAD 1
+/* Panel type property */
+#define DRM_MODE_PANEL_TYPE_UNKNOWN 0
+#define DRM_MODE_PANEL_TYPE_OLED 1
+
/*
* DRM_MODE_ROTATE_<degrees>
*
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index dd87f8f30793..1fa82fa6af38 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -432,6 +432,69 @@ struct drm_nouveau_exec {
__u64 push_ptr;
};
+struct drm_nouveau_get_zcull_info {
+ /**
+ * @width_align_pixels: required alignment for region widths, in pixels
+ * (typically #TPC's * 16).
+ */
+ __u32 width_align_pixels;
+ /**
+ * @height_align_pixels: required alignment for region heights, in
+ * pixels (typically 32).
+ */
+ __u32 height_align_pixels;
+ /**
+ * @pixel_squares_by_aliquots: the pixel area covered by an aliquot
+ * (typically #Zcull_banks * 16 * 16).
+ */
+ __u32 pixel_squares_by_aliquots;
+ /**
+ * @aliquot_total: the total aliquot pool available in hardware
+ */
+ __u32 aliquot_total;
+ /**
+ * @zcull_region_byte_multiplier: the size of an aliquot in bytes, which
+ * is used for save/restore operations on a region
+ */
+ __u32 zcull_region_byte_multiplier;
+ /**
+ * @zcull_region_header_size: the region header size in bytes, which is
+ * used for save/restore operations on a region
+ */
+ __u32 zcull_region_header_size;
+ /**
+ * @zcull_subregion_header_size: the subregion header size in bytes,
+ * which is used for save/restore operations on a region
+ */
+ __u32 zcull_subregion_header_size;
+ /**
+ * @subregion_count: the total number of subregions the hardware
+ * supports
+ */
+ __u32 subregion_count;
+ /**
+ * @subregion_width_align_pixels: required alignment for subregion
+ * widths, in pixels (typically #TPC's * 16).
+ */
+ __u32 subregion_width_align_pixels;
+ /**
+ * @subregion_height_align_pixels: required alignment for subregion
+ * heights, in pixels
+ */
+ __u32 subregion_height_align_pixels;
+
+ /**
+ * @ctxsw_size: the size, in bytes, of a zcull context switching region.
+ * Will be zero if the kernel does not support zcull context switching.
+ */
+ __u32 ctxsw_size;
+ /**
+ * @ctxsw_align: the alignment, in bytes, of a zcull context switching
+ * region
+ */
+ __u32 ctxsw_align;
+};
+
#define DRM_NOUVEAU_GETPARAM 0x00
#define DRM_NOUVEAU_SETPARAM 0x01 /* deprecated */
#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02
@@ -445,6 +508,7 @@ struct drm_nouveau_exec {
#define DRM_NOUVEAU_VM_INIT 0x10
#define DRM_NOUVEAU_VM_BIND 0x11
#define DRM_NOUVEAU_EXEC 0x12
+#define DRM_NOUVEAU_GET_ZCULL_INFO 0x13
#define DRM_NOUVEAU_GEM_NEW 0x40
#define DRM_NOUVEAU_GEM_PUSHBUF 0x41
#define DRM_NOUVEAU_GEM_CPU_PREP 0x42
@@ -513,6 +577,8 @@ struct drm_nouveau_svm_bind {
#define DRM_IOCTL_NOUVEAU_VM_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_INIT, struct drm_nouveau_vm_init)
#define DRM_IOCTL_NOUVEAU_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_BIND, struct drm_nouveau_vm_bind)
#define DRM_IOCTL_NOUVEAU_EXEC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_EXEC, struct drm_nouveau_exec)
+
+#define DRM_IOCTL_NOUVEAU_GET_ZCULL_INFO DRM_IOR(DRM_COMMAND_BASE + DRM_NOUVEAU_GET_ZCULL_INFO, struct drm_nouveau_get_zcull_info)
#if defined(__cplusplus)
}
#endif