Diffstat (limited to 'include/drm')
-rw-r--r--  include/drm/Makefile | 18
-rw-r--r--  include/drm/bridge/analogix_dp.h | 7
-rw-r--r--  include/drm/bridge/dw_mipi_dsi2.h | 95
-rw-r--r--  include/drm/clients/drm_client_setup.h (renamed from include/drm/drm_client_setup.h) | 0
-rw-r--r--  include/drm/display/drm_dp.h | 18
-rw-r--r--  include/drm/display/drm_dp_dual_mode_helper.h | 2
-rw-r--r--  include/drm/display/drm_dp_helper.h | 109
-rw-r--r--  include/drm/display/drm_dp_mst_helper.h | 10
-rw-r--r--  include/drm/display/drm_hdmi_audio_helper.h | 22
-rw-r--r--  include/drm/display/drm_hdmi_helper.h | 6
-rw-r--r--  include/drm/display/drm_hdmi_state_helper.h | 10
-rw-r--r--  include/drm/drm_atomic.h | 57
-rw-r--r--  include/drm/drm_atomic_helper.h | 2
-rw-r--r--  include/drm/drm_bridge.h | 253
-rw-r--r--  include/drm/drm_bridge_helper.h | 12
-rw-r--r--  include/drm/drm_buddy.h | 2
-rw-r--r--  include/drm/drm_client.h | 8
-rw-r--r--  include/drm/drm_client_event.h | 2
-rw-r--r--  include/drm/drm_connector.h | 138
-rw-r--r--  include/drm/drm_crtc.h | 2
-rw-r--r--  include/drm/drm_damage_helper.h | 2
-rw-r--r--  include/drm/drm_device.h | 49
-rw-r--r--  include/drm/drm_drv.h | 13
-rw-r--r--  include/drm/drm_edid.h | 2
-rw-r--r--  include/drm/drm_encoder_slave.h | 241
-rw-r--r--  include/drm/drm_fb_helper.h | 44
-rw-r--r--  include/drm/drm_fbdev_client.h | 19
-rw-r--r--  include/drm/drm_file.h | 12
-rw-r--r--  include/drm/drm_format_helper.h | 6
-rw-r--r--  include/drm/drm_framebuffer.h | 7
-rw-r--r--  include/drm/drm_gem.h | 42
-rw-r--r--  include/drm/drm_gem_shmem_helper.h | 47
-rw-r--r--  include/drm/drm_gpusvm.h | 528
-rw-r--r--  include/drm/drm_gpuvm.h | 5
-rw-r--r--  include/drm/drm_kunit_helpers.h | 13
-rw-r--r--  include/drm/drm_managed.h | 12
-rw-r--r--  include/drm/drm_mipi_dsi.h | 28
-rw-r--r--  include/drm/drm_mode_config.h | 10
-rw-r--r--  include/drm/drm_mode_object.h | 2
-rw-r--r--  include/drm/drm_modeset_helper_vtables.h | 11
-rw-r--r--  include/drm/drm_pagemap.h | 107
-rw-r--r--  include/drm/drm_panel.h | 50
-rw-r--r--  include/drm/drm_panic.h | 19
-rw-r--r--  include/drm/drm_plane.h | 17
-rw-r--r--  include/drm/drm_print.h | 64
-rw-r--r--  include/drm/drm_probe_helper.h | 2
-rw-r--r--  include/drm/drm_util.h | 16
-rw-r--r--  include/drm/drm_writeback.h | 6
-rw-r--r--  include/drm/gpu_scheduler.h | 298
-rw-r--r--  include/drm/i2c/ch7006.h | 87
-rw-r--r--  include/drm/i2c/sil164.h | 64
-rw-r--r--  include/drm/i2c/tda998x.h | 40
-rw-r--r--  include/drm/intel/intel-gtt.h | 2
-rw-r--r--  include/drm/intel/pciids.h | 79
-rw-r--r--  include/drm/spsc_queue.h | 4
-rw-r--r--  include/drm/ttm/ttm_backup.h | 72
-rw-r--r--  include/drm/ttm/ttm_bo.h | 97
-rw-r--r--  include/drm/ttm/ttm_pool.h | 8
-rw-r--r--  include/drm/ttm/ttm_resource.h | 23
-rw-r--r--  include/drm/ttm/ttm_tt.h | 69
60 files changed, 2247 insertions, 743 deletions
diff --git a/include/drm/Makefile b/include/drm/Makefile
new file mode 100644
index 000000000000..1df6962556ef
--- /dev/null
+++ b/include/drm/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# Ensure drm headers are self-contained and pass kernel-doc
+hdrtest-files := \
+ $(shell cd $(src) && find * -name '*.h' 2>/dev/null)
+
+always-$(CONFIG_DRM_HEADER_TEST) += \
+ $(patsubst %.h,%.hdrtest, $(hdrtest-files))
+
+# Include the header twice to detect missing include guard.
+quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
+ cmd_hdrtest = \
+ $(CC) $(c_flags) -fsyntax-only -x c /dev/null -include $< -include $<; \
+ PYTHONDONTWRITEBYTECODE=1 $(KERNELDOC) -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \
+ touch $@
+
+$(obj)/%.hdrtest: $(src)/%.h FORCE
+ $(call if_changed_dep,hdrtest)
diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h
index 6002c5666031..cf17646c1310 100644
--- a/include/drm/bridge/analogix_dp.h
+++ b/include/drm/bridge/analogix_dp.h
@@ -10,16 +10,18 @@
#include <drm/drm_crtc.h>
struct analogix_dp_device;
+struct drm_dp_aux;
enum analogix_dp_devtype {
EXYNOS_DP,
RK3288_DP,
RK3399_EDP,
+ RK3588_EDP,
};
static inline bool is_rockchip(enum analogix_dp_devtype type)
{
- return type == RK3288_DP || type == RK3399_EDP;
+ return type == RK3288_DP || type == RK3399_EDP || type == RK3588_EDP;
}
struct analogix_dp_plat_data {
@@ -48,4 +50,7 @@ void analogix_dp_unbind(struct analogix_dp_device *dp);
int analogix_dp_start_crc(struct drm_connector *connector);
int analogix_dp_stop_crc(struct drm_connector *connector);
+struct analogix_dp_plat_data *analogix_dp_aux_to_plat_data(struct drm_dp_aux *aux);
+struct drm_dp_aux *analogix_dp_get_aux(struct analogix_dp_device *dp);
+
#endif /* _ANALOGIX_DP_H_ */
diff --git a/include/drm/bridge/dw_mipi_dsi2.h b/include/drm/bridge/dw_mipi_dsi2.h
new file mode 100644
index 000000000000..c18c49379247
--- /dev/null
+++ b/include/drm/bridge/dw_mipi_dsi2.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * Authors: Guochun Huang <hero.huang@rock-chips.com>
+ * Heiko Stuebner <heiko.stuebner@cherry.de>
+ */
+
+#ifndef __DW_MIPI_DSI2__
+#define __DW_MIPI_DSI2__
+
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_modes.h>
+
+struct drm_display_mode;
+struct drm_encoder;
+struct dw_mipi_dsi2;
+struct mipi_dsi_device;
+struct platform_device;
+
+enum dw_mipi_dsi2_phy_type {
+ DW_MIPI_DSI2_DPHY,
+ DW_MIPI_DSI2_CPHY,
+};
+
+struct dw_mipi_dsi2_phy_iface {
+ int ppi_width;
+ enum dw_mipi_dsi2_phy_type phy_type;
+};
+
+struct dw_mipi_dsi2_phy_timing {
+ u32 data_hs2lp;
+ u32 data_lp2hs;
+};
+
+struct dw_mipi_dsi2_phy_ops {
+ int (*init)(void *priv_data);
+ void (*power_on)(void *priv_data);
+ void (*power_off)(void *priv_data);
+ void (*get_interface)(void *priv_data, struct dw_mipi_dsi2_phy_iface *iface);
+ int (*get_lane_mbps)(void *priv_data,
+ const struct drm_display_mode *mode,
+ unsigned long mode_flags, u32 lanes, u32 format,
+ unsigned int *lane_mbps);
+ int (*get_timing)(void *priv_data, unsigned int lane_mbps,
+ struct dw_mipi_dsi2_phy_timing *timing);
+ int (*get_esc_clk_rate)(void *priv_data, unsigned int *esc_clk_rate);
+};
+
+struct dw_mipi_dsi2_host_ops {
+ int (*attach)(void *priv_data,
+ struct mipi_dsi_device *dsi);
+ int (*detach)(void *priv_data,
+ struct mipi_dsi_device *dsi);
+};
+
+struct dw_mipi_dsi2_plat_data {
+ struct regmap *regmap;
+ unsigned int max_data_lanes;
+
+ enum drm_mode_status (*mode_valid)(void *priv_data,
+ const struct drm_display_mode *mode,
+ unsigned long mode_flags,
+ u32 lanes, u32 format);
+
+ bool (*mode_fixup)(void *priv_data, const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ u32 *(*get_input_bus_fmts)(void *priv_data,
+ struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts);
+
+ const struct dw_mipi_dsi2_phy_ops *phy_ops;
+ const struct dw_mipi_dsi2_host_ops *host_ops;
+
+ void *priv_data;
+};
+
+struct dw_mipi_dsi2 *dw_mipi_dsi2_probe(struct platform_device *pdev,
+ const struct dw_mipi_dsi2_plat_data *plat_data);
+void dw_mipi_dsi2_remove(struct dw_mipi_dsi2 *dsi2);
+int dw_mipi_dsi2_bind(struct dw_mipi_dsi2 *dsi2, struct drm_encoder *encoder);
+void dw_mipi_dsi2_unbind(struct dw_mipi_dsi2 *dsi2);
+
+#endif /* __DW_MIPI_DSI2__ */
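A hedged sketch of how a platform glue driver might consume this interface; everything prefixed example_ below is hypothetical and only illustrates the dw_mipi_dsi2_plat_data wiring and the probe/bind split declared above:

struct example_dsi2 {				/* hypothetical glue-driver state */
	struct dw_mipi_dsi2_plat_data pdata;
	struct dw_mipi_dsi2 *dmd;
	struct regmap *regmap;
};

static int example_dsi2_probe(struct platform_device *pdev)
{
	struct example_dsi2 *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->pdata.regmap = priv->regmap;	/* mapped by the glue driver */
	priv->pdata.max_data_lanes = 4;
	priv->pdata.phy_ops = &example_phy_ops;	/* hypothetical dw_mipi_dsi2_phy_ops */
	priv->pdata.priv_data = priv;

	priv->dmd = dw_mipi_dsi2_probe(pdev, &priv->pdata);
	return PTR_ERR_OR_ZERO(priv->dmd);
}

/* Later, at component bind time, the glue driver attaches to its encoder:	*/
/*	ret = dw_mipi_dsi2_bind(priv->dmd, &priv->encoder);			*/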
diff --git a/include/drm/drm_client_setup.h b/include/drm/clients/drm_client_setup.h
index 46aab3fb46be..46aab3fb46be 100644
--- a/include/drm/drm_client_setup.h
+++ b/include/drm/clients/drm_client_setup.h
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index 3bd9f482f0c3..3001c0b6e7bb 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -697,6 +697,9 @@
#define DP_UPSTREAM_DEVICE_DP_PWR_NEED 0x118 /* 1.2 */
# define DP_PWR_NOT_NEEDED (1 << 0)
+#define DP_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_GRANT 0x119 /* 1.4a */
+# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_GRANTED (1 << 0)
+
#define DP_FEC_CONFIGURATION 0x120 /* 1.4 */
# define DP_FEC_READY (1 << 0)
# define DP_FEC_ERR_COUNT_SEL_MASK (7 << 1)
@@ -997,6 +1000,7 @@
# define DP_EDP_14 0x03
# define DP_EDP_14a 0x04 /* eDP 1.4a */
# define DP_EDP_14b 0x05 /* eDP 1.4b */
+# define DP_EDP_15 0x06 /* eDP 1.5 */
#define DP_EDP_GENERAL_CAP_1 0x701
# define DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP (1 << 0)
@@ -1021,6 +1025,7 @@
#define DP_EDP_GENERAL_CAP_2 0x703
# define DP_EDP_OVERDRIVE_ENGINE_ENABLED (1 << 0)
# define DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE (1 << 4)
+# define DP_EDP_SMOOTH_BRIGHTNESS_CAPABLE (1 << 6) /* eDP 2.0 */
#define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */
# define DP_EDP_X_REGION_CAP_MASK (0xf << 0)
@@ -1169,6 +1174,15 @@
# define DP_VSC_EXT_CEA_SDP_SUPPORTED (1 << 6) /* DP 1.4 */
# define DP_VSC_EXT_CEA_SDP_CHAINING_SUPPORTED (1 << 7) /* DP 1.4 */
+#define DP_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST 0x2211 /* 1.4a */
+# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_MASK 0xff
+# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_1_MS 0x00
+# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_20_MS 0x01
+# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_40_MS 0x02
+# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_60_MS 0x03
+# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_80_MS 0x04
+# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_100_MS 0x05
+
#define DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1 0x2214 /* 2.0 E11 */
# define DP_ADAPTIVE_SYNC_SDP_SUPPORTED (1 << 0)
# define DP_ADAPTIVE_SYNC_SDP_OPERATION_MODE GENMASK(1, 0)
@@ -1474,6 +1488,8 @@
#define DP_MAX_LANE_COUNT_PHY_REPEATER 0xf0004 /* 1.4a */
#define DP_Repeater_FEC_CAPABILITY 0xf0004 /* 1.4 */
#define DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT 0xf0005 /* 1.4a */
+# define DP_EXTENDED_WAKE_TIMEOUT_REQUEST_MASK 0x7f
+# define DP_EXTENDED_WAKE_TIMEOUT_GRANT (1 << 7)
#define DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER 0xf0006 /* 2.0 */
# define DP_PHY_REPEATER_128B132B_SUPPORTED (1 << 0)
/* See DP_128B132B_SUPPORTED_LINK_RATES for values */
@@ -1656,7 +1672,7 @@ enum drm_dp_phy {
#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_DSC_RECEIVER_CAP_SIZE 0x10 /* DSC Capabilities 0x60 through 0x6F */
#define EDP_PSR_RECEIVER_CAP_SIZE 2
-#define EDP_DISPLAY_CTL_CAP_SIZE 3
+#define EDP_DISPLAY_CTL_CAP_SIZE 5
#define DP_LTTPR_COMMON_CAP_SIZE 8
#define DP_LTTPR_PHY_CAP_SIZE 3
diff --git a/include/drm/display/drm_dp_dual_mode_helper.h b/include/drm/display/drm_dp_dual_mode_helper.h
index 7ee482265087..7ac6969db935 100644
--- a/include/drm/display/drm_dp_dual_mode_helper.h
+++ b/include/drm/display/drm_dp_dual_mode_helper.h
@@ -117,5 +117,5 @@ const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type);
int drm_lspcon_get_mode(const struct drm_device *dev, struct i2c_adapter *adapter,
enum drm_lspcon_mode *current_mode);
int drm_lspcon_set_mode(const struct drm_device *dev, struct i2c_adapter *adapter,
- enum drm_lspcon_mode reqd_mode);
+ enum drm_lspcon_mode reqd_mode, int time_out);
#endif
diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
index 279624833ea9..e4ca35143ff9 100644
--- a/include/drm/display/drm_dp_helper.h
+++ b/include/drm/display/drm_dp_helper.h
@@ -518,6 +518,11 @@ struct drm_dp_aux {
* @powered_down: If true then the remote endpoint is powered down.
*/
bool powered_down;
+
+ /**
+ * @no_zero_sized: If the hw can't use zero sized transfers (NVIDIA)
+ */
+ bool no_zero_sized;
};
int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset);
@@ -528,13 +533,72 @@ ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size);
/**
+ * drm_dp_dpcd_read_data() - read a series of bytes from the DPCD
+ * @aux: DisplayPort AUX channel (SST or MST)
+ * @offset: address of the (first) register to read
+ * @buffer: buffer to store the register values
+ * @size: number of bytes in @buffer
+ *
+ * Returns zero (0) on success, or a negative error
+ * code on failure. -EIO is returned if the request was NAKed by the sink or
+ * if the retry count was exceeded. If not all bytes were transferred, this
+ * function returns -EPROTO. Errors from the underlying AUX channel transfer
+ * function, with the exception of -EBUSY (which causes the transaction to
+ * be retried), are propagated to the caller.
+ */
+static inline int drm_dp_dpcd_read_data(struct drm_dp_aux *aux,
+ unsigned int offset,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = drm_dp_dpcd_read(aux, offset, buffer, size);
+ if (ret < 0)
+ return ret;
+ if (ret < size)
+ return -EPROTO;
+
+ return 0;
+}
+
+/**
+ * drm_dp_dpcd_write_data() - write a series of bytes to the DPCD
+ * @aux: DisplayPort AUX channel (SST or MST)
+ * @offset: address of the (first) register to write
+ * @buffer: buffer containing the values to write
+ * @size: number of bytes in @buffer
+ *
+ * Returns zero (0) on success, or a negative error
+ * code on failure. -EIO is returned if the request was NAKed by the sink or
+ * if the retry count was exceeded. If not all bytes were transferred, this
+ * function returns -EPROTO. Errors from the underlying AUX channel transfer
+ * function, with the exception of -EBUSY (which causes the transaction to
+ * be retried), are propagated to the caller.
+ */
+static inline int drm_dp_dpcd_write_data(struct drm_dp_aux *aux,
+ unsigned int offset,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = drm_dp_dpcd_write(aux, offset, buffer, size);
+ if (ret < 0)
+ return ret;
+ if (ret < size)
+ return -EPROTO;
+
+ return 0;
+}
+
+/**
* drm_dp_dpcd_readb() - read a single byte from the DPCD
* @aux: DisplayPort AUX channel
* @offset: address of the register to read
* @valuep: location where the value of the register will be stored
*
* Returns the number of bytes transferred (1) on success, or a negative
- * error code on failure.
+ * error code on failure. In most cases you should use
+ * drm_dp_dpcd_read_byte() instead.
*/
static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux,
unsigned int offset, u8 *valuep)
@@ -549,7 +613,8 @@ static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux,
* @value: value to write to the register
*
* Returns the number of bytes transferred (1) on success, or a negative
- * error code on failure.
+ * error code on failure. In most cases you should use
+ * drm_dp_dpcd_write_byte() instead.
*/
static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux,
unsigned int offset, u8 value)
@@ -557,6 +622,34 @@ static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux,
return drm_dp_dpcd_write(aux, offset, &value, 1);
}
+/**
+ * drm_dp_dpcd_read_byte() - read a single byte from the DPCD
+ * @aux: DisplayPort AUX channel
+ * @offset: address of the register to read
+ * @valuep: location where the value of the register will be stored
+ *
+ * Returns zero (0) on success, or a negative error code on failure.
+ */
+static inline int drm_dp_dpcd_read_byte(struct drm_dp_aux *aux,
+ unsigned int offset, u8 *valuep)
+{
+ return drm_dp_dpcd_read_data(aux, offset, valuep, 1);
+}
+
+/**
+ * drm_dp_dpcd_write_byte() - write a single byte to the DPCD
+ * @aux: DisplayPort AUX channel
+ * @offset: address of the register to write
+ * @value: value to write to the register
+ *
+ * Returns zero (0) on success, or a negative error code on failure.
+ */
+static inline int drm_dp_dpcd_write_byte(struct drm_dp_aux *aux,
+ unsigned int offset, u8 value)
+{
+ return drm_dp_dpcd_write_data(aux, offset, &value, 1);
+}
+
int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux,
u8 dpcd[DP_RECEIVER_CAP_SIZE]);
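The helpers added above change the calling convention: drm_dp_dpcd_read_data(), drm_dp_dpcd_write_data() and the *_byte() wrappers return 0 or a negative error code instead of a transfer count, so callers no longer need to compare the return value against the requested size. A minimal sketch of the intended style (the example_* function is hypothetical; the DPCD offsets are existing defines from drm_dp.h):

static int example_ack_test_request(struct drm_dp_aux *aux)
{
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	u8 request;
	int ret;

	/* Bulk read: 0 on success, -EPROTO on a short transfer. */
	ret = drm_dp_dpcd_read_data(aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
	if (ret < 0)
		return ret;

	/* Single-byte accessors replace the ssize_t-returning *b() variants. */
	ret = drm_dp_dpcd_read_byte(aux, DP_TEST_REQUEST, &request);
	if (ret < 0)
		return ret;

	return drm_dp_dpcd_write_byte(aux, DP_TEST_RESPONSE, DP_TEST_ACK);
}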
@@ -566,6 +659,13 @@ int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
enum drm_dp_phy dp_phy,
u8 link_status[DP_LINK_STATUS_SIZE]);
+int drm_dp_link_power_up(struct drm_dp_aux *aux, unsigned char revision);
+int drm_dp_link_power_down(struct drm_dp_aux *aux, unsigned char revision);
+
+int drm_dp_dpcd_write_payload(struct drm_dp_aux *aux,
+ int vcpid, u8 start_time_slot, u8 time_slot_count);
+int drm_dp_dpcd_clear_payload(struct drm_dp_aux *aux);
+int drm_dp_dpcd_poll_act_handled(struct drm_dp_aux *aux, int timeout_ms);
bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
u8 real_edid_checksum);
@@ -625,9 +725,12 @@ int drm_dp_read_lttpr_phy_caps(struct drm_dp_aux *aux,
u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
int drm_dp_lttpr_count(const u8 cap[DP_LTTPR_COMMON_CAP_SIZE]);
int drm_dp_lttpr_max_link_rate(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]);
+int drm_dp_lttpr_set_transparent_mode(struct drm_dp_aux *aux, bool enable);
+int drm_dp_lttpr_init(struct drm_dp_aux *aux, int lttpr_count);
int drm_dp_lttpr_max_lane_count(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]);
bool drm_dp_lttpr_voltage_swing_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
bool drm_dp_lttpr_pre_emphasis_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
+void drm_dp_lttpr_wake_timeout_setup(struct drm_dp_aux *aux, bool transparent_mode);
void drm_dp_remote_aux_init(struct drm_dp_aux *aux);
void drm_dp_aux_init(struct drm_dp_aux *aux);
@@ -873,5 +976,7 @@ int drm_dp_bw_channel_coding_efficiency(bool is_uhbr);
int drm_dp_max_dprx_data_rate(int max_link_rate, int max_lanes);
ssize_t drm_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc, struct dp_sdp *sdp);
+int drm_dp_link_symbol_cycles(int lane_count, int pixels, int dsc_slice_count,
+ int bpp_x16, int symbol_size, bool is_mst);
#endif /* _DRM_DP_HELPER_H_ */
diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h
index a80ba457a858..2cfe1d4bfc96 100644
--- a/include/drm/display/drm_dp_mst_helper.h
+++ b/include/drm/display/drm_dp_mst_helper.h
@@ -222,6 +222,13 @@ struct drm_dp_mst_branch {
*/
struct list_head destroy_next;
+ /**
+ * @rad: Relative Address of the MST branch.
+ * For &drm_dp_mst_topology_mgr.mst_primary, its rad[] elements are all 0,
+ * unset and unused. For MST branches connected after mst_primary,
+ * in each element of rad[] the nibbles are ordered with the most
+ * significant 4 bits first and the least significant 4 bits second.
+ */
u8 rad[8];
u8 lct;
int num_ports;
@@ -867,8 +874,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port);
-fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
- int link_rate, int link_lane_count);
+fixed20_12 drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count);
int drm_dp_calc_pbn_mode(int clock, int bpp);
diff --git a/include/drm/display/drm_hdmi_audio_helper.h b/include/drm/display/drm_hdmi_audio_helper.h
new file mode 100644
index 000000000000..c9a6faef4109
--- /dev/null
+++ b/include/drm/display/drm_hdmi_audio_helper.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_DISPLAY_HDMI_AUDIO_HELPER_H_
+#define DRM_DISPLAY_HDMI_AUDIO_HELPER_H_
+
+#include <linux/types.h>
+
+struct drm_connector;
+struct drm_connector_hdmi_audio_funcs;
+
+struct device;
+
+int drm_connector_hdmi_audio_init(struct drm_connector *connector,
+ struct device *hdmi_codec_dev,
+ const struct drm_connector_hdmi_audio_funcs *funcs,
+ unsigned int max_i2s_playback_channels,
+ bool spdif_playback,
+ int sound_dai_port);
+void drm_connector_hdmi_audio_plugged_notify(struct drm_connector *connector,
+ bool plugged);
+
+#endif
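A hedged sketch of a connector driver wiring this up; the example_* callbacks and the parameter values are illustrative only and follow the drm_connector_hdmi_audio_funcs documented in drm_connector.h:

static const struct drm_connector_hdmi_audio_funcs example_audio_funcs = {
	.prepare  = example_audio_prepare,	/* hypothetical, mandatory hook */
	.shutdown = example_audio_shutdown,	/* hypothetical, mandatory hook */
};

/* Typically called right after the connector itself has been initialized. */
ret = drm_connector_hdmi_audio_init(connector, dev,
				    &example_audio_funcs,
				    8,		/* max I2S playback channels */
				    false,	/* no S/PDIF playback */
				    1);		/* sound DAI port from DT */
if (ret)
	return ret;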
diff --git a/include/drm/display/drm_hdmi_helper.h b/include/drm/display/drm_hdmi_helper.h
index 57e3b18c15ec..09145c9ee9fc 100644
--- a/include/drm/display/drm_hdmi_helper.h
+++ b/include/drm/display/drm_hdmi_helper.h
@@ -28,4 +28,10 @@ unsigned long long
drm_hdmi_compute_mode_clock(const struct drm_display_mode *mode,
unsigned int bpc, enum hdmi_colorspace fmt);
+void
+drm_hdmi_acr_get_n_cts(unsigned long long tmds_char_rate,
+ unsigned int sample_rate,
+ unsigned int *out_n,
+ unsigned int *out_cts);
+
#endif
diff --git a/include/drm/display/drm_hdmi_state_helper.h b/include/drm/display/drm_hdmi_state_helper.h
index 2d45fcfa4619..2349c0d0f00f 100644
--- a/include/drm/display/drm_hdmi_state_helper.h
+++ b/include/drm/display/drm_hdmi_state_helper.h
@@ -6,8 +6,11 @@
struct drm_atomic_state;
struct drm_connector;
struct drm_connector_state;
+struct drm_display_mode;
struct hdmi_audio_infoframe;
+enum drm_connector_status;
+
void __drm_atomic_helper_connector_hdmi_reset(struct drm_connector *connector,
struct drm_connector_state *new_conn_state);
@@ -19,5 +22,12 @@ int drm_atomic_helper_connector_hdmi_update_audio_infoframe(struct drm_connector
int drm_atomic_helper_connector_hdmi_clear_audio_infoframe(struct drm_connector *connector);
int drm_atomic_helper_connector_hdmi_update_infoframes(struct drm_connector *connector,
struct drm_atomic_state *state);
+void drm_atomic_helper_connector_hdmi_hotplug(struct drm_connector *connector,
+ enum drm_connector_status status);
+void drm_atomic_helper_connector_hdmi_force(struct drm_connector *connector);
+
+enum drm_mode_status
+drm_hdmi_connector_mode_valid(struct drm_connector *connector,
+ const struct drm_display_mode *mode);
#endif // DRM_HDMI_STATE_HELPER_H_
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 31ca88deb10d..38636a593c9d 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -357,6 +357,37 @@ struct __drm_private_objs_state {
* States are added to an atomic update by calling drm_atomic_get_crtc_state(),
* drm_atomic_get_plane_state(), drm_atomic_get_connector_state(), or for
* private state structures, drm_atomic_get_private_obj_state().
+ *
+ * NOTE: struct drm_atomic_state first started as a single collection of
+ * entities state pointers (drm_plane_state, drm_crtc_state, etc.).
+ *
+ * At atomic_check time, you could get the state about to be committed
+ * from drm_atomic_state, and the one currently running from the
+ * entities state pointer (drm_crtc.state, for example). After the call
+ * to drm_atomic_helper_swap_state(), the entities state pointer would
+ * contain the state previously checked, and the drm_atomic_state
+ * structure the old state.
+ *
+ * Over time, and in order to avoid confusion, drm_atomic_state has
+ * grown to have both the old state (ie, the state we replace) and the
+ * new state (ie, the state we want to apply). Those names are stable
+ * during the commit process, which makes it easier to reason about.
+ *
+ * You can still find some traces of that evolution through some hooks
+ * or callbacks taking a drm_atomic_state parameter with a name like
+ * "old_state". This doesn't necessarily mean that the previous
+ * drm_atomic_state is passed, but rather that this used to be the state
+ * collection we were replacing after drm_atomic_helper_swap_state(),
+ * and the variable name was never updated.
+ *
+ * Some atomic operations implementations followed a similar process. We
+ * first started to pass the entity state only. However, it was pretty
+ * cumbersome for drivers, and especially CRTCs, to retrieve the states
+ * of other components. Thus, we switched to passing the whole
+ * drm_atomic_state as a parameter to those operations. Similarly, the
+ * transition isn't complete yet, and one might still find atomic
+ * operations taking a drm_atomic_state pointer, or a component state
+ * pointer. The former is the preferred form.
*/
struct drm_atomic_state {
/**
@@ -376,8 +407,27 @@ struct drm_atomic_state {
*
* Allow full modeset. This is used by the ATOMIC IOCTL handler to
* implement the DRM_MODE_ATOMIC_ALLOW_MODESET flag. Drivers should
- * never consult this flag, instead looking at the output of
- * drm_atomic_crtc_needs_modeset().
+ * generally not consult this flag, but instead look at the output of
+ * drm_atomic_crtc_needs_modeset(). The detailed rules are:
+ *
+ * - Drivers must not consult @allow_modeset in the atomic commit path.
+ * Use drm_atomic_crtc_needs_modeset() instead.
+ *
+ * - Drivers must consult @allow_modeset before adding unrelated struct
+ * drm_crtc_state to this commit by calling
+ * drm_atomic_get_crtc_state(). See also the warning in the
+ * documentation for that function.
+ *
+ * - Drivers must never change this flag, it is under the exclusive
+ * control of userspace.
+ *
+ * - Drivers may consult @allow_modeset in the atomic check path, if
+ * they have the choice between an optimal hardware configuration
+ * which requires a modeset, and a less optimal configuration which
+ * can be committed without a modeset. An example would be suboptimal
+ * scanout FIFO allocation resulting in increased idle power
+ * consumption. This allows userspace to avoid flickering and delays
+ * for the normal composition loop at reasonable cost.
*/
bool allow_modeset : 1;
/**
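A hedged sketch of the last rule listed above, i.e. consulting @allow_modeset only in the check phase to choose between two valid configurations; the example_*_fifo() helpers are hypothetical:

static int example_crtc_atomic_check(struct drm_crtc *crtc,
				     struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state =
		drm_atomic_get_new_crtc_state(state, crtc);

	/*
	 * Prefer the optimal FIFO layout only when userspace allows the
	 * modeset it requires; otherwise keep the current allocation.
	 * Never make this decision in the commit path.
	 */
	if (state->allow_modeset)
		return example_use_optimal_fifo(crtc, crtc_state);

	return example_keep_current_fifo(crtc, crtc_state);
}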
@@ -575,6 +625,9 @@ drm_atomic_get_old_connector_for_encoder(const struct drm_atomic_state *state,
struct drm_connector *
drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state,
struct drm_encoder *encoder);
+struct drm_connector *
+drm_atomic_get_connector_for_encoder(const struct drm_encoder *encoder,
+ struct drm_modeset_acquire_ctx *ctx);
struct drm_crtc *
drm_atomic_get_old_crtc_for_encoder(struct drm_atomic_state *state,
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 9aa0a05aa072..53382fe93537 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -139,6 +139,8 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set,
int drm_atomic_helper_disable_all(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
+int drm_atomic_helper_reset_crtc(struct drm_crtc *crtc,
+ struct drm_modeset_acquire_ctx *ctx);
void drm_atomic_helper_shutdown(struct drm_device *dev);
struct drm_atomic_state *
drm_atomic_helper_duplicate_state(struct drm_device *dev,
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index e8d735b7f6a4..4e418a29a9ff 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -41,6 +41,8 @@ struct drm_display_info;
struct drm_minor;
struct drm_panel;
struct edid;
+struct hdmi_codec_daifmt;
+struct hdmi_codec_params;
struct i2c_adapter;
/**
@@ -71,7 +73,7 @@ struct drm_bridge_funcs {
*
* Zero on success, error code on failure.
*/
- int (*attach)(struct drm_bridge *bridge,
+ int (*attach)(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags);
/**
@@ -303,7 +305,7 @@ struct drm_bridge_funcs {
* The @atomic_pre_enable callback is optional.
*/
void (*atomic_pre_enable)(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state);
+ struct drm_atomic_state *state);
/**
* @atomic_enable:
@@ -323,7 +325,7 @@ struct drm_bridge_funcs {
* The @atomic_enable callback is optional.
*/
void (*atomic_enable)(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state);
+ struct drm_atomic_state *state);
/**
* @atomic_disable:
*
@@ -340,7 +342,7 @@ struct drm_bridge_funcs {
* The @atomic_disable callback is optional.
*/
void (*atomic_disable)(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state);
+ struct drm_atomic_state *state);
/**
* @atomic_post_disable:
@@ -359,7 +361,7 @@ struct drm_bridge_funcs {
* The @atomic_post_disable callback is optional.
*/
void (*atomic_post_disable)(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state);
+ struct drm_atomic_state *state);
/**
* @atomic_duplicate_state:
@@ -677,6 +679,124 @@ struct drm_bridge_funcs {
const u8 *buffer, size_t len);
/**
+ * @hdmi_audio_startup:
+ *
+ * Called when ASoC starts an audio stream setup.
+ *
+ * This callback is optional, it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*hdmi_audio_startup)(struct drm_connector *connector,
+ struct drm_bridge *bridge);
+
+ /**
+ * @hdmi_audio_prepare:
+ * Configures HDMI-encoder for audio stream. Can be called multiple
+ * times for each setup.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*hdmi_audio_prepare)(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms);
+
+ /**
+ * @hdmi_audio_shutdown:
+ *
+ * Shut down the audio stream.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops.
+ */
+ void (*hdmi_audio_shutdown)(struct drm_connector *connector,
+ struct drm_bridge *bridge);
+
+ /**
+ * @hdmi_audio_mute_stream:
+ *
+ * Mute/unmute HDMI audio stream.
+ *
+ * This callback is optional, it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*hdmi_audio_mute_stream)(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ bool enable, int direction);
+
+ /**
+ * @dp_audio_startup:
+ *
+ * Called when ASoC starts a DisplayPort audio stream setup.
+ *
+ * This callback is optional, it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*dp_audio_startup)(struct drm_connector *connector,
+ struct drm_bridge *bridge);
+
+ /**
+ * @dp_audio_prepare:
+ * Configures DisplayPort audio stream. Can be called multiple
+ * times for each setup.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*dp_audio_prepare)(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms);
+
+ /**
+ * @dp_audio_shutdown:
+ *
+ * Shut down the DisplayPort audio stream.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops.
+ */
+ void (*dp_audio_shutdown)(struct drm_connector *connector,
+ struct drm_bridge *bridge);
+
+ /**
+ * @dp_audio_mute_stream:
+ *
+ * Mute/unmute DisplayPort audio stream.
+ *
+ * This callback is optional, it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*dp_audio_mute_stream)(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ bool enable, int direction);
+
+ /**
* @debugfs_init:
*
* Allows bridges to create bridge-specific debugfs files.
@@ -761,6 +881,32 @@ enum drm_bridge_ops {
* drivers.
*/
DRM_BRIDGE_OP_HDMI = BIT(4),
+ /**
+ * @DRM_BRIDGE_OP_HDMI_AUDIO: The bridge provides HDMI audio operations.
+ * Bridges that set this flag must implement the
+ * &drm_bridge_funcs->hdmi_audio_prepare and
+ * &drm_bridge_funcs->hdmi_audio_shutdown callbacks.
+ *
+ * Note: currently there can be at most one bridge in a chain that sets
+ * this bit. This is to simplify corresponding glue code in connector
+ * drivers. Also it is not possible to have a bridge in the chain that
+ * sets @DRM_BRIDGE_OP_DP_AUDIO if there is a bridge that sets this
+ * flag.
+ */
+ DRM_BRIDGE_OP_HDMI_AUDIO = BIT(5),
+ /**
+ * @DRM_BRIDGE_OP_DP_AUDIO: The bridge provides DisplayPort audio operations.
+ * Bridges that set this flag must implement the
+ * &drm_bridge_funcs->dp_audio_prepare and
+ * &drm_bridge_funcs->dp_audio_shutdown callbacks.
+ *
+ * Note: currently there can be at most one bridge in a chain that sets
+ * this bit. This is to simplify corresponding glue code in connector
+ * drivers. Also it is not possible to have a bridge in the chain that
+ * sets @DRM_BRIDGE_OP_HDMI_AUDIO if there is a bridge that sets this
+ * flag.
+ */
+ DRM_BRIDGE_OP_DP_AUDIO = BIT(6),
};
/**
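As a hedged sketch, a bridge advertising HDMI audio combines the new flag with the mandatory callbacks and fills in the audio fields added to struct drm_bridge further below; the example_* names are illustrative:

static const struct drm_bridge_funcs example_bridge_funcs = {
	/* ... the usual atomic enable/disable hooks ... */
	.hdmi_audio_prepare  = example_hdmi_audio_prepare,	/* hypothetical */
	.hdmi_audio_shutdown = example_hdmi_audio_shutdown,	/* hypothetical */
};

/* In probe, after allocating the bridge: */
bridge->ops |= DRM_BRIDGE_OP_HDMI | DRM_BRIDGE_OP_HDMI_AUDIO;
bridge->hdmi_audio_dev = dev;			/* parent for the HDMI codec */
bridge->hdmi_audio_max_i2s_playback_channels = 8;
bridge->hdmi_audio_dai_port = -1;		/* no DT DAI port in this sketch */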
@@ -787,6 +933,18 @@ struct drm_bridge {
const struct drm_bridge_timings *timings;
/** @funcs: control functions */
const struct drm_bridge_funcs *funcs;
+
+ /**
+ * @container: Pointer to the private driver struct embedding this
+ * &struct drm_bridge.
+ */
+ void *container;
+
+ /**
+ * @refcount: reference count of users referencing this bridge.
+ */
+ struct kref refcount;
+
/** @driver_private: pointer to the bridge driver's internal context */
void *driver_private;
/** @ops: bitmask of operations supported by the bridge */
@@ -859,6 +1017,32 @@ struct drm_bridge {
* @DRM_BRIDGE_OP_HDMI is set.
*/
unsigned int max_bpc;
+
+ /**
+ * @hdmi_audio_dev: device to be used as a parent for the HDMI Codec if
+ * either of @DRM_BRIDGE_OP_HDMI_AUDIO or @DRM_BRIDGE_OP_DP_AUDIO is set.
+ */
+ struct device *hdmi_audio_dev;
+
+ /**
+ * @hdmi_audio_max_i2s_playback_channels: maximum number of playback
+ * I2S channels for the @DRM_BRIDGE_OP_HDMI_AUDIO or
+ * @DRM_BRIDGE_OP_DP_AUDIO.
+ */
+ int hdmi_audio_max_i2s_playback_channels;
+
+ /**
+ * @hdmi_audio_spdif_playback: set if this bridge has S/PDIF playback
+ * port for @DRM_BRIDGE_OP_HDMI_AUDIO or @DRM_BRIDGE_OP_DP_AUDIO.
+ */
+ unsigned int hdmi_audio_spdif_playback : 1;
+
+ /**
+ * @hdmi_audio_dai_port: sound DAI port for either of
+ * @DRM_BRIDGE_OP_HDMI_AUDIO and @DRM_BRIDGE_OP_DP_AUDIO, -1 if it is
+ * not used.
+ */
+ int hdmi_audio_dai_port;
};
static inline struct drm_bridge *
@@ -867,6 +1051,30 @@ drm_priv_to_bridge(struct drm_private_obj *priv)
return container_of(priv, struct drm_bridge, base);
}
+struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge);
+void drm_bridge_put(struct drm_bridge *bridge);
+
+void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset,
+ const struct drm_bridge_funcs *funcs);
+
+/**
+ * devm_drm_bridge_alloc - Allocate and initialize a bridge
+ * @dev: struct device of the bridge device
+ * @type: the type of the struct which contains struct &drm_bridge
+ * @member: the name of the &drm_bridge within @type
+ * @funcs: callbacks for this bridge
+ *
+ * The reference count of the returned bridge is initialized to 1. This
+ * reference will be automatically dropped via devm (by calling
+ * drm_bridge_put()) when @dev is removed.
+ *
+ * Returns:
+ * Pointer to new bridge, or ERR_PTR on failure.
+ */
+#define devm_drm_bridge_alloc(dev, type, member, funcs) \
+ ((type *)__devm_drm_bridge_alloc(dev, sizeof(type), \
+ offsetof(type, member), funcs))
+
void drm_bridge_add(struct drm_bridge *bridge);
int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge);
void drm_bridge_remove(struct drm_bridge *bridge);
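A minimal sketch of the allocation pattern enabled by devm_drm_bridge_alloc(), assuming a hypothetical example_bridge driver struct that embeds struct drm_bridge:

struct example_bridge {				/* hypothetical driver struct */
	struct drm_bridge bridge;
	void __iomem *regs;
};

static int example_bridge_probe(struct platform_device *pdev)
{
	struct example_bridge *priv;

	/* Refcounted allocation; the initial reference is dropped via devm. */
	priv = devm_drm_bridge_alloc(&pdev->dev, struct example_bridge, bridge,
				     &example_bridge_funcs);	/* hypothetical funcs */
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	return devm_drm_bridge_add(&pdev->dev, &priv->bridge);
}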
@@ -884,6 +1092,38 @@ static inline struct drm_bridge *of_drm_find_bridge(struct device_node *np)
#endif
/**
+ * drm_bridge_get_current_state() - Get the current bridge state
+ * @bridge: bridge object
+ *
+ * This function must be called with the modeset lock held.
+ *
+ * RETURNS:
+ *
+ * The current bridge state, or NULL if there is none.
+ */
+static inline struct drm_bridge_state *
+drm_bridge_get_current_state(struct drm_bridge *bridge)
+{
+ if (!bridge)
+ return NULL;
+
+ /*
+ * Only atomic bridges will have bridge->base initialized by
+ * drm_atomic_private_obj_init(), so we need to make sure we're
+ * working with one before we try to use the lock.
+ */
+ if (!bridge->funcs || !bridge->funcs->atomic_reset)
+ return NULL;
+
+ drm_modeset_lock_assert_held(&bridge->base.lock);
+
+ if (!bridge->base.state)
+ return NULL;
+
+ return drm_priv_to_bridge_state(bridge->base.state);
+}
+
+/**
* drm_bridge_get_next_bridge() - Get the next bridge in the chain
* @bridge: bridge object
*
@@ -1034,4 +1274,7 @@ static inline struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm,
}
#endif
+void drm_bridge_debugfs_params(struct dentry *root);
+void drm_bridge_debugfs_encoder_params(struct dentry *root, struct drm_encoder *encoder);
+
#endif
diff --git a/include/drm/drm_bridge_helper.h b/include/drm/drm_bridge_helper.h
new file mode 100644
index 000000000000..6c35b479ec2a
--- /dev/null
+++ b/include/drm/drm_bridge_helper.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifndef __DRM_BRIDGE_HELPER_H_
+#define __DRM_BRIDGE_HELPER_H_
+
+struct drm_bridge;
+struct drm_modeset_acquire_ctx;
+
+int drm_bridge_helper_reset_crtc(struct drm_bridge *bridge,
+ struct drm_modeset_acquire_ctx *ctx);
+
+#endif // __DRM_BRIDGE_HELPER_H_
diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
index 9689a7c5dd36..513837632b7d 100644
--- a/include/drm/drm_buddy.h
+++ b/include/drm/drm_buddy.h
@@ -160,6 +160,8 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
u64 new_size,
struct list_head *blocks);
+void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear);
+
void drm_buddy_free_block(struct drm_buddy *mm, struct drm_buddy_block *block);
void drm_buddy_free_list(struct drm_buddy *mm,
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index 3b13cf29ed55..146ca80e35db 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -143,6 +143,14 @@ struct drm_client_dev {
bool suspended;
/**
+ * @hotplug_pending:
+ *
+ * A hotplug event has been received while the client was suspended.
+ * Try again on resume.
+ */
+ bool hotplug_pending;
+
+ /**
* @hotplug_failed:
*
* Set by client hotplug helpers if the hotplugging failed
diff --git a/include/drm/drm_client_event.h b/include/drm/drm_client_event.h
index 99863554b055..1d544d3a3228 100644
--- a/include/drm/drm_client_event.h
+++ b/include/drm/drm_client_event.h
@@ -3,6 +3,8 @@
#ifndef _DRM_CLIENT_EVENT_H_
#define _DRM_CLIENT_EVENT_H_
+#include <linux/types.h>
+
struct drm_device;
#if defined(CONFIG_DRM_CLIENT)
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 1e2b25e204cb..f13d597370a3 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -45,7 +45,10 @@ struct drm_property;
struct drm_property_blob;
struct drm_printer;
struct drm_privacy_screen;
+struct drm_edid;
struct edid;
+struct hdmi_codec_daifmt;
+struct hdmi_codec_params;
struct i2c_adapter;
enum drm_connector_force {
@@ -1141,6 +1144,53 @@ struct drm_connector_state {
struct drm_connector_hdmi_state hdmi;
};
+struct drm_connector_hdmi_audio_funcs {
+ /**
+ * @startup:
+ *
+ * Called when ASoC starts an audio stream setup. The
+ * @startup() is optional.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*startup)(struct drm_connector *connector);
+
+ /**
+ * @prepare:
+ * Configures HDMI-encoder for audio stream. Can be called
+ * multiple times for each setup. Mandatory.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*prepare)(struct drm_connector *connector,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms);
+
+ /**
+ * @shutdown:
+ *
+ * Shut down the audio stream. Mandatory.
+ */
+ void (*shutdown)(struct drm_connector *connector);
+
+ /**
+ * @mute_stream:
+ *
+ * Mute/unmute HDMI audio stream. The @mute_stream callback is
+ * optional.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*mute_stream)(struct drm_connector *connector,
+ bool enable, int direction);
+};
+
/**
* struct drm_connector_hdmi_funcs - drm_hdmi_connector control functions
*/
@@ -1198,6 +1248,21 @@ struct drm_connector_hdmi_funcs {
int (*write_infoframe)(struct drm_connector *connector,
enum hdmi_infoframe_type type,
const u8 *buffer, size_t len);
+
+ /**
+ * @read_edid:
+ *
+ * This callback is used by the framework as a replacement for reading
+ * the EDID from connector->ddc. It is still recommended to provide
+ * connector->ddc instead of implementing this callback. The returned EDID
+ * should be freed via drm_edid_free().
+ *
+ * The @read_edid callback is optional.
+ *
+ * Returns:
+ * Valid EDID on success, NULL in case of failure.
+ */
+ const struct drm_edid *(*read_edid)(struct drm_connector *connector);
};
/**
@@ -1660,6 +1725,68 @@ struct drm_cmdline_mode {
bool tv_mode_specified;
};
+/**
+ * struct drm_connector_hdmi_audio - DRM generic HDMI Codec-related structure
+ *
+ * HDMI drivers usually incorporate a HDMI Codec. This structure expresses the
+ * generic HDMI Codec as used by the DRM HDMI Codec framework.
+ */
+struct drm_connector_hdmi_audio {
+ /**
+ * @funcs:
+ *
+ * Implementation of the HDMI codec functionality to be used by the DRM
+ * HDMI Codec framework.
+ */
+ const struct drm_connector_hdmi_audio_funcs *funcs;
+
+ /**
+ * @codec_pdev:
+ *
+ * Platform device created to hold the HDMI Codec. It will be
+ * automatically unregistered during drm_connector_cleanup().
+ */
+ struct platform_device *codec_pdev;
+
+ /**
+ * @lock:
+ *
+ * Mutex to protect @last_state, @plugged_cb and @plugged_cb_dev.
+ */
+ struct mutex lock;
+
+ /**
+ * @plugged_cb:
+ *
+ * Callback to be called when the HDMI sink gets plugged into or unplugged
+ * from this connector. This is assigned by the framework when
+ * requested by the ASoC code.
+ */
+ void (*plugged_cb)(struct device *dev, bool plugged);
+
+ /**
+ * @plugged_cb_dev:
+ *
+ * The data for @plugged_cb(). It is provided by the ASoC code.
+ */
+ struct device *plugged_cb_dev;
+
+ /**
+ * @last_state:
+ *
+ * Last plugged state recorded by the framework. It is used to correctly
+ * report the state to @plugged_cb().
+ */
+ bool last_state;
+
+ /**
+ * @dai_port:
+ *
+ * The port in DT that is used for the Codec DAI.
+ */
+ int dai_port;
+};
+
/*
* struct drm_connector_hdmi - DRM Connector HDMI-related structure
*/
@@ -2121,6 +2248,11 @@ struct drm_connector {
* @hdmi: HDMI-related variable and properties.
*/
struct drm_connector_hdmi hdmi;
+
+ /**
+ * @hdmi_audio: HDMI codec properties and non-DRM state.
+ */
+ struct drm_connector_hdmi_audio hdmi_audio;
};
#define obj_to_connector(x) container_of(x, struct drm_connector, base)
@@ -2129,6 +2261,11 @@ int drm_connector_init(struct drm_device *dev,
struct drm_connector *connector,
const struct drm_connector_funcs *funcs,
int connector_type);
+int drm_connector_dynamic_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type,
+ struct i2c_adapter *ddc);
int drm_connector_init_with_ddc(struct drm_device *dev,
struct drm_connector *connector,
const struct drm_connector_funcs *funcs,
@@ -2150,6 +2287,7 @@ int drmm_connector_hdmi_init(struct drm_device *dev,
unsigned int max_bpc);
void drm_connector_attach_edid_property(struct drm_connector *connector);
int drm_connector_register(struct drm_connector *connector);
+int drm_connector_dynamic_register(struct drm_connector *connector);
void drm_connector_unregister(struct drm_connector *connector);
int drm_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
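For connectors that appear and disappear at runtime (for example DP MST ports), the new dynamic variants split initialization from registration. A hedged sketch, in which example_connector_funcs and the ddc adapter are assumptions:

/* When the connector object is created, typically after device registration: */
ret = drm_connector_dynamic_init(dev, connector, &example_connector_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort, ddc);
if (ret)
	return ret;

/* Later, once the backing port is actually usable: */
ret = drm_connector_dynamic_register(connector);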
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 8b48a1974da3..caa56e039da2 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -1323,5 +1323,5 @@ static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
int drm_crtc_create_scaling_filter_property(struct drm_crtc *crtc,
unsigned int supported_filters);
-
+bool drm_crtc_in_clone_mode(struct drm_crtc_state *crtc_state);
#endif /* __DRM_CRTC_H__ */
diff --git a/include/drm/drm_damage_helper.h b/include/drm/drm_damage_helper.h
index effda42cce31..a58cbcd11276 100644
--- a/include/drm/drm_damage_helper.h
+++ b/include/drm/drm_damage_helper.h
@@ -78,7 +78,7 @@ bool
drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter,
struct drm_rect *rect);
bool drm_atomic_helper_damage_merged(const struct drm_plane_state *old_state,
- struct drm_plane_state *state,
+ const struct drm_plane_state *state,
struct drm_rect *rect);
#endif
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index c91f87b5242d..e2f894f1b90a 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -21,6 +21,14 @@ struct inode;
struct pci_dev;
struct pci_controller;
+/*
+ * Recovery methods for a wedged device, in increasing order of side-effects.
+ * To be used with drm_dev_wedged_event() as recovery @method. Callers can
+ * use any one, multiple (or'd) or none depending on their needs.
+ */
+#define DRM_WEDGE_RECOVERY_NONE BIT(0) /* optional telemetry collection */
+#define DRM_WEDGE_RECOVERY_REBIND BIT(1) /* unbind + bind driver */
+#define DRM_WEDGE_RECOVERY_BUS_RESET BIT(2) /* unbind + reset bus device + bind */
/**
* enum switch_power_state - power state of drm device
@@ -57,6 +65,28 @@ struct drm_device {
struct device *dev;
/**
+ * @dma_dev:
+ *
+ * Device for DMA operations. Only required if the device @dev
+ * cannot perform DMA by itself. Should be NULL otherwise. Call
+ * drm_dev_dma_dev() to get the DMA device instead of using this
+ * field directly. Call drm_dev_set_dma_dev() to set this field.
+ *
+ * DRM devices are sometimes bound to virtual devices that cannot
+ * perform DMA by themselves. Drivers should set this field to the
+ * respective DMA controller.
+ *
+ * Devices on USB and other peripheral busses also cannot perform
+ * DMA by themselves. The @dma_dev field should point to the bus
+ * controller that does DMA on behalf of such a device. Required
+ * for importing buffers via dma-buf.
+ *
+ * If set, the DRM core automatically releases the reference on the
+ * device.
+ */
+ struct device *dma_dev;
+
+ /**
* @managed:
*
* Managed resources linked to the lifetime of this &drm_device as
@@ -319,4 +349,23 @@ struct drm_device {
struct dentry *debugfs_root;
};
+void drm_dev_set_dma_dev(struct drm_device *dev, struct device *dma_dev);
+
+/**
+ * drm_dev_dma_dev - returns the DMA device for a DRM device
+ * @dev: DRM device
+ *
+ * Returns the DMA device of the given DRM device. By default, this is
+ * the DRM device's parent. See drm_dev_set_dma_dev().
+ *
+ * Returns:
+ * A DMA-capable device for the DRM device.
+ */
+static inline struct device *drm_dev_dma_dev(struct drm_device *dev)
+{
+ if (dev->dma_dev)
+ return dev->dma_dev;
+ return dev->dev;
+}
+
#endif
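A hedged sketch of using the new DMA-device plumbing; dma_parent stands in for whatever bus controller actually performs DMA for the (virtual or USB-attached) DRM device:

/* At bind time, if the DRM device itself cannot do DMA: */
drm_dev_set_dma_dev(drm, dma_parent);		/* hypothetical bus controller */

/* Anywhere DMA-capable access is needed, e.g. dma-buf import: */
struct device *dma_dev = drm_dev_dma_dev(drm);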
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 1bbbcb8e2d23..63b51942d606 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -34,6 +34,7 @@
#include <drm/drm_device.h>
+struct dmem_cgroup_region;
struct drm_fb_helper;
struct drm_fb_helper_surface_size;
struct drm_file;
@@ -401,8 +402,6 @@ struct drm_driver {
char *name;
/** @desc: driver description */
char *desc;
- /** @date: driver date, unused, to be removed */
- char *date;
/**
* @driver_features:
@@ -438,6 +437,10 @@ void *__devm_drm_dev_alloc(struct device *parent,
const struct drm_driver *driver,
size_t size, size_t offset);
+struct dmem_cgroup_region *
+drmm_cgroup_register_region(struct drm_device *dev,
+ const char *region_name, u64 size);
+
/**
* devm_drm_dev_alloc - Resource managed allocation of a &drm_device instance
* @parent: Parent device object
@@ -470,6 +473,11 @@ void *__devm_drm_dev_alloc(struct device *parent,
struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
struct device *parent);
+
+void *__drm_dev_alloc(struct device *parent,
+ const struct drm_driver *driver,
+ size_t size, size_t offset);
+
int drm_dev_register(struct drm_device *dev, unsigned long flags);
void drm_dev_unregister(struct drm_device *dev);
@@ -479,6 +487,7 @@ void drm_put_dev(struct drm_device *dev);
bool drm_dev_enter(struct drm_device *dev, int *idx);
void drm_dev_exit(int idx);
void drm_dev_unplug(struct drm_device *dev);
+int drm_dev_wedged_event(struct drm_device *dev, unsigned long method);
/**
* drm_dev_is_unplugged - is a DRM device unplugged
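A hedged example of flagging an unrecoverable device with the recovery hints defined in drm_device.h; which methods a driver suggests is driver policy:

/* Device is wedged beyond in-driver recovery: notify userspace. */
drm_dev_wedged_event(dev,
		     DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET);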
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index eaac5e665892..b38409670868 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -437,7 +437,7 @@ bool drm_detect_monitor_audio(const struct edid *edid);
enum hdmi_quantization_range
drm_default_rgb_quant_range(const struct drm_display_mode *mode);
int drm_add_modes_noedid(struct drm_connector *connector,
- int hdisplay, int vdisplay);
+ unsigned int hdisplay, unsigned int vdisplay);
int drm_edid_header_is_valid(const void *edid);
bool drm_edid_is_valid(struct edid *edid);
diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h
deleted file mode 100644
index 49172166a164..000000000000
--- a/include/drm/drm_encoder_slave.h
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * Copyright (C) 2009 Francisco Jerez.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __DRM_ENCODER_SLAVE_H__
-#define __DRM_ENCODER_SLAVE_H__
-
-#include <linux/i2c.h>
-
-#include <drm/drm_crtc.h>
-#include <drm/drm_encoder.h>
-
-/**
- * struct drm_encoder_slave_funcs - Entry points exposed by a slave encoder driver
- *
- * Most of its members are analogous to the function pointers in
- * &drm_encoder_helper_funcs and they can optionally be used to
- * initialize the latter. Connector-like methods (e.g. @get_modes and
- * @set_property) will typically be wrapped around and only be called
- * if the encoder is the currently selected one for the connector.
- */
-struct drm_encoder_slave_funcs {
- /**
- * @set_config: Initialize any encoder-specific modesetting parameters.
- * The meaning of the @params parameter is implementation dependent. It
- * will usually be a structure with DVO port data format settings or
- * timings. It's not required for the new parameters to take effect
- * until the next mode is set.
- */
- void (*set_config)(struct drm_encoder *encoder,
- void *params);
-
- /**
- * @destroy: Analogous to &drm_encoder_funcs @destroy callback.
- */
- void (*destroy)(struct drm_encoder *encoder);
-
- /**
- * @dpms: Analogous to &drm_encoder_helper_funcs @dpms callback. Wrapped
- * by drm_i2c_encoder_dpms().
- */
- void (*dpms)(struct drm_encoder *encoder, int mode);
-
- /**
- * @save: Save state. Wrapped by drm_i2c_encoder_save().
- */
- void (*save)(struct drm_encoder *encoder);
-
- /**
- * @restore: Restore state. Wrapped by drm_i2c_encoder_restore().
- */
- void (*restore)(struct drm_encoder *encoder);
-
- /**
- * @mode_fixup: Analogous to &drm_encoder_helper_funcs @mode_fixup
- * callback. Wrapped by drm_i2c_encoder_mode_fixup().
- */
- bool (*mode_fixup)(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-
- /**
- * @mode_valid: Analogous to &drm_encoder_helper_funcs @mode_valid.
- */
- int (*mode_valid)(struct drm_encoder *encoder,
- struct drm_display_mode *mode);
- /**
- * @mode_set: Analogous to &drm_encoder_helper_funcs @mode_set
- * callback. Wrapped by drm_i2c_encoder_mode_set().
- */
- void (*mode_set)(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-
- /**
- * @detect: Analogous to &drm_encoder_helper_funcs @detect
- * callback. Wrapped by drm_i2c_encoder_detect().
- */
- enum drm_connector_status (*detect)(struct drm_encoder *encoder,
- struct drm_connector *connector);
- /**
- * @get_modes: Get modes.
- */
- int (*get_modes)(struct drm_encoder *encoder,
- struct drm_connector *connector);
- /**
- * @create_resources: Create resources.
- */
- int (*create_resources)(struct drm_encoder *encoder,
- struct drm_connector *connector);
- /**
- * @set_property: Set property.
- */
- int (*set_property)(struct drm_encoder *encoder,
- struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val);
-};
-
-/**
- * struct drm_encoder_slave - Slave encoder struct
- *
- * A &drm_encoder_slave has two sets of callbacks, @slave_funcs and the
- * ones in @base. The former are never actually called by the common
- * CRTC code, it's just a convenience for splitting the encoder
- * functions in an upper, GPU-specific layer and a (hopefully)
- * GPU-agnostic lower layer: It's the GPU driver responsibility to
- * call the slave methods when appropriate.
- *
- * drm_i2c_encoder_init() provides a way to get an implementation of
- * this.
- */
-struct drm_encoder_slave {
- /**
- * @base: DRM encoder object.
- */
- struct drm_encoder base;
-
- /**
- * @slave_funcs: Slave encoder callbacks.
- */
- const struct drm_encoder_slave_funcs *slave_funcs;
-
- /**
- * @slave_priv: Slave encoder private data.
- */
- void *slave_priv;
-
- /**
- * @bus_priv: Bus specific data.
- */
- void *bus_priv;
-};
-#define to_encoder_slave(x) container_of((x), struct drm_encoder_slave, base)
-
-int drm_i2c_encoder_init(struct drm_device *dev,
- struct drm_encoder_slave *encoder,
- struct i2c_adapter *adap,
- const struct i2c_board_info *info);
-
-
-/**
- * struct drm_i2c_encoder_driver
- *
- * Describes a device driver for an encoder connected to the GPU through an I2C
- * bus.
- */
-struct drm_i2c_encoder_driver {
- /**
- * @i2c_driver: I2C device driver description.
- */
- struct i2c_driver i2c_driver;
-
- /**
- * @encoder_init: Callback to allocate any per-encoder data structures
- * and to initialize the @slave_funcs and (optionally) @slave_priv
- * members of @encoder.
- */
- int (*encoder_init)(struct i2c_client *client,
- struct drm_device *dev,
- struct drm_encoder_slave *encoder);
-
-};
-#define to_drm_i2c_encoder_driver(x) container_of((x), \
- struct drm_i2c_encoder_driver, \
- i2c_driver)
-
-/**
- * drm_i2c_encoder_get_client - Get the I2C client corresponding to an encoder
- * @encoder: The encoder
- */
-static inline struct i2c_client *drm_i2c_encoder_get_client(struct drm_encoder *encoder)
-{
- return (struct i2c_client *)to_encoder_slave(encoder)->bus_priv;
-}
-
-/**
- * drm_i2c_encoder_register - Register an I2C encoder driver
- * @owner: Module containing the driver.
- * @driver: Driver to be registered.
- */
-static inline int drm_i2c_encoder_register(struct module *owner,
- struct drm_i2c_encoder_driver *driver)
-{
- return i2c_register_driver(owner, &driver->i2c_driver);
-}
-
-/**
- * drm_i2c_encoder_unregister - Unregister an I2C encoder driver
- * @driver: Driver to be unregistered.
- */
-static inline void drm_i2c_encoder_unregister(struct drm_i2c_encoder_driver *driver)
-{
- i2c_del_driver(&driver->i2c_driver);
-}
-
-void drm_i2c_encoder_destroy(struct drm_encoder *encoder);
-
-
-/*
- * Wrapper fxns which can be plugged in to drm_encoder_helper_funcs:
- */
-
-void drm_i2c_encoder_dpms(struct drm_encoder *encoder, int mode);
-bool drm_i2c_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-void drm_i2c_encoder_prepare(struct drm_encoder *encoder);
-void drm_i2c_encoder_commit(struct drm_encoder *encoder);
-void drm_i2c_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-enum drm_connector_status drm_i2c_encoder_detect(struct drm_encoder *encoder,
- struct drm_connector *connector);
-void drm_i2c_encoder_save(struct drm_encoder *encoder);
-void drm_i2c_encoder_restore(struct drm_encoder *encoder);
-
-
-#endif
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 8426b9921a03..c1d38d54a112 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -70,23 +70,6 @@ struct drm_fb_helper_surface_size {
*/
struct drm_fb_helper_funcs {
/**
- * @fb_probe:
- *
- * Driver callback to allocate and initialize the fbdev info structure.
- * Furthermore it also needs to allocate the DRM framebuffer used to
- * back the fbdev.
- *
- * This callback is mandatory.
- *
- * RETURNS:
- *
- * The driver should return 0 on success and a negative error code on
- * failure.
- */
- int (*fb_probe)(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes);
-
- /**
* @fb_dirty:
*
* Driver callback to update the framebuffer memory. If set, fbdev
@@ -99,6 +82,33 @@ struct drm_fb_helper_funcs {
* 0 on success, or an error code otherwise.
*/
int (*fb_dirty)(struct drm_fb_helper *helper, struct drm_clip_rect *clip);
+
+ /**
+ * @fb_restore:
+ *
+ * Driver callback to restore internal fbdev state. If set, fbdev
+ * emulation will invoke this callback after restoring the display
+ * mode.
+ *
+ * Only for i915. Do not use in new code.
+ *
+ * TODO: Fix i915 to not require this callback.
+ */
+ void (*fb_restore)(struct drm_fb_helper *helper);
+
+ /**
+ * @fb_set_suspend:
+ *
+	 * Driver callback to suspend or resume. If set, fbdev emulation will
+	 * invoke this callback during suspend and resume. Drivers should call
+ * fb_set_suspend() from their implementation. If not set, fbdev
+ * emulation will invoke fb_set_suspend() directly.
+ *
+ * Only for i915. Do not use in new code.
+ *
+ * TODO: Fix i915 to not require this callback.
+ */
+ void (*fb_set_suspend)(struct drm_fb_helper *helper, bool suspend);
};
/**
diff --git a/include/drm/drm_fbdev_client.h b/include/drm/drm_fbdev_client.h
deleted file mode 100644
index e11a5614f127..000000000000
--- a/include/drm/drm_fbdev_client.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-
-#ifndef DRM_FBDEV_CLIENT_H
-#define DRM_FBDEV_CLIENT_H
-
-struct drm_device;
-struct drm_format_info;
-
-#ifdef CONFIG_DRM_FBDEV_EMULATION
-int drm_fbdev_client_setup(struct drm_device *dev, const struct drm_format_info *format);
-#else
-static inline int drm_fbdev_client_setup(struct drm_device *dev,
- const struct drm_format_info *format)
-{
- return 0;
-}
-#endif
-
-#endif
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index f0ef32e9fa5e..d344d41e6cfe 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -300,6 +300,9 @@ struct drm_file {
*
* Mapping of mm object handles to object pointers. Used by the GEM
* subsystem. Protected by @table_lock.
+ *
+ * Note that allocated entries might be NULL as a transient state when
+ * creating or deleting a handle.
*/
struct idr object_idr;
@@ -446,6 +449,9 @@ static inline bool drm_is_accel_client(const struct drm_file *file_priv)
return file_priv->minor->type == DRM_MINOR_ACCEL;
}
+__printf(2, 3)
+void drm_file_err(struct drm_file *file_priv, const char *fmt, ...);
+
void drm_file_update_pid(struct drm_file *);
struct drm_minor *drm_minor_acquire(struct xarray *minors_xa, unsigned int minor_id);
@@ -494,6 +500,12 @@ struct drm_memory_stats {
enum drm_gem_object_status;
+int drm_memory_stats_is_zero(const struct drm_memory_stats *stats);
+void drm_fdinfo_print_size(struct drm_printer *p,
+ const char *prefix,
+ const char *stat,
+ const char *region,
+ u64 sz);
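
For context, a minimal sketch (not part of this patch) of how a driver's fdinfo callback might combine the helpers added above; the struct drm_memory_stats field used here (.resident) and the "vram0" region name are assumptions:

static void example_show_fdinfo(struct drm_printer *p, struct drm_file *file)
{
	struct drm_memory_stats stats = {};

	/* ... fill stats by walking the file's GEM handles ... */

	if (drm_memory_stats_is_zero(&stats))
		return;

	/* Prints one fdinfo size line for the (made-up) "vram0" region. */
	drm_fdinfo_print_size(p, "drm", "resident", "vram0", stats.resident);
}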
void drm_print_memory_stats(struct drm_printer *p,
const struct drm_memory_stats *stats,
enum drm_gem_object_status supported_status,
diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
index 428d81afe215..d8539174ca11 100644
--- a/include/drm/drm_format_helper.h
+++ b/include/drm/drm_format_helper.h
@@ -96,6 +96,9 @@ void drm_fb_xrgb8888_to_rgba5551(struct iosys_map *dst, const unsigned int *dst_
void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_bgr888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip, struct drm_format_conv_state *state);
@@ -110,6 +113,9 @@ void drm_fb_xrgb8888_to_argb2101010(struct iosys_map *dst, const unsigned int *d
void drm_fb_xrgb8888_to_gray8(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_argb8888_to_argb4444(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format,
const struct iosys_map *src, const struct drm_framebuffer *fb,
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
index 668077009fce..38b24fc8978d 100644
--- a/include/drm/drm_framebuffer.h
+++ b/include/drm/drm_framebuffer.h
@@ -23,6 +23,7 @@
#ifndef __DRM_FRAMEBUFFER_H__
#define __DRM_FRAMEBUFFER_H__
+#include <linux/bits.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/sched.h>
@@ -100,6 +101,8 @@ struct drm_framebuffer_funcs {
unsigned num_clips);
};
+#define DRM_FRAMEBUFFER_HAS_HANDLE_REF(_i) BIT(0u + (_i))
+
/**
* struct drm_framebuffer - frame buffer object
*
@@ -189,6 +192,10 @@ struct drm_framebuffer {
*/
int flags;
/**
+ * @internal_flags: Framebuffer flags like DRM_FRAMEBUFFER_HAS_HANDLE_REF.
+ */
+ unsigned int internal_flags;
+ /**
* @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock.
*/
struct list_head filp_head;
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 5b8b1b059d32..a3133a08267c 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -35,6 +35,7 @@
*/
#include <linux/kref.h>
+#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -48,19 +49,21 @@ struct drm_gem_object;
* enum drm_gem_object_status - bitmask of object state for fdinfo reporting
* @DRM_GEM_OBJECT_RESIDENT: object is resident in memory (ie. not unpinned)
* @DRM_GEM_OBJECT_PURGEABLE: object marked as purgeable by userspace
+ * @DRM_GEM_OBJECT_ACTIVE: object is currently used by an active submission
*
* Bitmask of status used for fdinfo memory stats, see &drm_gem_object_funcs.status
- * and drm_show_fdinfo(). Note that an object can DRM_GEM_OBJECT_PURGEABLE if
- * it still active or not resident, in which case drm_show_fdinfo() will not
+ * and drm_show_fdinfo(). Note that an object can report DRM_GEM_OBJECT_PURGEABLE
+ * and be active or not resident, in which case drm_show_fdinfo() will not
* account for it as purgeable. So drivers do not need to check if the buffer
- * is idle and resident to return this bit. (Ie. userspace can mark a buffer
- * as purgeable even while it is still busy on the GPU.. it does not _actually_
- * become puregeable until it becomes idle. The status gem object func does
- * not need to consider this.)
+ * is idle and resident to return this bit, i.e. userspace can mark a buffer as
+ * purgeable even while it is still busy on the GPU. It will not get reported in
+ * the purgeable stats until it becomes idle. The status gem object func does
+ * not need to consider this.
*/
enum drm_gem_object_status {
DRM_GEM_OBJECT_RESIDENT = BIT(0),
DRM_GEM_OBJECT_PURGEABLE = BIT(1),
+ DRM_GEM_OBJECT_ACTIVE = BIT(2),
};
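
As an illustration only, a driver's &drm_gem_object_funcs.status callback reporting the new bit might look like the sketch below; the example_bo layout and its fields are assumptions, not part of this header:

struct example_bo {
	struct drm_gem_object base;
	struct page **pages;	/* non-NULL while backing pages exist */
	int madv;		/* > 0 once userspace marked the BO purgeable */
	bool active;		/* tracked by the driver's submission code */
};

static enum drm_gem_object_status example_status(struct drm_gem_object *obj)
{
	struct example_bo *bo = container_of(obj, struct example_bo, base);
	enum drm_gem_object_status status = 0;

	if (bo->pages)
		status |= DRM_GEM_OBJECT_RESIDENT;
	if (bo->madv > 0)
		status |= DRM_GEM_OBJECT_PURGEABLE;
	if (bo->active)
		status |= DRM_GEM_OBJECT_ACTIVE;

	return status;
}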
/**
@@ -156,7 +159,8 @@ struct drm_gem_object_funcs {
* @vmap:
*
* Returns a virtual address for the buffer. Used by the
- * drm_gem_dmabuf_vmap() helper.
+ * drm_gem_dmabuf_vmap() helper. Called with a held GEM reservation
+ * lock.
*
* This callback is optional.
*/
@@ -166,7 +170,8 @@ struct drm_gem_object_funcs {
* @vunmap:
*
* Releases the address previously returned by @vmap. Used by the
- * drm_gem_dmabuf_vunmap() helper.
+ * drm_gem_dmabuf_vunmap() helper. Called with a held GEM reservation
+ * lock.
*
* This callback is optional.
*/
@@ -189,7 +194,8 @@ struct drm_gem_object_funcs {
* @evict:
*
* Evicts gem object out from memory. Used by the drm_gem_object_evict()
- * helper. Returns 0 on success, -errno otherwise.
+ * helper. Returns 0 on success, -errno otherwise. Called with a held
+ * GEM reservation lock.
*
* This callback is optional.
*/
@@ -534,8 +540,8 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
void drm_gem_lock(struct drm_gem_object *obj);
void drm_gem_unlock(struct drm_gem_object *obj);
-int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map);
-void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map);
+int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map);
+void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
int count, struct drm_gem_object ***objs_out);
@@ -558,7 +564,7 @@ unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
unsigned long *remaining,
bool (*shrink)(struct drm_gem_object *obj));
-int drm_gem_evict(struct drm_gem_object *obj);
+int drm_gem_evict_locked(struct drm_gem_object *obj);
/**
* drm_gem_object_is_shared_for_memory_stats - helper for shared memory stats
@@ -573,6 +579,18 @@ static inline bool drm_gem_object_is_shared_for_memory_stats(struct drm_gem_obje
return (obj->handle_count > 1) || obj->dma_buf;
}
+/**
+ * drm_gem_is_imported() - Tests if GEM object's buffer has been imported
+ * @obj: the GEM object
+ *
+ * Returns:
+ * True if the GEM object's buffer has been imported, false otherwise
+ */
+static inline bool drm_gem_is_imported(const struct drm_gem_object *obj)
+{
+ return !!obj->import_attach;
+}
+
#ifdef CONFIG_LOCKDEP
/**
* drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list.
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index d22e3fb53631..b4f993da3cae 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -37,7 +37,18 @@ struct drm_gem_shmem_object {
* Reference count on the pages table.
* The pages are put when the count reaches zero.
*/
- unsigned int pages_use_count;
+ refcount_t pages_use_count;
+
+ /**
+ * @pages_pin_count:
+ *
+ * Reference count on the pinned pages table.
+ *
+	 * Pages are hard-pinned and reside in memory if the count is
+	 * greater than zero. Otherwise, when the count is zero, the pages are
+	 * allowed to be evicted and purged by the memory shrinker.
+ */
+ refcount_t pages_pin_count;
/**
* @madv: State for madvise
@@ -71,7 +82,7 @@ struct drm_gem_shmem_object {
* Reference count on the virtual address.
* The address are un-mapped when the count reaches zero.
*/
- unsigned int vmap_use_count;
+ refcount_t vmap_use_count;
/**
* @pages_mark_dirty_on_put:
@@ -102,28 +113,28 @@ struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *de
struct vfsmount *gemfs);
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem);
-void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem);
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem);
-int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map);
-void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map);
+int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map);
+void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map);
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma);
int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem);
-int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv);
+int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv);
static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem)
{
return (shmem->madv > 0) &&
- !shmem->vmap_use_count && shmem->sgt &&
- !shmem->base.dma_buf && !shmem->base.import_attach;
+ !refcount_read(&shmem->pages_pin_count) && shmem->sgt &&
+ !shmem->base.dma_buf && !drm_gem_is_imported(&shmem->base);
}
-void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem);
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem);
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem);
@@ -214,12 +225,12 @@ static inline struct sg_table *drm_gem_shmem_object_get_sg_table(struct drm_gem_
}
/*
- * drm_gem_shmem_object_vmap - GEM object function for drm_gem_shmem_vmap()
+ * drm_gem_shmem_object_vmap - GEM object function for drm_gem_shmem_vmap_locked()
* @obj: GEM object
* @map: Returns the kernel virtual address of the SHMEM GEM object's backing store.
*
- * This function wraps drm_gem_shmem_vmap(). Drivers that employ the shmem helpers should
- * use it as their &drm_gem_object_funcs.vmap handler.
+ * This function wraps drm_gem_shmem_vmap_locked(). Drivers that employ the shmem
+ * helpers should use it as their &drm_gem_object_funcs.vmap handler.
*
* Returns:
* 0 on success or a negative error code on failure.
@@ -229,7 +240,7 @@ static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj,
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
- return drm_gem_shmem_vmap(shmem, map);
+ return drm_gem_shmem_vmap_locked(shmem, map);
}
/*
@@ -237,15 +248,15 @@ static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj,
* @obj: GEM object
* @map: Kernel virtual address where the SHMEM GEM object was mapped
*
- * This function wraps drm_gem_shmem_vunmap(). Drivers that employ the shmem helpers should
- * use it as their &drm_gem_object_funcs.vunmap handler.
+ * This function wraps drm_gem_shmem_vunmap_locked(). Drivers that employ the shmem
+ * helpers should use it as their &drm_gem_object_funcs.vunmap handler.
*/
static inline void drm_gem_shmem_object_vunmap(struct drm_gem_object *obj,
struct iosys_map *map)
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
- drm_gem_shmem_vunmap(shmem, map);
+ drm_gem_shmem_vunmap_locked(shmem, map);
}
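
For reference, a sketch of how these wrappers are typically wired up as GEM object functions; the initializer below is illustrative, only the helper names come from this header:

static const struct drm_gem_object_funcs example_shmem_funcs = {
	.free		= drm_gem_shmem_object_free,
	.print_info	= drm_gem_shmem_object_print_info,
	.pin		= drm_gem_shmem_object_pin,
	.unpin		= drm_gem_shmem_object_unpin,
	.get_sg_table	= drm_gem_shmem_object_get_sg_table,
	.vmap		= drm_gem_shmem_object_vmap,	/* wraps drm_gem_shmem_vmap_locked() */
	.vunmap		= drm_gem_shmem_object_vunmap,	/* wraps drm_gem_shmem_vunmap_locked() */
	.mmap		= drm_gem_shmem_object_mmap,
	.vm_ops		= &drm_gem_shmem_vm_ops,
};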
/**
diff --git a/include/drm/drm_gpusvm.h b/include/drm/drm_gpusvm.h
new file mode 100644
index 000000000000..eaf704d3d05e
--- /dev/null
+++ b/include/drm/drm_gpusvm.h
@@ -0,0 +1,528 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef __DRM_GPUSVM_H__
+#define __DRM_GPUSVM_H__
+
+#include <linux/kref.h>
+#include <linux/interval_tree.h>
+#include <linux/mmu_notifier.h>
+
+struct dev_pagemap_ops;
+struct drm_device;
+struct drm_gpusvm;
+struct drm_gpusvm_notifier;
+struct drm_gpusvm_ops;
+struct drm_gpusvm_range;
+struct drm_gpusvm_devmem;
+struct drm_pagemap;
+struct drm_pagemap_device_addr;
+
+/**
+ * struct drm_gpusvm_devmem_ops - Operations structure for GPU SVM device memory
+ *
+ * This structure defines the operations for GPU Shared Virtual Memory (SVM)
+ * device memory. These operations are provided by the GPU driver to manage device memory
+ * allocations and perform operations such as migration between device memory and system
+ * RAM.
+ */
+struct drm_gpusvm_devmem_ops {
+ /**
+ * @devmem_release: Release device memory allocation (optional)
+ * @devmem_allocation: device memory allocation
+ *
+ * Release device memory allocation and drop a reference to device
+ * memory allocation.
+ */
+ void (*devmem_release)(struct drm_gpusvm_devmem *devmem_allocation);
+
+ /**
+ * @populate_devmem_pfn: Populate device memory PFN (required for migration)
+ * @devmem_allocation: device memory allocation
+ * @npages: Number of pages to populate
+ * @pfn: Array of page frame numbers to populate
+ *
+ * Populate device memory page frame numbers (PFN).
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+ int (*populate_devmem_pfn)(struct drm_gpusvm_devmem *devmem_allocation,
+ unsigned long npages, unsigned long *pfn);
+
+ /**
+ * @copy_to_devmem: Copy to device memory (required for migration)
+ * @pages: Pointer to array of device memory pages (destination)
+ * @dma_addr: Pointer to array of DMA addresses (source)
+ * @npages: Number of pages to copy
+ *
+ * Copy pages to device memory.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+ int (*copy_to_devmem)(struct page **pages,
+ dma_addr_t *dma_addr,
+ unsigned long npages);
+
+ /**
+ * @copy_to_ram: Copy to system RAM (required for migration)
+ * @pages: Pointer to array of device memory pages (source)
+ * @dma_addr: Pointer to array of DMA addresses (destination)
+ * @npages: Number of pages to copy
+ *
+ * Copy pages to system RAM.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+ int (*copy_to_ram)(struct page **pages,
+ dma_addr_t *dma_addr,
+ unsigned long npages);
+};
+
+/**
+ * struct drm_gpusvm_devmem - Structure representing a GPU SVM device memory allocation
+ *
+ * @dev: Pointer to the device structure which device memory allocation belongs to
+ * @mm: Pointer to the mm_struct for the address space
+ * @detached: device memory allocation is detached from device pages
+ * @ops: Pointer to the operations structure for GPU SVM device memory
+ * @dpagemap: The struct drm_pagemap of the pages this allocation belongs to.
+ * @size: Size of device memory allocation
+ * @timeslice_expiration: Timeslice expiration in jiffies
+ */
+struct drm_gpusvm_devmem {
+ struct device *dev;
+ struct mm_struct *mm;
+ struct completion detached;
+ const struct drm_gpusvm_devmem_ops *ops;
+ struct drm_pagemap *dpagemap;
+ size_t size;
+ u64 timeslice_expiration;
+};
+
+/**
+ * struct drm_gpusvm_ops - Operations structure for GPU SVM
+ *
+ * This structure defines the operations for GPU Shared Virtual Memory (SVM).
+ * These operations are provided by the GPU driver to manage SVM ranges and
+ * notifiers.
+ */
+struct drm_gpusvm_ops {
+ /**
+ * @notifier_alloc: Allocate a GPU SVM notifier (optional)
+ *
+ * Allocate a GPU SVM notifier.
+ *
+ * Return: Pointer to the allocated GPU SVM notifier on success, NULL on failure.
+ */
+ struct drm_gpusvm_notifier *(*notifier_alloc)(void);
+
+ /**
+ * @notifier_free: Free a GPU SVM notifier (optional)
+ * @notifier: Pointer to the GPU SVM notifier to be freed
+ *
+ * Free a GPU SVM notifier.
+ */
+ void (*notifier_free)(struct drm_gpusvm_notifier *notifier);
+
+ /**
+ * @range_alloc: Allocate a GPU SVM range (optional)
+ * @gpusvm: Pointer to the GPU SVM
+ *
+ * Allocate a GPU SVM range.
+ *
+ * Return: Pointer to the allocated GPU SVM range on success, NULL on failure.
+ */
+ struct drm_gpusvm_range *(*range_alloc)(struct drm_gpusvm *gpusvm);
+
+ /**
+ * @range_free: Free a GPU SVM range (optional)
+ * @range: Pointer to the GPU SVM range to be freed
+ *
+ * Free a GPU SVM range.
+ */
+ void (*range_free)(struct drm_gpusvm_range *range);
+
+ /**
+ * @invalidate: Invalidate GPU SVM notifier (required)
+ * @gpusvm: Pointer to the GPU SVM
+ * @notifier: Pointer to the GPU SVM notifier
+ * @mmu_range: Pointer to the mmu_notifier_range structure
+ *
+ * Invalidate the GPU page tables. It can safely walk the notifier range
+ * RB tree/list in this function. Called while holding the notifier lock.
+ */
+ void (*invalidate)(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_notifier *notifier,
+ const struct mmu_notifier_range *mmu_range);
+};
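
A minimal sketch of the one required callback, assuming a driver VM type (example_vm) that embeds the GPU SVM structure; the page-table zap itself is left as a comment:

struct example_vm {
	struct drm_gpusvm gpusvm;
	/* ... driver page-table state ... */
};

static void example_svm_invalidate(struct drm_gpusvm *gpusvm,
				   struct drm_gpusvm_notifier *notifier,
				   const struct mmu_notifier_range *mmu_range)
{
	struct example_vm *vm = container_of(gpusvm, struct example_vm, gpusvm);

	/*
	 * Called with the notifier lock held; zap the GPU PTEs that back
	 * [mmu_range->start, mmu_range->end) in vm's page tables here.
	 */
}

static const struct drm_gpusvm_ops example_svm_ops = {
	.invalidate = example_svm_invalidate,
};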
+
+/**
+ * struct drm_gpusvm_notifier - Structure representing a GPU SVM notifier
+ *
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @notifier: MMU interval notifier
+ * @itree: Interval tree node for the notifier (inserted in GPU SVM)
+ * @entry: List entry for fast interval tree traversal
+ * @root: Cached root node of the RB tree containing ranges
+ * @range_list: List head of ranges in the same order they appear in the
+ *              interval tree. This is useful to keep iterating over ranges
+ *              while modifying the RB tree.
+ * @flags: Flags for notifier
+ * @flags.removed: Flag indicating whether the MMU interval notifier has been
+ * removed
+ *
+ * This structure represents a GPU SVM notifier.
+ */
+struct drm_gpusvm_notifier {
+ struct drm_gpusvm *gpusvm;
+ struct mmu_interval_notifier notifier;
+ struct interval_tree_node itree;
+ struct list_head entry;
+ struct rb_root_cached root;
+ struct list_head range_list;
+ struct {
+ u32 removed : 1;
+ } flags;
+};
+
+/**
+ * struct drm_gpusvm_range_flags - Structure representing a GPU SVM range flags
+ *
+ * @migrate_devmem: Flag indicating whether the range can be migrated to device memory
+ * @unmapped: Flag indicating if the range has been unmapped
+ * @partial_unmap: Flag indicating if the range has been partially unmapped
+ * @has_devmem_pages: Flag indicating if the range has devmem pages
+ * @has_dma_mapping: Flag indicating if the range has a DMA mapping
+ * @__flags: Flags for range in u16 form (used for READ_ONCE)
+ */
+struct drm_gpusvm_range_flags {
+ union {
+ struct {
+ /* All flags below must be set upon creation */
+ u16 migrate_devmem : 1;
+ /* All flags below must be set / cleared under notifier lock */
+ u16 unmapped : 1;
+ u16 partial_unmap : 1;
+ u16 has_devmem_pages : 1;
+ u16 has_dma_mapping : 1;
+ };
+ u16 __flags;
+ };
+};
+
+/**
+ * struct drm_gpusvm_range - Structure representing a GPU SVM range
+ *
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @notifier: Pointer to the GPU SVM notifier
+ * @refcount: Reference count for the range
+ * @itree: Interval tree node for the range (inserted in GPU SVM notifier)
+ * @entry: List entry for fast interval tree traversal
+ * @notifier_seq: Notifier sequence number of the range's pages
+ * @dma_addr: Device address array
+ * @dpagemap: The struct drm_pagemap of the device pages we're dma-mapping.
+ * Note this is assuming only one drm_pagemap per range is allowed.
+ * @flags: Flags for range
+ *
+ * This structure represents a GPU SVM range used for tracking memory ranges
+ * mapped in a DRM device.
+ */
+struct drm_gpusvm_range {
+ struct drm_gpusvm *gpusvm;
+ struct drm_gpusvm_notifier *notifier;
+ struct kref refcount;
+ struct interval_tree_node itree;
+ struct list_head entry;
+ unsigned long notifier_seq;
+ struct drm_pagemap_device_addr *dma_addr;
+ struct drm_pagemap *dpagemap;
+ struct drm_gpusvm_range_flags flags;
+};
+
+/**
+ * struct drm_gpusvm - GPU SVM structure
+ *
+ * @name: Name of the GPU SVM
+ * @drm: Pointer to the DRM device structure
+ * @mm: Pointer to the mm_struct for the address space
+ * @device_private_page_owner: Device private pages owner
+ * @mm_start: Start address of GPU SVM
+ * @mm_range: Range of the GPU SVM
+ * @notifier_size: Size of individual notifiers
+ * @ops: Pointer to the operations structure for GPU SVM
+ * @chunk_sizes: Pointer to the array of chunk sizes used in range allocation.
+ * Entries should be powers of 2 in descending order.
+ * @num_chunks: Number of chunks
+ * @notifier_lock: Read-write semaphore for protecting notifier operations
+ * @root: Cached root node of the Red-Black tree containing GPU SVM notifiers
+ * @notifier_list: List head of notifiers in the same order they appear in
+ *                 the interval tree. This is useful to keep iterating over
+ *                 notifiers while modifying the RB tree.
+ *
+ * This structure represents a GPU SVM (Shared Virtual Memory) used for tracking
+ * memory ranges mapped in a DRM (Direct Rendering Manager) device.
+ *
+ * No reference counting is provided, as this is expected to be embedded in the
+ * driver VM structure along with the struct drm_gpuvm, which handles reference
+ * counting.
+ */
+struct drm_gpusvm {
+ const char *name;
+ struct drm_device *drm;
+ struct mm_struct *mm;
+ void *device_private_page_owner;
+ unsigned long mm_start;
+ unsigned long mm_range;
+ unsigned long notifier_size;
+ const struct drm_gpusvm_ops *ops;
+ const unsigned long *chunk_sizes;
+ int num_chunks;
+ struct rw_semaphore notifier_lock;
+ struct rb_root_cached root;
+ struct list_head notifier_list;
+#ifdef CONFIG_LOCKDEP
+ /**
+ * @lock_dep_map: Annotates drm_gpusvm_range_find_or_insert and
+ * drm_gpusvm_range_remove with a driver provided lock.
+ */
+ struct lockdep_map *lock_dep_map;
+#endif
+};
+
+/**
+ * struct drm_gpusvm_ctx - DRM GPU SVM context
+ *
+ * @check_pages_threshold: Check whether CPU pages are present if the chunk is
+ *                         less than or equal to this threshold. If not
+ *                         present, reduce the chunk size.
+ * @timeslice_ms: The minimum time, in milliseconds, that a piece of memory
+ *                remains with either exclusive GPU or CPU access.
+ * @in_notifier: entering from a MMU notifier
+ * @read_only: operating on read-only memory
+ * @devmem_possible: possible to use device memory
+ * @devmem_only: use only device memory
+ *
+ * Context that DRM GPUSVM is operating in (i.e. user arguments).
+ */
+struct drm_gpusvm_ctx {
+ unsigned long check_pages_threshold;
+ unsigned long timeslice_ms;
+ unsigned int in_notifier :1;
+ unsigned int read_only :1;
+ unsigned int devmem_possible :1;
+ unsigned int devmem_only :1;
+};
+
+int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
+ const char *name, struct drm_device *drm,
+ struct mm_struct *mm, void *device_private_page_owner,
+ unsigned long mm_start, unsigned long mm_range,
+ unsigned long notifier_size,
+ const struct drm_gpusvm_ops *ops,
+ const unsigned long *chunk_sizes, int num_chunks);
+
+void drm_gpusvm_fini(struct drm_gpusvm *gpusvm);
+
+void drm_gpusvm_free(struct drm_gpusvm *gpusvm);
+
+struct drm_gpusvm_range *
+drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm,
+ unsigned long fault_addr,
+ unsigned long gpuva_start,
+ unsigned long gpuva_end,
+ const struct drm_gpusvm_ctx *ctx);
+
+void drm_gpusvm_range_remove(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range);
+
+int drm_gpusvm_range_evict(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range);
+
+struct drm_gpusvm_range *
+drm_gpusvm_range_get(struct drm_gpusvm_range *range);
+
+void drm_gpusvm_range_put(struct drm_gpusvm_range *range);
+
+bool drm_gpusvm_range_pages_valid(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range);
+
+int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range,
+ const struct drm_gpusvm_ctx *ctx);
+
+void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range,
+ const struct drm_gpusvm_ctx *ctx);
+
+int drm_gpusvm_migrate_to_devmem(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range,
+ struct drm_gpusvm_devmem *devmem_allocation,
+ const struct drm_gpusvm_ctx *ctx);
+
+int drm_gpusvm_evict_to_ram(struct drm_gpusvm_devmem *devmem_allocation);
+
+const struct dev_pagemap_ops *drm_gpusvm_pagemap_ops_get(void);
+
+bool drm_gpusvm_has_mapping(struct drm_gpusvm *gpusvm, unsigned long start,
+ unsigned long end);
+
+struct drm_gpusvm_range *
+drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
+ unsigned long end);
+
+void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
+ const struct mmu_notifier_range *mmu_range);
+
+void drm_gpusvm_devmem_init(struct drm_gpusvm_devmem *devmem_allocation,
+ struct device *dev, struct mm_struct *mm,
+ const struct drm_gpusvm_devmem_ops *ops,
+ struct drm_pagemap *dpagemap, size_t size);
+
+#ifdef CONFIG_LOCKDEP
+/**
+ * drm_gpusvm_driver_set_lock() - Set the lock protecting accesses to GPU SVM
+ * @gpusvm: Pointer to the GPU SVM structure.
+ * @lock: the lock used to protect the gpuva list. The locking primitive
+ * must contain a dep_map field.
+ *
+ * Call this to annotate drm_gpusvm_range_find_or_insert and
+ * drm_gpusvm_range_remove.
+ */
+#define drm_gpusvm_driver_set_lock(gpusvm, lock) \
+ do { \
+ if (!WARN((gpusvm)->lock_dep_map, \
+ "GPUSVM range lock should be set only once."))\
+ (gpusvm)->lock_dep_map = &(lock)->dep_map; \
+ } while (0)
+#else
+#define drm_gpusvm_driver_set_lock(gpusvm, lock) do {} while (0)
+#endif
+
+/**
+ * drm_gpusvm_notifier_lock() - Lock GPU SVM notifier
+ * @gpusvm__: Pointer to the GPU SVM structure.
+ *
+ * Abstraction over the client-provided GPU SVM notifier lock; takes the lock.
+ */
+#define drm_gpusvm_notifier_lock(gpusvm__) \
+ down_read(&(gpusvm__)->notifier_lock)
+
+/**
+ * drm_gpusvm_notifier_unlock() - Unlock GPU SVM notifier
+ * @gpusvm__: Pointer to the GPU SVM structure.
+ *
+ * Abstraction over the client-provided GPU SVM notifier lock; drops the lock.
+ */
+#define drm_gpusvm_notifier_unlock(gpusvm__) \
+ up_read(&(gpusvm__)->notifier_lock)
+
+/**
+ * drm_gpusvm_range_start() - GPU SVM range start address
+ * @range: Pointer to the GPU SVM range
+ *
+ * Return: GPU SVM range start address
+ */
+static inline unsigned long
+drm_gpusvm_range_start(struct drm_gpusvm_range *range)
+{
+ return range->itree.start;
+}
+
+/**
+ * drm_gpusvm_range_end() - GPU SVM range end address
+ * @range: Pointer to the GPU SVM range
+ *
+ * Return: GPU SVM range end address
+ */
+static inline unsigned long
+drm_gpusvm_range_end(struct drm_gpusvm_range *range)
+{
+ return range->itree.last + 1;
+}
+
+/**
+ * drm_gpusvm_range_size() - GPU SVM range size
+ * @range: Pointer to the GPU SVM range
+ *
+ * Return: GPU SVM range size
+ */
+static inline unsigned long
+drm_gpusvm_range_size(struct drm_gpusvm_range *range)
+{
+ return drm_gpusvm_range_end(range) - drm_gpusvm_range_start(range);
+}
+
+/**
+ * drm_gpusvm_notifier_start() - GPU SVM notifier start address
+ * @notifier: Pointer to the GPU SVM notifier
+ *
+ * Return: GPU SVM notifier start address
+ */
+static inline unsigned long
+drm_gpusvm_notifier_start(struct drm_gpusvm_notifier *notifier)
+{
+ return notifier->itree.start;
+}
+
+/**
+ * drm_gpusvm_notifier_end() - GPU SVM notifier end address
+ * @notifier: Pointer to the GPU SVM notifier
+ *
+ * Return: GPU SVM notifier end address
+ */
+static inline unsigned long
+drm_gpusvm_notifier_end(struct drm_gpusvm_notifier *notifier)
+{
+ return notifier->itree.last + 1;
+}
+
+/**
+ * drm_gpusvm_notifier_size() - GPU SVM notifier size
+ * @notifier: Pointer to the GPU SVM notifier
+ *
+ * Return: GPU SVM notifier size
+ */
+static inline unsigned long
+drm_gpusvm_notifier_size(struct drm_gpusvm_notifier *notifier)
+{
+ return drm_gpusvm_notifier_end(notifier) -
+ drm_gpusvm_notifier_start(notifier);
+}
+
+/**
+ * __drm_gpusvm_range_next() - Get the next GPU SVM range in the list
+ * @range: a pointer to the current GPU SVM range
+ *
+ * Return: A pointer to the next drm_gpusvm_range if available, or NULL if the
+ * current range is the last one or if the input range is NULL.
+ */
+static inline struct drm_gpusvm_range *
+__drm_gpusvm_range_next(struct drm_gpusvm_range *range)
+{
+ if (range && !list_is_last(&range->entry,
+ &range->notifier->range_list))
+ return list_next_entry(range, entry);
+
+ return NULL;
+}
+
+/**
+ * drm_gpusvm_for_each_range() - Iterate over GPU SVM ranges in a notifier
+ * @range__: Iterator variable for the ranges. If set, it indicates the start of
+ *	       the iteration. If NULL, drm_gpusvm_range_find() is called to get
+ *	       the first range.
+ * @notifier__: Pointer to the GPU SVM notifier
+ * @start__: Start address of the range
+ * @end__: End address of the range
+ *
+ * This macro is used to iterate over GPU SVM ranges in a notifier. It is safe
+ * to use while holding the driver SVM lock or the notifier lock.
+ */
+#define drm_gpusvm_for_each_range(range__, notifier__, start__, end__) \
+ for ((range__) = (range__) ?: \
+ drm_gpusvm_range_find((notifier__), (start__), (end__)); \
+ (range__) && (drm_gpusvm_range_start(range__) < (end__)); \
+ (range__) = __drm_gpusvm_range_next(range__))
+
+#endif /* __DRM_GPUSVM_H__ */
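
A usage sketch for the iterator above; only drm_gpusvm_for_each_range() and drm_gpusvm_range_set_unmapped() come from this header, the helper wrapping them is assumed:

static void example_unmap_ranges(struct drm_gpusvm_notifier *notifier,
				 const struct mmu_notifier_range *mmu_range)
{
	struct drm_gpusvm_range *range = NULL;

	/* Must be called with the notifier lock held. */
	drm_gpusvm_for_each_range(range, notifier, mmu_range->start,
				  mmu_range->end)
		drm_gpusvm_range_set_unmapped(range, mmu_range);
}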
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
index 00d4e43b76b6..2a9629377633 100644
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -812,6 +812,11 @@ enum drm_gpuva_op_type {
* @DRM_GPUVA_OP_PREFETCH: the prefetch op type
*/
DRM_GPUVA_OP_PREFETCH,
+
+ /**
+ * @DRM_GPUVA_OP_DRIVER: the driver defined op type
+ */
+ DRM_GPUVA_OP_DRIVER,
};
/**
diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h
index afdd46ef04f7..4948379237e9 100644
--- a/include/drm/drm_kunit_helpers.h
+++ b/include/drm/drm_kunit_helpers.h
@@ -9,6 +9,7 @@
#include <kunit/test.h>
+struct drm_connector;
struct drm_crtc_funcs;
struct drm_crtc_helper_funcs;
struct drm_device;
@@ -95,8 +96,6 @@ __drm_kunit_helper_alloc_drm_device(struct kunit *test,
sizeof(_type), \
offsetof(_type, _member), \
_feat))
-struct drm_modeset_acquire_ctx *
-drm_kunit_helper_acquire_ctx_alloc(struct kunit *test);
struct drm_atomic_state *
drm_kunit_helper_atomic_state_alloc(struct kunit *test,
@@ -120,6 +119,16 @@ drm_kunit_helper_create_crtc(struct kunit *test,
const struct drm_crtc_funcs *funcs,
const struct drm_crtc_helper_funcs *helper_funcs);
+int drm_kunit_helper_enable_crtc_connector(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_crtc *crtc,
+ struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ struct drm_modeset_acquire_ctx *ctx);
+
+int drm_kunit_add_mode_destroy_action(struct kunit *test,
+ struct drm_display_mode *mode);
+
struct drm_display_mode *
drm_kunit_display_mode_from_cea_vic(struct kunit *test, struct drm_device *dev,
u8 video_code);
diff --git a/include/drm/drm_managed.h b/include/drm/drm_managed.h
index f547b09ca023..53017cc609ac 100644
--- a/include/drm/drm_managed.h
+++ b/include/drm/drm_managed.h
@@ -127,4 +127,16 @@ void __drmm_mutex_release(struct drm_device *dev, void *res);
drmm_add_action_or_reset(dev, __drmm_mutex_release, lock); \
}) \
+void __drmm_workqueue_release(struct drm_device *device, void *wq);
+
+#define drmm_alloc_ordered_workqueue(dev, fmt, flags, args...) \
+ ({ \
+ struct workqueue_struct *wq = alloc_ordered_workqueue(fmt, flags, ##args); \
+ wq ? ({ \
+ int ret = drmm_add_action_or_reset(dev, __drmm_workqueue_release, wq); \
+ ret ? ERR_PTR(ret) : wq; \
+ }) : \
+ wq; \
+ })
+
#endif
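
A hedged usage sketch for the new macro; per its definition above, allocation failure yields NULL while a failed devm registration yields an ERR_PTR, so both cases are checked:

static int example_init_wq(struct drm_device *drm)
{
	struct workqueue_struct *wq;

	wq = drmm_alloc_ordered_workqueue(drm, "example-ordered-wq", 0);
	if (!wq)
		return -ENOMEM;
	if (IS_ERR(wq))
		return PTR_ERR(wq);

	/* The workqueue is destroyed automatically when drm is released. */
	return 0;
}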
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 94400a78031f..6d2c08e81101 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -223,6 +223,9 @@ struct mipi_dsi_multi_context {
#define to_mipi_dsi_device(__dev) container_of_const(__dev, struct mipi_dsi_device, dev)
+extern const struct bus_type mipi_dsi_bus_type;
+#define dev_is_mipi_dsi(dev) ((dev)->bus == &mipi_dsi_bus_type)
+
/**
* mipi_dsi_pixel_format_to_bpp - obtain the number of bits per pixel for any
* given pixel format defined by the MIPI DSI
@@ -293,6 +296,7 @@ void mipi_dsi_generic_write_multi(struct mipi_dsi_multi_context *ctx,
const void *payload, size_t size);
ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params,
size_t num_params, void *data, size_t size);
+u32 drm_mipi_dsi_get_input_bus_fmt(enum mipi_dsi_pixel_format dsi_format);
#define mipi_dsi_msleep(ctx, delay) \
do { \
@@ -346,7 +350,6 @@ int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
u16 end);
int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
u16 end);
-int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi);
int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
enum mipi_dsi_dcs_tear_mode mode);
int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format);
@@ -379,6 +382,7 @@ void mipi_dsi_dcs_set_page_address_multi(struct mipi_dsi_multi_context *ctx,
u16 start, u16 end);
void mipi_dsi_dcs_set_tear_scanline_multi(struct mipi_dsi_multi_context *ctx,
u16 scanline);
+void mipi_dsi_dcs_set_tear_off_multi(struct mipi_dsi_multi_context *ctx);
/**
* mipi_dsi_generic_write_seq - transmit data using a generic write packet
@@ -417,28 +421,6 @@ void mipi_dsi_dcs_set_tear_scanline_multi(struct mipi_dsi_multi_context *ctx,
} while (0)
/**
- * mipi_dsi_dcs_write_seq - transmit a DCS command with payload
- *
- * This macro will print errors for you and will RETURN FROM THE CALLING
- * FUNCTION (yes this is non-intuitive) upon error.
- *
- * Because of the non-intuitive return behavior, THIS MACRO IS DEPRECATED.
- * Please replace calls of it with mipi_dsi_dcs_write_seq_multi().
- *
- * @dsi: DSI peripheral device
- * @cmd: Command
- * @seq: buffer containing data to be transmitted
- */
-#define mipi_dsi_dcs_write_seq(dsi, cmd, seq...) \
- do { \
- static const u8 d[] = { cmd, seq }; \
- int ret; \
- ret = mipi_dsi_dcs_write_buffer_chatty(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
-/**
* mipi_dsi_dcs_write_seq_multi - transmit a DCS command with payload
*
* This macro will print errors for you and error handling is optimized for
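
For readers converting away from the removed mipi_dsi_dcs_write_seq(), a sketch of the _multi call style it points to; the mipi_dsi_multi_context fields (.dsi, .accum_err) and the extra *_multi helpers reflect typical usage and are assumptions here, not taken from this hunk:

static int example_panel_power_on(struct mipi_dsi_device *dsi)
{
	struct mipi_dsi_multi_context ctx = { .dsi = dsi };

	/* Errors accumulate in ctx; later calls become no-ops after a failure. */
	mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x04);
	mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
	mipi_dsi_msleep(&ctx, 120);
	mipi_dsi_dcs_set_display_on_multi(&ctx);

	return ctx.accum_err;
}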
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 271765e2e9f2..9e524b51a001 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -532,8 +532,8 @@ struct drm_mode_config {
*/
struct list_head privobj_list;
- int min_width, min_height;
- int max_width, max_height;
+ unsigned int min_width, min_height;
+ unsigned int max_width, max_height;
const struct drm_mode_config_funcs *funcs;
/* output poll support */
@@ -937,6 +937,12 @@ struct drm_mode_config {
struct drm_property *modifiers_property;
/**
+	 * @async_modifiers_property: Plane property listing the supported
+	 * modifier/format combinations for asynchronous flips.
+ */
+ struct drm_property *async_modifiers_property;
+
+ /**
* @size_hints_property: Plane SIZE_HINTS property.
*/
struct drm_property *size_hints_property;
diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h
index 08d7a7f0188f..c68edbd126d0 100644
--- a/include/drm/drm_mode_object.h
+++ b/include/drm/drm_mode_object.h
@@ -35,7 +35,7 @@ struct drm_file;
* @id: userspace visible identifier
* @type: type of the object, one of DRM_MODE_OBJECT\_\*
* @properties: properties attached to this object, including values
- * @refcount: reference count for objects which with dynamic lifetime
+ * @refcount: reference count for objects with dynamic lifetime
* @free_cb: free function callback, only set for objects with dynamic lifetime
*
* Base structure for modeset objects visible to userspace. Objects can be
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index ec59015aec3c..ce7c7aeac887 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -967,7 +967,7 @@ struct drm_connector_helper_funcs {
* drm_mode_status.
*/
enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
/**
* @mode_valid_ctx:
@@ -1006,7 +1006,7 @@ struct drm_connector_helper_funcs {
*
*/
int (*mode_valid_ctx)(struct drm_connector *connector,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_modeset_acquire_ctx *ctx,
enum drm_mode_status *status);
@@ -1400,13 +1400,18 @@ struct drm_plane_helper_funcs {
* given update can be committed asynchronously, that is, if it can
* jump ahead of the state currently queued for update.
*
+	 * This function is also used by drm_atomic_set_property() to determine
+	 * whether the plane can be flipped asynchronously. The flip flag is used
+	 * to distinguish whether the function is called for just the plane state
+	 * or for an async flip.
+ *
* RETURNS:
*
* Return 0 on success and any error returned indicates that the update
* can not be applied in asynchronous manner.
*/
int (*atomic_async_check)(struct drm_plane *plane,
- struct drm_atomic_state *state);
+ struct drm_atomic_state *state, bool flip);
/**
* @atomic_async_update:
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
new file mode 100644
index 000000000000..202c157ff4d7
--- /dev/null
+++ b/include/drm/drm_pagemap.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef _DRM_PAGEMAP_H_
+#define _DRM_PAGEMAP_H_
+
+#include <linux/dma-direction.h>
+#include <linux/hmm.h>
+#include <linux/types.h>
+
+struct drm_pagemap;
+struct device;
+
+/**
+ * enum drm_interconnect_protocol - Used to identify an interconnect protocol.
+ *
+ * @DRM_INTERCONNECT_SYSTEM: DMA map is system pages
+ * @DRM_INTERCONNECT_DRIVER: DMA map is driver defined
+ */
+enum drm_interconnect_protocol {
+ DRM_INTERCONNECT_SYSTEM,
+ DRM_INTERCONNECT_DRIVER,
+ /* A driver can add private values beyond DRM_INTERCONNECT_DRIVER */
+};
+
+/**
+ * struct drm_pagemap_device_addr - Device address representation.
+ * @addr: The dma address or driver-defined address for driver private interconnects.
+ * @proto: The interconnect protocol.
+ * @order: The page order of the device mapping. (Size is PAGE_SIZE << order).
+ * @dir: The DMA direction.
+ *
+ * Note: There is room for improvement here. We should be able to pack into
+ * 64 bits.
+ */
+struct drm_pagemap_device_addr {
+ dma_addr_t addr;
+ u64 proto : 54;
+ u64 order : 8;
+ u64 dir : 2;
+};
+
+/**
+ * drm_pagemap_device_addr_encode() - Encode a dma address with metadata
+ * @addr: The dma address or driver-defined address for driver private interconnects.
+ * @proto: The interconnect protocol.
+ * @order: The page order of the dma mapping. (Size is PAGE_SIZE << order).
+ * @dir: The DMA direction.
+ *
+ * Return: A struct drm_pagemap_device_addr encoding the above information.
+ */
+static inline struct drm_pagemap_device_addr
+drm_pagemap_device_addr_encode(dma_addr_t addr,
+ enum drm_interconnect_protocol proto,
+ unsigned int order,
+ enum dma_data_direction dir)
+{
+ return (struct drm_pagemap_device_addr) {
+ .addr = addr,
+ .proto = proto,
+ .order = order,
+ .dir = dir,
+ };
+}
+
+/**
+ * struct drm_pagemap_ops: Ops for a drm-pagemap.
+ */
+struct drm_pagemap_ops {
+ /**
+ * @device_map: Map for device access or provide a virtual address suitable for
+ *
+ * @dpagemap: The struct drm_pagemap for the page.
+ * @dev: The device mapper.
+ * @page: The page to map.
+ * @order: The page order of the device mapping. (Size is PAGE_SIZE << order).
+ * @dir: The transfer direction.
+ */
+ struct drm_pagemap_device_addr (*device_map)(struct drm_pagemap *dpagemap,
+ struct device *dev,
+ struct page *page,
+ unsigned int order,
+ enum dma_data_direction dir);
+
+ /**
+ * @device_unmap: Unmap a device address previously obtained using @device_map.
+ *
+ * @dpagemap: The struct drm_pagemap for the mapping.
+ * @dev: The device unmapper.
+ * @addr: The device address obtained when mapping.
+ */
+ void (*device_unmap)(struct drm_pagemap *dpagemap,
+ struct device *dev,
+ struct drm_pagemap_device_addr addr);
+
+};
+
+/**
+ * struct drm_pagemap: Additional information for a struct dev_pagemap
+ * used for device p2p handshaking.
+ * @ops: The struct drm_pagemap_ops.
+ * @dev: The struct device owning the device-private memory.
+ */
+struct drm_pagemap {
+ const struct drm_pagemap_ops *ops;
+ struct device *dev;
+};
+
+#endif
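
To make the ops concrete, a hedged sketch of a device_map implementation for ordinary system-interconnect pages; drivers with private interconnects would instead return a driver-defined address with DRM_INTERCONNECT_DRIVER:

static struct drm_pagemap_device_addr
example_device_map(struct drm_pagemap *dpagemap, struct device *dev,
		   struct page *page, unsigned int order,
		   enum dma_data_direction dir)
{
	dma_addr_t addr = dma_map_page(dev, page, 0, PAGE_SIZE << order, dir);

	return drm_pagemap_device_addr_encode(addr, DRM_INTERCONNECT_SYSTEM,
					      order, dir);
}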
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 10015891b056..843fb756a295 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -28,12 +28,12 @@
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>
+#include <linux/kref.h>
struct backlight_device;
struct dentry;
struct device_node;
struct drm_connector;
-struct drm_device;
struct drm_panel_follower;
struct drm_panel;
struct display_timing;
@@ -267,20 +267,60 @@ struct drm_panel {
* If true then the panel has been enabled.
*/
bool enabled;
+
+ /**
+ * @container: Pointer to the private driver struct embedding this
+	 * &struct drm_panel.
+ */
+ void *container;
+
+ /**
+ * @refcount: reference count of users referencing this panel.
+ */
+ struct kref refcount;
};
+void *__devm_drm_panel_alloc(struct device *dev, size_t size, size_t offset,
+ const struct drm_panel_funcs *funcs,
+ int connector_type);
+
+/**
+ * devm_drm_panel_alloc - Allocate and initialize a refcounted panel.
+ *
+ * @dev: struct device of the panel device
+ * @type: the type of the struct which contains the &struct drm_panel
+ * @member: the name of the &drm_panel within @type
+ * @funcs: callbacks for this panel
+ * @connector_type: the connector type (DRM_MODE_CONNECTOR_*) corresponding to
+ * the panel interface
+ *
+ * The reference count of the returned panel is initialized to 1. This
+ * reference will be automatically dropped via devm (by calling
+ * drm_panel_put()) when @dev is removed.
+ *
+ * Returns:
+ * Pointer to container structure embedding the panel, ERR_PTR on failure.
+ */
+#define devm_drm_panel_alloc(dev, type, member, funcs, connector_type) \
+ ((type *)__devm_drm_panel_alloc(dev, sizeof(type), \
+ offsetof(type, member), funcs, \
+ connector_type))
+
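
A usage sketch under stated assumptions (example_panel, example_panel_funcs and the DSI probe path are all made up) showing how a driver would allocate the refcounted panel:

static const struct drm_panel_funcs example_panel_funcs; /* assumed to be defined elsewhere */

struct example_panel {
	struct drm_panel panel;
	struct mipi_dsi_device *dsi;
};

static int example_panel_probe(struct mipi_dsi_device *dsi)
{
	struct example_panel *ctx;

	ctx = devm_drm_panel_alloc(&dsi->dev, struct example_panel, panel,
				   &example_panel_funcs,
				   DRM_MODE_CONNECTOR_DSI);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->dsi = dsi;
	drm_panel_add(&ctx->panel);

	return 0;
}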
void drm_panel_init(struct drm_panel *panel, struct device *dev,
const struct drm_panel_funcs *funcs,
int connector_type);
+struct drm_panel *drm_panel_get(struct drm_panel *panel);
+void drm_panel_put(struct drm_panel *panel);
+
void drm_panel_add(struct drm_panel *panel);
void drm_panel_remove(struct drm_panel *panel);
-int drm_panel_prepare(struct drm_panel *panel);
-int drm_panel_unprepare(struct drm_panel *panel);
+void drm_panel_prepare(struct drm_panel *panel);
+void drm_panel_unprepare(struct drm_panel *panel);
-int drm_panel_enable(struct drm_panel *panel);
-int drm_panel_disable(struct drm_panel *panel);
+void drm_panel_enable(struct drm_panel *panel);
+void drm_panel_disable(struct drm_panel *panel);
int drm_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector);
diff --git a/include/drm/drm_panic.h b/include/drm/drm_panic.h
index f4e1fa9ae607..310c88c4d336 100644
--- a/include/drm/drm_panic.h
+++ b/include/drm/drm_panic.h
@@ -40,6 +40,16 @@ struct drm_scanout_buffer {
struct iosys_map map[DRM_FORMAT_MAX_PLANES];
/**
+ * @pages: Optional, if the scanout buffer is not mapped, set this field
+ * to the array of pages of the scanout buffer. The panic code will use
+ * kmap_local_page_try_from_panic() to map one page at a time to write
+ * all the pixels. This array shouldn't be allocated from the
+ * get_scanoutbuffer() callback.
+ * The scanout buffer should be in linear format.
+ */
+ struct page **pages;
+
+ /**
* @width: Width of the scanout buffer, in pixels.
*/
unsigned int width;
@@ -57,7 +67,7 @@ struct drm_scanout_buffer {
/**
* @set_pixel: Optional function, to set a pixel color on the
* framebuffer. It allows to handle special tiling format inside the
- * driver.
+ * driver. It takes precedence over the @map and @pages fields.
*/
void (*set_pixel)(struct drm_scanout_buffer *sb, unsigned int x,
unsigned int y, u32 color);
@@ -163,4 +173,11 @@ static inline void drm_panic_unlock(struct drm_device *dev, unsigned long flags)
#endif
+#if defined(CONFIG_DRM_PANIC_SCREEN_QR_CODE)
+size_t drm_panic_qr_max_data_size(u8 version, size_t url_len);
+
+u8 drm_panic_qr_generate(const char *url, u8 *data, size_t data_len, size_t data_size,
+ u8 *tmp, size_t tmp_size);
+#endif
+
#endif /* __DRM_PANIC_H__ */
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index dd718c62ac31..01479dd94e76 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -549,6 +549,23 @@ struct drm_plane_funcs {
*/
bool (*format_mod_supported)(struct drm_plane *plane, uint32_t format,
uint64_t modifier);
+ /**
+ * @format_mod_supported_async:
+ *
+	 * This optional hook is used by the DRM core to determine whether the
+	 * given format/modifier combination is valid for the plane when doing an
+	 * asynchronous flip. This allows the DRM core to generate the correct
+	 * format bitmask (which formats apply to which modifier), and to validate
+	 * modifiers at atomic_check time.
+ *
+ * Returns:
+ *
+ * True if the given modifier is valid for that format on the plane.
+ * False otherwise.
+ */
+ bool (*format_mod_supported_async)(struct drm_plane *plane,
+ u32 format, u64 modifier);
+
};
/**
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index b3906dc04388..ab017b05e175 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -32,6 +32,7 @@
#include <linux/dynamic_debug.h>
#include <drm/drm.h>
+#include <drm/drm_device.h>
struct debugfs_regset32;
struct drm_device;
@@ -199,6 +200,8 @@ void drm_puts(struct drm_printer *p, const char *str);
void drm_print_regset32(struct drm_printer *p, struct debugfs_regset32 *regset);
void drm_print_bits(struct drm_printer *p, unsigned long value,
const char * const bits[], unsigned int nbits);
+void drm_print_hex_dump(struct drm_printer *p, const char *prefix,
+ const u8 *buf, size_t len);
__printf(2, 0)
/**
@@ -342,6 +345,26 @@ drm_coredump_printer(struct drm_print_iterator *iter)
}
/**
+ * drm_coredump_printer_is_full() - DRM coredump printer output is full
+ * @p: DRM coredump printer
+ *
+ * Checks whether the DRM coredump printer output buffer is full. Useful to
+ * short-circuit coredump printing once the printer is full.
+ *
+ * RETURNS:
+ * True if DRM coredump printer output buffer is full, False otherwise
+ */
+static inline bool drm_coredump_printer_is_full(struct drm_printer *p)
+{
+ struct drm_print_iterator *iterator = p->arg;
+
+ if (p->printfn != __drm_printfn_coredump)
+ return true;
+
+ return !iterator->remain;
+}
+
+/**
* drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file
* @f: the &struct seq_file to output to
*
@@ -581,9 +604,15 @@ void __drm_dev_dbg(struct _ddebug *desc, const struct device *dev,
* Prefer drm_device based logging over device or prink based logging.
*/
+/* Helper to enforce struct drm_device type */
+static inline struct device *__drm_to_dev(const struct drm_device *drm)
+{
+ return drm ? drm->dev : NULL;
+}
+
/* Helper for struct drm_device based logging. */
#define __drm_printk(drm, level, type, fmt, ...) \
- dev_##level##type((drm) ? (drm)->dev : NULL, "[drm] " fmt, ##__VA_ARGS__)
+ dev_##level##type(__drm_to_dev(drm), "[drm] " fmt, ##__VA_ARGS__)
#define drm_info(drm, fmt, ...) \
@@ -617,25 +646,25 @@ void __drm_dev_dbg(struct _ddebug *desc, const struct device *dev,
#define drm_dbg_core(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_CORE, fmt, ##__VA_ARGS__)
-#define drm_dbg_driver(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_CORE, fmt, ##__VA_ARGS__)
+#define drm_dbg_driver(drm, fmt, ...) \
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
#define drm_dbg_kms(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_KMS, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_KMS, fmt, ##__VA_ARGS__)
#define drm_dbg_prime(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_PRIME, fmt, ##__VA_ARGS__)
#define drm_dbg_atomic(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
#define drm_dbg_vbl(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_VBL, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_VBL, fmt, ##__VA_ARGS__)
#define drm_dbg_state(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_STATE, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_STATE, fmt, ##__VA_ARGS__)
#define drm_dbg_lease(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_LEASE, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_LEASE, fmt, ##__VA_ARGS__)
#define drm_dbg_dp(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DP, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_DP, fmt, ##__VA_ARGS__)
#define drm_dbg_drmres(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRMRES, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_DRMRES, fmt, ##__VA_ARGS__)
#define drm_dbg(drm, fmt, ...) drm_dbg_driver(drm, fmt, ##__VA_ARGS__)
@@ -724,10 +753,9 @@ void __drm_err(const char *format, ...);
#define __DRM_DEFINE_DBG_RATELIMITED(category, drm, fmt, ...) \
({ \
static DEFINE_RATELIMIT_STATE(rs_, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);\
- const struct drm_device *drm_ = (drm); \
\
if (drm_debug_enabled(DRM_UT_ ## category) && __ratelimit(&rs_)) \
- drm_dev_printk(drm_ ? drm_->dev : NULL, KERN_DEBUG, fmt, ## __VA_ARGS__); \
+ drm_dev_printk(__drm_to_dev(drm), KERN_DEBUG, fmt, ## __VA_ARGS__); \
})
#define drm_dbg_ratelimited(drm, fmt, ...) \
@@ -749,13 +777,13 @@ void __drm_err(const char *format, ...);
/* Helper for struct drm_device based WARNs */
#define drm_WARN(drm, condition, format, arg...) \
WARN(condition, "%s %s: [drm] " format, \
- dev_driver_string((drm)->dev), \
- dev_name((drm)->dev), ## arg)
+ dev_driver_string(__drm_to_dev(drm)), \
+ dev_name(__drm_to_dev(drm)), ## arg)
#define drm_WARN_ONCE(drm, condition, format, arg...) \
WARN_ONCE(condition, "%s %s: [drm] " format, \
- dev_driver_string((drm)->dev), \
- dev_name((drm)->dev), ## arg)
+ dev_driver_string(__drm_to_dev(drm)), \
+ dev_name(__drm_to_dev(drm)), ## arg)
#define drm_WARN_ON(drm, x) \
drm_WARN((drm), (x), "%s", \
diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h
index d6ce7b218b77..840ae5f798c2 100644
--- a/include/drm/drm_probe_helper.h
+++ b/include/drm/drm_probe_helper.h
@@ -17,7 +17,7 @@ int drm_helper_probe_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
bool force);
-int drmm_kms_helper_poll_init(struct drm_device *dev);
+void drmm_kms_helper_poll_init(struct drm_device *dev);
void drm_kms_helper_poll_init(struct drm_device *dev);
void drm_kms_helper_poll_fini(struct drm_device *dev);
bool drm_helper_hpd_irq_event(struct drm_device *dev);
diff --git a/include/drm/drm_util.h b/include/drm/drm_util.h
index 79952d8c4bba..440199618620 100644
--- a/include/drm/drm_util.h
+++ b/include/drm/drm_util.h
@@ -36,6 +36,7 @@
#include <linux/kgdb.h>
#include <linux/preempt.h>
#include <linux/smp.h>
+#include <linux/util_macros.h>
/*
* Use EXPORT_SYMBOL_FOR_TESTS_ONLY() for functions that shall
@@ -48,21 +49,6 @@
#endif
/**
- * for_each_if - helper for handling conditionals in various for_each macros
- * @condition: The condition to check
- *
- * Typical use::
- *
- * #define for_each_foo_bar(x, y) \'
- * list_for_each_entry(x, y->list, head) \'
- * for_each_if(x->something == SOMETHING)
- *
- * The for_each_if() macro makes the use of for_each_foo_bar() less error
- * prone.
- */
-#define for_each_if(condition) if (!(condition)) {} else
-
-/**
* drm_can_sleep - returns true if currently okay to sleep
*
* This function shall not be used in new code.
diff --git a/include/drm/drm_writeback.h b/include/drm/drm_writeback.h
index 17e576c80169..c380a7b8f55a 100644
--- a/include/drm/drm_writeback.h
+++ b/include/drm/drm_writeback.h
@@ -161,6 +161,12 @@ int drm_writeback_connector_init_with_encoder(struct drm_device *dev,
const struct drm_connector_funcs *con_funcs, const u32 *formats,
int n_formats);
+int drmm_writeback_connector_init(struct drm_device *dev,
+ struct drm_writeback_connector *wb_connector,
+ const struct drm_connector_funcs *con_funcs,
+ struct drm_encoder *enc,
+ const u32 *formats, int n_formats);
+
int drm_writeback_set_fb(struct drm_connector_state *conn_state,
struct drm_framebuffer *fb);
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 95e17504e46a..1eed74c7f2f7 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -71,12 +71,6 @@ enum drm_sched_priority {
DRM_SCHED_PRIORITY_COUNT
};
-/* Used to choose between FIFO and RR job-scheduling */
-extern int drm_sched_policy;
-
-#define DRM_SCHED_POLICY_RR 0
-#define DRM_SCHED_POLICY_FIFO 1
-
/**
* struct drm_sched_entity - A wrapper around a job queue (typically
* attached to the DRM file_priv).
@@ -338,8 +332,14 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
* to schedule the job.
*/
struct drm_sched_job {
- struct spsc_node queue_node;
- struct list_head list;
+ u64 id;
+
+ /**
+ * @submit_ts:
+ *
+ * When the job was pushed into the entity queue.
+ */
+ ktime_t submit_ts;
/**
* @sched:
@@ -349,24 +349,30 @@ struct drm_sched_job {
* has finished.
*/
struct drm_gpu_scheduler *sched;
+
struct drm_sched_fence *s_fence;
+ struct drm_sched_entity *entity;
+ enum drm_sched_priority s_priority;
u32 credits;
+ /** @last_dependency: tracks @dependencies as they signal */
+ unsigned int last_dependency;
+ atomic_t karma;
+
+ struct spsc_node queue_node;
+ struct list_head list;
/*
* work is used only after finish_cb has been used and will not be
* accessed anymore.
*/
union {
- struct dma_fence_cb finish_cb;
- struct work_struct work;
+ struct dma_fence_cb finish_cb;
+ struct work_struct work;
};
- uint64_t id;
- atomic_t karma;
- enum drm_sched_priority s_priority;
- struct drm_sched_entity *entity;
struct dma_fence_cb cb;
+
/**
* @dependencies:
*
@@ -375,26 +381,17 @@ struct drm_sched_job {
* drm_sched_job_add_implicit_dependencies().
*/
struct xarray dependencies;
-
- /** @last_dependency: tracks @dependencies as they signal */
- unsigned long last_dependency;
-
- /**
- * @submit_ts:
- *
- * When the job was pushed into the entity queue.
- */
- ktime_t submit_ts;
};
-static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
- int threshold)
-{
- return s_job && atomic_inc_return(&s_job->karma) > threshold;
-}
-
+/**
+ * enum drm_gpu_sched_stat - the scheduler's status
+ *
+ * @DRM_GPU_SCHED_STAT_NONE: Reserved. Do not use.
+ * @DRM_GPU_SCHED_STAT_NOMINAL: Operation succeeded.
+ * @DRM_GPU_SCHED_STAT_ENODEV: Error: Device is not available anymore.
+ */
enum drm_gpu_sched_stat {
- DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
+ DRM_GPU_SCHED_STAT_NONE,
DRM_GPU_SCHED_STAT_NOMINAL,
DRM_GPU_SCHED_STAT_ENODEV,
};
@@ -420,10 +417,36 @@ struct drm_sched_backend_ops {
struct drm_sched_entity *s_entity);
/**
- * @run_job: Called to execute the job once all of the dependencies
- * have been resolved. This may be called multiple times, if
- * timedout_job() has happened and drm_sched_job_recovery()
- * decides to try it again.
+ * @run_job: Called to execute the job once all of the dependencies
+ * have been resolved.
+ *
+ * @sched_job: the job to run
+ *
+ * The deprecated drm_sched_resubmit_jobs() (called by &struct
+ * drm_sched_backend_ops.timedout_job) can invoke this again with the
+ * same parameters. Doing so is discouraged because it violates
+ * dma_fence rules, notably that dma_fence_init() gets called a second
+ * time on already initialized fences. Moreover, it is dangerous because
+ * attempts to allocate memory might deadlock with memory management
+ * code waiting for the reset to complete.
+ *
+ * TODO: Document what drivers should do / use instead.
+ *
+ * This method is called in a workqueue context - either from the
+ * submit_wq the driver passed through drm_sched_init(), or, if the
+ * driver passed NULL, a separate, ordered workqueue the scheduler
+ * allocated.
+ *
+ * Note that the scheduler expects to 'inherit' its own reference to
+ * the returned fence from the callback. It does not invoke an extra
+ * dma_fence_get() on it. Consequently, this callback must take a
+ * reference for the scheduler, and additional ones for the driver's
+ * respective needs.
+ *
+ * Return:
+ * * On success: dma_fence the driver must signal once the hardware has
+ * completed the job ("hardware fence").
+ * * On failure: NULL or an ERR_PTR.
*/
struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
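Editor's note: to make the fence-reference rule above concrete, a hedged sketch of a run_job implementation; struct my_job, my_ring_submit() and the stored hw_fence are assumptions of this sketch, not an existing driver API.

#include <linux/dma-fence.h>
#include <linux/err.h>
#include <drm/gpu_scheduler.h>

struct my_job {                         /* hypothetical driver job */
        struct drm_sched_job base;
        struct dma_fence *hw_fence;     /* signaled from the IRQ path */
};

/* Hypothetical hardware submission, assumed to return one fence reference. */
static struct dma_fence *my_ring_submit(struct my_job *job);

static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
{
        struct my_job *job = container_of(sched_job, struct my_job, base);
        struct dma_fence *fence;

        fence = my_ring_submit(job);
        if (IS_ERR_OR_NULL(fence))
                return fence;           /* NULL or ERR_PTR() on failure */

        /*
         * The reference returned to the scheduler is the one it inherits;
         * take an additional one for the driver's own bookkeeping.
         */
        job->hw_fence = dma_fence_get(fence);

        return fence;
}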
@@ -431,43 +454,52 @@ struct drm_sched_backend_ops {
* @timedout_job: Called when a job has taken too long to execute,
* to trigger GPU recovery.
*
- * This method is called in a workqueue context.
+ * @sched_job: The job that has timed out
+ *
+ * Drivers typically issue a reset to recover from GPU hangs.
+ * This procedure looks very different depending on whether a firmware
+ * or a hardware scheduler is being used.
+ *
+ * For a FIRMWARE SCHEDULER, each ring has one scheduler, and each
+ * scheduler has one entity. Hence, the steps taken typically look as
+ * follows:
*
- * Drivers typically issue a reset to recover from GPU hangs, and this
- * procedure usually follows the following workflow:
+ * 1. Stop the scheduler using drm_sched_stop(). This will pause the
+ * scheduler workqueues and cancel the timeout work, guaranteeing
+ * that nothing is queued while the ring is being removed.
+ * 2. Remove the ring. The firmware will make sure that the
+ * corresponding parts of the hardware are reset, and that other
+ * rings are not impacted.
+ * 3. Kill the entity and the associated scheduler.
*
- * 1. Stop the scheduler using drm_sched_stop(). This will park the
- * scheduler thread and cancel the timeout work, guaranteeing that
- * nothing is queued while we reset the hardware queue
- * 2. Try to gracefully stop non-faulty jobs (optional)
- * 3. Issue a GPU reset (driver-specific)
- * 4. Re-submit jobs using drm_sched_resubmit_jobs()
- * 5. Restart the scheduler using drm_sched_start(). At that point, new
- * jobs can be queued, and the scheduler thread is unblocked
+ *
+ * For a HARDWARE SCHEDULER, a scheduler instance schedules jobs from
+ * one or more entities to one ring. This implies that not all entities
+ * associated with the affected scheduler can be torn down, because that
+ * would also affect innocent userspace processes which did not submit
+ * any faulty jobs.
+ *
+ * Consequently, the procedure to recover with a hardware scheduler
+ * should look like this:
+ *
+ * 1. Stop all schedulers impacted by the reset using drm_sched_stop().
+ * 2. Kill the entity the faulty job stems from.
+ * 3. Issue a GPU reset on all faulty rings (driver-specific).
+ * 4. Re-submit jobs on all schedulers impacted by the reset to the
+ * entities which are still alive.
+ * 5. Restart all schedulers that were stopped in step #1 using
+ * drm_sched_start().
*
* Note that some GPUs have distinct hardware queues but need to reset
* the GPU globally, which requires extra synchronization between the
- * timeout handler of the different &drm_gpu_scheduler. One way to
- * achieve this synchronization is to create an ordered workqueue
- * (using alloc_ordered_workqueue()) at the driver level, and pass this
- * queue to drm_sched_init(), to guarantee that timeout handlers are
- * executed sequentially. The above workflow needs to be slightly
- * adjusted in that case:
- *
- * 1. Stop all schedulers impacted by the reset using drm_sched_stop()
- * 2. Try to gracefully stop non-faulty jobs on all queues impacted by
- * the reset (optional)
- * 3. Issue a GPU reset on all faulty queues (driver-specific)
- * 4. Re-submit jobs on all schedulers impacted by the reset using
- * drm_sched_resubmit_jobs()
- * 5. Restart all schedulers that were stopped in step #1 using
- * drm_sched_start()
+ * timeout handlers of different schedulers. One way to achieve this
+ * synchronization is to create an ordered workqueue (using
+ * alloc_ordered_workqueue()) at the driver level, and pass this queue
+ * as drm_sched_init()'s @timeout_wq parameter. This will guarantee
+ * that timeout handlers are executed sequentially.
*
- * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal,
- * and the underlying driver has started or completed recovery.
+ * Return: The scheduler's status, defined by &enum drm_gpu_sched_stat
*
- * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer
- * available, i.e. has been unplugged.
*/
enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);
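Editor's note: a hedged sketch of the hardware-scheduler flavour for a single-ring driver, covering steps 1, 3 and 5 above (entity teardown and re-submission are left out for brevity); my_dev, my_dev_from_sched() and my_gpu_reset() are made-up names.

#include <drm/gpu_scheduler.h>

struct my_dev;                                          /* hypothetical */
static struct my_dev *my_dev_from_sched(struct drm_gpu_scheduler *sched);
static int my_gpu_reset(struct my_dev *mdev);

static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *sched_job)
{
        struct drm_gpu_scheduler *sched = sched_job->sched;
        struct my_dev *mdev = my_dev_from_sched(sched);

        /* Step 1: park the scheduler so nothing new reaches the ring. */
        drm_sched_stop(sched, sched_job);

        /* Step 3: driver-specific reset of the faulty ring. */
        if (my_gpu_reset(mdev))
                return DRM_GPU_SCHED_STAT_ENODEV;

        /* Step 5: unblock the scheduler; new jobs may be queued again. */
        drm_sched_start(sched, 0);

        return DRM_GPU_SCHED_STAT_NOMINAL;
}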
@@ -478,17 +510,22 @@ struct drm_sched_backend_ops {
void (*free_job)(struct drm_sched_job *sched_job);
/**
- * @update_job_credits: Called when the scheduler is considering this
- * job for execution.
+ * @cancel_job: Used by the scheduler to guarantee remaining jobs' fences
+ * get signaled in drm_sched_fini().
+ *
+ * The scheduler calls this to cancel all jobs that have not been executed
+ * with &struct drm_sched_backend_ops.run_job by the time
+ * drm_sched_fini() gets invoked.
*
- * This callback returns the number of credits the job would take if
- * pushed to the hardware. Drivers may use this to dynamically update
- * the job's credit count. For instance, deduct the number of credits
- * for already signalled native fences.
+ * Drivers need to signal the passed job's hardware fence with an
+ * appropriate error code (e.g., -ECANCELED) in this callback. They
+ * must not free the job.
*
- * This callback is optional.
+ * The scheduler only invokes this callback after it has permanently
+ * stopped calling all other callbacks, with the exception of &struct
+ * drm_sched_backend_ops.free_job.
*/
- u32 (*update_job_credits)(struct drm_sched_job *sched_job);
+ void (*cancel_job)(struct drm_sched_job *sched_job);
};
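Editor's note: a hedged sketch of the new cancel_job callback, reusing the hypothetical struct my_job from the run_job sketch above; it signals the hardware fence with an error and leaves freeing to free_job, as the documentation requires.

static void my_cancel_job(struct drm_sched_job *sched_job)
{
        struct my_job *job = container_of(sched_job, struct my_job, base);

        /*
         * Release anyone waiting on the hardware fence; the job itself is
         * freed later through the free_job() callback.
         */
        dma_fence_set_error(job->hw_fence, -ECANCELED);
        dma_fence_signal(job->hw_fence);
}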
/**
@@ -553,18 +590,66 @@ struct drm_gpu_scheduler {
struct device *dev;
};
+/**
+ * struct drm_sched_init_args - parameters for initializing a DRM GPU scheduler
+ *
+ * @ops: backend operations provided by the driver
+ * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is
+ * allocated and used.
+ * @num_rqs: Number of run-queues. This may be at most DRM_SCHED_PRIORITY_COUNT,
+ * as there's usually one run-queue per priority, but may be less.
+ * @credit_limit: the number of credits this scheduler can hold from all jobs
+ * @hang_limit: number of times to allow a job to hang before dropping it.
+ * This mechanism is DEPRECATED. Set it to 0.
+ * @timeout: timeout value in jiffies for submitted jobs.
+ * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is used.
+ * @score: score atomic shared with other schedulers. May be NULL.
+ * @name: name (typically the driver's name). Used for debugging
+ * @dev: associated device. Used for debugging
+ */
+struct drm_sched_init_args {
+ const struct drm_sched_backend_ops *ops;
+ struct workqueue_struct *submit_wq;
+ struct workqueue_struct *timeout_wq;
+ u32 num_rqs;
+ u32 credit_limit;
+ unsigned int hang_limit;
+ long timeout;
+ atomic_t *score;
+ const char *name;
+ struct device *dev;
+};
+
+/* Scheduler operations */
+
int drm_sched_init(struct drm_gpu_scheduler *sched,
- const struct drm_sched_backend_ops *ops,
- struct workqueue_struct *submit_wq,
- u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
- long timeout, struct workqueue_struct *timeout_wq,
- atomic_t *score, const char *name, struct device *dev);
+ const struct drm_sched_init_args *args);
void drm_sched_fini(struct drm_gpu_scheduler *sched);
+
+unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
+void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
+ unsigned long remaining);
+void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
+bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
+void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
+void drm_sched_start(struct drm_gpu_scheduler *sched, int errno);
+void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
+void drm_sched_fault(struct drm_gpu_scheduler *sched);
+
+struct drm_gpu_scheduler *
+drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list);
+
+/* Jobs */
+
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
u32 credits, void *owner);
void drm_sched_job_arm(struct drm_sched_job *job);
+void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
int drm_sched_job_add_dependency(struct drm_sched_job *job,
struct dma_fence *fence);
int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
@@ -577,35 +662,18 @@ int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
struct drm_gem_object *obj,
bool write);
-
-
-void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
- struct drm_gpu_scheduler **sched_list,
- unsigned int num_sched_list);
-
-void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
+bool drm_sched_job_has_dependency(struct drm_sched_job *job,
+ struct dma_fence *fence);
void drm_sched_job_cleanup(struct drm_sched_job *job);
-void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
-bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
-void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
-void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
-void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
-void drm_sched_start(struct drm_gpu_scheduler *sched, int errno);
-void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
-void drm_sched_reset_karma(struct drm_sched_job *bad);
-void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
-bool drm_sched_dependency_optimized(struct dma_fence* fence,
- struct drm_sched_entity *entity);
-void drm_sched_fault(struct drm_gpu_scheduler *sched);
-void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
- struct drm_sched_entity *entity);
-void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
- struct drm_sched_entity *entity);
+static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
+ int threshold)
+{
+ return s_job && atomic_inc_return(&s_job->karma) > threshold;
+}
-void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
- struct drm_sched_rq *rq, ktime_t ts);
+/* Entities */
int drm_sched_entity_init(struct drm_sched_entity *entity,
enum drm_sched_priority priority,
@@ -615,29 +683,11 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
-void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
-struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
-void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
enum drm_sched_priority priority);
-bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
int drm_sched_entity_error(struct drm_sched_entity *entity);
-
-struct drm_sched_fence *drm_sched_fence_alloc(
- struct drm_sched_entity *s_entity, void *owner);
-void drm_sched_fence_init(struct drm_sched_fence *fence,
- struct drm_sched_entity *entity);
-void drm_sched_fence_free(struct drm_sched_fence *fence);
-
-void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
- struct dma_fence *parent);
-void drm_sched_fence_finished(struct drm_sched_fence *fence, int result);
-
-unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
-void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
- unsigned long remaining);
-struct drm_gpu_scheduler *
-drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
- unsigned int num_sched_list);
+void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
+ struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list);
#endif
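Editor's note: pulling the pieces together, a hedged sketch of scheduler creation with the new struct drm_sched_init_args; the embedded scheduler, my_sched_ops and the chosen limits are assumptions of the sketch, not values mandated by the API.

#include <linux/jiffies.h>
#include <drm/gpu_scheduler.h>

struct my_dev {                                         /* hypothetical */
        struct drm_gpu_scheduler sched;
};

static const struct drm_sched_backend_ops my_sched_ops;  /* run_job etc. */

static int my_sched_setup(struct my_dev *mdev, struct device *dev)
{
        const struct drm_sched_init_args args = {
                .ops = &my_sched_ops,
                .submit_wq = NULL,      /* let the scheduler allocate one */
                .timeout_wq = NULL,     /* fall back to the system wq */
                .num_rqs = DRM_SCHED_PRIORITY_COUNT,
                .credit_limit = 64,
                .hang_limit = 0,        /* deprecated mechanism, keep at 0 */
                .timeout = msecs_to_jiffies(500),
                .name = "my-ring",
                .dev = dev,
        };

        return drm_sched_init(&mdev->sched, &args);
}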
diff --git a/include/drm/i2c/ch7006.h b/include/drm/i2c/ch7006.h
deleted file mode 100644
index 5305b9797f93..000000000000
--- a/include/drm/i2c/ch7006.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (C) 2009 Francisco Jerez.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __DRM_I2C_CH7006_H__
-#define __DRM_I2C_CH7006_H__
-
-/**
- * struct ch7006_encoder_params
- *
- * Describes how the ch7006 is wired up with the GPU. It should be
- * used as the @params parameter of its @set_config method.
- *
- * See "http://www.chrontel.com/pdf/7006.pdf" for their precise
- * meaning.
- */
-struct ch7006_encoder_params {
- /* private: FIXME: document the members */
- enum {
- CH7006_FORMAT_RGB16 = 0,
- CH7006_FORMAT_YCrCb24m16,
- CH7006_FORMAT_RGB24m16,
- CH7006_FORMAT_RGB15,
- CH7006_FORMAT_RGB24m12C,
- CH7006_FORMAT_RGB24m12I,
- CH7006_FORMAT_RGB24m8,
- CH7006_FORMAT_RGB16m8,
- CH7006_FORMAT_RGB15m8,
- CH7006_FORMAT_YCrCb24m8,
- } input_format;
-
- enum {
- CH7006_CLOCK_SLAVE = 0,
- CH7006_CLOCK_MASTER,
- } clock_mode;
-
- enum {
- CH7006_CLOCK_EDGE_NEG = 0,
- CH7006_CLOCK_EDGE_POS,
- } clock_edge;
-
- int xcm, pcm;
-
- enum {
- CH7006_SYNC_SLAVE = 0,
- CH7006_SYNC_MASTER,
- } sync_direction;
-
- enum {
- CH7006_SYNC_SEPARATED = 0,
- CH7006_SYNC_EMBEDDED,
- } sync_encoding;
-
- enum {
- CH7006_POUT_1_8V = 0,
- CH7006_POUT_3_3V,
- } pout_level;
-
- enum {
- CH7006_ACTIVE_HSYNC = 0,
- CH7006_ACTIVE_DSTART,
- } active_detect;
-};
-
-#endif
diff --git a/include/drm/i2c/sil164.h b/include/drm/i2c/sil164.h
deleted file mode 100644
index ddf248693c8b..000000000000
--- a/include/drm/i2c/sil164.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2010 Francisco Jerez.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __DRM_I2C_SIL164_H__
-#define __DRM_I2C_SIL164_H__
-
-/**
- * struct sil164_encoder_params
- *
- * Describes how the sil164 is connected to the GPU. It should be used
- * as the @params parameter of its @set_config method.
- *
- * See "http://www.siliconimage.com/docs/SiI-DS-0021-E-164.pdf".
- */
-struct sil164_encoder_params {
- /* private: FIXME: document the members */
- enum {
- SIL164_INPUT_EDGE_FALLING = 0,
- SIL164_INPUT_EDGE_RISING
- } input_edge;
-
- enum {
- SIL164_INPUT_WIDTH_12BIT = 0,
- SIL164_INPUT_WIDTH_24BIT
- } input_width;
-
- enum {
- SIL164_INPUT_SINGLE_EDGE = 0,
- SIL164_INPUT_DUAL_EDGE
- } input_dual;
-
- enum {
- SIL164_PLL_FILTER_ON = 0,
- SIL164_PLL_FILTER_OFF,
- } pll_filter;
-
- int input_skew; /** < Allowed range [-4, 3], use 0 for no de-skew. */
- int duallink_skew; /** < Allowed range [-4, 3]. */
-};
-
-#endif
diff --git a/include/drm/i2c/tda998x.h b/include/drm/i2c/tda998x.h
deleted file mode 100644
index 3cb25ccbe5e6..000000000000
--- a/include/drm/i2c/tda998x.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DRM_I2C_TDA998X_H__
-#define __DRM_I2C_TDA998X_H__
-
-#include <linux/hdmi.h>
-#include <dt-bindings/display/tda998x.h>
-
-enum {
- AFMT_UNUSED = 0,
- AFMT_SPDIF = TDA998x_SPDIF,
- AFMT_I2S = TDA998x_I2S,
-};
-
-struct tda998x_audio_params {
- u8 config;
- u8 format;
- unsigned sample_width;
- unsigned sample_rate;
- struct hdmi_audio_infoframe cea;
- u8 status[5];
-};
-
-struct tda998x_encoder_params {
- u8 swap_b:3;
- u8 mirr_b:1;
- u8 swap_a:3;
- u8 mirr_a:1;
- u8 swap_d:3;
- u8 mirr_d:1;
- u8 swap_c:3;
- u8 mirr_c:1;
- u8 swap_f:3;
- u8 mirr_f:1;
- u8 swap_e:3;
- u8 mirr_e:1;
-
- struct tda998x_audio_params audio_params;
-};
-
-#endif
diff --git a/include/drm/intel/intel-gtt.h b/include/drm/intel/intel-gtt.h
index cb0d5b7200c7..f53bcff01f22 100644
--- a/include/drm/intel/intel-gtt.h
+++ b/include/drm/intel/intel-gtt.h
@@ -28,6 +28,8 @@ void intel_gmch_gtt_insert_sg_entries(struct sg_table *st,
unsigned int pg_start,
unsigned int flags);
void intel_gmch_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
+dma_addr_t intel_gmch_gtt_read_entry(unsigned int pg,
+ bool *is_present, bool *is_local);
/* Special gtt memory types */
#define AGP_DCACHE_MEMORY 1
diff --git a/include/drm/intel/pciids.h b/include/drm/intel/pciids.h
index 32480b5563db..a7ce9523c50d 100644
--- a/include/drm/intel/pciids.h
+++ b/include/drm/intel/pciids.h
@@ -717,37 +717,66 @@
MACRO__(0xA7AB, ## __VA_ARGS__)
/* DG2 */
-#define INTEL_DG2_G10_IDS(MACRO__, ...) \
- MACRO__(0x5690, ## __VA_ARGS__), \
- MACRO__(0x5691, ## __VA_ARGS__), \
- MACRO__(0x5692, ## __VA_ARGS__), \
+#define INTEL_DG2_G10_D_IDS(MACRO__, ...) \
MACRO__(0x56A0, ## __VA_ARGS__), \
MACRO__(0x56A1, ## __VA_ARGS__), \
- MACRO__(0x56A2, ## __VA_ARGS__), \
+ MACRO__(0x56A2, ## __VA_ARGS__)
+
+#define INTEL_DG2_G10_E_IDS(MACRO__, ...) \
MACRO__(0x56BE, ## __VA_ARGS__), \
MACRO__(0x56BF, ## __VA_ARGS__)
-#define INTEL_DG2_G11_IDS(MACRO__, ...) \
- MACRO__(0x5693, ## __VA_ARGS__), \
- MACRO__(0x5694, ## __VA_ARGS__), \
- MACRO__(0x5695, ## __VA_ARGS__), \
+#define INTEL_DG2_G10_M_IDS(MACRO__, ...) \
+ MACRO__(0x5690, ## __VA_ARGS__), \
+ MACRO__(0x5691, ## __VA_ARGS__), \
+ MACRO__(0x5692, ## __VA_ARGS__)
+
+#define INTEL_DG2_G10_IDS(MACRO__, ...) \
+ INTEL_DG2_G10_D_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_DG2_G10_E_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_DG2_G10_M_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_DG2_G11_D_IDS(MACRO__, ...) \
MACRO__(0x56A5, ## __VA_ARGS__), \
MACRO__(0x56A6, ## __VA_ARGS__), \
MACRO__(0x56B0, ## __VA_ARGS__), \
- MACRO__(0x56B1, ## __VA_ARGS__), \
+ MACRO__(0x56B1, ## __VA_ARGS__)
+
+#define INTEL_DG2_G11_E_IDS(MACRO__, ...) \
MACRO__(0x56BA, ## __VA_ARGS__), \
MACRO__(0x56BB, ## __VA_ARGS__), \
MACRO__(0x56BC, ## __VA_ARGS__), \
MACRO__(0x56BD, ## __VA_ARGS__)
-#define INTEL_DG2_G12_IDS(MACRO__, ...) \
- MACRO__(0x5696, ## __VA_ARGS__), \
- MACRO__(0x5697, ## __VA_ARGS__), \
+#define INTEL_DG2_G11_M_IDS(MACRO__, ...) \
+ MACRO__(0x5693, ## __VA_ARGS__), \
+ MACRO__(0x5694, ## __VA_ARGS__), \
+ MACRO__(0x5695, ## __VA_ARGS__)
+
+#define INTEL_DG2_G11_IDS(MACRO__, ...) \
+ INTEL_DG2_G11_D_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_DG2_G11_E_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_DG2_G11_M_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_DG2_G12_D_IDS(MACRO__, ...) \
MACRO__(0x56A3, ## __VA_ARGS__), \
MACRO__(0x56A4, ## __VA_ARGS__), \
MACRO__(0x56B2, ## __VA_ARGS__), \
MACRO__(0x56B3, ## __VA_ARGS__)
+#define INTEL_DG2_G12_M_IDS(MACRO__, ...) \
+ MACRO__(0x5696, ## __VA_ARGS__), \
+ MACRO__(0x5697, ## __VA_ARGS__)
+
+#define INTEL_DG2_G12_IDS(MACRO__, ...) \
+ INTEL_DG2_G12_D_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_DG2_G12_M_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_DG2_D_IDS(MACRO__, ...) \
+ INTEL_DG2_G10_D_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_DG2_G11_D_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_DG2_G12_D_IDS(MACRO__, ## __VA_ARGS__)
+
#define INTEL_DG2_IDS(MACRO__, ...) \
INTEL_DG2_G10_IDS(MACRO__, ## __VA_ARGS__), \
INTEL_DG2_G11_IDS(MACRO__, ## __VA_ARGS__), \
@@ -782,9 +811,12 @@
INTEL_ARL_S_IDS(MACRO__, ## __VA_ARGS__)
/* MTL */
-#define INTEL_MTL_IDS(MACRO__, ...) \
+#define INTEL_MTL_U_IDS(MACRO__, ...) \
MACRO__(0x7D40, ## __VA_ARGS__), \
- MACRO__(0x7D45, ## __VA_ARGS__), \
+ MACRO__(0x7D45, ## __VA_ARGS__)
+
+#define INTEL_MTL_IDS(MACRO__, ...) \
+ INTEL_MTL_U_IDS(MACRO__, ## __VA_ARGS__), \
MACRO__(0x7D55, ## __VA_ARGS__), \
MACRO__(0x7D60, ## __VA_ARGS__), \
MACRO__(0x7DD5, ## __VA_ARGS__)
@@ -817,18 +849,25 @@
MACRO__(0xE20B, ## __VA_ARGS__), \
MACRO__(0xE20C, ## __VA_ARGS__), \
MACRO__(0xE20D, ## __VA_ARGS__), \
- MACRO__(0xE212, ## __VA_ARGS__)
+ MACRO__(0xE210, ## __VA_ARGS__), \
+ MACRO__(0xE211, ## __VA_ARGS__), \
+ MACRO__(0xE212, ## __VA_ARGS__), \
+ MACRO__(0xE215, ## __VA_ARGS__), \
+ MACRO__(0xE216, ## __VA_ARGS__)
/* PTL */
#define INTEL_PTL_IDS(MACRO__, ...) \
MACRO__(0xB080, ## __VA_ARGS__), \
MACRO__(0xB081, ## __VA_ARGS__), \
MACRO__(0xB082, ## __VA_ARGS__), \
+ MACRO__(0xB083, ## __VA_ARGS__), \
+ MACRO__(0xB084, ## __VA_ARGS__), \
+ MACRO__(0xB085, ## __VA_ARGS__), \
+ MACRO__(0xB086, ## __VA_ARGS__), \
+ MACRO__(0xB087, ## __VA_ARGS__), \
+ MACRO__(0xB08F, ## __VA_ARGS__), \
MACRO__(0xB090, ## __VA_ARGS__), \
- MACRO__(0xB091, ## __VA_ARGS__), \
- MACRO__(0xB092, ## __VA_ARGS__), \
MACRO__(0xB0A0, ## __VA_ARGS__), \
- MACRO__(0xB0A1, ## __VA_ARGS__), \
- MACRO__(0xB0A2, ## __VA_ARGS__)
+ MACRO__(0xB0B0, ## __VA_ARGS__)
#endif /* __PCIIDS_H__ */
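Editor's note: the restructuring above only regroups the per-segment ID lists; consumers still supply a MACRO__ of their own. A hedged sketch of expanding two of the composite macros into a PCI ID table (MY_VGA_DEVICE and the my_*_info blobs are made up):

#include <linux/pci.h>
#include <drm/intel/pciids.h>

struct my_platform_info {                       /* hypothetical */
        bool is_dgfx;
};

static const struct my_platform_info my_dg2_info = { .is_dgfx = true };
static const struct my_platform_info my_ptl_info;

#define MY_VGA_DEVICE(id, info) {               \
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, id),    \
        .driver_data = (kernel_ulong_t)(info) }

static const struct pci_device_id my_pci_ids[] = {
        INTEL_DG2_D_IDS(MY_VGA_DEVICE, &my_dg2_info),   /* the _D_ subset only */
        INTEL_PTL_IDS(MY_VGA_DEVICE, &my_ptl_info),
        { }
};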
diff --git a/include/drm/spsc_queue.h b/include/drm/spsc_queue.h
index 125f096c88cb..ee9df8cc67b7 100644
--- a/include/drm/spsc_queue.h
+++ b/include/drm/spsc_queue.h
@@ -70,9 +70,11 @@ static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *n
preempt_disable();
+ atomic_inc(&queue->job_count);
+ smp_mb__after_atomic();
+
tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next);
WRITE_ONCE(*tail, node);
- atomic_inc(&queue->job_count);
/*
* In case of first element verify new node will be visible to the consumer
diff --git a/include/drm/ttm/ttm_backup.h b/include/drm/ttm/ttm_backup.h
new file mode 100644
index 000000000000..c33cba111171
--- /dev/null
+++ b/include/drm/ttm/ttm_backup.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _TTM_BACKUP_H_
+#define _TTM_BACKUP_H_
+
+#include <linux/mm_types.h>
+#include <linux/shmem_fs.h>
+
+/**
+ * ttm_backup_handle_to_page_ptr() - Convert handle to struct page pointer
+ * @handle: The handle to convert.
+ *
+ * Converts an opaque handle received from the
+ * ttm_backup_backup_page() function to an (invalid)
+ * struct page pointer suitable for a struct page array.
+ *
+ * Return: An (invalid) struct page pointer.
+ */
+static inline struct page *
+ttm_backup_handle_to_page_ptr(unsigned long handle)
+{
+ return (struct page *)(handle << 1 | 1);
+}
+
+/**
+ * ttm_backup_page_ptr_is_handle() - Whether a struct page pointer is a handle
+ * @page: The struct page pointer to check.
+ *
+ * Return: true if the struct page pointer encodes a handle, i.e. was
+ * returned from ttm_backup_handle_to_page_ptr(). False otherwise.
+ */
+static inline bool ttm_backup_page_ptr_is_handle(const struct page *page)
+{
+ return (unsigned long)page & 1;
+}
+
+/**
+ * ttm_backup_page_ptr_to_handle() - Convert a struct page pointer to a handle
+ * @page: The struct page pointer to convert
+ *
+ * Return: The handle that was previously used in
+ * ttm_backup_handle_to_page_ptr() to obtain a struct page pointer, suitable
+ * for use as argument to the ttm_backup_drop() or
+ * ttm_backup_copy_page() functions.
+ */
+static inline unsigned long
+ttm_backup_page_ptr_to_handle(const struct page *page)
+{
+ WARN_ON(!ttm_backup_page_ptr_is_handle(page));
+ return (unsigned long)page >> 1;
+}
+
+void ttm_backup_drop(struct file *backup, pgoff_t handle);
+
+int ttm_backup_copy_page(struct file *backup, struct page *dst,
+ pgoff_t handle, bool intr);
+
+s64
+ttm_backup_backup_page(struct file *backup, struct page *page,
+ bool writeback, pgoff_t idx, gfp_t page_gfp,
+ gfp_t alloc_gfp);
+
+void ttm_backup_fini(struct file *backup);
+
+u64 ttm_backup_bytes_avail(void);
+
+struct file *ttm_backup_shmem_create(loff_t size);
+
+#endif
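Editor's note: a hedged sketch of the round trip the helpers above are designed for: back a page up, encode the returned handle into the page array, and drop it again later. "backup" and "pages" stand in for driver or TTM-internal state.

#include <linux/gfp.h>
#include <drm/ttm/ttm_backup.h>

static int example_backup_one(struct file *backup, struct page **pages,
                              pgoff_t idx)
{
        s64 handle;

        handle = ttm_backup_backup_page(backup, pages[idx], false, idx,
                                        GFP_KERNEL, GFP_KERNEL);
        if (handle < 0)
                return handle;

        /* The array now holds an (invalid) pointer encoding the handle. */
        pages[idx] = ttm_backup_handle_to_page_ptr(handle);
        return 0;
}

static void example_drop_one(struct file *backup, struct page **pages,
                             pgoff_t idx)
{
        if (ttm_backup_page_ptr_is_handle(pages[idx]))
                ttm_backup_drop(backup,
                                ttm_backup_page_ptr_to_handle(pages[idx]));
}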
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
index 5804408815be..cf027558b6db 100644
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -172,8 +172,6 @@ struct ttm_bo_kmap_obj {
* @gfp_retry_mayfail: Set the __GFP_RETRY_MAYFAIL when allocation pages.
* @allow_res_evict: Allow eviction of reserved BOs. Can be used when multiple
* BOs share the same reservation object.
- * @force_alloc: Don't check the memory account during suspend or CPU page
- * faults. Should only be used by TTM internally.
* @resv: Reservation object to allow reserved evictions with.
* @bytes_moved: Statistics on how many bytes have been moved.
@@ -185,7 +184,6 @@ struct ttm_operation_ctx {
bool no_wait_gpu;
bool gfp_retry_mayfail;
bool allow_res_evict;
- bool force_alloc;
struct dma_resv *resv;
uint64_t bytes_moved;
};
@@ -226,6 +224,27 @@ s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
struct ttm_resource_manager *man, s64 target);
/**
+ * struct ttm_bo_shrink_flags - flags to govern the bo shrinking behaviour
+ * @purge: Purge the content rather than backing it up.
+ * @writeback: Attempt to immediately write content to swap space.
+ * @allow_move: Allow moving to system before shrinking. This is typically
+ * not desired for zombie- or ghost objects (with zombie object meaning
+ * objects with a zero gem object refcount)
+ */
+struct ttm_bo_shrink_flags {
+ u32 purge : 1;
+ u32 writeback : 1;
+ u32 allow_move : 1;
+};
+
+long ttm_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
+ const struct ttm_bo_shrink_flags flags);
+
+bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx);
+
+bool ttm_bo_shrink_avoid_wait(void);
+
+/**
* ttm_bo_get - reference a struct ttm_buffer_object
*
* @bo: The buffer object.
@@ -421,6 +440,8 @@ void ttm_bo_unpin(struct ttm_buffer_object *bo);
int ttm_bo_evict_first(struct ttm_device *bdev,
struct ttm_resource_manager *man,
struct ttm_operation_ctx *ctx);
+int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset,
+ void *buf, int len, int write);
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
struct vm_fault *vmf);
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
@@ -465,4 +486,76 @@ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
int ttm_bo_populate(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx);
+/* Driver LRU walk helpers initially targeted for shrinking. */
+
+/**
+ * struct ttm_bo_lru_cursor - Iterator cursor for TTM LRU list looping
+ */
+struct ttm_bo_lru_cursor {
+ /** @res_curs: Embedded struct ttm_resource_cursor. */
+ struct ttm_resource_cursor res_curs;
+ /**
+ * @ctx: The struct ttm_operation_ctx used while looping.
+ * governs the locking mode.
+ */
+ struct ttm_operation_ctx *ctx;
+ /**
+ * @bo: Buffer object pointer if a buffer object is refcounted,
+ * NULL otherwise.
+ */
+ struct ttm_buffer_object *bo;
+ /**
+ * @needs_unlock: Valid iff @bo != NULL. The bo resv needs
+ * unlock before the next iteration or after loop exit.
+ */
+ bool needs_unlock;
+};
+
+void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs);
+
+struct ttm_bo_lru_cursor *
+ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs,
+ struct ttm_resource_manager *man,
+ struct ttm_operation_ctx *ctx);
+
+struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs);
+
+struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs);
+
+/*
+ * Defines needed to use autocleanup (linux/cleanup.h) with struct ttm_bo_lru_cursor.
+ */
+DEFINE_CLASS(ttm_bo_lru_cursor, struct ttm_bo_lru_cursor *,
+ if (_T) {ttm_bo_lru_cursor_fini(_T); },
+ ttm_bo_lru_cursor_init(curs, man, ctx),
+ struct ttm_bo_lru_cursor *curs, struct ttm_resource_manager *man,
+ struct ttm_operation_ctx *ctx);
+static inline void *
+class_ttm_bo_lru_cursor_lock_ptr(class_ttm_bo_lru_cursor_t *_T)
+{ return *_T; }
+#define class_ttm_bo_lru_cursor_is_conditional false
+
+/**
+ * ttm_bo_lru_for_each_reserved_guarded() - Iterate over buffer objects owning
+ * resources on LRU lists.
+ * @_cursor: struct ttm_bo_lru_cursor to use for the iteration.
+ * @_man: The resource manager whose LRU lists to iterate over.
+ * @_ctx: The struct ttm_operation_context to govern the @_bo locking.
+ * @_bo: The struct ttm_buffer_object pointer pointing to the buffer object
+ * for the current iteration.
+ *
+ * Iterate over all resources of @_man and for each resource, attempt to
+ * reference and lock (using the locking mode detailed in @_ctx) the buffer
+ * object it points to. If successful, assign @_bo to the address of the
+ * buffer object and update @_cursor. The iteration is guarded in the
+ * sense that @_cursor will be initialized before the loop starts and
+ * cleaned up when the loop terminates, even if it is exited prematurely
+ * by, for example, a return or break statement. Exiting the loop will
+ * also unlock (if needed) and unreference @_bo.
+ */
+#define ttm_bo_lru_for_each_reserved_guarded(_cursor, _man, _ctx, _bo) \
+ scoped_guard(ttm_bo_lru_cursor, _cursor, _man, _ctx) \
+ for ((_bo) = ttm_bo_lru_cursor_first(_cursor); (_bo); \
+ (_bo) = ttm_bo_lru_cursor_next(_cursor))
+
#endif
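Editor's note: a hedged sketch tying the new shrink helpers to the guarded LRU iterator above; the chosen policy (writeback, no purge, allow a move to system) is an assumption of the example, not a recommendation.

#include <drm/ttm/ttm_bo.h>

static s64 example_shrink_lru(struct ttm_resource_manager *man,
                              struct ttm_operation_ctx *ctx)
{
        struct ttm_bo_lru_cursor cursor;
        struct ttm_buffer_object *bo;
        s64 progress = 0;

        ttm_bo_lru_for_each_reserved_guarded(&cursor, man, ctx, bo) {
                long ret;

                if (!ttm_bo_shrink_suitable(bo, ctx))
                        continue;

                ret = ttm_bo_shrink(ctx, bo, (struct ttm_bo_shrink_flags) {
                                        .writeback = true,
                                        .allow_move = true,
                                    });
                if (ret > 0)
                        progress += ret;
        }

        return progress;
}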
diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h
index 160d954a261e..54cd34a6e4c0 100644
--- a/include/drm/ttm/ttm_pool.h
+++ b/include/drm/ttm/ttm_pool.h
@@ -33,6 +33,7 @@
struct device;
struct seq_file;
+struct ttm_backup_flags;
struct ttm_operation_ctx;
struct ttm_pool;
struct ttm_tt;
@@ -89,6 +90,13 @@ void ttm_pool_fini(struct ttm_pool *pool);
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m);
+void ttm_pool_drop_backed_up(struct ttm_tt *tt);
+
+long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *ttm,
+ const struct ttm_backup_flags *flags);
+int ttm_pool_restore_and_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ const struct ttm_operation_ctx *ctx);
+
int ttm_pool_mgr_init(unsigned long num_pages);
void ttm_pool_mgr_fini(void);
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index be034be56ba1..e52bba15012f 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -38,6 +38,7 @@
#define TTM_MAX_BO_PRIORITY 4U
#define TTM_NUM_MEM_TYPES 8
+struct dmem_cgroup_device;
struct ttm_device;
struct ttm_resource_manager;
struct ttm_resource;
@@ -211,6 +212,11 @@ struct ttm_resource_manager {
* bdev->lru_lock.
*/
uint64_t usage;
+
+ /**
+ * @cg: &dmem_cgroup_region used for memory accounting, if not NULL.
+ */
+ struct dmem_cgroup_region *cg;
};
/**
@@ -239,6 +245,7 @@ struct ttm_bus_placement {
* @placement: Placement flags.
* @bus: Placement on io bus accessible to the CPU
* @bo: weak reference to the BO, protected by ttm_device::lru_lock
+ * @css: cgroup state this resource is charged to
*
* Structure indicating the placement and space resources used by a
* buffer object.
@@ -251,6 +258,8 @@ struct ttm_resource {
struct ttm_bus_placement bus;
struct ttm_buffer_object *bo;
+ struct dmem_cgroup_pool_state *css;
+
/**
* @lru: Least recently used list, see &ttm_resource_manager.lru
*/
@@ -325,6 +334,9 @@ struct ttm_resource_cursor {
unsigned int priority;
};
+void ttm_resource_cursor_init(struct ttm_resource_cursor *cursor,
+ struct ttm_resource_manager *man);
+
void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor);
/**
@@ -432,7 +444,8 @@ void ttm_resource_fini(struct ttm_resource_manager *man,
int ttm_resource_alloc(struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource **res);
+ struct ttm_resource **res,
+ struct dmem_cgroup_pool_state **ret_limit_pool);
void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res);
bool ttm_resource_intersects(struct ttm_device *bdev,
struct ttm_resource *res,
@@ -456,8 +469,7 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man,
struct drm_printer *p);
struct ttm_resource *
-ttm_resource_manager_first(struct ttm_resource_manager *man,
- struct ttm_resource_cursor *cursor);
+ttm_resource_manager_first(struct ttm_resource_cursor *cursor);
struct ttm_resource *
ttm_resource_manager_next(struct ttm_resource_cursor *cursor);
@@ -466,14 +478,13 @@ ttm_lru_first_res_or_null(struct list_head *head);
/**
* ttm_resource_manager_for_each_res - iterate over all resources
- * @man: the resource manager
* @cursor: struct ttm_resource_cursor for the current position
* @res: the current resource
*
* Iterate over all the evictable resources in a resource manager.
*/
-#define ttm_resource_manager_for_each_res(man, cursor, res) \
- for (res = ttm_resource_manager_first(man, cursor); res; \
+#define ttm_resource_manager_for_each_res(cursor, res) \
+ for (res = ttm_resource_manager_first(cursor); res; \
res = ttm_resource_manager_next(cursor))
struct ttm_kmap_iter *
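Editor's note: a hedged sketch of the reworked iteration API: the cursor is now bound to the manager via ttm_resource_cursor_init() before the walk and torn down with ttm_resource_cursor_fini() afterwards. Locking around the walk (the device's lru_lock) is omitted for brevity.

#include <drm/ttm/ttm_resource.h>

static u64 example_sum_usage(struct ttm_resource_manager *man)
{
        struct ttm_resource_cursor cursor;
        struct ttm_resource *res;
        u64 bytes = 0;

        ttm_resource_cursor_init(&cursor, man);
        ttm_resource_manager_for_each_res(&cursor, res)
                bytes += res->size;
        ttm_resource_cursor_fini(&cursor);

        return bytes;
}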
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index 991edafdb2dd..406437ad674b 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -32,11 +32,13 @@
#include <drm/ttm/ttm_caching.h>
#include <drm/ttm/ttm_kmap_iter.h>
+struct ttm_backup;
struct ttm_device;
struct ttm_tt;
struct ttm_resource;
struct ttm_buffer_object;
struct ttm_operation_ctx;
+struct ttm_pool_tt_restore;
/**
* struct ttm_tt - This is a structure holding the pages, caching- and aperture
@@ -85,17 +87,22 @@ struct ttm_tt {
* fault handling abuses the DMA api a bit and dma_map_attrs can't be
* used to assure pgprot always matches.
*
+ * TTM_TT_FLAG_BACKED_UP: TTM internal only. This is set if the
+ * struct ttm_tt has been (possibly partially) backed up.
+ *
* TTM_TT_FLAG_PRIV_POPULATED: TTM internal only. DO NOT USE. This is
* set by TTM after ttm_tt_populate() has successfully returned, and is
* then unset when TTM calls ttm_tt_unpopulate().
+ *
*/
#define TTM_TT_FLAG_SWAPPED BIT(0)
#define TTM_TT_FLAG_ZERO_ALLOC BIT(1)
#define TTM_TT_FLAG_EXTERNAL BIT(2)
#define TTM_TT_FLAG_EXTERNAL_MAPPABLE BIT(3)
#define TTM_TT_FLAG_DECRYPTED BIT(4)
+#define TTM_TT_FLAG_BACKED_UP BIT(5)
-#define TTM_TT_FLAG_PRIV_POPULATED BIT(5)
+#define TTM_TT_FLAG_PRIV_POPULATED BIT(6)
uint32_t page_flags;
/** @num_pages: Number of pages in the page array. */
uint32_t num_pages;
@@ -106,10 +113,19 @@ struct ttm_tt {
/** @swap_storage: Pointer to shmem struct file for swap storage. */
struct file *swap_storage;
/**
+ * @backup: Pointer to backup struct for backed up tts.
+ * Could be unified with @swap_storage. Meanwhile, the driver's
+ * ttm_tt_create() callback is responsible for assigning
+ * this field.
+ */
+ struct file *backup;
+ /**
* @caching: The current caching state of the pages, see enum
* ttm_caching.
*/
enum ttm_caching caching;
+ /** @restore: Partial restoration from backup state. TTM private */
+ struct ttm_pool_tt_restore *restore;
};
/**
@@ -129,9 +145,38 @@ static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
return tt->page_flags & TTM_TT_FLAG_PRIV_POPULATED;
}
+/**
+ * ttm_tt_is_swapped() - Whether the ttm_tt is swapped out or backed up
+ * @tt: The struct ttm_tt.
+ *
+ * Return: true if swapped or backed up, false otherwise.
+ */
static inline bool ttm_tt_is_swapped(const struct ttm_tt *tt)
{
- return tt->page_flags & TTM_TT_FLAG_SWAPPED;
+ return tt->page_flags & (TTM_TT_FLAG_SWAPPED | TTM_TT_FLAG_BACKED_UP);
+}
+
+/**
+ * ttm_tt_is_backed_up() - Whether the ttm_tt is backed up
+ * @tt: The struct ttm_tt.
+ *
+ * Return: true if backed up, false otherwise.
+ */
+static inline bool ttm_tt_is_backed_up(const struct ttm_tt *tt)
+{
+ return tt->page_flags & TTM_TT_FLAG_BACKED_UP;
+}
+
+/**
+ * ttm_tt_clear_backed_up() - Clear the ttm_tt backed-up status
+ * @tt: The struct ttm_tt.
+ *
+ * Drivers can use this function to clear the backed-up status,
+ * for example before destroying or re-validating a purged tt.
+ */
+static inline void ttm_tt_clear_backed_up(struct ttm_tt *tt)
+{
+ tt->page_flags &= ~TTM_TT_FLAG_BACKED_UP;
}
/**
@@ -235,6 +280,26 @@ void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages);
struct ttm_kmap_iter *ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
struct ttm_tt *tt);
unsigned long ttm_tt_pages_limit(void);
+
+/**
+ * struct ttm_backup_flags - Flags to govern backup behaviour.
+ * @purge: Free pages without backing up. Bypass pools.
+ * @writeback: Attempt to copy contents directly to swap space, even
+ * if that means blocking on writes to external memory.
+ */
+struct ttm_backup_flags {
+ u32 purge : 1;
+ u32 writeback : 1;
+};
+
+long ttm_tt_backup(struct ttm_device *bdev, struct ttm_tt *tt,
+ const struct ttm_backup_flags flags);
+
+int ttm_tt_restore(struct ttm_device *bdev, struct ttm_tt *tt,
+ const struct ttm_operation_ctx *ctx);
+
+int ttm_tt_setup_backup(struct ttm_tt *tt);
+
#if IS_ENABLED(CONFIG_AGP)
#include <linux/agp_backend.h>
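Editor's note: a hedged sketch of the backup flow suggested by the declarations above: attach shmem-backed storage once (typically from the driver's ttm_tt_create() callback, per the @backup documentation), then back the tt up under a chosen policy. Error handling and the surrounding locking are driver-specific and omitted.

#include <drm/ttm/ttm_tt.h>

static long example_backup_tt(struct ttm_device *bdev, struct ttm_tt *tt)
{
        const struct ttm_backup_flags flags = {
                .purge = false,         /* keep the contents */
                .writeback = true,      /* push straight to swap if possible */
        };
        int err;

        if (!tt->backup) {
                err = ttm_tt_setup_backup(tt);
                if (err)
                        return err;
        }

        return ttm_tt_backup(bdev, tt, flags);
}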