summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2026-03-08 23:06:57 +0300
committerDave Airlie <airlied@redhat.com>2026-03-08 23:07:03 +0300
commit5f0a63f81a027becb06a71406e0941c5d12e074d (patch)
tree644b64d1ab50a79b9cc7d4a55168d5e7cd9bca5b
parent057ad0ef4da61a8ba654c691e3fc3933d92b7d5f (diff)
parentd2e20c8951e4bb5f4a828aed39813599980353b6 (diff)
downloadlinux-5f0a63f81a027becb06a71406e0941c5d12e074d.tar.xz
Merge tag 'drm-misc-next-2026-03-05' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next
drm-misc-next for v7.1: Cross-subsystem Changes: dma-buf: - Prepare for compile-time concurrency analysis Core Changes: buddy: - Improve assert testing sched: - Fix race condition in drm_sched_fini() - Mark slow tests Driver Changes: bridge: - waveshare-dsi: Fix register and attach; Support 1..4 DSI lanes plus DT bindings gma500: - Use DRM client buffer for fbdev framebuffer gud: - Test for imported buffers with helper imagination: - Fix power domain handling ivpu: - Update boot API to v3.29.4 - Limit per-user number of doorbells and contexts nouveau: - Test for imported buffers with helper panel: - panel-edp: Fix timings for BOE NV140WUM-N64 panfrost: - Test for imported buffers with helper panthor: - Test for imported buffers with helper vc4: - Test for imported buffers with helper Signed-off-by: Dave Airlie <airlied@redhat.com> From: Thomas Zimmermann <tzimmermann@suse.de> Link: https://patch.msgid.link/20260305081140.GA171266@linux.fritz.box
-rw-r--r--Documentation/devicetree/bindings/display/bridge/waveshare,dsi2dpi.yaml3
-rw-r--r--drivers/accel/ivpu/ivpu_drv.c94
-rw-r--r--drivers/accel/ivpu/ivpu_drv.h26
-rw-r--r--drivers/accel/ivpu/ivpu_job.c36
-rw-r--r--drivers/accel/ivpu/vpu_boot_api.h211
-rw-r--r--drivers/dma-buf/dma-resv.c5
-rw-r--r--drivers/gpu/buddy.c36
-rw-r--r--drivers/gpu/drm/bridge/waveshare-dsi.c14
-rw-r--r--drivers/gpu/drm/drm_client.c3
-rw-r--r--drivers/gpu/drm/gma500/fbdev.c101
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c104
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h6
-rw-r--r--drivers/gpu/drm/gud/gud_pipe.c2
-rw-r--r--drivers/gpu/drm/imagination/pvr_power.c52
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c2
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c9
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c2
-rw-r--r--drivers/gpu/drm/panthor/panthor_gem.c2
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c38
-rw-r--r--drivers/gpu/drm/scheduler/tests/tests_basic.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c2
-rw-r--r--drivers/gpu/tests/gpu_buddy_test.c2
-rw-r--r--include/drm/drm_client.h3
24 files changed, 388 insertions, 371 deletions
diff --git a/Documentation/devicetree/bindings/display/bridge/waveshare,dsi2dpi.yaml b/Documentation/devicetree/bindings/display/bridge/waveshare,dsi2dpi.yaml
index 5e8498c8303d..3820dd7e11af 100644
--- a/Documentation/devicetree/bindings/display/bridge/waveshare,dsi2dpi.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/waveshare,dsi2dpi.yaml
@@ -40,9 +40,12 @@ properties:
properties:
data-lanes:
description: array of physical DSI data lane indexes.
+ minItems: 1
items:
- const: 1
- const: 2
+ - const: 3
+ - const: 4
required:
- data-lanes
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 5900a40c7a78..dd3a486df5f1 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -67,6 +67,73 @@ bool ivpu_force_snoop;
module_param_named(force_snoop, ivpu_force_snoop, bool, 0444);
MODULE_PARM_DESC(force_snoop, "Force snooping for NPU host memory access");
+static struct ivpu_user_limits *ivpu_user_limits_alloc(struct ivpu_device *vdev, uid_t uid)
+{
+ struct ivpu_user_limits *limits;
+
+ limits = kzalloc_obj(*limits);
+ if (!limits)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&limits->ref);
+ atomic_set(&limits->db_count, 0);
+ limits->vdev = vdev;
+ limits->uid = uid;
+
+ /* Allow root user to allocate all contexts */
+ if (uid == 0) {
+ limits->max_ctx_count = ivpu_get_context_count(vdev);
+ limits->max_db_count = ivpu_get_doorbell_count(vdev);
+ } else {
+ limits->max_ctx_count = ivpu_get_context_count(vdev) / 2;
+ limits->max_db_count = ivpu_get_doorbell_count(vdev) / 2;
+ }
+
+ hash_add(vdev->user_limits, &limits->hash_node, uid);
+
+ return limits;
+}
+
+static struct ivpu_user_limits *ivpu_user_limits_get(struct ivpu_device *vdev)
+{
+ struct ivpu_user_limits *limits;
+ uid_t uid = current_uid().val;
+
+ guard(mutex)(&vdev->user_limits_lock);
+
+ hash_for_each_possible(vdev->user_limits, limits, hash_node, uid) {
+ if (limits->uid == uid) {
+ if (kref_read(&limits->ref) >= limits->max_ctx_count) {
+ ivpu_dbg(vdev, IOCTL, "User %u exceeded max ctx count %u\n", uid,
+ limits->max_ctx_count);
+ return ERR_PTR(-EMFILE);
+ }
+
+ kref_get(&limits->ref);
+ return limits;
+ }
+ }
+
+ return ivpu_user_limits_alloc(vdev, uid);
+}
+
+static void ivpu_user_limits_release(struct kref *ref)
+{
+ struct ivpu_user_limits *limits = container_of(ref, struct ivpu_user_limits, ref);
+ struct ivpu_device *vdev = limits->vdev;
+
+ lockdep_assert_held(&vdev->user_limits_lock);
+ drm_WARN_ON(&vdev->drm, atomic_read(&limits->db_count));
+ hash_del(&limits->hash_node);
+ kfree(limits);
+}
+
+static void ivpu_user_limits_put(struct ivpu_device *vdev, struct ivpu_user_limits *limits)
+{
+ guard(mutex)(&vdev->user_limits_lock);
+ kref_put(&limits->ref, ivpu_user_limits_release);
+}
+
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
{
struct ivpu_device *vdev = file_priv->vdev;
@@ -110,6 +177,7 @@ static void file_priv_release(struct kref *ref)
mutex_unlock(&vdev->context_list_lock);
pm_runtime_put_autosuspend(vdev->drm.dev);
+ ivpu_user_limits_put(vdev, file_priv->user_limits);
mutex_destroy(&file_priv->ms_lock);
mutex_destroy(&file_priv->lock);
kfree(file_priv);
@@ -169,7 +237,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
args->value = ivpu_hw_dpu_max_freq_get(vdev);
break;
case DRM_IVPU_PARAM_NUM_CONTEXTS:
- args->value = ivpu_get_context_count(vdev);
+ args->value = file_priv->user_limits->max_ctx_count;
break;
case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
args->value = vdev->hw->ranges.user.start;
@@ -231,22 +299,30 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
{
struct ivpu_device *vdev = to_ivpu_device(dev);
struct ivpu_file_priv *file_priv;
+ struct ivpu_user_limits *limits;
u32 ctx_id;
int idx, ret;
if (!drm_dev_enter(dev, &idx))
return -ENODEV;
+ limits = ivpu_user_limits_get(vdev);
+ if (IS_ERR(limits)) {
+ ret = PTR_ERR(limits);
+ goto err_dev_exit;
+ }
+
file_priv = kzalloc_obj(*file_priv);
if (!file_priv) {
ret = -ENOMEM;
- goto err_dev_exit;
+ goto err_user_limits_put;
}
INIT_LIST_HEAD(&file_priv->ms_instance_list);
file_priv->vdev = vdev;
file_priv->bound = true;
+ file_priv->user_limits = limits;
kref_init(&file_priv->ref);
mutex_init(&file_priv->lock);
mutex_init(&file_priv->ms_lock);
@@ -284,6 +360,8 @@ err_unlock:
mutex_destroy(&file_priv->ms_lock);
mutex_destroy(&file_priv->lock);
kfree(file_priv);
+err_user_limits_put:
+ ivpu_user_limits_put(vdev, limits);
err_dev_exit:
drm_dev_exit(idx);
return ret;
@@ -343,8 +421,7 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev)
ivpu_ipc_consumer_del(vdev, &cons);
if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
- ivpu_err(vdev, "Invalid NPU ready message: 0x%x\n",
- ipc_hdr.data_addr);
+ ivpu_err(vdev, "Invalid NPU ready message: 0x%x\n", ipc_hdr.data_addr);
return -EIO;
}
@@ -453,7 +530,7 @@ int ivpu_shutdown(struct ivpu_device *vdev)
}
static const struct file_operations ivpu_fops = {
- .owner = THIS_MODULE,
+ .owner = THIS_MODULE,
DRM_ACCEL_FOPS,
#ifdef CONFIG_PROC_FS
.show_fdinfo = drm_show_fdinfo,
@@ -592,6 +669,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
INIT_LIST_HEAD(&vdev->bo_list);
+ hash_init(vdev->user_limits);
vdev->db_limit.min = IVPU_MIN_DB;
vdev->db_limit.max = IVPU_MAX_DB;
@@ -600,6 +678,10 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
if (ret)
goto err_xa_destroy;
+ ret = drmm_mutex_init(&vdev->drm, &vdev->user_limits_lock);
+ if (ret)
+ goto err_xa_destroy;
+
ret = drmm_mutex_init(&vdev->drm, &vdev->submitted_jobs_lock);
if (ret)
goto err_xa_destroy;
@@ -717,7 +799,7 @@ static struct pci_device_id ivpu_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PTL_P) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_WCL) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_NVL) },
- { }
+ {}
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 5b34b6f50e69..6378e23e0c97 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -12,6 +12,7 @@
#include <drm/drm_mm.h>
#include <drm/drm_print.h>
+#include <linux/hashtable.h>
#include <linux/pci.h>
#include <linux/xarray.h>
#include <uapi/drm/ivpu_accel.h>
@@ -43,7 +44,7 @@
/* SSID 1 is used by the VPU to represent reserved context */
#define IVPU_RESERVED_CONTEXT_MMU_SSID 1
#define IVPU_USER_CONTEXT_MIN_SSID 2
-#define IVPU_USER_CONTEXT_MAX_SSID (IVPU_USER_CONTEXT_MIN_SSID + 63)
+#define IVPU_USER_CONTEXT_MAX_SSID (IVPU_USER_CONTEXT_MIN_SSID + 128)
#define IVPU_MIN_DB 1
#define IVPU_MAX_DB 255
@@ -51,9 +52,6 @@
#define IVPU_JOB_ID_JOB_MASK GENMASK(7, 0)
#define IVPU_JOB_ID_CONTEXT_MASK GENMASK(31, 8)
-#define IVPU_NUM_PRIORITIES 4
-#define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_PRIORITIES)
-
#define IVPU_CMDQ_MIN_ID 1
#define IVPU_CMDQ_MAX_ID 255
@@ -123,6 +121,16 @@ struct ivpu_fw_info;
struct ivpu_ipc_info;
struct ivpu_pm_info;
+struct ivpu_user_limits {
+ struct hlist_node hash_node;
+ struct ivpu_device *vdev;
+ struct kref ref;
+ u32 max_ctx_count;
+ u32 max_db_count;
+ u32 uid;
+ atomic_t db_count;
+};
+
struct ivpu_device {
struct drm_device drm;
void __iomem *regb;
@@ -142,6 +150,8 @@ struct ivpu_device {
struct mutex context_list_lock; /* Protects user context addition/removal */
struct xarray context_xa;
struct xa_limit context_xa_limit;
+ DECLARE_HASHTABLE(user_limits, 8);
+ struct mutex user_limits_lock; /* Protects user_limits */
struct xarray db_xa;
struct xa_limit db_limit;
@@ -189,6 +199,7 @@ struct ivpu_file_priv {
struct list_head ms_instance_list;
struct ivpu_bo *ms_info_bo;
struct xa_limit job_limit;
+ struct ivpu_user_limits *user_limits;
u32 job_id_next;
struct xa_limit cmdq_limit;
u32 cmdq_id_next;
@@ -286,6 +297,13 @@ static inline u32 ivpu_get_context_count(struct ivpu_device *vdev)
return (ctx_limit.max - ctx_limit.min + 1);
}
+static inline u32 ivpu_get_doorbell_count(struct ivpu_device *vdev)
+{
+ struct xa_limit db_limit = vdev->db_limit;
+
+ return (db_limit.max - db_limit.min + 1);
+}
+
static inline u32 ivpu_get_platform(struct ivpu_device *vdev)
{
WARN_ON_ONCE(vdev->platform == IVPU_PLATFORM_INVALID);
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index fe02b7bd465b..f0154dfa6ddc 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -173,7 +173,7 @@ static struct ivpu_cmdq *ivpu_cmdq_create(struct ivpu_file_priv *file_priv, u8 p
ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
&file_priv->cmdq_id_next, GFP_KERNEL);
if (ret < 0) {
- ivpu_err(vdev, "Failed to allocate command queue ID: %d\n", ret);
+ ivpu_dbg(vdev, IOCTL, "Failed to allocate command queue ID: %d\n", ret);
goto err_free_cmdq;
}
@@ -215,14 +215,22 @@ static int ivpu_hws_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq
static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
+ struct ivpu_user_limits *limits = file_priv->user_limits;
struct ivpu_device *vdev = file_priv->vdev;
int ret;
+ if (atomic_inc_return(&limits->db_count) > limits->max_db_count) {
+ ivpu_dbg(vdev, IOCTL, "Maximum number of %u doorbells for uid %u reached\n",
+ limits->max_db_count, limits->uid);
+ ret = -EBUSY;
+ goto err_dec_db_count;
+ }
+
ret = xa_alloc_cyclic(&vdev->db_xa, &cmdq->db_id, NULL, vdev->db_limit, &vdev->db_next,
GFP_KERNEL);
if (ret < 0) {
- ivpu_err(vdev, "Failed to allocate doorbell ID: %d\n", ret);
- return ret;
+ ivpu_dbg(vdev, IOCTL, "Failed to allocate doorbell ID: %d\n", ret);
+ goto err_dec_db_count;
}
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
@@ -231,15 +239,18 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *
else
ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
-
- if (!ret) {
- ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d priority %d\n",
- cmdq->db_id, cmdq->id, file_priv->ctx.id, cmdq->priority);
- } else {
+ if (ret) {
xa_erase(&vdev->db_xa, cmdq->db_id);
cmdq->db_id = 0;
+ goto err_dec_db_count;
}
+ ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d priority %d\n",
+ cmdq->db_id, cmdq->id, file_priv->ctx.id, cmdq->priority);
+ return 0;
+
+err_dec_db_count:
+ atomic_dec(&limits->db_count);
return ret;
}
@@ -298,6 +309,7 @@ static int ivpu_cmdq_unregister(struct ivpu_file_priv *file_priv, struct ivpu_cm
}
xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
+ atomic_dec(&file_priv->user_limits->db_count);
cmdq->db_id = 0;
return 0;
@@ -313,6 +325,7 @@ static inline u8 ivpu_job_to_jsm_priority(u8 priority)
static void ivpu_cmdq_destroy(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
+ lockdep_assert_held(&file_priv->lock);
ivpu_cmdq_unregister(file_priv, cmdq);
xa_erase(&file_priv->cmdq_xa, cmdq->id);
ivpu_cmdq_free(file_priv, cmdq);
@@ -380,8 +393,11 @@ static void ivpu_cmdq_reset(struct ivpu_file_priv *file_priv)
mutex_lock(&file_priv->lock);
xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq) {
- xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
- cmdq->db_id = 0;
+ if (cmdq->db_id) {
+ xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
+ atomic_dec(&file_priv->user_limits->db_count);
+ cmdq->db_id = 0;
+ }
}
mutex_unlock(&file_priv->lock);
diff --git a/drivers/accel/ivpu/vpu_boot_api.h b/drivers/accel/ivpu/vpu_boot_api.h
index 218468bbbcad..a41170bbc6b7 100644
--- a/drivers/accel/ivpu/vpu_boot_api.h
+++ b/drivers/accel/ivpu/vpu_boot_api.h
@@ -1,12 +1,22 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright (c) 2020-2024, Intel Corporation.
+ * Copyright (c) 2020-2025, Intel Corporation.
+ */
+
+/**
+ * @addtogroup Boot
+ * @{
+ */
+
+/**
+ * @file
+ * @brief Boot API public header file.
*/
#ifndef VPU_BOOT_API_H
#define VPU_BOOT_API_H
-/*
+/**
* The below values will be used to construct the version info this way:
* fw_bin_header->api_version[VPU_BOOT_API_VER_ID] = (VPU_BOOT_API_VER_MAJOR << 16) |
* VPU_BOOT_API_VER_MINOR;
@@ -16,24 +26,24 @@
* partial info a build error will be generated.
*/
-/*
+/**
* Major version changes that break backward compatibility.
* Major version must start from 1 and can only be incremented.
*/
#define VPU_BOOT_API_VER_MAJOR 3
-/*
+/**
* Minor version changes when API backward compatibility is preserved.
* Resets to 0 if Major version is incremented.
*/
-#define VPU_BOOT_API_VER_MINOR 28
+#define VPU_BOOT_API_VER_MINOR 29
-/*
+/**
* API header changed (field names, documentation, formatting) but API itself has not been changed
*/
-#define VPU_BOOT_API_VER_PATCH 3
+#define VPU_BOOT_API_VER_PATCH 4
-/*
+/**
* Index in the API version table
* Must be unique for each API
*/
@@ -41,7 +51,7 @@
#pragma pack(push, 4)
-/*
+/**
* Firmware image header format
*/
#define VPU_FW_HEADER_SIZE 4096
@@ -61,44 +71,41 @@ struct vpu_firmware_header {
u32 firmware_version_size;
u64 boot_params_load_address;
u32 api_version[VPU_FW_API_VER_NUM];
- /* Size of memory require for firmware execution */
+ /** Size of memory require for firmware execution */
u32 runtime_size;
u32 shave_nn_fw_size;
- /*
+ /**
* Size of primary preemption buffer, assuming a 2-job submission queue.
* NOTE: host driver is expected to adapt size accordingly to actual
* submission queue size and device capabilities.
*/
u32 preemption_buffer_1_size;
- /*
+ /**
* Size of secondary preemption buffer, assuming a 2-job submission queue.
* NOTE: host driver is expected to adapt size accordingly to actual
* submission queue size and device capabilities.
*/
u32 preemption_buffer_2_size;
- /*
+ /**
* Maximum preemption buffer size that the FW can use: no need for the host
* driver to allocate more space than that specified by these fields.
* A value of 0 means no declared limit.
*/
u32 preemption_buffer_1_max_size;
u32 preemption_buffer_2_max_size;
- /* Space reserved for future preemption-related fields. */
+ /** Space reserved for future preemption-related fields. */
u32 preemption_reserved[4];
- /* FW image read only section start address, 4KB aligned */
+ /** FW image read only section start address, 4KB aligned */
u64 ro_section_start_address;
- /* FW image read only section size, 4KB aligned */
+ /** FW image read only section size, 4KB aligned */
u32 ro_section_size;
u32 reserved;
};
-/*
+/**
* Firmware boot parameters format
*/
-#define VPU_BOOT_PLL_COUNT 3
-#define VPU_BOOT_PLL_OUT_COUNT 4
-
/** Values for boot_type field */
#define VPU_BOOT_TYPE_COLDBOOT 0
#define VPU_BOOT_TYPE_WARMBOOT 1
@@ -166,7 +173,7 @@ enum vpu_trace_destination {
#define VPU_TRACE_PROC_BIT_ACT_SHV_3 22
#define VPU_TRACE_PROC_NO_OF_HW_DEVS 23
-/* VPU 30xx HW component IDs are sequential, so define first and last IDs. */
+/** VPU 30xx HW component IDs are sequential, so define first and last IDs. */
#define VPU_TRACE_PROC_BIT_30XX_FIRST VPU_TRACE_PROC_BIT_LRT
#define VPU_TRACE_PROC_BIT_30XX_LAST VPU_TRACE_PROC_BIT_SHV_15
@@ -175,15 +182,7 @@ struct vpu_boot_l2_cache_config {
u8 cfg;
};
-struct vpu_warm_boot_section {
- u32 src;
- u32 dst;
- u32 size;
- u32 core_id;
- u32 is_clear_op;
-};
-
-/*
+/**
* When HW scheduling mode is enabled, a present period is defined.
* It will be used by VPU to swap between normal and focus priorities
* to prevent starving of normal priority band (when implemented).
@@ -206,24 +205,24 @@ struct vpu_warm_boot_section {
* Enum for dvfs_mode boot param.
*/
enum vpu_governor {
- VPU_GOV_DEFAULT = 0, /* Default Governor for the system */
- VPU_GOV_MAX_PERFORMANCE = 1, /* Maximum performance governor */
- VPU_GOV_ON_DEMAND = 2, /* On Demand frequency control governor */
- VPU_GOV_POWER_SAVE = 3, /* Power save governor */
- VPU_GOV_ON_DEMAND_PRIORITY_AWARE = 4 /* On Demand priority based governor */
+ VPU_GOV_DEFAULT = 0, /** Default Governor for the system */
+ VPU_GOV_MAX_PERFORMANCE = 1, /** Maximum performance governor */
+ VPU_GOV_ON_DEMAND = 2, /** On Demand frequency control governor */
+ VPU_GOV_POWER_SAVE = 3, /** Power save governor */
+ VPU_GOV_ON_DEMAND_PRIORITY_AWARE = 4 /** On Demand priority based governor */
};
struct vpu_boot_params {
u32 magic;
u32 vpu_id;
u32 vpu_count;
- u32 pad0[5];
- /* Clock frequencies: 0x20 - 0xFF */
+ u32 reserved_0[5];
+ /** Clock frequencies: 0x20 - 0xFF */
u32 frequency;
- u32 pll[VPU_BOOT_PLL_COUNT][VPU_BOOT_PLL_OUT_COUNT];
+ u32 reserved_1[12];
u32 perf_clk_frequency;
- u32 pad1[42];
- /* Memory regions: 0x100 - 0x1FF */
+ u32 reserved_2[42];
+ /** Memory regions: 0x100 - 0x1FF */
u64 ipc_header_area_start;
u32 ipc_header_area_size;
u64 shared_region_base;
@@ -234,41 +233,24 @@ struct vpu_boot_params {
u32 global_aliased_pio_size;
u32 autoconfig;
struct vpu_boot_l2_cache_config cache_defaults[VPU_BOOT_L2_CACHE_CFG_NUM];
- u64 global_memory_allocator_base;
- u32 global_memory_allocator_size;
+ u32 reserved_3[3];
/**
* ShaveNN FW section VPU base address
* On VPU2.7 HW this address must be within 2GB range starting from L2C_PAGE_TABLE base
*/
u64 shave_nn_fw_base;
- u64 save_restore_ret_address; /* stores the address of FW's restore entry point */
- u32 pad2[43];
- /* IRQ re-direct numbers: 0x200 - 0x2FF */
+ u64 save_restore_ret_address; /** stores the address of FW's restore entry point */
+ u32 reserved_4[43];
+ /** IRQ re-direct numbers: 0x200 - 0x2FF */
s32 watchdog_irq_mss;
s32 watchdog_irq_nce;
- /* ARM -> VPU doorbell interrupt. ARM is notifying VPU of async command or compute job. */
+ /** ARM -> VPU doorbell interrupt. ARM is notifying VPU of async command or compute job. */
u32 host_to_vpu_irq;
- /* VPU -> ARM job done interrupt. VPU is notifying ARM of compute job completion. */
+ /** VPU -> ARM job done interrupt. VPU is notifying ARM of compute job completion. */
u32 job_done_irq;
- /* VPU -> ARM IRQ line to use to request MMU update. */
- u32 mmu_update_request_irq;
- /* ARM -> VPU IRQ line to use to notify of MMU update completion. */
- u32 mmu_update_done_irq;
- /* ARM -> VPU IRQ line to use to request power level change. */
- u32 set_power_level_irq;
- /* VPU -> ARM IRQ line to use to notify of power level change completion. */
- u32 set_power_level_done_irq;
- /* VPU -> ARM IRQ line to use to notify of VPU idle state change */
- u32 set_vpu_idle_update_irq;
- /* VPU -> ARM IRQ line to use to request counter reset. */
- u32 metric_query_event_irq;
- /* ARM -> VPU IRQ line to use to notify of counter reset completion. */
- u32 metric_query_event_done_irq;
- /* VPU -> ARM IRQ line to use to notify of preemption completion. */
- u32 preemption_done_irq;
- /* Padding. */
- u32 pad3[52];
- /* Silicon information: 0x300 - 0x3FF */
+ /** Padding. */
+ u32 reserved_5[60];
+ /** Silicon information: 0x300 - 0x3FF */
u32 host_version_id;
u32 si_stepping;
u64 device_id;
@@ -294,7 +276,7 @@ struct vpu_boot_params {
u32 crit_tracing_buff_size;
u64 verbose_tracing_buff_addr;
u32 verbose_tracing_buff_size;
- u64 verbose_tracing_sw_component_mask; /* TO BE REMOVED */
+ u64 verbose_tracing_sw_component_mask; /** TO BE REMOVED */
/**
* Mask of destinations to which logging messages are delivered; bitwise OR
* of values defined in vpu_trace_destination enum.
@@ -308,11 +290,7 @@ struct vpu_boot_params {
/** Mask of trace message formats supported by the driver */
u64 tracing_buff_message_format_mask;
u64 trace_reserved_1[2];
- /**
- * Period at which the VPU reads the temp sensor values into MMIO, on
- * platforms where that is necessary (in ms). 0 to disable reads.
- */
- u32 temp_sensor_period_ms;
+ u32 reserved_6;
/** PLL ratio for efficient clock frequency */
u32 pn_freq_pll_ratio;
/**
@@ -347,11 +325,11 @@ struct vpu_boot_params {
* 1: IPC message required to save state on D0i3 entry flow.
*/
u32 d0i3_delayed_entry;
- /* Time spent by VPU in D0i3 state */
+ /** Time spent by VPU in D0i3 state */
u64 d0i3_residency_time_us;
- /* Value of VPU perf counter at the time of entering D0i3 state . */
+ /** Value of VPU perf counter at the time of entering D0i3 state . */
u64 d0i3_entry_vpu_ts;
- /*
+ /**
* The system time of the host operating system in microseconds.
* E.g the number of microseconds since 1st of January 1970, or whatever
* date the host operating system uses to maintain system time.
@@ -359,57 +337,52 @@ struct vpu_boot_params {
* The KMD is required to update this value on every VPU reset.
*/
u64 system_time_us;
- u32 pad4[2];
- /*
+ u32 reserved_7[2];
+ /**
* The delta between device monotonic time and the current value of the
* HW timestamp register, in ticks. Written by the firmware during boot.
* Can be used by the KMD to calculate device time.
*/
u64 device_time_delta_ticks;
- u32 pad7[14];
- /* Warm boot information: 0x400 - 0x43F */
- u32 warm_boot_sections_count;
- u32 warm_boot_start_address_reference;
- u32 warm_boot_section_info_address_offset;
- u32 pad5[13];
- /* Power States transitions timestamps: 0x440 - 0x46F*/
- struct {
- /* VPU_IDLE -> VPU_ACTIVE transition initiated timestamp */
+ u32 reserved_8[30];
+ /** Power States transitions timestamps: 0x440 - 0x46F*/
+ struct power_states_timestamps {
+ /** VPU_IDLE -> VPU_ACTIVE transition initiated timestamp */
u64 vpu_active_state_requested;
- /* VPU_IDLE -> VPU_ACTIVE transition completed timestamp */
+ /** VPU_IDLE -> VPU_ACTIVE transition completed timestamp */
u64 vpu_active_state_achieved;
- /* VPU_ACTIVE -> VPU_IDLE transition initiated timestamp */
+ /** VPU_ACTIVE -> VPU_IDLE transition initiated timestamp */
u64 vpu_idle_state_requested;
- /* VPU_ACTIVE -> VPU_IDLE transition completed timestamp */
+ /** VPU_ACTIVE -> VPU_IDLE transition completed timestamp */
u64 vpu_idle_state_achieved;
- /* VPU_IDLE -> VPU_STANDBY transition initiated timestamp */
+ /** VPU_IDLE -> VPU_STANDBY transition initiated timestamp */
u64 vpu_standby_state_requested;
- /* VPU_IDLE -> VPU_STANDBY transition completed timestamp */
+ /** VPU_IDLE -> VPU_STANDBY transition completed timestamp */
u64 vpu_standby_state_achieved;
} power_states_timestamps;
- /* VPU scheduling mode. Values defined by VPU_SCHEDULING_MODE_* macros. */
+ /** VPU scheduling mode. Values defined by VPU_SCHEDULING_MODE_* macros. */
u32 vpu_scheduling_mode;
- /* Present call period in milliseconds. */
+ /** Present call period in milliseconds. */
u32 vpu_focus_present_timer_ms;
- /* VPU ECC Signaling */
+ /** VPU ECC Signaling */
u32 vpu_uses_ecc_mca_signal;
- /* Values defined by POWER_PROFILE* macros */
+ /** Values defined by POWER_PROFILE* macros */
u32 power_profile;
- /* Microsecond value for DCT active cycle */
+ /** Microsecond value for DCT active cycle */
u32 dct_active_us;
- /* Microsecond value for DCT inactive cycle */
+ /** Microsecond value for DCT inactive cycle */
u32 dct_inactive_us;
- /* Unused/reserved: 0x488 - 0xFFF */
- u32 pad6[734];
+ /** Unused/reserved: 0x488 - 0xFFF */
+ u32 reserved_9[734];
};
-/* Magic numbers set between host and vpu to detect corruption of tracing init */
+/** Magic numbers set between host and vpu to detect corruption of tracing init */
#define VPU_TRACING_BUFFER_CANARY (0xCAFECAFE)
-/* Tracing buffer message format definitions */
+/** Tracing buffer message format definitions */
#define VPU_TRACING_FORMAT_STRING 0
#define VPU_TRACING_FORMAT_MIPI 2
-/*
+/**
* Header of the tracing buffer.
* The below defined header will be stored at the beginning of
* each allocated tracing buffer, followed by a series of 256b
@@ -421,53 +394,55 @@ struct vpu_tracing_buffer_header {
* @see VPU_TRACING_BUFFER_CANARY
*/
u32 host_canary_start;
- /* offset from start of buffer for trace entries */
+ /** offset from start of buffer for trace entries */
u32 read_index;
- /* keeps track of wrapping on the reader side */
+ /** keeps track of wrapping on the reader side */
u32 read_wrap_count;
u32 pad_to_cache_line_size_0[13];
- /* End of first cache line */
+ /** End of first cache line */
/**
* Magic number set by host to detect corruption
* @see VPU_TRACING_BUFFER_CANARY
*/
u32 vpu_canary_start;
- /* offset from start of buffer from write start */
+ /** offset from start of buffer from write start */
u32 write_index;
- /* counter for buffer wrapping */
+ /** counter for buffer wrapping */
u32 wrap_count;
- /* legacy field - do not use */
+ /** legacy field - do not use */
u32 reserved_0;
/**
- * Size of the log buffer include this header (@header_size) and space
- * reserved for all messages. If @alignment` is greater that 0 the @Size
- * must be multiple of @Alignment.
+ * Size of the log buffer including this header (`header_size`) and space
+ * reserved for all messages. If `alignment` is greater than 0, the `size`
+ * must be a multiple of `alignment`.
*/
u32 size;
- /* Header version */
+ /** Header version */
u16 header_version;
- /* Header size */
+ /** Header size */
u16 header_size;
- /*
+ /**
* Format of the messages in the trace buffer
* 0 - null terminated string
* 1 - size + null terminated string
* 2 - MIPI-SysT encoding
*/
u32 format;
- /*
+ /**
* Message alignment
* 0 - messages are place 1 after another
* n - every message starts and multiple on offset
*/
- u32 alignment; /* 64, 128, 256 */
- /* Name of the logging entity, i.e "LRT", "LNN", "SHV0", etc */
+ u32 alignment; /** 64, 128, 256 */
+ /** Name of the logging entity, i.e "LRT", "LNN", "SHV0", etc */
char name[16];
u32 pad_to_cache_line_size_1[4];
- /* End of second cache line */
+ /** End of second cache line */
};
#pragma pack(pop)
#endif
+
+///@}
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index bea3e9858aca..ce9e6c04897f 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -790,8 +790,11 @@ static int __init dma_resv_lockdep(void)
mmap_read_lock(mm);
ww_acquire_init(&ctx, &reservation_ww_class);
ret = dma_resv_lock(&obj, &ctx);
- if (ret == -EDEADLK)
+ if (ret) {
+ /* Only EDEADLK from the error injection is possible here */
+ WARN_ON(ret != -EDEADLK);
dma_resv_lock_slow(&obj, &ctx);
+ }
fs_reclaim_acquire(GFP_KERNEL);
/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
i_mmap_lock_write(&mapping);
diff --git a/drivers/gpu/buddy.c b/drivers/gpu/buddy.c
index b27761246d4b..da5a1222f46b 100644
--- a/drivers/gpu/buddy.c
+++ b/drivers/gpu/buddy.c
@@ -3,8 +3,7 @@
* Copyright © 2021 Intel Corporation
*/
-#include <kunit/test-bug.h>
-
+#include <linux/bug.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
@@ -12,6 +11,28 @@
#include <linux/gpu_buddy.h>
+/**
+ * gpu_buddy_assert - assert a condition in the buddy allocator
+ * @condition: condition expected to be true
+ *
+ * When CONFIG_KUNIT is enabled, evaluates @condition and, if false, triggers
+ * a WARN_ON() and also calls kunit_fail_current_test() so that any running
+ * kunit test is properly marked as failed. The stringified condition is
+ * included in the failure message for easy identification.
+ *
+ * When CONFIG_KUNIT is not enabled, this reduces to WARN_ON() so production
+ * builds retain the same warning semantics as before.
+ */
+#if IS_ENABLED(CONFIG_KUNIT)
+#include <kunit/test-bug.h>
+#define gpu_buddy_assert(condition) do { \
+ if (WARN_ON(!(condition))) \
+ kunit_fail_current_test("gpu_buddy_assert(" #condition ")"); \
+} while (0)
+#else
+#define gpu_buddy_assert(condition) WARN_ON(!(condition))
+#endif
+
static struct kmem_cache *slab_blocks;
static unsigned int
@@ -268,8 +289,8 @@ static int __force_merge(struct gpu_buddy *mm,
if (!gpu_buddy_block_is_free(buddy))
continue;
- WARN_ON(gpu_buddy_block_is_clear(block) ==
- gpu_buddy_block_is_clear(buddy));
+ gpu_buddy_assert(gpu_buddy_block_is_clear(block) !=
+ gpu_buddy_block_is_clear(buddy));
/*
* Advance to the next node when the current node is the buddy,
@@ -415,8 +436,7 @@ void gpu_buddy_fini(struct gpu_buddy *mm)
start = gpu_buddy_block_offset(mm->roots[i]);
__force_merge(mm, start, start + size, order);
- if (WARN_ON(!gpu_buddy_block_is_free(mm->roots[i])))
- kunit_fail_current_test("buddy_fini() root");
+ gpu_buddy_assert(gpu_buddy_block_is_free(mm->roots[i]));
gpu_block_free(mm, mm->roots[i]);
@@ -424,7 +444,7 @@ void gpu_buddy_fini(struct gpu_buddy *mm)
size -= root_size;
}
- WARN_ON(mm->avail != mm->size);
+ gpu_buddy_assert(mm->avail == mm->size);
for_each_free_tree(i)
kfree(mm->free_trees[i]);
@@ -541,7 +561,7 @@ static void __gpu_buddy_free_list(struct gpu_buddy *mm,
{
struct gpu_buddy_block *block, *on;
- WARN_ON(mark_dirty && mark_clear);
+ gpu_buddy_assert(!(mark_dirty && mark_clear));
list_for_each_entry_safe(block, on, objects, link) {
if (mark_clear)
diff --git a/drivers/gpu/drm/bridge/waveshare-dsi.c b/drivers/gpu/drm/bridge/waveshare-dsi.c
index 43f4e7412d72..0497c7ecbc7a 100644
--- a/drivers/gpu/drm/bridge/waveshare-dsi.c
+++ b/drivers/gpu/drm/bridge/waveshare-dsi.c
@@ -66,7 +66,12 @@ static int ws_bridge_attach_dsi(struct ws_bridge *ws)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->lanes = 2;
+ dsi->lanes = drm_of_get_data_lanes_count_ep(dev->of_node, 0, 0, 1, 4);
+ if (dsi->lanes < 0) {
+ dev_warn(dev, "Invalid or missing DSI lane count %d, falling back to 2 lanes\n",
+ dsi->lanes);
+ dsi->lanes = 2; /* Old DT backward compatibility */
+ }
ret = devm_mipi_dsi_attach(dev, dsi);
if (ret < 0)
@@ -80,11 +85,6 @@ static int ws_bridge_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct ws_bridge *ws = bridge_to_ws_bridge(bridge);
- int ret;
-
- ret = ws_bridge_attach_dsi(ws);
- if (ret)
- return ret;
return drm_bridge_attach(encoder, ws->next_bridge,
&ws->bridge, flags);
@@ -179,7 +179,7 @@ static int ws_bridge_probe(struct i2c_client *i2c)
ws->bridge.of_node = dev->of_node;
devm_drm_bridge_add(dev, &ws->bridge);
- return 0;
+ return ws_bridge_attach_dsi(ws);
}
static const struct of_device_id ws_bridge_of_ids[] = {
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index 6236ec46d62a..46c465bce98c 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -204,7 +204,7 @@ void drm_client_buffer_delete(struct drm_client_buffer *buffer)
}
EXPORT_SYMBOL(drm_client_buffer_delete);
-static struct drm_client_buffer *
+struct drm_client_buffer *
drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height,
u32 format, u32 handle, u32 pitch)
{
@@ -265,6 +265,7 @@ err_delete:
kfree(buffer);
return ERR_PTR(ret);
}
+EXPORT_SYMBOL(drm_client_buffer_create);
/**
* drm_client_buffer_vmap_local - Map DRM client buffer into address space
diff --git a/drivers/gpu/drm/gma500/fbdev.c b/drivers/gpu/drm/gma500/fbdev.c
index c26926babc2a..d1e93588234f 100644
--- a/drivers/gpu/drm/gma500/fbdev.c
+++ b/drivers/gpu/drm/gma500/fbdev.c
@@ -72,17 +72,10 @@ static int psb_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
static void psb_fbdev_fb_destroy(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
- struct drm_framebuffer *fb = fb_helper->fb;
- struct drm_gem_object *obj = fb->obj[0];
drm_fb_helper_fini(fb_helper);
- drm_framebuffer_unregister_private(fb);
- drm_framebuffer_cleanup(fb);
- kfree(fb);
-
- drm_gem_object_put(obj);
-
+ drm_client_buffer_delete(fb_helper->buffer);
drm_client_release(&fb_helper->client);
}
@@ -105,78 +98,76 @@ static const struct drm_fb_helper_funcs psb_fbdev_fb_helper_funcs = {
int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
- struct drm_device *dev = fb_helper->dev;
+ struct drm_client_dev *client = &fb_helper->client;
+ struct drm_device *dev = client->dev;
+ struct drm_file *file = client->file;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct fb_info *info = fb_helper->info;
- struct drm_framebuffer *fb;
- struct drm_mode_fb_cmd2 mode_cmd = { };
- int size;
- int ret;
+ u32 fourcc, pitch;
+ u64 size;
+ const struct drm_format_info *format;
+ struct drm_client_buffer *buffer;
struct psb_gem_object *backing;
struct drm_gem_object *obj;
- u32 bpp, depth;
+ u32 handle;
+ int ret;
/* No 24-bit packed mode */
if (sizes->surface_bpp == 24) {
sizes->surface_bpp = 32;
sizes->surface_depth = 24;
}
- bpp = sizes->surface_bpp;
- depth = sizes->surface_depth;
-
- /*
- * If the mode does not fit in 32 bit then switch to 16 bit to get
- * a console on full resolution. The X mode setting server will
- * allocate its own 32-bit GEM framebuffer.
- */
- size = ALIGN(sizes->surface_width * DIV_ROUND_UP(bpp, 8), 64) *
- sizes->surface_height;
- size = ALIGN(size, PAGE_SIZE);
-
- if (size > dev_priv->vram_stolen_size) {
- sizes->surface_bpp = 16;
- sizes->surface_depth = 16;
- }
- bpp = sizes->surface_bpp;
- depth = sizes->surface_depth;
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
- mode_cmd.pitches[0] = ALIGN(mode_cmd.width * DIV_ROUND_UP(bpp, 8), 64);
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
-
- size = mode_cmd.pitches[0] * mode_cmd.height;
- size = ALIGN(size, PAGE_SIZE);
+try_psb_gem_create:
+ fourcc = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
+ format = drm_get_format_info(dev, fourcc, DRM_FORMAT_MOD_LINEAR);
+ pitch = ALIGN(drm_format_info_min_pitch(format, 0, sizes->surface_width), SZ_64);
+ size = ALIGN(pitch * sizes->surface_height, PAGE_SIZE);
/* Allocate the framebuffer in the GTT with stolen page backing */
backing = psb_gem_create(dev, size, "fb", true, PAGE_SIZE);
- if (IS_ERR(backing))
- return PTR_ERR(backing);
+ if (IS_ERR(backing)) {
+ ret = PTR_ERR(backing);
+ if (ret == -EBUSY && sizes->surface_bpp > 16) {
+ /*
+ * If the mode does not fit in 32 bit then switch to 16 bit to
+ * get a console on full resolution. User-space compositors will
+ * allocate their own 32-bit framebuffers.
+ */
+ sizes->surface_bpp = 16;
+ sizes->surface_depth = 16;
+ goto try_psb_gem_create;
+ }
+ return ret;
+ }
obj = &backing->base;
- fb = psb_framebuffer_create(dev,
- drm_get_format_info(dev, mode_cmd.pixel_format,
- mode_cmd.modifier[0]),
- &mode_cmd, obj);
- if (IS_ERR(fb)) {
- ret = PTR_ERR(fb);
+ ret = drm_gem_handle_create(file, obj, &handle);
+ if (ret)
goto err_drm_gem_object_put;
+
+ buffer = drm_client_buffer_create(client, sizes->surface_width, sizes->surface_height,
+ fourcc, handle, pitch);
+ if (IS_ERR(buffer)) {
+ ret = PTR_ERR(buffer);
+ goto err_drm_gem_handle_delete;
}
fb_helper->funcs = &psb_fbdev_fb_helper_funcs;
- fb_helper->fb = fb;
+ fb_helper->buffer = buffer;
+ fb_helper->fb = buffer->fb;
info->fbops = &psb_fbdev_fb_ops;
/* Accessed stolen memory directly */
info->screen_base = dev_priv->vram_addr + backing->offset;
- info->screen_size = size;
+ info->screen_size = obj->size;
drm_fb_helper_fill_info(info, fb_helper, sizes);
info->fix.smem_start = dev_priv->stolen_base + backing->offset;
- info->fix.smem_len = size;
+ info->fix.smem_len = obj->size;
info->fix.ywrapstep = 0;
info->fix.ypanstep = 0;
info->fix.mmio_start = pci_resource_start(pdev, 0);
@@ -186,10 +177,18 @@ int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
- dev_dbg(dev->dev, "allocated %dx%d fb\n", fb->width, fb->height);
+ dev_dbg(dev->dev, "allocated %dx%d fb\n", buffer->fb->width, buffer->fb->height);
+
+ /* The handle is only needed for creating the framebuffer. */
+ drm_gem_handle_delete(file, handle);
+
+ /* The framebuffer still holds a reference on the GEM object. */
+ drm_gem_object_put(obj);
return 0;
+err_drm_gem_handle_delete:
+ drm_gem_handle_delete(file, handle);
err_drm_gem_object_put:
drm_gem_object_put(obj);
return ret;
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index fe1f43f0abff..37a9f666c0f2 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -12,111 +12,25 @@
#include "framebuffer.h"
#include "psb_drv.h"
-static const struct drm_framebuffer_funcs psb_fb_funcs = {
- .destroy = drm_gem_fb_destroy,
- .create_handle = drm_gem_fb_create_handle,
-};
-
-/**
- * psb_framebuffer_init - initialize a framebuffer
- * @dev: our DRM device
- * @fb: framebuffer to set up
- * @mode_cmd: mode description
- * @obj: backing object
- *
- * Configure and fill in the boilerplate for our frame buffer. Return
- * 0 on success or an error code if we fail.
- */
-static int psb_framebuffer_init(struct drm_device *dev,
- struct drm_framebuffer *fb,
- const struct drm_format_info *info,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
+static struct drm_framebuffer *
+psb_user_framebuffer_create(struct drm_device *dev, struct drm_file *filp,
+ const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *cmd)
{
- int ret;
-
/*
* Reject unknown formats, YUV formats, and formats with more than
* 4 bytes per pixel.
*/
if (!info->depth || info->cpp[0] > 4)
- return -EINVAL;
-
- if (mode_cmd->pitches[0] & 63)
- return -EINVAL;
-
- drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
- fb->obj[0] = obj;
- ret = drm_framebuffer_init(dev, fb, &psb_fb_funcs);
- if (ret) {
- dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
- return ret;
- }
- return 0;
-}
-
-/**
- * psb_framebuffer_create - create a framebuffer backed by gt
- * @dev: our DRM device
- * @info: pixel format information
- * @mode_cmd: the description of the requested mode
- * @obj: the backing object
- *
- * Create a framebuffer object backed by the gt, and fill in the
- * boilerplate required
- *
- * TODO: review object references
- */
-struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev,
- const struct drm_format_info *info,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
-{
- struct drm_framebuffer *fb;
- int ret;
-
- fb = kzalloc_obj(*fb);
- if (!fb)
- return ERR_PTR(-ENOMEM);
-
- ret = psb_framebuffer_init(dev, fb, info, mode_cmd, obj);
- if (ret) {
- kfree(fb);
- return ERR_PTR(ret);
- }
- return fb;
-}
-
-/**
- * psb_user_framebuffer_create - create framebuffer
- * @dev: our DRM device
- * @filp: client file
- * @cmd: mode request
- *
- * Create a new framebuffer backed by a userspace GEM object
- */
-static struct drm_framebuffer *psb_user_framebuffer_create
- (struct drm_device *dev, struct drm_file *filp,
- const struct drm_format_info *info,
- const struct drm_mode_fb_cmd2 *cmd)
-{
- struct drm_gem_object *obj;
- struct drm_framebuffer *fb;
+ return ERR_PTR(-EINVAL);
/*
- * Find the GEM object and thus the gtt range object that is
- * to back this space
+ * Pitch must be aligned to 64 bytes.
*/
- obj = drm_gem_object_lookup(filp, cmd->handles[0]);
- if (obj == NULL)
- return ERR_PTR(-ENOENT);
-
- /* Let the core code do all the work */
- fb = psb_framebuffer_create(dev, info, cmd, obj);
- if (IS_ERR(fb))
- drm_gem_object_put(obj);
+ if (cmd->pitches[0] & 63)
+ return ERR_PTR(-EINVAL);
- return fb;
+ return drm_gem_fb_create(dev, filp, info, cmd);
}
static const struct drm_mode_config_funcs psb_mode_funcs = {
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 0b27112ec46f..db197b865b90 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -592,12 +592,6 @@ struct psb_ops {
extern void psb_modeset_init(struct drm_device *dev);
extern void psb_modeset_cleanup(struct drm_device *dev);
-/* framebuffer */
-struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev,
- const struct drm_format_info *info,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
-
/* fbdev */
#if defined(CONFIG_DRM_FBDEV_EMULATION)
int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index 4b77be94348d..11e7441de63b 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -447,7 +447,7 @@ static void gud_fb_handle_damage(struct gud_device *gdrm, struct drm_framebuffer
}
/* Imported buffers are assumed to be WriteCombined with uncached reads */
- gud_flush_damage(gdrm, fb, src, !fb->obj[0]->import_attach, damage);
+ gud_flush_damage(gdrm, fb, src, !drm_gem_is_imported(fb->obj[0]), damage);
}
int gud_plane_atomic_check(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/imagination/pvr_power.c b/drivers/gpu/drm/imagination/pvr_power.c
index 006a72ed5064..7a8765c0c1ed 100644
--- a/drivers/gpu/drm/imagination/pvr_power.c
+++ b/drivers/gpu/drm/imagination/pvr_power.c
@@ -598,8 +598,8 @@ int pvr_power_domains_init(struct pvr_device *pvr_dev)
struct drm_device *drm_dev = from_pvr_device(pvr_dev);
struct device *dev = drm_dev->dev;
- struct device_link **domain_links __free(kfree) = NULL;
struct dev_pm_domain_list *domains = NULL;
+ struct device_link **domain_links = NULL;
int domain_count;
int link_count;
@@ -608,23 +608,30 @@ int pvr_power_domains_init(struct pvr_device *pvr_dev)
domain_count = of_count_phandle_with_args(dev->of_node, "power-domains",
"#power-domain-cells");
- if (domain_count < 0)
- return domain_count;
+ if (domain_count < 0) {
+ err = domain_count;
+ goto out;
+ }
- if (domain_count <= 1)
- return 0;
+ if (domain_count <= 1) {
+ err = 0;
+ goto out;
+ }
if (domain_count > ARRAY_SIZE(ROGUE_PD_NAMES)) {
drm_err(drm_dev, "%s() only supports %zu domains on Rogue",
__func__, ARRAY_SIZE(ROGUE_PD_NAMES));
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto out;
}
link_count = domain_count - 1;
domain_links = kzalloc_objs(*domain_links, link_count);
- if (!domain_links)
- return -ENOMEM;
+ if (!domain_links) {
+ err = -ENOMEM;
+ goto out;
+ }
const struct dev_pm_domain_attach_data pd_attach_data = {
.pd_names = ROGUE_PD_NAMES,
@@ -634,7 +641,7 @@ int pvr_power_domains_init(struct pvr_device *pvr_dev)
err = dev_pm_domain_attach_list(dev, &pd_attach_data, &domains);
if (err < 0)
- return err;
+ goto err_free_links;
for (i = 0; i < link_count; i++) {
struct device_link *link;
@@ -650,17 +657,26 @@ int pvr_power_domains_init(struct pvr_device *pvr_dev)
domain_links[i] = link;
}
- pvr_dev->power = (struct pvr_device_power){
- .domains = domains,
- .domain_links = no_free_ptr(domain_links),
- };
-
- return 0;
+ err = 0;
+ goto out;
err_unlink:
while (--i >= 0)
device_link_del(domain_links[i]);
+ dev_pm_domain_detach_list(domains);
+ domains = NULL;
+
+err_free_links:
+ kfree(domain_links);
+ domain_links = NULL;
+
+out:
+ pvr_dev->power = (struct pvr_device_power){
+ .domains = domains,
+ .domain_links = domain_links,
+ };
+
return err;
}
@@ -668,14 +684,16 @@ void pvr_power_domains_fini(struct pvr_device *pvr_dev)
{
struct pvr_device_power *pvr_power = &pvr_dev->power;
- int i = (int)pvr_power->domains->num_pds - 1;
+ if (!pvr_power->domains)
+ goto out;
- while (--i >= 0)
+ for (int i = (int)pvr_power->domains->num_pds - 2; i >= 0; --i)
device_link_del(pvr_power->domain_links[i]);
dev_pm_domain_detach_list(pvr_power->domains);
kfree(pvr_power->domain_links);
+out:
*pvr_power = (struct pvr_device_power){ 0 };
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 3c7d2e5b3850..0e8de6d4b36f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -144,7 +144,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
nouveau_bo_del_io_reserve_lru(bo);
nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
- if (bo->base.import_attach)
+ if (drm_gem_is_imported(&bo->base))
drm_prime_gem_destroy(&bo->base, bo->sg);
/*
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index f5f0e2c505b6..0fce4452dbe7 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -1814,6 +1814,13 @@ static const struct panel_delay delay_200_500_e200 = {
.enable = 200,
};
+static const struct panel_delay delay_200_500_e200_d100 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 200,
+ .disable = 100,
+};
+
static const struct panel_delay delay_200_500_e200_d200 = {
.hpd_absent = 200,
.unprepare = 500,
@@ -2014,7 +2021,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c93, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cb6, &delay_200_500_e200, "NT116WHM-N44"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cf2, &delay_200_500_e200, "NV156FHM-N4S"),
- EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cf6, &delay_200_500_e200, "NV140WUM-N64"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cf6, &delay_200_500_e200_d100, "NV140WUM-N64"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cfa, &delay_200_500_e50, "NV116WHM-A4D"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0d45, &delay_200_500_e80, "NV116WHM-N4B"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0d73, &delay_200_500_e80, "NE140WUM-N6S"),
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 822633da741e..3a7fce428898 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -702,7 +702,7 @@ static void panfrost_gem_debugfs_bo_print(struct panfrost_gem_object *bo,
resident_size,
drm_vma_node_start(&bo->base.base.vma_node));
- if (bo->base.base.import_attach)
+ if (drm_gem_is_imported(&bo->base.base))
gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_IMPORTED;
if (bo->base.base.dma_buf)
gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_EXPORTED;
diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c
index 4b4575dd6e90..6d14b0269574 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.c
+++ b/drivers/gpu/drm/panthor/panthor_gem.c
@@ -666,7 +666,7 @@ static void panthor_gem_debugfs_bo_print(struct panthor_gem_object *bo,
resident_size,
drm_vma_node_start(&bo->base.base.vma_node));
- if (bo->base.base.import_attach)
+ if (drm_gem_is_imported(&bo->base.base))
gem_state_flags |= PANTHOR_DEBUGFS_GEM_STATE_FLAG_IMPORTED;
if (bo->base.base.dma_buf)
gem_state_flags |= PANTHOR_DEBUGFS_GEM_STATE_FLAG_EXPORTED;
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index e6ee35406165..13fa55aed3da 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -1418,48 +1418,12 @@ static void drm_sched_cancel_remaining_jobs(struct drm_gpu_scheduler *sched)
*/
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
- struct drm_sched_entity *s_entity;
int i;
drm_sched_wqueue_stop(sched);
- for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
- struct drm_sched_rq *rq = sched->sched_rq[i];
-
- spin_lock(&rq->lock);
- list_for_each_entry(s_entity, &rq->entities, list) {
- /*
- * Prevents reinsertion and marks job_queue as idle,
- * it will be removed from the rq in drm_sched_entity_fini()
- * eventually
- *
- * FIXME:
- * This lacks the proper spin_lock(&s_entity->lock) and
- * is, therefore, a race condition. Most notably, it
- * can race with drm_sched_entity_push_job(). The lock
- * cannot be taken here, however, because this would
- * lead to lock inversion -> deadlock.
- *
- * The best solution probably is to enforce the life
- * time rule of all entities having to be torn down
- * before their scheduler. Then, however, locking could
- * be dropped alltogether from this function.
- *
- * For now, this remains a potential race in all
- * drivers that keep entities alive for longer than
- * the scheduler.
- *
- * The READ_ONCE() is there to make the lockless read
- * (warning about the lockless write below) slightly
- * less broken...
- */
- if (!READ_ONCE(s_entity->stopped))
- dev_warn(sched->dev, "Tearing down scheduler with active entities!\n");
- s_entity->stopped = true;
- }
- spin_unlock(&rq->lock);
+ for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++)
kfree(sched->sched_rq[i]);
- }
/* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
wake_up_all(&sched->job_scheduled);
diff --git a/drivers/gpu/drm/scheduler/tests/tests_basic.c b/drivers/gpu/drm/scheduler/tests/tests_basic.c
index 82a41a456b0a..a5a5a35a87b0 100644
--- a/drivers/gpu/drm/scheduler/tests/tests_basic.c
+++ b/drivers/gpu/drm/scheduler/tests/tests_basic.c
@@ -421,7 +421,7 @@ static void drm_sched_change_priority(struct kunit *test)
static struct kunit_case drm_sched_priority_tests[] = {
KUNIT_CASE(drm_sched_priorities),
- KUNIT_CASE(drm_sched_change_priority),
+ KUNIT_CASE_SLOW(drm_sched_change_priority),
{}
};
@@ -546,7 +546,7 @@ static void drm_sched_test_credits(struct kunit *test)
}
static struct kunit_case drm_sched_credits_tests[] = {
- KUNIT_CASE(drm_sched_test_credits),
+ KUNIT_CASE_SLOW(drm_sched_test_credits),
{}
};
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 1f93bc5a3d02..f45ba47b4ba8 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -556,7 +556,7 @@ static void vc4_free_object(struct drm_gem_object *gem_bo)
mutex_lock(&vc4->bo_lock);
/* If the object references someone else's memory, we can't cache it.
*/
- if (gem_bo->import_attach) {
+ if (drm_gem_is_imported(gem_bo)) {
vc4_bo_destroy(bo);
goto out;
}
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index ad8cbd727b80..2d3df5e621c1 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -1250,7 +1250,7 @@ int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
/* Not sure it's safe to purge imported BOs. Let's just assume it's
* not until proven otherwise.
*/
- if (gem_obj->import_attach) {
+ if (drm_gem_is_imported(gem_obj)) {
DRM_DEBUG("madvise not supported on imported BOs\n");
ret = -EINVAL;
goto out_put_gem;
diff --git a/drivers/gpu/tests/gpu_buddy_test.c b/drivers/gpu/tests/gpu_buddy_test.c
index 450e71deed90..5429010f34d3 100644
--- a/drivers/gpu/tests/gpu_buddy_test.c
+++ b/drivers/gpu/tests/gpu_buddy_test.c
@@ -910,7 +910,7 @@ static struct kunit_case gpu_buddy_tests[] = {
KUNIT_CASE(gpu_test_buddy_alloc_contiguous),
KUNIT_CASE(gpu_test_buddy_alloc_clear),
KUNIT_CASE(gpu_test_buddy_alloc_range_bias),
- KUNIT_CASE(gpu_test_buddy_fragmentation_performance),
+ KUNIT_CASE_SLOW(gpu_test_buddy_fragmentation_performance),
KUNIT_CASE(gpu_test_buddy_alloc_exceeds_max_order),
{}
};
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index c972a8a3385b..49a21f3dcb36 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -196,6 +196,9 @@ struct drm_client_buffer {
};
struct drm_client_buffer *
+drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height,
+ u32 format, u32 handle, u32 pitch);
+struct drm_client_buffer *
drm_client_buffer_create_dumb(struct drm_client_dev *client, u32 width, u32 height, u32 format);
void drm_client_buffer_delete(struct drm_client_buffer *buffer);
int drm_client_buffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect);