author     Daniel Vetter <daniel.vetter@ffwll.ch>  2021-04-14 00:06:34 +0300
committer  Daniel Vetter <daniel.vetter@ffwll.ch>  2021-04-14 00:15:09 +0300
commit     213cc929cbfd7962164420b300f9a6c60aaff189 (patch)
tree       163d5e50169ce0399add99cba49a760214de05db /drivers/gpu
parent     cd951b3971cdc1f8c76b075f2c97ff357bf141e2 (diff)
parent     d434405aaab7d0ebc516b68a8fc4100922d7f5ef (diff)
Merge drm/drm-fixes into drm-next
The msm-next pull request has a baseline with material from -fixes, so roll that forward first. There are some simple conflicts in amdgpu, ttm and one in i915 where git gets confused and tries to add the same function twice.
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c  |   3
-rw-r--r--  drivers/gpu/drm/exynos/exynos5433_drm_decon.c  |   1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_acpi.c  |  22
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c  |   2
-rw-r--r--  drivers/gpu/drm/imx/imx-ldb.c  |  12
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c  |   4
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_power.c  |   2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c  |   2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c  | 108
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c  |   4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c  |  12
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_aux.c  |   7
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll.c  |   2
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll.h  |   6
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c  |  11
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c  |   7
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c  |  13
-rw-r--r--  drivers/gpu/drm/msm/msm_fence.c  |   2
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.h  |   8
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c  |  13
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c  |   8
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dsi.c  |   8
-rw-r--r--  drivers/gpu/drm/panel/panel-dsi-cm.c  |  12
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_encoder.c  |  31
-rw-r--r--  drivers/gpu/drm/tegra/dc.c  |  30
-rw-r--r--  drivers/gpu/drm/tegra/sor.c  |   7
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c  |  17
-rw-r--r--  drivers/gpu/drm/vc4/vc4_plane.c  |   1
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front.c  |   6
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front_conn.h  |   1
-rw-r--r--  drivers/gpu/host1x/bus.c  |  10
31 files changed, 248 insertions, 124 deletions
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 6d38c5c17f23..db69f19ab5bc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -689,7 +689,8 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
struct page **pages = pvec + pinned;
ret = pin_user_pages_fast(ptr, num_pages,
- !userptr->ro ? FOLL_WRITE : 0, pages);
+ FOLL_WRITE | FOLL_FORCE | FOLL_LONGTERM,
+ pages);
if (ret < 0) {
unpin_user_pages(pvec, pinned);
kvfree(pvec);
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index c277d2fc50c6..b9a4b7670a89 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -13,7 +13,6 @@
#include <linux/irq.h>
#include <linux/mfd/syscon.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
index e21fb14d5e07..833d0c1be4f1 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.c
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -84,13 +84,31 @@ static void intel_dsm_platform_mux_info(acpi_handle dhandle)
return;
}
+ if (!pkg->package.count) {
+ DRM_DEBUG_DRIVER("no connection in _DSM\n");
+ return;
+ }
+
connector_count = &pkg->package.elements[0];
DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
(unsigned long long)connector_count->integer.value);
for (i = 1; i < pkg->package.count; i++) {
union acpi_object *obj = &pkg->package.elements[i];
- union acpi_object *connector_id = &obj->package.elements[0];
- union acpi_object *info = &obj->package.elements[1];
+ union acpi_object *connector_id;
+ union acpi_object *info;
+
+ if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < 2) {
+ DRM_DEBUG_DRIVER("Invalid object for MUX #%d\n", i);
+ continue;
+ }
+
+ connector_id = &obj->package.elements[0];
+ info = &obj->package.elements[1];
+ if (info->type != ACPI_TYPE_BUFFER || info->buffer.length < 4) {
+ DRM_DEBUG_DRIVER("Invalid info for MUX obj #%d\n", i);
+ continue;
+ }
+
DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
(unsigned long long)connector_id->integer.value);
DRM_DEBUG_DRIVER(" port id: %s\n",
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index d1a9841adeed..e6a88c8cbd69 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -215,7 +215,7 @@ static int imx_drm_bind(struct device *dev)
ret = drmm_mode_config_init(drm);
if (ret)
- return ret;
+ goto err_kms;
ret = drm_vblank_init(drm, MAX_CRTC);
if (ret)
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index dbfe39e2f7f6..ffdc492c5bc5 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -197,6 +197,11 @@ static void imx_ldb_encoder_enable(struct drm_encoder *encoder)
int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
+ if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) {
+ dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux);
+ return;
+ }
+
drm_panel_prepare(imx_ldb_ch->panel);
if (dual) {
@@ -255,6 +260,11 @@ imx_ldb_encoder_atomic_mode_set(struct drm_encoder *encoder,
int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
u32 bus_format = imx_ldb_ch->bus_format;
+ if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) {
+ dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux);
+ return;
+ }
+
if (mode->clock > 170000) {
dev_warn(ldb->dev,
"%s: mode exceeds 170 MHz pixel clock\n", __func__);
@@ -583,7 +593,7 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
struct imx_ldb_channel *channel = &imx_ldb->channel[i];
if (!channel->ldb)
- break;
+ continue;
ret = imx_ldb_register(drm, channel);
if (ret)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 7e553d3efeb2..ce13d49e615b 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1386,8 +1386,8 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
- *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
- REG_A5XX_RBBM_PERFCTR_CP_0_HI);
+ *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
+ REG_A5XX_RBBM_ALWAYSON_COUNTER_HI);
return 0;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index 5ccc9da455a1..c35b06b46fcc 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -304,7 +304,7 @@ int a5xx_power_init(struct msm_gpu *gpu)
/* Set up the limits management */
if (adreno_is_a530(adreno_gpu))
a530_lm_setup(gpu);
- else
+ else if (adreno_is_a540(adreno_gpu))
a540_lm_setup(gpu);
/* Set up SP/TP power collpase */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 71c917f909af..91cf46f84025 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -339,7 +339,7 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
else
bit = a6xx_gmu_oob_bits[state].ack_new;
- gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, bit);
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
}
/* Enable CPU control of SPTP power power collapse */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index ba8e9d3cf0fe..d553f62f4eeb 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -522,28 +522,73 @@ static int a6xx_cp_init(struct msm_gpu *gpu)
return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
}
-static void a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
+/*
+ * Check that the microcode version is new enough to include several key
+ * security fixes. Return true if the ucode is safe.
+ */
+static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
struct drm_gem_object *obj)
{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
u32 *buf = msm_gem_get_vaddr(obj);
+ bool ret = false;
if (IS_ERR(buf))
- return;
+ return false;
/*
- * If the lowest nibble is 0xa that is an indication that this microcode
- * has been patched. The actual version is in dword [3] but we only care
- * about the patchlevel which is the lowest nibble of dword [3]
- *
- * Otherwise check that the firmware is greater than or equal to 1.90
- * which was the first version that had this fix built in
+ * Targets up to a640 (a618, a630 and a640) need to check for a
+ * microcode version that is patched to support the whereami opcode or
+ * one that is new enough to include it by default.
*/
- if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
- a6xx_gpu->has_whereami = true;
- else if ((buf[0] & 0xfff) > 0x190)
- a6xx_gpu->has_whereami = true;
+ if (adreno_is_a618(adreno_gpu) || adreno_is_a630(adreno_gpu) ||
+ adreno_is_a640(adreno_gpu)) {
+ /*
+ * If the lowest nibble is 0xa that is an indication that this
+ * microcode has been patched. The actual version is in dword
+ * [3] but we only care about the patchlevel which is the lowest
+ * nibble of dword [3]
+ *
+ * Otherwise check that the firmware is greater than or equal
+ * to 1.90 which was the first version that had this fix built
+ * in
+ */
+ if ((((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) ||
+ (buf[0] & 0xfff) >= 0x190) {
+ a6xx_gpu->has_whereami = true;
+ ret = true;
+ goto out;
+ }
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "a630 SQE ucode is too old. Have version %x need at least %x\n",
+ buf[0] & 0xfff, 0x190);
+ } else {
+ /*
+ * a650 tier targets don't need whereami but still need to be
+ * equal to or newer than 0.95 for other security fixes
+ */
+ if (adreno_is_a650(adreno_gpu)) {
+ if ((buf[0] & 0xfff) >= 0x095) {
+ ret = true;
+ goto out;
+ }
+
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "a650 SQE ucode is too old. Have version %x need at least %x\n",
+ buf[0] & 0xfff, 0x095);
+ }
+
+ /*
+ * When a660 is added those targets should return true here
+ * since those have all the critical security fixes built in
+ * from the start
+ */
+ }
+out:
msm_gem_put_vaddr(obj);
+ return ret;
}
static int a6xx_ucode_init(struct msm_gpu *gpu)
@@ -566,7 +611,13 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
}
msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
- a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo);
+ if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) {
+ msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+ drm_gem_object_put(a6xx_gpu->sqe_bo);
+
+ a6xx_gpu->sqe_bo = NULL;
+ return -EPERM;
+ }
}
gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
@@ -1177,8 +1228,8 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
/* Force the GPU power on so we can read this register */
a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
- *value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
- REG_A6XX_RBBM_PERFCTR_CP_0_HI);
+ *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+ REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
mutex_unlock(&perfcounter_oob);
@@ -1350,35 +1401,26 @@ static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu,
u32 revn)
{
struct opp_table *opp_table;
- struct nvmem_cell *cell;
u32 supp_hw = UINT_MAX;
- void *buf;
+ u16 speedbin;
+ int ret;
- cell = nvmem_cell_get(dev, "speed_bin");
+ ret = nvmem_cell_read_u16(dev, "speed_bin", &speedbin);
/*
* -ENOENT means that the platform doesn't support speedbin which is
* fine
*/
- if (PTR_ERR(cell) == -ENOENT)
+ if (ret == -ENOENT) {
return 0;
- else if (IS_ERR(cell)) {
- DRM_DEV_ERROR(dev,
- "failed to read speed-bin. Some OPPs may not be supported by hardware");
- goto done;
- }
-
- buf = nvmem_cell_read(cell, NULL);
- if (IS_ERR(buf)) {
- nvmem_cell_put(cell);
+ } else if (ret) {
DRM_DEV_ERROR(dev,
- "failed to read speed-bin. Some OPPs may not be supported by hardware");
+ "failed to read speed-bin (%d). Some OPPs may not be supported by hardware",
+ ret);
goto done;
}
+ speedbin = le16_to_cpu(speedbin);
- supp_hw = fuse_to_supp_hw(dev, revn, *((u32 *) buf));
-
- kfree(buf);
- nvmem_cell_put(cell);
+ supp_hw = fuse_to_supp_hw(dev, revn, speedbin);
done:
opp_table = dev_pm_opp_set_supported_hw(dev, &supp_hw, 1);
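For reference, a6xx_ucode_check_version() above keys entirely off the low bits of the first SQE firmware dwords. A minimal sketch of the a618/a630/a640 decode described in the comments (helper name assumed; the real function also covers a650 and reports errors):

	#include <linux/types.h>

	/* Sketch: is the SQE microcode new enough to have the whereami fix? */
	static bool sqe_has_whereami_fix(const u32 *buf)
	{
		/* a low nibble of 0xa marks a patched build; the patchlevel is
		 * the low nibble of the dword the comment calls dword [3]
		 */
		if ((buf[0] & 0xf) == 0xa)
			return (buf[2] & 0xf) >= 1;

		/* otherwise require at least version 1.90, packed as 0x190 */
		return (buf[0] & 0xfff) >= 0x190;
	}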
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 8981cfa9dbc3..92e6f1b94738 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -496,7 +496,9 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
DPU_REG_WRITE(c, CTL_TOP, mode_sel);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
- DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, BIT(cfg->merge_3d - MERGE_3D_0));
+ if (cfg->merge_3d)
+ DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
+ BIT(cfg->merge_3d - MERGE_3D_0));
}
static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 5a8e3e1fc48c..85f2c3564c96 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -43,6 +43,8 @@
#define DPU_DEBUGFS_DIR "msm_dpu"
#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
+#define MIN_IB_BW 400000000ULL /* Min ib vote 400MB */
+
static int dpu_kms_hw_init(struct msm_kms *kms);
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
@@ -931,6 +933,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
DPU_DEBUG("REG_DMA is not defined");
}
+ if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss"))
+ dpu_kms_parse_data_bus_icc_path(dpu_kms);
+
pm_runtime_get_sync(&dpu_kms->pdev->dev);
dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
@@ -1032,9 +1037,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_vbif_init_memtypes(dpu_kms);
- if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss"))
- dpu_kms_parse_data_bus_icc_path(dpu_kms);
-
pm_runtime_put_sync(&dpu_kms->pdev->dev);
return 0;
@@ -1191,10 +1193,10 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
ddev = dpu_kms->dev;
+ WARN_ON(!(dpu_kms->num_paths));
/* Min vote of BW is required before turning on AXI clk */
for (i = 0; i < dpu_kms->num_paths; i++)
- icc_set_bw(dpu_kms->path[i], 0,
- dpu_kms->catalog->perf.min_dram_ib);
+ icc_set_bw(dpu_kms->path[i], 0, Bps_to_icc(MIN_IB_BW));
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
if (rc) {
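On the interconnect vote added above: MIN_IB_BW is 400,000,000 bytes per second, and Bps_to_icc() converts that to the interconnect framework's kBps unit, so dpu_runtime_resume() now programs a fixed floor of 400,000 kBps (400 MB/s) on each path before enabling the AXI clock, instead of the catalog's per-SoC min_dram_ib value.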
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index 1c6e1d2b947c..7c22bfe0fc7d 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -32,6 +32,8 @@ struct dp_aux_private {
struct drm_dp_aux dp_aux;
};
+#define MAX_AUX_RETRIES 5
+
static const char *dp_aux_get_error(u32 aux_error)
{
switch (aux_error) {
@@ -377,6 +379,11 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
ret = dp_aux_cmd_fifo_tx(aux, msg);
if (ret < 0) {
+ if (aux->native) {
+ aux->retry_cnt++;
+ if (!(aux->retry_cnt % MAX_AUX_RETRIES))
+ dp_catalog_aux_update_cfg(aux->catalog);
+ }
usleep_range(400, 500); /* at least 400us to next try */
goto unlock_exit;
}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
index a45fe95aff49..3dc65877fa10 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
@@ -163,7 +163,7 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
break;
case MSM_DSI_PHY_7NM:
case MSM_DSI_PHY_7NM_V4_1:
- pll = msm_dsi_pll_7nm_init(pdev, id);
+ pll = msm_dsi_pll_7nm_init(pdev, type, id);
break;
default:
pll = ERR_PTR(-ENXIO);
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
index 3405982a092c..bbecb1de5678 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
@@ -117,10 +117,12 @@ msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
}
#endif
#ifdef CONFIG_DRM_MSM_DSI_7NM_PHY
-struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id);
+struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev,
+ enum msm_dsi_phy_type type, int id);
#else
static inline struct msm_dsi_pll *
-msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
+msm_dsi_pll_7nm_init(struct platform_device *pdev,
+ enum msm_dsi_phy_type type, int id)
{
return ERR_PTR(-ENODEV);
}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c
index 93bf142e4a4e..e29b3bfd63d1 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c
@@ -325,7 +325,7 @@ static void dsi_pll_commit(struct dsi_pll_7nm *pll)
pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1, reg->frac_div_start_low);
pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1, reg->frac_div_start_mid);
pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1, reg->frac_div_start_high);
- pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, reg->pll_lockdet_rate);
pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
pll_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, 0x10); /* TODO: 0x00 for CPHY */
pll_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, reg->pll_clock_inverters);
@@ -509,6 +509,7 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
{
struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+ struct dsi_pll_config *config = &pll_7nm->pll_configuration;
void __iomem *base = pll_7nm->mmio;
u64 ref_clk = pll_7nm->vco_ref_clk_rate;
u64 vco_rate = 0x0;
@@ -529,9 +530,8 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
/*
* TODO:
* 1. Assumes prescaler is disabled
- * 2. Multiplier is 2^18. it should be 2^(num_of_frac_bits)
*/
- multiplier = 1 << 18;
+ multiplier = 1 << config->frac_bits;
pll_freq = dec * (ref_clk * 2);
tmp64 = (ref_clk * 2 * frac);
pll_freq += div_u64(tmp64, multiplier);
@@ -852,7 +852,8 @@ err_base_clk_hw:
return ret;
}
-struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
+struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev,
+ enum msm_dsi_phy_type type, int id)
{
struct dsi_pll_7nm *pll_7nm;
struct msm_dsi_pll *pll;
@@ -885,7 +886,7 @@ struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
pll = &pll_7nm->base;
pll->min_rate = 1000000000UL;
pll->max_rate = 3500000000UL;
- if (pll->type == MSM_DSI_PHY_7NM_V4_1) {
+ if (type == MSM_DSI_PHY_7NM_V4_1) {
pll->min_rate = 600000000UL;
pll->max_rate = (unsigned long)5000000000ULL;
/* workaround for max rate overflowing on 32-bit builds: */
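The recalc_rate fix above replaces the hard-coded 2^18 fractional scale with the per-configuration frac_bits. The arithmetic, pulled out into a standalone sketch (function name is illustrative; prescaler assumed disabled, matching the TODO in the hunk):

	#include <linux/math64.h>

	/* Sketch: VCO rate from the decimal and fractional multipliers. */
	static u64 dsi_7nm_vco_rate(u64 ref_clk, u32 dec, u32 frac, u32 frac_bits)
	{
		u64 pll_freq = dec * (ref_clk * 2);

		/* fractional part is scaled by 2^frac_bits, not a fixed 2^18 */
		pll_freq += div_u64(ref_clk * 2 * frac, 1U << frac_bits);
		return pll_freq;
	}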
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index e9c6544b6a01..fab09e7c6efc 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -57,10 +57,13 @@ static void vblank_put(struct msm_kms *kms, unsigned crtc_mask)
static void lock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
{
+ int crtc_index;
struct drm_crtc *crtc;
- for_each_crtc_mask(kms->dev, crtc, crtc_mask)
- mutex_lock(&kms->commit_lock[drm_crtc_index(crtc)]);
+ for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
+ crtc_index = drm_crtc_index(crtc);
+ mutex_lock_nested(&kms->commit_lock[crtc_index], crtc_index);
+ }
}
static void unlock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
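The lock_crtcs() change above, together with the msm_kms.h hunk further down, drops the per-lock lockdep keys and instead passes the CRTC index to mutex_lock_nested() as the lockdep subclass, so taking several same-class commit locks in one loop is no longer flagged as recursive locking. The idiom in isolation (names and sizes are illustrative):

	#include <linux/mutex.h>

	#define N_LOCKS 8

	static struct mutex locks[N_LOCKS];

	/* Sketch: acquire a set of same-class mutexes in index order, annotating
	 * each acquisition with its index so lockdep treats them as distinct
	 * subclasses rather than one lock taken recursively.
	 */
	static void lock_all(unsigned long mask)
	{
		int i;

		for (i = 0; i < N_LOCKS; i++)
			if (mask & (1UL << i))
				mutex_lock_nested(&locks[i], i);
	}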
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 94525ac76d4e..196907689c82 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -570,6 +570,7 @@ err_free_priv:
kfree(priv);
err_put_drm_dev:
drm_dev_put(ddev);
+ platform_set_drvdata(pdev, NULL);
return ret;
}
@@ -1072,6 +1073,10 @@ static int __maybe_unused msm_pm_resume(struct device *dev)
static int __maybe_unused msm_pm_prepare(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
+ struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;
+
+ if (!priv || !priv->kms)
+ return 0;
return drm_mode_config_helper_suspend(ddev);
}
@@ -1079,6 +1084,10 @@ static int __maybe_unused msm_pm_prepare(struct device *dev)
static void __maybe_unused msm_pm_complete(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
+ struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;
+
+ if (!priv || !priv->kms)
+ return;
drm_mode_config_helper_resume(ddev);
}
@@ -1311,6 +1320,10 @@ static int msm_pdev_remove(struct platform_device *pdev)
static void msm_pdev_shutdown(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
+ struct msm_drm_private *priv = drm ? drm->dev_private : NULL;
+
+ if (!priv || !priv->kms)
+ return;
drm_atomic_helper_shutdown(drm);
}
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index ad2703698b05..cd59a5918038 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -45,7 +45,7 @@ int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
int ret;
if (fence > fctx->last_fence) {
- DRM_ERROR("%s: waiting on invalid fence: %u (of %u)\n",
+ DRM_ERROR_RATELIMITED("%s: waiting on invalid fence: %u (of %u)\n",
fctx->name, fence, fctx->last_fence);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 4735251a394d..d8151a89e163 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -157,7 +157,6 @@ struct msm_kms {
* from the crtc's pending_timer close to end of the frame:
*/
struct mutex commit_lock[MAX_CRTCS];
- struct lock_class_key commit_lock_keys[MAX_CRTCS];
unsigned pending_crtc_mask;
struct msm_pending_timer pending_timers[MAX_CRTCS];
};
@@ -167,11 +166,8 @@ static inline int msm_kms_init(struct msm_kms *kms,
{
unsigned i, ret;
- for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++) {
- lockdep_register_key(&kms->commit_lock_keys[i]);
- __mutex_init(&kms->commit_lock[i], "&kms->commit_lock[i]",
- &kms->commit_lock_keys[i]);
- }
+ for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++)
+ mutex_init(&kms->commit_lock[i]);
kms->funcs = funcs;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 196612addfd6..1c9c0cdf85db 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -2693,9 +2693,20 @@ nv50_display_create(struct drm_device *dev)
else
nouveau_display(dev)->format_modifiers = disp50xx_modifiers;
- if (disp->disp->object.oclass >= GK104_DISP) {
+ /* FIXME: 256x256 cursors are supported on Kepler, however unlike Maxwell and later
+ * generations Kepler requires that we use small pages (4K) for cursor scanout surfaces. The
+ * proper fix for this is to teach nouveau to migrate fbs being used for the cursor plane to
+ * small page allocations in prepare_fb(). When this is implemented, we should also force
+ * large pages (128K) for ovly fbs in order to fix Kepler ovlys.
+ * But until then, just limit cursors to 128x128 - which is small enough to avoid ever using
+ * large pages.
+ */
+ if (disp->disp->object.oclass >= GM107_DISP) {
dev->mode_config.cursor_width = 256;
dev->mode_config.cursor_height = 256;
+ } else if (disp->disp->object.oclass >= GK104_DISP) {
+ dev->mode_config.cursor_width = 128;
+ dev->mode_config.cursor_height = 128;
} else {
dev->mode_config.cursor_width = 64;
dev->mode_config.cursor_height = 64;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 2d5d68fc15c2..3e09df0472ce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -548,6 +548,10 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
if (!ttm_dma)
return;
+ if (!ttm_dma->pages) {
+ NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
+ return;
+ }
/* Don't waste time looping if the object is coherent */
if (nvbo->force_coherent)
@@ -580,6 +584,10 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
if (!ttm_dma)
return;
+ if (!ttm_dma->pages) {
+ NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
+ return;
+ }
/* Don't waste time looping if the object is coherent */
if (nvbo->force_coherent)
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 022a8d58e83a..5f1722b040f4 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -2149,12 +2149,12 @@ static int dsi_vc_send_short(struct dsi_data *dsi, int vc,
const struct mipi_dsi_msg *msg)
{
struct mipi_dsi_packet pkt;
- int err;
+ int ret;
u32 r;
- err = mipi_dsi_create_packet(&pkt, msg);
- if (err)
- return err;
+ ret = mipi_dsi_create_packet(&pkt, msg);
+ if (ret < 0)
+ return ret;
WARN_ON(!dsi_bus_is_locked(dsi));
diff --git a/drivers/gpu/drm/panel/panel-dsi-cm.c b/drivers/gpu/drm/panel/panel-dsi-cm.c
index af381d756ac1..5fbfb71ca3d9 100644
--- a/drivers/gpu/drm/panel/panel-dsi-cm.c
+++ b/drivers/gpu/drm/panel/panel-dsi-cm.c
@@ -37,6 +37,7 @@ struct dsic_panel_data {
u32 height_mm;
u32 max_hs_rate;
u32 max_lp_rate;
+ bool te_support;
};
struct panel_drv_data {
@@ -334,9 +335,11 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
if (r)
goto err;
- r = mipi_dsi_dcs_set_tear_on(ddata->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (r)
- goto err;
+ if (ddata->panel_data->te_support) {
+ r = mipi_dsi_dcs_set_tear_on(ddata->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (r)
+ goto err;
+ }
/* possible panel bug */
msleep(100);
@@ -619,6 +622,7 @@ static const struct dsic_panel_data taal_data = {
.height_mm = 0,
.max_hs_rate = 300000000,
.max_lp_rate = 10000000,
+ .te_support = true,
};
static const struct dsic_panel_data himalaya_data = {
@@ -629,6 +633,7 @@ static const struct dsic_panel_data himalaya_data = {
.height_mm = 88,
.max_hs_rate = 300000000,
.max_lp_rate = 10000000,
+ .te_support = false,
};
static const struct dsic_panel_data droid4_data = {
@@ -639,6 +644,7 @@ static const struct dsic_panel_data droid4_data = {
.height_mm = 89,
.max_hs_rate = 300000000,
.max_lp_rate = 10000000,
+ .te_support = false,
};
static const struct of_device_id dsicm_of_match[] = {
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index ba8c6038cd63..ca3761772211 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -48,21 +48,12 @@ static unsigned int rcar_du_encoder_count_ports(struct device_node *node)
static const struct drm_encoder_funcs rcar_du_encoder_funcs = {
};
-static void rcar_du_encoder_release(struct drm_device *dev, void *res)
-{
- struct rcar_du_encoder *renc = res;
-
- drm_encoder_cleanup(&renc->base);
- kfree(renc);
-}
-
int rcar_du_encoder_init(struct rcar_du_device *rcdu,
enum rcar_du_output output,
struct device_node *enc_node)
{
struct rcar_du_encoder *renc;
struct drm_bridge *bridge;
- int ret;
/*
* Locate the DRM bridge from the DT node. For the DPAD outputs, if the
@@ -101,26 +92,16 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
return -ENOLINK;
}
- renc = kzalloc(sizeof(*renc), GFP_KERNEL);
- if (renc == NULL)
- return -ENOMEM;
-
- renc->output = output;
-
dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n",
enc_node, output);
- ret = drm_encoder_init(&rcdu->ddev, &renc->base, &rcar_du_encoder_funcs,
- DRM_MODE_ENCODER_NONE, NULL);
- if (ret < 0) {
- kfree(renc);
- return ret;
- }
+ renc = drmm_encoder_alloc(&rcdu->ddev, struct rcar_du_encoder, base,
+ &rcar_du_encoder_funcs, DRM_MODE_ENCODER_NONE,
+ NULL);
+ if (!renc)
+ return -ENOMEM;
- ret = drmm_add_action_or_reset(&rcdu->ddev, rcar_du_encoder_release,
- renc);
- if (ret)
- return ret;
+ renc->output = output;
/*
* Attach the bridge to the encoder. The bridge will create the
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index da6afe7f0c7d..c9385cfd0fc1 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1700,6 +1700,11 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
dev_err(dc->dev,
"failed to set clock rate to %lu Hz\n",
state->pclk);
+
+ err = clk_set_rate(dc->clk, state->pclk);
+ if (err < 0)
+ dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n",
+ dc->clk, state->pclk, err);
}
DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk),
@@ -1710,11 +1715,6 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1;
tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
}
-
- err = clk_set_rate(dc->clk, state->pclk);
- if (err < 0)
- dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n",
- dc->clk, state->pclk, err);
}
static void tegra_dc_stop(struct tegra_dc *dc)
@@ -2513,22 +2513,18 @@ static int tegra_dc_couple(struct tegra_dc *dc)
* POWER_CONTROL registers during CRTC enabling.
*/
if (dc->soc->coupled_pm && dc->pipe == 1) {
- u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
- struct device_link *link;
- struct device *partner;
+ struct device *companion;
+ struct tegra_dc *parent;
- partner = driver_find_device(dc->dev->driver, NULL, NULL,
- tegra_dc_match_by_pipe);
- if (!partner)
+ companion = driver_find_device(dc->dev->driver, NULL, (const void *)0,
+ tegra_dc_match_by_pipe);
+ if (!companion)
return -EPROBE_DEFER;
- link = device_link_add(dc->dev, partner, flags);
- if (!link) {
- dev_err(dc->dev, "failed to link controllers\n");
- return -EINVAL;
- }
+ parent = dev_get_drvdata(companion);
+ dc->client.parent = &parent->client;
- dev_dbg(dc->dev, "coupled to %s\n", dev_name(partner));
+ dev_dbg(dc->dev, "coupled to %s\n", dev_name(companion));
}
return 0;
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index f02a035dda45..7b88261f57bb 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -3115,6 +3115,12 @@ static int tegra_sor_init(struct host1x_client *client)
* kernel is possible.
*/
if (sor->rst) {
+ err = pm_runtime_resume_and_get(sor->dev);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to get runtime PM: %d\n", err);
+ return err;
+ }
+
err = reset_control_acquire(sor->rst);
if (err < 0) {
dev_err(sor->dev, "failed to acquire SOR reset: %d\n",
@@ -3148,6 +3154,7 @@ static int tegra_sor_init(struct host1x_client *client)
}
reset_control_release(sor->rst);
+ pm_runtime_put(sor->dev);
}
err = clk_prepare_enable(sor->clk_safe);
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 269390bc586e..76657dcdf9b0 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -210,6 +210,7 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
{
const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc);
const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
+ struct vc4_dev *vc4 = to_vc4_dev(vc4_crtc->base.dev);
u32 fifo_len_bytes = pv_data->fifo_depth;
/*
@@ -238,6 +239,22 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
if (crtc_data->hvs_output == 5)
return 32;
+ /*
+ * It looks like in some situations, we will overflow
+ * the PixelValve FIFO (with the bit 10 of PV stat being
+ * set) and stall the HVS / PV, eventually resulting in
+ * a page flip timeout.
+ *
+ * Displaying the video overlay during a playback with
+ * Kodi on an RPi3 seems to be a great solution with a
+ * failure rate around 50%.
+ *
+ * Removing 1 from the FIFO full level however
+ * seems to completely remove that issue.
+ */
+ if (!vc4->hvs->hvs5)
+ return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX - 1;
+
return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX;
}
}
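A rough feel for the vc4_crtc.c change above: assuming HVS_FIFO_LATENCY_PIX is 6 (as defined elsewhere in this file) and taking a hypothetical fifo_depth of 64 entries, the non-HVS5 full level drops from 64 - 3*6 = 46 to 45; that single entry of headroom is what avoids the PV FIFO overflow described in the comment.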
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index c76e73a452e0..19161b6ab27f 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -1150,7 +1150,6 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
plane->state->src_y = new_plane_state->src_y;
plane->state->src_w = new_plane_state->src_w;
plane->state->src_h = new_plane_state->src_h;
- plane->state->src_h = new_plane_state->src_h;
plane->state->alpha = new_plane_state->alpha;
plane->state->pixel_blend_mode = new_plane_state->pixel_blend_mode;
plane->state->rotation = new_plane_state->rotation;
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 30d9adf31c84..9f14d99c763c 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -521,7 +521,7 @@ static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
if (IS_ERR(drm_dev)) {
ret = PTR_ERR(drm_dev);
- goto fail;
+ goto fail_dev;
}
drm_info->drm_dev = drm_dev;
@@ -551,8 +551,10 @@ fail_modeset:
drm_kms_helper_poll_fini(drm_dev);
drm_mode_config_cleanup(drm_dev);
drm_dev_put(drm_dev);
-fail:
+fail_dev:
kfree(drm_info);
+ front_info->drm_info = NULL;
+fail:
return ret;
}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_conn.h b/drivers/gpu/drm/xen/xen_drm_front_conn.h
index 3adacba9a23b..e5f4314899ee 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_conn.h
+++ b/drivers/gpu/drm/xen/xen_drm_front_conn.h
@@ -16,7 +16,6 @@
struct drm_connector;
struct xen_drm_front_drm_info;
-struct xen_drm_front_drm_info;
int xen_drm_front_conn_init(struct xen_drm_front_drm_info *drm_info,
struct drm_connector *connector);
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 347fb962b6c9..68a766ff0e9d 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -705,8 +705,9 @@ void host1x_driver_unregister(struct host1x_driver *driver)
EXPORT_SYMBOL(host1x_driver_unregister);
/**
- * host1x_client_register() - register a host1x client
+ * __host1x_client_register() - register a host1x client
* @client: host1x client
+ * @key: lock class key for the client-specific mutex
*
* Registers a host1x client with each host1x controller instance. Note that
* each client will only match their parent host1x controller and will only be
@@ -715,13 +716,14 @@ EXPORT_SYMBOL(host1x_driver_unregister);
* device and call host1x_device_init(), which will in turn call each client's
* &host1x_client_ops.init implementation.
*/
-int host1x_client_register(struct host1x_client *client)
+int __host1x_client_register(struct host1x_client *client,
+ struct lock_class_key *key)
{
struct host1x *host1x;
int err;
INIT_LIST_HEAD(&client->list);
- mutex_init(&client->lock);
+ __mutex_init(&client->lock, "host1x client lock", key);
client->usecount = 0;
mutex_lock(&devices_lock);
@@ -742,7 +744,7 @@ int host1x_client_register(struct host1x_client *client)
return 0;
}
-EXPORT_SYMBOL(host1x_client_register);
+EXPORT_SYMBOL(__host1x_client_register);
/**
* host1x_client_unregister() - unregister a host1x client
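The rename to __host1x_client_register() above is normally paired with a wrapper macro on the header side so every registration call site supplies its own static lock class key; roughly this idiom (a sketch of the usual pattern, the header change itself is outside this drivers/gpu-limited diff):

	/* Sketch: per-call-site lock class key for the client mutex. */
	#define host1x_client_register(client)				\
		({							\
			static struct lock_class_key __key;		\
			__host1x_client_register(client, &__key);	\
		})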