summaryrefslogtreecommitdiff
path: root/drivers/gpu
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2026-02-06 05:52:08 +0300
committerDave Airlie <airlied@redhat.com>2026-02-06 05:52:15 +0300
commit3c5ab2407aaa116d5c3c7b0fa8dab395ecf24aec (patch)
tree986b196dee11ac37fd3e91fa70d76cca8f2fb450 /drivers/gpu
parent1099b651ae4d1de129adc073a657c03c94ef3d22 (diff)
parent69674c1c704c0199ca7a3947f3cdcd575973175d (diff)
downloadlinux-3c5ab2407aaa116d5c3c7b0fa8dab395ecf24aec.tar.xz
Merge tag 'drm-misc-next-fixes-2026-02-05' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next
Several fixes for amdxdna around PM handling, error reporting and memory safety, a compilation fix for ilitek-ili9882t, a NULL pointer dereference fix for imx8qxp-pixel-combiner and several PTE fixes for nouveau. Signed-off-by: Dave Airlie <airlied@redhat.com> From: Maxime Ripard <mripard@redhat.com> Link: https://patch.msgid.link/20260205-refreshing-natural-vole-4c73af@houat
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c82
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h16
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9882t.c4
5 files changed, 69 insertions, 39 deletions
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
index 74eda8b54023..99a9280edb4f 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
@@ -348,7 +348,7 @@ static int imx8qxp_pc_bridge_probe(struct platform_device *pdev)
free_child:
of_node_put(child);
- if (i == 1 && pc->ch[0]->bridge.next_bridge)
+ if (i == 1 && pc->ch[0] && pc->ch[0]->bridge.next_bridge)
drm_bridge_remove(&pc->ch[0]->bridge);
pm_runtime_disable(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 0e409414f44d..1c2523e2f92e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -10,7 +10,7 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 4
-#define DRIVER_PATCHLEVEL 1
+#define DRIVER_PATCHLEVEL 2
/*
* 1.1.1:
@@ -37,6 +37,8 @@
* - implemented limited ABI16/NVIF interop
* 1.4.1:
* - add variable page sizes and compression for Turing+
+ * 1.4.2:
+ * - tell userspace LPTE/SPTE races are fixed.
*/
#include <linux/notifier.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index f95c58b67633..19a7407cf702 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -53,7 +53,7 @@ nvkm_vmm_pt_new(const struct nvkm_vmm_desc *desc, bool sparse,
}
}
- if (!(pgt = kzalloc(sizeof(*pgt) + lpte, GFP_KERNEL)))
+ if (!(pgt = kzalloc(sizeof(*pgt) + (sizeof(pgt->pte[0]) * lpte), GFP_KERNEL)))
return NULL;
pgt->page = page ? page->shift : 0;
pgt->sparse = sparse;
@@ -208,7 +208,7 @@ nvkm_vmm_unref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
*/
for (lpti = ptei >> sptb; ptes; spti = 0, lpti++) {
const u32 pten = min(sptn - spti, ptes);
- pgt->pte[lpti] -= pten;
+ pgt->pte[lpti].s.sptes -= pten;
ptes -= pten;
}
@@ -218,9 +218,9 @@ nvkm_vmm_unref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
for (ptei = pteb = ptei >> sptb; ptei < lpti; pteb = ptei) {
/* Skip over any LPTEs that still have valid SPTEs. */
- if (pgt->pte[pteb] & NVKM_VMM_PTE_SPTES) {
+ if (pgt->pte[pteb].s.sptes) {
for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
- if (!(pgt->pte[ptei] & NVKM_VMM_PTE_SPTES))
+ if (!(pgt->pte[ptei].s.sptes))
break;
}
continue;
@@ -232,24 +232,27 @@ nvkm_vmm_unref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
*
* Determine how many LPTEs need to transition state.
*/
- pgt->pte[ptei] &= ~NVKM_VMM_PTE_VALID;
+ pgt->pte[ptei].s.spte_valid = false;
for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
- if (pgt->pte[ptei] & NVKM_VMM_PTE_SPTES)
+ if (pgt->pte[ptei].s.sptes)
break;
- pgt->pte[ptei] &= ~NVKM_VMM_PTE_VALID;
+ pgt->pte[ptei].s.spte_valid = false;
}
- if (pgt->pte[pteb] & NVKM_VMM_PTE_SPARSE) {
+ if (pgt->pte[pteb].s.sparse) {
TRA(it, "LPTE %05x: U -> S %d PTEs", pteb, ptes);
pair->func->sparse(vmm, pgt->pt[0], pteb, ptes);
- } else
- if (pair->func->invalid) {
- /* If the MMU supports it, restore the LPTE to the
- * INVALID state to tell the MMU there is no point
- * trying to fetch the corresponding SPTEs.
- */
- TRA(it, "LPTE %05x: U -> I %d PTEs", pteb, ptes);
- pair->func->invalid(vmm, pgt->pt[0], pteb, ptes);
+ } else if (!pgt->pte[pteb].s.lpte_valid) {
+ if (pair->func->invalid) {
+ /* If the MMU supports it, restore the LPTE to the
+ * INVALID state to tell the MMU there is no point
+ * trying to fetch the corresponding SPTEs.
+ */
+ TRA(it, "LPTE %05x: U -> I %d PTEs", pteb, ptes);
+ pair->func->invalid(vmm, pgt->pt[0], pteb, ptes);
+ }
+ } else {
+ TRA(it, "LPTE %05x: V %d PTEs", pteb, ptes);
}
}
}
@@ -280,6 +283,15 @@ nvkm_vmm_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
if (desc->type == SPT && (pgt->refs[0] || pgt->refs[1]))
nvkm_vmm_unref_sptes(it, pgt, desc, ptei, ptes);
+ if (desc->type == LPT && (pgt->refs[0] || pgt->refs[1])) {
+ for (u32 lpti = ptei; ptes; lpti++) {
+ pgt->pte[lpti].s.lptes--;
+ if (pgt->pte[lpti].s.lptes == 0)
+ pgt->pte[lpti].s.lpte_valid = false;
+ ptes--;
+ }
+ }
+
/* PT no longer needed? Destroy it. */
if (!pgt->refs[type]) {
it->lvl++;
@@ -307,7 +319,7 @@ nvkm_vmm_ref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
*/
for (lpti = ptei >> sptb; ptes; spti = 0, lpti++) {
const u32 pten = min(sptn - spti, ptes);
- pgt->pte[lpti] += pten;
+ pgt->pte[lpti].s.sptes += pten;
ptes -= pten;
}
@@ -317,9 +329,9 @@ nvkm_vmm_ref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
for (ptei = pteb = ptei >> sptb; ptei < lpti; pteb = ptei) {
/* Skip over any LPTEs that already have valid SPTEs. */
- if (pgt->pte[pteb] & NVKM_VMM_PTE_VALID) {
+ if (pgt->pte[pteb].s.spte_valid) {
for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
- if (!(pgt->pte[ptei] & NVKM_VMM_PTE_VALID))
+ if (!pgt->pte[ptei].s.spte_valid)
break;
}
continue;
@@ -331,14 +343,16 @@ nvkm_vmm_ref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
*
* Determine how many LPTEs need to transition state.
*/
- pgt->pte[ptei] |= NVKM_VMM_PTE_VALID;
+ pgt->pte[ptei].s.spte_valid = true;
+ pgt->pte[ptei].s.lpte_valid = false;
for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
- if (pgt->pte[ptei] & NVKM_VMM_PTE_VALID)
+ if (pgt->pte[ptei].s.spte_valid)
break;
- pgt->pte[ptei] |= NVKM_VMM_PTE_VALID;
+ pgt->pte[ptei].s.spte_valid = true;
+ pgt->pte[ptei].s.lpte_valid = false;
}
- if (pgt->pte[pteb] & NVKM_VMM_PTE_SPARSE) {
+ if (pgt->pte[pteb].s.sparse) {
const u32 spti = pteb * sptn;
const u32 sptc = ptes * sptn;
/* The entire LPTE is marked as sparse, we need
@@ -374,6 +388,15 @@ nvkm_vmm_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
if (desc->type == SPT)
nvkm_vmm_ref_sptes(it, pgt, desc, ptei, ptes);
+ if (desc->type == LPT) {
+ for (u32 lpti = ptei; ptes; lpti++) {
+ pgt->pte[lpti].s.spte_valid = false;
+ pgt->pte[lpti].s.lpte_valid = true;
+ pgt->pte[lpti].s.lptes++;
+ ptes--;
+ }
+ }
+
return true;
}
@@ -386,7 +409,8 @@ nvkm_vmm_sparse_ptes(const struct nvkm_vmm_desc *desc,
pgt->pde[ptei++] = NVKM_VMM_PDE_SPARSE;
} else
if (desc->type == LPT) {
- memset(&pgt->pte[ptei], NVKM_VMM_PTE_SPARSE, ptes);
+ union nvkm_pte_tracker sparse = { .s.sparse = 1 };
+ memset32(&pgt->pte[ptei].u, sparse.u, ptes);
}
}
@@ -398,7 +422,7 @@ nvkm_vmm_sparse_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 pte
memset(&pt->pde[ptei], 0x00, sizeof(pt->pde[0]) * ptes);
else
if (it->desc->type == LPT)
- memset(&pt->pte[ptei], 0x00, sizeof(pt->pte[0]) * ptes);
+ memset32(&pt->pte[ptei].u, 0x00, ptes);
return nvkm_vmm_unref_ptes(it, pfn, ptei, ptes);
}
@@ -445,9 +469,9 @@ nvkm_vmm_ref_hwpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
* the SPTEs on some GPUs.
*/
for (ptei = pteb = 0; ptei < pten; pteb = ptei) {
- bool spte = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES;
+ bool spte = !!pgt->pte[ptei].s.sptes;
for (ptes = 1, ptei++; ptei < pten; ptes++, ptei++) {
- bool next = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES;
+ bool next = !!pgt->pte[ptei].s.sptes;
if (spte != next)
break;
}
@@ -457,11 +481,11 @@ nvkm_vmm_ref_hwpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
desc->func->sparse(vmm, pt, pteb, ptes);
else
desc->func->invalid(vmm, pt, pteb, ptes);
- memset(&pgt->pte[pteb], 0x00, ptes);
+ memset32(&pgt->pte[pteb].u, 0x00, ptes);
} else {
desc->func->unmap(vmm, pt, pteb, ptes);
while (ptes--)
- pgt->pte[pteb++] |= NVKM_VMM_PTE_VALID;
+ pgt->pte[pteb++].s.spte_valid = true;
}
}
} else {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
index 4586a425dbe4..4ec0a3a21169 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -4,6 +4,17 @@
#include <core/memory.h>
enum nvkm_memory_target;
+union nvkm_pte_tracker {
+ u32 u;
+ struct {
+ u32 sparse:1;
+ u32 spte_valid:1;
+ u32 lpte_valid:1;
+ u32 lptes:13;
+ u32 sptes:16;
+ } s;
+};
+
struct nvkm_vmm_pt {
/* Some GPUs have a mapping level with a dual page tables to
* support large and small pages in the same address-range.
@@ -44,10 +55,7 @@ struct nvkm_vmm_pt {
*
* This information is used to manage LPTE state transitions.
*/
-#define NVKM_VMM_PTE_SPARSE 0x80
-#define NVKM_VMM_PTE_VALID 0x40
-#define NVKM_VMM_PTE_SPTES 0x3f
- u8 pte[];
+ union nvkm_pte_tracker pte[];
};
typedef void (*nvkm_vmm_pxe_func)(struct nvkm_vmm *,
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
index 370424ddfc80..8b2bfb7d3638 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
@@ -88,11 +88,9 @@ static const struct drm_dsc_config tianma_il79900a_dsc = {
.native_422 = false,
.simple_422 = false,
.vbr_enable = false,
- .rc_model_size = DSC_RC_MODEL_SIZE_CONST,
.pic_width = 1600,
.pic_height = 2560,
.convert_rgb = 0,
- .vbr_enable = 0,
.rc_buf_thresh = {14, 28, 42, 56, 70, 84, 98, 105, 112, 119, 121, 123, 125, 126},
.rc_model_size = DSC_RC_MODEL_SIZE_CONST,
.rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST,
@@ -105,7 +103,6 @@ static const struct drm_dsc_config tianma_il79900a_dsc = {
.initial_offset = 6144,
.rc_quant_incr_limit0 = 11,
.rc_quant_incr_limit1 = 11,
- .nfl_bpg_offset = 1402,
.rc_range_params = {
{ 0, 4, DSC_BPG_OFFSET(2)},
{ 0, 4, DSC_BPG_OFFSET(0)},
@@ -123,7 +120,6 @@ static const struct drm_dsc_config tianma_il79900a_dsc = {
{ 9, 12, DSC_BPG_OFFSET(-12)},
{12, 13, DSC_BPG_OFFSET(-12)},
},
- .initial_scale_value = 32,
.slice_chunk_size = 800,
.initial_dec_delay = 657,
.final_offset = 4320,