Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/subdev/gsp')
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c | 42
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c | 35
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c | 358
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h | 41
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c | 49
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c | 189
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c | 44
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h | 70
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c | 87
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h | 55
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c | 33
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c | 33
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild | 25
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c | 112
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c | 202
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c | 46
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c | 45
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c | 93
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c | 148
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c | 1793
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c | 327
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c | 616
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c | 356
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c) | 1559
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c | 45
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c | 45
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c | 45
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h | 36
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h | 29
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h | 741
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h | 260
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h | 47
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h | 106
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h | 350
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gr.h | 73
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h | 825
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h | 53
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/rpcfn.h | 225
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h | 132
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c | 44
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c | 52
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c | 691
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c | 191
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c | 263
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c | 149
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c | 217
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c | 191
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c | 216
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h | 355
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h | 318
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fifo.h | 213
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h | 79
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h | 634
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h | 57
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h | 249
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c | 99
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 191
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c | 271
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c | 20
88 files changed, 13098 insertions, 1440 deletions
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
index 16bf2f1bb780..e9c948b67bbd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
@@ -7,6 +7,9 @@ nvkm-y += nvkm/subdev/gsp/tu102.o
nvkm-y += nvkm/subdev/gsp/tu116.o
nvkm-y += nvkm/subdev/gsp/ga100.o
nvkm-y += nvkm/subdev/gsp/ga102.o
+nvkm-y += nvkm/subdev/gsp/gh100.o
nvkm-y += nvkm/subdev/gsp/ad102.o
+nvkm-y += nvkm/subdev/gsp/gb100.o
+nvkm-y += nvkm/subdev/gsp/gb202.o
-nvkm-y += nvkm/subdev/gsp/r535.o
+include $(src)/nvkm/subdev/gsp/rm/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
index c849c6299c52..eb765da0876e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
@@ -22,30 +22,27 @@
#include "priv.h"
static const struct nvkm_gsp_func
-ad102_gsp_r535_113_01 = {
+ad102_gsp = {
.flcn = &ga102_gsp_flcn,
.fwsec = &ga102_gsp_fwsec,
.sig_section = ".fwsignature_ad10x",
- .wpr_heap.os_carveout_size = 20 << 20,
- .wpr_heap.base_size = 8 << 20,
- .wpr_heap.min_size = 84 << 20,
-
.booter.ctor = ga102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
- .init = r535_gsp_init,
- .fini = r535_gsp_fini,
+ .init = tu102_gsp_init,
+ .fini = tu102_gsp_fini,
.reset = ga102_gsp_reset,
- .rm = &r535_gsp_rm,
+ .rm.gpu = &ad10x_gpu,
};
static struct nvkm_gsp_fwif
ad102_gsps[] = {
- { 0, r535_gsp_load, &ad102_gsp_r535_113_01, "535.113.01", true },
+ { 1, tu102_gsp_load, &ad102_gsp, &r570_rm_ga102, "570.144", true },
+ { 0, tu102_gsp_load, &ad102_gsp, &r535_rm_ga102, "535.113.01", true },
{}
};
@@ -55,3 +52,15 @@ ad102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
{
return nvkm_gsp_new_(ad102_gsps, device, type, inst, pgsp);
}
+
+NVKM_GSP_FIRMWARE_BOOTER(ad102, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ad103, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ad104, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ad106, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ad107, 535.113.01);
+
+NVKM_GSP_FIRMWARE_BOOTER(ad102, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ad103, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ad104, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ad106, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ad107, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
index da1bebb896f7..d23243a83a4c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
@@ -52,7 +52,7 @@ nvkm_gsp_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_gsp *gsp = nvkm_gsp(subdev);
- if (!gsp->func->fini)
+ if (!gsp->func->fini || !gsp->running)
return 0;
return gsp->func->fini(gsp, suspend);
@@ -80,6 +80,21 @@ nvkm_gsp_oneinit(struct nvkm_subdev *subdev)
return gsp->func->oneinit(gsp);
}
+void
+nvkm_gsp_dtor_fws(struct nvkm_gsp *gsp)
+{
+ nvkm_firmware_put(gsp->fws.fmc);
+ gsp->fws.fmc = NULL;
+ nvkm_firmware_put(gsp->fws.bl);
+ gsp->fws.bl = NULL;
+ nvkm_firmware_put(gsp->fws.booter.unload);
+ gsp->fws.booter.unload = NULL;
+ nvkm_firmware_put(gsp->fws.booter.load);
+ gsp->fws.booter.load = NULL;
+ nvkm_firmware_put(gsp->fws.rm);
+ gsp->fws.rm = NULL;
+}
+
static void *
nvkm_gsp_dtor(struct nvkm_subdev *subdev)
{
@@ -89,6 +104,7 @@ nvkm_gsp_dtor(struct nvkm_subdev *subdev)
gsp->func->dtor(gsp);
nvkm_falcon_dtor(&gsp->falcon);
+ kfree(gsp->rm);
return gsp;
}
@@ -101,6 +117,16 @@ nvkm_gsp = {
};
int
+nvkm_gsp_load_fw(struct nvkm_gsp *gsp, const char *name, const char *ver,
+ const struct firmware **pfw)
+{
+ char fwname[64];
+
+ snprintf(fwname, sizeof(fwname), "gsp/%s-%s", name, ver);
+ return nvkm_firmware_get(&gsp->subdev, fwname, 0, pfw);
+}
+
+int
nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_gsp **pgsp)
{
@@ -116,7 +142,19 @@ nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device,
return PTR_ERR(fwif);
gsp->func = fwif->func;
- gsp->rm = gsp->func->rm;
+
+ if (fwif->rm) {
+ nvkm_info(&gsp->subdev, "RM version: %s\n", fwif->ver);
+
+ gsp->rm = kzalloc(sizeof(*gsp->rm), GFP_KERNEL);
+ if (!gsp->rm)
+ return -ENOMEM;
+
+ gsp->rm->device = device;
+ gsp->rm->gpu = fwif->func->rm.gpu;
+ gsp->rm->wpr = fwif->rm->wpr;
+ gsp->rm->api = fwif->rm->api;
+ }
return nvkm_falcon_ctor(gsp->func->flcn, &gsp->subdev, gsp->subdev.name, 0x110000,
&gsp->falcon);
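
Note: the new nvkm_gsp_load_fw() helper above only builds the "gsp/<name>-<ver>" suffix; nvkm_firmware_get() is expected to prepend the per-chipset directory and the ".bin" extension. A minimal sketch of the resulting lookup, assuming that prefixing behaviour (the full names match the MODULE_FIRMWARE() entries added to priv.h later in this diff):

	/* nvkm_gsp_load_fw(gsp, "bootloader", "570.144", &fw) requests */
	/* the firmware "gsp/bootloader-570.144", which is resolved to  */
	/* the on-disk blob:                                            */
	/*   /lib/firmware/nvidia/<chip>/gsp/bootloader-570.144.bin     */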
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
index 223f68b532ef..d201e8697226 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
@@ -39,29 +39,27 @@ ga100_gsp_flcn = {
};
static const struct nvkm_gsp_func
-ga100_gsp_r535_113_01 = {
+ga100_gsp = {
.flcn = &ga100_gsp_flcn,
.fwsec = &tu102_gsp_fwsec,
.sig_section = ".fwsignature_ga100",
- .wpr_heap.base_size = 8 << 20,
- .wpr_heap.min_size = 64 << 20,
-
.booter.ctor = tu102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
- .init = r535_gsp_init,
- .fini = r535_gsp_fini,
+ .init = tu102_gsp_init,
+ .fini = tu102_gsp_fini,
.reset = tu102_gsp_reset,
- .rm = &r535_gsp_rm,
+ .rm.gpu = &ga100_gpu,
};
static struct nvkm_gsp_fwif
ga100_gsps[] = {
- { 0, r535_gsp_load, &ga100_gsp_r535_113_01, "535.113.01" },
+ { 1, tu102_gsp_load, &ga100_gsp, &r570_rm_tu102, "570.144" },
+ { 0, tu102_gsp_load, &ga100_gsp, &r535_rm_tu102, "535.113.01" },
{ -1, gv100_gsp_nofw, &gv100_gsp },
{}
};
@@ -72,3 +70,6 @@ ga100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
{
return nvkm_gsp_new_(ga100_gsps, device, type, inst, pgsp);
}
+
+NVKM_GSP_FIRMWARE_BOOTER(ga100, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ga100, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
index 4c4b4168a266..917f7e2f6c46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
@@ -150,25 +150,21 @@ ga102_gsp_flcn = {
};
static const struct nvkm_gsp_func
-ga102_gsp_r535_113_01 = {
+ga102_gsp_r535 = {
.flcn = &ga102_gsp_flcn,
.fwsec = &ga102_gsp_fwsec,
.sig_section = ".fwsignature_ga10x",
- .wpr_heap.os_carveout_size = 20 << 20,
- .wpr_heap.base_size = 8 << 20,
- .wpr_heap.min_size = 84 << 20,
-
.booter.ctor = ga102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
- .init = r535_gsp_init,
- .fini = r535_gsp_fini,
+ .init = tu102_gsp_init,
+ .fini = tu102_gsp_fini,
.reset = ga102_gsp_reset,
- .rm = &r535_gsp_rm,
+ .rm.gpu = &ga1xx_gpu,
};
static const struct nvkm_gsp_func
@@ -178,7 +174,8 @@ ga102_gsp = {
static struct nvkm_gsp_fwif
ga102_gsps[] = {
- { 0, r535_gsp_load, &ga102_gsp_r535_113_01, "535.113.01" },
+ { 1, tu102_gsp_load, &ga102_gsp_r535, &r570_rm_ga102, "570.144" },
+ { 0, tu102_gsp_load, &ga102_gsp_r535, &r535_rm_ga102, "535.113.01" },
{ -1, gv100_gsp_nofw, &ga102_gsp },
{}
};
@@ -189,3 +186,15 @@ ga102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
{
return nvkm_gsp_new_(ga102_gsps, device, type, inst, pgsp);
}
+
+NVKM_GSP_FIRMWARE_BOOTER(ga102, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ga103, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ga104, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ga106, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ga107, 535.113.01);
+
+NVKM_GSP_FIRMWARE_BOOTER(ga102, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ga103, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ga104, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ga106, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ga107, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c
new file mode 100644
index 000000000000..12a3f2c1ed82
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+static const struct nvkm_gsp_func
+gb100_gsp = {
+ .flcn = &ga102_gsp_flcn,
+
+ .sig_section = ".fwsignature_gb10x",
+
+ .dtor = r535_gsp_dtor,
+ .oneinit = gh100_gsp_oneinit,
+ .init = gh100_gsp_init,
+ .fini = gh100_gsp_fini,
+
+ .rm.gpu = &gb10x_gpu,
+};
+
+static struct nvkm_gsp_fwif
+gb100_gsps[] = {
+ { 0, gh100_gsp_load, &gb100_gsp, &r570_rm_gb10x, "570.144", true },
+ {}
+};
+
+int
+gb100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gsp **pgsp)
+{
+ return nvkm_gsp_new_(gb100_gsps, device, type, inst, pgsp);
+}
+
+NVKM_GSP_FIRMWARE_FMC(gb100, 570.144);
+NVKM_GSP_FIRMWARE_FMC(gb102, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c
new file mode 100644
index 000000000000..c1d718172ddf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+static const struct nvkm_gsp_func
+gb202_gsp = {
+ .flcn = &ga102_gsp_flcn,
+
+ .sig_section = ".fwsignature_gb20x",
+
+ .dtor = r535_gsp_dtor,
+ .oneinit = gh100_gsp_oneinit,
+ .init = gh100_gsp_init,
+ .fini = gh100_gsp_fini,
+
+ .rm.gpu = &gb20x_gpu,
+};
+
+static struct nvkm_gsp_fwif
+gb202_gsps[] = {
+ { 0, gh100_gsp_load, &gb202_gsp, &r570_rm_gb20x, "570.144", true },
+ {}
+};
+
+int
+gb202_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gsp **pgsp)
+{
+ return nvkm_gsp_new_(gb202_gsps, device, type, inst, pgsp);
+}
+
+NVKM_GSP_FIRMWARE_FMC(gb202, 570.144);
+NVKM_GSP_FIRMWARE_FMC(gb203, 570.144);
+NVKM_GSP_FIRMWARE_FMC(gb205, 570.144);
+NVKM_GSP_FIRMWARE_FMC(gb206, 570.144);
+NVKM_GSP_FIRMWARE_FMC(gb207, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c
new file mode 100644
index 000000000000..ce31e8248807
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <linux/elf.h>
+#include <linux/crc32.h>
+
+#include <subdev/fb.h>
+#include <subdev/fsp.h>
+
+#include <rm/r570/nvrm/gsp.h>
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gh100/dev_falcon_v4.h>
+#include <nvhw/ref/gh100/dev_riscv_pri.h>
+
+int
+gh100_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
+{
+ struct nvkm_falcon *falcon = &gsp->falcon;
+ int ret, time = 4000;
+
+ /* Shutdown RM. */
+ ret = r535_gsp_fini(gsp, suspend);
+ if (ret && suspend)
+ return ret;
+
+ /* Wait for RISC-V to halt. */
+ do {
+ u32 data = nvkm_falcon_rd32(falcon, falcon->addr2 + NV_PRISCV_RISCV_CPUCTL);
+
+ if (NVVAL_GET(data, NV_PRISCV, RISCV_CPUCTL, HALTED))
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while(time--);
+
+ return -ETIMEDOUT;
+}
+
+static bool
+gh100_gsp_lockdown_released(struct nvkm_gsp *gsp, u32 *mbox0)
+{
+ u32 data;
+
+ /* Wait for GSP access via BAR0 to be allowed. */
+ *mbox0 = nvkm_falcon_rd32(&gsp->falcon, NV_PFALCON_FALCON_MAILBOX0);
+
+ if (*mbox0 && (*mbox0 & 0xffffff00) == 0xbadf4100)
+ return false;
+
+ /* Check if an error code has been reported. */
+ if (*mbox0) {
+ u32 mbox1 = nvkm_falcon_rd32(&gsp->falcon, NV_PFALCON_FALCON_MAILBOX1);
+
+ /* Any value that's not GSP_FMC_BOOT_PARAMS addr is an error. */
+ if ((((u64)mbox1 << 32) | *mbox0) != gsp->fmc.args.addr)
+ return true;
+ }
+
+ /* Check if lockdown has been released. */
+ data = nvkm_falcon_rd32(&gsp->falcon, NV_PFALCON_FALCON_HWCFG2);
+ return !NVVAL_GET(data, NV_PFALCON, FALCON_HWCFG2, RISCV_BR_PRIV_LOCKDOWN);
+}
+
+int
+gh100_gsp_init(struct nvkm_gsp *gsp)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
+ const bool resume = gsp->sr.meta.data != NULL;
+ struct nvkm_gsp_mem *meta;
+ GSP_FMC_BOOT_PARAMS *args;
+ int ret, time = 4000;
+ u32 rsvd_size;
+ u32 mbox0;
+
+ if (!resume) {
+ ret = nvkm_gsp_mem_ctor(gsp, sizeof(*args), &gsp->fmc.args);
+ if (ret)
+ return ret;
+
+ meta = &gsp->wpr_meta;
+ } else {
+ gsp->rm->api->gsp->set_rmargs(gsp, true);
+ meta = &gsp->sr.meta;
+ }
+
+ args = gsp->fmc.args.data;
+
+ args->bootGspRmParams.gspRmDescOffset = meta->addr;
+ args->bootGspRmParams.gspRmDescSize = meta->size;
+ args->bootGspRmParams.target = GSP_DMA_TARGET_COHERENT_SYSTEM;
+ args->bootGspRmParams.bIsGspRmBoot = 1;
+
+ args->gspRmParams.target = GSP_DMA_TARGET_NONCOHERENT_SYSTEM;
+ args->gspRmParams.bootArgsOffset = gsp->libos.addr;
+
+ rsvd_size = gsp->fb.heap.size;
+ if (gsp->rm->wpr->rsvd_size_pmu)
+ rsvd_size = ALIGN(rsvd_size + gsp->rm->wpr->rsvd_size_pmu, 0x200000);
+
+ ret = nvkm_fsp_boot_gsp_fmc(device->fsp, gsp->fmc.args.addr, rsvd_size, resume,
+ gsp->fmc.fw.addr, gsp->fmc.hash, gsp->fmc.pkey, gsp->fmc.sig);
+ if (ret)
+ return ret;
+
+ do {
+ if (gh100_gsp_lockdown_released(gsp, &mbox0))
+ break;
+
+ usleep_range(1000, 2000);
+ } while(time--);
+
+ if (time < 0) {
+ nvkm_error(subdev, "GSP-FMC boot timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ if (mbox0) {
+ nvkm_error(subdev, "GSP-FMC boot failed (mbox: 0x%08x)\n", mbox0);
+ return -EIO;
+ }
+
+ return r535_gsp_init(gsp);
+}
+
+static int
+gh100_gsp_wpr_meta_init(struct nvkm_gsp *gsp)
+{
+ GspFwWprMeta *meta;
+ int ret;
+
+ ret = nvkm_gsp_mem_ctor(gsp, sizeof(*meta), &gsp->wpr_meta);
+ if (ret)
+ return ret;
+
+ gsp->fb.size = nvkm_fb_vidmem_size(gsp->subdev.device);
+ gsp->fb.bios.vga_workspace.size = 128 * 1024;
+ gsp->fb.heap.size = gsp->rm->wpr->heap_size_non_wpr;
+
+ meta = gsp->wpr_meta.data;
+
+ meta->magic = GSP_FW_WPR_META_MAGIC;
+ meta->revision = GSP_FW_WPR_META_REVISION;
+
+ meta->sizeOfRadix3Elf = gsp->fw.len;
+ meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr;
+
+ meta->sizeOfBootloader = gsp->boot.fw.size;
+ meta->sysmemAddrOfBootloader = gsp->boot.fw.addr;
+ meta->bootloaderCodeOffset = gsp->boot.code_offset;
+ meta->bootloaderDataOffset = gsp->boot.data_offset;
+ meta->bootloaderManifestOffset = gsp->boot.manifest_offset;
+
+ meta->sysmemAddrOfSignature = gsp->sig.addr;
+ meta->sizeOfSignature = gsp->sig.size;
+
+ meta->nonWprHeapSize = gsp->fb.heap.size;
+ meta->gspFwHeapSize = tu102_gsp_wpr_heap_size(gsp);
+ meta->frtsSize = 0x100000;
+ meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size;
+ meta->pmuReservedSize = gsp->rm->wpr->rsvd_size_pmu;
+ return 0;
+}
+
+/* The sh_flags value for the binary blobs in the ELF image */
+#define FMC_SHF_FLAGS (SHF_MASKPROC | SHF_MASKOS | SHF_OS_NONCONFORMING | SHF_ALLOC)
+
+#define ELF_HDR_SIZE ((u8)sizeof(struct elf32_hdr))
+#define ELF_SHDR_SIZE ((u8)sizeof(struct elf32_shdr))
+
+/* The FMC ELF header must be exactly this */
+static const u8 elf_header[] = {
+ 0x7f, 'E', 'L', 'F', 1, 1, 1, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 1, 0, 0, 0, /* e_type, e_machine, e_version */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* e_entry, e_phoff */
+
+ ELF_HDR_SIZE, 0, 0, 0, 0, 0, 0, 0, /* e_shoff, e_flags */
+ ELF_HDR_SIZE, 0, 0, 0, /* e_ehsize, e_phentsize */
+ 0, 0, ELF_SHDR_SIZE, 0, /* e_phnum, e_shentsize */
+
+ 6, 0, 1, 0, /* e_shnum, e_shstrndx */
+};
+
+/**
+ * elf_validate_sections - validate each section in the FMC ELF image
+ * @elf: ELF image
+ * @length: size of the entire ELF image
+ */
+static bool
+elf_validate_sections(const void *elf, size_t length)
+{
+ const struct elf32_hdr *ehdr = elf;
+ const struct elf32_shdr *shdr = elf + ehdr->e_shoff;
+
+ /* The offset of the first section */
+ Elf32_Off section_begin = ehdr->e_shoff + ehdr->e_shnum * ehdr->e_shentsize;
+
+ if (section_begin > length)
+ return false;
+
+ /* The first section header is the null section, so skip it */
+ for (unsigned int i = 1; i < ehdr->e_shnum; i++) {
+ if (i == ehdr->e_shstrndx) {
+ if (shdr[i].sh_type != SHT_STRTAB)
+ return false;
+ if (shdr[i].sh_flags != SHF_STRINGS)
+ return false;
+ } else {
+ if (shdr[i].sh_type != SHT_PROGBITS)
+ return false;
+ if (shdr[i].sh_flags != FMC_SHF_FLAGS)
+ return false;
+ }
+
+ /* Ensure that each section is inside the image */
+ if (shdr[i].sh_offset < section_begin ||
+ (u64)shdr[i].sh_offset + shdr[i].sh_size > length)
+ return false;
+
+ /* Non-zero sh_info is a CRC */
+ if (shdr[i].sh_info) {
+ /* The kernel's CRC32 needs a pre- and post-xor to match standard CRCs */
+ u32 crc32 = crc32_le(~0, elf + shdr[i].sh_offset, shdr[i].sh_size) ^ ~0;
+
+ if (shdr[i].sh_info != crc32)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * elf_section - return a pointer to the data for a given section
+ * @elf: ELF image
+ * @name: section name to search for
+ * @len: pointer to returned length of found section
+ */
+static const void *
+elf_section(const void *elf, const char *name, unsigned int *len)
+{
+ const struct elf32_hdr *ehdr = elf;
+ const struct elf32_shdr *shdr = elf + ehdr->e_shoff;
+ const char *names = elf + shdr[ehdr->e_shstrndx].sh_offset;
+
+ for (unsigned int i = 1; i < ehdr->e_shnum; i++) {
+ if (!strcmp(&names[shdr[i].sh_name], name)) {
+ *len = shdr[i].sh_size;
+ return elf + shdr[i].sh_offset;
+ }
+ }
+
+ return NULL;
+}
+
+int
+gh100_gsp_oneinit(struct nvkm_gsp *gsp)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_fsp *fsp = device->fsp;
+ const void *fw = gsp->fws.fmc->data;
+ const void *hash, *sig, *pkey, *img;
+ unsigned int img_len = 0, hash_len = 0, pkey_len = 0, sig_len = 0;
+ int ret;
+
+ if (gsp->fws.fmc->size < ELF_HDR_SIZE ||
+ memcmp(fw, elf_header, sizeof(elf_header)) ||
+ !elf_validate_sections(fw, gsp->fws.fmc->size)) {
+ nvkm_error(subdev, "fmc firmware image is invalid\n");
+ return -ENODATA;
+ }
+
+ hash = elf_section(fw, "hash", &hash_len);
+ sig = elf_section(fw, "signature", &sig_len);
+ pkey = elf_section(fw, "publickey", &pkey_len);
+ img = elf_section(fw, "image", &img_len);
+
+ if (!hash || !sig || !pkey || !img) {
+ nvkm_error(subdev, "fmc firmware image is invalid\n");
+ return -ENODATA;
+ }
+
+ if (!nvkm_fsp_verify_gsp_fmc(fsp, hash_len, pkey_len, sig_len))
+ return -EINVAL;
+
+ /* Load GSP-FMC FW into memory. */
+ ret = nvkm_gsp_mem_ctor(gsp, img_len, &gsp->fmc.fw);
+ if (ret)
+ return ret;
+
+ memcpy(gsp->fmc.fw.data, img, img_len);
+
+ gsp->fmc.hash = kmemdup(hash, hash_len, GFP_KERNEL);
+ gsp->fmc.pkey = kmemdup(pkey, pkey_len, GFP_KERNEL);
+ gsp->fmc.sig = kmemdup(sig, sig_len, GFP_KERNEL);
+ if (!gsp->fmc.hash || !gsp->fmc.pkey || !gsp->fmc.sig)
+ return -ENOMEM;
+
+ ret = r535_gsp_oneinit(gsp);
+ if (ret)
+ return ret;
+
+ return gh100_gsp_wpr_meta_init(gsp);
+}
+
+static const struct nvkm_gsp_func
+gh100_gsp = {
+ .flcn = &ga102_gsp_flcn,
+
+ .sig_section = ".fwsignature_gh100",
+
+ .dtor = r535_gsp_dtor,
+ .oneinit = gh100_gsp_oneinit,
+ .init = gh100_gsp_init,
+ .fini = gh100_gsp_fini,
+
+ .rm.gpu = &gh100_gpu,
+};
+
+int
+gh100_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
+{
+ int ret;
+
+ ret = tu102_gsp_load_rm(gsp, fwif);
+ if (ret)
+ goto done;
+
+ ret = nvkm_gsp_load_fw(gsp, "fmc", fwif->ver, &gsp->fws.fmc);
+
+done:
+ if (ret)
+ nvkm_gsp_dtor_fws(gsp);
+
+ return ret;
+}
+
+static struct nvkm_gsp_fwif
+gh100_gsps[] = {
+ { 0, gh100_gsp_load, &gh100_gsp, &r570_rm_gh100, "570.144", true },
+ {}
+};
+
+int
+gh100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gsp **pgsp)
+{
+ return nvkm_gsp_new_(gh100_gsps, device, type, inst, pgsp);
+}
+
+NVKM_GSP_FIRMWARE_FMC(gh100, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
index 9f4a62375a27..4f14e85fc69e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
@@ -2,6 +2,7 @@
#ifndef __NVKM_GSP_PRIV_H__
#define __NVKM_GSP_PRIV_H__
#include <subdev/gsp.h>
+#include <rm/gpu.h>
enum nvkm_acr_lsf_id;
int nvkm_gsp_fwsec_frts(struct nvkm_gsp *);
@@ -11,12 +12,32 @@ struct nvkm_gsp_fwif {
int version;
int (*load)(struct nvkm_gsp *, int ver, const struct nvkm_gsp_fwif *);
const struct nvkm_gsp_func *func;
+ const struct nvkm_rm_impl *rm;
const char *ver;
bool enable;
};
+int nvkm_gsp_load_fw(struct nvkm_gsp *, const char *name, const char *ver,
+ const struct firmware **);
+void nvkm_gsp_dtor_fws(struct nvkm_gsp *);
+
int gv100_gsp_nofw(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
-int r535_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
+
+int tu102_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
+int tu102_gsp_load_rm(struct nvkm_gsp *, const struct nvkm_gsp_fwif *);
+
+int gh100_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
+
+#define NVKM_GSP_FIRMWARE_BOOTER(chip,vers) \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_load-"#vers".bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_unload-"#vers".bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-"#vers".bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-"#vers".bin")
+
+#define NVKM_GSP_FIRMWARE_FMC(chip,vers) \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/fmc-"#vers".bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-"#vers".bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-"#vers".bin")
struct nvkm_gsp_func {
const struct nvkm_falcon_func *flcn;
@@ -25,12 +46,6 @@ struct nvkm_gsp_func {
char *sig_section;
struct {
- u32 os_carveout_size;
- u32 base_size;
- u64 min_size;
- } wpr_heap;
-
- struct {
int (*ctor)(struct nvkm_gsp *, const char *name, const struct firmware *,
struct nvkm_falcon *, struct nvkm_falcon_fw *);
} booter;
@@ -41,7 +56,9 @@ struct nvkm_gsp_func {
int (*fini)(struct nvkm_gsp *, bool suspend);
int (*reset)(struct nvkm_gsp *);
- const struct nvkm_gsp_rm *rm;
+ struct {
+ const struct nvkm_rm_gpu *gpu;
+ } rm;
};
extern const struct nvkm_falcon_func tu102_gsp_flcn;
@@ -49,7 +66,10 @@ extern const struct nvkm_falcon_fw_func tu102_gsp_fwsec;
int tu102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware *,
struct nvkm_falcon *, struct nvkm_falcon_fw *);
int tu102_gsp_oneinit(struct nvkm_gsp *);
+int tu102_gsp_init(struct nvkm_gsp *);
+int tu102_gsp_fini(struct nvkm_gsp *, bool suspend);
int tu102_gsp_reset(struct nvkm_gsp *);
+u64 tu102_gsp_wpr_heap_size(struct nvkm_gsp *);
extern const struct nvkm_falcon_func ga102_gsp_flcn;
extern const struct nvkm_falcon_fw_func ga102_gsp_fwsec;
@@ -57,11 +77,14 @@ int ga102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware
struct nvkm_falcon *, struct nvkm_falcon_fw *);
int ga102_gsp_reset(struct nvkm_gsp *);
+int gh100_gsp_oneinit(struct nvkm_gsp *);
+int gh100_gsp_init(struct nvkm_gsp *);
+int gh100_gsp_fini(struct nvkm_gsp *, bool suspend);
+
void r535_gsp_dtor(struct nvkm_gsp *);
int r535_gsp_oneinit(struct nvkm_gsp *);
int r535_gsp_init(struct nvkm_gsp *);
int r535_gsp_fini(struct nvkm_gsp *, bool suspend);
-extern const struct nvkm_gsp_rm r535_gsp_rm;
int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gsp **);
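
For reference, a hand-written expansion of one invocation of the NVKM_GSP_FIRMWARE_BOOTER() macro added above (this follows directly from the macro body's token pasting; the FMC variant is analogous, minus the two booter blobs):

	/* NVKM_GSP_FIRMWARE_BOOTER(ad102, 535.113.01) expands to: */
	MODULE_FIRMWARE("nvidia/ad102/gsp/booter_load-535.113.01.bin");
	MODULE_FIRMWARE("nvidia/ad102/gsp/booter_unload-535.113.01.bin");
	MODULE_FIRMWARE("nvidia/ad102/gsp/bootloader-535.113.01.bin");
	MODULE_FIRMWARE("nvidia/ad102/gsp/gsp-535.113.01.bin");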
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild
new file mode 100644
index 000000000000..04037394a2da
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: MIT
+#
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+nvkm-y += nvkm/subdev/gsp/rm/client.o
+nvkm-y += nvkm/subdev/gsp/rm/engine.o
+nvkm-y += nvkm/subdev/gsp/rm/gr.o
+nvkm-y += nvkm/subdev/gsp/rm/nvdec.o
+nvkm-y += nvkm/subdev/gsp/rm/nvenc.o
+
+nvkm-y += nvkm/subdev/gsp/rm/tu1xx.o
+nvkm-y += nvkm/subdev/gsp/rm/ga100.o
+nvkm-y += nvkm/subdev/gsp/rm/ga1xx.o
+nvkm-y += nvkm/subdev/gsp/rm/ad10x.o
+nvkm-y += nvkm/subdev/gsp/rm/gh100.o
+nvkm-y += nvkm/subdev/gsp/rm/gb10x.o
+nvkm-y += nvkm/subdev/gsp/rm/gb20x.o
+
+include $(src)/nvkm/subdev/gsp/rm/r535/Kbuild
+include $(src)/nvkm/subdev/gsp/rm/r570/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c
new file mode 100644
index 000000000000..e1ce6355c35f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+ad10x_gpu = {
+ .disp.class = {
+ .root = AD102_DISP,
+ .caps = GV100_DISP_CAPS,
+ .core = AD102_DISP_CORE_CHANNEL_DMA,
+ .wndw = GA102_DISP_WINDOW_CHANNEL_DMA,
+ .wimm = GA102_DISP_WINDOW_IMM_CHANNEL_DMA,
+ .curs = GA102_DISP_CURSOR,
+ },
+
+ .usermode.class = AMPERE_USERMODE_A,
+
+ .fifo.chan = {
+ .class = AMPERE_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = AMPERE_DMA_COPY_B,
+ .gr.class = {
+ .i2m = KEPLER_INLINE_TO_MEMORY_B,
+ .twod = FERMI_TWOD_A,
+ .threed = ADA_A,
+ .compute = ADA_COMPUTE_A,
+ },
+ .nvdec.class = NVC9B0_VIDEO_DECODER,
+ .nvenc.class = NVC9B7_VIDEO_ENCODER,
+ .ofa.class = NVC9FA_VIDEO_OFA,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c
new file mode 100644
index 000000000000..72d3e3ca84c2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "rm.h"
+
+void
+nvkm_gsp_client_dtor(struct nvkm_gsp_client *client)
+{
+ const unsigned int id = client->object.handle - NVKM_RM_CLIENT(0);
+ struct nvkm_gsp *gsp = client->gsp;
+
+ if (!gsp)
+ return;
+
+ if (client->object.client)
+ nvkm_gsp_rm_free(&client->object);
+
+ mutex_lock(&gsp->client_id.mutex);
+ idr_remove(&gsp->client_id.idr, id);
+ mutex_unlock(&gsp->client_id.mutex);
+
+ client->gsp = NULL;
+}
+
+int
+nvkm_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client)
+{
+ int id, ret;
+
+ if (WARN_ON(!gsp->rm))
+ return -ENOSYS;
+
+ mutex_lock(&gsp->client_id.mutex);
+ id = idr_alloc(&gsp->client_id.idr, client, 0, NVKM_RM_CLIENT_MASK + 1, GFP_KERNEL);
+ mutex_unlock(&gsp->client_id.mutex);
+ if (id < 0)
+ return id;
+
+ client->gsp = gsp;
+ client->object.client = client;
+ INIT_LIST_HEAD(&client->events);
+
+ ret = gsp->rm->api->client->ctor(client, NVKM_RM_CLIENT(id));
+ if (ret)
+ nvkm_gsp_client_dtor(client);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c
new file mode 100644
index 000000000000..3b0e83b2f57f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "engine.h"
+#include "gpu.h"
+
+#include <core/object.h>
+#include <engine/fifo/chan.h>
+
+struct nvkm_rm_engine {
+ struct nvkm_engine engine;
+
+ struct nvkm_engine_func func;
+};
+
+struct nvkm_rm_engine_obj {
+ struct nvkm_object object;
+ struct nvkm_gsp_object rm;
+};
+
+static void*
+nvkm_rm_engine_obj_dtor(struct nvkm_object *object)
+{
+ struct nvkm_rm_engine_obj *obj = container_of(object, typeof(*obj), object);
+
+ nvkm_gsp_rm_free(&obj->rm);
+ return obj;
+}
+
+static const struct nvkm_object_func
+nvkm_rm_engine_obj = {
+ .dtor = nvkm_rm_engine_obj_dtor,
+};
+
+int
+nvkm_rm_engine_obj_new(struct nvkm_gsp_object *chan, int chid, const struct nvkm_oclass *oclass,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_rm *rm = chan->client->gsp->rm;
+ const int inst = oclass->engine->subdev.inst;
+ const u32 class = oclass->base.oclass;
+ const u32 handle = oclass->handle;
+ struct nvkm_rm_engine_obj *obj;
+ int ret;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ switch (oclass->engine->subdev.type) {
+ case NVKM_ENGINE_CE:
+ ret = rm->api->ce->alloc(chan, handle, class, inst, &obj->rm);
+ break;
+ case NVKM_ENGINE_GR:
+ ret = nvkm_gsp_rm_alloc(chan, handle, class, 0, &obj->rm);
+ break;
+ case NVKM_ENGINE_NVDEC:
+ ret = rm->api->nvdec->alloc(chan, handle, class, inst, &obj->rm);
+ break;
+ case NVKM_ENGINE_NVENC:
+ ret = rm->api->nvenc->alloc(chan, handle, class, inst, &obj->rm);
+ break;
+ case NVKM_ENGINE_NVJPG:
+ ret = rm->api->nvjpg->alloc(chan, handle, class, inst, &obj->rm);
+ break;
+ case NVKM_ENGINE_OFA:
+ ret = rm->api->ofa->alloc(chan, handle, class, inst, &obj->rm);
+ break;
+ default:
+ ret = -EINVAL;
+ WARN_ON(1);
+ break;
+ }
+
+ if (ret) {
+ kfree(obj);
+ return ret;
+ }
+
+ nvkm_object_ctor(&nvkm_rm_engine_obj, oclass, &obj->object);
+ *pobject = &obj->object;
+ return 0;
+}
+
+static int
+nvkm_rm_engine_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
+
+ return nvkm_rm_engine_obj_new(&chan->rm.object, chan->id, oclass, pobject);
+}
+
+static void *
+nvkm_rm_engine_dtor(struct nvkm_engine *engine)
+{
+ kfree(engine->func);
+ return engine;
+}
+
+int
+nvkm_rm_engine_ctor(void *(*dtor)(struct nvkm_engine *), struct nvkm_rm *rm,
+ enum nvkm_subdev_type type, int inst,
+ const u32 *class, int nclass, struct nvkm_engine *engine)
+{
+ struct nvkm_engine_func *func;
+
+ func = kzalloc(struct_size(func, sclass, nclass + 1), GFP_KERNEL);
+ if (!func)
+ return -ENOMEM;
+
+ func->dtor = dtor;
+
+ for (int i = 0; i < nclass; i++) {
+ func->sclass[i].oclass = class[i];
+ func->sclass[i].minver = -1;
+ func->sclass[i].maxver = 0;
+ func->sclass[i].ctor = nvkm_rm_engine_obj_ctor;
+ }
+
+ nvkm_engine_ctor(func, rm->device, type, inst, true, engine);
+ return 0;
+}
+
+static int
+nvkm_rm_engine_new_(struct nvkm_rm *rm, enum nvkm_subdev_type type, int inst, u32 class,
+ struct nvkm_engine **pengine)
+{
+ struct nvkm_engine *engine;
+ int ret;
+
+ engine = kzalloc(sizeof(*engine), GFP_KERNEL);
+ if (!engine)
+ return -ENOMEM;
+
+ ret = nvkm_rm_engine_ctor(nvkm_rm_engine_dtor, rm, type, inst, &class, 1, engine);
+ if (ret) {
+ kfree(engine);
+ return ret;
+ }
+
+ *pengine = engine;
+ return 0;
+}
+
+int
+nvkm_rm_engine_new(struct nvkm_rm *rm, enum nvkm_subdev_type type, int inst)
+{
+ const struct nvkm_rm_gpu *gpu = rm->gpu;
+ struct nvkm_device *device = rm->device;
+
+ switch (type) {
+ case NVKM_ENGINE_CE:
+ if (WARN_ON(inst >= ARRAY_SIZE(device->ce)))
+ return -EINVAL;
+
+ return nvkm_rm_engine_new_(rm, type, inst, gpu->ce.class, &device->ce[inst]);
+ case NVKM_ENGINE_GR:
+ if (inst != 0)
+ return -ENODEV; /* MiG not supported, just ignore. */
+
+ return nvkm_rm_gr_new(rm);
+ case NVKM_ENGINE_NVDEC:
+ if (WARN_ON(inst >= ARRAY_SIZE(device->nvdec)))
+ return -EINVAL;
+
+ return nvkm_rm_nvdec_new(rm, inst);
+ case NVKM_ENGINE_NVENC:
+ if (WARN_ON(inst >= ARRAY_SIZE(device->nvenc)))
+ return -EINVAL;
+
+ return nvkm_rm_nvenc_new(rm, inst);
+ case NVKM_ENGINE_NVJPG:
+ if (WARN_ON(inst >= ARRAY_SIZE(device->nvjpg)))
+ return -EINVAL;
+
+ return nvkm_rm_engine_new_(rm, type, inst, gpu->nvjpg.class, &device->nvjpg[inst]);
+ case NVKM_ENGINE_OFA:
+ if (WARN_ON(inst >= ARRAY_SIZE(device->ofa)))
+ return -EINVAL;
+
+ return nvkm_rm_engine_new_(rm, type, inst, gpu->ofa.class, &device->ofa[inst]);
+ default:
+ break;
+ }
+
+ return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h
new file mode 100644
index 000000000000..5b8c9c3901d4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_RM_ENGINE_H__
+#define __NVKM_RM_ENGINE_H__
+#include "gpu.h"
+
+int nvkm_rm_engine_ctor(void *(*dtor)(struct nvkm_engine *), struct nvkm_rm *,
+ enum nvkm_subdev_type type, int inst,
+ const u32 *class, int nclass, struct nvkm_engine *);
+int nvkm_rm_engine_new(struct nvkm_rm *, enum nvkm_subdev_type, int inst);
+
+int nvkm_rm_engine_obj_new(struct nvkm_gsp_object *chan, int chid, const struct nvkm_oclass *,
+ struct nvkm_object **);
+
+int nvkm_rm_gr_new(struct nvkm_rm *);
+int nvkm_rm_nvdec_new(struct nvkm_rm *, int inst);
+int nvkm_rm_nvenc_new(struct nvkm_rm *, int inst);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c
new file mode 100644
index 000000000000..a48c6134075d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+ga100_gpu = {
+ .usermode.class = AMPERE_USERMODE_A,
+
+ .fifo.chan = {
+ .class = AMPERE_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = AMPERE_DMA_COPY_A,
+ .gr.class = {
+ .i2m = KEPLER_INLINE_TO_MEMORY_B,
+ .twod = FERMI_TWOD_A,
+ .threed = AMPERE_A,
+ .compute = AMPERE_COMPUTE_A,
+ },
+ .nvdec.class = NVC6B0_VIDEO_DECODER,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c
new file mode 100644
index 000000000000..50536ad7f85d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+ga1xx_gpu = {
+ .disp.class = {
+ .root = GA102_DISP,
+ .caps = GV100_DISP_CAPS,
+ .core = GA102_DISP_CORE_CHANNEL_DMA,
+ .wndw = GA102_DISP_WINDOW_CHANNEL_DMA,
+ .wimm = GA102_DISP_WINDOW_IMM_CHANNEL_DMA,
+ .curs = GA102_DISP_CURSOR,
+ },
+
+ .usermode.class = AMPERE_USERMODE_A,
+
+ .fifo.chan = {
+ .class = AMPERE_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = AMPERE_DMA_COPY_B,
+ .gr.class = {
+ .i2m = KEPLER_INLINE_TO_MEMORY_B,
+ .twod = FERMI_TWOD_A,
+ .threed = AMPERE_B,
+ .compute = AMPERE_COMPUTE_B,
+ },
+ .nvdec.class = NVC7B0_VIDEO_DECODER,
+ .nvenc.class = NVC7B7_VIDEO_ENCODER,
+ .ofa.class = NVC7FA_VIDEO_OFA,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c
new file mode 100644
index 000000000000..2f517dcd721a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+gb10x_gpu = {
+ .usermode.class = HOPPER_USERMODE_A,
+
+ .fifo.chan = {
+ .class = BLACKWELL_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = BLACKWELL_DMA_COPY_A,
+ .gr.class = {
+ .i2m = BLACKWELL_INLINE_TO_MEMORY_A,
+ .twod = FERMI_TWOD_A,
+ .threed = BLACKWELL_A,
+ .compute = BLACKWELL_COMPUTE_A,
+ },
+ .nvdec.class = NVCDB0_VIDEO_DECODER,
+ .nvjpg.class = NVCDD1_VIDEO_NVJPG,
+ .ofa.class = NVCDFA_VIDEO_OFA,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c
new file mode 100644
index 000000000000..950471d9996e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/ce/priv.h>
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+gb20x_gpu = {
+ .disp.class = {
+ .root = GB202_DISP,
+ .caps = GB202_DISP_CAPS,
+ .core = GB202_DISP_CORE_CHANNEL_DMA,
+ .wndw = GB202_DISP_WINDOW_CHANNEL_DMA,
+ .wimm = GB202_DISP_WINDOW_IMM_CHANNEL_DMA,
+ .curs = GB202_DISP_CURSOR,
+ },
+
+ .usermode.class = BLACKWELL_USERMODE_A,
+
+ .fifo.chan = {
+ .class = BLACKWELL_CHANNEL_GPFIFO_B,
+ .doorbell_handle = gb202_chan_doorbell_handle,
+ },
+
+ .ce = {
+ .class = BLACKWELL_DMA_COPY_B,
+ .grce_mask = gb202_ce_grce_mask,
+ },
+ .gr.class = {
+ .i2m = BLACKWELL_INLINE_TO_MEMORY_A,
+ .twod = FERMI_TWOD_A,
+ .threed = BLACKWELL_B,
+ .compute = BLACKWELL_COMPUTE_B,
+ },
+ .nvdec.class = NVCFB0_VIDEO_DECODER,
+ .nvenc.class = NVCFB7_VIDEO_ENCODER,
+ .nvjpg.class = NVCFD1_VIDEO_NVJPG,
+ .ofa.class = NVCFFA_VIDEO_OFA,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c
new file mode 100644
index 000000000000..49e2c54e1aa8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+gh100_gpu = {
+ .usermode.class = HOPPER_USERMODE_A,
+
+ .fifo.chan = {
+ .class = HOPPER_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = HOPPER_DMA_COPY_A,
+ .gr.class = {
+ .i2m = KEPLER_INLINE_TO_MEMORY_B,
+ .twod = FERMI_TWOD_A,
+ .threed = HOPPER_A,
+ .compute = HOPPER_COMPUTE_A,
+ },
+ .nvdec.class = NVB8B0_VIDEO_DECODER,
+ .nvjpg.class = NVB8D1_VIDEO_NVJPG,
+ .ofa.class = NVB8FA_VIDEO_OFA,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h
new file mode 100644
index 000000000000..46a6325641b7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_RM_GPU_H__
+#define __NVKM_RM_GPU_H__
+#include "rm.h"
+
+struct nvkm_rm_gpu {
+ struct {
+ struct {
+ u32 root;
+ u32 caps;
+ u32 core;
+ u32 wndw;
+ u32 wimm;
+ u32 curs;
+ } class;
+ } disp;
+
+ struct {
+ u32 class;
+ } usermode;
+
+ struct {
+ struct {
+ u32 class;
+ u32 (*doorbell_handle)(struct nvkm_chan *);
+ } chan;
+ } fifo;
+
+ struct {
+ u32 class;
+ u32 (*grce_mask)(struct nvkm_device *);
+ } ce;
+
+ struct {
+ struct {
+ u32 i2m;
+ u32 twod;
+ u32 threed;
+ u32 compute;
+ } class;
+ } gr;
+
+ struct {
+ u32 class;
+ } nvdec;
+
+ struct {
+ u32 class;
+ } nvenc;
+
+ struct {
+ u32 class;
+ } nvjpg;
+
+ struct {
+ u32 class;
+ } ofa;
+};
+
+extern const struct nvkm_rm_gpu tu1xx_gpu;
+extern const struct nvkm_rm_gpu ga100_gpu;
+extern const struct nvkm_rm_gpu ga1xx_gpu;
+extern const struct nvkm_rm_gpu ad10x_gpu;
+extern const struct nvkm_rm_gpu gh100_gpu;
+extern const struct nvkm_rm_gpu gb10x_gpu;
+extern const struct nvkm_rm_gpu gb20x_gpu;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c
new file mode 100644
index 000000000000..f40b8fcc2bcb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gr.h"
+
+#include <engine/fifo.h>
+#include <engine/gr/priv.h>
+
+static int
+nvkm_rm_gr_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ struct r535_gr_chan *chan = container_of(oclass->parent, typeof(*chan), object);
+
+ return nvkm_rm_engine_obj_new(&chan->chan->rm.object, chan->chan->id, oclass, pobject);
+}
+
+static int
+nvkm_rm_gr_fini(struct nvkm_gr *base, bool suspend)
+{
+ struct nvkm_rm *rm = base->engine.subdev.device->gsp->rm;
+ struct r535_gr *gr = container_of(base, typeof(*gr), base);
+
+ if (rm->api->gr->scrubber.fini)
+ rm->api->gr->scrubber.fini(gr);
+
+ return 0;
+}
+
+static int
+nvkm_rm_gr_init(struct nvkm_gr *base)
+{
+ struct nvkm_rm *rm = base->engine.subdev.device->gsp->rm;
+ struct r535_gr *gr = container_of(base, typeof(*gr), base);
+ int ret;
+
+ if (rm->api->gr->scrubber.init) {
+ ret = rm->api->gr->scrubber.init(gr);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+nvkm_rm_gr_new(struct nvkm_rm *rm)
+{
+ const u32 classes[] = {
+ rm->gpu->gr.class.i2m,
+ rm->gpu->gr.class.twod,
+ rm->gpu->gr.class.threed,
+ rm->gpu->gr.class.compute,
+ };
+ struct nvkm_gr_func *func;
+ struct r535_gr *gr;
+
+ func = kzalloc(struct_size(func, sclass, ARRAY_SIZE(classes) + 1), GFP_KERNEL);
+ if (!func)
+ return -ENOMEM;
+
+ func->dtor = r535_gr_dtor;
+ func->oneinit = r535_gr_oneinit;
+ func->init = nvkm_rm_gr_init;
+ func->fini = nvkm_rm_gr_fini;
+ func->units = r535_gr_units;
+ func->chan_new = r535_gr_chan_new;
+
+ for (int i = 0; i < ARRAY_SIZE(classes); i++) {
+ func->sclass[i].oclass = classes[i];
+ func->sclass[i].minver = -1;
+ func->sclass[i].maxver = 0;
+ func->sclass[i].ctor = nvkm_rm_gr_obj_ctor;
+ }
+
+ gr = kzalloc(sizeof(*gr), GFP_KERNEL);
+ if (!gr) {
+ kfree(func);
+ return -ENOMEM;
+ }
+
+ nvkm_gr_ctor(func, rm->device, NVKM_ENGINE_GR, 0, true, &gr->base);
+ gr->scrubber.chid = -1;
+ rm->device->gr = &gr->base;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h
new file mode 100644
index 000000000000..24980f23aab9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_RM_GR_H__
+#define __NVKM_RM_GR_H__
+#include "engine.h"
+
+#include <core/object.h>
+#include <engine/gr.h>
+
+#define R515_GR_MAX_CTXBUFS 9
+
+struct r535_gr_chan {
+ struct nvkm_object object;
+ struct r535_gr *gr;
+
+ struct nvkm_vmm *vmm;
+ struct nvkm_chan *chan;
+
+ struct nvkm_memory *mem[R515_GR_MAX_CTXBUFS];
+ struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
+};
+
+struct r535_gr {
+ struct nvkm_gr base;
+
+ struct {
+ u16 bufferId;
+ u32 size;
+ u8 page;
+ u8 align;
+ bool global;
+ bool init;
+ bool ro;
+ } ctxbuf[R515_GR_MAX_CTXBUFS];
+ int ctxbuf_nr;
+
+ struct nvkm_memory *ctxbuf_mem[R515_GR_MAX_CTXBUFS];
+
+ struct {
+ int chid;
+ struct nvkm_memory *inst;
+ struct nvkm_vmm *vmm;
+ struct nvkm_gsp_object chan;
+ struct nvkm_gsp_object threed;
+ struct {
+ struct nvkm_memory *mem[R515_GR_MAX_CTXBUFS];
+ struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
+ } ctxbuf;
+ bool enabled;
+ } scrubber;
+};
+
+struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO;
+void r535_gr_get_ctxbuf_info(struct r535_gr *, int i,
+ struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h
new file mode 100644
index 000000000000..3bdb5ad320d7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_RM_HANDLES_H__
+#define __NVKM_RM_HANDLES_H__
+
+/* RMAPI handles for various objects allocated from GSP-RM with RM_ALLOC. */
+
+#define NVKM_RM_CLIENT(id) (0xc1d00000 | (id))
+#define NVKM_RM_CLIENT_MASK 0x0000ffff
+#define NVKM_RM_DEVICE 0xde1d0000
+#define NVKM_RM_SUBDEVICE 0x5d1d0000
+#define NVKM_RM_DISP 0x00730000
+#define NVKM_RM_VASPACE 0x90f10000
+#define NVKM_RM_CHAN(chid) (0xf1f00000 | (chid))
+#define NVKM_RM_THREED 0x97000000
+#endif
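
A quick worked example of the handle encoding above (the values follow directly from the macros; the id/chid argument lands in the low bits of the handle):

	NVKM_RM_CLIENT(0)      /* 0xc1d00000 */
	NVKM_RM_CLIENT(0x1234) /* 0xc1d01234 - idr-allocated id, bounded by NVKM_RM_CLIENT_MASK */
	NVKM_RM_CHAN(3)        /* 0xf1f00003 - channel id in the low bits */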
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c
new file mode 100644
index 000000000000..d9fbfc377864
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "engine.h"
+#include <engine/nvdec.h>
+
+static void *
+nvkm_rm_nvdec_dtor(struct nvkm_engine *engine)
+{
+ return container_of(engine, struct nvkm_nvdec, engine);
+}
+
+int
+nvkm_rm_nvdec_new(struct nvkm_rm *rm, int inst)
+{
+ struct nvkm_nvdec *nvdec;
+ int ret;
+
+ nvdec = kzalloc(sizeof(*nvdec), GFP_KERNEL);
+ if (!nvdec)
+ return -ENOMEM;
+
+ ret = nvkm_rm_engine_ctor(nvkm_rm_nvdec_dtor, rm, NVKM_ENGINE_NVDEC, inst,
+ &rm->gpu->nvdec.class, 1, &nvdec->engine);
+ if (ret) {
+ kfree(nvdec);
+ return ret;
+ }
+
+ rm->device->nvdec[inst] = nvdec;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c
new file mode 100644
index 000000000000..6dfa7b789e07
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "engine.h"
+#include <engine/nvenc.h>
+
+static void *
+nvkm_rm_nvenc_dtor(struct nvkm_engine *engine)
+{
+ return container_of(engine, struct nvkm_nvenc, engine);
+}
+
+int
+nvkm_rm_nvenc_new(struct nvkm_rm *rm, int inst)
+{
+ struct nvkm_nvenc *nvenc;
+ int ret;
+
+ nvenc = kzalloc(sizeof(*nvenc), GFP_KERNEL);
+ if (!nvenc)
+ return -ENOMEM;
+
+ ret = nvkm_rm_engine_ctor(nvkm_rm_nvenc_dtor, rm, NVKM_ENGINE_NVENC, inst,
+ &rm->gpu->nvenc.class, 1, &nvenc->engine);
+ if (ret) {
+ kfree(nvenc);
+ return ret;
+ }
+
+ rm->device->nvenc[inst] = nvenc;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild
new file mode 100644
index 000000000000..a5f6b2abfd33
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: MIT
+#
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+
+nvkm-y += nvkm/subdev/gsp/rm/r535/rm.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/gsp.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/rpc.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/ctrl.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/alloc.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/client.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/device.o
+
+nvkm-y += nvkm/subdev/gsp/rm/r535/bar.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/fbsr.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/vmm.o
+
+nvkm-y += nvkm/subdev/gsp/rm/r535/disp.o
+
+nvkm-y += nvkm/subdev/gsp/rm/r535/fifo.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/ce.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/gr.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/nvdec.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/nvenc.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/nvjpg.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/ofa.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c
new file mode 100644
index 000000000000..46e3a29f2ad7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/rpc.h>
+
+#include "nvrm/alloc.h"
+#include "nvrm/rpcfn.h"
+
+static int
+r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object)
+{
+ struct nvkm_gsp_client *client = object->client;
+ struct nvkm_gsp *gsp = client->gsp;
+ rpc_free_v03_00 *rpc;
+
+ nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x free\n",
+ client->object.handle, object->handle);
+
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_FREE, sizeof(*rpc));
+ if (WARN_ON(IS_ERR_OR_NULL(rpc)))
+ return -EIO;
+
+ rpc->params.hRoot = client->object.handle;
+ rpc->params.hObjectParent = 0;
+ rpc->params.hObjectOld = object->handle;
+ return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
+}
+
+static void
+r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *params)
+{
+ rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc);
+
+ nvkm_gsp_rpc_done(object->client->gsp, rpc);
+}
+
+static void *
+r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *params)
+{
+ rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc);
+ struct nvkm_gsp *gsp = object->client->gsp;
+ void *ret = NULL;
+
+ rpc = nvkm_gsp_rpc_push(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV, sizeof(*rpc));
+ if (IS_ERR_OR_NULL(rpc))
+ return rpc;
+
+ if (rpc->status) {
+ ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status));
+ if (PTR_ERR(ret) != -EAGAIN && PTR_ERR(ret) != -EBUSY)
+ nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
+ }
+
+ nvkm_gsp_rpc_done(gsp, rpc);
+
+ return ret;
+}
+
+static void *
+r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass,
+ u32 params_size)
+{
+ struct nvkm_gsp_client *client = object->client;
+ struct nvkm_gsp *gsp = client->gsp;
+ rpc_gsp_rm_alloc_v03_00 *rpc;
+
+ nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x\n",
+ client->object.handle, object->parent->handle,
+ object->handle);
+
+ nvkm_debug(&gsp->subdev, "cls:0x%08x params_size:%d\n", oclass,
+ params_size);
+
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC,
+ sizeof(*rpc) + params_size);
+ if (IS_ERR(rpc))
+ return rpc;
+
+ rpc->hClient = client->object.handle;
+ rpc->hParent = object->parent->handle;
+ rpc->hObject = object->handle;
+ rpc->hClass = oclass;
+ rpc->status = 0;
+ rpc->paramsSize = params_size;
+ return rpc->params;
+}
+
+const struct nvkm_rm_api_alloc
+r535_alloc = {
+ .get = r535_gsp_rpc_rm_alloc_get,
+ .push = r535_gsp_rpc_rm_alloc_push,
+ .done = r535_gsp_rpc_rm_alloc_done,
+ .free = r535_gsp_rpc_rm_free,
+};
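
These four hooks implement the usual get/fill/push/free RPC sequence for RM_ALLOC; a minimal sketch of the caller-side flow, assuming the nvkm_gsp_rm_alloc() convenience wrapper (used elsewhere in this series, e.g. rm/engine.c) dispatches to them:

	/* .get  builds the rpc_gsp_rm_alloc_v03_00 header, returns rpc->params */
	/* caller fills the class-specific parameter struct in-place            */
	/* .push sends the RPC and maps rpc->status to an errno                 */
	/* .free sends NV_VGPU_MSG_FUNCTION_FREE when the object is torn down   */
	ret = nvkm_gsp_rm_alloc(&chan, handle, oclass, 0, &obj->rm);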
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c
new file mode 100644
index 000000000000..d06bf95b9a4a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <subdev/bar/gf100.h>
+
+#include <core/mm.h>
+#include <subdev/fb.h>
+#include <subdev/gsp.h>
+#include <subdev/instmem.h>
+#include <subdev/mmu/vmm.h>
+
+#include "nvrm/bar.h"
+#include "nvrm/rpcfn.h"
+
+static void
+r535_bar_flush(struct nvkm_bar *bar)
+{
+ /* Use NV_PFLUSH in resume path - needed on R570 to flush writes before
+ * BAR2 page tables have been restored.
+ */
+ if (unlikely(!bar->bar2)) {
+ g84_bar_flush(bar);
+ return;
+ }
+
+ ioread32_native(bar->flushBAR2);
+}
+
+static void
+r535_bar_bar2_wait(struct nvkm_bar *base)
+{
+}
+
+static int
+r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u8 page_shift, u64 pdbe)
+{
+ rpc_update_bar_pde_v15_00 *rpc;
+
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_UPDATE_BAR_PDE, sizeof(*rpc));
+ if (WARN_ON(IS_ERR_OR_NULL(rpc)))
+ return -EIO;
+
+ rpc->info.barType = NV_RPC_UPDATE_PDE_BAR_2;
+ rpc->info.entryValue = pdbe;
+ rpc->info.entryLevelShift = page_shift;
+
+ return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
+}
+
+static void
+r535_bar_bar2_fini(struct nvkm_bar *bar)
+{
+ struct nvkm_vmm *vmm = gf100_bar(bar)->bar[0].vmm;
+ struct nvkm_gsp *gsp = bar->subdev.device->gsp;
+
+ bar->flushBAR2 = bar->flushBAR2PhysMode;
+ nvkm_done(bar->flushFBZero);
+
+ WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->func->page[0].shift, 0));
+}
+
+static void
+r535_bar_bar2_init(struct nvkm_bar *bar)
+{
+ struct nvkm_device *device = bar->subdev.device;
+ struct nvkm_vmm *vmm = gf100_bar(bar)->bar[0].vmm;
+ struct nvkm_gsp *gsp = device->gsp;
+ struct nvkm_memory *pdb = vmm->pd->pt[0]->memory;
+ u32 pdb_offset = vmm->pd->pt[0]->base;
+ u32 pdbe_lo, pdbe_hi;
+ u64 pdbe;
+
+ nvkm_kmap(pdb);
+ pdbe_lo = nvkm_ro32(pdb, pdb_offset + 0);
+ pdbe_hi = nvkm_ro32(pdb, pdb_offset + 4);
+ pdbe = ((u64)pdbe_hi << 32) | pdbe_lo;
+ nvkm_done(pdb);
+
+ WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->func->page[0].shift, pdbe));
+ vmm->rm.bar2_pdb = gsp->bar.rm_bar2_pdb;
+
+ if (!bar->flushFBZero) {
+ struct nvkm_memory *fbZero;
+ int ret;
+
+ ret = nvkm_ram_wrap(device, 0, 0x1000, &fbZero);
+ if (ret == 0) {
+ ret = nvkm_memory_kmap(fbZero, &bar->flushFBZero);
+ nvkm_memory_unref(&fbZero);
+ }
+ WARN_ON(ret);
+ }
+
+ bar->bar2 = true;
+ bar->flushBAR2 = nvkm_kmap(bar->flushFBZero);
+ WARN_ON(!bar->flushBAR2);
+}
+
+static void
+r535_bar_bar1_wait(struct nvkm_bar *base)
+{
+}
+
+static void
+r535_bar_bar1_fini(struct nvkm_bar *base)
+{
+}
+
+static void
+r535_bar_bar1_init(struct nvkm_bar *bar)
+{
+ struct nvkm_device *device = bar->subdev.device;
+ struct nvkm_gsp *gsp = device->gsp;
+ struct nvkm_vmm *vmm = gf100_bar(bar)->bar[1].vmm;
+ struct nvkm_memory *pd3;
+ int ret;
+
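+ /* Adopt the BAR1 page directory that GSP-RM created, replacing the
+ * one NVKM allocated for the VMM.
+ */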
+ ret = nvkm_ram_wrap(device, gsp->bar.rm_bar1_pdb, 0x1000, &pd3);
+ if (WARN_ON(ret))
+ return;
+
+ nvkm_memory_unref(&vmm->pd->pt[0]->memory);
+
+ ret = nvkm_memory_kmap(pd3, &vmm->pd->pt[0]->memory);
+ nvkm_memory_unref(&pd3);
+ if (WARN_ON(ret))
+ return;
+
+ vmm->pd->pt[0]->addr = nvkm_memory_addr(vmm->pd->pt[0]->memory);
+}
+
+static void *
+r535_bar_dtor(struct nvkm_bar *bar)
+{
+ void *data = gf100_bar_dtor(bar);
+
+ nvkm_memory_unref(&bar->flushFBZero);
+
+ if (bar->flushBAR2PhysMode)
+ iounmap(bar->flushBAR2PhysMode);
+
+ kfree(bar->func);
+ return data;
+}
+
+int
+r535_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_bar **pbar)
+{
+ struct nvkm_bar_func *rm;
+ struct nvkm_bar *bar;
+ int ret;
+
+ if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+ return -ENOMEM;
+
+ rm->dtor = r535_bar_dtor;
+ rm->oneinit = hw->oneinit;
+ rm->bar1.init = r535_bar_bar1_init;
+ rm->bar1.fini = r535_bar_bar1_fini;
+ rm->bar1.wait = r535_bar_bar1_wait;
+ rm->bar1.vmm = hw->bar1.vmm;
+ rm->bar2.init = r535_bar_bar2_init;
+ rm->bar2.fini = r535_bar_bar2_fini;
+ rm->bar2.wait = r535_bar_bar2_wait;
+ rm->bar2.vmm = hw->bar2.vmm;
+ rm->flush = r535_bar_flush;
+
+ ret = gf100_bar_new_(rm, device, type, inst, &bar);
+ if (ret) {
+ kfree(rm);
+ return ret;
+ }
+ *pbar = bar;
+
+ bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, NVKM_BAR2_INST), PAGE_SIZE);
+ if (!bar->flushBAR2PhysMode)
+ return -ENOMEM;
+
+ bar->flushBAR2 = bar->flushBAR2PhysMode;
+
+ gf100_bar(*pbar)->bar2_halve = true;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c
new file mode 100644
index 000000000000..2d1ce9db2dcf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/engine.h>
+
+#include "nvrm/ce.h"
+#include "nvrm/engine.h"
+
+static int
+r535_ce_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *ce)
+{
+ NVC0B5_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), ce);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->version = 1;
+ args->engineType = NV2080_ENGINE_TYPE_COPY0 + inst;
+
+ return nvkm_gsp_rm_alloc_wr(ce, args);
+}
+
+const struct nvkm_rm_api_engine
+r535_ce = {
+ .alloc = r535_ce_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c
new file mode 100644
index 000000000000..ec71f683e609
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/rm.h>
+
+#include "nvrm/client.h"
+
+static int
+r535_gsp_client_ctor(struct nvkm_gsp_client *client, u32 handle)
+{
+ NV0000_ALLOC_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(&client->object, handle, NV01_ROOT, sizeof(*args),
+ &client->object);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->hClient = client->object.handle;
+ args->processID = ~0;
+
+ return nvkm_gsp_rm_alloc_wr(&client->object, args);
+}
+
+const struct nvkm_rm_api_client
+r535_client = {
+ .ctor = r535_gsp_client_ctor,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c
new file mode 100644
index 000000000000..70b9ee911c5e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/rpc.h>
+
+#include "nvrm/ctrl.h"
+#include "nvrm/rpcfn.h"
+
+static void
+r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *params)
+{
+ rpc_gsp_rm_control_v03_00 *rpc;
+
+ if (!params)
+ return;
+
+ rpc = to_payload_hdr(params, rpc);
+ nvkm_gsp_rpc_done(object->client->gsp, rpc);
+}
+
+static int
+r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **params, u32 repc)
+{
+ rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr((*params), rpc);
+ struct nvkm_gsp *gsp = object->client->gsp;
+ int ret = 0;
+
+ rpc = nvkm_gsp_rpc_push(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV, repc);
+ if (IS_ERR_OR_NULL(rpc)) {
+ *params = NULL;
+ return PTR_ERR(rpc);
+ }
+
+ if (rpc->status) {
+ ret = r535_rpc_status_to_errno(rpc->status);
+ if (ret != -EAGAIN && ret != -EBUSY)
+ nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n",
+ object->client->object.handle, object->handle, rpc->cmd, rpc->status);
+ }
+
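+ /* If the caller expects reply data, hand back the params buffer (the
+ * caller must then call done() to release it); otherwise release the
+ * message now.
+ */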
+ if (repc)
+ *params = rpc->params;
+ else
+ nvkm_gsp_rpc_done(gsp, rpc);
+
+ return ret;
+}
+
+static void *
+r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 params_size)
+{
+ struct nvkm_gsp_client *client = object->client;
+ struct nvkm_gsp *gsp = client->gsp;
+ rpc_gsp_rm_control_v03_00 *rpc;
+
+ nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x params_size:%d\n",
+ client->object.handle, object->handle, cmd, params_size);
+
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL,
+ sizeof(*rpc) + params_size);
+ if (IS_ERR(rpc))
+ return rpc;
+
+ rpc->hClient = client->object.handle;
+ rpc->hObject = object->handle;
+ rpc->cmd = cmd;
+ rpc->status = 0;
+ rpc->paramsSize = params_size;
+ return rpc->params;
+}
+
+const struct nvkm_rm_api_ctrl
+r535_ctrl = {
+ .get = r535_gsp_rpc_rm_ctrl_get,
+ .push = r535_gsp_rpc_rm_ctrl_push,
+ .done = r535_gsp_rpc_rm_ctrl_done,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c
new file mode 100644
index 000000000000..f830e12a8f6e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/rm.h>
+
+#include "nvrm/device.h"
+#include "nvrm/event.h"
+
+static void
+r535_gsp_event_dtor(struct nvkm_gsp_event *event)
+{
+ struct nvkm_gsp_device *device = event->device;
+ struct nvkm_gsp_client *client = device->object.client;
+ struct nvkm_gsp *gsp = client->gsp;
+
+ mutex_lock(&gsp->client_id.mutex);
+ if (event->func) {
+ list_del(&event->head);
+ event->func = NULL;
+ }
+ mutex_unlock(&gsp->client_id.mutex);
+
+ nvkm_gsp_rm_free(&event->object);
+ event->device = NULL;
+}
+
+static int
+r535_gsp_device_event_get(struct nvkm_gsp_event *event)
+{
+ struct nvkm_gsp_device *device = event->device;
+ NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&device->subdevice,
+ NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->event = event->id;
+ ctrl->action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
+ return nvkm_gsp_rm_ctrl_wr(&device->subdevice, ctrl);
+}
+
+static int
+r535_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id,
+ nvkm_gsp_event_func func, struct nvkm_gsp_event *event)
+{
+ struct nvkm_gsp_client *client = device->object.client;
+ struct nvkm_gsp *gsp = client->gsp;
+ NV0005_ALLOC_PARAMETERS *args;
+ int ret;
+
+ args = nvkm_gsp_rm_alloc_get(&device->subdevice, handle,
+ NV01_EVENT_KERNEL_CALLBACK_EX, sizeof(*args),
+ &event->object);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->hParentClient = client->object.handle;
+ args->hSrcResource = 0;
+ args->hClass = NV01_EVENT_KERNEL_CALLBACK_EX;
+ args->notifyIndex = NV01_EVENT_CLIENT_RM | id;
+ args->data = NULL;
+
+ ret = nvkm_gsp_rm_alloc_wr(&event->object, args);
+ if (ret)
+ return ret;
+
+ event->device = device;
+ event->id = id;
+
+ ret = r535_gsp_device_event_get(event);
+ if (ret) {
+ nvkm_gsp_event_dtor(event);
+ return ret;
+ }
+
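+ /* Publish the handler on the client's event list under the client-id
+ * lock, so GSP event RPC dispatch can look it up safely.
+ */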
+ mutex_lock(&gsp->client_id.mutex);
+ event->func = func;
+ list_add(&event->head, &client->events);
+ mutex_unlock(&gsp->client_id.mutex);
+ return 0;
+}
+
+static void
+r535_gsp_device_dtor(struct nvkm_gsp_device *device)
+{
+ nvkm_gsp_rm_free(&device->subdevice);
+ nvkm_gsp_rm_free(&device->object);
+}
+
+static int
+r535_gsp_subdevice_ctor(struct nvkm_gsp_device *device)
+{
+ NV2080_ALLOC_PARAMETERS *args;
+
+ return nvkm_gsp_rm_alloc(&device->object, NVKM_RM_SUBDEVICE, NV20_SUBDEVICE_0,
+ sizeof(*args), &device->subdevice);
+}
+
+static int
+r535_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
+{
+ NV0080_ALLOC_PARAMETERS *args;
+ int ret;
+
+ args = nvkm_gsp_rm_alloc_get(&client->object, NVKM_RM_DEVICE, NV01_DEVICE_0, sizeof(*args),
+ &device->object);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->hClientShare = client->object.handle;
+
+ ret = nvkm_gsp_rm_alloc_wr(&device->object, args);
+ if (ret)
+ return ret;
+
+ ret = r535_gsp_subdevice_ctor(device);
+ if (ret)
+ nvkm_gsp_rm_free(&device->object);
+
+ return ret;
+}
+
+const struct nvkm_rm_api_device
+r535_device = {
+ .ctor = r535_gsp_device_ctor,
+ .dtor = r535_gsp_device_dtor,
+ .event.ctor = r535_gsp_device_event_ctor,
+ .event.dtor = r535_gsp_event_dtor,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
new file mode 100644
index 000000000000..7e9e2d3564da
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
@@ -0,0 +1,1793 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <engine/disp/priv.h>
+#include <engine/disp/chan.h>
+#include <engine/disp/conn.h>
+#include <engine/disp/dp.h>
+#include <engine/disp/head.h>
+#include <engine/disp/ior.h>
+#include <engine/disp/outp.h>
+
+#include <core/ramht.h>
+#include <subdev/bios.h>
+#include <subdev/bios/conn.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu.h>
+#include <subdev/vfn.h>
+
+#include <rm/gpu.h>
+
+#include <nvhw/drf.h>
+
+#include "nvrm/disp.h"
+
+#include <linux/acpi.h>
+
+static u64
+r535_chan_user(struct nvkm_disp_chan *chan, u64 *psize)
+{
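+ /* Return the user-area offset/size for each display channel class
+ * (low byte: 0x7d core, 0x7e window, 0x7b window-immediate, 0x7a cursor).
+ */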
+ switch (chan->object.oclass & 0xff) {
+ case 0x7d: *psize = 0x10000; return 0x680000;
+ case 0x7e: *psize = 0x01000; return 0x690000 + (chan->head * *psize);
+ case 0x7b: *psize = 0x01000; return 0x6b0000 + (chan->head * *psize);
+ case 0x7a: *psize = 0x01000; return 0x6d8000 + (chan->head * *psize);
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ return 0ULL;
+}
+
+static void
+r535_chan_intr(struct nvkm_disp_chan *chan, bool en)
+{
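+ /* Nothing to do: channel interrupt routing is managed by GSP-RM. */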
+}
+
+static void
+r535_chan_fini(struct nvkm_disp_chan *chan)
+{
+ nvkm_gsp_rm_free(&chan->rm.object);
+}
+
+static int
+r535_disp_chan_set_pushbuf(struct nvkm_disp *disp, s32 oclass, int inst, struct nvkm_memory *memory)
+{
+ struct nvkm_gsp *gsp = disp->rm.objcom.client->gsp;
+ NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ if (memory) {
+ switch (nvkm_memory_target(memory)) {
+ case NVKM_MEM_TARGET_NCOH:
+ ctrl->addressSpace = ADDR_SYSMEM;
+ ctrl->cacheSnoop = 0;
+ break;
+ case NVKM_MEM_TARGET_HOST:
+ ctrl->addressSpace = ADDR_SYSMEM;
+ ctrl->cacheSnoop = 1;
+ break;
+ case NVKM_MEM_TARGET_VRAM:
+ ctrl->addressSpace = ADDR_FBMEM;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ ctrl->physicalAddr = nvkm_memory_addr(memory);
+ ctrl->limit = nvkm_memory_size(memory) - 1;
+ }
+
+ ctrl->hclass = oclass;
+ ctrl->channelInstance = inst;
+ ctrl->valid = ((oclass & 0xff) != 0x7a) ? 1 : 0;
+
+ return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+}
+
+static int
+r535_curs_init(struct nvkm_disp_chan *chan)
+{
+ const struct nvkm_rm_api *rmapi = chan->disp->rm.objcom.client->gsp->rm->api;
+ NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS *args;
+ int ret;
+
+ ret = rmapi->disp->chan.set_pushbuf(chan->disp, chan->object.oclass, chan->head, NULL);
+ if (ret)
+ return ret;
+
+ args = nvkm_gsp_rm_alloc_get(&chan->disp->rm.object,
+ (chan->object.oclass << 16) | chan->head,
+ chan->object.oclass, sizeof(*args), &chan->rm.object);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->channelInstance = chan->head;
+
+ return nvkm_gsp_rm_alloc_wr(&chan->rm.object, args);
+}
+
+static const struct nvkm_disp_chan_func
+r535_curs_func = {
+ .init = r535_curs_init,
+ .fini = r535_chan_fini,
+ .intr = r535_chan_intr,
+ .user = r535_chan_user,
+};
+
+static const struct nvkm_disp_chan_user
+r535_curs = {
+ .func = &r535_curs_func,
+ .user = 73,
+};
+
+static int
+r535_dmac_bind(struct nvkm_disp_chan *chan, struct nvkm_object *object, u32 handle)
+{
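+ /* The RAMHT entry data packs the user channel id into the high bits
+ * and the low 14 bits of the RM client handle below it.
+ */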
+ return nvkm_ramht_insert(chan->disp->ramht, object, chan->chid.user, -9, handle,
+ chan->chid.user << 25 |
+ (chan->disp->rm.client.object.handle & 0x3fff));
+}
+
+static void
+r535_dmac_fini(struct nvkm_disp_chan *chan)
+{
+ struct nvkm_device *device = chan->disp->engine.subdev.device;
+ const u32 uoff = (chan->chid.user - 1) * 0x1000;
+
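+ /* Save the channel's current PUT pointer so r535_dmac_init() can
+ * resume the pushbuffer from the same offset after suspend.
+ */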
+ chan->suspend_put = nvkm_rd32(device, 0x690000 + uoff);
+ r535_chan_fini(chan);
+}
+
+static int
+r535_dmac_alloc(struct nvkm_disp *disp, u32 oclass, int inst, u32 put_offset,
+ struct nvkm_gsp_object *dmac)
+{
+ NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(&disp->rm.object, (oclass << 16) | inst, oclass,
+ sizeof(*args), dmac);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->channelInstance = inst;
+ args->offset = put_offset;
+
+ return nvkm_gsp_rm_alloc_wr(dmac, args);
+}
+
+static int
+r535_dmac_init(struct nvkm_disp_chan *chan)
+{
+ const struct nvkm_rm_api *rmapi = chan->disp->rm.objcom.client->gsp->rm->api;
+ int ret;
+
+ ret = rmapi->disp->chan.set_pushbuf(chan->disp, chan->object.oclass, chan->head, chan->memory);
+ if (ret)
+ return ret;
+
+ return rmapi->disp->chan.dmac_alloc(chan->disp, chan->object.oclass, chan->head,
+ chan->suspend_put, &chan->rm.object);
+}
+
+static int
+r535_dmac_push(struct nvkm_disp_chan *chan, u64 memory)
+{
+ chan->memory = nvkm_umem_search(chan->object.client, memory);
+ if (IS_ERR(chan->memory))
+ return PTR_ERR(chan->memory);
+
+ return 0;
+}
+
+static const struct nvkm_disp_chan_func
+r535_dmac_func = {
+ .push = r535_dmac_push,
+ .init = r535_dmac_init,
+ .fini = r535_dmac_fini,
+ .intr = r535_chan_intr,
+ .user = r535_chan_user,
+ .bind = r535_dmac_bind,
+};
+
+static const struct nvkm_disp_chan_func
+r535_wimm_func = {
+ .push = r535_dmac_push,
+ .init = r535_dmac_init,
+ .fini = r535_dmac_fini,
+ .intr = r535_chan_intr,
+ .user = r535_chan_user,
+};
+
+static const struct nvkm_disp_chan_user
+r535_wimm = {
+ .func = &r535_wimm_func,
+ .user = 33,
+};
+
+static const struct nvkm_disp_chan_user
+r535_wndw = {
+ .func = &r535_dmac_func,
+ .user = 1,
+};
+
+static void
+r535_core_fini(struct nvkm_disp_chan *chan)
+{
+ struct nvkm_device *device = chan->disp->engine.subdev.device;
+
+ chan->suspend_put = nvkm_rd32(device, 0x680000);
+ r535_chan_fini(chan);
+}
+
+static const struct nvkm_disp_chan_func
+r535_core_func = {
+ .push = r535_dmac_push,
+ .init = r535_dmac_init,
+ .fini = r535_core_fini,
+ .intr = r535_chan_intr,
+ .user = r535_chan_user,
+ .bind = r535_dmac_bind,
+};
+
+static const struct nvkm_disp_chan_user
+r535_core = {
+ .func = &r535_core_func,
+ .user = 0,
+};
+
+static int
+r535_bl_ctrl(struct nvkm_disp *disp, unsigned int display_id, bool set, int *pval)
+{
+ u32 cmd = set ? NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS :
+ NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS;
+ NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, cmd, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->displayId = BIT(display_id);
+ ctrl->brightness = *pval;
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret)
+ return ret;
+
+ *pval = ctrl->brightness;
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r535_sor_bl_set(struct nvkm_ior *sor, int lvl)
+{
+ struct nvkm_disp *disp = sor->disp;
+ const struct nvkm_rm_api *rmapi = disp->engine.subdev.device->gsp->rm->api;
+
+ return rmapi->disp->bl_ctrl(disp, sor->asy.outp->index, true, &lvl);
+}
+
+static int
+r535_sor_bl_get(struct nvkm_ior *sor)
+{
+ struct nvkm_disp *disp = sor->disp;
+ const struct nvkm_rm_api *rmapi = disp->engine.subdev.device->gsp->rm->api;
+ int lvl, ret = rmapi->disp->bl_ctrl(disp, sor->asy.outp->index, false, &lvl);
+
+ return (ret == 0) ? lvl : ret;
+}
+
+static const struct nvkm_ior_func_bl
+r535_sor_bl = {
+ .get = r535_sor_bl_get,
+ .set = r535_sor_bl_set,
+};
+
+static void
+r535_sor_hda_eld(struct nvkm_ior *sor, int head, u8 *data, u8 size)
+{
+ struct nvkm_disp *disp = sor->disp;
+ NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS *ctrl;
+
+ if (WARN_ON(size > sizeof(ctrl->bufferELD)))
+ return;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return;
+
+ ctrl->displayId = BIT(sor->asy.outp->index);
+ ctrl->numELDSize = size;
+ memcpy(ctrl->bufferELD, data, size);
+ ctrl->maxFreqSupported = 0; //XXX
+ ctrl->ctrl = NVDEF(NV0073, CTRL_DFP_ELD_AUDIO_CAPS_CTRL, PD, TRUE);
+ ctrl->ctrl |= NVDEF(NV0073, CTRL_DFP_ELD_AUDIO_CAPS_CTRL, ELDV, TRUE);
+ ctrl->deviceEntry = head;
+
+ WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static void
+r535_sor_hda_hpd(struct nvkm_ior *sor, int head, bool present)
+{
+ struct nvkm_disp *disp = sor->disp;
+ NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS *ctrl;
+
+ if (present)
+ return;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return;
+
+ ctrl->displayId = BIT(sor->asy.outp->index);
+ ctrl->deviceEntry = head;
+
+ WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static const struct nvkm_ior_func_hda
+r535_sor_hda = {
+ .hpd = r535_sor_hda_hpd,
+ .eld = r535_sor_hda_eld,
+};
+
+static void
+r535_sor_dp_audio_mute(struct nvkm_ior *sor, bool mute)
+{
+ struct nvkm_disp *disp = sor->disp;
+ NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return;
+
+ ctrl->displayId = BIT(sor->asy.outp->index);
+ ctrl->mute = mute;
+ WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static void
+r535_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable)
+{
+ struct nvkm_disp *disp = sor->disp;
+ NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS *ctrl;
+
+ if (!enable)
+ r535_sor_dp_audio_mute(sor, true);
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return;
+
+ ctrl->displayId = BIT(sor->asy.outp->index);
+ ctrl->enable = enable;
+ WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+
+ if (enable)
+ r535_sor_dp_audio_mute(sor, false);
+}
+
+static void
+r535_sor_dp_vcpi(struct nvkm_ior *sor, int head, u8 slot, u8 slot_nr, u16 pbn, u16 aligned_pbn)
+{
+ struct nvkm_disp *disp = sor->disp;
+ struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_CONFIG_STREAM, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return;
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->head = head;
+ ctrl->sorIndex = sor->id;
+ ctrl->dpLink = sor->asy.link == 2;
+ ctrl->bEnableOverride = 1;
+ ctrl->bMST = 1;
+ ctrl->hBlankSym = 0;
+ ctrl->vBlankSym = 0;
+ ctrl->colorFormat = 0;
+ ctrl->bEnableTwoHeadOneOr = 0;
+ ctrl->singleHeadMultistreamMode = 0;
+ ctrl->MST.slotStart = slot;
+ ctrl->MST.slotEnd = slot + slot_nr - 1;
+ ctrl->MST.PBN = pbn;
+ ctrl->MST.Timeslice = aligned_pbn;
+ ctrl->MST.sendACT = 0;
+ ctrl->MST.singleHeadMSTPipeline = 0;
+ ctrl->MST.bEnableAudioOverRightPanel = 0;
+ WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static int
+r535_sor_dp_sst(struct nvkm_ior *sor, int head, bool ef,
+ u32 watermark, u32 hblanksym, u32 vblanksym)
+{
+ struct nvkm_disp *disp = sor->disp;
+ struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_CONFIG_STREAM, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->head = head;
+ ctrl->sorIndex = sor->id;
+ ctrl->dpLink = sor->asy.link == 2;
+ ctrl->bEnableOverride = 1;
+ ctrl->bMST = 0;
+ ctrl->hBlankSym = hblanksym;
+ ctrl->vBlankSym = vblanksym;
+ ctrl->colorFormat = 0;
+ ctrl->bEnableTwoHeadOneOr = 0;
+ ctrl->SST.bEnhancedFraming = ef;
+ ctrl->SST.tuSize = 64;
+ ctrl->SST.waterMark = watermark;
+ ctrl->SST.bEnableAudioOverRightPanel = 0;
+ return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+static const struct nvkm_ior_func_dp
+r535_sor_dp = {
+ .sst = r535_sor_dp_sst,
+ .vcpi = r535_sor_dp_vcpi,
+ .audio = r535_sor_dp_audio,
+};
+
+static void
+r535_sor_hdmi_scdc(struct nvkm_ior *sor, u32 khz, bool support, bool scrambling,
+ bool scrambling_low_rates)
+{
+ struct nvkm_outp *outp = sor->asy.outp;
+ struct nvkm_disp *disp = outp->disp;
+ NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return;
+
+ ctrl->displayId = BIT(outp->index);
+ ctrl->caps = 0;
+ if (support)
+ ctrl->caps |= NVDEF(NV0073_CTRL_CMD_SPECIFIC, SET_HDMI_SINK_CAPS, SCDC_SUPPORTED, TRUE);
+ if (scrambling)
+ ctrl->caps |= NVDEF(NV0073_CTRL_CMD_SPECIFIC, SET_HDMI_SINK_CAPS, GT_340MHZ_CLOCK_SUPPORTED, TRUE);
+ if (scrambling_low_rates)
+ ctrl->caps |= NVDEF(NV0073_CTRL_CMD_SPECIFIC, SET_HDMI_SINK_CAPS, LTE_340MHZ_SCRAMBLING_SUPPORTED, TRUE);
+
+ WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static void
+r535_sor_hdmi_ctrl_audio_mute(struct nvkm_outp *outp, bool mute)
+{
+ struct nvkm_disp *disp = outp->disp;
+ NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return;
+
+ ctrl->displayId = BIT(outp->index);
+ ctrl->mute = mute;
+ WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static void
+r535_sor_hdmi_ctrl_audio(struct nvkm_outp *outp, bool enable)
+{
+ struct nvkm_disp *disp = outp->disp;
+ NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return;
+
+ ctrl->displayId = BIT(outp->index);
+ ctrl->transmitControl =
+ NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, ENABLE, YES) |
+ NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, OTHER_FRAME, DISABLE) |
+ NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, SINGLE_FRAME, DISABLE) |
+ NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, ON_HBLANK, DISABLE) |
+ NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, VIDEO_FMT, SW_CONTROLLED) |
+ NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, RESERVED_LEGACY_MODE, NO);
+ ctrl->packetSize = 10;
+ ctrl->aPacket[0] = 0x03;
+ ctrl->aPacket[1] = 0x00;
+ ctrl->aPacket[2] = 0x00;
+ ctrl->aPacket[3] = enable ? 0x10 : 0x01;
+ ctrl->aPacket[4] = 0x00;
+ ctrl->aPacket[5] = 0x00;
+ ctrl->aPacket[6] = 0x00;
+ ctrl->aPacket[7] = 0x00;
+ ctrl->aPacket[8] = 0x00;
+ ctrl->aPacket[9] = 0x00;
+ WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static void
+r535_sor_hdmi_audio(struct nvkm_ior *sor, int head, bool enable)
+{
+ struct nvkm_device *device = sor->disp->engine.subdev.device;
+ const u32 hdmi = head * 0x400;
+
+ r535_sor_hdmi_ctrl_audio(sor->asy.outp, enable);
+ r535_sor_hdmi_ctrl_audio_mute(sor->asy.outp, !enable);
+
+ /* General Control (GCP). */
+ nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000000);
+ nvkm_wr32(device, 0x6f00cc + hdmi, !enable ? 0x00000001 : 0x00000010);
+ nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000001);
+}
+
+static void
+r535_sor_hdmi_ctrl(struct nvkm_ior *sor, int head, bool enable, u8 max_ac_packet, u8 rekey)
+{
+ struct nvkm_disp *disp = sor->disp;
+ NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS *ctrl;
+
+ if (!enable)
+ return;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return;
+
+ ctrl->displayId = BIT(sor->asy.outp->index);
+ ctrl->enable = enable;
+
+ WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static const struct nvkm_ior_func_hdmi
+r535_sor_hdmi = {
+ .ctrl = r535_sor_hdmi_ctrl,
+ .scdc = r535_sor_hdmi_scdc,
+ /*TODO: SF_USER -> KMS. */
+ .infoframe_avi = gv100_sor_hdmi_infoframe_avi,
+ .infoframe_vsi = gv100_sor_hdmi_infoframe_vsi,
+ .audio = r535_sor_hdmi_audio,
+};
+
+static const struct nvkm_ior_func
+r535_sor = {
+ .hdmi = &r535_sor_hdmi,
+ .dp = &r535_sor_dp,
+ .hda = &r535_sor_hda,
+ .bl = &r535_sor_bl,
+};
+
+static int
+r535_sor_new(struct nvkm_disp *disp, int id)
+{
+ return nvkm_ior_new_(&r535_sor, disp, SOR, id, true/*XXX: hda cap*/);
+}
+
+static int
+r535_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
+{
+ *pmask = 0xf;
+ return 4;
+}
+
+static void
+r535_head_vblank_put(struct nvkm_head *head)
+{
+ struct nvkm_device *device = head->disp->engine.subdev.device;
+
+ nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000002, 0x00000000);
+}
+
+static void
+r535_head_vblank_get(struct nvkm_head *head)
+{
+ struct nvkm_device *device = head->disp->engine.subdev.device;
+
+ nvkm_wr32(device, 0x611800 + (head->id * 4), 0x00000002);
+ nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000002, 0x00000002);
+}
+
+static void
+r535_head_state(struct nvkm_head *head, struct nvkm_head_state *state)
+{
+}
+
+static const struct nvkm_head_func
+r535_head = {
+ .state = r535_head_state,
+ .vblank_get = r535_head_vblank_get,
+ .vblank_put = r535_head_vblank_put,
+};
+
+static struct nvkm_conn *
+r535_conn_new(struct nvkm_disp *disp, u32 id)
+{
+ NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS *ctrl;
+ struct nvbios_connE dcbE = {};
+ struct nvkm_conn *conn;
+ int ret, index;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return (void *)ctrl;
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayId = BIT(id);
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ERR_PTR(ret);
+ }
+
+ list_for_each_entry(conn, &disp->conns, head) {
+ if (conn->index == ctrl->data[0].index) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return conn;
+ }
+ }
+
+ dcbE.type = ctrl->data[0].type;
+ index = ctrl->data[0].index;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+ ret = nvkm_conn_new(disp, index, &dcbE, &conn);
+ if (ret)
+ return ERR_PTR(ret);
+
+ list_add_tail(&conn->head, &disp->conns);
+ return conn;
+}
+
+static void
+r535_outp_release(struct nvkm_outp *outp)
+{
+ outp->disp->rm.assigned_sors &= ~BIT(outp->ior->id);
+ outp->ior->asy.outp = NULL;
+ outp->ior = NULL;
+}
+
+static int
+r535_outp_acquire(struct nvkm_outp *outp, bool hda)
+{
+ struct nvkm_disp *disp = outp->disp;
+ struct nvkm_ior *ior;
+ NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS *ctrl;
+ int ret, or;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DFP_ASSIGN_SOR, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayId = BIT(outp->index);
+ ctrl->sorExcludeMask = disp->rm.assigned_sors;
+ if (hda)
+ ctrl->flags |= NVDEF(NV0073_CTRL, DFP_ASSIGN_SOR_FLAGS, AUDIO, OPTIMAL);
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
+
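+ /* Scan the returned assignment list to find which SOR RM picked for
+ * this display, and mark it as in use.
+ */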
+ for (or = 0; or < ARRAY_SIZE(ctrl->sorAssignListWithTag); or++) {
+ if (ctrl->sorAssignListWithTag[or].displayMask & BIT(outp->index)) {
+ disp->rm.assigned_sors |= BIT(or);
+ break;
+ }
+ }
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+ if (WARN_ON(or == ARRAY_SIZE(ctrl->sorAssignListWithTag)))
+ return -EINVAL;
+
+ ior = nvkm_ior_find(disp, SOR, or);
+ if (WARN_ON(!ior))
+ return -EINVAL;
+
+ nvkm_outp_acquire_ior(outp, NVKM_OUTP_USER, ior);
+ return 0;
+}
+
+static int
+r535_disp_get_active(struct nvkm_disp *disp, unsigned int head, u32 *displayid)
+{
+ NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->head = head;
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
+
+ *displayid = ctrl->displayId;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static struct nvkm_ior *
+r535_outp_inherit(struct nvkm_outp *outp)
+{
+ struct nvkm_disp *disp = outp->disp;
+ struct nvkm_head *head;
+ u32 displayid;
+ int ret;
+
+ list_for_each_entry(head, &disp->heads, head) {
+ const struct nvkm_rm_api *rmapi = disp->rm.objcom.client->gsp->rm->api;
+
+ ret = rmapi->disp->get_active(disp, head->id, &displayid);
+ if (WARN_ON(ret))
+ return NULL;
+
+ if (displayid == BIT(outp->index)) {
+ NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *ctrl;
+ u32 id, proto;
+ struct nvkm_ior *ior;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return NULL;
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayId = displayid;
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return NULL;
+ }
+
+ id = ctrl->index;
+ proto = ctrl->protocol;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+ ior = nvkm_ior_find(disp, SOR, id);
+ if (WARN_ON(!ior))
+ return NULL;
+
+ switch (proto) {
+ case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A:
+ ior->arm.proto = TMDS;
+ ior->arm.link = 1;
+ break;
+ case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B:
+ ior->arm.proto = TMDS;
+ ior->arm.link = 2;
+ break;
+ case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS:
+ ior->arm.proto = TMDS;
+ ior->arm.link = 3;
+ break;
+ case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A:
+ ior->arm.proto = DP;
+ ior->arm.link = 1;
+ break;
+ case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B:
+ ior->arm.proto = DP;
+ ior->arm.link = 2;
+ break;
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+
+ ior->arm.proto_evo = proto;
+ ior->arm.head = BIT(head->id);
+ disp->rm.assigned_sors |= BIT(ior->id);
+ return ior;
+ }
+ }
+
+ return NULL;
+}
+
+static int
+r535_outp_dfp_get_info(struct nvkm_outp *outp)
+{
+ NV0073_CTRL_DFP_GET_INFO_PARAMS *ctrl;
+ struct nvkm_disp *disp = outp->disp;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DFP_GET_INFO, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->displayId = BIT(outp->index);
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
+
+ nvkm_debug(&disp->engine.subdev, "DFP %08x: flags:%08x flags2:%08x\n",
+ ctrl->displayId, ctrl->flags, ctrl->flags2);
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r535_disp_get_connect_state(struct nvkm_disp *disp, unsigned int display_id)
+{
+ NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayMask = BIT(display_id);
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret == 0 && (ctrl->displayMask & BIT(display_id)))
+ ret = 1;
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+}
+
+static int
+r535_outp_detect(struct nvkm_outp *outp)
+{
+ const struct nvkm_rm_api *rmapi = outp->disp->rm.objcom.client->gsp->rm->api;
+ int ret;
+
+ ret = rmapi->disp->get_connect_state(outp->disp, outp->index);
+ if (ret == 1) {
+ ret = r535_outp_dfp_get_info(outp);
+ if (ret == 0)
+ ret = 1;
+ }
+
+ return ret;
+}
+
+static int
+r535_dp_mst_id_put(struct nvkm_outp *outp, u32 id)
+{
+ NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS *ctrl;
+ struct nvkm_disp *disp = outp->disp;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayId = id;
+ return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+static int
+r535_dp_mst_id_get(struct nvkm_outp *outp, u32 *pid)
+{
+ NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS *ctrl;
+ struct nvkm_disp *disp = outp->disp;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayId = BIT(outp->index);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
+
+ *pid = ctrl->displayIdAssigned;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r535_dp_drive(struct nvkm_outp *outp, u8 lanes, u8 pe[4], u8 vs[4])
+{
+ NV0073_CTRL_DP_LANE_DATA_PARAMS *ctrl;
+ struct nvkm_disp *disp = outp->disp;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_SET_LANE_DATA, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->displayId = BIT(outp->index);
+ ctrl->numLanes = lanes;
+ for (int i = 0; i < lanes; i++)
+ ctrl->data[i] = NVVAL(NV0073_CTRL, DP_LANE_DATA, PREEMPHASIS, pe[i]) |
+ NVVAL(NV0073_CTRL, DP_LANE_DATA, DRIVECURRENT, vs[i]);
+
+ return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+static int
+r535_dp_train_target(struct nvkm_outp *outp, u8 target, bool mst, u8 link_nr, u8 link_bw)
+{
+ struct nvkm_disp *disp = outp->disp;
+ NV0073_CTRL_DP_CTRL_PARAMS *ctrl;
+ int ret, retries;
+ u32 cmd, data;
+
+ cmd = NVDEF(NV0073_CTRL, DP_CMD, SET_LANE_COUNT, TRUE) |
+ NVDEF(NV0073_CTRL, DP_CMD, SET_LINK_BW, TRUE) |
+ NVDEF(NV0073_CTRL, DP_CMD, TRAIN_PHY_REPEATER, YES);
+ data = NVVAL(NV0073_CTRL, DP_DATA, SET_LANE_COUNT, link_nr) |
+ NVVAL(NV0073_CTRL, DP_DATA, SET_LINK_BW, link_bw) |
+ NVVAL(NV0073_CTRL, DP_DATA, TARGET, target);
+
+ if (mst)
+ cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_FORMAT_MODE, MULTI_STREAM);
+
+ if (outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
+ cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_ENHANCED_FRAMING, TRUE);
+
+ if (target == 0 &&
+ (outp->dp.dpcd[DPCD_RC02] & 0x20) &&
+ !(outp->dp.dpcd[DPCD_RC03] & DPCD_RC03_TPS4_SUPPORTED))
+ cmd |= NVDEF(NV0073_CTRL, DP_CMD, POST_LT_ADJ_REQ_GRANTED, YES);
+
+ /* We should retry up to 3 times, but only if GSP asks politely */
+ for (retries = 0; retries < 3; ++retries) {
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_CTRL,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayId = BIT(outp->index);
+ ctrl->retryTimeMs = 0;
+ ctrl->cmd = cmd;
+ ctrl->data = data;
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if ((ret == -EAGAIN || ret == -EBUSY) && ctrl->retryTimeMs) {
+ /*
+ * The device (likely an eDP panel) isn't ready yet; wait for the time
+ * specified by GSP before retrying.
+ */
+ nvkm_debug(&disp->engine.subdev,
+ "Waiting %dms for GSP LT panel delay before retrying\n",
+ ctrl->retryTimeMs);
+ msleep(ctrl->retryTimeMs);
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ } else {
+ /* GSP didn't say to retry, or we were successful */
+ if (ctrl->err)
+ ret = -EIO;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int
+r535_dp_train(struct nvkm_outp *outp, bool retrain)
+{
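+ /* Train from the outermost LTTPR (highest target index) down to the
+ * sink itself (target 0).
+ */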
+ for (int target = outp->dp.lttprs; target >= 0; target--) {
+ int ret = r535_dp_train_target(outp, target, outp->dp.lt.mst,
+ outp->dp.lt.nr,
+ outp->dp.lt.bw);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+r535_dp_set_indexed_link_rates(struct nvkm_outp *outp)
+{
+ NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *ctrl;
+ struct nvkm_disp *disp = outp->disp;
+
+ if (WARN_ON(outp->dp.rates > ARRAY_SIZE(ctrl->linkRateTbl)))
+ return -EINVAL;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->displayId = BIT(outp->index);
+ for (int i = 0; i < outp->dp.rates; i++)
+ ctrl->linkRateTbl[outp->dp.rate[i].dpcd] = outp->dp.rate[i].rate * 10 / 200;
+
+ return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+static int
+r535_dp_rates(struct nvkm_outp *outp)
+{
+ struct nvkm_rm *rm = outp->disp->rm.objcom.client->gsp->rm;
+
+ if (outp->conn->info.type != DCB_CONNECTOR_eDP ||
+ !outp->dp.rates || outp->dp.rate[0].dpcd < 0)
+ return 0;
+
+ return rm->api->disp->dp.set_indexed_link_rates(outp);
+}
+
+static int
+r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize)
+{
+ struct nvkm_disp *disp = outp->disp;
+ NV0073_CTRL_DP_AUXCH_CTRL_PARAMS *ctrl;
+ u8 size = *psize;
+ int ret;
+ int retries;
+
+ for (retries = 0; retries < 3; ++retries) {
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_AUXCH_CTRL, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayId = BIT(outp->index);
+ ctrl->bAddrOnly = !size;
+ ctrl->cmd = type;
+ if (ctrl->bAddrOnly) {
+ ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, REQ_TYPE, WRITE);
+ ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, I2C_MOT, FALSE);
+ }
+ ctrl->addr = addr;
+ ctrl->size = !ctrl->bAddrOnly ? (size - 1) : 0;
+ memcpy(ctrl->data, data, size);
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if ((ret == -EAGAIN || ret == -EBUSY) && ctrl->retryTimeMs) {
+ /*
+ * The device (likely an eDP panel) isn't ready yet; wait for the time
+ * specified by GSP before retrying the AUX transfer.
+ */
+ nvkm_debug(&disp->engine.subdev,
+ "Waiting %dms for GSP LT panel delay before retrying AUX transfer\n",
+ ctrl->retryTimeMs);
+ msleep(ctrl->retryTimeMs);
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ } else {
+ memcpy(data, ctrl->data, size);
+ *psize = ctrl->size;
+ ret = ctrl->replyType;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ break;
+ }
+ }
+ return ret;
+}
+
+static int
+r535_dp_aux_pwr(struct nvkm_outp *outp, bool pu)
+{
+ return 0;
+}
+
+static void
+r535_dp_release(struct nvkm_outp *outp)
+{
+ if (!outp->dp.lt.bw) {
+ if (!WARN_ON(!outp->dp.rates))
+ outp->dp.lt.bw = outp->dp.rate[0].rate / 27000;
+ else
+ outp->dp.lt.bw = 0x06;
+ }
+
+ outp->dp.lt.nr = 0;
+
+ r535_dp_train_target(outp, 0, outp->dp.lt.mst, outp->dp.lt.nr, outp->dp.lt.bw);
+ r535_outp_release(outp);
+}
+
+static int
+r535_dp_acquire(struct nvkm_outp *outp, bool hda)
+{
+ int ret;
+
+ ret = r535_outp_acquire(outp, hda);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct nvkm_outp_func
+r535_dp = {
+ .detect = r535_outp_detect,
+ .inherit = r535_outp_inherit,
+ .acquire = r535_dp_acquire,
+ .release = r535_dp_release,
+ .dp.aux_pwr = r535_dp_aux_pwr,
+ .dp.aux_xfer = r535_dp_aux_xfer,
+ .dp.mst_id_get = r535_dp_mst_id_get,
+ .dp.mst_id_put = r535_dp_mst_id_put,
+ .dp.rates = r535_dp_rates,
+ .dp.train = r535_dp_train,
+ .dp.drive = r535_dp_drive,
+};
+
+static int
+r535_dp_get_caps(struct nvkm_disp *disp, int *plink_bw, bool *pmst, bool *pwm)
+{
+ NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->sorIndex = ~0;
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
+
+ switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) {
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62:
+ *plink_bw = 0x06;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70:
+ *plink_bw = 0x0a;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40:
+ *plink_bw = 0x14;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10:
+ *plink_bw = 0x1e;
+ break;
+ default:
+ *plink_bw = 0x00;
+ break;
+ }
+
+ *pmst = ctrl->bIsMultistreamSupported;
+ *pwm = ctrl->bHasIncreasedWatermarkLimits;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r535_tmds_edid_get(struct nvkm_outp *outp, u8 *data, u16 *psize)
+{
+ NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *ctrl;
+ struct nvkm_disp *disp = outp->disp;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayId = BIT(outp->index);
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
+
+ ret = -E2BIG;
+ if (ctrl->bufferSize <= *psize) {
+ memcpy(data, ctrl->edidBuffer, ctrl->bufferSize);
+ *psize = ctrl->bufferSize;
+ ret = 0;
+ }
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+}
+
+static const struct nvkm_outp_func
+r535_tmds = {
+ .detect = r535_outp_detect,
+ .inherit = r535_outp_inherit,
+ .acquire = r535_outp_acquire,
+ .release = r535_outp_release,
+ .edid_get = r535_tmds_edid_get,
+};
+
+static int
+r535_outp_new(struct nvkm_disp *disp, u32 id)
+{
+ const struct nvkm_rm_api *rmapi = disp->rm.objcom.client->gsp->rm->api;
+ NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *ctrl;
+ enum nvkm_ior_proto proto;
+ struct dcb_output dcbE = {};
+ struct nvkm_conn *conn;
+ struct nvkm_outp *outp;
+ u8 locn, link = 0;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayId = BIT(id);
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
+
+ switch (ctrl->type) {
+ case NV0073_CTRL_SPECIFIC_OR_TYPE_NONE:
+ return 0;
+ case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR:
+ switch (ctrl->protocol) {
+ case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A:
+ proto = TMDS;
+ link = 1;
+ break;
+ case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B:
+ proto = TMDS;
+ link = 2;
+ break;
+ case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS:
+ proto = TMDS;
+ link = 3;
+ break;
+ case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A:
+ proto = DP;
+ link = 1;
+ break;
+ case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B:
+ proto = DP;
+ link = 2;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ locn = ctrl->location;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+ conn = r535_conn_new(disp, id);
+ if (IS_ERR(conn))
+ return PTR_ERR(conn);
+
+ switch (proto) {
+ case TMDS: dcbE.type = DCB_OUTPUT_TMDS; break;
+ case DP: dcbE.type = DCB_OUTPUT_DP; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ dcbE.location = locn;
+ dcbE.connector = conn->index;
+ dcbE.heads = disp->head.mask;
+ dcbE.i2c_index = 0xff;
+ dcbE.link = dcbE.sorconf.link = link;
+
+ if (proto == TMDS) {
+ ret = nvkm_outp_new_(&r535_tmds, disp, id, &dcbE, &outp);
+ if (ret)
+ return ret;
+ } else {
+ bool mst, wm;
+
+ ret = rmapi->disp->dp.get_caps(disp, &dcbE.dpconf.link_bw, &mst, &wm);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(!dcbE.dpconf.link_bw))
+ return -EINVAL;
+
+ dcbE.dpconf.link_nr = 4;
+
+ ret = nvkm_outp_new_(&r535_dp, disp, id, &dcbE, &outp);
+ if (ret)
+ return ret;
+
+ outp->dp.mst = mst;
+ outp->dp.increased_wm = wm;
+ }
+
+ outp->conn = conn;
+ list_add_tail(&outp->head, &disp->outps);
+ return 0;
+}
+
+static void
+r535_disp_irq(struct nvkm_gsp_event *event, void *repv, u32 repc)
+{
+ struct nvkm_disp *disp = container_of(event, typeof(*disp), rm.irq);
+ Nv2080DpIrqNotification *irq = repv;
+
+ if (WARN_ON(repc < sizeof(*irq)))
+ return;
+
+ nvkm_debug(&disp->engine.subdev, "event: dp irq displayId %08x\n", irq->displayId);
+
+ if (irq->displayId)
+ nvkm_event_ntfy(&disp->rm.event, fls(irq->displayId) - 1, NVKM_DPYID_IRQ);
+}
+
+static void
+r535_disp_hpd(struct nvkm_gsp_event *event, void *repv, u32 repc)
+{
+ struct nvkm_disp *disp = container_of(event, typeof(*disp), rm.hpd);
+ Nv2080HotplugNotification *hpd = repv;
+
+ if (WARN_ON(repc < sizeof(*hpd)))
+ return;
+
+ nvkm_debug(&disp->engine.subdev, "event: hpd plug %08x unplug %08x\n",
+ hpd->plugDisplayMask, hpd->unplugDisplayMask);
+
+ for (int i = 0; i < 31; i++) {
+ u32 mask = 0;
+
+ if (hpd->plugDisplayMask & BIT(i))
+ mask |= NVKM_DPYID_PLUG;
+ if (hpd->unplugDisplayMask & BIT(i))
+ mask |= NVKM_DPYID_UNPLUG;
+
+ if (mask)
+ nvkm_event_ntfy(&disp->rm.event, i, mask);
+ }
+}
+
+static const struct nvkm_event_func
+r535_disp_event = {
+};
+
+static void
+r535_disp_intr_head_timing(struct nvkm_disp *disp, int head)
+{
+ struct nvkm_subdev *subdev = &disp->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, 0x611c00 + (head * 0x04));
+
+ if (stat & 0x00000002) {
+ nvkm_disp_vblank(disp, head);
+
+ nvkm_wr32(device, 0x611800 + (head * 0x04), 0x00000002);
+ }
+}
+
+static irqreturn_t
+r535_disp_intr(struct nvkm_inth *inth)
+{
+ struct nvkm_disp *disp = container_of(inth, typeof(*disp), engine.subdev.inth);
+ struct nvkm_subdev *subdev = &disp->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ unsigned long mask = nvkm_rd32(device, 0x611ec0) & 0x000000ff;
+ int head;
+
+ for_each_set_bit(head, &mask, 8)
+ r535_disp_intr_head_timing(disp, head);
+
+ return IRQ_HANDLED;
+}
+
+static void
+r535_disp_fini(struct nvkm_disp *disp, bool suspend)
+{
+ if (!disp->engine.subdev.use.enabled)
+ return;
+
+ nvkm_gsp_rm_free(&disp->rm.object);
+
+ if (!suspend) {
+ nvkm_gsp_event_dtor(&disp->rm.irq);
+ nvkm_gsp_event_dtor(&disp->rm.hpd);
+ nvkm_event_fini(&disp->rm.event);
+
+ nvkm_gsp_rm_free(&disp->rm.objcom);
+ nvkm_gsp_device_dtor(&disp->rm.device);
+ nvkm_gsp_client_dtor(&disp->rm.client);
+ }
+}
+
+static int
+r535_disp_init(struct nvkm_disp *disp)
+{
+ int ret;
+
+ ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, disp->func->root.oclass << 16,
+ disp->func->root.oclass, 0, &disp->rm.object);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+r535_disp_get_supported(struct nvkm_disp *disp, unsigned long *pmask)
+{
+ NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ *pmask = ctrl->displayMask;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r535_disp_get_static_info(struct nvkm_disp *disp)
+{
+ NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl;
+ struct nvkm_gsp *gsp = disp->rm.objcom.client->gsp;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ disp->wndw.mask = ctrl->windowPresentMask;
+ disp->wndw.nr = fls(disp->wndw.mask);
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return 0;
+}
+
+static int
+r535_disp_oneinit(struct nvkm_disp *disp)
+{
+ struct nvkm_device *device = disp->engine.subdev.device;
+ struct nvkm_gsp *gsp = device->gsp;
+ const struct nvkm_rm_api *rmapi = gsp->rm->api;
+ NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS *ctrl;
+ unsigned long mask;
+ int ret, i;
+
+ /* RAMIN. */
+ ret = nvkm_gpuobj_new(device, 0x10000, 0x10000, false, NULL, &disp->inst);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(nvkm_memory_target(disp->inst->memory) != NVKM_MEM_TARGET_VRAM))
+ return -EINVAL;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->instMemPhysAddr = nvkm_memory_addr(disp->inst->memory);
+ ctrl->instMemSize = nvkm_memory_size(disp->inst->memory);
+ ctrl->instMemAddrSpace = ADDR_FBMEM;
+ ctrl->instMemCpuCacheAttr = NV_MEMORY_WRITECOMBINED;
+
+ ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+ if (ret)
+ return ret;
+
+ /* OBJs. */
+ ret = nvkm_gsp_client_device_ctor(gsp, &disp->rm.client, &disp->rm.device);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, NVKM_RM_DISP, NV04_DISPLAY_COMMON, 0,
+ &disp->rm.objcom);
+ if (ret)
+ return ret;
+
+ ret = rmapi->disp->get_static_info(disp);
+ if (ret)
+ return ret;
+
+ /* Feed ACPI backlight data (NBCI/NVHG _DSM) to RM, where available. */
+ {
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+ NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS *ctrl;
+ struct nvkm_gsp_object *subdevice = &disp->rm.client.gsp->internal.device.subdevice;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(subdevice,
+ NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->status = 0x56; /* NV_ERR_NOT_SUPPORTED */
+
+ {
+ const guid_t NBCI_DSM_GUID =
+ GUID_INIT(0xD4A50B75, 0x65C7, 0x46F7,
+ 0xBF, 0xB7, 0x41, 0x51, 0x4C, 0xEA, 0x02, 0x44);
+ u64 NBCI_DSM_REV = 0x00000102;
+ const guid_t NVHG_DSM_GUID =
+ GUID_INIT(0x9D95A0A0, 0x0060, 0x4D48,
+ 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4);
+ u64 NVHG_DSM_REV = 0x00000102;
+ acpi_handle handle = ACPI_HANDLE(device->dev);
+
+ if (handle && acpi_has_method(handle, "_DSM")) {
+ bool nbci = acpi_check_dsm(handle, &NBCI_DSM_GUID, NBCI_DSM_REV,
+ 1ULL << 0x00000014);
+ bool nvhg = acpi_check_dsm(handle, &NVHG_DSM_GUID, NVHG_DSM_REV,
+ 1ULL << 0x00000014);
+
+ if (nbci || nvhg) {
+ union acpi_object argv4 = {
+ .buffer.type = ACPI_TYPE_BUFFER,
+ .buffer.length = sizeof(ctrl->backLightData),
+ .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL),
+ }, *obj;
+
+ obj = acpi_evaluate_dsm(handle, nbci ? &NBCI_DSM_GUID : &NVHG_DSM_GUID,
+ 0x00000102, 0x14, &argv4);
+ if (!obj) {
+ acpi_handle_info(handle, "failed to evaluate _DSM\n");
+ } else {
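+ /* Flatten the returned package into backLightData, 4 or 8 bytes per integer depending on its magnitude. */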
+ for (int i = 0; i < obj->package.count; i++) {
+ union acpi_object *elt = &obj->package.elements[i];
+ u32 size;
+
+ if (elt->integer.value & ~0xffffffffULL)
+ size = 8;
+ else
+ size = 4;
+
+ memcpy(&ctrl->backLightData[ctrl->backLightDataSize], &elt->integer.value, size);
+ ctrl->backLightDataSize += size;
+ }
+
+ ctrl->status = 0;
+ ACPI_FREE(obj);
+ }
+
+ kfree(argv4.buffer.pointer);
+ }
+ }
+ }
+
+ ret = nvkm_gsp_rm_ctrl_wr(subdevice, ctrl);
+ if (ret)
+ return ret;
+#endif
+ }
+
+ /* Put RM into manual DisplayPort mode - the driver does its own link training. */
+ {
+ NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ret = nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+ if (ret)
+ return ret;
+ }
+
+ /* Query the number of heads. */
+ {
+ NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ disp->head.nr = ctrl->numHeads;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ }
+
+ /* Query the head mask, and construct head objects. */
+ {
+ NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ disp->head.mask = ctrl->headMask;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+ for_each_set_bit(i, &disp->head.mask, disp->head.nr) {
+ ret = nvkm_head_new_(&r535_head, disp, i);
+ if (ret)
+ return ret;
+ }
+ }
+
+ disp->sor.nr = disp->func->sor.cnt(disp, &disp->sor.mask);
+ nvkm_debug(&disp->engine.subdev, " SOR(s): %d (%02lx)\n", disp->sor.nr, disp->sor.mask);
+ for_each_set_bit(i, &disp->sor.mask, disp->sor.nr) {
+ ret = disp->func->sor.new(disp, i);
+ if (ret)
+ return ret;
+ }
+
+ ret = rmapi->disp->get_supported(disp, &mask);
+ if (ret)
+ return ret;
+
+ for_each_set_bit(i, &mask, 32) {
+ ret = r535_outp_new(disp, i);
+ if (ret)
+ return ret;
+ }
+
+ ret = nvkm_event_init(&r535_disp_event, &gsp->subdev, 3, 32, &disp->rm.event);
+ if (WARN_ON(ret))
+ return ret;
+
+ ret = nvkm_gsp_device_event_ctor(&disp->rm.device, 0x007e0000, NV2080_NOTIFIERS_HOTPLUG,
+ r535_disp_hpd, &disp->rm.hpd);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gsp_device_event_ctor(&disp->rm.device, 0x007e0001, NV2080_NOTIFIERS_DP_IRQ,
+ r535_disp_irq, &disp->rm.irq);
+ if (ret)
+ return ret;
+
+ /* RAMHT. */
+ ret = nvkm_ramht_new(device, disp->func->ramht_size ? disp->func->ramht_size :
+ 0x1000, 0, disp->inst, &disp->ramht);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gsp_intr_stall(gsp, disp->engine.subdev.type, disp->engine.subdev.inst);
+ if (ret < 0)
+ return ret;
+
+ ret = nvkm_inth_add(&device->vfn->intr, ret, NVKM_INTR_PRIO_NORMAL, &disp->engine.subdev,
+ r535_disp_intr, &disp->engine.subdev.inth);
+ if (ret)
+ return ret;
+
+ nvkm_inth_allow(&disp->engine.subdev.inth);
+ return 0;
+}
+
+static void
+r535_disp_dtor(struct nvkm_disp *disp)
+{
+ kfree(disp->func);
+}
+
+int
+r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp)
+{
+ const struct nvkm_rm_gpu *gpu = device->gsp->rm->gpu;
+ struct nvkm_disp_func *rm;
+ int ret;
+
+ if (!(rm = kzalloc(sizeof(*rm) + 6 * sizeof(rm->user[0]), GFP_KERNEL)))
+ return -ENOMEM;
+
+ rm->dtor = r535_disp_dtor;
+ rm->oneinit = r535_disp_oneinit;
+ rm->init = r535_disp_init;
+ rm->fini = r535_disp_fini;
+ rm->uevent = hw->uevent;
+ rm->sor.cnt = r535_sor_cnt;
+ rm->sor.new = r535_sor_new;
+ rm->ramht_size = hw->ramht_size;
+
+ rm->root.oclass = gpu->disp.class.root;
+
+ rm->user[0].base.oclass = gpu->disp.class.caps;
+ rm->user[0].ctor = gv100_disp_caps_new;
+
+ rm->user[1].base.oclass = gpu->disp.class.core;
+ rm->user[1].ctor = nvkm_disp_core_new;
+ rm->user[1].chan = &r535_core;
+
+ rm->user[2].base.oclass = gpu->disp.class.wndw;
+ rm->user[2].ctor = nvkm_disp_wndw_new;
+ rm->user[2].chan = &r535_wndw;
+
+ rm->user[3].base.oclass = gpu->disp.class.wimm;
+ rm->user[3].ctor = nvkm_disp_wndw_new;
+ rm->user[3].chan = &r535_wimm;
+
+ rm->user[4].base.oclass = gpu->disp.class.curs;
+ rm->user[4].ctor = nvkm_disp_chan_new;
+ rm->user[4].chan = &r535_curs;
+
+ ret = nvkm_disp_new_(rm, device, type, inst, pdisp);
+ if (ret) {
+ kfree(rm);
+ return ret;
+ }
+
+ mutex_init(&(*pdisp)->super.mutex); //XXX
+ return 0;
+}
+
+const struct nvkm_rm_api_disp
+r535_disp = {
+ .get_static_info = r535_disp_get_static_info,
+ .get_supported = r535_disp_get_supported,
+ .get_connect_state = r535_disp_get_connect_state,
+ .get_active = r535_disp_get_active,
+ .bl_ctrl = r535_bl_ctrl,
+ .dp = {
+ .get_caps = r535_dp_get_caps,
+ .set_indexed_link_rates = r535_dp_set_indexed_link_rates,
+ },
+ .chan = {
+ .set_pushbuf = r535_disp_chan_set_pushbuf,
+ .dmac_alloc = r535_dmac_alloc,
+ }
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c
new file mode 100644
index 000000000000..150e22fde2ac
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c
@@ -0,0 +1,327 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <subdev/instmem/priv.h>
+#include <subdev/gsp.h>
+
+#include <nvhw/drf.h>
+
+#include "nvrm/fbsr.h"
+#include "nvrm/rpcfn.h"
+
+struct fbsr_item {
+ const char *type;
+ u64 addr;
+ u64 size;
+
+ struct list_head head;
+};
+
+struct fbsr {
+ struct list_head items;
+
+ u64 size;
+ int regions;
+
+ struct nvkm_gsp_client client;
+ struct nvkm_gsp_device device;
+
+ u64 hmemory;
+ u64 sys_offset;
+};
+
+int
+r535_fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target aper,
+ u64 phys, u64 size, struct sg_table *sgt, struct nvkm_gsp_object *object)
+{
+ struct nvkm_gsp_client *client = device->object.client;
+ struct nvkm_gsp *gsp = client->gsp;
+ const u32 pages = size / GSP_PAGE_SIZE;
+ rpc_alloc_memory_v13_01 *rpc;
+ int ret;
+
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY,
+ sizeof(*rpc) + pages * sizeof(rpc->pteDesc.pte_pde[0]));
+ if (IS_ERR(rpc))
+ return PTR_ERR(rpc);
+
+ rpc->hClient = client->object.handle;
+ rpc->hDevice = device->object.handle;
+ rpc->hMemory = handle;
+ if (aper == NVKM_MEM_TARGET_HOST) {
+ rpc->hClass = NV01_MEMORY_LIST_SYSTEM;
+ rpc->flags = NVDEF(NVOS02, FLAGS, PHYSICALITY, NONCONTIGUOUS) |
+ NVDEF(NVOS02, FLAGS, LOCATION, PCI) |
+ NVDEF(NVOS02, FLAGS, MAPPING, NO_MAP);
+ } else {
+ rpc->hClass = NV01_MEMORY_LIST_FBMEM;
+ rpc->flags = NVDEF(NVOS02, FLAGS, PHYSICALITY, CONTIGUOUS) |
+ NVDEF(NVOS02, FLAGS, LOCATION, VIDMEM) |
+ NVDEF(NVOS02, FLAGS, MAPPING, NO_MAP);
+ rpc->format = 6; /* NV_MMU_PTE_KIND_GENERIC_MEMORY */
+ }
+ rpc->pteAdjust = 0;
+ rpc->length = size;
+ rpc->pageCount = pages;
+ rpc->pteDesc.idr = 0;
+ rpc->pteDesc.reserved1 = 0;
+ rpc->pteDesc.length = pages;
+
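+ /* Build one PTE per 4KiB GSP page: PFNs come from the sg_table for sysmem, or from the contiguous range for vidmem. */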
+ if (sgt) {
+ struct scatterlist *sgl;
+ int pte = 0, idx;
+
+ for_each_sgtable_dma_sg(sgt, sgl, idx) {
+ for (int i = 0; i < sg_dma_len(sgl) / GSP_PAGE_SIZE; i++)
+ rpc->pteDesc.pte_pde[pte++].pte = (sg_dma_address(sgl) >> 12) + i;
+
+ }
+ } else {
+ for (int i = 0; i < pages; i++)
+ rpc->pteDesc.pte_pde[i].pte = (phys >> 12) + i;
+ }
+
+ ret = nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_POLL);
+ if (ret)
+ return ret;
+
+ object->client = device->object.client;
+ object->parent = &device->object;
+ object->handle = handle;
+ return 0;
+}
+
+static int
+fbsr_send(struct fbsr *fbsr, struct fbsr_item *item)
+{
+ NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS *ctrl;
+ struct nvkm_gsp *gsp = fbsr->client.gsp;
+ struct nvkm_gsp_object memlist;
+ int ret;
+
+ ret = r535_fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_VRAM,
+ item->addr, item->size, NULL, &memlist);
+ if (ret)
+ return ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl)) {
+ ret = PTR_ERR(ctrl);
+ goto done;
+ }
+
+ ctrl->fbsrType = FBSR_TYPE_DMA;
+ ctrl->hClient = fbsr->client.object.handle;
+ ctrl->hVidMem = fbsr->hmemory++;
+ ctrl->vidOffset = 0;
+ ctrl->sysOffset = fbsr->sys_offset;
+ ctrl->size = item->size;
+
+ ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+done:
+ nvkm_gsp_rm_free(&memlist);
+ if (ret)
+ return ret;
+
+ fbsr->sys_offset += item->size;
+ return 0;
+}
+
+static int
+fbsr_init(struct fbsr *fbsr, struct sg_table *sgt, u64 items_size)
+{
+ NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS *ctrl;
+ struct nvkm_gsp *gsp = fbsr->client.gsp;
+ struct nvkm_gsp_object memlist;
+ int ret;
+
+ ret = r535_fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_HOST,
+ 0, fbsr->size, sgt, &memlist);
+ if (ret)
+ return ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_FBSR_INIT, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->fbsrType = FBSR_TYPE_DMA;
+ ctrl->numRegions = fbsr->regions;
+ ctrl->hClient = fbsr->client.object.handle;
+ ctrl->hSysMem = fbsr->hmemory++;
+ ctrl->gspFbAllocsSysOffset = items_size;
+
+ ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+
+ nvkm_gsp_rm_free(&memlist);
+ return ret;
+}
+
+static bool
+fbsr_vram(struct fbsr *fbsr, const char *type, u64 addr, u64 size)
+{
+ struct fbsr_item *item;
+
+ if (!(item = kzalloc(sizeof(*item), GFP_KERNEL)))
+ return false;
+
+ item->type = type;
+ item->addr = addr;
+ item->size = size;
+ list_add_tail(&item->head, &fbsr->items);
+ return true;
+}
+
+static bool
+fbsr_inst(struct fbsr *fbsr, const char *type, struct nvkm_memory *memory)
+{
+ return fbsr_vram(fbsr, type, nvkm_memory_addr(memory), nvkm_memory_size(memory));
+}
+
+void
+r535_fbsr_resume(struct nvkm_gsp *gsp)
+{
+ /* RM has restored VRAM contents already, so just need to free the sysmem buffer. */
+ nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.fbsr);
+}
+
+static int
+r535_fbsr_suspend(struct nvkm_gsp *gsp)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_instmem *imem = device->imem;
+ struct nvkm_instobj *iobj;
+ struct fbsr fbsr = {};
+ struct fbsr_item *item, *temp;
+ u64 items_size;
+ int ret;
+
+ INIT_LIST_HEAD(&fbsr.items);
+ fbsr.hmemory = 0xcaf00003;
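+ /* Client-side handle base; every object allocated below (one memlist per region, plus the sysmem list) takes the next handle via hmemory++. */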
+
+ /* Create a list of all regions we need RM to save during suspend. */
+ list_for_each_entry(iobj, &imem->list, head) {
+ if (iobj->preserve) {
+ if (!fbsr_inst(&fbsr, "inst", &iobj->memory))
+ return -ENOMEM;
+ }
+ }
+
+ list_for_each_entry(iobj, &imem->boot, head) {
+ if (!fbsr_inst(&fbsr, "boot", &iobj->memory))
+ return -ENOMEM;
+ }
+
+ if (!fbsr_vram(&fbsr, "gsp-non-wpr", gsp->fb.heap.addr, gsp->fb.heap.size))
+ return -ENOMEM;
+
+ /* Determine memory requirements. */
+ list_for_each_entry(item, &fbsr.items, head) {
+ nvkm_debug(subdev, "fbsr: %016llx %016llx %s\n",
+ item->addr, item->size, item->type);
+ fbsr.size += item->size;
+ fbsr.regions++;
+ }
+
+ items_size = fbsr.size;
+ nvkm_debug(subdev, "fbsr: %d regions (0x%llx bytes)\n", fbsr.regions, items_size);
+
+ fbsr.size += gsp->fb.rsvd_size;
+ fbsr.size += gsp->fb.bios.vga_workspace.size;
+ nvkm_debug(subdev, "fbsr: size: 0x%llx bytes\n", fbsr.size);
+
+ ret = nvkm_gsp_sg(gsp->subdev.device, fbsr.size, &gsp->sr.fbsr);
+ if (ret)
+ goto done;
+
+ /* Tell RM about the sysmem which will hold VRAM contents across suspend. */
+ ret = nvkm_gsp_client_device_ctor(gsp, &fbsr.client, &fbsr.device);
+ if (ret)
+ goto done_sgt;
+
+ ret = fbsr_init(&fbsr, &gsp->sr.fbsr, items_size);
+ if (WARN_ON(ret))
+ goto done_sgt;
+
+ /* Send VRAM regions that need saving. */
+ list_for_each_entry(item, &fbsr.items, head) {
+ ret = fbsr_send(&fbsr, item);
+ if (WARN_ON(ret))
+ goto done_sgt;
+ }
+
+ /* Cleanup everything except the sysmem backup, which will be removed after resume. */
+done_sgt:
+ if (ret) /* ... unless we failed already. */
+ nvkm_gsp_sg_free(device, &gsp->sr.fbsr);
+done:
+ list_for_each_entry_safe(item, temp, &fbsr.items, head) {
+ list_del(&item->head);
+ kfree(item);
+ }
+
+ nvkm_gsp_device_dtor(&fbsr.device);
+ nvkm_gsp_client_dtor(&fbsr.client);
+ return ret;
+}
+
+const struct nvkm_rm_api_fbsr
+r535_fbsr = {
+ .suspend = r535_fbsr_suspend,
+ .resume = r535_fbsr_resume,
+};
+
+static void *
+r535_instmem_dtor(struct nvkm_instmem *imem)
+{
+ kfree(imem->func);
+ return imem;
+}
+
+int
+r535_instmem_new(const struct nvkm_instmem_func *hw,
+ struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_instmem **pinstmem)
+{
+ struct nvkm_instmem_func *rm;
+ int ret;
+
+ if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+ return -ENOMEM;
+
+ rm->dtor = r535_instmem_dtor;
+ rm->fini = hw->fini;
+ rm->memory_new = hw->memory_new;
+ rm->memory_wrap = hw->memory_wrap;
+ rm->zero = false;
+ rm->set_bar0_window_addr = hw->set_bar0_window_addr;
+
+ ret = nv50_instmem_new_(rm, device, type, inst, pinstmem);
+ if (ret)
+ kfree(rm);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c
new file mode 100644
index 000000000000..1ac5628c5140
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c
@@ -0,0 +1,616 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <engine/fifo/priv.h>
+#include <engine/fifo/cgrp.h>
+#include <engine/fifo/chan.h>
+#include <engine/fifo/chid.h>
+#include <engine/fifo/runl.h>
+
+#include <core/gpuobj.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu.h>
+#include <subdev/vfn.h>
+#include <engine/gr.h>
+
+#include <rm/engine.h>
+
+#include <nvhw/drf.h>
+
+#include "nvrm/fifo.h"
+#include "nvrm/engine.h"
+
+static u32
+r535_chan_doorbell_handle(struct nvkm_chan *chan)
+{
+ struct nvkm_gsp *gsp = chan->rm.object.client->gsp;
+
+ return gsp->rm->gpu->fifo.chan.doorbell_handle(chan);
+}
+
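+/* Channel start/stop are no-ops: scheduling is handled by GSP-RM (see NVA06F_CTRL_CMD_GPFIFO_SCHEDULE below). */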
+static void
+r535_chan_stop(struct nvkm_chan *chan)
+{
+}
+
+static void
+r535_chan_start(struct nvkm_chan *chan)
+{
+}
+
+static void
+r535_chan_ramfc_clear(struct nvkm_chan *chan)
+{
+ struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
+
+ nvkm_gsp_rm_free(&chan->rm.object);
+
+ dma_free_coherent(fifo->engine.subdev.device->dev, fifo->rm.mthdbuf_size,
+ chan->rm.mthdbuf.ptr, chan->rm.mthdbuf.addr);
+
+ nvkm_cgrp_vctx_put(chan->cgrp, &chan->rm.grctx);
+}
+
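+/* Eight channels share each USERD page; RM is passed both the page and the slot within it. */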
+#define CHID_PER_USERD 8
+
+static int
+r535_chan_alloc(struct nvkm_gsp_device *device, u32 handle, u32 nv2080_engine_type, u8 runq,
+ bool priv, int chid, u64 inst_addr, u64 userd_addr, u64 mthdbuf_addr,
+ struct nvkm_vmm *vmm, u64 gpfifo_offset, u32 gpfifo_length,
+ struct nvkm_gsp_object *chan)
+{
+ struct nvkm_gsp *gsp = device->object.client->gsp;
+ struct nvkm_fifo *fifo = gsp->subdev.device->fifo;
+ const int userd_p = chid / CHID_PER_USERD;
+ const int userd_i = chid % CHID_PER_USERD;
+ NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(&device->object, handle,
+ fifo->func->chan.user.oclass, sizeof(*args), chan);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->gpFifoOffset = gpfifo_offset;
+ args->gpFifoEntries = gpfifo_length / 8;
+
+ args->flags = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL);
+ args->flags |= NVDEF(NVOS04, FLAGS, VPR, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE);
+ args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, runq);
+ if (!priv)
+ args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, FALSE);
+ else
+ args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE);
+ args->flags |= NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE);
+
+ args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, userd_i);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE);
+ args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, userd_p);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE);
+
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT);
+ args->flags |= NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);
+
+ args->hVASpace = vmm->rm.object.handle;
+ args->engineType = nv2080_engine_type;
+
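+ /* addressSpace 2 == ADDR_FBMEM, 1 == ADDR_SYSMEM; only the method buffer lives in sysmem. */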
+ args->instanceMem.base = inst_addr;
+ args->instanceMem.size = fifo->func->chan.func->inst->size;
+ args->instanceMem.addressSpace = 2;
+ args->instanceMem.cacheAttrib = 1;
+
+ args->userdMem.base = userd_addr;
+ args->userdMem.size = fifo->func->chan.func->userd->size;
+ args->userdMem.addressSpace = 2;
+ args->userdMem.cacheAttrib = 1;
+
+ args->ramfcMem.base = inst_addr;
+ args->ramfcMem.size = 0x200;
+ args->ramfcMem.addressSpace = 2;
+ args->ramfcMem.cacheAttrib = 1;
+
+ args->mthdbufMem.base = mthdbuf_addr;
+ args->mthdbufMem.size = fifo->rm.mthdbuf_size;
+ args->mthdbufMem.addressSpace = 1;
+ args->mthdbufMem.cacheAttrib = 0;
+
+ if (!priv)
+ args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, USER);
+ else
+ args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN);
+ args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE);
+ args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);
+
+ return nvkm_gsp_rm_alloc_wr(chan, args);
+}
+
+static int
+r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
+{
+ struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
+ struct nvkm_engn *engn;
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ const struct nvkm_rm_api *rmapi = device->gsp->rm->api;
+ u32 eT = ~0;
+ int ret;
+
+ if (unlikely(device->gr && !device->gr->engine.subdev.oneinit)) {
+ ret = nvkm_subdev_oneinit(&device->gr->engine.subdev);
+ if (ret)
+ return ret;
+ }
+
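+ /* Use the first engine on the runlist as this channel's RM engine type. */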
+ nvkm_runl_foreach_engn(engn, chan->cgrp->runl) {
+ eT = engn->id;
+ break;
+ }
+
+ if (WARN_ON(eT == ~0))
+ return -EINVAL;
+
+ chan->rm.mthdbuf.ptr = dma_alloc_coherent(fifo->engine.subdev.device->dev,
+ fifo->rm.mthdbuf_size,
+ &chan->rm.mthdbuf.addr, GFP_KERNEL);
+ if (!chan->rm.mthdbuf.ptr)
+ return -ENOMEM;
+
+ ret = rmapi->fifo->chan.alloc(&chan->vmm->rm.device, NVKM_RM_CHAN(chan->id),
+ eT, chan->runq, priv, chan->id, chan->inst->addr,
+ nvkm_memory_addr(chan->userd.mem) + chan->userd.base,
+ chan->rm.mthdbuf.addr, chan->vmm, offset, length,
+ &chan->rm.object);
+ if (ret)
+ return ret;
+
+ /* Bind the channel to its engine, then enable scheduling. */
+ {
+ NVA06F_CTRL_BIND_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&chan->rm.object,
+ NVA06F_CTRL_CMD_BIND, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return PTR_ERR(ctrl);
+
+ ctrl->engineType = eT;
+
+ ret = nvkm_gsp_rm_ctrl_wr(&chan->rm.object, ctrl);
+ if (ret)
+ return ret;
+ }
+
+ {
+ NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&chan->rm.object,
+ NVA06F_CTRL_CMD_GPFIFO_SCHEDULE, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return PTR_ERR(ctrl);
+
+ ctrl->bEnable = 1;
+ ret = nvkm_gsp_rm_ctrl_wr(&chan->rm.object, ctrl);
+ }
+
+ return ret;
+}
+
+static const struct nvkm_chan_func_ramfc
+r535_chan_ramfc = {
+ .write = r535_chan_ramfc_write,
+ .clear = r535_chan_ramfc_clear,
+ .devm = 0xfff,
+ .priv = true,
+};
+
+static const struct nvkm_chan_func
+r535_chan = {
+ .inst = &gf100_chan_inst,
+ .userd = &gv100_chan_userd,
+ .ramfc = &r535_chan_ramfc,
+ .start = r535_chan_start,
+ .stop = r535_chan_stop,
+ .doorbell_handle = r535_chan_doorbell_handle,
+};
+
+static int
+r535_engn_nonstall(struct nvkm_engn *engn)
+{
+ struct nvkm_subdev *subdev = &engn->engine->subdev;
+ int ret;
+
+ ret = nvkm_gsp_intr_nonstall(subdev->device->gsp, subdev->type, subdev->inst);
+ WARN_ON(ret == -ENOENT);
+ return ret;
+}
+
+static const struct nvkm_engn_func
+r535_engn_ce = {
+ .nonstall = r535_engn_nonstall,
+};
+
+static int
+r535_gr_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan)
+{
+ /* RM requires GR context buffers to remain mapped until after the
+ * channel has been destroyed (as opposed to after the last gr obj
+ * has been deleted).
+ *
+ * Take an extra ref here, which will be released once the channel
+ * object has been deleted.
+ */
+ refcount_inc(&vctx->refs);
+ chan->rm.grctx = vctx;
+ return 0;
+}
+
+static const struct nvkm_engn_func
+r535_engn_gr = {
+ .nonstall = r535_engn_nonstall,
+ .ctor2 = r535_gr_ctor,
+};
+
+static int
+r535_flcn_bind(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan)
+{
+ struct nvkm_gsp_client *client = &chan->vmm->rm.client;
+ NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&chan->vmm->rm.device.subdevice,
+ NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->hClient = client->object.handle;
+ ctrl->hObject = chan->rm.object.handle;
+ ctrl->hChanClient = client->object.handle;
+ ctrl->virtAddress = vctx->vma->addr;
+ ctrl->size = vctx->inst->size;
+ ctrl->engineType = engn->id;
+ ctrl->ChID = chan->id;
+
+ return nvkm_gsp_rm_ctrl_wr(&chan->vmm->rm.device.subdevice, ctrl);
+}
+
+static int
+r535_flcn_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan)
+{
+ int ret;
+
+ if (WARN_ON(!engn->rm.size))
+ return -EINVAL;
+
+ ret = nvkm_gpuobj_new(engn->engine->subdev.device, engn->rm.size, 0, true, NULL,
+ &vctx->inst);
+ if (ret)
+ return ret;
+
+ ret = nvkm_vmm_get(vctx->vmm, 12, vctx->inst->size, &vctx->vma);
+ if (ret)
+ return ret;
+
+ ret = nvkm_memory_map(vctx->inst, 0, vctx->vmm, vctx->vma, NULL, 0);
+ if (ret)
+ return ret;
+
+ return r535_flcn_bind(engn, vctx, chan);
+}
+
+static const struct nvkm_engn_func
+r535_flcn = {
+ .nonstall = r535_engn_nonstall,
+ .ctor2 = r535_flcn_ctor,
+};
+
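+/* Runlist block/allow are no-ops too - runlist management belongs to GSP-RM. */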
+static void
+r535_runl_allow(struct nvkm_runl *runl, u32 engm)
+{
+}
+
+static void
+r535_runl_block(struct nvkm_runl *runl, u32 engm)
+{
+}
+
+static const struct nvkm_runl_func
+r535_runl = {
+ .block = r535_runl_block,
+ .allow = r535_runl_allow,
+};
+
+void
+r535_fifo_rc_chid(struct nvkm_fifo *fifo, int chid)
+{
+ struct nvkm_chan *chan;
+ unsigned long flags;
+
+ chan = nvkm_chan_get_chid(&fifo->engine, chid, &flags);
+ if (!chan) {
+ nvkm_error(&fifo->engine.subdev, "rc: chid %d not found!\n", chid);
+ return;
+ }
+
+ nvkm_chan_error(chan, false);
+ nvkm_chan_put(&chan, flags);
+}
+
+static int
+r535_fifo_rc_triggered(void *priv, u32 fn, void *repv, u32 repc)
+{
+ rpc_rc_triggered_v17_02 *msg = repv;
+ struct nvkm_gsp *gsp = priv;
+
+ if (WARN_ON(repc < sizeof(*msg)))
+ return -EINVAL;
+
+ nvkm_error(&gsp->subdev, "rc: engn:%08x chid:%d type:%d scope:%d part:%d\n",
+ msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope,
+ msg->partitionAttributionId);
+
+ r535_fifo_rc_chid(gsp->subdev.device->fifo, msg->chid);
+ return 0;
+}
+
+static int
+r535_fifo_xlat_rm_engine_type(u32 rm, enum nvkm_subdev_type *ptype, int *p2080)
+{
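+/* Each case expands to: set the NVKM engine type and NV2080 engine type, then return the instance. */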
+#define RM_ENGINE_TYPE(RM,NVKM,INST) \
+ RM_ENGINE_TYPE_##RM: \
+ *ptype = NVKM_ENGINE_##NVKM; \
+ *p2080 = NV2080_ENGINE_TYPE_##RM; \
+ return INST
+
+ switch (rm) {
+ case RM_ENGINE_TYPE( GR0, GR, 0);
+ case RM_ENGINE_TYPE( COPY0, CE, 0);
+ case RM_ENGINE_TYPE( COPY1, CE, 1);
+ case RM_ENGINE_TYPE( COPY2, CE, 2);
+ case RM_ENGINE_TYPE( COPY3, CE, 3);
+ case RM_ENGINE_TYPE( COPY4, CE, 4);
+ case RM_ENGINE_TYPE( COPY5, CE, 5);
+ case RM_ENGINE_TYPE( COPY6, CE, 6);
+ case RM_ENGINE_TYPE( COPY7, CE, 7);
+ case RM_ENGINE_TYPE( COPY8, CE, 8);
+ case RM_ENGINE_TYPE( COPY9, CE, 9);
+ case RM_ENGINE_TYPE( NVDEC0, NVDEC, 0);
+ case RM_ENGINE_TYPE( NVDEC1, NVDEC, 1);
+ case RM_ENGINE_TYPE( NVDEC2, NVDEC, 2);
+ case RM_ENGINE_TYPE( NVDEC3, NVDEC, 3);
+ case RM_ENGINE_TYPE( NVDEC4, NVDEC, 4);
+ case RM_ENGINE_TYPE( NVDEC5, NVDEC, 5);
+ case RM_ENGINE_TYPE( NVDEC6, NVDEC, 6);
+ case RM_ENGINE_TYPE( NVDEC7, NVDEC, 7);
+ case RM_ENGINE_TYPE( NVENC0, NVENC, 0);
+ case RM_ENGINE_TYPE( NVENC1, NVENC, 1);
+ case RM_ENGINE_TYPE( NVENC2, NVENC, 2);
+ case RM_ENGINE_TYPE(NVJPEG0, NVJPG, 0);
+ case RM_ENGINE_TYPE(NVJPEG1, NVJPG, 1);
+ case RM_ENGINE_TYPE(NVJPEG2, NVJPG, 2);
+ case RM_ENGINE_TYPE(NVJPEG3, NVJPG, 3);
+ case RM_ENGINE_TYPE(NVJPEG4, NVJPG, 4);
+ case RM_ENGINE_TYPE(NVJPEG5, NVJPG, 5);
+ case RM_ENGINE_TYPE(NVJPEG6, NVJPG, 6);
+ case RM_ENGINE_TYPE(NVJPEG7, NVJPG, 7);
+ case RM_ENGINE_TYPE( SW, SW, 0);
+ case RM_ENGINE_TYPE( SEC2, SEC2, 0);
+ case RM_ENGINE_TYPE( OFA, OFA, 0);
+ default:
+ return -EINVAL;
+ }
+#undef RM_ENGINE_TYPE
+}
+
+static int
+r535_fifo_ectx_size(struct nvkm_fifo *fifo)
+{
+ NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS *ctrl;
+ struct nvkm_gsp *gsp = fifo->engine.subdev.device->gsp;
+ struct nvkm_runl *runl;
+ struct nvkm_engn *engn;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO,
+ sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return PTR_ERR(ctrl);
+
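+ /* Match RM's constructed falcons to engines by engine descriptor, recording each context buffer size. */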
+ for (int i = 0; i < ctrl->numConstructedFalcons; i++) {
+ nvkm_runl_foreach(runl, fifo) {
+ nvkm_runl_foreach_engn(engn, runl) {
+ if (engn->rm.desc == ctrl->constructedFalconsTable[i].engDesc) {
+ engn->rm.size =
+ ctrl->constructedFalconsTable[i].ctxBufferSize;
+ break;
+ }
+ }
+ }
+ }
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return 0;
+}
+
+static int
+r535_fifo_runl_ctor(struct nvkm_fifo *fifo)
+{
+ struct nvkm_subdev *subdev = &fifo->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_gsp *gsp = device->gsp;
+ struct nvkm_rm *rm = gsp->rm;
+ struct nvkm_runl *runl;
+ struct nvkm_engn *engn;
+ u32 chids = 2048;
+ u32 first = rm->api->fifo->rsvd_chids;
+ u32 count = chids - first;
+ int ret;
+ NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *ctrl;
+
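+ /* 2048-entry CGRP/channel ID spaces, with the first rsvd_chids IDs left for RM's own use. */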
+ if ((ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, first, count, &fifo->cgid)) ||
+ (ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, first, count, &fifo->chid)))
+ return ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return PTR_ERR(ctrl);
+
+ for (int i = 0; i < ctrl->numEntries; i++) {
+ const u32 addr = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST_PRI_BASE];
+ const u32 id = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST];
+
+ runl = nvkm_runl_get(fifo, id, addr);
+ if (!runl) {
+ runl = nvkm_runl_new(fifo, id, addr, 0);
+ if (WARN_ON(IS_ERR(runl)))
+ continue;
+ }
+ }
+
+ for (int i = 0; i < ctrl->numEntries; i++) {
+ const u32 addr = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST_PRI_BASE];
+ const u32 rmid = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RM_ENGINE_TYPE];
+ const u32 id = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST];
+ enum nvkm_subdev_type type;
+ int inst, nv2080;
+
+ runl = nvkm_runl_get(fifo, id, addr);
+ if (!runl)
+ continue;
+
+ inst = rm->api->fifo->xlat_rm_engine_type(rmid, &type, &nv2080);
+ if (inst < 0) {
+ nvkm_warn(subdev, "unsupported RM_ENGINE_TYPE 0x%x\n", rmid);
+ nvkm_runl_del(runl);
+ continue;
+ }
+
+ /* Skip SW engine - there's currently no support for NV SW classes. */
+ if (type == NVKM_ENGINE_SW)
+ continue;
+
+ /* Skip lone GRCEs (ones not paired with GR on a runlist), as they
+ * don't appear to function as async copy engines.
+ */
+ if (type == NVKM_ENGINE_CE &&
+ rm->gpu->ce.grce_mask &&
+ (rm->gpu->ce.grce_mask(device) & BIT(inst)) &&
+ !nvkm_runl_find_engn(engn, runl, engn->engine->subdev.type == NVKM_ENGINE_GR)) {
+ RUNL_DEBUG(runl, "skip LCE %d - GRCE without GR", inst);
+ nvkm_runl_del(runl);
+ continue;
+ }
+
+ ret = nvkm_rm_engine_new(gsp->rm, type, inst);
+ if (ret) {
+ nvkm_runl_del(runl);
+ continue;
+ }
+
+ engn = NULL;
+
+ switch (type) {
+ case NVKM_ENGINE_CE:
+ engn = nvkm_runl_add(runl, nv2080, &r535_engn_ce, type, inst);
+ break;
+ case NVKM_ENGINE_GR:
+ engn = nvkm_runl_add(runl, nv2080, &r535_engn_gr, type, inst);
+ break;
+ case NVKM_ENGINE_NVDEC:
+ case NVKM_ENGINE_NVENC:
+ case NVKM_ENGINE_NVJPG:
+ case NVKM_ENGINE_OFA:
+ engn = nvkm_runl_add(runl, nv2080, &r535_flcn, type, inst);
+ break;
+ case NVKM_ENGINE_SW:
+ continue;
+ default:
+ engn = NULL;
+ break;
+ }
+
+ if (!engn) {
+ nvkm_runl_del(runl);
+ continue;
+ }
+
+ engn->rm.desc = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_ENG_DESC];
+ }
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+
+ {
+ NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ fifo->rm.mthdbuf_size = ctrl->size;
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ }
+
+ return rm->api->fifo->ectx_size(fifo);
+}
+
+static void
+r535_fifo_dtor(struct nvkm_fifo *fifo)
+{
+ kfree(fifo->func);
+}
+
+int
+r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo)
+{
+ const struct nvkm_rm_gpu *gpu = device->gsp->rm->gpu;
+ struct nvkm_fifo_func *rm;
+
+ if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+ return -ENOMEM;
+
+ rm->dtor = r535_fifo_dtor;
+ rm->runl_ctor = r535_fifo_runl_ctor;
+ rm->runl = &r535_runl;
+ rm->chan.user.oclass = gpu->fifo.chan.class;
+ rm->chan.func = &r535_chan;
+ rm->nonstall = &ga100_fifo_nonstall;
+ rm->nonstall_ctor = ga100_fifo_nonstall_ctor;
+
+ return nvkm_fifo_new_(rm, device, type, inst, pfifo);
+}
+
+const struct nvkm_rm_api_fifo
+r535_fifo = {
+ .xlat_rm_engine_type = r535_fifo_xlat_rm_engine_type,
+ .ectx_size = r535_fifo_ectx_size,
+ .rc_triggered = r535_fifo_rc_triggered,
+ .chan = {
+ .alloc = r535_chan_alloc,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c
new file mode 100644
index 000000000000..ddb57d5e73d6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c
@@ -0,0 +1,356 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/gr.h>
+
+#include <core/memory.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu/vmm.h>
+#include <engine/fifo/priv.h>
+#include <engine/gr/priv.h>
+
+#include <nvif/if900d.h>
+
+#include <nvhw/drf.h>
+
+#include "nvrm/gr.h"
+#include "nvrm/vmm.h"
+
+#define r535_gr(p) container_of((p), struct r535_gr, base)
+
+static void *
+r535_gr_chan_dtor(struct nvkm_object *object)
+{
+ struct r535_gr_chan *grc = container_of(object, typeof(*grc), object);
+ struct r535_gr *gr = grc->gr;
+
+ for (int i = 0; i < gr->ctxbuf_nr; i++) {
+ nvkm_vmm_put(grc->vmm, &grc->vma[i]);
+ nvkm_memory_unref(&grc->mem[i]);
+ }
+
+ nvkm_vmm_unref(&grc->vmm);
+ return grc;
+}
+
+static const struct nvkm_object_func
+r535_gr_chan = {
+ .dtor = r535_gr_chan_dtor,
+};
+
+int
+r535_gr_promote_ctx(struct r535_gr *gr, bool golden, struct nvkm_vmm *vmm,
+ struct nvkm_memory **pmem, struct nvkm_vma **pvma,
+ struct nvkm_gsp_object *chan)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.subdevice,
+ NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return PTR_ERR(ctrl);
+
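+ /* engineType 1 == NV2080_ENGINE_TYPE_GRAPHICS. */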
+ ctrl->engineType = 1;
+ ctrl->hChanClient = vmm->rm.client.object.handle;
+ ctrl->hObject = chan->handle;
+
+ for (int i = 0; i < gr->ctxbuf_nr; i++) {
+ NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY *entry =
+ &ctrl->promoteEntry[ctrl->entryCount];
+ const bool alloc = golden || !gr->ctxbuf[i].global;
+ int ret;
+
+ entry->bufferId = gr->ctxbuf[i].bufferId;
+ entry->bInitialize = gr->ctxbuf[i].init && alloc;
+
+ if (alloc) {
+ ret = nvkm_memory_new(device, gr->ctxbuf[i].init ?
+ NVKM_MEM_TARGET_INST : NVKM_MEM_TARGET_INST_SR_LOST,
+ gr->ctxbuf[i].size, 1 << gr->ctxbuf[i].page,
+ gr->ctxbuf[i].init, &pmem[i]);
+ if (WARN_ON(ret))
+ return ret;
+
+ if (gr->ctxbuf[i].bufferId ==
+ NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP)
+ entry->bNonmapped = 1;
+ } else {
+ if (gr->ctxbuf[i].bufferId ==
+ NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP)
+ continue;
+
+ pmem[i] = nvkm_memory_ref(gr->ctxbuf_mem[i]);
+ }
+
+ if (!entry->bNonmapped) {
+ struct gf100_vmm_map_v0 args = {
+ .priv = 1,
+ .ro = gr->ctxbuf[i].ro,
+ };
+
+ mutex_lock(&vmm->mutex.vmm);
+ ret = nvkm_vmm_get_locked(vmm, false, true, false, 0, gr->ctxbuf[i].align,
+ nvkm_memory_size(pmem[i]), &pvma[i]);
+ mutex_unlock(&vmm->mutex.vmm);
+ if (ret)
+ return ret;
+
+ ret = nvkm_memory_map(pmem[i], 0, vmm, pvma[i], &args, sizeof(args));
+ if (ret)
+ return ret;
+
+ entry->gpuVirtAddr = pvma[i]->addr;
+ }
+
+ if (entry->bInitialize) {
+ entry->gpuPhysAddr = nvkm_memory_addr(pmem[i]);
+ entry->size = gr->ctxbuf[i].size;
+ entry->physAttr = 4;
+ }
+
+ nvkm_debug(subdev,
+ "promote %02d: pa %016llx/%08x sz %016llx va %016llx init:%d nm:%d\n",
+ entry->bufferId, entry->gpuPhysAddr, entry->physAttr, entry->size,
+ entry->gpuVirtAddr, entry->bInitialize, entry->bNonmapped);
+
+ ctrl->entryCount++;
+ }
+
+ return nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.subdevice, ctrl);
+}
+
+int
+r535_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *chan, const struct nvkm_oclass *oclass,
+ struct nvkm_object **pobject)
+{
+ struct r535_gr *gr = r535_gr(base);
+ struct r535_gr_chan *grc;
+ int ret;
+
+ if (!(grc = kzalloc(sizeof(*grc), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_object_ctor(&r535_gr_chan, oclass, &grc->object);
+ grc->gr = gr;
+ grc->vmm = nvkm_vmm_ref(chan->vmm);
+ grc->chan = chan;
+ *pobject = &grc->object;
+
+ ret = r535_gr_promote_ctx(gr, false, grc->vmm, grc->mem, grc->vma, &chan->rm.object);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+u64
+r535_gr_units(struct nvkm_gr *gr)
+{
+ struct nvkm_gsp *gsp = gr->engine.subdev.device->gsp;
+
+ return (gsp->gr.tpcs << 8) | gsp->gr.gpcs;
+}
+
+void
+r535_gr_get_ctxbuf_info(struct r535_gr *gr, int i,
+ struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO *info)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ static const struct {
+ u32 id0; /* NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID */
+ u32 id1; /* NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID */
+ bool global;
+ bool init;
+ bool ro;
+ } map[] = {
+#define _A(n,N,G,I,R) { .id0 = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_##n, \
+ .id1 = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_##N, \
+ .global = (G), .init = (I), .ro = (R) }
+#define _B(N,G,I,R) _A(GRAPHICS_##N, N, (G), (I), (R))
+ /* global init ro */
+ _A( GRAPHICS, MAIN, false, true, false),
+ _B( PATCH, false, true, false),
+ _A( GRAPHICS_BUNDLE_CB, BUFFER_BUNDLE_CB, true, false, false),
+ _B( PAGEPOOL, true, false, false),
+ _B( ATTRIBUTE_CB, true, false, false),
+ _B( RTV_CB_GLOBAL, true, false, false),
+ _B( FECS_EVENT, true, true, false),
+ _B( PRIV_ACCESS_MAP, true, true, true),
+#undef _B
+#undef _A
+ };
+ u32 size = info->size;
+ u8 align, page;
+ int id;
+
+ for (id = 0; id < ARRAY_SIZE(map); id++) {
+ if (map[id].id0 == i)
+ break;
+ }
+
+ nvkm_debug(subdev, "%02x: size:0x%08x %s\n", i,
+ size, (id < ARRAY_SIZE(map)) ? "*" : "");
+ if (id >= ARRAY_SIZE(map))
+ return;
+
+ if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN)
+ size = ALIGN(size, 0x1000) + 64 * 0x1000; /* per-subctx headers */
+
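+ /* Choose the largest page size (2MiB, 64KiB or 4KiB) the buffer size justifies. */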
+ if (size >= 1 << 21) page = 21;
+ else if (size >= 1 << 16) page = 16;
+ else page = 12;
+
+ if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB)
+ align = order_base_2(size);
+ else
+ align = page;
+
+ if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
+ return;
+
+ gr->ctxbuf[gr->ctxbuf_nr].bufferId = map[id].id1;
+ gr->ctxbuf[gr->ctxbuf_nr].size = size;
+ gr->ctxbuf[gr->ctxbuf_nr].page = page;
+ gr->ctxbuf[gr->ctxbuf_nr].align = align;
+ gr->ctxbuf[gr->ctxbuf_nr].global = map[id].global;
+ gr->ctxbuf[gr->ctxbuf_nr].init = map[id].init;
+ gr->ctxbuf[gr->ctxbuf_nr].ro = map[id].ro;
+ gr->ctxbuf_nr++;
+
+ if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP) {
+ if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
+ return;
+
+ gr->ctxbuf[gr->ctxbuf_nr] = gr->ctxbuf[gr->ctxbuf_nr - 1];
+ gr->ctxbuf[gr->ctxbuf_nr].bufferId =
+ NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP;
+ gr->ctxbuf_nr++;
+ }
+}
+
+static int
+r535_gr_get_ctxbufs_info(struct r535_gr *gr)
+{
+ NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info;
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_gsp *gsp = subdev->device->gsp;
+
+ info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO,
+ sizeof(*info));
+ if (WARN_ON(IS_ERR(info)))
+ return PTR_ERR(info);
+
+ for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++)
+ r535_gr_get_ctxbuf_info(gr, i, &info->engineContextBuffersInfo[0].engine[i]);
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info);
+ return 0;
+}
+
+int
+r535_gr_oneinit(struct nvkm_gr *base)
+{
+ struct r535_gr *gr = container_of(base, typeof(*gr), base);
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_gsp *gsp = device->gsp;
+ struct nvkm_rm *rm = gsp->rm;
+ struct {
+ struct nvkm_memory *inst;
+ struct nvkm_vmm *vmm;
+ struct nvkm_gsp_object chan;
+ struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
+ } golden = {};
+ struct nvkm_gsp_object threed;
+ int ret;
+
+ /* Allocate a channel to use for golden context init. */
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x12000, 0, true, &golden.inst);
+ if (ret)
+ goto done;
+
+ ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grGoldenVmm", &golden.vmm);
+ if (ret)
+ goto done;
+
+ ret = r535_mmu_vaspace_new(golden.vmm, NVKM_RM_VASPACE, false);
+ if (ret)
+ goto done;
+
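+ /* Carve up the 0x12000 instmem alloc by hand: instance block at +0x0000, USERD at +0x1000, method buffer at +0x2000. */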
+ ret = rm->api->fifo->chan.alloc(&golden.vmm->rm.device, NVKM_RM_CHAN(0),
+ 1, 0, true, rm->api->fifo->rsvd_chids,
+ nvkm_memory_addr(golden.inst),
+ nvkm_memory_addr(golden.inst) + 0x1000,
+ nvkm_memory_addr(golden.inst) + 0x2000,
+ golden.vmm, 0, 0x1000, &golden.chan);
+ if (ret)
+ goto done;
+
+ /* Fetch context buffer info from RM and allocate each of them here to use
+ * during golden context init (or later as a global context buffer).
+ *
+ * Also build the information that'll be used to create channel contexts.
+ */
+ ret = rm->api->gr->get_ctxbufs_info(gr);
+ if (ret)
+ goto done;
+
+ /* Promote golden context to RM. */
+ ret = r535_gr_promote_ctx(gr, true, golden.vmm, gr->ctxbuf_mem, golden.vma, &golden.chan);
+ if (ret)
+ goto done;
+
+ /* Allocate 3D class on channel to trigger golden context init in RM. */
+ ret = nvkm_gsp_rm_alloc(&golden.chan, NVKM_RM_THREED, rm->gpu->gr.class.threed, 0, &threed);
+ if (ret)
+ goto done;
+
+ /* There's no need to keep the golden channel around, as RM caches the context. */
+ nvkm_gsp_rm_free(&threed);
+done:
+ nvkm_gsp_rm_free(&golden.chan);
+ for (int i = gr->ctxbuf_nr - 1; i >= 0; i--)
+ nvkm_vmm_put(golden.vmm, &golden.vma[i]);
+ nvkm_vmm_unref(&golden.vmm);
+ nvkm_memory_unref(&golden.inst);
+ return ret;
+}
+
+void *
+r535_gr_dtor(struct nvkm_gr *base)
+{
+ struct r535_gr *gr = r535_gr(base);
+
+ while (gr->ctxbuf_nr)
+ nvkm_memory_unref(&gr->ctxbuf_mem[--gr->ctxbuf_nr]);
+
+ kfree(gr->base.func);
+ return gr;
+}
+
+const struct nvkm_rm_api_gr
+r535_gr = {
+ .get_ctxbufs_info = r535_gr_get_ctxbufs_info,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
index db2602e88006..baf42339f93e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
@@ -19,9 +19,12 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <rm/rpc.h>
+
#include "priv.h"
#include <core/pci.h>
+#include <subdev/pci/priv.h>
#include <subdev/timer.h>
#include <subdev/vfn.h>
#include <engine/fifo/chan.h>
@@ -30,29 +33,11 @@
#include <nvfw/fw.h>
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-#include <nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h>
-#include <nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h>
-#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h>
-#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h>
-#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h>
-#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h>
-#include <nvrm/535.113.01/nvidia/generated/g_allclasses.h>
-#include <nvrm/535.113.01/nvidia/generated/g_os_nvoc.h>
-#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
+#include "nvrm/gsp.h"
+#include "nvrm/rpcfn.h"
+#include "nvrm/msgfn.h"
+#include "nvrm/event.h"
+#include "nvrm/fifo.h"
#include <linux/acpi.h>
#include <linux/ctype.h>
@@ -60,990 +45,6 @@
extern struct dentry *nouveau_debugfs_root;
-#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE
-#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16)
-
-/**
- * DOC: GSP message queue element
- *
- * https://github.com/NVIDIA/open-gpu-kernel-modules/blob/535/src/nvidia/inc/kernel/gpu/gsp/message_queue_priv.h
- *
- * The GSP command queue and status queue are message queues for the
- * communication between software and GSP. The software submits the GSP
- * RPC via the GSP command queue, GSP writes the status of the submitted
- * RPC in the status queue.
- *
- * A GSP message queue element consists of three parts:
- *
- * - message element header (struct r535_gsp_msg), which mostly maintains
- * the metadata for queuing the element.
- *
- * - RPC message header (struct nvfw_gsp_rpc), which maintains the info
- * of the RPC. E.g., the RPC function number.
- *
- * - The payload, where the RPC message stays. E.g. the params of a
- * specific RPC function. Some RPC functions also have their headers
- * in the payload. E.g. rm_alloc, rm_control.
- *
- * The memory layout of a GSP message element can be illustrated below::
- *
- * +------------------------+
- * | Message Element Header |
- * | (r535_gsp_msg) |
- * | |
- * | (r535_gsp_msg.data) |
- * | | |
- * |----------V-------------|
- * | GSP RPC Header |
- * | (nvfw_gsp_rpc) |
- * | |
- * | (nvfw_gsp_rpc.data) |
- * | | |
- * |----------V-------------|
- * | Payload |
- * | |
- * | header(optional) |
- * | params |
- * +------------------------+
- *
- * The max size of a message queue element is 16 pages (including the
- * headers). When a GSP message to be sent is larger than 16 pages, the
- * message should be split into multiple elements and sent accordingly.
- *
- * In the bunch of the split elements, the first element has the expected
- * function number, while the rest of the elements are sent with the
- * function number NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD.
- *
- * GSP consumes the elements from the cmdq and always writes the result
- * back to the msgq. The result is also formed as split elements.
- *
- * Terminology:
- *
- * - gsp_msg(msg): GSP message element (element header + GSP RPC header +
- * payload)
- * - gsp_rpc(rpc): GSP RPC (RPC header + payload)
- * - gsp_rpc_buf: buffer for (GSP RPC header + payload)
- * - gsp_rpc_len: size of (GSP RPC header + payload)
- * - params_size: size of params in the payload
- * - payload_size: size of (header if exists + params) in the payload
- */
-
-struct r535_gsp_msg {
- u8 auth_tag_buffer[16];
- u8 aad_buffer[16];
- u32 checksum;
- u32 sequence;
- u32 elem_count;
- u32 pad;
- u8 data[];
-};
-
-struct nvfw_gsp_rpc {
- u32 header_version;
- u32 signature;
- u32 length;
- u32 function;
- u32 rpc_result;
- u32 rpc_result_private;
- u32 sequence;
- union {
- u32 spare;
- u32 cpuRmGfid;
- };
- u8 data[];
-};
-
-#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data)
-
-#define to_gsp_hdr(p, header) \
- container_of((void *)p, typeof(*header), data)
-
-#define to_payload_hdr(p, header) \
- container_of((void *)p, typeof(*header), params)
-
-static int
-r535_rpc_status_to_errno(uint32_t rpc_status)
-{
- switch (rpc_status) {
- case 0x55: /* NV_ERR_NOT_READY */
- case 0x66: /* NV_ERR_TIMEOUT_RETRY */
- return -EBUSY;
- case 0x51: /* NV_ERR_NO_MEMORY */
- return -ENOMEM;
- default:
- return -EINVAL;
- }
-}
-
-static int
-r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *ptime)
-{
- u32 size, rptr = *gsp->msgq.rptr;
- int used;
-
- size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + gsp_rpc_len,
- GSP_PAGE_SIZE);
- if (WARN_ON(!size || size >= gsp->msgq.cnt))
- return -EINVAL;
-
- do {
- u32 wptr = *gsp->msgq.wptr;
-
- used = wptr + gsp->msgq.cnt - rptr;
- if (used >= gsp->msgq.cnt)
- used -= gsp->msgq.cnt;
- if (used >= size)
- break;
-
- usleep_range(1, 2);
- } while (--(*ptime));
-
- if (WARN_ON(!*ptime))
- return -ETIMEDOUT;
-
- return used;
-}
-
-static struct r535_gsp_msg *
-r535_gsp_msgq_get_entry(struct nvkm_gsp *gsp)
-{
- u32 rptr = *gsp->msgq.rptr;
-
- /* Skip the first page, which is the message queue info */
- return (void *)((u8 *)gsp->shm.msgq.ptr + GSP_PAGE_SIZE +
- rptr * GSP_PAGE_SIZE);
-}
-
-/**
- * DOC: Receive a GSP message queue element
- *
- * Receiving a GSP message queue element from the message queue consists of
- * the following steps:
- *
- * - Peek the element from the queue: r535_gsp_msgq_peek().
- * Peek the first page of the element to determine the total size of the
- * message before allocating the proper memory.
- *
- * - Allocate memory for the message.
- * Once the total size of the message is determined from the GSP message
- * queue element, the caller of r535_gsp_msgq_recv() allocates the
- * required memory.
- *
- * - Receive the message: r535_gsp_msgq_recv().
- * Copy the message into the allocated memory. Advance the read pointer.
- * If the message is a large GSP message, r535_gsp_msgq_recv() calls
- * r535_gsp_msgq_recv_one_elem() repeatedly to receive continuation parts
- * until the complete message is received.
- * r535_gsp_msgq_recv() assembles the payloads of cotinuation parts into
- * the return of the large GSP message.
- *
- * - Free the allocated memory: r535_gsp_msg_done().
- * The user is responsible for freeing the memory allocated for the GSP
- * message pages after they have been processed.
- */
-static void *
-r535_gsp_msgq_peek(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
-{
- struct r535_gsp_msg *mqe;
- int ret;
-
- ret = r535_gsp_msgq_wait(gsp, gsp_rpc_len, retries);
- if (ret < 0)
- return ERR_PTR(ret);
-
- mqe = r535_gsp_msgq_get_entry(gsp);
-
- return mqe->data;
-}
-
-struct r535_gsp_msg_info {
- int *retries;
- u32 gsp_rpc_len;
- void *gsp_rpc_buf;
- bool continuation;
-};
-
-static void
-r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl);
-
-static void *
-r535_gsp_msgq_recv_one_elem(struct nvkm_gsp *gsp,
- struct r535_gsp_msg_info *info)
-{
- u8 *buf = info->gsp_rpc_buf;
- u32 rptr = *gsp->msgq.rptr;
- struct r535_gsp_msg *mqe;
- u32 size, expected, len;
- int ret;
-
- expected = info->gsp_rpc_len;
-
- ret = r535_gsp_msgq_wait(gsp, expected, info->retries);
- if (ret < 0)
- return ERR_PTR(ret);
-
- mqe = r535_gsp_msgq_get_entry(gsp);
-
- if (info->continuation) {
- struct nvfw_gsp_rpc *rpc = (struct nvfw_gsp_rpc *)mqe->data;
-
- if (rpc->function != NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD) {
- nvkm_error(&gsp->subdev,
- "Not a continuation of a large RPC\n");
- r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
- return ERR_PTR(-EIO);
- }
- }
-
- size = ALIGN(expected + GSP_MSG_HDR_SIZE, GSP_PAGE_SIZE);
-
- len = ((gsp->msgq.cnt - rptr) * GSP_PAGE_SIZE) - sizeof(*mqe);
- len = min_t(u32, expected, len);
-
- if (info->continuation)
- memcpy(buf, mqe->data + sizeof(struct nvfw_gsp_rpc),
- len - sizeof(struct nvfw_gsp_rpc));
- else
- memcpy(buf, mqe->data, len);
-
- expected -= len;
-
- if (expected) {
- mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000);
- memcpy(buf + len, mqe, expected);
- }
-
- rptr = (rptr + DIV_ROUND_UP(size, GSP_PAGE_SIZE)) % gsp->msgq.cnt;
-
- mb();
- (*gsp->msgq.rptr) = rptr;
- return buf;
-}
-
-static void *
-r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
-{
- struct r535_gsp_msg *mqe;
- const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*mqe);
- struct nvfw_gsp_rpc *rpc;
- struct r535_gsp_msg_info info = {0};
- u32 expected = gsp_rpc_len;
- void *buf;
-
- mqe = r535_gsp_msgq_get_entry(gsp);
- rpc = (struct nvfw_gsp_rpc *)mqe->data;
-
- if (WARN_ON(rpc->length > max_rpc_size))
- return NULL;
-
- buf = kvmalloc(max_t(u32, rpc->length, expected), GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- info.gsp_rpc_buf = buf;
- info.retries = retries;
- info.gsp_rpc_len = rpc->length;
-
- buf = r535_gsp_msgq_recv_one_elem(gsp, &info);
- if (IS_ERR(buf)) {
- kvfree(info.gsp_rpc_buf);
- info.gsp_rpc_buf = NULL;
- return buf;
- }
-
- if (expected <= max_rpc_size)
- return buf;
-
- info.gsp_rpc_buf += info.gsp_rpc_len;
- expected -= info.gsp_rpc_len;
-
- while (expected) {
- u32 size;
-
- rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), info.retries);
- if (IS_ERR_OR_NULL(rpc)) {
- kfree(buf);
- return rpc;
- }
-
- info.gsp_rpc_len = rpc->length;
- info.continuation = true;
-
- rpc = r535_gsp_msgq_recv_one_elem(gsp, &info);
- if (IS_ERR_OR_NULL(rpc)) {
- kfree(buf);
- return rpc;
- }
-
- size = info.gsp_rpc_len - sizeof(*rpc);
- expected -= size;
- info.gsp_rpc_buf += size;
- }
-
- rpc = buf;
- rpc->length = gsp_rpc_len;
- return buf;
-}
-
-static int
-r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *rpc)
-{
- struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
- struct r535_gsp_msg *cqe;
- u32 gsp_rpc_len = msg->checksum;
- u64 *ptr = (void *)msg;
- u64 *end;
- u64 csum = 0;
- int free, time = 1000000;
- u32 wptr, size, step, len;
- u32 off = 0;
-
- len = ALIGN(GSP_MSG_HDR_SIZE + gsp_rpc_len, GSP_PAGE_SIZE);
-
- end = (u64 *)((char *)ptr + len);
- msg->pad = 0;
- msg->checksum = 0;
- msg->sequence = gsp->cmdq.seq++;
- msg->elem_count = DIV_ROUND_UP(len, 0x1000);
-
- while (ptr < end)
- csum ^= *ptr++;
-
- msg->checksum = upper_32_bits(csum) ^ lower_32_bits(csum);
-
- wptr = *gsp->cmdq.wptr;
- do {
- do {
- free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1;
- if (free >= gsp->cmdq.cnt)
- free -= gsp->cmdq.cnt;
- if (free >= 1)
- break;
-
- usleep_range(1, 2);
-		} while (--time);
-
- if (WARN_ON(!time)) {
- kvfree(msg);
- return -ETIMEDOUT;
- }
-
- cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000);
- step = min_t(u32, free, (gsp->cmdq.cnt - wptr));
- size = min_t(u32, len, step * GSP_PAGE_SIZE);
-
- memcpy(cqe, (u8 *)msg + off, size);
-
- wptr += DIV_ROUND_UP(size, 0x1000);
- if (wptr == gsp->cmdq.cnt)
- wptr = 0;
-
- off += size;
- len -= size;
- } while (len);
-
- nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr);
- wmb();
- (*gsp->cmdq.wptr) = wptr;
- mb();
-
- nvkm_falcon_wr32(&gsp->falcon, 0xc00, 0x00000000);
-
- kvfree(msg);
- return 0;
-}
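
The checksum is an XOR over every 64-bit word of the page-aligned message, folded down to 32 bits; the checksum field itself is zeroed first so the calculation is self-consistent. In stand-alone form (assumes len is a multiple of 8, which the page alignment guarantees):

	#include <stddef.h>
	#include <stdint.h>

	static uint32_t gsp_cmdq_checksum(const void *msg, size_t len)
	{
		const uint64_t *ptr = msg;
		const uint64_t *end = (const void *)((const char *)msg + len);
		uint64_t csum = 0;

		while (ptr < end)
			csum ^= *ptr++;

		/* Fold the 64-bit XOR into the 32-bit checksum field. */
		return (uint32_t)(csum >> 32) ^ (uint32_t)csum;
	}
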
-
-static void *
-r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 gsp_rpc_len)
-{
- struct r535_gsp_msg *msg;
- u32 size = GSP_MSG_HDR_SIZE + gsp_rpc_len;
-
- size = ALIGN(size, GSP_MSG_MIN_SIZE);
- msg = kvzalloc(size, GFP_KERNEL);
- if (!msg)
- return ERR_PTR(-ENOMEM);
-
-	msg->checksum = gsp_rpc_len;	/* stash the length; the real checksum is computed on push */
- return msg->data;
-}
-
-static void
-r535_gsp_msg_done(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg)
-{
- kvfree(msg);
-}
-
-static void
-r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl)
-{
- if (gsp->subdev.debug >= lvl) {
- nvkm_printk__(&gsp->subdev, lvl, info,
- "msg fn:%d len:0x%x/0x%zx res:0x%x resp:0x%x\n",
- msg->function, msg->length, msg->length - sizeof(*msg),
- msg->rpc_result, msg->rpc_result_private);
- print_hex_dump(KERN_INFO, "msg: ", DUMP_PREFIX_OFFSET, 16, 1,
- msg->data, msg->length - sizeof(*msg), true);
- }
-}
-
-static struct nvfw_gsp_rpc *
-r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 gsp_rpc_len)
-{
- struct nvkm_subdev *subdev = &gsp->subdev;
- struct nvfw_gsp_rpc *rpc;
- int retries = 4000000, i;
-
-retry:
- rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), &retries);
- if (IS_ERR_OR_NULL(rpc))
- return rpc;
-
- rpc = r535_gsp_msgq_recv(gsp, gsp_rpc_len, &retries);
- if (IS_ERR_OR_NULL(rpc))
- return rpc;
-
- if (rpc->rpc_result) {
- r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
- r535_gsp_msg_done(gsp, rpc);
- return ERR_PTR(-EINVAL);
- }
-
- r535_gsp_msg_dump(gsp, rpc, NV_DBG_TRACE);
-
- if (fn && rpc->function == fn) {
- if (gsp_rpc_len) {
- if (rpc->length < gsp_rpc_len) {
- nvkm_error(subdev, "rpc len %d < %d\n",
- rpc->length, gsp_rpc_len);
- r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
- r535_gsp_msg_done(gsp, rpc);
- return ERR_PTR(-EIO);
- }
-
- return rpc;
- }
-
- r535_gsp_msg_done(gsp, rpc);
- return NULL;
- }
-
- for (i = 0; i < gsp->msgq.ntfy_nr; i++) {
- struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i];
-
- if (ntfy->fn == rpc->function) {
- if (ntfy->func)
- ntfy->func(ntfy->priv, ntfy->fn, rpc->data,
- rpc->length - sizeof(*rpc));
- break;
- }
- }
-
- if (i == gsp->msgq.ntfy_nr)
- r535_gsp_msg_dump(gsp, rpc, NV_DBG_WARN);
-
- r535_gsp_msg_done(gsp, rpc);
- if (fn)
- goto retry;
-
- if (*gsp->msgq.rptr != *gsp->msgq.wptr)
- goto retry;
-
- return NULL;
-}
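
The loop's exit conditions are easy to misread: with a non-zero fn the queue is drained until that reply arrives, while with fn == 0 (polling) it drains until the queue is empty. In miniature, with a hypothetical helper:

	#include <stdbool.h>

	static bool recv_should_retry(unsigned int fn, unsigned int got_fn,
				      bool queue_empty)
	{
		if (fn)
			return got_fn != fn;	/* keep draining until our reply */
		return !queue_empty;		/* polling mode: drain everything */
	}
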
-
-static int
-r535_gsp_msg_ntfy_add(struct nvkm_gsp *gsp, u32 fn, nvkm_gsp_msg_ntfy_func func, void *priv)
-{
- int ret = 0;
-
- mutex_lock(&gsp->msgq.mutex);
- if (WARN_ON(gsp->msgq.ntfy_nr >= ARRAY_SIZE(gsp->msgq.ntfy))) {
- ret = -ENOSPC;
- } else {
- gsp->msgq.ntfy[gsp->msgq.ntfy_nr].fn = fn;
- gsp->msgq.ntfy[gsp->msgq.ntfy_nr].func = func;
- gsp->msgq.ntfy[gsp->msgq.ntfy_nr].priv = priv;
- gsp->msgq.ntfy_nr++;
- }
- mutex_unlock(&gsp->msgq.mutex);
- return ret;
-}
-
-static int
-r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn)
-{
- void *repv;
-
- mutex_lock(&gsp->cmdq.mutex);
- repv = r535_gsp_msg_recv(gsp, fn, 0);
- mutex_unlock(&gsp->cmdq.mutex);
- if (IS_ERR(repv))
- return PTR_ERR(repv);
-
- return 0;
-}
-
-static void *
-r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload, bool wait,
- u32 gsp_rpc_len)
-{
- struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
- struct nvfw_gsp_rpc *msg;
- u32 fn = rpc->function;
- void *repv = NULL;
- int ret;
-
- if (gsp->subdev.debug >= NV_DBG_TRACE) {
- nvkm_trace(&gsp->subdev, "rpc fn:%d len:0x%x/0x%zx\n", rpc->function,
- rpc->length, rpc->length - sizeof(*rpc));
- print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1,
- rpc->data, rpc->length - sizeof(*rpc), true);
- }
-
- ret = r535_gsp_cmdq_push(gsp, rpc);
- if (ret)
- return ERR_PTR(ret);
-
- if (wait) {
- msg = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len);
- if (!IS_ERR_OR_NULL(msg))
- repv = msg->data;
- else
- repv = msg;
- }
-
- return repv;
-}
-
-static void
-r535_gsp_event_dtor(struct nvkm_gsp_event *event)
-{
- struct nvkm_gsp_device *device = event->device;
- struct nvkm_gsp_client *client = device->object.client;
- struct nvkm_gsp *gsp = client->gsp;
-
- mutex_lock(&gsp->client_id.mutex);
- if (event->func) {
- list_del(&event->head);
- event->func = NULL;
- }
- mutex_unlock(&gsp->client_id.mutex);
-
- nvkm_gsp_rm_free(&event->object);
- event->device = NULL;
-}
-
-static int
-r535_gsp_device_event_get(struct nvkm_gsp_event *event)
-{
- struct nvkm_gsp_device *device = event->device;
- NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *ctrl;
-
- ctrl = nvkm_gsp_rm_ctrl_get(&device->subdevice,
- NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
-
- ctrl->event = event->id;
- ctrl->action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
- return nvkm_gsp_rm_ctrl_wr(&device->subdevice, ctrl);
-}
-
-static int
-r535_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id,
- nvkm_gsp_event_func func, struct nvkm_gsp_event *event)
-{
- struct nvkm_gsp_client *client = device->object.client;
- struct nvkm_gsp *gsp = client->gsp;
- NV0005_ALLOC_PARAMETERS *args;
- int ret;
-
- args = nvkm_gsp_rm_alloc_get(&device->subdevice, handle,
- NV01_EVENT_KERNEL_CALLBACK_EX, sizeof(*args),
- &event->object);
- if (IS_ERR(args))
- return PTR_ERR(args);
-
- args->hParentClient = client->object.handle;
- args->hSrcResource = 0;
- args->hClass = NV01_EVENT_KERNEL_CALLBACK_EX;
- args->notifyIndex = NV01_EVENT_CLIENT_RM | id;
- args->data = NULL;
-
- ret = nvkm_gsp_rm_alloc_wr(&event->object, args);
- if (ret)
- return ret;
-
- event->device = device;
- event->id = id;
-
- ret = r535_gsp_device_event_get(event);
- if (ret) {
- nvkm_gsp_event_dtor(event);
- return ret;
- }
-
- mutex_lock(&gsp->client_id.mutex);
- event->func = func;
- list_add(&event->head, &client->events);
- mutex_unlock(&gsp->client_id.mutex);
- return 0;
-}
-
-static void
-r535_gsp_device_dtor(struct nvkm_gsp_device *device)
-{
- nvkm_gsp_rm_free(&device->subdevice);
- nvkm_gsp_rm_free(&device->object);
-}
-
-static int
-r535_gsp_subdevice_ctor(struct nvkm_gsp_device *device)
-{
- NV2080_ALLOC_PARAMETERS *args;
-
- return nvkm_gsp_rm_alloc(&device->object, 0x5d1d0000, NV20_SUBDEVICE_0, sizeof(*args),
- &device->subdevice);
-}
-
-static int
-r535_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
-{
- NV0080_ALLOC_PARAMETERS *args;
- int ret;
-
- args = nvkm_gsp_rm_alloc_get(&client->object, 0xde1d0000, NV01_DEVICE_0, sizeof(*args),
- &device->object);
- if (IS_ERR(args))
- return PTR_ERR(args);
-
- args->hClientShare = client->object.handle;
-
- ret = nvkm_gsp_rm_alloc_wr(&device->object, args);
- if (ret)
- return ret;
-
- ret = r535_gsp_subdevice_ctor(device);
- if (ret)
- nvkm_gsp_rm_free(&device->object);
-
- return ret;
-}
-
-static void
-r535_gsp_client_dtor(struct nvkm_gsp_client *client)
-{
- struct nvkm_gsp *gsp = client->gsp;
-
- nvkm_gsp_rm_free(&client->object);
-
- mutex_lock(&gsp->client_id.mutex);
- idr_remove(&gsp->client_id.idr, client->object.handle & 0xffff);
- mutex_unlock(&gsp->client_id.mutex);
-
- client->gsp = NULL;
-}
-
-static int
-r535_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client)
-{
- NV0000_ALLOC_PARAMETERS *args;
- int ret;
-
- mutex_lock(&gsp->client_id.mutex);
- ret = idr_alloc(&gsp->client_id.idr, client, 0, 0xffff + 1, GFP_KERNEL);
- mutex_unlock(&gsp->client_id.mutex);
- if (ret < 0)
- return ret;
-
- client->gsp = gsp;
- client->object.client = client;
- INIT_LIST_HEAD(&client->events);
-
- args = nvkm_gsp_rm_alloc_get(&client->object, 0xc1d00000 | ret, NV01_ROOT, sizeof(*args),
- &client->object);
- if (IS_ERR(args)) {
- r535_gsp_client_dtor(client);
-		return PTR_ERR(args);
- }
-
- args->hClient = client->object.handle;
- args->processID = ~0;
-
- ret = nvkm_gsp_rm_alloc_wr(&client->object, args);
- if (ret) {
- r535_gsp_client_dtor(client);
- return ret;
- }
-
- return 0;
-}
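
Client handles embed the IDR-allocated id in their low 16 bits under a fixed 0xc1d00000 tag, which is why the destructor can recover the id with a simple mask. A self-contained check of that round trip:

	#include <assert.h>
	#include <stdint.h>

	static uint32_t client_handle(uint16_t id) { return 0xc1d00000u | id; }
	static uint16_t client_id(uint32_t handle) { return handle & 0xffff; }

	int main(void)
	{
		assert(client_id(client_handle(0x002a)) == 0x002a);
		return 0;
	}
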
-
-static int
-r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object)
-{
- struct nvkm_gsp_client *client = object->client;
- struct nvkm_gsp *gsp = client->gsp;
- rpc_free_v03_00 *rpc;
-
- nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x free\n",
- client->object.handle, object->handle);
-
- rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_FREE, sizeof(*rpc));
- if (WARN_ON(IS_ERR_OR_NULL(rpc)))
- return -EIO;
-
- rpc->params.hRoot = client->object.handle;
- rpc->params.hObjectParent = 0;
- rpc->params.hObjectOld = object->handle;
- return nvkm_gsp_rpc_wr(gsp, rpc, true);
-}
-
-static void
-r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *params)
-{
- rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc);
-
- nvkm_gsp_rpc_done(object->client->gsp, rpc);
-}
-
-static void *
-r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *params)
-{
- rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc);
- struct nvkm_gsp *gsp = object->client->gsp;
- void *ret = NULL;
-
- rpc = nvkm_gsp_rpc_push(gsp, rpc, true, sizeof(*rpc));
- if (IS_ERR_OR_NULL(rpc))
- return rpc;
-
- if (rpc->status) {
- ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status));
- if (PTR_ERR(ret) != -EAGAIN && PTR_ERR(ret) != -EBUSY)
- nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
- }
-
- nvkm_gsp_rpc_done(gsp, rpc);
-
- return ret;
-}
-
-static void *
-r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass,
- u32 params_size)
-{
- struct nvkm_gsp_client *client = object->client;
- struct nvkm_gsp *gsp = client->gsp;
- rpc_gsp_rm_alloc_v03_00 *rpc;
-
- nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x\n",
- client->object.handle, object->parent->handle,
- object->handle);
-
- nvkm_debug(&gsp->subdev, "cls:0x%08x params_size:%d\n", oclass,
- params_size);
-
- rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC,
- sizeof(*rpc) + params_size);
- if (IS_ERR(rpc))
- return rpc;
-
- rpc->hClient = client->object.handle;
- rpc->hParent = object->parent->handle;
- rpc->hObject = object->handle;
- rpc->hClass = oclass;
- rpc->status = 0;
- rpc->paramsSize = params_size;
- return rpc->params;
-}
-
-static void
-r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *params)
-{
- rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr(params, rpc);
-
- if (!params)
- return;
- nvkm_gsp_rpc_done(object->client->gsp, rpc);
-}
-
-static int
-r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **params, u32 repc)
-{
- rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr((*params), rpc);
- struct nvkm_gsp *gsp = object->client->gsp;
- int ret = 0;
-
- rpc = nvkm_gsp_rpc_push(gsp, rpc, true, repc);
- if (IS_ERR_OR_NULL(rpc)) {
- *params = NULL;
- return PTR_ERR(rpc);
- }
-
- if (rpc->status) {
- ret = r535_rpc_status_to_errno(rpc->status);
- if (ret != -EAGAIN && ret != -EBUSY)
- nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n",
- object->client->object.handle, object->handle, rpc->cmd, rpc->status);
- }
-
- if (repc)
- *params = rpc->params;
- else
- nvkm_gsp_rpc_done(gsp, rpc);
-
- return ret;
-}
-
-static void *
-r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 params_size)
-{
- struct nvkm_gsp_client *client = object->client;
- struct nvkm_gsp *gsp = client->gsp;
- rpc_gsp_rm_control_v03_00 *rpc;
-
- nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x params_size:%d\n",
- client->object.handle, object->handle, cmd, params_size);
-
- rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL,
- sizeof(*rpc) + params_size);
- if (IS_ERR(rpc))
- return rpc;
-
- rpc->hClient = client->object.handle;
- rpc->hObject = object->handle;
- rpc->cmd = cmd;
- rpc->status = 0;
- rpc->paramsSize = params_size;
- return rpc->params;
-}
-
-static void
-r535_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv)
-{
- struct nvfw_gsp_rpc *rpc = container_of(repv, typeof(*rpc), data);
-
- r535_gsp_msg_done(gsp, rpc);
-}
-
-static void *
-r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 payload_size)
-{
- struct nvfw_gsp_rpc *rpc;
-
- rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + payload_size,
- sizeof(u64)));
- if (IS_ERR(rpc))
- return ERR_CAST(rpc);
-
- rpc->header_version = 0x03000000;
- rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V';
- rpc->function = fn;
- rpc->rpc_result = 0xffffffff;
- rpc->rpc_result_private = 0xffffffff;
- rpc->length = sizeof(*rpc) + payload_size;
- return rpc->data;
-}
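
The signature constant packs the bytes 'C', 'P', 'R', 'V' from most- to least-significant byte, i.e. 0x43505256, so on a little-endian CPU the field reads "VRPC" in memory. A quick verification:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t sig = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V';

		assert(sig == 0x43505256);
		return 0;
	}
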
-
-static void *
-r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, bool wait,
- u32 gsp_rpc_len)
-{
- struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
- struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
- const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*msg);
- const u32 max_payload_size = max_rpc_size - sizeof(*rpc);
- u32 payload_size = rpc->length - sizeof(*rpc);
- void *repv;
-
- mutex_lock(&gsp->cmdq.mutex);
- if (payload_size > max_payload_size) {
- const u32 fn = rpc->function;
- u32 remain_payload_size = payload_size;
-
-		/* Adjust the length and send the initial RPC. */
- rpc->length = sizeof(*rpc) + max_payload_size;
- msg->checksum = rpc->length;
-
- repv = r535_gsp_rpc_send(gsp, payload, false, 0);
- if (IS_ERR(repv))
- goto done;
-
- payload += max_payload_size;
- remain_payload_size -= max_payload_size;
-
- /* Remaining chunks sent as CONTINUATION_RECORD RPCs. */
- while (remain_payload_size) {
- u32 size = min(remain_payload_size,
- max_payload_size);
- void *next;
-
- next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size);
- if (IS_ERR(next)) {
- repv = next;
- goto done;
- }
-
- memcpy(next, payload, size);
-
- repv = r535_gsp_rpc_send(gsp, next, false, 0);
- if (IS_ERR(repv))
- goto done;
-
- payload += size;
- remain_payload_size -= size;
- }
-
- /* Wait for reply. */
- rpc = r535_gsp_msg_recv(gsp, fn, payload_size +
- sizeof(*rpc));
- if (!IS_ERR_OR_NULL(rpc)) {
- if (wait) {
- repv = rpc->data;
- } else {
- nvkm_gsp_rpc_done(gsp, rpc);
- repv = NULL;
- }
- } else {
- repv = wait ? rpc : NULL;
- }
- } else {
- repv = r535_gsp_rpc_send(gsp, payload, wait, gsp_rpc_len);
- }
-
-done:
- mutex_unlock(&gsp->cmdq.mutex);
- return repv;
-}
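
Everything beyond the first max_payload_size bytes goes out as CONTINUATION_RECORD RPCs, so an oversized payload costs one initial send plus one per remaining chunk. A sketch of the arithmetic:

	/* Number of RPC sends for a payload, given the per-message payload cap. */
	static unsigned int rpc_sends(unsigned int payload, unsigned int cap)
	{
		if (payload <= cap)
			return 1;
		return 1 + (payload - cap + cap - 1) / cap;	/* 1 + DIV_ROUND_UP */
	}
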
-
-const struct nvkm_gsp_rm
-r535_gsp_rm = {
- .rpc_get = r535_gsp_rpc_get,
- .rpc_push = r535_gsp_rpc_push,
- .rpc_done = r535_gsp_rpc_done,
-
- .rm_ctrl_get = r535_gsp_rpc_rm_ctrl_get,
- .rm_ctrl_push = r535_gsp_rpc_rm_ctrl_push,
- .rm_ctrl_done = r535_gsp_rpc_rm_ctrl_done,
-
- .rm_alloc_get = r535_gsp_rpc_rm_alloc_get,
- .rm_alloc_push = r535_gsp_rpc_rm_alloc_push,
- .rm_alloc_done = r535_gsp_rpc_rm_alloc_done,
-
- .rm_free = r535_gsp_rpc_rm_free,
-
- .client_ctor = r535_gsp_client_ctor,
- .client_dtor = r535_gsp_client_dtor,
-
- .device_ctor = r535_gsp_device_ctor,
- .device_dtor = r535_gsp_device_dtor,
-
- .event_ctor = r535_gsp_device_event_ctor,
- .event_dtor = r535_gsp_event_dtor,
-};
-
static void
r535_gsp_msgq_work(struct work_struct *work)
{
@@ -1086,10 +87,52 @@ r535_gsp_intr(struct nvkm_inth *inth)
return IRQ_HANDLED;
}
+static bool
+r535_gsp_xlat_mc_engine_idx(u32 mc_engine_idx, enum nvkm_subdev_type *ptype, int *pinst)
+{
+ switch (mc_engine_idx) {
+ case MC_ENGINE_IDX_GSP:
+ *ptype = NVKM_SUBDEV_GSP;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_DISP:
+ *ptype = NVKM_ENGINE_DISP;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE9:
+ *ptype = NVKM_ENGINE_CE;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_CE0;
+ return true;
+ case MC_ENGINE_IDX_GR0:
+ *ptype = NVKM_ENGINE_GR;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7:
+ *ptype = NVKM_ENGINE_NVDEC;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_NVDEC0;
+ return true;
+ case MC_ENGINE_IDX_MSENC ... MC_ENGINE_IDX_MSENC2:
+ *ptype = NVKM_ENGINE_NVENC;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_MSENC;
+ return true;
+ case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7:
+ *ptype = NVKM_ENGINE_NVJPG;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_NVJPEG0;
+ return true;
+ case MC_ENGINE_IDX_OFA0:
+ *ptype = NVKM_ENGINE_OFA;
+ *pinst = 0;
+ return true;
+ default:
+ return false;
+ }
+}
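
The case ranges rely on the MC_ENGINE_IDX_* values being contiguous within each engine class, so the instance number falls out by subtraction. A reduced, compilable model of the pattern (the enum values here are illustrative, not the real ones):

	#include <assert.h>
	#include <stdbool.h>

	enum eng { ENG_CE, ENG_NONE };

	enum { IDX_CE0 = 9, IDX_CE9 = IDX_CE0 + 9 };	/* hypothetical values */

	static bool xlat(int idx, enum eng *type, int *inst)
	{
		if (idx >= IDX_CE0 && idx <= IDX_CE9) {
			*type = ENG_CE;
			*inst = idx - IDX_CE0;
			return true;
		}
		return false;
	}

	int main(void)
	{
		enum eng t;
		int inst;

		assert(xlat(IDX_CE0 + 3, &t, &inst) && t == ENG_CE && inst == 3);
		return 0;
	}
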
+
static int
r535_gsp_intr_get_table(struct nvkm_gsp *gsp)
{
NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS *ctrl;
+ const struct nvkm_rm_api *rmapi = gsp->rm->api;
int ret = 0;
ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
@@ -1112,42 +155,8 @@ r535_gsp_intr_get_table(struct nvkm_gsp *gsp)
ctrl->table[i].engineIdx, ctrl->table[i].pmcIntrMask,
ctrl->table[i].vectorStall, ctrl->table[i].vectorNonStall);
- switch (ctrl->table[i].engineIdx) {
- case MC_ENGINE_IDX_GSP:
- type = NVKM_SUBDEV_GSP;
- inst = 0;
- break;
- case MC_ENGINE_IDX_DISP:
- type = NVKM_ENGINE_DISP;
- inst = 0;
- break;
- case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE9:
- type = NVKM_ENGINE_CE;
- inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_CE0;
- break;
- case MC_ENGINE_IDX_GR0:
- type = NVKM_ENGINE_GR;
- inst = 0;
- break;
- case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7:
- type = NVKM_ENGINE_NVDEC;
- inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVDEC0;
- break;
- case MC_ENGINE_IDX_MSENC ... MC_ENGINE_IDX_MSENC2:
- type = NVKM_ENGINE_NVENC;
- inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_MSENC;
- break;
- case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7:
- type = NVKM_ENGINE_NVJPG;
- inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVJPEG0;
- break;
- case MC_ENGINE_IDX_OFA0:
- type = NVKM_ENGINE_OFA;
- inst = 0;
- break;
- default:
+ if (!rmapi->gsp->xlat_mc_engine_idx(ctrl->table[i].engineIdx, &type, &inst))
continue;
- }
if (WARN_ON(gsp->intr_nr == ARRAY_SIZE(gsp->intr))) {
ret = -ENOSPC;
@@ -1165,35 +174,14 @@ r535_gsp_intr_get_table(struct nvkm_gsp *gsp)
return ret;
}
-static int
-r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
+void
+r535_gsp_get_static_info_fb(struct nvkm_gsp *gsp,
+ const struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS *info)
{
- GspStaticConfigInfo *rpc;
int last_usable = -1;
- rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc));
- if (IS_ERR(rpc))
- return PTR_ERR(rpc);
-
- gsp->internal.client.object.client = &gsp->internal.client;
- gsp->internal.client.object.parent = NULL;
- gsp->internal.client.object.handle = rpc->hInternalClient;
- gsp->internal.client.gsp = gsp;
-
- gsp->internal.device.object.client = &gsp->internal.client;
- gsp->internal.device.object.parent = &gsp->internal.client.object;
- gsp->internal.device.object.handle = rpc->hInternalDevice;
-
- gsp->internal.device.subdevice.client = &gsp->internal.client;
- gsp->internal.device.subdevice.parent = &gsp->internal.device.object;
- gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice;
-
- gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase;
- gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase;
-
- for (int i = 0; i < rpc->fbRegionInfoParams.numFBRegions; i++) {
- NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg =
- &rpc->fbRegionInfoParams.fbRegion[i];
+ for (int i = 0; i < info->numFBRegions; i++) {
+ const NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg = &info->fbRegion[i];
nvkm_debug(&gsp->subdev, "fb region %d: "
"%016llx-%016llx rsvd:%016llx perf:%08x comp:%d iso:%d prot:%d\n", i,
@@ -1215,10 +203,38 @@ r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
}
if (last_usable >= 0) {
- u32 rsvd_base = rpc->fbRegionInfoParams.fbRegion[last_usable].limit + 1;
+ u32 rsvd_base = info->fbRegion[last_usable].limit + 1;
gsp->fb.rsvd_size = gsp->fb.heap.addr - rsvd_base;
}
+}
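
The reserved-size computation is just the gap between the end of the last usable FB region and the base of GSP's heap carve-out:

	#include <stdint.h>

	/* rsvd_size = heap base - (limit of last usable FB region + 1) */
	static uint64_t fb_rsvd_size(uint64_t last_usable_limit, uint64_t heap_addr)
	{
		return heap_addr - (last_usable_limit + 1);
	}
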
+
+static int
+r535_gsp_get_static_info(struct nvkm_gsp *gsp)
+{
+ GspStaticConfigInfo *rpc;
+
+ rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc));
+ if (IS_ERR(rpc))
+ return PTR_ERR(rpc);
+
+ gsp->internal.client.object.client = &gsp->internal.client;
+ gsp->internal.client.object.parent = NULL;
+ gsp->internal.client.object.handle = rpc->hInternalClient;
+ gsp->internal.client.gsp = gsp;
+
+ gsp->internal.device.object.client = &gsp->internal.client;
+ gsp->internal.device.object.parent = &gsp->internal.client.object;
+ gsp->internal.device.object.handle = rpc->hInternalDevice;
+
+ gsp->internal.device.subdevice.client = &gsp->internal.client;
+ gsp->internal.device.subdevice.parent = &gsp->internal.device.object;
+ gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice;
+
+ gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase;
+ gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase;
+
+ r535_gsp_get_static_info_fb(gsp, &rpc->fbRegionInfoParams);
for (int gpc = 0; gpc < ARRAY_SIZE(rpc->tpcInfo); gpc++) {
if (rpc->gpcInfo.gpcMask & BIT(gpc)) {
@@ -1231,7 +247,7 @@ r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
return 0;
}
-static void
+void
nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *mem)
{
if (mem->data) {
@@ -1260,7 +276,7 @@ nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *mem)
* so we take a device reference to ensure its lifetime. The reference is
* dropped in the destructor.
*/
-static int
+int
nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, size_t size, struct nvkm_gsp_mem *mem)
{
mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL);
@@ -1277,9 +293,10 @@ static int
r535_gsp_postinit(struct nvkm_gsp *gsp)
{
struct nvkm_device *device = gsp->subdev.device;
+ const struct nvkm_rm_api *rmapi = gsp->rm->api;
int ret;
- ret = r535_gsp_rpc_get_gsp_static_info(gsp);
+ ret = rmapi->gsp->get_static_info(gsp);
if (WARN_ON(ret))
return ret;
@@ -1327,7 +344,7 @@ r535_gsp_rpc_unloading_guest_driver(struct nvkm_gsp *gsp, bool suspend)
rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0;
}
- return nvkm_gsp_rpc_wr(gsp, rpc, true);
+ return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
}
enum registry_type {
@@ -1684,7 +701,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
build_registry(gsp, rpc);
- return nvkm_gsp_rpc_wr(gsp, rpc, false);
+ return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_NOWAIT);
fail:
clean_registry(gsp);
@@ -1692,7 +709,7 @@ fail:
}
#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
-static void
+void
r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps)
{
const guid_t NVOP_DSM_GUID =
@@ -1726,7 +743,7 @@ r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps)
kfree(argv4.buffer.pointer);
}
-static void
+void
r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt)
{
const guid_t JT_DSM_GUID =
@@ -1818,7 +835,7 @@ r535_gsp_acpi_mux(acpi_handle handle, DOD_METHOD_DATA *dod, MUX_METHOD_DATA *mux
}
}
-static void
+void
r535_gsp_acpi_dod(acpi_handle handle, DOD_METHOD_DATA *dod)
{
acpi_status status;
@@ -1871,7 +888,7 @@ r535_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi)
}
static int
-r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp)
+r535_gsp_set_system_info(struct nvkm_gsp *gsp)
{
struct nvkm_device *device = gsp->subdev.device;
struct nvkm_device_pci *pdev = container_of(device, typeof(*pdev), device);
@@ -1884,16 +901,16 @@ r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp)
if (IS_ERR(info))
return PTR_ERR(info);
- info->gpuPhysAddr = device->func->resource_addr(device, 0);
- info->gpuPhysFbAddr = device->func->resource_addr(device, 1);
- info->gpuPhysInstAddr = device->func->resource_addr(device, 3);
+ info->gpuPhysAddr = device->func->resource_addr(device, NVKM_BAR0_PRI);
+ info->gpuPhysFbAddr = device->func->resource_addr(device, NVKM_BAR1_FB);
+ info->gpuPhysInstAddr = device->func->resource_addr(device, NVKM_BAR2_INST);
info->nvDomainBusDeviceFunc = pci_dev_id(pdev->pdev);
info->maxUserVa = TASK_SIZE;
- info->pciConfigMirrorBase = 0x088000;
- info->pciConfigMirrorSize = 0x001000;
+ info->pciConfigMirrorBase = device->pci->func->cfg.addr;
+ info->pciConfigMirrorSize = device->pci->func->cfg.size;
r535_gsp_acpi_info(gsp, &info->acpiMethodData);
- return nvkm_gsp_rpc_wr(gsp, info, false);
+ return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT);
}
static int
@@ -1911,33 +928,6 @@ r535_gsp_msg_os_error_log(void *priv, u32 fn, void *repv, u32 repc)
}
static int
-r535_gsp_msg_rc_triggered(void *priv, u32 fn, void *repv, u32 repc)
-{
- rpc_rc_triggered_v17_02 *msg = repv;
- struct nvkm_gsp *gsp = priv;
- struct nvkm_subdev *subdev = &gsp->subdev;
- struct nvkm_chan *chan;
- unsigned long flags;
-
- if (WARN_ON(repc < sizeof(*msg)))
- return -EINVAL;
-
- nvkm_error(subdev, "rc engn:%08x chid:%d type:%d scope:%d part:%d\n",
- msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope,
- msg->partitionAttributionId);
-
- chan = nvkm_chan_get_chid(&subdev->device->fifo->engine, msg->chid / 8, &flags);
- if (!chan) {
- nvkm_error(subdev, "rc chid:%d not found!\n", msg->chid);
- return 0;
- }
-
- nvkm_chan_error(chan, false);
- nvkm_chan_put(&chan, flags);
- return 0;
-}
-
-static int
r535_gsp_msg_mmu_fault_queued(void *priv, u32 fn, void *repv, u32 repc)
{
struct nvkm_gsp *gsp = priv;
@@ -2130,97 +1120,6 @@ r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc)
}
static int
-r535_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
-{
- struct nvkm_subdev *subdev = &gsp->subdev;
- struct nvkm_device *device = subdev->device;
- u32 wpr2_hi;
- int ret;
-
- wpr2_hi = nvkm_rd32(device, 0x1fa828);
- if (!wpr2_hi) {
- nvkm_debug(subdev, "WPR2 not set - skipping booter unload\n");
- return 0;
- }
-
- ret = nvkm_falcon_fw_boot(&gsp->booter.unload, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
- if (WARN_ON(ret))
- return ret;
-
- wpr2_hi = nvkm_rd32(device, 0x1fa828);
- if (WARN_ON(wpr2_hi))
- return -EIO;
-
- return 0;
-}
-
-static int
-r535_gsp_booter_load(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
-{
- int ret;
-
- ret = nvkm_falcon_fw_boot(&gsp->booter.load, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
- if (ret)
- return ret;
-
- nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version);
-
- if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon)))
- return -EIO;
-
- return 0;
-}
-
-static int
-r535_gsp_wpr_meta_init(struct nvkm_gsp *gsp)
-{
- GspFwWprMeta *meta;
- int ret;
-
- ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->wpr_meta);
- if (ret)
- return ret;
-
- meta = gsp->wpr_meta.data;
-
- meta->magic = GSP_FW_WPR_META_MAGIC;
- meta->revision = GSP_FW_WPR_META_REVISION;
-
- meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr;
- meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size;
-
- meta->sysmemAddrOfBootloader = gsp->boot.fw.addr;
- meta->sizeOfBootloader = gsp->boot.fw.size;
- meta->bootloaderCodeOffset = gsp->boot.code_offset;
- meta->bootloaderDataOffset = gsp->boot.data_offset;
- meta->bootloaderManifestOffset = gsp->boot.manifest_offset;
-
- meta->sysmemAddrOfSignature = gsp->sig.addr;
- meta->sizeOfSignature = gsp->sig.size;
-
- meta->gspFwRsvdStart = gsp->fb.heap.addr;
- meta->nonWprHeapOffset = gsp->fb.heap.addr;
- meta->nonWprHeapSize = gsp->fb.heap.size;
- meta->gspFwWprStart = gsp->fb.wpr2.addr;
- meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr;
- meta->gspFwHeapSize = gsp->fb.wpr2.heap.size;
- meta->gspFwOffset = gsp->fb.wpr2.elf.addr;
- meta->bootBinOffset = gsp->fb.wpr2.boot.addr;
- meta->frtsOffset = gsp->fb.wpr2.frts.addr;
- meta->frtsSize = gsp->fb.wpr2.frts.size;
- meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000);
- meta->fbSize = gsp->fb.size;
- meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr;
- meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size;
- meta->bootCount = 0;
- meta->partitionRpcAddr = 0;
- meta->partitionRpcRequestOffset = 0;
- meta->partitionRpcReplyOffset = 0;
- meta->verified = 0;
- return 0;
-}
-
-static int
r535_gsp_shared_init(struct nvkm_gsp *gsp)
{
struct {
@@ -2271,23 +1170,11 @@ r535_gsp_shared_init(struct nvkm_gsp *gsp)
return 0;
}
-static int
-r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume)
+static void
+r535_gsp_set_rmargs(struct nvkm_gsp *gsp, bool resume)
{
- GSP_ARGUMENTS_CACHED *args;
- int ret;
-
- if (!resume) {
- ret = r535_gsp_shared_init(gsp);
- if (ret)
- return ret;
+ GSP_ARGUMENTS_CACHED *args = gsp->rmargs.data;
- ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs);
- if (ret)
- return ret;
- }
-
- args = gsp->rmargs.data;
args->messageQueueInitArguments.sharedMemPhysAddr = gsp->shm.mem.addr;
args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr;
args->messageQueueInitArguments.cmdQueueOffset =
@@ -2304,7 +1191,24 @@ r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume)
args->srInitArguments.flags = 0;
args->srInitArguments.bInPMTransition = 1;
}
+}
+static int
+r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume)
+{
+ int ret;
+
+ if (!resume) {
+ ret = r535_gsp_shared_init(gsp);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs);
+ if (ret)
+ return ret;
+ }
+
+ gsp->rm->api->gsp->set_rmargs(gsp, resume);
return 0;
}
@@ -2797,18 +1701,22 @@ lvl1_fail:
return ret;
}
+static u32
+r535_gsp_sr_data_size(struct nvkm_gsp *gsp)
+{
+ GspFwWprMeta *meta = gsp->wpr_meta.data;
+
+ return meta->gspFwWprEnd - meta->gspFwWprStart;
+}
+
int
r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
{
- u32 mbox0 = 0xff, mbox1 = 0xff;
+ struct nvkm_rm *rm = gsp->rm;
int ret;
- if (!gsp->running)
- return 0;
-
if (suspend) {
- GspFwWprMeta *meta = gsp->wpr_meta.data;
- u64 len = meta->gspFwWprEnd - meta->gspFwWprStart;
+ u32 len = rm->api->gsp->sr_data_size(gsp);
GspFwSRMeta *sr;
ret = nvkm_gsp_sg(gsp->subdev.device, len, &gsp->sr.sgt);
@@ -2829,8 +1737,13 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.lvl0.addr;
sr->sizeOfSuspendResumeData = len;
- mbox0 = lower_32_bits(gsp->sr.meta.addr);
- mbox1 = upper_32_bits(gsp->sr.meta.addr);
+ ret = rm->api->fbsr->suspend(gsp);
+ if (ret) {
+ nvkm_gsp_mem_dtor(&gsp->sr.meta);
+ nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3);
+ nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt);
+ return ret;
+ }
}
ret = r535_gsp_rpc_unloading_guest_driver(gsp, suspend);
@@ -2838,18 +1751,10 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
return ret;
nvkm_msec(gsp->subdev.device, 2000,
- if (nvkm_falcon_rd32(&gsp->falcon, 0x040) & 0x80000000)
+ if (nvkm_falcon_rd32(&gsp->falcon, 0x040) == 0x80000000)
break;
);
- nvkm_falcon_reset(&gsp->falcon);
-
- ret = nvkm_gsp_fwsec_sb(gsp);
- WARN_ON(ret);
-
- ret = r535_gsp_booter_unload(gsp, mbox0, mbox1);
- WARN_ON(ret);
-
gsp->running = false;
return 0;
}
@@ -2857,23 +1762,12 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
int
r535_gsp_init(struct nvkm_gsp *gsp)
{
- u32 mbox0, mbox1;
int ret;
- if (!gsp->sr.meta.data) {
- mbox0 = lower_32_bits(gsp->wpr_meta.addr);
- mbox1 = upper_32_bits(gsp->wpr_meta.addr);
- } else {
- r535_gsp_rmargs_init(gsp, true);
-
- mbox0 = lower_32_bits(gsp->sr.meta.addr);
- mbox1 = upper_32_bits(gsp->sr.meta.addr);
- }
+ nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version);
- /* Execute booter to handle (eventually...) booting GSP-RM. */
- ret = r535_gsp_booter_load(gsp, mbox0, mbox1);
- if (WARN_ON(ret))
- goto done;
+ if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon)))
+ return -EIO;
ret = r535_gsp_rpc_poll(gsp, NV_VGPU_MSG_EVENT_GSP_INIT_DONE);
if (ret)
@@ -2883,6 +1777,8 @@ r535_gsp_init(struct nvkm_gsp *gsp)
done:
if (gsp->sr.meta.data) {
+ gsp->rm->api->fbsr->resume(gsp);
+
nvkm_gsp_mem_dtor(&gsp->sr.meta);
nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3);
nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt);
@@ -2944,19 +1840,6 @@ r535_gsp_elf_section(struct nvkm_gsp *gsp, const char *name, const u8 **pdata, u
return -ENOENT;
}
-static void
-r535_gsp_dtor_fws(struct nvkm_gsp *gsp)
-{
- nvkm_firmware_put(gsp->fws.bl);
- gsp->fws.bl = NULL;
- nvkm_firmware_put(gsp->fws.booter.unload);
- gsp->fws.booter.unload = NULL;
- nvkm_firmware_put(gsp->fws.booter.load);
- gsp->fws.booter.load = NULL;
- nvkm_firmware_put(gsp->fws.rm);
- gsp->fws.rm = NULL;
-}
-
#ifdef CONFIG_DEBUG_FS
struct r535_gsp_log {
@@ -3190,10 +2073,16 @@ r535_gsp_dtor(struct nvkm_gsp *gsp)
nvkm_falcon_fw_dtor(&gsp->booter.unload);
nvkm_falcon_fw_dtor(&gsp->booter.load);
+ nvkm_gsp_mem_dtor(&gsp->fmc.args);
+ kfree(gsp->fmc.sig);
+ kfree(gsp->fmc.pkey);
+ kfree(gsp->fmc.hash);
+ nvkm_gsp_mem_dtor(&gsp->fmc.fw);
+
mutex_destroy(&gsp->msgq.mutex);
mutex_destroy(&gsp->cmdq.mutex);
- r535_gsp_dtor_fws(gsp);
+ nvkm_gsp_dtor_fws(gsp);
nvkm_gsp_mem_dtor(&gsp->rmargs);
nvkm_gsp_mem_dtor(&gsp->wpr_meta);
@@ -3206,10 +2095,17 @@ r535_gsp_dtor(struct nvkm_gsp *gsp)
nvkm_gsp_mem_dtor(&gsp->logrm);
}
+static void
+r535_gsp_drop_send_user_shared_data(struct nvkm_gsp *gsp)
+{
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, NULL, NULL);
+}
+
int
r535_gsp_oneinit(struct nvkm_gsp *gsp)
{
struct nvkm_device *device = gsp->subdev.device;
+ const struct nvkm_rm_api *rmapi = gsp->rm->api;
const u8 *data;
u64 size;
int ret;
@@ -3217,16 +2113,6 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp)
mutex_init(&gsp->cmdq.mutex);
mutex_init(&gsp->msgq.mutex);
- ret = gsp->func->booter.ctor(gsp, "booter-load", gsp->fws.booter.load,
- &device->sec2->falcon, &gsp->booter.load);
- if (ret)
- return ret;
-
- ret = gsp->func->booter.ctor(gsp, "booter-unload", gsp->fws.booter.unload,
- &device->sec2->falcon, &gsp->booter.unload);
- if (ret)
- return ret;
-
/* Load GSP firmware from ELF image into DMA-accessible memory. */
ret = r535_gsp_elf_section(gsp, ".fwimage", &data, &size);
if (ret)
@@ -3255,65 +2141,29 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp)
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER,
r535_gsp_msg_run_cpu_sequencer, gsp);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_POST_EVENT, r535_gsp_msg_post_event, gsp);
- r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED,
- r535_gsp_msg_rc_triggered, gsp);
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED, rmapi->fifo->rc_triggered, gsp);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED,
r535_gsp_msg_mmu_fault_queued, gsp);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG, r535_gsp_msg_os_error_log, gsp);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_PERF_BRIDGELESS_INFO_UPDATE, NULL, NULL);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT, NULL, NULL);
- r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, NULL, NULL);
+ if (rmapi->gsp->drop_send_user_shared_data)
+ rmapi->gsp->drop_send_user_shared_data(gsp);
+ if (rmapi->gsp->drop_post_nocat_record)
+ rmapi->gsp->drop_post_nocat_record(gsp);
+
ret = r535_gsp_rm_boot_ctor(gsp);
if (ret)
return ret;
/* Release FW images - we've copied them to DMA buffers now. */
- r535_gsp_dtor_fws(gsp);
-
- /* Calculate FB layout. */
- gsp->fb.wpr2.frts.size = 0x100000;
- gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size;
-
- gsp->fb.wpr2.boot.size = gsp->boot.fw.size;
- gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000);
-
- gsp->fb.wpr2.elf.size = gsp->fw.len;
- gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000);
-
- {
- u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30);
-
- gsp->fb.wpr2.heap.size =
- gsp->func->wpr_heap.os_carveout_size +
- gsp->func->wpr_heap.base_size +
- ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) +
- ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20);
-
- gsp->fb.wpr2.heap.size = max(gsp->fb.wpr2.heap.size, gsp->func->wpr_heap.min_size);
- }
-
- gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000);
- gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000);
-
- gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000);
- gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr;
-
- gsp->fb.heap.size = 0x100000;
- gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size;
-
- ret = nvkm_gsp_fwsec_frts(gsp);
- if (WARN_ON(ret))
- return ret;
+ nvkm_gsp_dtor_fws(gsp);
ret = r535_gsp_libos_init(gsp);
if (WARN_ON(ret))
return ret;
- ret = r535_gsp_wpr_meta_init(gsp);
- if (WARN_ON(ret))
- return ret;
-
- ret = r535_gsp_rpc_set_system_info(gsp);
+ ret = rmapi->gsp->set_system_info(gsp);
if (WARN_ON(ret))
return ret;
@@ -3321,76 +2171,17 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp)
if (WARN_ON(ret))
return ret;
- /* Reset GSP into RISC-V mode. */
- ret = gsp->func->reset(gsp);
- if (WARN_ON(ret))
- return ret;
-
- nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr));
- nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr));
-
mutex_init(&gsp->client_id.mutex);
idr_init(&gsp->client_id.idr);
return 0;
}
-static int
-r535_gsp_load_fw(struct nvkm_gsp *gsp, const char *name, const char *ver,
- const struct firmware **pfw)
-{
- char fwname[64];
-
- snprintf(fwname, sizeof(fwname), "gsp/%s-%s", name, ver);
- return nvkm_firmware_get(&gsp->subdev, fwname, 0, pfw);
-}
-
-int
-r535_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
-{
- struct nvkm_subdev *subdev = &gsp->subdev;
- int ret;
- bool enable_gsp = fwif->enable;
-
-#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT)
- enable_gsp = true;
-#endif
- if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp))
- return -EINVAL;
-
- if ((ret = r535_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm)) ||
- (ret = r535_gsp_load_fw(gsp, "booter_load", fwif->ver, &gsp->fws.booter.load)) ||
- (ret = r535_gsp_load_fw(gsp, "booter_unload", fwif->ver, &gsp->fws.booter.unload)) ||
- (ret = r535_gsp_load_fw(gsp, "bootloader", fwif->ver, &gsp->fws.bl))) {
- r535_gsp_dtor_fws(gsp);
- return ret;
- }
-
- return 0;
-}
-
-#define NVKM_GSP_FIRMWARE(chip) \
-MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_load-535.113.01.bin"); \
-MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_unload-535.113.01.bin"); \
-MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-535.113.01.bin"); \
-MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-535.113.01.bin")
-
-NVKM_GSP_FIRMWARE(tu102);
-NVKM_GSP_FIRMWARE(tu104);
-NVKM_GSP_FIRMWARE(tu106);
-
-NVKM_GSP_FIRMWARE(tu116);
-NVKM_GSP_FIRMWARE(tu117);
-
-NVKM_GSP_FIRMWARE(ga100);
-
-NVKM_GSP_FIRMWARE(ga102);
-NVKM_GSP_FIRMWARE(ga103);
-NVKM_GSP_FIRMWARE(ga104);
-NVKM_GSP_FIRMWARE(ga106);
-NVKM_GSP_FIRMWARE(ga107);
-
-NVKM_GSP_FIRMWARE(ad102);
-NVKM_GSP_FIRMWARE(ad103);
-NVKM_GSP_FIRMWARE(ad104);
-NVKM_GSP_FIRMWARE(ad106);
-NVKM_GSP_FIRMWARE(ad107);
+const struct nvkm_rm_api_gsp
+r535_gsp = {
+ .set_rmargs = r535_gsp_set_rmargs,
+ .set_system_info = r535_gsp_set_system_info,
+ .get_static_info = r535_gsp_get_static_info,
+ .xlat_mc_engine_idx = r535_gsp_xlat_mc_engine_idx,
+ .drop_send_user_shared_data = r535_gsp_drop_send_user_shared_data,
+ .sr_data_size = r535_gsp_sr_data_size,
+};
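
With the hooks gathered into a per-version table, version-specific behaviour becomes an indirect call through rm->api->gsp rather than a direct function call, so a later RM version can supply its own table while common code stays untouched. A reduced model of the dispatch (all names hypothetical):

	#include <stdio.h>

	struct gsp;

	struct api_gsp {
		int (*get_static_info)(struct gsp *);
	};

	static int r535_get_static_info(struct gsp *gsp) { (void)gsp; return 535; }
	static int r570_get_static_info(struct gsp *gsp) { (void)gsp; return 570; }

	static const struct api_gsp r535_api = { .get_static_info = r535_get_static_info };
	static const struct api_gsp r570_api = { .get_static_info = r570_get_static_info };

	int main(void)
	{
		const struct api_gsp *api = &r535_api;	/* chosen at probe time */

		printf("%d\n", api->get_static_info(NULL));
		return 0;
	}
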
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c
new file mode 100644
index 000000000000..a8c42ec0367b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/engine.h>
+
+#include "nvrm/nvdec.h"
+
+static int
+r535_nvdec_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *nvdec)
+{
+ NV_BSP_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), nvdec);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->size = sizeof(*args);
+ args->engineInstance = inst;
+
+ return nvkm_gsp_rm_alloc_wr(nvdec, args);
+}
+
+const struct nvkm_rm_api_engine
+r535_nvdec = {
+ .alloc = r535_nvdec_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c
new file mode 100644
index 000000000000..acb3ce8bb9de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/engine.h>
+
+#include "nvrm/nvenc.h"
+
+static int
+r535_nvenc_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *nvenc)
+{
+ NV_MSENC_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), nvenc);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->size = sizeof(*args);
+ args->engineInstance = inst;
+
+ return nvkm_gsp_rm_alloc_wr(nvenc, args);
+}
+
+const struct nvkm_rm_api_engine
+r535_nvenc = {
+ .alloc = r535_nvenc_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c
new file mode 100644
index 000000000000..fbc4080ad8d8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/engine.h>
+
+#include "nvrm/nvjpg.h"
+
+static int
+r535_nvjpg_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *nvjpg)
+{
+ NV_NVJPG_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), nvjpg);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->size = sizeof(*args);
+ args->engineInstance = inst;
+
+ return nvkm_gsp_rm_alloc_wr(nvjpg, args);
+}
+
+const struct nvkm_rm_api_engine
+r535_nvjpg = {
+ .alloc = r535_nvjpg_alloc,
+};
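
The nvdec, nvenc and nvjpg allocators differ only in the parameter typedef; each fills a size and an engineInstance. If the three layouts share that leading pair of fields (an assumption, not something these headers guarantee), one helper could serve all of them:

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	/* Assumed common prefix of the NV_*_ALLOCATION_PARAMETERS structs. */
	struct engine_alloc_prefix {
		uint32_t size;
		uint32_t engineInstance;
	};

	static void engine_alloc_fill(void *params, size_t params_size, int inst)
	{
		struct engine_alloc_prefix *args = params;

		memset(params, 0, params_size);
		args->size = (uint32_t)params_size;
		args->engineInstance = (uint32_t)inst;
	}
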
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h
new file mode 100644
index 000000000000..cbc7e611fbda
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_ALLOC_H__
+#define __NVRM_ALLOC_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct rpc_gsp_rm_alloc_v03_00
+{
+ NvHandle hClient;
+ NvHandle hParent;
+ NvHandle hObject;
+ NvU32 hClass;
+ NvU32 status;
+ NvU32 paramsSize;
+ NvU32 flags;
+ NvU8 reserved[4];
+ NvU8 params[];
+} rpc_gsp_rm_alloc_v03_00;
+
+typedef struct NVOS00_PARAMETERS_v03_00
+{
+ NvHandle hRoot;
+ NvHandle hObjectParent;
+ NvHandle hObjectOld;
+ NvV32 status;
+} NVOS00_PARAMETERS_v03_00;
+
+typedef struct rpc_free_v03_00
+{
+ NVOS00_PARAMETERS_v03_00 params;
+} rpc_free_v03_00;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h
new file mode 100644
index 000000000000..60b0b08491ee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_BAR_H__
+#define __NVRM_BAR_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef enum
+{
+ NV_RPC_UPDATE_PDE_BAR_1,
+ NV_RPC_UPDATE_PDE_BAR_2,
+ NV_RPC_UPDATE_PDE_BAR_INVALID,
+} NV_RPC_UPDATE_PDE_BAR_TYPE;
+
+typedef struct UpdateBarPde_v15_00
+{
+ NV_RPC_UPDATE_PDE_BAR_TYPE barType;
+ NvU64 entryValue NV_ALIGN_BYTES(8);
+ NvU64 entryLevelShift NV_ALIGN_BYTES(8);
+} UpdateBarPde_v15_00;
+
+typedef struct rpc_update_bar_pde_v15_00
+{
+ UpdateBarPde_v15_00 info;
+} rpc_update_bar_pde_v15_00;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h
new file mode 100644
index 000000000000..90b0325203d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_CE_H__
+#define __NVRM_CE_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct NVC0B5_ALLOCATION_PARAMETERS {
+ NvU32 version;
+ NvU32 engineType;
+} NVC0B5_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h
new file mode 100644
index 000000000000..df0e63c0cb6b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_CLIENT_H__
+#define __NVRM_CLIENT_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV01_ROOT (0x0U) /* finn: Evaluated from "NV0000_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+#define NV_PROC_NAME_MAX_LENGTH 100U
+
+typedef struct NV0000_ALLOC_PARAMETERS {
+ NvHandle hClient; /* CORERM-2934: hClient must remain the first member until all allocations use these params */
+ NvU32 processID;
+ char processName[NV_PROC_NAME_MAX_LENGTH];
+} NV0000_ALLOC_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h
new file mode 100644
index 000000000000..77f10acd82c9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_CTRL_H__
+#define __NVRM_CTRL_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct rpc_gsp_rm_control_v03_00
+{
+ NvHandle hClient;
+ NvHandle hObject;
+ NvU32 cmd;
+ NvU32 status;
+ NvU32 paramsSize;
+ NvU32 flags;
+ NvU8 params[];
+} rpc_gsp_rm_control_v03_00;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h
new file mode 100644
index 000000000000..3933b9ad61ce
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_DEVICE_H__
+#define __NVRM_DEVICE_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV01_DEVICE_0 (0x80U) /* finn: Evaluated from "NV0080_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+typedef struct NV0080_ALLOC_PARAMETERS {
+ NvU32 deviceId;
+ NvHandle hClientShare;
+ NvHandle hTargetClient;
+ NvHandle hTargetDevice;
+ NvV32 flags;
+ NV_DECLARE_ALIGNED(NvU64 vaSpaceSize, 8);
+ NV_DECLARE_ALIGNED(NvU64 vaStartInternal, 8);
+ NV_DECLARE_ALIGNED(NvU64 vaLimitInternal, 8);
+ NvV32 vaMode;
+} NV0080_ALLOC_PARAMETERS;
+
+#define NV20_SUBDEVICE_0 (0x2080U) /* finn: Evaluated from "NV2080_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+typedef struct NV2080_ALLOC_PARAMETERS {
+ NvU32 subDeviceId;
+} NV2080_ALLOC_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h
new file mode 100644
index 000000000000..7b7539639540
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h
@@ -0,0 +1,741 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_DISP_H__
+#define __NVRM_DISP_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM (0x20800a49) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS {
+ NV_DECLARE_ALIGNED(NvU64 instMemPhysAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 instMemSize, 8);
+ NvU32 instMemAddrSpace;
+ NvU32 instMemCpuCacheAttr;
+} NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS;
+
+#define NV_MEMORY_WRITECOMBINED 2
+
+#define NV04_DISPLAY_COMMON (0x00000073)
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO (0x20800a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS {
+ NvU32 feHwSysCap;
+ NvU32 windowPresentMask;
+ NvBool bFbRemapperEnabled;
+ NvU32 numHeads;
+ NvBool bPrimaryVga;
+ NvU32 i2cPort;
+ NvU32 internalDispActiveMask;
+} NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS;
+
+#define NV2080_CTRL_ACPI_DSM_READ_SIZE (0x1000) /* finn: Evaluated from "(4 * 1024)" */
+
+#define NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD (0x20800ac6) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS {
+ NvU32 status;
+ NvU16 backLightDataSize;
+ NvU8 backLightData[NV2080_CTRL_ACPI_DSM_READ_SIZE];
+} NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS;
+
+typedef struct NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS {
+ NvU32 subDeviceInstance;
+} NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT (0x731365U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS (0x730102U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 flags;
+ NvU32 numHeads;
+} NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK (0x730287U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 headMask;
+} NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS;
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED (0x730120U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayMask;
+ NvU32 displayMaskDDC;
+} NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS;
+
+#define NV0073_CTRL_MAX_CONNECTORS 4U
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA (0x730250U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 flags;
+ NvU32 DDCPartners;
+ NvU32 count;
+ struct {
+ NvU32 index;
+ NvU32 type;
+ NvU32 location;
+ } data[NV0073_CTRL_MAX_CONNECTORS];
+ NvU32 platform;
+} NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO (0x73028bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 index;
+ NvU32 type;
+ NvU32 protocol;
+ NvU32 ditherType;
+ NvU32 ditherAlgo;
+ NvU32 location;
+ NvU32 rootPortId;
+ NvU32 dcbIndex;
+ NV_DECLARE_ALIGNED(NvU64 vbiosAddress, 8);
+ NvBool bIsLitByVbios;
+ NvBool bIsDispDynamic;
+} NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_NONE (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_DAC (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_SOR (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR (0x00000003U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_DSI (0x00000005U)
+
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS (0x00000005U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A (0x00000008U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B (0x00000009U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DSI (0x00000010U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI (0x00000011U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_UNKNOWN (0xFFFFFFFFU)
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DSC_CAP_PARAMS {
+ NvBool bDscSupported;
+ NvU32 encoderColorFormatMask;
+ NvU32 lineBufferSizeKB;
+ NvU32 rateBufferSizeKB;
+ NvU32 bitsPerPixelPrecision;
+ NvU32 maxNumHztSlices;
+ NvU32 lineBufferBitDepth;
+} NV0073_CTRL_CMD_DSC_CAP_PARAMS;
+
+typedef struct NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 sorIndex;
+ NvU32 maxLinkRate;
+ NvU32 dpVersionsSupported;
+ NvU32 UHBRSupported;
+ NvBool bIsMultistreamSupported;
+ NvBool bIsSCEnabled;
+ NvBool bHasIncreasedWatermarkLimits;
+ NvBool bIsPC2Disabled;
+ NvBool isSingleHeadMSTSupported;
+ NvBool bFECSupported;
+ NvBool bIsTrainPhyRepeater;
+ NvBool bOverrideLinkBw;
+ NV0073_CTRL_CMD_DSC_CAP_PARAMS DSC;
+} NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
+#define NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID (0x69U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2 0:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4 1:1
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE 2:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62 (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40 (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000008U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16 (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4 (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1 (0x00000005U)
+
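+/*
+ * Editor's sketch, not part of the RM excerpt: the h:l pairs above are FINN
+ * bit ranges within the dword named by the define's prefix. A caller could
+ * decode the sink's maximum link rate open-coded (nouveau would normally use
+ * its DRF helpers), for illustration only:
+ *
+ *	NvU32 rate = params.maxLinkRate & 0x7;	// MAX_LINK_RATE is bits 2:0
+ *	if (rate == NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10)
+ *		;	// sink supports 8.1 Gbps per lane (HBR3)
+ */
+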
+#define NV2080_NOTIFIERS_HOTPLUG (1)
+
+typedef struct {
+ NvU32 plugDisplayMask;
+ NvU32 unplugDisplayMask;
+} Nv2080HotplugNotification;
+
+#define NV2080_NOTIFIERS_DP_IRQ (7)
+
+typedef struct Nv2080DpIrqNotificationRec {
+ NvU32 displayId;
+} Nv2080DpIrqNotification;
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE (0x730122U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 flags;
+ NvU32 displayMask;
+ NvU32 retryTimeMs;
+} NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS;
+
+#define NV0073_CTRL_CMD_DFP_GET_INFO (0x731140U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DFP_GET_INFO_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 flags;
+ NvU32 flags2;
+} NV0073_CTRL_DFP_GET_INFO_PARAMS;
+
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL 2:0
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_TMDS (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_LVDS (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_SDI (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DSI (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_WRBK (0x00000005U)
+#define NV0073_CTRL_DFP_FLAGS_LANE 5:3
+#define NV0073_CTRL_DFP_FLAGS_LANE_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_SINGLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_DUAL (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_QUAD (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_OCT (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT 6:6
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_DISABLE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_60HZ_RR (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER 7:7
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_NORMAL (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_DISABLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE 8:8
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE 9:9
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE 10:10
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE 11:11
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE 12:12
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED 14:14
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT 15:15
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT 16:16
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_PREFER_RBR (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW 19:17
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LINK 21:20
+#define NV0073_CTRL_DFP_FLAGS_LINK_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_SINGLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_DUAL (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID 22:22
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID 24:23
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_A (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_B (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_GANGED (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED 25:25
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_PHY_REPEATER_COUNT 29:26
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE 30:30
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_TRUE (0x00000001U)
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE (0x730126U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 head;
+ NvU32 flags;
+ NvU32 displayId;
+} NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS;
+
+typedef NvU32 NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG;
+
+typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_INFO {
+ NvU32 displayMask;
+ NvU32 sorType;
+} NV0073_CTRL_DFP_ASSIGN_SOR_INFO;
+
+#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS 4U
+
+#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR (0x731152U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU8 sorExcludeMask;
+ NvU32 slaveDisplayId;
+ NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG forceSublinkConfig;
+ NvBool bIs2Head1Or;
+ NvU32 sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
+ NV0073_CTRL_DFP_ASSIGN_SOR_INFO sorAssignListWithTag[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
+ NvU8 reservedSorMask;
+ NvU32 flags;
+} NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS;
+
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO 0:0
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_OPTIMAL (0x00000001U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_DEFAULT (0x00000000U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE 1:1
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_NO (0x00000000U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_YES (0x00000001U)
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS (0x730292U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS (0x730291U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 brightness;
+ NvBool bUncalibrated;
+} NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS;
+
+#define NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS (0x731144U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER 96U
+
+typedef struct NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 numELDSize;
+ NvU8 bufferELD[NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER];
+ NvU32 maxFreqSupported;
+ NvU32 ctrl;
+ NvU32 deviceEntry;
+} NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS;
+
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD 0:0
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV 1:1
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_TRUE (0x00000001U)
+
+#define NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES 2048U
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2 (0x730245U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 bufferSize;
+ NvU32 flags;
+ NvU8 edidBuffer[NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES];
+} NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE (0x730273U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS {
+ NvU8 subDeviceInstance;
+ NvU32 displayId;
+ NvU8 enable;
+} NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS (0x730293U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 caps;
+} NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS (0x730293U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS_MESSAGE_ID" */
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED 0:0
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED 1:1
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED 2:2
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED 5:3
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED 6:6
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED 9:7
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
+
+#define NV0073_CTRL_SET_OD_MAX_PACKET_SIZE 36U
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET (0x730288U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 transmitControl;
+ NvU32 packetSize;
+ NvU32 targetHead;
+ NvBool bUsePsrHeadforSdp;
+ NvU8 aPacket[NV0073_CTRL_SET_OD_MAX_PACKET_SIZE];
+} NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE 0:0
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_NO (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_YES (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME 1:1
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME 2:2
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK 3:3
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE 4:4
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT 5:5
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_SW_CONTROLLED (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_HW_CONTROLLED (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY 6:6
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_FALSE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_TRUE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING 7:7
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_FALSE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_TRUE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE 9:8
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME0 (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME1 (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE 31:31
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_NO (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_YES (0x0000001U)
+
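+/*
+ * Editor's sketch, not from the RM headers: transmitControl is composed from
+ * the bit ranges above; the remaining ranges select the repetition policy
+ * (other-frame, single-frame, on-hblank, immediate). A minimal "enable the
+ * packet" value, for illustration only:
+ *
+ *	NvU32 tc = 0x1;	// TRANSMIT_CONTROL_ENABLE_YES, bit 0:0
+ */
+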
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM (0x730275U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS {
+ NvU8 subDeviceInstance;
+ NvU32 displayId;
+ NvU8 mute;
+} NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS;
+
+#define NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE 16U
+
+#define NV0073_CTRL_CMD_DP_AUXCH_CTRL (0x731341U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_AUXCH_CTRL_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_AUXCH_CTRL_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvBool bAddrOnly;
+ NvU32 cmd;
+ NvU32 addr;
+ NvU8 data[NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE];
+ NvU32 size;
+ NvU32 replyType;
+ NvU32 retryTimeMs;
+} NV0073_CTRL_DP_AUXCH_CTRL_PARAMS;
+
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE 3:3
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_I2C (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_AUX (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT 2:2
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE 1:0
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_READ (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE_STATUS (0x00000002U)
+
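+/*
+ * Editor's sketch, not from the RM headers: cmd packs the AUX request type
+ * from the ranges above; read data comes back in data[] with the sink's ack
+ * in replyType. A native AUX read might be composed as (illustration only):
+ *
+ *	ctrl.cmd  = (1 << 3)	// CMD_TYPE_AUX  (bit 3:3)
+ *	          | (1 << 0);	// REQ_TYPE_READ (bits 1:0)
+ *	ctrl.addr = dpcd_addr;	// hypothetical DPCD offset
+ */
+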
+#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES (0x731377U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES 8U
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS {
+ // In
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU16 linkRateTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+
+ // Out
+ NvU8 linkBwTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+ NvU8 linkBwCount;
+} NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_CTRL (0x731343U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_CTRL_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 cmd;
+ NvU32 data;
+ NvU32 err;
+ NvU32 retryTimeMs;
+ NvU32 eightLaneDpcdBaseAddr;
+} NV0073_CTRL_DP_CTRL_PARAMS;
+
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT 0:0
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW 1:1
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD 2:2
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_UNUSED 3:3
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE 4:4
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_SINGLE_STREAM (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_MULTI_STREAM (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING 5:5
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING 6:6
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING 7:7
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING 8:8
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_DEFAULT (0x00000000U)
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_FORCE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING 9:9
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED 10:10
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING 12:11
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_DONOT_TOGGLE_TRANSMISSION (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_TOGGLE_TRANSMISSION_ON (0x00000002U)
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER 13:13
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG 14:14
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC 15:15
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST 29:29
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE 30:30
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG 31:31
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE 3:0
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_BEGIN (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHALLENGE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHECK (0x00000002U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_BEGIN (0x00000003U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHALLENGE (0x00000004U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHECK (0x00000005U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_RESET_MONITOR (0x00000006U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_INIT_PUBLIC_INFO (0x00000007U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_GET_PUBLIC_INFO (0x00000008U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_STATUS_CHECK (0x00000009U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_OK (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_PENDING (0x80000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_READ_ERROR (0x80000002U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_WRITE_ERROR (0x80000003U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_DEVICE_ERROR (0x80000004U)
+
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT 4:0
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_0 (0x00000000U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_1 (0x00000001U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_2 (0x00000002U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_4 (0x00000004U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_8 (0x00000008U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW 15:8
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_1_62GBPS (0x00000006U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_16GBPS (0x00000008U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_43GBPS (0x00000009U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_70GBPS (0x0000000AU)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_3_24GBPS (0x0000000CU)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_4_32GBPS (0x00000010U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_5_40GBPS (0x00000014U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_8_10GBPS (0x0000001EU)
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING 18:18
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_NO (0x00000000U)
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_YES (0x00000001U)
+#define NV0073_CTRL_DP_DATA_TARGET 22:19
+#define NV0073_CTRL_DP_DATA_TARGET_SINK (0x00000000U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_0 (0x00000001U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_1 (0x00000002U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_2 (0x00000003U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_3 (0x00000004U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_4 (0x00000005U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_5 (0x00000006U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_6 (0x00000007U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_7 (0x00000008U)
+
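+/*
+ * Editor's sketch, not from the RM headers: NV0073_CTRL_CMD_DP_CTRL takes a
+ * cmd mask selecting which parameters to program and a data word holding the
+ * values. Requesting 4 lanes at HBR2 could look like (illustration only):
+ *
+ *	p.cmd  = (1 << 0)	// SET_LANE_COUNT_TRUE, bit 0:0
+ *	       | (1 << 1);	// SET_LINK_BW_TRUE,    bit 1:1
+ *	p.data = NV0073_CTRL_DP_DATA_SET_LANE_COUNT_4		  // bits 4:0
+ *	       | (NV0073_CTRL_DP_DATA_SET_LINK_BW_5_40GBPS << 8); // bits 15:8
+ */
+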
+#define NV0073_CTRL_CMD_DP_SET_LANE_DATA (0x731346U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_LANE_DATA_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_MAX_LANES 8U
+
+typedef struct NV0073_CTRL_DP_LANE_DATA_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 numLanes;
+ NvU32 data[NV0073_CTRL_MAX_LANES];
+} NV0073_CTRL_DP_LANE_DATA_PARAMS;
+
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS 1:0
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_NONE (0x00000000U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL1 (0x00000001U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL2 (0x00000002U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL3 (0x00000003U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT 3:2
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL0 (0x00000000U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL1 (0x00000001U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL2 (0x00000002U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL3 (0x00000003U)
+
+#define NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID (0x73135bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 preferredDisplayId;
+
+ NvBool force;
+ NvBool useBFM;
+
+ NvU32 displayIdAssigned;
+ NvU32 allDisplayMask;
+} NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID (0x73135cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+} NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_CONFIG_STREAM (0x731362U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 head;
+ NvU32 sorIndex;
+ NvU32 dpLink;
+
+ NvBool bEnableOverride;
+ NvBool bMST;
+ NvU32 singleHeadMultistreamMode;
+ NvU32 hBlankSym;
+ NvU32 vBlankSym;
+ NvU32 colorFormat;
+ NvBool bEnableTwoHeadOneOr;
+
+ struct {
+ NvU32 slotStart;
+ NvU32 slotEnd;
+ NvU32 PBN;
+ NvU32 Timeslice;
+ NvBool sendACT; // deprecated - use NV0073_CTRL_CMD_DP_SEND_ACT
+ NvU32 singleHeadMSTPipeline;
+ NvBool bEnableAudioOverRightPanel;
+ } MST;
+
+ struct {
+ NvBool bEnhancedFraming;
+ NvU32 tuSize;
+ NvU32 waterMark;
+ NvU32 actualPclkHz; // deprecated - use MvidWarParams
+ NvU32 linkClkFreqHz; // deprecated - use MvidWarParams
+ NvBool bEnableAudioOverRightPanel;
+ struct {
+ NvU32 activeCnt;
+ NvU32 activeFrac;
+ NvU32 activePolarity;
+ NvBool mvidWarEnabled;
+ struct {
+ NvU32 actualPclkHz;
+ NvU32 linkClkFreqHz;
+ } MvidWarParams;
+ } Legacy;
+ } SST;
+} NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS;
+
+#define NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE (0x731150U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvBool enable;
+} NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM (0x731359U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 mute;
+} NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER (0x20800a58) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS {
+ NvU32 addressSpace;
+ NV_DECLARE_ALIGNED(NvU64 physicalAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 limit, 8);
+ NvU32 cacheSnoop;
+ NvU32 hclass;
+ NvU32 channelInstance;
+ NvBool valid;
+} NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS;
+
+#define ADDR_SYSMEM (1) // System memory (PCI)
+
+#define ADDR_FBMEM 2 // Frame buffer memory space
+
+typedef struct
+{
+ NvV32 channelInstance; // One of the n channel instances of a given channel type.
+ // All PIO channels have two instances (one per head).
+ NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors.
+ NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of control region for PIO channel
+} NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+ NvV32 channelInstance; // One of the n channel instances of a given channel type.
+ // Note that core channel has only one instance
+ // while all others have two (one per head).
+ NvHandle hObjectBuffer; // ctx dma handle for DMA push buffer
+ NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors/notifications
+ NvU32 offset; // Initial offset for put/get, usually zero.
+ NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of UDISP GET/PUT regs
+
+ NvU32 flags;
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB 1:1
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_YES 0x00000000
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_NO 0x00000001
+
+} NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h
new file mode 100644
index 000000000000..b26dfc8f8087
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h
@@ -0,0 +1,260 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_ENGINE_H__
+#define __NVRM_ENGINE_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define MC_ENGINE_IDX_NULL 0 // This must be 0
+#define MC_ENGINE_IDX_TMR 1
+#define MC_ENGINE_IDX_DISP 2
+#define MC_ENGINE_IDX_FB 3
+#define MC_ENGINE_IDX_FIFO 4
+#define MC_ENGINE_IDX_VIDEO 5
+#define MC_ENGINE_IDX_MD 6
+#define MC_ENGINE_IDX_BUS 7
+#define MC_ENGINE_IDX_PMGR 8
+#define MC_ENGINE_IDX_VP2 9
+#define MC_ENGINE_IDX_CIPHER 10
+#define MC_ENGINE_IDX_BIF 11
+#define MC_ENGINE_IDX_PPP 12
+#define MC_ENGINE_IDX_PRIVRING 13
+#define MC_ENGINE_IDX_PMU 14
+#define MC_ENGINE_IDX_CE0 15
+#define MC_ENGINE_IDX_CE1 16
+#define MC_ENGINE_IDX_CE2 17
+#define MC_ENGINE_IDX_CE3 18
+#define MC_ENGINE_IDX_CE4 19
+#define MC_ENGINE_IDX_CE5 20
+#define MC_ENGINE_IDX_CE6 21
+#define MC_ENGINE_IDX_CE7 22
+#define MC_ENGINE_IDX_CE8 23
+#define MC_ENGINE_IDX_CE9 24
+#define MC_ENGINE_IDX_CE_MAX MC_ENGINE_IDX_CE9
+#define MC_ENGINE_IDX_VIC 35
+#define MC_ENGINE_IDX_ISOHUB 36
+#define MC_ENGINE_IDX_VGPU 37
+#define MC_ENGINE_IDX_MSENC 38
+#define MC_ENGINE_IDX_MSENC1 39
+#define MC_ENGINE_IDX_MSENC2 40
+#define MC_ENGINE_IDX_C2C 41
+#define MC_ENGINE_IDX_LTC 42
+#define MC_ENGINE_IDX_FBHUB 43
+#define MC_ENGINE_IDX_HDACODEC 44
+#define MC_ENGINE_IDX_GMMU 45
+#define MC_ENGINE_IDX_SEC2 46
+#define MC_ENGINE_IDX_FSP 47
+#define MC_ENGINE_IDX_NVLINK 48
+#define MC_ENGINE_IDX_GSP 49
+#define MC_ENGINE_IDX_NVJPG 50
+#define MC_ENGINE_IDX_NVJPEG MC_ENGINE_IDX_NVJPG
+#define MC_ENGINE_IDX_NVJPEG0 MC_ENGINE_IDX_NVJPEG
+#define MC_ENGINE_IDX_NVJPEG1 51
+#define MC_ENGINE_IDX_NVJPEG2 52
+#define MC_ENGINE_IDX_NVJPEG3 53
+#define MC_ENGINE_IDX_NVJPEG4 54
+#define MC_ENGINE_IDX_NVJPEG5 55
+#define MC_ENGINE_IDX_NVJPEG6 56
+#define MC_ENGINE_IDX_NVJPEG7 57
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT 58
+#define MC_ENGINE_IDX_ACCESS_CNTR 59
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT 60
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT_ERROR 61
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_ERROR 62
+#define MC_ENGINE_IDX_INFO_FAULT 63
+#define MC_ENGINE_IDX_BSP 64
+#define MC_ENGINE_IDX_NVDEC MC_ENGINE_IDX_BSP
+#define MC_ENGINE_IDX_NVDEC0 MC_ENGINE_IDX_NVDEC
+#define MC_ENGINE_IDX_NVDEC1 65
+#define MC_ENGINE_IDX_NVDEC2 66
+#define MC_ENGINE_IDX_NVDEC3 67
+#define MC_ENGINE_IDX_NVDEC4 68
+#define MC_ENGINE_IDX_NVDEC5 69
+#define MC_ENGINE_IDX_NVDEC6 70
+#define MC_ENGINE_IDX_NVDEC7 71
+#define MC_ENGINE_IDX_CPU_DOORBELL 72
+#define MC_ENGINE_IDX_PRIV_DOORBELL 73
+#define MC_ENGINE_IDX_MMU_ECC_ERROR 74
+#define MC_ENGINE_IDX_BLG 75
+#define MC_ENGINE_IDX_PERFMON 76
+#define MC_ENGINE_IDX_BUF_RESET 77
+#define MC_ENGINE_IDX_XBAR 78
+#define MC_ENGINE_IDX_ZPW 79
+#define MC_ENGINE_IDX_OFA0 80
+#define MC_ENGINE_IDX_TEGRA 81
+#define MC_ENGINE_IDX_GR 82
+#define MC_ENGINE_IDX_GR0 MC_ENGINE_IDX_GR
+#define MC_ENGINE_IDX_GR1 83
+#define MC_ENGINE_IDX_GR2 84
+#define MC_ENGINE_IDX_GR3 85
+#define MC_ENGINE_IDX_GR4 86
+#define MC_ENGINE_IDX_GR5 87
+#define MC_ENGINE_IDX_GR6 88
+#define MC_ENGINE_IDX_GR7 89
+#define MC_ENGINE_IDX_ESCHED 90
+#define MC_ENGINE_IDX_ESCHED__SIZE 64
+#define MC_ENGINE_IDX_GR_FECS_LOG 154
+#define MC_ENGINE_IDX_GR0_FECS_LOG MC_ENGINE_IDX_GR_FECS_LOG
+#define MC_ENGINE_IDX_GR1_FECS_LOG 155
+#define MC_ENGINE_IDX_GR2_FECS_LOG 156
+#define MC_ENGINE_IDX_GR3_FECS_LOG 157
+#define MC_ENGINE_IDX_GR4_FECS_LOG 158
+#define MC_ENGINE_IDX_GR5_FECS_LOG 159
+#define MC_ENGINE_IDX_GR6_FECS_LOG 160
+#define MC_ENGINE_IDX_GR7_FECS_LOG 161
+#define MC_ENGINE_IDX_TMR_SWRL 162
+#define MC_ENGINE_IDX_DISP_GSP 163
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT_CPU 164
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_CPU 165
+#define MC_ENGINE_IDX_PXUC 166
+#define MC_ENGINE_IDX_MAX 167 // This must be kept as the max bit if we need to add more engines
+#define MC_ENGINE_IDX_INVALID 0xFFFFFFFF
+#define MC_ENGINE_IDX_GRn(x) (MC_ENGINE_IDX_GR0 + (x))
+#define MC_ENGINE_IDX_GRn_FECS_LOG(x) (MC_ENGINE_IDX_GR0_FECS_LOG + (x))
+#define MC_ENGINE_IDX_CE(x) (MC_ENGINE_IDX_CE0 + (x))
+#define MC_ENGINE_IDX_MSENCn(x) (MC_ENGINE_IDX_MSENC + (x))
+#define MC_ENGINE_IDX_NVDECn(x) (MC_ENGINE_IDX_NVDEC + (x))
+#define MC_ENGINE_IDX_NVJPEGn(x) (MC_ENGINE_IDX_NVJPEG + (x))
+#define MC_ENGINE_IDX_ESCHEDn(x) (MC_ENGINE_IDX_ESCHED + (x))
+
+typedef enum
+{
+ RM_ENGINE_TYPE_NULL = (0x00000000),
+ RM_ENGINE_TYPE_GR0 = (0x00000001),
+ RM_ENGINE_TYPE_GR1 = (0x00000002),
+ RM_ENGINE_TYPE_GR2 = (0x00000003),
+ RM_ENGINE_TYPE_GR3 = (0x00000004),
+ RM_ENGINE_TYPE_GR4 = (0x00000005),
+ RM_ENGINE_TYPE_GR5 = (0x00000006),
+ RM_ENGINE_TYPE_GR6 = (0x00000007),
+ RM_ENGINE_TYPE_GR7 = (0x00000008),
+ RM_ENGINE_TYPE_COPY0 = (0x00000009),
+ RM_ENGINE_TYPE_COPY1 = (0x0000000a),
+ RM_ENGINE_TYPE_COPY2 = (0x0000000b),
+ RM_ENGINE_TYPE_COPY3 = (0x0000000c),
+ RM_ENGINE_TYPE_COPY4 = (0x0000000d),
+ RM_ENGINE_TYPE_COPY5 = (0x0000000e),
+ RM_ENGINE_TYPE_COPY6 = (0x0000000f),
+ RM_ENGINE_TYPE_COPY7 = (0x00000010),
+ RM_ENGINE_TYPE_COPY8 = (0x00000011),
+ RM_ENGINE_TYPE_COPY9 = (0x00000012),
+ RM_ENGINE_TYPE_NVDEC0 = (0x0000001d),
+ RM_ENGINE_TYPE_NVDEC1 = (0x0000001e),
+ RM_ENGINE_TYPE_NVDEC2 = (0x0000001f),
+ RM_ENGINE_TYPE_NVDEC3 = (0x00000020),
+ RM_ENGINE_TYPE_NVDEC4 = (0x00000021),
+ RM_ENGINE_TYPE_NVDEC5 = (0x00000022),
+ RM_ENGINE_TYPE_NVDEC6 = (0x00000023),
+ RM_ENGINE_TYPE_NVDEC7 = (0x00000024),
+ RM_ENGINE_TYPE_NVENC0 = (0x00000025),
+ RM_ENGINE_TYPE_NVENC1 = (0x00000026),
+ RM_ENGINE_TYPE_NVENC2 = (0x00000027),
+ RM_ENGINE_TYPE_VP = (0x00000028),
+ RM_ENGINE_TYPE_ME = (0x00000029),
+ RM_ENGINE_TYPE_PPP = (0x0000002a),
+ RM_ENGINE_TYPE_MPEG = (0x0000002b),
+ RM_ENGINE_TYPE_SW = (0x0000002c),
+ RM_ENGINE_TYPE_TSEC = (0x0000002d),
+ RM_ENGINE_TYPE_VIC = (0x0000002e),
+ RM_ENGINE_TYPE_MP = (0x0000002f),
+ RM_ENGINE_TYPE_SEC2 = (0x00000030),
+ RM_ENGINE_TYPE_HOST = (0x00000031),
+ RM_ENGINE_TYPE_DPU = (0x00000032),
+ RM_ENGINE_TYPE_PMU = (0x00000033),
+ RM_ENGINE_TYPE_FBFLCN = (0x00000034),
+ RM_ENGINE_TYPE_NVJPEG0 = (0x00000035),
+ RM_ENGINE_TYPE_NVJPEG1 = (0x00000036),
+ RM_ENGINE_TYPE_NVJPEG2 = (0x00000037),
+ RM_ENGINE_TYPE_NVJPEG3 = (0x00000038),
+ RM_ENGINE_TYPE_NVJPEG4 = (0x00000039),
+ RM_ENGINE_TYPE_NVJPEG5 = (0x0000003a),
+ RM_ENGINE_TYPE_NVJPEG6 = (0x0000003b),
+ RM_ENGINE_TYPE_NVJPEG7 = (0x0000003c),
+ RM_ENGINE_TYPE_OFA = (0x0000003d),
+ RM_ENGINE_TYPE_LAST = (0x0000003e),
+} RM_ENGINE_TYPE;
+
+#define NV2080_ENGINE_TYPE_NULL (0x00000000)
+#define NV2080_ENGINE_TYPE_GRAPHICS (0x00000001)
+#define NV2080_ENGINE_TYPE_GR0 NV2080_ENGINE_TYPE_GRAPHICS
+#define NV2080_ENGINE_TYPE_GR1 (0x00000002)
+#define NV2080_ENGINE_TYPE_GR2 (0x00000003)
+#define NV2080_ENGINE_TYPE_GR3 (0x00000004)
+#define NV2080_ENGINE_TYPE_GR4 (0x00000005)
+#define NV2080_ENGINE_TYPE_GR5 (0x00000006)
+#define NV2080_ENGINE_TYPE_GR6 (0x00000007)
+#define NV2080_ENGINE_TYPE_GR7 (0x00000008)
+#define NV2080_ENGINE_TYPE_COPY0 (0x00000009)
+#define NV2080_ENGINE_TYPE_COPY1 (0x0000000a)
+#define NV2080_ENGINE_TYPE_COPY2 (0x0000000b)
+#define NV2080_ENGINE_TYPE_COPY3 (0x0000000c)
+#define NV2080_ENGINE_TYPE_COPY4 (0x0000000d)
+#define NV2080_ENGINE_TYPE_COPY5 (0x0000000e)
+#define NV2080_ENGINE_TYPE_COPY6 (0x0000000f)
+#define NV2080_ENGINE_TYPE_COPY7 (0x00000010)
+#define NV2080_ENGINE_TYPE_COPY8 (0x00000011)
+#define NV2080_ENGINE_TYPE_COPY9 (0x00000012)
+#define NV2080_ENGINE_TYPE_BSP (0x00000013)
+#define NV2080_ENGINE_TYPE_NVDEC0 NV2080_ENGINE_TYPE_BSP
+#define NV2080_ENGINE_TYPE_NVDEC1 (0x00000014)
+#define NV2080_ENGINE_TYPE_NVDEC2 (0x00000015)
+#define NV2080_ENGINE_TYPE_NVDEC3 (0x00000016)
+#define NV2080_ENGINE_TYPE_NVDEC4 (0x00000017)
+#define NV2080_ENGINE_TYPE_NVDEC5 (0x00000018)
+#define NV2080_ENGINE_TYPE_NVDEC6 (0x00000019)
+#define NV2080_ENGINE_TYPE_NVDEC7 (0x0000001a)
+#define NV2080_ENGINE_TYPE_MSENC (0x0000001b)
+#define NV2080_ENGINE_TYPE_NVENC0 NV2080_ENGINE_TYPE_MSENC /* Mutually exclusive alias */
+#define NV2080_ENGINE_TYPE_NVENC1 (0x0000001c)
+#define NV2080_ENGINE_TYPE_NVENC2 (0x0000001d)
+#define NV2080_ENGINE_TYPE_VP (0x0000001e)
+#define NV2080_ENGINE_TYPE_ME (0x0000001f)
+#define NV2080_ENGINE_TYPE_PPP (0x00000020)
+#define NV2080_ENGINE_TYPE_MPEG (0x00000021)
+#define NV2080_ENGINE_TYPE_SW (0x00000022)
+#define NV2080_ENGINE_TYPE_CIPHER (0x00000023)
+#define NV2080_ENGINE_TYPE_TSEC NV2080_ENGINE_TYPE_CIPHER
+#define NV2080_ENGINE_TYPE_VIC (0x00000024)
+#define NV2080_ENGINE_TYPE_MP (0x00000025)
+#define NV2080_ENGINE_TYPE_SEC2 (0x00000026)
+#define NV2080_ENGINE_TYPE_HOST (0x00000027)
+#define NV2080_ENGINE_TYPE_DPU (0x00000028)
+#define NV2080_ENGINE_TYPE_PMU (0x00000029)
+#define NV2080_ENGINE_TYPE_FBFLCN (0x0000002a)
+#define NV2080_ENGINE_TYPE_NVJPG (0x0000002b)
+#define NV2080_ENGINE_TYPE_NVJPEG0 NV2080_ENGINE_TYPE_NVJPG
+#define NV2080_ENGINE_TYPE_NVJPEG1 (0x0000002c)
+#define NV2080_ENGINE_TYPE_NVJPEG2 (0x0000002d)
+#define NV2080_ENGINE_TYPE_NVJPEG3 (0x0000002e)
+#define NV2080_ENGINE_TYPE_NVJPEG4 (0x0000002f)
+#define NV2080_ENGINE_TYPE_NVJPEG5 (0x00000030)
+#define NV2080_ENGINE_TYPE_NVJPEG6 (0x00000031)
+#define NV2080_ENGINE_TYPE_NVJPEG7 (0x00000032)
+#define NV2080_ENGINE_TYPE_OFA (0x00000033)
+#define NV2080_ENGINE_TYPE_LAST (0x0000003e)
+#define NV2080_ENGINE_TYPE_ALLENGINES (0xffffffff)
+#define NV2080_ENGINE_TYPE_COPY_SIZE 10
+#define NV2080_ENGINE_TYPE_NVENC_SIZE 3
+#define NV2080_ENGINE_TYPE_NVJPEG_SIZE 8
+#define NV2080_ENGINE_TYPE_NVDEC_SIZE 8
+#define NV2080_ENGINE_TYPE_GR_SIZE 8
+#define NV2080_ENGINE_TYPE_COPY(i) (NV2080_ENGINE_TYPE_COPY0+(i))
+#define NV2080_ENGINE_TYPE_IS_COPY(i) (((i) >= NV2080_ENGINE_TYPE_COPY0) && ((i) <= NV2080_ENGINE_TYPE_COPY9))
+#define NV2080_ENGINE_TYPE_COPY_IDX(i) ((i) - NV2080_ENGINE_TYPE_COPY0)
+#define NV2080_ENGINE_TYPE_NVENC(i) (NV2080_ENGINE_TYPE_NVENC0+(i))
+#define NV2080_ENGINE_TYPE_IS_NVENC(i) (((i) >= NV2080_ENGINE_TYPE_NVENC0) && ((i) < NV2080_ENGINE_TYPE_NVENC(NV2080_ENGINE_TYPE_NVENC_SIZE)))
+#define NV2080_ENGINE_TYPE_NVENC_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVENC0)
+#define NV2080_ENGINE_TYPE_NVDEC(i) (NV2080_ENGINE_TYPE_NVDEC0+(i))
+#define NV2080_ENGINE_TYPE_IS_NVDEC(i) (((i) >= NV2080_ENGINE_TYPE_NVDEC0) && ((i) < NV2080_ENGINE_TYPE_NVDEC(NV2080_ENGINE_TYPE_NVDEC_SIZE)))
+#define NV2080_ENGINE_TYPE_NVDEC_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVDEC0)
+#define NV2080_ENGINE_TYPE_NVJPEG(i) (NV2080_ENGINE_TYPE_NVJPEG0+(i))
+#define NV2080_ENGINE_TYPE_IS_NVJPEG(i) (((i) >= NV2080_ENGINE_TYPE_NVJPEG0) && ((i) < NV2080_ENGINE_TYPE_NVJPEG(NV2080_ENGINE_TYPE_NVJPEG_SIZE)))
+#define NV2080_ENGINE_TYPE_NVJPEG_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVJPEG0)
+#define NV2080_ENGINE_TYPE_GR(i) (NV2080_ENGINE_TYPE_GR0 + (i))
+#define NV2080_ENGINE_TYPE_IS_GR(i) (((i) >= NV2080_ENGINE_TYPE_GR0) && ((i) < NV2080_ENGINE_TYPE_GR(NV2080_ENGINE_TYPE_GR_SIZE)))
+#define NV2080_ENGINE_TYPE_GR_IDX(i) ((i) - NV2080_ENGINE_TYPE_GR0)
+#define NV2080_ENGINE_TYPE_IS_VALID(i) (((i) > (NV2080_ENGINE_TYPE_NULL)) && ((i) < (NV2080_ENGINE_TYPE_LAST)))
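+
+/*
+ * Editor's sketch, not from the RM headers: the *_IS_*() / *_IDX() helpers
+ * classify an NV2080_ENGINE_TYPE_* value and recover its per-class index,
+ * e.g. (illustration only):
+ *
+ *	if (NV2080_ENGINE_TYPE_IS_COPY(type))
+ *		ce = NV2080_ENGINE_TYPE_COPY_IDX(type);	// 0..9 for COPY0..COPY9
+ */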
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h
new file mode 100644
index 000000000000..057f7220c225
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_EVENT_H__
+#define __NVRM_EVENT_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV01_EVENT_KERNEL_CALLBACK_EX (0x0000007e)
+
+typedef struct NV0005_ALLOC_PARAMETERS {
+ NvHandle hParentClient;
+ NvHandle hSrcResource;
+
+ NvV32 hClass;
+ NvV32 notifyIndex;
+ NV_DECLARE_ALIGNED(NvP64 data, 8);
+} NV0005_ALLOC_PARAMETERS;
+
+#define NV01_EVENT_CLIENT_RM (0x04000000)
+
+#define NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION (0x20800301) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS {
+ NvU32 event;
+ NvU32 action;
+ NvBool bNotifyState;
+ NvU32 info32;
+ NvU16 info16;
+} NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS;
+
+#define NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT (0x00000002)
+
+typedef struct rpc_post_event_v17_00
+{
+ NvHandle hClient;
+ NvHandle hEvent;
+ NvU32 notifyIndex;
+ NvU32 data;
+ NvU16 info16;
+ NvU32 status;
+ NvU32 eventDataSize;
+ NvBool bNotifyList;
+ NvU8 eventData[];
+} rpc_post_event_v17_00;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h
new file mode 100644
index 000000000000..28786ef013a2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_FBSR_H__
+#define __NVRM_FBSR_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV01_MEMORY_LIST_FBMEM (0x00000082)
+
+#define NV01_MEMORY_LIST_SYSTEM (0x00000081)
+
+#define NVOS02_FLAGS_PHYSICALITY 7:4
+#define NVOS02_FLAGS_PHYSICALITY_CONTIGUOUS (0x00000000)
+#define NVOS02_FLAGS_PHYSICALITY_NONCONTIGUOUS (0x00000001)
+#define NVOS02_FLAGS_LOCATION 11:8
+#define NVOS02_FLAGS_LOCATION_PCI (0x00000000)
+#define NVOS02_FLAGS_LOCATION_AGP (0x00000001)
+#define NVOS02_FLAGS_LOCATION_VIDMEM (0x00000002)
+#define NVOS02_FLAGS_COHERENCY 15:12
+#define NVOS02_FLAGS_COHERENCY_UNCACHED (0x00000000)
+#define NVOS02_FLAGS_COHERENCY_CACHED (0x00000001)
+#define NVOS02_FLAGS_COHERENCY_WRITE_COMBINE (0x00000002)
+#define NVOS02_FLAGS_COHERENCY_WRITE_THROUGH (0x00000003)
+#define NVOS02_FLAGS_COHERENCY_WRITE_PROTECT (0x00000004)
+#define NVOS02_FLAGS_COHERENCY_WRITE_BACK (0x00000005)
+#define NVOS02_FLAGS_ALLOC 17:16
+#define NVOS02_FLAGS_ALLOC_NONE (0x00000001)
+#define NVOS02_FLAGS_GPU_CACHEABLE 18:18
+#define NVOS02_FLAGS_GPU_CACHEABLE_NO (0x00000000)
+#define NVOS02_FLAGS_GPU_CACHEABLE_YES (0x00000001)
+#define NVOS02_FLAGS_KERNEL_MAPPING 19:19
+#define NVOS02_FLAGS_KERNEL_MAPPING_NO_MAP (0x00000000)
+#define NVOS02_FLAGS_KERNEL_MAPPING_MAP (0x00000001)
+#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY 20:20
+#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_NO (0x00000000)
+#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_YES (0x00000001)
+#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY 21:21
+#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_NO (0x00000000)
+#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_YES (0x00000001)
+#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY 22:22
+#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_NO (0x00000000)
+#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_YES (0x00000001)
+#define NVOS02_FLAGS_PEER_MAP_OVERRIDE 23:23
+#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_DEFAULT (0x00000000)
+#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_REQUIRED (0x00000001)
+#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT 24:24
+#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT_APERTURE (0x00000001)
+#define NVOS02_FLAGS_MEMORY_PROTECTION 26:25
+#define NVOS02_FLAGS_MEMORY_PROTECTION_DEFAULT (0x00000000)
+#define NVOS02_FLAGS_MEMORY_PROTECTION_PROTECTED (0x00000001)
+#define NVOS02_FLAGS_MEMORY_PROTECTION_UNPROTECTED (0x00000002)
+#define NVOS02_FLAGS_MAPPING 31:30
+#define NVOS02_FLAGS_MAPPING_DEFAULT (0x00000000)
+#define NVOS02_FLAGS_MAPPING_NO_MAP (0x00000001)
+#define NVOS02_FLAGS_MAPPING_NEVER_MAP (0x00000002)
+
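+/*
+ * Editor's sketch, not from the RM headers: NVOS02 flags are OR-ed bit
+ * ranges. A physically contiguous, write-combined sysmem (PCI) list
+ * allocation might use (illustration only; values per the defines above):
+ *
+ *	flags = (NVOS02_FLAGS_PHYSICALITY_CONTIGUOUS << 4)	// bits 7:4
+ *	      | (NVOS02_FLAGS_LOCATION_PCI << 8)		// bits 11:8
+ *	      | (NVOS02_FLAGS_COHERENCY_WRITE_COMBINE << 12);	// bits 15:12
+ */
+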
+struct pte_desc
+{
+ NvU32 idr:2;
+ NvU32 reserved1:14;
+ NvU32 length:16;
+ union {
+ NvU64 pte; // PTE when IDR==0; PDE when IDR > 0
+ NvU64 pde; // PTE when IDR==0; PDE when IDR > 0
+ } pte_pde[] NV_ALIGN_BYTES(8); // PTE when IDR==0; PDE when IDR > 0
+};
+
+typedef struct rpc_alloc_memory_v13_01
+{
+ NvHandle hClient;
+ NvHandle hDevice;
+ NvHandle hMemory;
+ NvU32 hClass;
+ NvU32 flags;
+ NvU32 pteAdjust;
+ NvU32 format;
+ NvU64 length NV_ALIGN_BYTES(8);
+ NvU32 pageCount;
+ struct pte_desc pteDesc;
+} rpc_alloc_memory_v13_01;
+
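+/*
+ * Editor's note, not from the RM headers: pteDesc ends in a flexible array,
+ * so the RPC payload grows with pageCount. A sender would size the message
+ * roughly as (illustration only):
+ *
+ *	len = sizeof(rpc_alloc_memory_v13_01) +
+ *	      pages * sizeof(rpc->pteDesc.pte_pde[0]);	// 8 bytes per entry
+ */
+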
+#define FBSR_TYPE_DMA 4 // Copy using DMA. Fastest.
+
+#define NV2080_CTRL_CMD_INTERNAL_FBSR_INIT (0x20800ac2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS {
+ NvU32 fbsrType;
+ NvU32 numRegions;
+ NvHandle hClient;
+ NvHandle hSysMem;
+ NV_DECLARE_ALIGNED(NvU64 gspFbAllocsSysOffset, 8);
+ NvBool bEnteringGcoffState;
+} NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO (0x20800ac3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS {
+ NvU32 fbsrType;
+ NvHandle hClient;
+ NvHandle hVidMem;
+ NV_DECLARE_ALIGNED(NvU64 vidOffset, 8);
+ NV_DECLARE_ALIGNED(NvU64 sysOffset, 8);
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+} NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h
new file mode 100644
index 000000000000..325fdd8b6090
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h
@@ -0,0 +1,350 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_FIFO_H__
+#define __NVRM_FIFO_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES 32
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES 16
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA 2
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN 16
+
+typedef struct NV2080_CTRL_FIFO_DEVICE_ENTRY {
+ NvU32 engineData[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES];
+ NvU32 pbdmaIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
+ NvU32 pbdmaFaultIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
+ NvU32 numPbdmas;
+ char engineName[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN];
+} NV2080_CTRL_FIFO_DEVICE_ENTRY;
+
+#define NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE (0x20801112) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS {
+ NvU32 baseIndex;
+ NvU32 numEntries;
+ NvBool bMore;
+ // C form: NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
+ NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
+} NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS;
+
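+/*
+ * Editor's sketch, not from the RM headers: the device info table is paged.
+ * A caller passes a baseIndex, receives up to MAX_ENTRIES entries, and loops
+ * while bMore is set (illustration only):
+ *
+ *	for (base = 0;; base += NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES) {
+ *		args.baseIndex = base;
+ *		// ... issue NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE ...
+ *		if (!args.bMore)
+ *			break;
+ *	}
+ */
+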
+typedef enum
+{
+ /* *************************************************************************
+ * Bug 3820969
+ * THINK BEFORE CHANGING ENUM ORDER HERE.
+ * VGPU-guest uses this same ordering. Because this enum is not versioned,
+ * changing the order here WILL BREAK old-guest-on-newer-host compatibility.
+ * ************************************************************************/
+
+ // *ENG_XYZ, e.g.: ENG_GR, ENG_CE etc.,
+ ENGINE_INFO_TYPE_ENG_DESC = 0,
+
+ // HW engine ID
+ ENGINE_INFO_TYPE_FIFO_TAG,
+
+ // RM_ENGINE_TYPE_*
+ ENGINE_INFO_TYPE_RM_ENGINE_TYPE,
+
+ //
+ // runlist id (meaning varies by GPU)
+ // Valid only for Esched-driven engines
+ //
+ ENGINE_INFO_TYPE_RUNLIST,
+
+ // NV_PFIFO_INTR_MMU_FAULT_ENG_ID_*
+ ENGINE_INFO_TYPE_MMU_FAULT_ID,
+
+ // ROBUST_CHANNEL_*
+ ENGINE_INFO_TYPE_RC_MASK,
+
+ // Reset Bit Position. On Ampere, only valid if not _INVALID
+ ENGINE_INFO_TYPE_RESET,
+
+ // Interrupt Bit Position
+ ENGINE_INFO_TYPE_INTR,
+
+ // log2(MC_ENGINE_*)
+ ENGINE_INFO_TYPE_MC,
+
+ // The DEV_TYPE_ENUM for this engine
+ ENGINE_INFO_TYPE_DEV_TYPE_ENUM,
+
+ // The particular instance of this engine type
+ ENGINE_INFO_TYPE_INSTANCE_ID,
+
+ //
+ // The base address for this engine's NV_RUNLIST. Valid only on Ampere+
+ // Valid only for Esched-driven engines
+ //
+ ENGINE_INFO_TYPE_RUNLIST_PRI_BASE,
+
+ //
+ // If this entry is a host-driven engine.
+ // Update _isEngineInfoTypeValidForOnlyHostDriven when adding any new entry.
+ //
+ ENGINE_INFO_TYPE_IS_HOST_DRIVEN_ENGINE,
+
+ //
+ // The index into the per-engine NV_RUNLIST registers. Valid only on Ampere+
+ // Valid only for Esched-driven engines
+ //
+ ENGINE_INFO_TYPE_RUNLIST_ENGINE_ID,
+
+ //
+ // The base address for this engine's NV_CHRAM registers. Valid only on
+ // Ampere+
+ //
+ // Valid only for Esched-driven engines
+ //
+ ENGINE_INFO_TYPE_CHRAM_PRI_BASE,
+
+ // This entry added to copy data at RMCTRL_EXPORT() call for Kernel RM
+ ENGINE_INFO_TYPE_KERNEL_RM_MAX,
+ // Used for iterating the engine info table by the index passed.
+ ENGINE_INFO_TYPE_INVALID = ENGINE_INFO_TYPE_KERNEL_RM_MAX,
+
+ // Size of FIFO_ENGINE_LIST.engineData
+ ENGINE_INFO_TYPE_ENGINE_DATA_ARRAY_SIZE = ENGINE_INFO_TYPE_INVALID,
+
+ // Input-only parameter for kfifoEngineInfoXlate.
+ ENGINE_INFO_TYPE_PBDMA_ID
+
+ /* *************************************************************************
+ * Bug 3820969
+ * THINK BEFORE CHANGING ENUM ORDER HERE.
+ * VGPU-guest uses this same ordering. Because this enum is not versioned,
+ * changing the order here WILL BREAK old-guest-on-newer-host compatibility.
+ * ************************************************************************/
+} ENGINE_INFO_TYPE;
+
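+/*
+ * Editor's note, not from the RM headers: these enum values index the
+ * engineData[] array of NV2080_CTRL_FIFO_DEVICE_ENTRY above, e.g.
+ * (illustration only):
+ *
+ *	rmtype  = entry->engineData[ENGINE_INFO_TYPE_RM_ENGINE_TYPE];
+ *	runlist = entry->engineData[ENGINE_INFO_TYPE_RUNLIST];
+ */
+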
+#define NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE (0x20802a08) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS {
+ NvU32 size;
+} NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS 0x40
+
+typedef struct NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO {
+ NvU32 engDesc;
+ NvU32 ctxAttr;
+ NvU32 ctxBufferSize;
+ NvU32 addrSpaceList;
+ NvU32 registerBase;
+} NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO;
+
+#define NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO (0x20800a42) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS {
+ NvU32 numConstructedFalcons;
+ NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO constructedFalconsTable[NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS];
+} NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS;
+
+#define NV_MAX_SUBDEVICES 8
+
+typedef struct NV_MEMORY_DESC_PARAMS {
+ NV_DECLARE_ALIGNED(NvU64 base, 8);
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+ NvU32 addressSpace;
+ NvU32 cacheAttrib;
+} NV_MEMORY_DESC_PARAMS;
+
+#define CC_CHAN_ALLOC_IV_SIZE_DWORD 3U
+
+#define CC_CHAN_ALLOC_NONCE_SIZE_DWORD 8U
+
+typedef struct NV_CHANNEL_ALLOC_PARAMS {
+
+ NvHandle hObjectError; // error context DMA
+ NvHandle hObjectBuffer; // no longer used
+ NV_DECLARE_ALIGNED(NvU64 gpFifoOffset, 8); // offset to beginning of GP FIFO
+ NvU32 gpFifoEntries; // number of GP FIFO entries
+
+ NvU32 flags;
+
+
+ NvHandle hContextShare; // context share handle
+ NvHandle hVASpace; // VASpace for the channel
+
+ // handle to UserD memory object for channel, ignored if hUserdMemory[0]=0
+ NvHandle hUserdMemory[NV_MAX_SUBDEVICES];
+
+ // offset to beginning of UserD within hUserdMemory[x]
+ NV_DECLARE_ALIGNED(NvU64 userdOffset[NV_MAX_SUBDEVICES], 8);
+
+ // engine type (NV2080_ENGINE_TYPE_*) with which this channel is associated
+ NvU32 engineType;
+ // Channel identifier that is unique for the duration of an RM session
+ NvU32 cid;
+ // One-hot encoded bitmask to match SET_SUBDEVICE_MASK methods
+ NvU32 subDeviceId;
+ NvHandle hObjectEccError; // ECC error context DMA
+
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS instanceMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS userdMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS ramfcMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS mthdbufMem, 8);
+
+ NvHandle hPhysChannelGroup; // reserved
+ NvU32 internalFlags; // reserved
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS errorNotifierMem, 8); // reserved
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS eccErrorNotifierMem, 8); // reserved
+ NvU32 ProcessID; // reserved
+ NvU32 SubProcessID; // reserved
+
+ // IV used for CPU-side encryption / GPU-side decryption.
+ NvU32 encryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
+ // IV used for CPU-side decryption / GPU-side encryption.
+ NvU32 decryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
+ // Nonce used for CPU-side signing / GPU-side signature verification.
+ NvU32 hmacNonce[CC_CHAN_ALLOC_NONCE_SIZE_DWORD]; // reserved
+} NV_CHANNEL_ALLOC_PARAMS;
+
+typedef NV_CHANNEL_ALLOC_PARAMS NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS;
+
+#define NVOS04_FLAGS_CHANNEL_TYPE 1:0
+#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL 0x00000000
+#define NVOS04_FLAGS_CHANNEL_TYPE_VIRTUAL 0x00000001 // OBSOLETE
+#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL_FOR_VIRTUAL 0x00000002 // OBSOLETE
+#define NVOS04_FLAGS_VPR 2:2
+#define NVOS04_FLAGS_VPR_FALSE 0x00000000
+#define NVOS04_FLAGS_VPR_TRUE 0x00000001
+#define NVOS04_FLAGS_CC_SECURE 2:2
+#define NVOS04_FLAGS_CC_SECURE_FALSE 0x00000000
+#define NVOS04_FLAGS_CC_SECURE_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING 3:3
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_TRUE 0x00000001
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE 4:4
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_DEFAULT 0x00000000
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_ONE 0x00000001
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL 5:5
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_FALSE 0x00000000
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_TRUE 0x00000001
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING 6:6
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_FALSE 0x00000000
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE 7:7
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE 10:8
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED 11:11
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_VALUE 20:12
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED 21:21
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV 22:22
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER 23:23
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO 24:24
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_TRUE 0x00000001
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL 25:25
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_FALSE 0x00000000
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT 26:26
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT 27:27
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_TRUE 0x00000001
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD 29:28
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_DEFAULT 0x00000000
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_ONE 0x00000001
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_TWO 0x00000002
+#define NVOS04_FLAGS_MAP_CHANNEL 30:30
+#define NVOS04_FLAGS_MAP_CHANNEL_FALSE 0x00000000
+#define NVOS04_FLAGS_MAP_CHANNEL_TRUE 0x00000001
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC 31:31
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_FALSE 0x00000000
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_TRUE 0x00000001
+
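+/*
+ * The h:l pairs above are DRF-style bitfield positions. A rough sketch of
+ * how a caller assembles the flags word with nouveau's NVDEF()/NVVAL()
+ * helpers from <nvhw/drf.h> (illustrative only; the exact fields set vary
+ * by call site):
+ *
+ *   args->flags  = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL);
+ *   args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, runq);
+ *   args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE);
+ */
+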
+typedef enum {
+ /*!
+ * Initial state as passed in NV_CHANNEL_ALLOC_PARAMS by
+ * kernel CPU-RM clients.
+ */
+ ERROR_NOTIFIER_TYPE_UNKNOWN = 0,
+ /*! @brief Error notifier is explicitly not set.
+ *
+ * The corresponding hErrorContext or hEccErrorContext must be
+ * NV01_NULL_OBJECT.
+ */
+ ERROR_NOTIFIER_TYPE_NONE,
+ /*! @brief Error notifier is a ContextDma */
+ ERROR_NOTIFIER_TYPE_CTXDMA,
+ /*! @brief Error notifier is a NvNotification array in sysmem/vidmem */
+ ERROR_NOTIFIER_TYPE_MEMORY
+} ErrorNotifierType;
+
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE 1:0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER 0x0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN 0x1
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL 0x2
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE 3:2
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE 5:4
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
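+
+/*
+ * A sketch of how internalFlags is packed from the fields above (the field
+ * choices here are illustrative only):
+ *
+ *   args->internalFlags =
+ *       NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, KERNEL) |
+ *       NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE);
+ */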
+
+#define NVA06F_CTRL_CMD_BIND (0xa06f0104) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_BIND_PARAMS_MESSAGE_ID" */
+typedef struct NVA06F_CTRL_BIND_PARAMS {
+ NvU32 engineType;
+} NVA06F_CTRL_BIND_PARAMS;
+
+#define NVA06F_CTRL_CMD_GPFIFO_SCHEDULE (0xa06f0103) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS_MESSAGE_ID" */
+typedef struct NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS {
+ NvBool bEnable;
+ NvBool bSkipSubmit;
+} NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS;
+
+#define NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES 16U
+
+typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY {
+ NV_DECLARE_ALIGNED(NvU64 gpuPhysAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 gpuVirtAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+ NvU32 physAttr;
+ NvU16 bufferId;
+ NvU8 bInitialize;
+ NvU8 bNonmapped;
+} NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY;
+
+#define NV2080_CTRL_CMD_GPU_PROMOTE_CTX (0x2080012bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS {
+ NvU32 engineType;
+ NvHandle hClient;
+ NvU32 ChID;
+ NvHandle hChanClient;
+ NvHandle hObject;
+ NvHandle hVirtMemory;
+ NV_DECLARE_ALIGNED(NvU64 virtAddress, 8);
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+ NvU32 entryCount;
+ // C form: NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES];
+ NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES], 8);
+} NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS;
+
+typedef struct rpc_rc_triggered_v17_02
+{
+ NvU32 nv2080EngineType;
+ NvU32 chid;
+ NvU32 exceptType;
+ NvU32 scope;
+ NvU16 partitionAttributionId;
+} rpc_rc_triggered_v17_02;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gr.h
new file mode 100644
index 000000000000..82c5ec727bb4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gr.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_GR_H__
+#define __NVRM_GR_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO (0x20800a32) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES 8
+
+#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT 0x19
+
+typedef struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO {
+ NvU32 size;
+ NvU32 alignment;
+} NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO;
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO {
+ NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO engine[NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT];
+} NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO;
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS {
+ NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO engineContextBuffersInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES];
+} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS;
+
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID 4:0
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS (0x00000000)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VLD (0x00000001)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VIDEO (0x00000002)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_MPEG (0x00000003)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_CAPTURE (0x00000004)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_DISPLAY (0x00000005)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_ENCRYPTION (0x00000006)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_POSTPROCESS (0x00000007)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ZCULL (0x00000008)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PM (0x00000009)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COMPUTE_PREEMPT (0x0000000a)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PREEMPT (0x0000000b)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_SPILL (0x0000000c)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL (0x0000000d)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BETACB (0x0000000e)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV (0x0000000f)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PATCH (0x00000010)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BUNDLE_CB (0x00000011)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL_GLOBAL (0x00000012)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ATTRIBUTE_CB (0x00000013)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV_CB_GLOBAL (0x00000014)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_POOL (0x00000015)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_CTRL_BLK (0x00000016)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_FECS_EVENT (0x00000017)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PRIV_ACCESS_MAP (0x00000018)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT (0x00000019)
+
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN 0U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM 1U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH 2U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_BUFFER_BUNDLE_CB 3U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PAGEPOOL 4U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB 5U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_RTV_CB_GLOBAL 6U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_POOL 7U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_CTRL_BLK 8U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_FECS_EVENT 9U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP 10U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP 11U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GLOBAL_PRIV_ACCESS_MAP 12U
+
+#include "fifo.h"
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h
new file mode 100644
index 000000000000..b6683a5bf870
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h
@@ -0,0 +1,825 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_GSP_H__
+#define __NVRM_GSP_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES 16U
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES 17U
+
+typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES];
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO {
+ NV_DECLARE_ALIGNED(NvU64 base, 8);
+ NV_DECLARE_ALIGNED(NvU64 limit, 8);
+ NV_DECLARE_ALIGNED(NvU64 reserved, 8);
+ NvU32 performance;
+ NvBool supportCompressed;
+ NvBool supportISO;
+ NvBool bProtected;
+ NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG blackList;
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO;
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS {
+ NvU32 numFBRegions;
+ NV_DECLARE_ALIGNED(NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO fbRegion[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES], 8);
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS;
+
+#define NV0080_CTRL_GR_CAPS_TBL_SIZE 23
+
+#define NV2080_GPU_MAX_GID_LENGTH (0x000000100ULL)
+
+typedef struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
+ NvU32 index;
+ NvU32 flags;
+ NvU32 length;
+ NvU8 data[NV2080_GPU_MAX_GID_LENGTH];
+} NV2080_CTRL_GPU_GET_GID_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS {
+ NvU32 gpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS {
+ NvU32 gpcId;
+ NvU32 tpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS {
+ NvU32 gpcId;
+ NvU32 zcullMask;
+} NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS {
+ NvU32 BoardID;
+ char chipSKU[4];
+ char chipSKUMod[2];
+ char project[5];
+ char projectSKU[5];
+ char CDP[6];
+ char projectSKUMod[2];
+ NvU32 businessCycle;
+} NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS;
+
+typedef enum
+{
+ COMPUTE_BRANDING_TYPE_NONE,
+ COMPUTE_BRANDING_TYPE_TESLA,
+} COMPUTE_BRANDING_TYPE;
+
+#define MAX_GPC_COUNT 32
+
+typedef struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
+ NvU32 totalVFs;
+ NvU32 firstVfOffset;
+ NvU32 vfFeatureMask;
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar0Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar1Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar2Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar0Size, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar1Size, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar2Size, 8);
+ NvBool b64bitBar0;
+ NvBool b64bitBar1;
+ NvBool b64bitBar2;
+ NvBool bSriovEnabled;
+ NvBool bSriovHeavyEnabled;
+ NvBool bEmulateVFBar0TlbInvalidationRegister;
+ NvBool bClientRmAllocatedCtxBuffer;
+} NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS;
+
+#include "engine.h"
+
+#define NVGPU_ENGINE_CAPS_MASK_BITS 32
+
+#define NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX ((RM_ENGINE_TYPE_LAST-1)/NVGPU_ENGINE_CAPS_MASK_BITS + 1)
+
+typedef struct GspSMInfo_t
+{
+ NvU32 version;
+ NvU32 regBankCount;
+ NvU32 regBankRegCount;
+ NvU32 maxWarpsPerSM;
+ NvU32 maxThreadsPerWarp;
+ NvU32 geomGsObufEntries;
+ NvU32 geomXbufEntries;
+ NvU32 maxSPPerSM;
+ NvU32 rtCoreCount;
+} GspSMInfo;
+
+typedef enum NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS {
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_MAIN = 0,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_SPILL = 1,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_PAGEPOOL = 2,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_BETACB = 3,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_RTV = 4,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL = 5,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL = 6,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL_CPU = 7,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_END = 8,
+} NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS;
+
+#define NV2080_GPU_MAX_NAME_STRING_LENGTH (0x0000040U)
+
+typedef struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS
+{
+ NvU32 numHeads;
+ NvU32 maxNumHeads;
+} VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS;
+
+typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS
+{
+ NvU32 headIndex;
+ NvU32 maxHResolution;
+ NvU32 maxVResolution;
+} VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS;
+
+typedef struct GspStaticConfigInfo_t
+{
+ NvU8 grCapsBits[NV0080_CTRL_GR_CAPS_TBL_SIZE];
+ NV2080_CTRL_GPU_GET_GID_INFO_PARAMS gidInfo;
+ NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS gpcInfo;
+ NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS tpcInfo[MAX_GPC_COUNT];
+ NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS zcullInfo[MAX_GPC_COUNT];
+ NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS SKUInfo;
+ NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS fbRegionInfoParams;
+ COMPUTE_BRANDING_TYPE computeBranding;
+
+ NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS sriovCaps;
+ NvU32 sriovMaxGfid;
+
+ NvU32 engineCaps[NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX];
+
+ GspSMInfo SM_info;
+
+ NvBool poisonFuseEnabled;
+
+ NvU64 fb_length;
+ NvU32 fbio_mask;
+ NvU32 fb_bus_width;
+ NvU32 fb_ram_type;
+ NvU32 fbp_mask;
+ NvU32 l2_cache_size;
+
+ NvU32 gfxpBufferSize[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL];
+ NvU32 gfxpBufferAlignment[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL];
+
+ NvU8 gpuNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvU8 gpuShortNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvU16 gpuNameString_Unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvBool bGpuInternalSku;
+ NvBool bIsQuadroGeneric;
+ NvBool bIsQuadroAd;
+ NvBool bIsNvidiaNvs;
+ NvBool bIsVgx;
+ NvBool bGeforceSmb;
+ NvBool bIsTitan;
+ NvBool bIsTesla;
+ NvBool bIsMobile;
+ NvBool bIsGc6Rtd3Allowed;
+ NvBool bIsGcOffRtd3Allowed;
+ NvBool bIsGcoffLegacyAllowed;
+
+ NvU64 bar1PdeBase;
+ NvU64 bar2PdeBase;
+
+ NvBool bVbiosValid;
+ NvU32 vbiosSubVendor;
+ NvU32 vbiosSubDevice;
+
+ NvBool bPageRetirementSupported;
+
+ NvBool bSplitVasBetweenServerClientRm;
+
+ NvBool bClRootportNeedsNosnoopWAR;
+
+ VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS displaylessMaxHeads;
+ VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS displaylessMaxResolution;
+ NvU64 displaylessMaxPixels;
+
+ // Client handle for internal RMAPI control.
+ NvHandle hInternalClient;
+
+ // Device handle for internal RMAPI control.
+ NvHandle hInternalDevice;
+
+ // Subdevice handle for internal RMAPI control.
+ NvHandle hInternalSubdevice;
+
+ NvBool bSelfHostedMode;
+ NvBool bAtsSupported;
+
+ NvBool bIsGpuUefi;
+} GspStaticConfigInfo;
+
+typedef struct rpc_unloading_guest_driver_v1F_07
+{
+ NvBool bInPMTransition;
+ NvBool bGc6Entering;
+ NvU32 newLevel;
+} rpc_unloading_guest_driver_v1F_07;
+
+typedef struct PACKED_REGISTRY_ENTRY
+{
+ NvU32 nameOffset;
+ NvU8 type;
+ NvU32 data;
+ NvU32 length;
+} PACKED_REGISTRY_ENTRY;
+
+typedef struct PACKED_REGISTRY_TABLE
+{
+ NvU32 size;
+ NvU32 numEntries;
+ PACKED_REGISTRY_ENTRY entries[] __counted_by(numEntries);
+} PACKED_REGISTRY_TABLE;
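+
+/*
+ * The registry is serialized as a single blob: this header, numEntries
+ * packed entries, then a string pool that each nameOffset points into
+ * (offsets taken from the start of the table). A minimal sketch of walking
+ * a well-formed blob, with use() standing in for the consumer:
+ *
+ *   const PACKED_REGISTRY_TABLE *tbl = blob;
+ *
+ *   for (NvU32 i = 0; i < tbl->numEntries; i++) {
+ *       const PACKED_REGISTRY_ENTRY *e = &tbl->entries[i];
+ *       const char *name = (const char *)blob + e->nameOffset;
+ *
+ *       use(name, e->type, e->data, e->length);
+ *   }
+ */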
+
+typedef struct
+{
+ NvU16 deviceID; // deviceID
+ NvU16 vendorID; // vendorID
+ NvU16 subdeviceID; // subsystem deviceID
+ NvU16 subvendorID; // subsystem vendorID
+ NvU8 revisionID; // revision ID
+} BUSINFO;
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
+
+typedef struct DOD_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 acpiIdListLen;
+ NvU32 acpiIdList[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} DOD_METHOD_DATA;
+
+typedef struct JT_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 jtCaps;
+ NvU16 jtRevId;
+ NvBool bSBIOSCaps;
+} JT_METHOD_DATA;
+
+typedef struct MUX_METHOD_DATA_ELEMENT
+{
+ NvU32 acpiId;
+ NvU32 mode;
+ NV_STATUS status;
+} MUX_METHOD_DATA_ELEMENT;
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
+
+typedef struct MUX_METHOD_DATA
+{
+ NvU32 tableLen;
+ MUX_METHOD_DATA_ELEMENT acpiIdMuxModeTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+ MUX_METHOD_DATA_ELEMENT acpiIdMuxPartTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} MUX_METHOD_DATA;
+
+typedef struct CAPS_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 optimusCaps;
+} CAPS_METHOD_DATA;
+
+typedef struct ACPI_METHOD_DATA
+{
+ NvBool bValid;
+ DOD_METHOD_DATA dodMethodData;
+ JT_METHOD_DATA jtMethodData;
+ MUX_METHOD_DATA muxMethodData;
+ CAPS_METHOD_DATA capsMethodData;
+} ACPI_METHOD_DATA;
+
+typedef struct GSP_VF_INFO
+{
+ NvU32 totalVFs;
+ NvU32 firstVFOffset;
+ NvU64 FirstVFBar0Address;
+ NvU64 FirstVFBar1Address;
+ NvU64 FirstVFBar2Address;
+ NvBool b64bitBar0;
+ NvBool b64bitBar1;
+ NvBool b64bitBar2;
+} GSP_VF_INFO;
+
+typedef struct GspSystemInfo
+{
+ NvU64 gpuPhysAddr;
+ NvU64 gpuPhysFbAddr;
+ NvU64 gpuPhysInstAddr;
+ NvU64 nvDomainBusDeviceFunc;
+ NvU64 simAccessBufPhysAddr;
+ NvU64 pcieAtomicsOpMask;
+ NvU64 consoleMemSize;
+ NvU64 maxUserVa;
+ NvU32 pciConfigMirrorBase;
+ NvU32 pciConfigMirrorSize;
+ NvU8 oorArch;
+ NvU64 clPdbProperties;
+ NvU32 Chipset;
+ NvBool bGpuBehindBridge;
+ NvBool bMnocAvailable;
+ NvBool bUpstreamL0sUnsupported;
+ NvBool bUpstreamL1Unsupported;
+ NvBool bUpstreamL1PorSupported;
+ NvBool bUpstreamL1PorMobileOnly;
+ NvU8 upstreamAddressValid;
+ BUSINFO FHBBusInfo;
+ BUSINFO chipsetIDInfo;
+ ACPI_METHOD_DATA acpiMethodData;
+ NvU32 hypervisorType;
+ NvBool bIsPassthru;
+ NvU64 sysTimerOffsetNs;
+ GSP_VF_INFO gspVFInfo;
+} GspSystemInfo;
+
+typedef struct rpc_os_error_log_v17_00
+{
+ NvU32 exceptType;
+ NvU32 runlistId;
+ NvU32 chid;
+ char errString[0x100];
+} rpc_os_error_log_v17_00;
+
+typedef struct rpc_run_cpu_sequencer_v17_00
+{
+ NvU32 bufferSizeDWord;
+ NvU32 cmdIndex;
+ NvU32 regSaveArea[8];
+ NvU32 commandBuffer[];
+} rpc_run_cpu_sequencer_v17_00;
+
+typedef enum GSP_SEQ_BUF_OPCODE
+{
+ GSP_SEQ_BUF_OPCODE_REG_WRITE = 0,
+ GSP_SEQ_BUF_OPCODE_REG_MODIFY,
+ GSP_SEQ_BUF_OPCODE_REG_POLL,
+ GSP_SEQ_BUF_OPCODE_DELAY_US,
+ GSP_SEQ_BUF_OPCODE_REG_STORE,
+ GSP_SEQ_BUF_OPCODE_CORE_RESET,
+ GSP_SEQ_BUF_OPCODE_CORE_START,
+ GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,
+ GSP_SEQ_BUF_OPCODE_CORE_RESUME,
+} GSP_SEQ_BUF_OPCODE;
+
+typedef struct
+{
+ NvU32 addr;
+ NvU32 val;
+} GSP_SEQ_BUF_PAYLOAD_REG_WRITE;
+
+typedef struct
+{
+ NvU32 addr;
+ NvU32 mask;
+ NvU32 val;
+} GSP_SEQ_BUF_PAYLOAD_REG_MODIFY;
+
+typedef struct
+{
+ NvU32 addr;
+ NvU32 mask;
+ NvU32 val;
+ NvU32 timeout;
+ NvU32 error;
+} GSP_SEQ_BUF_PAYLOAD_REG_POLL;
+
+typedef struct
+{
+ NvU32 val;
+} GSP_SEQ_BUF_PAYLOAD_DELAY_US;
+
+typedef struct
+{
+ NvU32 addr;
+ NvU32 index;
+} GSP_SEQ_BUF_PAYLOAD_REG_STORE;
+
+typedef struct GSP_SEQUENCER_BUFFER_CMD
+{
+ GSP_SEQ_BUF_OPCODE opCode;
+ union
+ {
+ GSP_SEQ_BUF_PAYLOAD_REG_WRITE regWrite;
+ GSP_SEQ_BUF_PAYLOAD_REG_MODIFY regModify;
+ GSP_SEQ_BUF_PAYLOAD_REG_POLL regPoll;
+ GSP_SEQ_BUF_PAYLOAD_DELAY_US delayUs;
+ GSP_SEQ_BUF_PAYLOAD_REG_STORE regStore;
+ } payload;
+} GSP_SEQUENCER_BUFFER_CMD;
+
+typedef struct
+{
+ // Magic
+ // BL to use for verification (i.e. Booter locked it in WPR2)
+ NvU64 magic; // = 0xdc3aae21371a60b3;
+
+ // Revision number of Booter-BL-Sequencer handoff interface
+ // Bumped up whenever this interface changes in a way that is not backward compatible.
+ // Bumped up when we revoke GSP-RM ucode
+ NvU64 revision; // = 1;
+
+ // ---- Members regarding data in SYSMEM ----------------------------
+ // Consumed by Booter for DMA
+
+ NvU64 sysmemAddrOfRadix3Elf;
+ NvU64 sizeOfRadix3Elf;
+
+ NvU64 sysmemAddrOfBootloader;
+ NvU64 sizeOfBootloader;
+
+ // Offsets inside bootloader image needed by Booter
+ NvU64 bootloaderCodeOffset;
+ NvU64 bootloaderDataOffset;
+ NvU64 bootloaderManifestOffset;
+
+ union
+ {
+ // Used only at initial boot
+ struct
+ {
+ NvU64 sysmemAddrOfSignature;
+ NvU64 sizeOfSignature;
+ };
+
+ //
+ // Used at suspend/resume to read GspFwHeapFreeList
+ // Offset relative to GspFwWprMeta FBMEM PA (gspFwWprStart)
+ //
+ struct
+ {
+ NvU32 gspFwHeapFreeListWprOffset;
+ NvU32 unused0;
+ NvU64 unused1;
+ };
+ };
+
+ // ---- Members describing FB layout --------------------------------
+ NvU64 gspFwRsvdStart;
+
+ NvU64 nonWprHeapOffset;
+ NvU64 nonWprHeapSize;
+
+ NvU64 gspFwWprStart;
+
+ // GSP-RM to use to set up the heap.
+ NvU64 gspFwHeapOffset;
+ NvU64 gspFwHeapSize;
+
+ // BL to use to find ELF for jump
+ NvU64 gspFwOffset;
+ // Size is sizeOfRadix3Elf above.
+
+ NvU64 bootBinOffset;
+ // Size is sizeOfBootloader above.
+
+ NvU64 frtsOffset;
+ NvU64 frtsSize;
+
+ NvU64 gspFwWprEnd;
+
+ // GSP-RM to use for fbRegionInfo?
+ NvU64 fbSize;
+
+ // ---- Other members -----------------------------------------------
+
+ // GSP-RM to use for fbRegionInfo?
+ NvU64 vgaWorkspaceOffset;
+ NvU64 vgaWorkspaceSize;
+
+ // Boot count. Used to determine whether to load the firmware image.
+ NvU64 bootCount;
+
+ // This union is organized the way it is to start at an 8-byte boundary and achieve natural
+ // packing of the internal struct fields.
+ union
+ {
+ struct
+ {
+ // TODO: the partitionRpc* fields below do not really belong in this
+ // structure. The values are patched in by the partition bootstrapper
+ // when GSP-RM is booted in a partition, and this structure was a
+ // convenient place for the bootstrapper to access them. These should
+ // be moved to a different comm. mechanism between the bootstrapper
+ // and the GSP-RM tasks.
+
+ // Shared partition RPC memory (physical address)
+ NvU64 partitionRpcAddr;
+
+ // Offsets relative to partitionRpcAddr
+ NvU16 partitionRpcRequestOffset;
+ NvU16 partitionRpcReplyOffset;
+
+ // Code section and dataSection offset and size.
+ NvU32 elfCodeOffset;
+ NvU32 elfDataOffset;
+ NvU32 elfCodeSize;
+ NvU32 elfDataSize;
+
+ // Used during GSP-RM resume to check for revocation
+ NvU32 lsUcodeVersion;
+ };
+
+ struct
+ {
+ // Pad for the partitionRpc* fields, plus 4 bytes
+ NvU32 partitionRpcPadding[4];
+
+ // CrashCat (contiguous) buffer size/location - occupies same bytes as the
+ // elf(Code|Data)(Offset|Size) fields above.
+ // TODO: move to GSP_FMC_INIT_PARAMS
+ NvU64 sysmemAddrOfCrashReportQueue;
+ NvU32 sizeOfCrashReportQueue;
+
+ // Pad for the lsUcodeVersion field
+ NvU32 lsUcodeVersionPadding[1];
+ };
+ };
+
+ // Number of VF partitions allocating sub-heaps from the WPR heap
+ // Used during boot to ensure the heap is adequately sized
+ NvU8 gspFwHeapVfPartitionCount;
+
+ // Pad structure to exactly 256 bytes. Can replace padding with additional
+ // fields without incrementing revision. Padding initialized to 0.
+ NvU8 padding[7];
+
+ // BL to use for verification (i.e. Booter says OK to boot)
+ NvU64 verified; // 0x0 -> unverified, 0xa0a0a0a0a0a0a0a0 -> verified
+} GspFwWprMeta;
+
+#define GSP_FW_WPR_META_MAGIC 0xdc3aae21371a60b3ULL
+
+#define GSP_FW_WPR_META_REVISION 1
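+
+/*
+ * Rough picture of the FB carveout described above, from low to high
+ * addresses (a sketch inferred from the offsets; the WPR setup code is
+ * authoritative):
+ *
+ *   gspFwRsvdStart
+ *     non-WPR heap  (nonWprHeapOffset/Size)
+ *   gspFwWprStart -- WPR2 begins
+ *     GSP-RM heap   (gspFwHeapOffset/Size)
+ *     GSP-RM ELF    (gspFwOffset, sizeOfRadix3Elf)
+ *     bootloader    (bootBinOffset, sizeOfBootloader)
+ *     FRTS          (frtsOffset/Size)
+ *   gspFwWprEnd   -- WPR2 ends
+ *   VGA workspace (vgaWorkspaceOffset/Size)
+ *   fbSize        -- top of FB
+ */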
+
+typedef struct
+{
+ NvU32 version; // queue version
+ NvU32 size; // bytes, page aligned
+ NvU32 msgSize; // entry size, bytes, must be power-of-2, 16 is minimum
+ NvU32 msgCount; // number of entries in queue
+ NvU32 writePtr; // message id of next slot
+ NvU32 flags; // if set, it means "I want to swap RX"
+ NvU32 rxHdrOff; // Offset of msgqRxHeader from start of backing store.
+ NvU32 entryOff; // Offset of entries from start of backing store.
+} msgqTxHeader;
+
+typedef struct
+{
+ NvU32 readPtr; // message id of last message read
+} msgqRxHeader;
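+
+/*
+ * writePtr and readPtr index msgSize-sized slots in a ring of msgCount
+ * entries. A sketch of the free-space check a sender might perform,
+ * assuming one slot is kept empty to distinguish full from empty:
+ *
+ *   NvU32 used = (tx->writePtr + tx->msgCount - rx->readPtr) % tx->msgCount;
+ *   NvU32 free = tx->msgCount - used - 1;
+ */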
+
+typedef struct {
+ RmPhysAddr sharedMemPhysAddr;
+ NvU32 pageTableEntryCount;
+ NvLength cmdQueueOffset;
+ NvLength statQueueOffset;
+ NvLength locklessCmdQueueOffset;
+ NvLength locklessStatQueueOffset;
+} MESSAGE_QUEUE_INIT_ARGUMENTS;
+
+typedef struct {
+ NvU32 oldLevel;
+ NvU32 flags;
+ NvBool bInPMTransition;
+} GSP_SR_INIT_ARGUMENTS;
+
+typedef struct
+{
+ MESSAGE_QUEUE_INIT_ARGUMENTS messageQueueInitArguments;
+ GSP_SR_INIT_ARGUMENTS srInitArguments;
+ NvU32 gpuInstance;
+
+ struct
+ {
+ NvU64 pa;
+ NvU64 size;
+ } profilerArgs;
+} GSP_ARGUMENTS_CACHED;
+
+#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0 (0x00000000U)
+
+#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3 (0x00000003U)
+
+typedef NvU64 LibosAddress;
+
+typedef struct
+{
+ LibosAddress id8; // Id tag.
+ LibosAddress pa; // Physical address.
+ LibosAddress size; // Size of memory area.
+ NvU8 kind; // See LibosMemoryRegionKind below.
+ NvU8 loc; // See LibosMemoryRegionLoc below.
+} LibosMemoryRegionInitArgument;
+
+typedef enum {
+ LIBOS_MEMORY_REGION_NONE,
+ LIBOS_MEMORY_REGION_CONTIGUOUS,
+ LIBOS_MEMORY_REGION_RADIX3
+} LibosMemoryRegionKind;
+
+typedef enum {
+ LIBOS_MEMORY_REGION_LOC_NONE,
+ LIBOS_MEMORY_REGION_LOC_SYSMEM,
+ LIBOS_MEMORY_REGION_LOC_FB
+} LibosMemoryRegionLoc;
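+
+/*
+ * id8 packs a short ASCII name into an 8-byte integer tag. A sketch of the
+ * packing, mirroring the driver's id8 helper used when the LibOS init
+ * arguments are filled in:
+ *
+ *   static NvU64 libos_id8(const char *name)
+ *   {
+ *       NvU64 id = 0;
+ *
+ *       for (int i = 0; i < sizeof(id) && *name; i++, name++)
+ *           id = (id << 8) | *name;
+ *       return id;
+ *   }
+ */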
+
+typedef struct
+{
+ //
+ // Magic
+ // Use for verification by Booter
+ //
+ NvU64 magic; // = GSP_FW_SR_META_MAGIC;
+
+ //
+ // Revision number
+ // Bumped up whenever this interface changes in a way that is not backward compatible.
+ // Bumped up when we revoke GSP-RM ucode
+ //
+ NvU64 revision; // = GSP_FW_SR_META_REVISION;
+
+ //
+ // ---- Members regarding data in SYSMEM ----------------------------
+ // Consumed by Booter for DMA
+ //
+ NvU64 sysmemAddrOfSuspendResumeData;
+ NvU64 sizeOfSuspendResumeData;
+
+ // ---- Members for crypto ops across S/R ---------------------------
+
+ //
+ // HMAC over the entire GspFwSRMeta structure (including padding)
+ // with the hmac field itself zeroed.
+ //
+ NvU8 hmac[32];
+
+ // Hash over GspFwWprMeta structure
+ NvU8 wprMetaHash[32];
+
+ // Hash over GspFwHeapFreeList structure. All zeros signifies no free list.
+ NvU8 heapFreeListHash[32];
+
+ // Hash over data in WPR2 (skipping over free heap chunks; see Booter for details)
+ NvU8 dataHash[32];
+
+ //
+ // Pad structure to exactly 256 bytes (1 DMA chunk).
+ // Padding initialized to zero.
+ //
+ NvU32 padding[24];
+
+} GspFwSRMeta;
+
+#define GSP_FW_SR_META_MAGIC 0x8a3bb9e6c6c39d93ULL
+
+#define GSP_FW_SR_META_REVISION 2
+
+#define GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(opcode) \
+ ((opcode == GSP_SEQ_BUF_OPCODE_REG_WRITE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_WRITE) / sizeof(NvU32)) : \
+ (opcode == GSP_SEQ_BUF_OPCODE_REG_MODIFY) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_MODIFY) / sizeof(NvU32)) : \
+ (opcode == GSP_SEQ_BUF_OPCODE_REG_POLL) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_POLL) / sizeof(NvU32)) : \
+ (opcode == GSP_SEQ_BUF_OPCODE_DELAY_US) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_DELAY_US) / sizeof(NvU32)) : \
+ (opcode == GSP_SEQ_BUF_OPCODE_REG_STORE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_STORE) / sizeof(NvU32)) : \
+ /* GSP_SEQ_BUF_OPCODE_CORE_RESET */ \
+ /* GSP_SEQ_BUF_OPCODE_CORE_START */ \
+ /* GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT */ \
+ /* GSP_SEQ_BUF_OPCODE_CORE_RESUME */ \
+ 0)
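+
+/*
+ * Each encoded command occupies 1 + GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(op)
+ * dwords of rpc_run_cpu_sequencer_v17_00::commandBuffer: the opcode, then
+ * its payload (the CORE_* opcodes carry none). A sketch of walking the
+ * buffer, assuming cmdIndex counts the encoded commands and handle() stands
+ * in for the per-opcode work:
+ *
+ *   NvU32 ptr = 0;
+ *
+ *   for (NvU32 i = 0; i < rpc->cmdIndex; i++) {
+ *       GSP_SEQ_BUF_OPCODE op = rpc->commandBuffer[ptr++];
+ *
+ *       handle(op, &rpc->commandBuffer[ptr]);
+ *       ptr += GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(op);
+ *   }
+ */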
+
+typedef struct {
+ //
+ // Version 1
+ // Version 2
+ // Version 3 = for Partition boot
+ // Version 4 = for eb riscv boot
+ // Version 5 = Support signing entire RISC-V image as "code" in code section for hopper and later.
+ //
+ NvU32 version; // structure version
+ NvU32 bootloaderOffset;
+ NvU32 bootloaderSize;
+ NvU32 bootloaderParamOffset;
+ NvU32 bootloaderParamSize;
+ NvU32 riscvElfOffset;
+ NvU32 riscvElfSize;
+ NvU32 appVersion; // Changelist number associated with the image
+ //
+ // The manifest contains information about the Monitor and is
+ // input to BR
+ //
+ NvU32 manifestOffset;
+ NvU32 manifestSize;
+ //
+ // Monitor Data offset within RISCV image and size
+ //
+ NvU32 monitorDataOffset;
+ NvU32 monitorDataSize;
+ //
+ // Monitor Code offset within RISCV image and size
+ //
+ NvU32 monitorCodeOffset;
+ NvU32 monitorCodeSize;
+ NvU32 bIsMonitorEnabled;
+ //
+ // Swbrom Code offset within RISCV image and size
+ //
+ NvU32 swbromCodeOffset;
+ NvU32 swbromCodeSize;
+ //
+ // Swbrom Data offset within RISCV image and size
+ //
+ NvU32 swbromDataOffset;
+ NvU32 swbromDataSize;
+ //
+ // Total size of FB carveout (image and reserved space).
+ //
+ NvU32 fbReservedSize;
+ //
+ // Indicates whether the entire RISC-V image is signed as "code" in code section.
+ //
+ NvU32 bSignedAsCode;
+} RM_RISCV_UCODE_DESC;
+
+typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY {
+ NvU16 engineIdx;
+ NvU32 pmcIntrMask;
+ NvU32 vectorStall;
+ NvU32 vectorNonStall;
+} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY;
+
+typedef struct NV2080_INTR_CATEGORY_SUBTREE_MAP {
+ NvU8 subtreeStart;
+ NvU8 subtreeEnd;
+} NV2080_INTR_CATEGORY_SUBTREE_MAP;
+
+#define NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE 128
+
+typedef enum NV2080_INTR_CATEGORY {
+ NV2080_INTR_CATEGORY_DEFAULT = 0,
+ NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE = 1,
+ NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE_NOTIFICATION = 2,
+ NV2080_INTR_CATEGORY_RUNLIST = 3,
+ NV2080_INTR_CATEGORY_RUNLIST_NOTIFICATION = 4,
+ NV2080_INTR_CATEGORY_UVM_OWNED = 5,
+ NV2080_INTR_CATEGORY_UVM_SHARED = 6,
+ NV2080_INTR_CATEGORY_ENUM_COUNT = 7,
+} NV2080_INTR_CATEGORY;
+
+#define NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE (0x20800a5c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS {
+ NvU32 tableLen;
+ NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY table[NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE];
+ NV2080_INTR_CATEGORY_SUBTREE_MAP subtreeMap[NV2080_INTR_CATEGORY_ENUM_COUNT];
+} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS;
+
+#define GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB (96 << 10) // All architectures
+
+#define GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE ((48 << 10) * 2048) // Support 2048 channels
+
+typedef union rpc_message_rpc_union_field_v03_00
+{
+ NvU32 spare;
+ NvU32 cpuRmGfid;
+} rpc_message_rpc_union_field_v03_00;
+
+typedef rpc_message_rpc_union_field_v03_00 rpc_message_rpc_union_field_v;
+
+typedef struct rpc_message_header_v03_00
+{
+ NvU32 header_version;
+ NvU32 signature;
+ NvU32 length;
+ NvU32 function;
+ NvU32 rpc_result;
+ NvU32 rpc_result_private;
+ NvU32 sequence;
+ rpc_message_rpc_union_field_v u;
+ rpc_generic_union rpc_message_data[];
+} rpc_message_header_v03_00;
+
+typedef rpc_message_header_v03_00 rpc_message_header_v;
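+
+/*
+ * length is the total size of the RPC, i.e. this header plus the
+ * rpc_message_data[] payload; function selects an NV_VGPU_MSG_FUNCTION_*
+ * value from rpcfn.h (or an NV_VGPU_MSG_EVENT_* value from msgfn.h for
+ * GSP-to-CPU events).
+ */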
+
+typedef struct GSP_MSG_QUEUE_ELEMENT
+{
+ NvU8 authTagBuffer[16]; // Authentication tag buffer.
+ NvU8 aadBuffer[16]; // AAD buffer.
+ NvU32 checkSum; // Set to value needed to make checksum always zero.
+ NvU32 seqNum; // Sequence number maintained by the message queue.
+ NvU32 elemCount; // Number of message queue elements this message has.
+ NV_DECLARE_ALIGNED(rpc_message_header_v rpc, 8);
+} GSP_MSG_QUEUE_ELEMENT;
+
+#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2 (0 << 20) // No FB heap usage
+#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3 (20 << 20)
+
+#define GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X (8 << 20) // Turing thru Ada
+
+#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB (64u)
+#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB (84u)
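+
+/*
+ * Rough sizing example (a sketch; the driver's WPR heap computation is
+ * authoritative): a 16 GiB board on LIBOS3 needs about
+ *
+ *   8 MiB (base RM) + 20 MiB (OS) + 16 * 96 KiB (per-GB FB)
+ *     + 2048 * 48 KiB (client allocs) ~= 125.5 MiB,
+ *
+ * comfortably above the 84 MiB bare-metal floor.
+ */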
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h
new file mode 100644
index 000000000000..642c13aec325
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_MSGFN_H__
+#define __NVRM_MSGFN_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#ifndef E
+# define E(RPC) NV_VGPU_MSG_EVENT_##RPC,
+# define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+enum {
+#endif
+ E(FIRST_EVENT = 0x1000) // 0x1000
+ E(GSP_INIT_DONE) // 0x1001
+ E(GSP_RUN_CPU_SEQUENCER) // 0x1002
+ E(POST_EVENT) // 0x1003
+ E(RC_TRIGGERED) // 0x1004
+ E(MMU_FAULT_QUEUED) // 0x1005
+ E(OS_ERROR_LOG) // 0x1006
+ E(RG_LINE_INTR) // 0x1007
+ E(GPUACCT_PERFMON_UTIL_SAMPLES) // 0x1008
+ E(SIM_READ) // 0x1009
+ E(SIM_WRITE) // 0x100a
+ E(SEMAPHORE_SCHEDULE_CALLBACK) // 0x100b
+ E(UCODE_LIBOS_PRINT) // 0x100c
+ E(VGPU_GSP_PLUGIN_TRIGGERED) // 0x100d
+ E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK) // 0x100e
+ E(PERF_BRIDGELESS_INFO_UPDATE) // 0x100f
+ E(VGPU_CONFIG) // 0x1010
+ E(DISPLAY_MODESET) // 0x1011
+ E(EXTDEV_INTR_SERVICE) // 0x1012
+ E(NVLINK_INBAND_RECEIVED_DATA_256) // 0x1013
+ E(NVLINK_INBAND_RECEIVED_DATA_512) // 0x1014
+ E(NVLINK_INBAND_RECEIVED_DATA_1024) // 0x1015
+ E(NVLINK_INBAND_RECEIVED_DATA_2048) // 0x1016
+ E(NVLINK_INBAND_RECEIVED_DATA_4096) // 0x1017
+ E(TIMED_SEMAPHORE_RELEASE) // 0x1018
+ E(NVLINK_IS_GPU_DEGRADED) // 0x1019
+ E(PFM_REQ_HNDLR_STATE_SYNC_CALLBACK) // 0x101a
+ E(GSP_SEND_USER_SHARED_DATA) // 0x101b
+ E(NVLINK_FAULT_UP) // 0x101c
+ E(GSP_LOCKDOWN_NOTICE) // 0x101d
+ E(MIG_CI_CONFIG_UPDATE) // 0x101e
+ E(NUM_EVENTS) // END
+#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+};
+# undef E
+# undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+#endif
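+
+/*
+ * With E() left undefined by the includer, the list above expands to the
+ * NV_VGPU_MSG_EVENT_* enum:
+ *
+ *   enum {
+ *       NV_VGPU_MSG_EVENT_FIRST_EVENT = 0x1000,
+ *       NV_VGPU_MSG_EVENT_GSP_INIT_DONE,        // 0x1001
+ *       ...
+ *   };
+ *
+ * Defining E() before the first inclusion instead reuses the list for
+ * other expansions (a hypothetical use, not something this series does).
+ */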
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h
new file mode 100644
index 000000000000..3a04e702677f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_NVDEC_H__
+#define __NVRM_NVDEC_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances;
+ NvU32 engineInstance; // Select NVDEC0 or NVDEC1 or NVDEC2
+} NV_BSP_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h
new file mode 100644
index 000000000000..203c1d5304d9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_NVENC_H__
+#define __NVRM_NVENC_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of MSENC?
+ NvU32 engineInstance; // Select MSENC/NVENC0 or NVENC1 or NVENC2
+} NV_MSENC_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h
new file mode 100644
index 000000000000..71fc53889ec7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_NVJPG_H__
+#define __NVRM_NVJPG_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of NVJPG?
+ NvU32 engineInstance;
+} NV_NVJPG_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h
new file mode 100644
index 000000000000..49d81c7673d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_OFA_H__
+#define __NVRM_OFA_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of OFA?
+} NV_OFA_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/rpcfn.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/rpcfn.h
new file mode 100644
index 000000000000..2a037acc6b1e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/rpcfn.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_RPCFN_H__
+#define __NVRM_RPCFN_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#ifndef X
+# define X(UNIT, RPC) NV_VGPU_MSG_FUNCTION_##RPC,
+# define DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
+enum {
+#endif
+ X(RM, NOP) // 0
+ X(RM, SET_GUEST_SYSTEM_INFO) // 1
+ X(RM, ALLOC_ROOT) // 2
+ X(RM, ALLOC_DEVICE) // 3 deprecated
+ X(RM, ALLOC_MEMORY) // 4
+ X(RM, ALLOC_CTX_DMA) // 5
+ X(RM, ALLOC_CHANNEL_DMA) // 6
+ X(RM, MAP_MEMORY) // 7
+ X(RM, BIND_CTX_DMA) // 8 deprecated
+ X(RM, ALLOC_OBJECT) // 9
+ X(RM, FREE) //10
+ X(RM, LOG) //11
+ X(RM, ALLOC_VIDMEM) //12
+ X(RM, UNMAP_MEMORY) //13
+ X(RM, MAP_MEMORY_DMA) //14
+ X(RM, UNMAP_MEMORY_DMA) //15
+ X(RM, GET_EDID) //16
+ X(RM, ALLOC_DISP_CHANNEL) //17
+ X(RM, ALLOC_DISP_OBJECT) //18
+ X(RM, ALLOC_SUBDEVICE) //19
+ X(RM, ALLOC_DYNAMIC_MEMORY) //20
+ X(RM, DUP_OBJECT) //21
+ X(RM, IDLE_CHANNELS) //22
+ X(RM, ALLOC_EVENT) //23
+ X(RM, SEND_EVENT) //24
+ X(RM, REMAPPER_CONTROL) //25 deprecated
+ X(RM, DMA_CONTROL) //26
+ X(RM, DMA_FILL_PTE_MEM) //27
+ X(RM, MANAGE_HW_RESOURCE) //28
+ X(RM, BIND_ARBITRARY_CTX_DMA) //29 deprecated
+ X(RM, CREATE_FB_SEGMENT) //30
+ X(RM, DESTROY_FB_SEGMENT) //31
+ X(RM, ALLOC_SHARE_DEVICE) //32
+ X(RM, DEFERRED_API_CONTROL) //33
+ X(RM, REMOVE_DEFERRED_API) //34
+ X(RM, SIM_ESCAPE_READ) //35
+ X(RM, SIM_ESCAPE_WRITE) //36
+ X(RM, SIM_MANAGE_DISPLAY_CONTEXT_DMA) //37
+ X(RM, FREE_VIDMEM_VIRT) //38
+ X(RM, PERF_GET_PSTATE_INFO) //39 deprecated for vGPU, used by GSP
+ X(RM, PERF_GET_PERFMON_SAMPLE) //40
+ X(RM, PERF_GET_VIRTUAL_PSTATE_INFO) //41 deprecated
+ X(RM, PERF_GET_LEVEL_INFO) //42
+ X(RM, MAP_SEMA_MEMORY) //43
+ X(RM, UNMAP_SEMA_MEMORY) //44
+ X(RM, SET_SURFACE_PROPERTIES) //45
+ X(RM, CLEANUP_SURFACE) //46
+ X(RM, UNLOADING_GUEST_DRIVER) //47
+ X(RM, TDR_SET_TIMEOUT_STATE) //48
+ X(RM, SWITCH_TO_VGA) //49
+ X(RM, GPU_EXEC_REG_OPS) //50
+ X(RM, GET_STATIC_INFO) //51
+ X(RM, ALLOC_VIRTMEM) //52
+ X(RM, UPDATE_PDE_2) //53
+ X(RM, SET_PAGE_DIRECTORY) //54
+ X(RM, GET_STATIC_PSTATE_INFO) //55
+ X(RM, TRANSLATE_GUEST_GPU_PTES) //56
+ X(RM, RESERVED_57) //57
+ X(RM, RESET_CURRENT_GR_CONTEXT) //58
+ X(RM, SET_SEMA_MEM_VALIDATION_STATE) //59
+ X(RM, GET_ENGINE_UTILIZATION) //60
+ X(RM, UPDATE_GPU_PDES) //61
+ X(RM, GET_ENCODER_CAPACITY) //62
+ X(RM, VGPU_PF_REG_READ32) //63
+ X(RM, SET_GUEST_SYSTEM_INFO_EXT) //64
+ X(GSP, GET_GSP_STATIC_INFO) //65
+ X(RM, RMFS_INIT) //66
+ X(RM, RMFS_CLOSE_QUEUE) //67
+ X(RM, RMFS_CLEANUP) //68
+ X(RM, RMFS_TEST) //69
+ X(RM, UPDATE_BAR_PDE) //70
+ X(RM, CONTINUATION_RECORD) //71
+ X(RM, GSP_SET_SYSTEM_INFO) //72
+ X(RM, SET_REGISTRY) //73
+ X(GSP, GSP_INIT_POST_OBJGPU) //74 deprecated
+ X(RM, SUBDEV_EVENT_SET_NOTIFICATION) //75 deprecated
+ X(GSP, GSP_RM_CONTROL) //76
+ X(RM, GET_STATIC_INFO2) //77
+ X(RM, DUMP_PROTOBUF_COMPONENT) //78
+ X(RM, UNSET_PAGE_DIRECTORY) //79
+ X(RM, GET_CONSOLIDATED_STATIC_INFO) //80
+ X(RM, GMMU_REGISTER_FAULT_BUFFER) //81 deprecated
+ X(RM, GMMU_UNREGISTER_FAULT_BUFFER) //82 deprecated
+ X(RM, GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER) //83 deprecated
+ X(RM, GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER) //84 deprecated
+ X(RM, CTRL_SET_VGPU_FB_USAGE) //85
+ X(RM, CTRL_NVFBC_SW_SESSION_UPDATE_INFO) //86
+ X(RM, CTRL_NVENC_SW_SESSION_UPDATE_INFO) //87
+ X(RM, CTRL_RESET_CHANNEL) //88
+ X(RM, CTRL_RESET_ISOLATED_CHANNEL) //89
+ X(RM, CTRL_GPU_HANDLE_VF_PRI_FAULT) //90
+ X(RM, CTRL_CLK_GET_EXTENDED_INFO) //91
+ X(RM, CTRL_PERF_BOOST) //92
+ X(RM, CTRL_PERF_VPSTATES_GET_CONTROL) //93
+ X(RM, CTRL_GET_ZBC_CLEAR_TABLE) //94
+ X(RM, CTRL_SET_ZBC_COLOR_CLEAR) //95
+ X(RM, CTRL_SET_ZBC_DEPTH_CLEAR) //96
+ X(RM, CTRL_GPFIFO_SCHEDULE) //97
+ X(RM, CTRL_SET_TIMESLICE) //98
+ X(RM, CTRL_PREEMPT) //99
+ X(RM, CTRL_FIFO_DISABLE_CHANNELS) //100
+ X(RM, CTRL_SET_TSG_INTERLEAVE_LEVEL) //101
+ X(RM, CTRL_SET_CHANNEL_INTERLEAVE_LEVEL) //102
+ X(GSP, GSP_RM_ALLOC) //103
+ X(RM, CTRL_GET_P2P_CAPS_V2) //104
+ X(RM, CTRL_CIPHER_AES_ENCRYPT) //105
+ X(RM, CTRL_CIPHER_SESSION_KEY) //106
+ X(RM, CTRL_CIPHER_SESSION_KEY_STATUS) //107
+ X(RM, CTRL_DBG_CLEAR_ALL_SM_ERROR_STATES) //108
+ X(RM, CTRL_DBG_READ_ALL_SM_ERROR_STATES) //109
+ X(RM, CTRL_DBG_SET_EXCEPTION_MASK) //110
+ X(RM, CTRL_GPU_PROMOTE_CTX) //111
+ X(RM, CTRL_GR_CTXSW_PREEMPTION_BIND) //112
+ X(RM, CTRL_GR_SET_CTXSW_PREEMPTION_MODE) //113
+ X(RM, CTRL_GR_CTXSW_ZCULL_BIND) //114
+ X(RM, CTRL_GPU_INITIALIZE_CTX) //115
+ X(RM, CTRL_VASPACE_COPY_SERVER_RESERVED_PDES) //116
+ X(RM, CTRL_FIFO_CLEAR_FAULTED_BIT) //117
+ X(RM, CTRL_GET_LATEST_ECC_ADDRESSES) //118
+ X(RM, CTRL_MC_SERVICE_INTERRUPTS) //119
+ X(RM, CTRL_DMA_SET_DEFAULT_VASPACE) //120
+ X(RM, CTRL_GET_CE_PCE_MASK) //121
+ X(RM, CTRL_GET_ZBC_CLEAR_TABLE_ENTRY) //122
+ X(RM, CTRL_GET_NVLINK_PEER_ID_MASK) //123
+ X(RM, CTRL_GET_NVLINK_STATUS) //124
+ X(RM, CTRL_GET_P2P_CAPS) //125
+ X(RM, CTRL_GET_P2P_CAPS_MATRIX) //126
+ X(RM, RESERVED_0) //127
+ X(RM, CTRL_RESERVE_PM_AREA_SMPC) //128
+ X(RM, CTRL_RESERVE_HWPM_LEGACY) //129
+ X(RM, CTRL_B0CC_EXEC_REG_OPS) //130
+ X(RM, CTRL_BIND_PM_RESOURCES) //131
+ X(RM, CTRL_DBG_SUSPEND_CONTEXT) //132
+ X(RM, CTRL_DBG_RESUME_CONTEXT) //133
+ X(RM, CTRL_DBG_EXEC_REG_OPS) //134
+ X(RM, CTRL_DBG_SET_MODE_MMU_DEBUG) //135
+ X(RM, CTRL_DBG_READ_SINGLE_SM_ERROR_STATE) //136
+ X(RM, CTRL_DBG_CLEAR_SINGLE_SM_ERROR_STATE) //137
+ X(RM, CTRL_DBG_SET_MODE_ERRBAR_DEBUG) //138
+ X(RM, CTRL_DBG_SET_NEXT_STOP_TRIGGER_TYPE) //139
+ X(RM, CTRL_ALLOC_PMA_STREAM) //140
+ X(RM, CTRL_PMA_STREAM_UPDATE_GET_PUT) //141
+ X(RM, CTRL_FB_GET_INFO_V2) //142
+ X(RM, CTRL_FIFO_SET_CHANNEL_PROPERTIES) //143
+ X(RM, CTRL_GR_GET_CTX_BUFFER_INFO) //144
+ X(RM, CTRL_KGR_GET_CTX_BUFFER_PTES) //145
+ X(RM, CTRL_GPU_EVICT_CTX) //146
+ X(RM, CTRL_FB_GET_FS_INFO) //147
+ X(RM, CTRL_GRMGR_GET_GR_FS_INFO) //148
+ X(RM, CTRL_STOP_CHANNEL) //149
+ X(RM, CTRL_GR_PC_SAMPLING_MODE) //150
+ X(RM, CTRL_PERF_RATED_TDP_GET_STATUS) //151
+ X(RM, CTRL_PERF_RATED_TDP_SET_CONTROL) //152
+ X(RM, CTRL_FREE_PMA_STREAM) //153
+ X(RM, CTRL_TIMER_SET_GR_TICK_FREQ) //154
+ X(RM, CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB) //155
+ X(RM, GET_CONSOLIDATED_GR_STATIC_INFO) //156
+ X(RM, CTRL_DBG_SET_SINGLE_SM_SINGLE_STEP) //157
+ X(RM, CTRL_GR_GET_TPC_PARTITION_MODE) //158
+ X(RM, CTRL_GR_SET_TPC_PARTITION_MODE) //159
+ X(UVM, UVM_PAGING_CHANNEL_ALLOCATE) //160
+ X(UVM, UVM_PAGING_CHANNEL_DESTROY) //161
+ X(UVM, UVM_PAGING_CHANNEL_MAP) //162
+ X(UVM, UVM_PAGING_CHANNEL_UNMAP) //163
+ X(UVM, UVM_PAGING_CHANNEL_PUSH_STREAM) //164
+ X(UVM, UVM_PAGING_CHANNEL_SET_HANDLES) //165
+ X(UVM, UVM_METHOD_STREAM_GUEST_PAGES_OPERATION) //166
+ X(RM, CTRL_INTERNAL_QUIESCE_PMA_CHANNEL) //167
+ X(RM, DCE_RM_INIT) //168
+ X(RM, REGISTER_VIRTUAL_EVENT_BUFFER) //169
+ X(RM, CTRL_EVENT_BUFFER_UPDATE_GET) //170
+ X(RM, GET_PLCABLE_ADDRESS_KIND) //171
+ X(RM, CTRL_PERF_LIMITS_SET_STATUS_V2) //172
+ X(RM, CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM) //173
+ X(RM, CTRL_GET_MMU_DEBUG_MODE) //174
+ X(RM, CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS) //175
+ X(RM, CTRL_FLCN_GET_CTX_BUFFER_SIZE) //176
+ X(RM, CTRL_FLCN_GET_CTX_BUFFER_INFO) //177
+ X(RM, DISABLE_CHANNELS) //178
+ X(RM, CTRL_FABRIC_MEMORY_DESCRIBE) //179
+ X(RM, CTRL_FABRIC_MEM_STATS) //180
+ X(RM, SAVE_HIBERNATION_DATA) //181
+ X(RM, RESTORE_HIBERNATION_DATA) //182
+ X(RM, CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED) //183
+ X(RM, CTRL_EXEC_PARTITIONS_CREATE) //184
+ X(RM, CTRL_EXEC_PARTITIONS_DELETE) //185
+ X(RM, CTRL_GPFIFO_GET_WORK_SUBMIT_TOKEN) //186
+ X(RM, CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX) //187
+ X(RM, PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION) //188
+ X(RM, CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK) //189
+ X(RM, SET_SYSMEM_DIRTY_PAGE_TRACKING_BUFFER) //190
+ X(RM, CTRL_SUBDEVICE_GET_P2P_CAPS) // 191
+ X(RM, CTRL_BUS_SET_P2P_MAPPING) // 192
+ X(RM, CTRL_BUS_UNSET_P2P_MAPPING) // 193
+ X(RM, CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK) // 194
+ X(RM, CTRL_GPU_MIGRATABLE_OPS) // 195
+ X(RM, CTRL_GET_TOTAL_HS_CREDITS) // 196
+ X(RM, CTRL_GET_HS_CREDITS) // 197
+ X(RM, CTRL_SET_HS_CREDITS) // 198
+ X(RM, CTRL_PM_AREA_PC_SAMPLER) // 199
+ X(RM, INVALIDATE_TLB) // 200
+ X(RM, RESERVED_201) // 201
+ X(RM, ECC_NOTIFIER_WRITE_ACK) // 202
+ X(RM, NUM_FUNCTIONS) //END
+#ifdef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
+};
+# undef X
+# undef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
+#endif
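+
+/*
+ * As with msgfn.h, defining X() before the first inclusion reuses the
+ * list. A hypothetical debugging aid (not part of this series) that builds
+ * a name table indexed by function number:
+ *
+ *   #define X(UNIT, RPC) #RPC,
+ *   static const char *const rpc_names[] = {
+ *   #include "nvrm/rpcfn.h"
+ *   };
+ *   #undef X
+ *
+ * rpc_names[10] is then "FREE". Note that the include guard limits each
+ * translation unit to a single expansion of this header.
+ */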
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h
new file mode 100644
index 000000000000..f6ec04efd119
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_VMM_H__
+#define __NVRM_VMM_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define FERMI_VASPACE_A (0x000090f1)
+
+typedef struct
+{
+ NvU32 index;
+ NvV32 flags;
+ NvU64 vaSize NV_ALIGN_BYTES(8);
+ NvU64 vaStartInternal NV_ALIGN_BYTES(8);
+ NvU64 vaLimitInternal NV_ALIGN_BYTES(8);
+ NvU32 bigPageSize;
+ NvU64 vaBase NV_ALIGN_BYTES(8);
+} NV_VASPACE_ALLOCATION_PARAMETERS;
+
+#define NV_VASPACE_ALLOCATION_INDEX_GPU_NEW 0x00 //<! Create new VASpace, by default
+
+#define NV_VASPACE_ALLOCATION_FLAGS_IS_EXTERNALLY_OWNED BIT(3)
+
+#define SPLIT_VAS_SERVER_RM_MANAGED_VA_START 0x100000000ULL // 4GB
+#define SPLIT_VAS_SERVER_RM_MANAGED_VA_SIZE 0x20000000ULL // 512MB
+
+#define GMMU_FMT_MAX_LEVELS 6U
+
+#define NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES (0x90f10106U) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_MESSAGE_ID" */
+typedef struct NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS {
+ /*!
+ * [in] GPU sub-device handle - this API only supports unicast.
+ * Pass 0 to use subDeviceId instead.
+ */
+ NvHandle hSubDevice;
+
+ /*!
+ * [in] GPU sub-device ID. Ignored if hSubDevice is non-zero.
+ */
+ NvU32 subDeviceId;
+
+ /*!
+ * [in] Page size (VA coverage) of the level to reserve.
+ * This need not be a leaf (page table) page size - it can be
+ * the coverage of an arbitrary level (including root page directory).
+ */
+ NV_DECLARE_ALIGNED(NvU64 pageSize, 8);
+
+ /*!
+ * [in] First GPU virtual address of the range to reserve.
+ * This must be aligned to pageSize.
+ */
+ NV_DECLARE_ALIGNED(NvU64 virtAddrLo, 8);
+
+ /*!
+ * [in] Last GPU virtual address of the range to reserve.
+ * This (+1) must be aligned to pageSize.
+ */
+ NV_DECLARE_ALIGNED(NvU64 virtAddrHi, 8);
+
+ /*!
+ * [in] Number of PDE levels to copy.
+ */
+ NvU32 numLevelsToCopy;
+
+ /*!
+ * [in] Per-level information.
+ */
+ struct {
+ /*!
+ * Physical address of this page level instance.
+ */
+ NV_DECLARE_ALIGNED(NvU64 physAddress, 8);
+
+ /*!
+ * Size in bytes allocated for this level instance.
+ */
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+
+ /*!
+ * Aperture in which this page level instance resides.
+ */
+ NvU32 aperture;
+
+ /*!
+ * Page shift corresponding to the level
+ */
+ NvU8 pageShift;
+ } levels[GMMU_FMT_MAX_LEVELS];
+} NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS;
+
+#define NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY (0x801813U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS {
+ NV_DECLARE_ALIGNED(NvU64 physAddress, 8);
+ NvU32 numEntries;
+ NvU32 flags;
+ NvHandle hVASpace;
+ NvU32 chId;
+ NvU32 subDeviceId; // ID+1, 0 for BC
+ NvU32 pasid;
+} NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS;
+
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE 1:0
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_VIDMEM (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_SYSMEM_COH (0x00000001U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_SYSMEM_NONCOH (0x00000002U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_PRESERVE_PDES 2:2
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_PRESERVE_PDES_FALSE (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_PRESERVE_PDES_TRUE (0x00000001U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS 3:3
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS_FALSE (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS_TRUE (0x00000001U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_IGNORE_CHANNEL_BUSY 4:4
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_IGNORE_CHANNEL_BUSY_FALSE (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_IGNORE_CHANNEL_BUSY_TRUE (0x00000001U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_EXTEND_VASPACE 5:5
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_EXTEND_VASPACE_FALSE (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_EXTEND_VASPACE_TRUE (0x00000001U)
+
+#define NV0080_CTRL_CMD_DMA_UNSET_PAGE_DIRECTORY (0x801814U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS {
+ NvHandle hVASpace;
+ NvU32 subDeviceId; // ID+1, 0 for BC
+} NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c
new file mode 100644
index 000000000000..2156808cba4f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/engine.h>
+
+#include "nvrm/ofa.h"
+
+static int
+r535_ofa_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *ofa)
+{
+ NV_OFA_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), ofa);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->size = sizeof(*args);
+
+ return nvkm_gsp_rm_alloc_wr(ofa, args);
+}
+
+const struct nvkm_rm_api_engine
+r535_ofa = {
+ .alloc = r535_ofa_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c
new file mode 100644
index 000000000000..a4190676e1ad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+
+#include "nvrm/gsp.h"
+
+static const struct nvkm_rm_wpr
+r535_wpr_libos2 = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB,
+};
+
+static const struct nvkm_rm_wpr
+r535_wpr_libos3 = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
+};
+
+static const struct nvkm_rm_api
+r535_api = {
+ .gsp = &r535_gsp,
+ .rpc = &r535_rpc,
+ .ctrl = &r535_ctrl,
+ .alloc = &r535_alloc,
+ .client = &r535_client,
+ .device = &r535_device,
+ .fbsr = &r535_fbsr,
+ .disp = &r535_disp,
+ .fifo = &r535_fifo,
+ .ce = &r535_ce,
+ .gr = &r535_gr,
+ .nvdec = &r535_nvdec,
+ .nvenc = &r535_nvenc,
+ .nvjpg = &r535_nvjpg,
+ .ofa = &r535_ofa,
+};
+
+const struct nvkm_rm_impl
+r535_rm_tu102 = {
+ .wpr = &r535_wpr_libos2,
+ .api = &r535_api,
+};
+
+const struct nvkm_rm_impl
+r535_rm_ga102 = {
+ .wpr = &r535_wpr_libos3,
+ .api = &r535_api,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c
new file mode 100644
index 000000000000..5acb98d137bd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c
@@ -0,0 +1,691 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/rpc.h>
+
+#include "nvrm/rpcfn.h"
+
+#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE
+#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16)
+
+/**
+ * DOC: GSP message queue element
+ *
+ * https://github.com/NVIDIA/open-gpu-kernel-modules/blob/535/src/nvidia/inc/kernel/gpu/gsp/message_queue_priv.h
+ *
+ * The GSP command queue and status queue are message queues used for
+ * communication between software and GSP. The software submits a GSP
+ * RPC via the GSP command queue, and GSP writes the status of the
+ * submitted RPC to the status queue.
+ *
+ * A GSP message queue element consists of three parts:
+ *
+ * - message element header (struct r535_gsp_msg), which mostly maintains
+ * the metadata for queuing the element.
+ *
+ * - RPC message header (struct nvfw_gsp_rpc), which maintains the info
+ *   about the RPC, e.g. the RPC function number.
+ *
+ * - The payload, where the RPC message resides, e.g. the params of a
+ *   specific RPC function. Some RPC functions also have their own
+ *   headers in the payload, e.g. rm_alloc and rm_control.
+ *
+ * The memory layout of a GSP message element can be illustrated below::
+ *
+ * +------------------------+
+ * | Message Element Header |
+ * | (r535_gsp_msg) |
+ * | |
+ * | (r535_gsp_msg.data) |
+ * | | |
+ * |----------V-------------|
+ * | GSP RPC Header |
+ * | (nvfw_gsp_rpc) |
+ * | |
+ * | (nvfw_gsp_rpc.data) |
+ * | | |
+ * |----------V-------------|
+ * | Payload |
+ * | |
+ * | header(optional) |
+ * | params |
+ * +------------------------+
+ *
+ * The max size of a message queue element is 16 pages (including the
+ * headers). When a GSP message to be sent is larger than 16 pages, the
+ * message is split into multiple elements and sent accordingly. (A
+ * hedged page-count sketch follows the GSP_MSG_HDR_SIZE definition
+ * below.)
+ *
+ * Among the split elements, the first element carries the expected
+ * function number, while the remaining elements are sent with the
+ * function number NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD.
+ *
+ * GSP consumes the elements from the cmdq and always writes the result
+ * back to the msgq. The result is also formed as split elements.
+ *
+ * Terminology:
+ *
+ * - gsp_msg(msg): GSP message element (element header + GSP RPC header +
+ * payload)
+ * - gsp_rpc(rpc): GSP RPC (RPC header + payload)
+ * - gsp_rpc_buf: buffer for (GSP RPC header + payload)
+ * - gsp_rpc_len: size of (GSP RPC header + payload)
+ * - params_size: size of params in the payload
+ * - payload_size: size of (header if exists + params) in the payload
+ */
+
+struct r535_gsp_msg {
+ u8 auth_tag_buffer[16];
+ u8 aad_buffer[16];
+ u32 checksum;
+ u32 sequence;
+ u32 elem_count;
+ u32 pad;
+ u8 data[];
+};
+
+struct nvfw_gsp_rpc {
+ u32 header_version;
+ u32 signature;
+ u32 length;
+ u32 function;
+ u32 rpc_result;
+ u32 rpc_result_private;
+ u32 sequence;
+ union {
+ u32 spare;
+ u32 cpuRmGfid;
+ };
+ u8 data[];
+};
+
+#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data)
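+
+/*
+ * Hedged example (illustration only, never called): with the definition
+ * above, the number of 4KiB queue pages one message element occupies is
+ * a simple round-up over the element header plus the RPC length.
+ */
+static inline u32
+r535_gsp_msg_pages_example(u32 gsp_rpc_len)
+{
+	return DIV_ROUND_UP(GSP_MSG_HDR_SIZE + gsp_rpc_len, GSP_PAGE_SIZE);
+}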
+
+#define to_gsp_hdr(p, header) \
+ container_of((void *)p, typeof(*header), data)
+
+#define to_payload_hdr(p, header) \
+ container_of((void *)p, typeof(*header), params)
+
+int
+r535_rpc_status_to_errno(uint32_t rpc_status)
+{
+ switch (rpc_status) {
+ case 0x55: /* NV_ERR_NOT_READY */
+ case 0x66: /* NV_ERR_TIMEOUT_RETRY */
+ return -EBUSY;
+ case 0x51: /* NV_ERR_NO_MEMORY */
+ return -ENOMEM;
+ default:
+ return -EINVAL;
+ }
+}
+
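+/*
+ * Wait until the msgq holds at least enough whole pages for the given
+ * RPC, polling up to *ptime times; returns the number of pages pending
+ * in the queue, or -ETIMEDOUT.
+ */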
+static int
+r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *ptime)
+{
+ u32 size, rptr = *gsp->msgq.rptr;
+ int used;
+
+ size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + gsp_rpc_len,
+ GSP_PAGE_SIZE);
+ if (WARN_ON(!size || size >= gsp->msgq.cnt))
+ return -EINVAL;
+
+ do {
+ u32 wptr = *gsp->msgq.wptr;
+
+ used = wptr + gsp->msgq.cnt - rptr;
+ if (used >= gsp->msgq.cnt)
+ used -= gsp->msgq.cnt;
+ if (used >= size)
+ break;
+
+ usleep_range(1, 2);
+ } while (--(*ptime));
+
+ if (WARN_ON(!*ptime))
+ return -ETIMEDOUT;
+
+ return used;
+}
+
+static struct r535_gsp_msg *
+r535_gsp_msgq_get_entry(struct nvkm_gsp *gsp)
+{
+ u32 rptr = *gsp->msgq.rptr;
+
+ /* Skip the first page, which is the message queue info */
+ return (void *)((u8 *)gsp->shm.msgq.ptr + GSP_PAGE_SIZE +
+ rptr * GSP_PAGE_SIZE);
+}
+
+/**
+ * DOC: Receive a GSP message queue element
+ *
+ * Receiving a GSP message queue element from the message queue consists of
+ * the following steps:
+ *
+ * - Peek the element from the queue: r535_gsp_msgq_peek().
+ * Peek the first page of the element to determine the total size of the
+ * message before allocating the proper memory.
+ *
+ * - Allocate memory for the message.
+ *   Once the total size of the message is determined from the peeked
+ *   GSP message queue element, r535_gsp_msgq_recv() allocates the
+ *   required memory.
+ *
+ * - Receive the message: r535_gsp_msgq_recv().
+ * Copy the message into the allocated memory. Advance the read pointer.
+ * If the message is a large GSP message, r535_gsp_msgq_recv() calls
+ * r535_gsp_msgq_recv_one_elem() repeatedly to receive continuation parts
+ * until the complete message is received.
+ *   r535_gsp_msgq_recv() assembles the payloads of the continuation parts
+ *   into the buffer returned for the large GSP message. (A hedged usage
+ *   sketch follows r535_gsp_msgq_recv() below.)
+ *
+ * - Free the allocated memory: r535_gsp_msg_done().
+ * The user is responsible for freeing the memory allocated for the GSP
+ * message pages after they have been processed.
+ */
+static void *
+r535_gsp_msgq_peek(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
+{
+ struct r535_gsp_msg *mqe;
+ int ret;
+
+ ret = r535_gsp_msgq_wait(gsp, gsp_rpc_len, retries);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ mqe = r535_gsp_msgq_get_entry(gsp);
+
+ return mqe->data;
+}
+
+struct r535_gsp_msg_info {
+ int *retries;
+ u32 gsp_rpc_len;
+ void *gsp_rpc_buf;
+ bool continuation;
+};
+
+static void
+r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl);
+
+static void *
+r535_gsp_msgq_recv_one_elem(struct nvkm_gsp *gsp,
+ struct r535_gsp_msg_info *info)
+{
+ u8 *buf = info->gsp_rpc_buf;
+ u32 rptr = *gsp->msgq.rptr;
+ struct r535_gsp_msg *mqe;
+ u32 size, expected, len;
+ int ret;
+
+ expected = info->gsp_rpc_len;
+
+ ret = r535_gsp_msgq_wait(gsp, expected, info->retries);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ mqe = r535_gsp_msgq_get_entry(gsp);
+
+ if (info->continuation) {
+ struct nvfw_gsp_rpc *rpc = (struct nvfw_gsp_rpc *)mqe->data;
+
+ if (rpc->function != NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD) {
+ nvkm_error(&gsp->subdev,
+ "Not a continuation of a large RPC\n");
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
+ return ERR_PTR(-EIO);
+ }
+ }
+
+ size = ALIGN(expected + GSP_MSG_HDR_SIZE, GSP_PAGE_SIZE);
+
+ len = ((gsp->msgq.cnt - rptr) * GSP_PAGE_SIZE) - sizeof(*mqe);
+ len = min_t(u32, expected, len);
+
+ if (info->continuation)
+ memcpy(buf, mqe->data + sizeof(struct nvfw_gsp_rpc),
+ len - sizeof(struct nvfw_gsp_rpc));
+ else
+ memcpy(buf, mqe->data, len);
+
+ expected -= len;
+
+ if (expected) {
+		/* Wrap around past the queue-info page to the first element page. */
+		mqe = (void *)((u8 *)gsp->shm.msgq.ptr + GSP_PAGE_SIZE);
+ memcpy(buf + len, mqe, expected);
+ }
+
+ rptr = (rptr + DIV_ROUND_UP(size, GSP_PAGE_SIZE)) % gsp->msgq.cnt;
+
+ mb();
+ (*gsp->msgq.rptr) = rptr;
+ return buf;
+}
+
+static void *
+r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
+{
+ struct r535_gsp_msg *mqe;
+ const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*mqe);
+ struct nvfw_gsp_rpc *rpc;
+ struct r535_gsp_msg_info info = {0};
+ u32 expected = gsp_rpc_len;
+ void *buf;
+
+ mqe = r535_gsp_msgq_get_entry(gsp);
+ rpc = (struct nvfw_gsp_rpc *)mqe->data;
+
+ if (WARN_ON(rpc->length > max_rpc_size))
+ return NULL;
+
+ buf = kvmalloc(max_t(u32, rpc->length, expected), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ info.gsp_rpc_buf = buf;
+ info.retries = retries;
+ info.gsp_rpc_len = rpc->length;
+
+ buf = r535_gsp_msgq_recv_one_elem(gsp, &info);
+ if (IS_ERR(buf)) {
+ kvfree(info.gsp_rpc_buf);
+ info.gsp_rpc_buf = NULL;
+ return buf;
+ }
+
+ if (expected <= max_rpc_size)
+ return buf;
+
+ info.gsp_rpc_buf += info.gsp_rpc_len;
+ expected -= info.gsp_rpc_len;
+
+ while (expected) {
+ u32 size;
+
+ rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), info.retries);
+ if (IS_ERR_OR_NULL(rpc)) {
+			kvfree(buf);
+ return rpc;
+ }
+
+ info.gsp_rpc_len = rpc->length;
+ info.continuation = true;
+
+ rpc = r535_gsp_msgq_recv_one_elem(gsp, &info);
+ if (IS_ERR_OR_NULL(rpc)) {
+			kvfree(buf);
+ return rpc;
+ }
+
+ size = info.gsp_rpc_len - sizeof(*rpc);
+ expected -= size;
+ info.gsp_rpc_buf += size;
+ }
+
+ rpc = buf;
+ rpc->length = gsp_rpc_len;
+ return buf;
+}
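+
+/*
+ * Hedged usage sketch (illustration only, never called by the driver):
+ * one receive cycle as described in the DOC comment above. The caller
+ * owns the returned buffer and must release it with r535_gsp_msg_done().
+ */
+static inline void *
+r535_gsp_msgq_recv_example(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
+{
+	struct nvfw_gsp_rpc *rpc;
+
+	/* Peek the RPC header first, to learn the message size. */
+	rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), retries);
+	if (IS_ERR_OR_NULL(rpc))
+		return rpc;
+
+	/* Allocate a buffer, copy the message out, advance the read pointer. */
+	return r535_gsp_msgq_recv(gsp, gsp_rpc_len, retries);
+}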
+
+static int
+r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *rpc)
+{
+ struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
+ struct r535_gsp_msg *cqe;
+	u32 gsp_rpc_len = msg->checksum;	/* length stashed by r535_gsp_cmdq_get() */
+ u64 *ptr = (void *)msg;
+ u64 *end;
+ u64 csum = 0;
+ int free, time = 1000000;
+ u32 wptr, size, step, len;
+ u32 off = 0;
+
+ len = ALIGN(GSP_MSG_HDR_SIZE + gsp_rpc_len, GSP_PAGE_SIZE);
+
+ end = (u64 *)((char *)ptr + len);
+ msg->pad = 0;
+ msg->checksum = 0;
+ msg->sequence = gsp->cmdq.seq++;
+ msg->elem_count = DIV_ROUND_UP(len, 0x1000);
+
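+	/* Checksum: XOR all 64-bit words of the element, then fold to 32 bits. */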
+ while (ptr < end)
+ csum ^= *ptr++;
+
+ msg->checksum = upper_32_bits(csum) ^ lower_32_bits(csum);
+
+ wptr = *gsp->cmdq.wptr;
+ do {
+ do {
+ free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1;
+ if (free >= gsp->cmdq.cnt)
+ free -= gsp->cmdq.cnt;
+ if (free >= 1)
+ break;
+
+ usleep_range(1, 2);
+		} while (--time);
+
+ if (WARN_ON(!time)) {
+ kvfree(msg);
+ return -ETIMEDOUT;
+ }
+
+		cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000); /* first page is queue info */
+ step = min_t(u32, free, (gsp->cmdq.cnt - wptr));
+ size = min_t(u32, len, step * GSP_PAGE_SIZE);
+
+ memcpy(cqe, (u8 *)msg + off, size);
+
+ wptr += DIV_ROUND_UP(size, 0x1000);
+ if (wptr == gsp->cmdq.cnt)
+ wptr = 0;
+
+ off += size;
+ len -= size;
+ } while (len);
+
+ nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr);
+ wmb();
+ (*gsp->cmdq.wptr) = wptr;
+ mb();
+
+ nvkm_falcon_wr32(&gsp->falcon, 0xc00, 0x00000000);
+
+ kvfree(msg);
+ return 0;
+}
+
+static void *
+r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 gsp_rpc_len)
+{
+ struct r535_gsp_msg *msg;
+ u32 size = GSP_MSG_HDR_SIZE + gsp_rpc_len;
+
+ size = ALIGN(size, GSP_MSG_MIN_SIZE);
+ msg = kvzalloc(size, GFP_KERNEL);
+ if (!msg)
+ return ERR_PTR(-ENOMEM);
+
+	msg->checksum = gsp_rpc_len;	/* stash length; r535_gsp_cmdq_push() overwrites with the real checksum */
+ return msg->data;
+}
+
+static void
+r535_gsp_msg_done(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg)
+{
+ kvfree(msg);
+}
+
+static void
+r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl)
+{
+ if (gsp->subdev.debug >= lvl) {
+ nvkm_printk__(&gsp->subdev, lvl, info,
+ "msg fn:%d len:0x%x/0x%zx res:0x%x resp:0x%x\n",
+ msg->function, msg->length, msg->length - sizeof(*msg),
+ msg->rpc_result, msg->rpc_result_private);
+ print_hex_dump(KERN_INFO, "msg: ", DUMP_PREFIX_OFFSET, 16, 1,
+ msg->data, msg->length - sizeof(*msg), true);
+ }
+}
+
+struct nvfw_gsp_rpc *
+r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 gsp_rpc_len)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvfw_gsp_rpc *rpc;
+ int retries = 4000000, i;
+
+retry:
+ rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), &retries);
+ if (IS_ERR_OR_NULL(rpc))
+ return rpc;
+
+ rpc = r535_gsp_msgq_recv(gsp, gsp_rpc_len, &retries);
+ if (IS_ERR_OR_NULL(rpc))
+ return rpc;
+
+ if (rpc->rpc_result) {
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
+ r535_gsp_msg_done(gsp, rpc);
+ return ERR_PTR(-EINVAL);
+ }
+
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_TRACE);
+
+ if (fn && rpc->function == fn) {
+ if (gsp_rpc_len) {
+ if (rpc->length < gsp_rpc_len) {
+ nvkm_error(subdev, "rpc len %d < %d\n",
+ rpc->length, gsp_rpc_len);
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
+ r535_gsp_msg_done(gsp, rpc);
+ return ERR_PTR(-EIO);
+ }
+
+ return rpc;
+ }
+
+ r535_gsp_msg_done(gsp, rpc);
+ return NULL;
+ }
+
+ for (i = 0; i < gsp->msgq.ntfy_nr; i++) {
+ struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i];
+
+ if (ntfy->fn == rpc->function) {
+ if (ntfy->func)
+ ntfy->func(ntfy->priv, ntfy->fn, rpc->data,
+ rpc->length - sizeof(*rpc));
+ break;
+ }
+ }
+
+ if (i == gsp->msgq.ntfy_nr)
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_WARN);
+
+ r535_gsp_msg_done(gsp, rpc);
+ if (fn)
+ goto retry;
+
+ if (*gsp->msgq.rptr != *gsp->msgq.wptr)
+ goto retry;
+
+ return NULL;
+}
+
+int
+r535_gsp_msg_ntfy_add(struct nvkm_gsp *gsp, u32 fn, nvkm_gsp_msg_ntfy_func func, void *priv)
+{
+ int ret = 0;
+
+ mutex_lock(&gsp->msgq.mutex);
+ if (WARN_ON(gsp->msgq.ntfy_nr >= ARRAY_SIZE(gsp->msgq.ntfy))) {
+ ret = -ENOSPC;
+ } else {
+ gsp->msgq.ntfy[gsp->msgq.ntfy_nr].fn = fn;
+ gsp->msgq.ntfy[gsp->msgq.ntfy_nr].func = func;
+ gsp->msgq.ntfy[gsp->msgq.ntfy_nr].priv = priv;
+ gsp->msgq.ntfy_nr++;
+ }
+ mutex_unlock(&gsp->msgq.mutex);
+ return ret;
+}
+
+int
+r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn)
+{
+ void *repv;
+
+ mutex_lock(&gsp->cmdq.mutex);
+ repv = r535_gsp_msg_recv(gsp, fn, 0);
+ mutex_unlock(&gsp->cmdq.mutex);
+ if (IS_ERR(repv))
+ return PTR_ERR(repv);
+
+ return 0;
+}
+
+static void *
+r535_gsp_rpc_handle_reply(struct nvkm_gsp *gsp, u32 fn,
+ enum nvkm_gsp_rpc_reply_policy policy,
+ u32 gsp_rpc_len)
+{
+ struct nvfw_gsp_rpc *reply;
+ void *repv = NULL;
+
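+	/*
+	 * NOWAIT: fire-and-forget; RECV: wait for the reply and return its
+	 * payload; POLL: wait for completion but discard the payload.
+	 */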
+ switch (policy) {
+ case NVKM_GSP_RPC_REPLY_NOWAIT:
+ break;
+ case NVKM_GSP_RPC_REPLY_RECV:
+ reply = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len);
+ if (!IS_ERR_OR_NULL(reply))
+ repv = reply->data;
+ else
+ repv = reply;
+ break;
+ case NVKM_GSP_RPC_REPLY_POLL:
+ repv = r535_gsp_msg_recv(gsp, fn, 0);
+ break;
+ }
+
+ return repv;
+}
+
+static void *
+r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload,
+ enum nvkm_gsp_rpc_reply_policy policy, u32 gsp_rpc_len)
+{
+ struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
+ u32 fn = rpc->function;
+ int ret;
+
+ if (gsp->subdev.debug >= NV_DBG_TRACE) {
+ nvkm_trace(&gsp->subdev, "rpc fn:%d len:0x%x/0x%zx\n", rpc->function,
+ rpc->length, rpc->length - sizeof(*rpc));
+ print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1,
+ rpc->data, rpc->length - sizeof(*rpc), true);
+ }
+
+ ret = r535_gsp_cmdq_push(gsp, rpc);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return r535_gsp_rpc_handle_reply(gsp, fn, policy, gsp_rpc_len);
+}
+
+static void
+r535_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv)
+{
+ struct nvfw_gsp_rpc *rpc = container_of(repv, typeof(*rpc), data);
+
+ r535_gsp_msg_done(gsp, rpc);
+}
+
+static void *
+r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 payload_size)
+{
+ struct nvfw_gsp_rpc *rpc;
+
+ rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + payload_size,
+ sizeof(u64)));
+ if (IS_ERR(rpc))
+ return ERR_CAST(rpc);
+
+ rpc->header_version = 0x03000000;
+ rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V';
+ rpc->function = fn;
+ rpc->rpc_result = 0xffffffff;
+ rpc->rpc_result_private = 0xffffffff;
+ rpc->length = sizeof(*rpc) + payload_size;
+ return rpc->data;
+}
+
+static void *
+r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload,
+ enum nvkm_gsp_rpc_reply_policy policy, u32 gsp_rpc_len)
+{
+ struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
+ struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
+ const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*msg);
+ const u32 max_payload_size = max_rpc_size - sizeof(*rpc);
+ u32 payload_size = rpc->length - sizeof(*rpc);
+ void *repv;
+
+ mutex_lock(&gsp->cmdq.mutex);
+ if (payload_size > max_payload_size) {
+ const u32 fn = rpc->function;
+ u32 remain_payload_size = payload_size;
+
+ /* Adjust length, and send initial RPC. */
+ rpc->length = sizeof(*rpc) + max_payload_size;
+ msg->checksum = rpc->length;
+
+ repv = r535_gsp_rpc_send(gsp, payload, NVKM_GSP_RPC_REPLY_NOWAIT, 0);
+ if (IS_ERR(repv))
+ goto done;
+
+ payload += max_payload_size;
+ remain_payload_size -= max_payload_size;
+
+ /* Remaining chunks sent as CONTINUATION_RECORD RPCs. */
+ while (remain_payload_size) {
+ u32 size = min(remain_payload_size,
+ max_payload_size);
+ void *next;
+
+ next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size);
+ if (IS_ERR(next)) {
+ repv = next;
+ goto done;
+ }
+
+ memcpy(next, payload, size);
+
+ repv = r535_gsp_rpc_send(gsp, next, NVKM_GSP_RPC_REPLY_NOWAIT, 0);
+ if (IS_ERR(repv))
+ goto done;
+
+ payload += size;
+ remain_payload_size -= size;
+ }
+
+ /* Wait for reply. */
+ repv = r535_gsp_rpc_handle_reply(gsp, fn, policy, payload_size +
+ sizeof(*rpc));
+ } else {
+ repv = r535_gsp_rpc_send(gsp, payload, policy, gsp_rpc_len);
+ }
+
+done:
+ mutex_unlock(&gsp->cmdq.mutex);
+ return repv;
+}
+
+const struct nvkm_rm_api_rpc
+r535_rpc = {
+ .get = r535_gsp_rpc_get,
+ .push = r535_gsp_rpc_push,
+ .done = r535_gsp_rpc_done,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c
new file mode 100644
index 000000000000..52f2e5f14517
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <subdev/mmu/vmm.h>
+
+#include <nvhw/drf.h>
+#include "nvrm/vmm.h"
+
+void
+r535_mmu_vaspace_del(struct nvkm_vmm *vmm)
+{
+ if (vmm->rm.external) {
+ NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.object,
+ NV0080_CTRL_CMD_DMA_UNSET_PAGE_DIRECTORY,
+ sizeof(*ctrl));
+ if (!IS_ERR(ctrl)) {
+ ctrl->hVASpace = vmm->rm.object.handle;
+
+ WARN_ON(nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.object, ctrl));
+ }
+
+ vmm->rm.external = false;
+ }
+
+ nvkm_gsp_rm_free(&vmm->rm.object);
+ nvkm_gsp_device_dtor(&vmm->rm.device);
+ nvkm_gsp_client_dtor(&vmm->rm.client);
+
+ nvkm_vmm_put(vmm, &vmm->rm.rsvd);
+}
+
+int
+r535_mmu_vaspace_new(struct nvkm_vmm *vmm, u32 handle, bool external)
+{
+ NV_VASPACE_ALLOCATION_PARAMETERS *args;
+ int ret;
+
+ ret = nvkm_gsp_client_device_ctor(vmm->mmu->subdev.device->gsp,
+ &vmm->rm.client, &vmm->rm.device);
+ if (ret)
+ return ret;
+
+ args = nvkm_gsp_rm_alloc_get(&vmm->rm.device.object, handle, FERMI_VASPACE_A,
+ sizeof(*args), &vmm->rm.object);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->index = NV_VASPACE_ALLOCATION_INDEX_GPU_NEW;
+ if (external)
+ args->flags = NV_VASPACE_ALLOCATION_FLAGS_IS_EXTERNALLY_OWNED;
+
+ ret = nvkm_gsp_rm_alloc_wr(&vmm->rm.object, args);
+ if (ret)
+ return ret;
+
+ if (!external) {
+ NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *ctrl;
+ u8 page_shift = 29; /* 512MiB */
+ const u64 page_size = BIT_ULL(page_shift);
+ const struct nvkm_vmm_page *page;
+ const struct nvkm_vmm_desc *desc;
+ struct nvkm_vmm_pt *pd = vmm->pd;
+
+ for (page = vmm->func->page; page->shift; page++) {
+ if (page->shift == page_shift)
+ break;
+ }
+
+ if (WARN_ON(!page->shift))
+ return -EINVAL;
+
+ mutex_lock(&vmm->mutex.vmm);
+ ret = nvkm_vmm_get_locked(vmm, true, false, false, page_shift, 32, page_size,
+ &vmm->rm.rsvd);
+ mutex_unlock(&vmm->mutex.vmm);
+ if (ret)
+ return ret;
+
+ /* Some parts of RM expect the server-reserved area to be in a specific location. */
+ if (WARN_ON(vmm->rm.rsvd->addr != SPLIT_VAS_SERVER_RM_MANAGED_VA_START ||
+ vmm->rm.rsvd->size != SPLIT_VAS_SERVER_RM_MANAGED_VA_SIZE))
+ return -EINVAL;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.object,
+ NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->pageSize = page_size;
+ ctrl->virtAddrLo = vmm->rm.rsvd->addr;
+ ctrl->virtAddrHi = vmm->rm.rsvd->addr + vmm->rm.rsvd->size - 1;
+
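+		/* Count the page-table levels covering the reserved range. */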
+ for (desc = page->desc; desc->bits; desc++) {
+ ctrl->numLevelsToCopy++;
+ page_shift += desc->bits;
+ }
+ desc--;
+
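+		/* Describe each level for RM, walking down from the root page directory. */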
+ for (int i = 0; i < ctrl->numLevelsToCopy; i++, desc--) {
+ page_shift -= desc->bits;
+
+ ctrl->levels[i].physAddress = pd->pt[0]->addr;
+ ctrl->levels[i].size = (1 << desc->bits) * desc->size;
+ ctrl->levels[i].aperture = 1;
+ ctrl->levels[i].pageShift = page_shift;
+
+ pd = pd->pde[0];
+ }
+
+ ret = nvkm_gsp_rm_ctrl_wr(&vmm->rm.object, ctrl);
+ } else {
+ NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.object,
+ NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->physAddress = vmm->pd->pt[0]->addr;
+ ctrl->numEntries = 1 << vmm->func->page[0].desc->bits;
+ ctrl->flags = NVDEF(NV0080_CTRL_DMA_SET_PAGE_DIRECTORY, FLAGS, APERTURE, VIDMEM);
+ ctrl->hVASpace = vmm->rm.object.handle;
+
+ ret = nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.object, ctrl);
+ if (ret == 0)
+ vmm->rm.external = true;
+ }
+
+ return ret;
+}
+
+static int
+r535_mmu_promote_vmm(struct nvkm_vmm *vmm)
+{
+ return r535_mmu_vaspace_new(vmm, NVKM_RM_VASPACE, true);
+}
+
+static void
+r535_mmu_dtor(struct nvkm_mmu *mmu)
+{
+ kfree(mmu->func);
+}
+
+int
+r535_mmu_new(const struct nvkm_mmu_func *hw,
+ struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
+{
+ struct nvkm_mmu_func *rm;
+ int ret;
+
+ if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+ return -ENOMEM;
+
+ rm->dtor = r535_mmu_dtor;
+ rm->dma_bits = hw->dma_bits;
+ rm->mmu = hw->mmu;
+ rm->mem = hw->mem;
+ rm->vmm = hw->vmm;
+ rm->kind = hw->kind;
+ rm->kind_sys = hw->kind_sys;
+ rm->promote_vmm = r535_mmu_promote_vmm;
+
+ ret = nvkm_mmu_new_(rm, device, type, inst, pmmu);
+ if (ret)
+ kfree(rm);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild
new file mode 100644
index 000000000000..5db0e7009e1f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/subdev/gsp/rm/r570/rm.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/gsp.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/client.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/fbsr.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/disp.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/fifo.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/gr.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/ofa.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c
new file mode 100644
index 000000000000..87e6240662ed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+
+#include "nvrm/client.h"
+
+static int
+r570_gsp_client_ctor(struct nvkm_gsp_client *client, u32 handle)
+{
+ NV0000_ALLOC_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(&client->object, handle, NV01_ROOT, sizeof(*args),
+ &client->object);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->hClient = client->object.handle;
+ args->processID = ~0;
+
+ return nvkm_gsp_rm_alloc_wr(&client->object, args);
+}
+
+const struct nvkm_rm_api_client
+r570_client = {
+ .ctor = r570_gsp_client_ctor,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c
new file mode 100644
index 000000000000..a96e31c2d80b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+
+#include <engine/disp.h>
+#include <engine/disp/outp.h>
+
+#include "nvhw/drf.h"
+
+#include "nvrm/disp.h"
+
+static int
+r570_dmac_alloc(struct nvkm_disp *disp, u32 oclass, int inst, u32 put_offset,
+ struct nvkm_gsp_object *dmac)
+{
+ NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(&disp->rm.object, (oclass << 16) | inst, oclass,
+ sizeof(*args), dmac);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->channelInstance = inst;
+ args->offset = put_offset;
+ args->subDeviceId = BIT(0);
+
+ return nvkm_gsp_rm_alloc_wr(dmac, args);
+}
+
+static int
+r570_disp_chan_set_pushbuf(struct nvkm_disp *disp, s32 oclass, int inst, struct nvkm_memory *memory)
+{
+ struct nvkm_gsp *gsp = disp->rm.objcom.client->gsp;
+ NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ if (memory) {
+ switch (nvkm_memory_target(memory)) {
+ case NVKM_MEM_TARGET_NCOH:
+ ctrl->addressSpace = ADDR_SYSMEM;
+ ctrl->cacheSnoop = 0;
+ ctrl->pbTargetAperture = PHYS_PCI;
+ break;
+ case NVKM_MEM_TARGET_HOST:
+ ctrl->addressSpace = ADDR_SYSMEM;
+ ctrl->cacheSnoop = 1;
+ ctrl->pbTargetAperture = PHYS_PCI_COHERENT;
+ break;
+ case NVKM_MEM_TARGET_VRAM:
+ ctrl->addressSpace = ADDR_FBMEM;
+ ctrl->pbTargetAperture = PHYS_NVM;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ ctrl->physicalAddr = nvkm_memory_addr(memory);
+ ctrl->limit = nvkm_memory_size(memory) - 1;
+ }
+
+ ctrl->hclass = oclass;
+ ctrl->channelInstance = inst;
+	ctrl->valid = ((oclass & 0xff) != 0x7a) ? 1 : 0; /* cursor (xx7A) channels are PIO, no pushbuffer */
+ ctrl->subDeviceId = BIT(0);
+
+ return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+}
+
+static int
+r570_dp_set_indexed_link_rates(struct nvkm_outp *outp)
+{
+ NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *ctrl;
+ struct nvkm_disp *disp = outp->disp;
+
+ if (WARN_ON(outp->dp.rates > ARRAY_SIZE(ctrl->linkRateTbl)))
+ return -EINVAL;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->displayId = BIT(outp->index);
+ for (int i = 0; i < outp->dp.rates; i++)
+ ctrl->linkRateTbl[outp->dp.rate[i].dpcd] = outp->dp.rate[i].rate * 10 / 200;
+
+ return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+static int
+r570_dp_get_caps(struct nvkm_disp *disp, int *plink_bw, bool *pmst, bool *pwm)
+{
+ NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->sorIndex = ~0;
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
+
+ switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) {
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62:
+ *plink_bw = 0x06;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70:
+ *plink_bw = 0x0a;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40:
+ *plink_bw = 0x14;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10:
+ *plink_bw = 0x1e;
+ break;
+ default:
+ *plink_bw = 0x00;
+ break;
+ }
+
+ *pmst = ctrl->bIsMultistreamSupported;
+ *pwm = ctrl->bHasIncreasedWatermarkLimits;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r570_bl_ctrl(struct nvkm_disp *disp, unsigned display_id, bool set, int *pval)
+{
+ u32 cmd = set ? NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS :
+ NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS;
+ NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, cmd, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->displayId = BIT(display_id);
+ ctrl->brightness = *pval;
+ ctrl->brightnessType = NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT100;
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret)
+ return ret;
+
+ *pval = ctrl->brightness;
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r570_disp_get_active(struct nvkm_disp *disp, unsigned head, u32 *displayid)
+{
+ NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->head = head;
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
+
+ *displayid = ctrl->displayId;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r570_disp_get_connect_state(struct nvkm_disp *disp, unsigned display_id)
+{
+ NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayMask = BIT(display_id);
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret == 0 && (ctrl->displayMask & BIT(display_id)))
+ ret = 1;
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+}
+
+static int
+r570_disp_get_supported(struct nvkm_disp *disp, unsigned long *pmask)
+{
+ NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ *pmask = ctrl->displayMask;
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r570_disp_get_static_info(struct nvkm_disp *disp)
+{
+ NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl;
+ struct nvkm_gsp *gsp = disp->engine.subdev.device->gsp;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ disp->wndw.mask = ctrl->windowPresentMask;
+ disp->wndw.nr = fls(disp->wndw.mask);
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return 0;
+}
+
+const struct nvkm_rm_api_disp
+r570_disp = {
+ .get_static_info = r570_disp_get_static_info,
+ .get_supported = r570_disp_get_supported,
+ .get_connect_state = r570_disp_get_connect_state,
+ .get_active = r570_disp_get_active,
+ .bl_ctrl = r570_bl_ctrl,
+ .dp = {
+ .get_caps = r570_dp_get_caps,
+ .set_indexed_link_rates = r570_dp_set_indexed_link_rates,
+ },
+ .chan = {
+ .set_pushbuf = r570_disp_chan_set_pushbuf,
+ .dmac_alloc = r570_dmac_alloc,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c
new file mode 100644
index 000000000000..2945d5b4e570
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <subdev/instmem/priv.h>
+#include <subdev/bar.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu/vmm.h>
+
+#include "nvrm/fbsr.h"
+#include "nvrm/fifo.h"
+
+static int
+r570_fbsr_suspend_channels(struct nvkm_gsp *gsp, bool suspend)
+{
+ NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->bDisableActiveChannels = suspend;
+
+ return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+}
+
+static void
+r570_fbsr_resume(struct nvkm_gsp *gsp)
+{
+ struct nvkm_device *device = gsp->subdev.device;
+ struct nvkm_instmem *imem = device->imem;
+ struct nvkm_instobj *iobj;
+ struct nvkm_vmm *vmm;
+
+ /* Restore BAR2 page tables via BAR0 window, and re-enable BAR2. */
+ list_for_each_entry(iobj, &imem->boot, head) {
+ if (iobj->suspend)
+ nvkm_instobj_load(iobj);
+ }
+
+ device->bar->bar2 = true;
+
+ vmm = nvkm_bar_bar2_vmm(device);
+ vmm->func->flush(vmm, 0);
+
+ /* Restore remaining BAR2 allocations (including BAR1 page tables) via BAR2. */
+ list_for_each_entry(iobj, &imem->list, head) {
+ if (iobj->suspend)
+ nvkm_instobj_load(iobj);
+ }
+
+ vmm = nvkm_bar_bar1_vmm(device);
+ vmm->func->flush(vmm, 0);
+
+ /* Resume channel scheduling. */
+ r570_fbsr_suspend_channels(device->gsp, false);
+
+ /* Finish cleaning up. */
+ r535_fbsr_resume(gsp);
+}
+
+static int
+r570_fbsr_init(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size)
+{
+ NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS *ctrl;
+ struct nvkm_gsp_object memlist;
+ int ret;
+
+ ret = r535_fbsr_memlist(&gsp->internal.device, 0xcaf00003, NVKM_MEM_TARGET_HOST,
+ 0, size, sgt, &memlist);
+ if (ret)
+ return ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_FBSR_INIT, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->hClient = gsp->internal.client.object.handle;
+ ctrl->hSysMem = memlist.handle;
+ ctrl->sysmemAddrOfSuspendResumeData = gsp->sr.meta.addr;
+ ctrl->bEnteringGcoffState = 1;
+
+ ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+ if (ret)
+ return ret;
+
+ nvkm_gsp_rm_free(&memlist);
+ return 0;
+}
+
+static int
+r570_fbsr_suspend(struct nvkm_gsp *gsp)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_instmem *imem = device->imem;
+ struct nvkm_instobj *iobj;
+ u64 size;
+ int ret;
+
+ /* Stop channel scheduling. */
+ r570_fbsr_suspend_channels(gsp, true);
+
+ /* Save BAR2 allocations to system memory. */
+ list_for_each_entry(iobj, &imem->list, head) {
+ if (iobj->preserve) {
+ ret = nvkm_instobj_save(iobj);
+ if (ret)
+ return ret;
+ }
+ }
+
+ list_for_each_entry(iobj, &imem->boot, head) {
+ ret = nvkm_instobj_save(iobj);
+ if (ret)
+ return ret;
+ }
+
+ /* Disable BAR2 access. */
+ device->bar->bar2 = false;
+
+ /* Allocate system memory to hold RM's VRAM allocations across suspend. */
+ size = gsp->fb.heap.size;
+ size += gsp->fb.rsvd_size;
+ size += gsp->fb.bios.vga_workspace.size;
+ nvkm_debug(subdev, "fbsr: size: 0x%llx bytes\n", size);
+
+ ret = nvkm_gsp_sg(device, size, &gsp->sr.fbsr);
+ if (ret)
+ return ret;
+
+ /* Initialise FBSR on RM. */
+ ret = r570_fbsr_init(gsp, &gsp->sr.fbsr, size);
+ if (ret) {
+ nvkm_gsp_sg_free(device, &gsp->sr.fbsr);
+ return ret;
+ }
+
+ return 0;
+}
+
+const struct nvkm_rm_api_fbsr
+r570_fbsr = {
+ .suspend = r570_fbsr_suspend,
+ .resume = r570_fbsr_resume,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c
new file mode 100644
index 000000000000..79132805cfcf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+
+#include <subdev/mmu.h>
+#include <engine/fifo/priv.h>
+#include <engine/fifo/chan.h>
+#include <engine/fifo/runl.h>
+
+#include "nvhw/drf.h"
+
+#include "nvrm/fifo.h"
+#include "nvrm/engine.h"
+
+#define CHID_PER_USERD 8 /* USERD entries per USERD page */
+
+static int
+r570_chan_alloc(struct nvkm_gsp_device *device, u32 handle, u32 nv2080_engine_type, u8 runq,
+ bool priv, int chid, u64 inst_addr, u64 userd_addr, u64 mthdbuf_addr,
+ struct nvkm_vmm *vmm, u64 gpfifo_offset, u32 gpfifo_length,
+ struct nvkm_gsp_object *chan)
+{
+ struct nvkm_gsp *gsp = device->object.client->gsp;
+ struct nvkm_fifo *fifo = gsp->subdev.device->fifo;
+ const int userd_p = chid / CHID_PER_USERD;
+ const int userd_i = chid % CHID_PER_USERD;
+ NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(&device->object, handle,
+ fifo->func->chan.user.oclass, sizeof(*args), chan);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->gpFifoOffset = gpfifo_offset;
+ args->gpFifoEntries = gpfifo_length / 8;
+
+ args->flags = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL);
+ args->flags |= NVDEF(NVOS04, FLAGS, VPR, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE);
+ args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, runq);
+ if (!priv)
+ args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, FALSE);
+ else
+ args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE);
+ args->flags |= NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE);
+
+ args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, userd_i);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE);
+ args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, userd_p);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE);
+
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT);
+ args->flags |= NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);
+
+ args->hVASpace = vmm->rm.object.handle;
+ args->engineType = nv2080_engine_type;
+
+ args->instanceMem.base = inst_addr;
+ args->instanceMem.size = fifo->func->chan.func->inst->size;
+ args->instanceMem.addressSpace = 2;
+ args->instanceMem.cacheAttrib = 1;
+
+ args->userdMem.base = userd_addr;
+ args->userdMem.size = fifo->func->chan.func->userd->size;
+ args->userdMem.addressSpace = 2;
+ args->userdMem.cacheAttrib = 1;
+
+ args->ramfcMem.base = inst_addr;
+ args->ramfcMem.size = 0x200;
+ args->ramfcMem.addressSpace = 2;
+ args->ramfcMem.cacheAttrib = 1;
+
+ args->mthdbufMem.base = mthdbuf_addr;
+ args->mthdbufMem.size = fifo->rm.mthdbuf_size;
+ args->mthdbufMem.addressSpace = 1;
+ args->mthdbufMem.cacheAttrib = 0;
+
+ if (!priv)
+ args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, USER);
+ else
+ args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN);
+ args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE);
+ args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);
+
+ return nvkm_gsp_rm_alloc_wr(chan, args);
+}
+
+static int
+r570_fifo_rc_triggered(void *priv, u32 fn, void *repv, u32 repc)
+{
+ rpc_rc_triggered_v17_02 *msg = repv;
+ struct nvkm_gsp *gsp = priv;
+
+ if (WARN_ON(repc < sizeof(*msg)))
+ return -EINVAL;
+
+ nvkm_error(&gsp->subdev, "rc engn:%08x chid:%d gfid:%d level:%d type:%d scope:%d part:%d "
+ "fault_addr:%08x%08x fault_type:%08x\n",
+ msg->nv2080EngineType, msg->chid, msg->gfid, msg->exceptLevel, msg->exceptType,
+ msg->scope, msg->partitionAttributionId,
+ msg->mmuFaultAddrHi, msg->mmuFaultAddrLo, msg->mmuFaultType);
+
+ r535_fifo_rc_chid(gsp->subdev.device->fifo, msg->chid);
+ return 0;
+}
+
+static int
+r570_fifo_ectx_size(struct nvkm_fifo *fifo)
+{
+ NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS *ctrl;
+ struct nvkm_gsp *gsp = fifo->engine.subdev.device->gsp;
+ struct nvkm_runl *runl;
+ struct nvkm_engn *engn;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_GPU_GET_CONSTRUCTED_FALCON_INFO,
+ sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return PTR_ERR(ctrl);
+
+ for (int i = 0; i < ctrl->numConstructedFalcons; i++) {
+ nvkm_runl_foreach(runl, fifo) {
+ nvkm_runl_foreach_engn(engn, runl) {
+ if (engn->rm.desc == ctrl->constructedFalconsTable[i].engDesc) {
+ engn->rm.size =
+ ctrl->constructedFalconsTable[i].ctxBufferSize;
+ break;
+ }
+ }
+ }
+ }
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return 0;
+}
+
+static int
+r570_fifo_xlat_rm_engine_type(u32 rm, enum nvkm_subdev_type *ptype, int *p2080)
+{
+#define RM_ENGINE_TYPE(RM,NVKM,INST) \
+ RM_ENGINE_TYPE_##RM: \
+ *ptype = NVKM_ENGINE_##NVKM; \
+ *p2080 = NV2080_ENGINE_TYPE_##RM; \
+ return INST
+
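+	/*
+	 * Each case expands to "case RM_ENGINE_TYPE_<RM>:", the two
+	 * assignments above, and "return <INST>;".
+	 */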
+ switch (rm) {
+ case RM_ENGINE_TYPE( GR0, GR, 0);
+ case RM_ENGINE_TYPE( COPY0, CE, 0);
+ case RM_ENGINE_TYPE( COPY1, CE, 1);
+ case RM_ENGINE_TYPE( COPY2, CE, 2);
+ case RM_ENGINE_TYPE( COPY3, CE, 3);
+ case RM_ENGINE_TYPE( COPY4, CE, 4);
+ case RM_ENGINE_TYPE( COPY5, CE, 5);
+ case RM_ENGINE_TYPE( COPY6, CE, 6);
+ case RM_ENGINE_TYPE( COPY7, CE, 7);
+ case RM_ENGINE_TYPE( COPY8, CE, 8);
+ case RM_ENGINE_TYPE( COPY9, CE, 9);
+ case RM_ENGINE_TYPE( COPY10, CE, 10);
+ case RM_ENGINE_TYPE( COPY11, CE, 11);
+ case RM_ENGINE_TYPE( COPY12, CE, 12);
+ case RM_ENGINE_TYPE( COPY13, CE, 13);
+ case RM_ENGINE_TYPE( COPY14, CE, 14);
+ case RM_ENGINE_TYPE( COPY15, CE, 15);
+ case RM_ENGINE_TYPE( COPY16, CE, 16);
+ case RM_ENGINE_TYPE( COPY17, CE, 17);
+ case RM_ENGINE_TYPE( COPY18, CE, 18);
+ case RM_ENGINE_TYPE( COPY19, CE, 19);
+ case RM_ENGINE_TYPE( NVDEC0, NVDEC, 0);
+ case RM_ENGINE_TYPE( NVDEC1, NVDEC, 1);
+ case RM_ENGINE_TYPE( NVDEC2, NVDEC, 2);
+ case RM_ENGINE_TYPE( NVDEC3, NVDEC, 3);
+ case RM_ENGINE_TYPE( NVDEC4, NVDEC, 4);
+ case RM_ENGINE_TYPE( NVDEC5, NVDEC, 5);
+ case RM_ENGINE_TYPE( NVDEC6, NVDEC, 6);
+ case RM_ENGINE_TYPE( NVDEC7, NVDEC, 7);
+ case RM_ENGINE_TYPE( NVENC0, NVENC, 0);
+ case RM_ENGINE_TYPE( NVENC1, NVENC, 1);
+ case RM_ENGINE_TYPE( NVENC2, NVENC, 2);
+ case RM_ENGINE_TYPE( NVENC3, NVENC, 3);
+ case RM_ENGINE_TYPE(NVJPEG0, NVJPG, 0);
+ case RM_ENGINE_TYPE(NVJPEG1, NVJPG, 1);
+ case RM_ENGINE_TYPE(NVJPEG2, NVJPG, 2);
+ case RM_ENGINE_TYPE(NVJPEG3, NVJPG, 3);
+ case RM_ENGINE_TYPE(NVJPEG4, NVJPG, 4);
+ case RM_ENGINE_TYPE(NVJPEG5, NVJPG, 5);
+ case RM_ENGINE_TYPE(NVJPEG6, NVJPG, 6);
+ case RM_ENGINE_TYPE(NVJPEG7, NVJPG, 7);
+ case RM_ENGINE_TYPE( SW, SW, 0);
+ case RM_ENGINE_TYPE( SEC2, SEC2, 0);
+ case RM_ENGINE_TYPE( OFA0, OFA, 0);
+ case RM_ENGINE_TYPE( OFA1, OFA, 1);
+ default:
+ return -EINVAL;
+ }
+#undef RM_ENGINE_TYPE
+}
+
+const struct nvkm_rm_api_fifo
+r570_fifo = {
+ .xlat_rm_engine_type = r570_fifo_xlat_rm_engine_type,
+ .ectx_size = r570_fifo_ectx_size,
+ .rsvd_chids = 1,
+ .rc_triggered = r570_fifo_rc_triggered,
+ .chan = {
+ .alloc = r570_chan_alloc,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c
new file mode 100644
index 000000000000..b6cced9b8aa1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/gr.h>
+
+#include <subdev/mmu.h>
+#include <engine/fifo.h>
+#include <engine/fifo/chid.h>
+#include <engine/gr/priv.h>
+
+#include "nvrm/gr.h"
+#include "nvrm/engine.h"
+
+int
+r570_gr_tpc_mask(struct nvkm_gsp *gsp, int gpc, u32 *pmask)
+{
+ NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_GPU_GET_FERMI_TPC_INFO, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->gpcId = gpc;
+
+ ret = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, &ctrl, sizeof(*ctrl));
+ if (ret)
+ return ret;
+
+ *pmask = ctrl->tpcMask;
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return 0;
+}
+
+int
+r570_gr_gpc_mask(struct nvkm_gsp *gsp, u32 *pmask)
+{
+ NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_GPU_GET_FERMI_GPC_INFO, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ *pmask = ctrl->gpcMask;
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return 0;
+}
+
+static int
+r570_gr_scrubber_ctrl(struct r535_gr *gr, bool teardown)
+{
+ NV2080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gr->scrubber.vmm->rm.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_KGR_INIT_BUG4208224_WAR,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->bTeardown = teardown;
+
+ return nvkm_gsp_rm_ctrl_wr(&gr->scrubber.vmm->rm.device.subdevice, ctrl);
+}
+
+static void
+r570_gr_scrubber_fini(struct r535_gr *gr)
+{
+ /* Teardown scrubber channel on RM. */
+ if (gr->scrubber.enabled) {
+ WARN_ON(r570_gr_scrubber_ctrl(gr, true));
+ gr->scrubber.enabled = false;
+ }
+
+ /* Free scrubber channel. */
+ nvkm_gsp_rm_free(&gr->scrubber.threed);
+ nvkm_gsp_rm_free(&gr->scrubber.chan);
+
+ for (int i = 0; i < gr->ctxbuf_nr; i++) {
+ nvkm_vmm_put(gr->scrubber.vmm, &gr->scrubber.ctxbuf.vma[i]);
+ nvkm_memory_unref(&gr->scrubber.ctxbuf.mem[i]);
+ }
+
+ nvkm_vmm_unref(&gr->scrubber.vmm);
+ nvkm_memory_unref(&gr->scrubber.inst);
+}
+
+static int
+r570_gr_scrubber_init(struct r535_gr *gr)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_gsp *gsp = device->gsp;
+ struct nvkm_rm *rm = gsp->rm;
+ int ret;
+
+ /* Scrubber channel only required on TU10x. */
+ switch (device->chipset) {
+	case 0x162: /* TU102 */
+	case 0x164: /* TU104 */
+	case 0x166: /* TU106 */
+ break;
+ default:
+ return 0;
+ }
+
+ if (gr->scrubber.chid < 0) {
+ gr->scrubber.chid = nvkm_chid_get(device->fifo->chid, NULL);
+ if (gr->scrubber.chid < 0)
+ return gr->scrubber.chid;
+ }
+
+ /* Allocate scrubber channel. */
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+ 0x2000 + device->fifo->rm.mthdbuf_size, 0, true,
+ &gr->scrubber.inst);
+ if (ret)
+ goto done;
+
+ ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grScrubberVmm",
+ &gr->scrubber.vmm);
+ if (ret)
+ goto done;
+
+ ret = r535_mmu_vaspace_new(gr->scrubber.vmm, KGRAPHICS_SCRUBBER_HANDLE_VAS, false);
+ if (ret)
+ goto done;
+
+ ret = rm->api->fifo->chan.alloc(&gr->scrubber.vmm->rm.device, KGRAPHICS_SCRUBBER_HANDLE_CHANNEL,
+ NV2080_ENGINE_TYPE_GR0, 0, false, gr->scrubber.chid,
+ nvkm_memory_addr(gr->scrubber.inst),
+ nvkm_memory_addr(gr->scrubber.inst) + 0x1000,
+ nvkm_memory_addr(gr->scrubber.inst) + 0x2000,
+ gr->scrubber.vmm, 0, 0x1000, &gr->scrubber.chan);
+ if (ret)
+ goto done;
+
+ ret = r535_gr_promote_ctx(gr, false, gr->scrubber.vmm, gr->scrubber.ctxbuf.mem,
+ gr->scrubber.ctxbuf.vma, &gr->scrubber.chan);
+ if (ret)
+ goto done;
+
+ ret = nvkm_gsp_rm_alloc(&gr->scrubber.chan, KGRAPHICS_SCRUBBER_HANDLE_3DOBJ,
+ rm->gpu->gr.class.threed, 0, &gr->scrubber.threed);
+ if (ret)
+ goto done;
+
+ /* Initialise scrubber channel on RM. */
+ ret = r570_gr_scrubber_ctrl(gr, false);
+ if (ret)
+ goto done;
+
+ gr->scrubber.enabled = true;
+
+done:
+ if (ret)
+ r570_gr_scrubber_fini(gr);
+
+ return ret;
+}
+
+static int
+r570_gr_get_ctxbufs_info(struct r535_gr *gr)
+{
+ NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info;
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_gsp *gsp = subdev->device->gsp;
+
+ info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO,
+ sizeof(*info));
+ if (WARN_ON(IS_ERR(info)))
+ return PTR_ERR(info);
+
+ for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++)
+ r535_gr_get_ctxbuf_info(gr, i, &info->engineContextBuffersInfo[0].engine[i]);
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info);
+ return 0;
+}
+
+const struct nvkm_rm_api_gr
+r570_gr = {
+ .get_ctxbufs_info = r570_gr_get_ctxbufs_info,
+ .scrubber.init = r570_gr_scrubber_init,
+ .scrubber.fini = r570_gr_scrubber_fini,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c
new file mode 100644
index 000000000000..9d2fa4e66d59
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+#include <rm/rpc.h>
+
+#include <asm-generic/video.h>
+
+#include "nvrm/gsp.h"
+#include "nvrm/rpcfn.h"
+#include "nvrm/msgfn.h"
+
+#include <core/pci.h>
+#include <subdev/pci/priv.h>
+
+static u32
+r570_gsp_sr_data_size(struct nvkm_gsp *gsp)
+{
+ GspFwWprMeta *meta = gsp->wpr_meta.data;
+
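+	/* Span from the end of the non-WPR heap to the end of the FRTS region. */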
+ return (meta->frtsOffset + meta->frtsSize) -
+ (meta->nonWprHeapOffset + meta->nonWprHeapSize);
+}
+
+static void
+r570_gsp_drop_post_nocat_record(struct nvkm_gsp *gsp)
+{
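+	/*
+	 * Registering a NULL handler consumes these noisy events silently
+	 * instead of dumping them at warn level.
+	 */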
+ if (gsp->subdev.debug < NV_DBG_DEBUG) {
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD, NULL, NULL);
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE, NULL, NULL);
+ }
+}
+
+static bool
+r570_gsp_xlat_mc_engine_idx(u32 mc_engine_idx, enum nvkm_subdev_type *ptype, int *pinst)
+{
+ switch (mc_engine_idx) {
+ case MC_ENGINE_IDX_GSP:
+ *ptype = NVKM_SUBDEV_GSP;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_DISP:
+ *ptype = NVKM_ENGINE_DISP;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE19:
+ *ptype = NVKM_ENGINE_CE;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_CE0;
+ return true;
+ case MC_ENGINE_IDX_GR0:
+ *ptype = NVKM_ENGINE_GR;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7:
+ *ptype = NVKM_ENGINE_NVDEC;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_NVDEC0;
+ return true;
+ case MC_ENGINE_IDX_NVENC ... MC_ENGINE_IDX_NVENC3:
+ *ptype = NVKM_ENGINE_NVENC;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_NVENC;
+ return true;
+ case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7:
+ *ptype = NVKM_ENGINE_NVJPG;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_NVJPEG0;
+ return true;
+ case MC_ENGINE_IDX_OFA0 ... MC_ENGINE_IDX_OFA1:
+ *ptype = NVKM_ENGINE_OFA;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_OFA0;
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int
+r570_gsp_get_static_info(struct nvkm_gsp *gsp)
+{
+ GspStaticConfigInfo *rpc;
+ u32 gpc_mask;
+ u32 tpc_mask;
+ int ret;
+
+ rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc));
+ if (IS_ERR(rpc))
+ return PTR_ERR(rpc);
+
+ gsp->internal.client.object.client = &gsp->internal.client;
+ gsp->internal.client.object.parent = NULL;
+ gsp->internal.client.object.handle = rpc->hInternalClient;
+ gsp->internal.client.gsp = gsp;
+ INIT_LIST_HEAD(&gsp->internal.client.events);
+
+ gsp->internal.device.object.client = &gsp->internal.client;
+ gsp->internal.device.object.parent = &gsp->internal.client.object;
+ gsp->internal.device.object.handle = rpc->hInternalDevice;
+
+ gsp->internal.device.subdevice.client = &gsp->internal.client;
+ gsp->internal.device.subdevice.parent = &gsp->internal.device.object;
+ gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice;
+
+ gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase;
+ gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase;
+
+ r535_gsp_get_static_info_fb(gsp, &rpc->fbRegionInfoParams);
+
+ if (gsp->rm->wpr->offset_set_by_acr) {
+ GspFwWprMeta *meta = gsp->wpr_meta.data;
+
+ meta->nonWprHeapOffset = rpc->fwWprLayoutOffset.nonWprHeapOffset;
+ meta->frtsOffset = rpc->fwWprLayoutOffset.frtsOffset;
+ }
+
+ nvkm_gsp_rpc_done(gsp, rpc);
+
+ ret = r570_gr_gpc_mask(gsp, &gpc_mask);
+ if (ret)
+ return ret;
+
+ for (int gpc = 0; gpc < 32; gpc++) {
+ if (gpc_mask & BIT(gpc)) {
+ ret = r570_gr_tpc_mask(gsp, gpc, &tpc_mask);
+ if (ret)
+ return ret;
+
+ gsp->gr.tpcs += hweight32(tpc_mask);
+ gsp->gr.gpcs++;
+ }
+ }
+
+ return 0;
+}
+
+static void
+r570_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi)
+{
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+ acpi_handle handle = ACPI_HANDLE(gsp->subdev.device->dev);
+
+ if (!handle)
+ return;
+
+ acpi->bValid = 1;
+
+ r535_gsp_acpi_dod(handle, &acpi->dodMethodData);
+ r535_gsp_acpi_jt(handle, &acpi->jtMethodData);
+ r535_gsp_acpi_caps(handle, &acpi->capsMethodData);
+#endif
+}
+
+static int
+r570_gsp_set_system_info(struct nvkm_gsp *gsp)
+{
+ struct nvkm_device *device = gsp->subdev.device;
+ struct pci_dev *pdev = container_of(device, struct nvkm_device_pci, device)->pdev;
+ GspSystemInfo *info;
+
+ if (WARN_ON(device->type == NVKM_DEVICE_TEGRA))
+ return -ENOSYS;
+
+ info = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO, sizeof(*info));
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ info->gpuPhysAddr = device->func->resource_addr(device, NVKM_BAR0_PRI);
+ info->gpuPhysFbAddr = device->func->resource_addr(device, NVKM_BAR1_FB);
+ info->gpuPhysInstAddr = device->func->resource_addr(device, NVKM_BAR2_INST);
+ info->nvDomainBusDeviceFunc = pci_dev_id(pdev);
+ info->maxUserVa = TASK_SIZE;
+ info->pciConfigMirrorBase = device->pci->func->cfg.addr;
+ info->pciConfigMirrorSize = device->pci->func->cfg.size;
+ info->PCIDeviceID = (pdev->device << 16) | pdev->vendor;
+ info->PCISubDeviceID = (pdev->subsystem_device << 16) | pdev->subsystem_vendor;
+ info->PCIRevisionID = pdev->revision;
+ r570_gsp_acpi_info(gsp, &info->acpiMethodData);
+ info->bIsPrimary = video_is_primary_device(device->dev);
+ info->bPreserveVideoMemoryAllocations = false;
+
+ return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT);
+}
+
+static void
+r570_gsp_set_rmargs(struct nvkm_gsp *gsp, bool resume)
+{
+ GSP_ARGUMENTS_CACHED *args;
+
+ args = gsp->rmargs.data;
+ args->messageQueueInitArguments.sharedMemPhysAddr = gsp->shm.mem.addr;
+ args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr;
+ args->messageQueueInitArguments.cmdQueueOffset =
+ (u8 *)gsp->shm.cmdq.ptr - (u8 *)gsp->shm.mem.data;
+ args->messageQueueInitArguments.statQueueOffset =
+ (u8 *)gsp->shm.msgq.ptr - (u8 *)gsp->shm.mem.data;
+
+ if (!resume) {
+ args->srInitArguments.oldLevel = 0;
+ args->srInitArguments.flags = 0;
+ args->srInitArguments.bInPMTransition = 0;
+ } else {
+ args->srInitArguments.oldLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3;
+ args->srInitArguments.flags = 0;
+ args->srInitArguments.bInPMTransition = 1;
+ }
+
+ args->bDmemStack = 1;
+}
+
+const struct nvkm_rm_api_gsp
+r570_gsp = {
+ .set_rmargs = r570_gsp_set_rmargs,
+ .set_system_info = r570_gsp_set_system_info,
+ .get_static_info = r570_gsp_get_static_info,
+ .xlat_mc_engine_idx = r570_gsp_xlat_mc_engine_idx,
+ .drop_post_nocat_record = r570_gsp_drop_post_nocat_record,
+ .sr_data_size = r570_gsp_sr_data_size,
+};
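
The PCI ID packing in r570_gsp_set_system_info() above puts the device ID in
the upper 16 bits and the vendor ID in the lower 16. A minimal standalone
sketch of the same packing; the 0x10de/0x2684 values are illustrative
assumptions, not taken from this patch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t vendor = 0x10de; /* assumed example: NVIDIA PCI vendor ID */
        uint16_t device = 0x2684; /* assumed example device ID */

        /* Mirrors "info->PCIDeviceID = (pdev->device << 16) | pdev->vendor". */
        uint32_t pci_device_id = ((uint32_t)device << 16) | vendor;

        printf("PCIDeviceID = 0x%08x\n", pci_device_id); /* prints 0x268410de */
        return 0;
    }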
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h
new file mode 100644
index 000000000000..e8714e0abc37
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_CLIENT_H__
+#define __NVRM_CLIENT_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV01_ROOT (0x0U) /* finn: Evaluated from "NV0000_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+#define NV_PROC_NAME_MAX_LENGTH 100U
+
+typedef struct NV0000_ALLOC_PARAMETERS {
+ NvHandle hClient; /* CORERM-2934: hClient must remain the first member until all allocations use these params */
+ NvU32 processID;
+ char processName[NV_PROC_NAME_MAX_LENGTH];
+ NV_DECLARE_ALIGNED(NvP64 pOsPidInfo, 8);
+} NV0000_ALLOC_PARAMETERS;
+#endif
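
Allocating the NV01_ROOT client would zero these parameters and fill in the
process identity first. A hedged sketch: rm_alloc() and the handle/PID
variables are assumptions for illustration, and the driver's real path is
its RPC alloc helper.

    NV0000_ALLOC_PARAMETERS params = {0};

    params.hClient = client_handle; /* assumed handle; must stay the first member (CORERM-2934) */
    params.processID = task_pid;    /* assumed: PID of the owning process */
    strscpy(params.processName, "nouveau", sizeof(params.processName));

    /* assumed entry point taking (parent handle, class, params, size) */
    ret = rm_alloc(root, NV01_ROOT, &params, sizeof(params));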
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h
new file mode 100644
index 000000000000..06e972835d77
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_DISP_H__
+#define __NVRM_DISP_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO (0x20800a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS {
+ NvU32 feHwSysCap;
+ NvU32 windowPresentMask;
+ NvBool bFbRemapperEnabled;
+ NvU32 numHeads;
+ NvU32 i2cPort;
+ NvU32 internalDispActiveMask;
+ NvU32 embeddedDisplayPortMask;
+ NvBool bExternalMuxSupported;
+ NvBool bInternalMuxSupported;
+ NvU32 numDispChannels;
+} NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS;
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED (0x730107U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayMask;
+ NvU32 displayMaskDDC;
+} NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS;
+
+#define NV0073_CTRL_MAX_CONNECTORS 4U
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA (0x730250U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 flags;
+ NvU32 DDCPartners;
+ NvU32 count;
+ struct {
+ NvU32 index;
+ NvU32 type;
+ NvU32 location;
+ } data[NV0073_CTRL_MAX_CONNECTORS];
+ NvU32 platform;
+} NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DSC_CAP_PARAMS {
+ NvBool bDscSupported;
+ NvU32 encoderColorFormatMask;
+ NvU32 lineBufferSizeKB;
+ NvU32 rateBufferSizeKB;
+ NvU32 bitsPerPixelPrecision;
+ NvU32 maxNumHztSlices;
+ NvU32 lineBufferBitDepth;
+} NV0073_CTRL_CMD_DSC_CAP_PARAMS;
+
+typedef struct NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 sorIndex;
+ NvU32 maxLinkRate;
+ NvU32 dpVersionsSupported;
+ NvU32 UHBRSupportedByGpu;
+ NvU32 minPClkForCompressed;
+ NvBool bIsMultistreamSupported;
+ NvBool bIsSCEnabled;
+ NvBool bHasIncreasedWatermarkLimits;
+ NvBool bIsPC2Disabled;
+ NvBool isSingleHeadMSTSupported;
+ NvBool bFECSupported;
+ NvBool bIsTrainPhyRepeater;
+ NvBool bOverrideLinkBw;
+ NvBool bUseRgFlushSequence;
+ NvBool bSupportDPDownSpread;
+ NV0073_CTRL_CMD_DSC_CAP_PARAMS DSC;
+} NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
+#define NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID (0x69U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2 0:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4 1:1
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP2_0 2:2
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP2_0_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP2_0_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE 2:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62 (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40 (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR10_0 0:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR10_0_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR10_0_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR13_5 1:1
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR13_5_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR13_5_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR20_0 2:2
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR20_0_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR20_0_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000008U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16 (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4 (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1 (0x00000005U)
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE (0x730108U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 flags;
+ NvU32 displayMask;
+ NvU32 retryTimeMs;
+} NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS;
+
+#define NV0073_CTRL_CMD_DFP_GET_INFO (0x731140U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DFP_GET_INFO_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 flags;
+ NvU32 UHBRSupportedByDfp;
+} NV0073_CTRL_DFP_GET_INFO_PARAMS;
+
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL 2:0
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_TMDS (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_LVDS (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_SDI (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DSI (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_WRBK (0x00000005U)
+#define NV0073_CTRL_DFP_FLAGS_LANE 5:3
+#define NV0073_CTRL_DFP_FLAGS_LANE_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_SINGLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_DUAL (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_QUAD (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_OCT (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT 6:6
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_DISABLE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_60HZ_RR (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER 7:7
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_NORMAL (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_DISABLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE 8:8
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE 9:9
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE 10:10
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE 11:11
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE 12:12
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR 13:13
+#define NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED 14:14
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT 15:15
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT 16:16
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_PREFER_RBR (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW 19:17
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LINK 21:20
+#define NV0073_CTRL_DFP_FLAGS_LINK_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_SINGLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_DUAL (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID 22:22
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID 24:23
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_A (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_B (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_GANGED (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED 25:25
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_PHY_REPEATER_COUNT 29:26
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE 30:30
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_10_0GBPS 0:0
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_10_0GBPS_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_10_0GBPS_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_13_5GBPS 1:1
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_13_5GBPS_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_13_5GBPS_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_20_0GBPS 2:2
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_20_0GBPS_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_20_0GBPS_TRUE (0x00000001U)
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE (0x73010cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 head;
+ NvU32 flags;
+ NvU32 displayId;
+} NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS (0x730292U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS (0x730291U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 brightness;
+ NvBool bUncalibrated;
+ NvU8 brightnessType;
+} NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES (0x731377U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES 8U
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS {
+ // In
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU16 linkRateTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+
+ // Out
+ NvU16 linkBwTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+ NvU8 linkBwCount;
+} NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_CTRL (0x731343U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_CTRL_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 cmd;
+ NvU32 data;
+ NvU32 err;
+ NvU32 retryTimeMs;
+ NvU32 eightLaneDpcdBaseAddr;
+} NV0073_CTRL_DP_CTRL_PARAMS;
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 head;
+ NvU32 sorIndex;
+ NvU32 dpLink;
+
+ NvBool bEnableOverride;
+ NvBool bMST;
+ NvU32 singleHeadMultistreamMode;
+ NvU32 hBlankSym;
+ NvU32 vBlankSym;
+ NvU32 colorFormat;
+ NvBool bEnableTwoHeadOneOr;
+
+ struct {
+ NvU32 slotStart;
+ NvU32 slotEnd;
+ NvU32 PBN;
+ NvU32 Timeslice;
+        NvBool sendACT;          // deprecated - use NV0073_CTRL_CMD_DP_SEND_ACT
+ NvU32 singleHeadMSTPipeline;
+ NvBool bEnableAudioOverRightPanel;
+ } MST;
+
+ struct {
+ NvBool bEnhancedFraming;
+ NvU32 tuSize;
+ NvU32 waterMark;
+ NvBool bEnableAudioOverRightPanel;
+ } SST;
+} NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM (0x731359U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 mute;
+} NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER (0x20800a58) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS {
+ NvU32 addressSpace;
+ NV_DECLARE_ALIGNED(NvU64 physicalAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 limit, 8);
+ NvU32 cacheSnoop;
+ NvU32 hclass;
+ NvU32 channelInstance;
+ NvBool valid;
+ NvU32 pbTargetAperture;
+ NvU32 channelPBSize;
+ NvU32 subDeviceId;
+} NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS;
+
+#define ADDR_SYSMEM (1)
+
+#define ADDR_FBMEM 2 // Frame buffer memory space
+
+typedef enum
+{
+ PB_SIZE_4KB = 0,
+ PB_SIZE_8KB,
+ PB_SIZE_16KB,
+ PB_SIZE_32KB,
+ PB_SIZE_64KB
+} ChannelPBSize;
+
+typedef struct
+{
+ NvV32 channelInstance; // One of the n channel instances of a given channel type.
+ // Note that core channel has only one instance
+ // while all others have two (one per head).
+ NvHandle hObjectBuffer; // ctx dma handle for DMA push buffer
+ NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors/notifications
+ NvU32 offset; // Initial offset for put/get, usually zero.
+ NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of UDISP GET/PUT regs
+
+ NvU32 flags;
+ ChannelPBSize channelPBSize; // Size of Push Buffer requested by client (allowed values in enum)
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB 1:1
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_YES 0x00000000
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_NO 0x00000001
+
+ NvU32 subDeviceId; // One-hot encoded subDeviceId (i.e. SDM) that will be used to address the channel in the pushbuffer stream (via SSDM method)
+} NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS;
+
+#define NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT100 1
+#define NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT1000 2
+#define NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_NITS 3
+
+typedef enum
+{
+ IOVA,
+ PHYS_NVM,
+ PHYS_PCI,
+ PHYS_PCI_COHERENT
+} PBTARGETAPERTURE;
+#endif
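
The "high:low" pairs in the defines above (e.g. NV0073_CTRL_DFP_FLAGS_SIGNAL
spans bits 2:0) name bit ranges inside a 32-bit word. A hand-rolled sketch of
extracting such a field, assuming high < 31; the driver itself uses its
DRF-style helpers rather than open-coding this:

    #include <stdint.h>

    /* Extract bits high..low (inclusive) from a 32-bit value. */
    static uint32_t field_get(uint32_t v, unsigned int high, unsigned int low)
    {
        return (v >> low) & ((1u << (high - low + 1)) - 1);
    }

    /* For a DisplayPort DFP, field_get(flags, 2, 0) returns
     * NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT (0x3). */

The ChannelPBSize enum is similarly compact: the pushbuffer size is
4 KiB << value, from PB_SIZE_4KB (0) up to PB_SIZE_64KB (4).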
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h
new file mode 100644
index 000000000000..7997050a4f29
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h
@@ -0,0 +1,318 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_ENGINE_H__
+#define __NVRM_ENGINE_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define MC_ENGINE_IDX_NULL 0 // This must be 0
+#define MC_ENGINE_IDX_TMR 1
+#define MC_ENGINE_IDX_DISP 2
+#define MC_ENGINE_IDX_FB 3
+#define MC_ENGINE_IDX_FIFO 4
+#define MC_ENGINE_IDX_VIDEO 5
+#define MC_ENGINE_IDX_MD 6
+#define MC_ENGINE_IDX_BUS 7
+#define MC_ENGINE_IDX_PMGR 8
+#define MC_ENGINE_IDX_VP2 9
+#define MC_ENGINE_IDX_CIPHER 10
+#define MC_ENGINE_IDX_BIF 11
+#define MC_ENGINE_IDX_PPP 12
+#define MC_ENGINE_IDX_PRIVRING 13
+#define MC_ENGINE_IDX_PMU 14
+#define MC_ENGINE_IDX_CE0 15
+#define MC_ENGINE_IDX_CE1 16
+#define MC_ENGINE_IDX_CE2 17
+#define MC_ENGINE_IDX_CE3 18
+#define MC_ENGINE_IDX_CE4 19
+#define MC_ENGINE_IDX_CE5 20
+#define MC_ENGINE_IDX_CE6 21
+#define MC_ENGINE_IDX_CE7 22
+#define MC_ENGINE_IDX_CE8 23
+#define MC_ENGINE_IDX_CE9 24
+#define MC_ENGINE_IDX_CE10 25
+#define MC_ENGINE_IDX_CE11 26
+#define MC_ENGINE_IDX_CE12 27
+#define MC_ENGINE_IDX_CE13 28
+#define MC_ENGINE_IDX_CE14 29
+#define MC_ENGINE_IDX_CE15 30
+#define MC_ENGINE_IDX_CE16 31
+#define MC_ENGINE_IDX_CE17 32
+#define MC_ENGINE_IDX_CE18 33
+#define MC_ENGINE_IDX_CE19 34
+#define MC_ENGINE_IDX_CE_MAX MC_ENGINE_IDX_CE19
+#define MC_ENGINE_IDX_VIC 35
+#define MC_ENGINE_IDX_ISOHUB 36
+#define MC_ENGINE_IDX_VGPU 37
+#define MC_ENGINE_IDX_NVENC 38
+#define MC_ENGINE_IDX_NVENC1 39
+#define MC_ENGINE_IDX_NVENC2 40
+#define MC_ENGINE_IDX_NVENC3 41
+#define MC_ENGINE_IDX_C2C 42
+#define MC_ENGINE_IDX_LTC 43
+#define MC_ENGINE_IDX_FBHUB 44
+#define MC_ENGINE_IDX_HDACODEC 45
+#define MC_ENGINE_IDX_GMMU 46
+#define MC_ENGINE_IDX_SEC2 47
+#define MC_ENGINE_IDX_FSP 48
+#define MC_ENGINE_IDX_NVLINK 49
+#define MC_ENGINE_IDX_GSP 50
+#define MC_ENGINE_IDX_NVJPG 51
+#define MC_ENGINE_IDX_NVJPEG MC_ENGINE_IDX_NVJPG
+#define MC_ENGINE_IDX_NVJPEG0 MC_ENGINE_IDX_NVJPEG
+#define MC_ENGINE_IDX_NVJPEG1 52
+#define MC_ENGINE_IDX_NVJPEG2 53
+#define MC_ENGINE_IDX_NVJPEG3 54
+#define MC_ENGINE_IDX_NVJPEG4 55
+#define MC_ENGINE_IDX_NVJPEG5 56
+#define MC_ENGINE_IDX_NVJPEG6 57
+#define MC_ENGINE_IDX_NVJPEG7 58
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT 59
+#define MC_ENGINE_IDX_ACCESS_CNTR 60
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT 61
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT_ERROR 62
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_ERROR 63
+#define MC_ENGINE_IDX_INFO_FAULT 64
+#define MC_ENGINE_IDX_BSP 65
+#define MC_ENGINE_IDX_NVDEC MC_ENGINE_IDX_BSP
+#define MC_ENGINE_IDX_NVDEC0 MC_ENGINE_IDX_NVDEC
+#define MC_ENGINE_IDX_NVDEC1 66
+#define MC_ENGINE_IDX_NVDEC2 67
+#define MC_ENGINE_IDX_NVDEC3 68
+#define MC_ENGINE_IDX_NVDEC4 69
+#define MC_ENGINE_IDX_NVDEC5 70
+#define MC_ENGINE_IDX_NVDEC6 71
+#define MC_ENGINE_IDX_NVDEC7 72
+#define MC_ENGINE_IDX_CPU_DOORBELL 73
+#define MC_ENGINE_IDX_PRIV_DOORBELL 74
+#define MC_ENGINE_IDX_MMU_ECC_ERROR 75
+#define MC_ENGINE_IDX_BLG 76
+#define MC_ENGINE_IDX_PERFMON 77
+#define MC_ENGINE_IDX_BUF_RESET 78
+#define MC_ENGINE_IDX_XBAR 79
+#define MC_ENGINE_IDX_ZPW 80
+#define MC_ENGINE_IDX_OFA0 81
+#define MC_ENGINE_IDX_OFA1 82
+#define MC_ENGINE_IDX_TEGRA 83
+#define MC_ENGINE_IDX_GR 84
+#define MC_ENGINE_IDX_GR0 MC_ENGINE_IDX_GR
+#define MC_ENGINE_IDX_GR1 85
+#define MC_ENGINE_IDX_GR2 86
+#define MC_ENGINE_IDX_GR3 87
+#define MC_ENGINE_IDX_GR4 88
+#define MC_ENGINE_IDX_GR5 89
+#define MC_ENGINE_IDX_GR6 90
+#define MC_ENGINE_IDX_GR7 91
+#define MC_ENGINE_IDX_ESCHED 92
+#define MC_ENGINE_IDX_ESCHED__SIZE 64
+#define MC_ENGINE_IDX_GR_FECS_LOG 156
+#define MC_ENGINE_IDX_GR0_FECS_LOG MC_ENGINE_IDX_GR_FECS_LOG
+#define MC_ENGINE_IDX_GR1_FECS_LOG 157
+#define MC_ENGINE_IDX_GR2_FECS_LOG 158
+#define MC_ENGINE_IDX_GR3_FECS_LOG 159
+#define MC_ENGINE_IDX_GR4_FECS_LOG 160
+#define MC_ENGINE_IDX_GR5_FECS_LOG 161
+#define MC_ENGINE_IDX_GR6_FECS_LOG 162
+#define MC_ENGINE_IDX_GR7_FECS_LOG 163
+#define MC_ENGINE_IDX_TMR_SWRL 164
+#define MC_ENGINE_IDX_DISP_GSP 165
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT_CPU 166
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_CPU 167
+#define MC_ENGINE_IDX_PXUC 168
+#define MC_ENGINE_IDX_SYSLTC 169
+#define MC_ENGINE_IDX_LRCC 170
+#define MC_ENGINE_IDX_GSPLITE 171
+#define MC_ENGINE_IDX_GSPLITE0 MC_ENGINE_IDX_GSPLITE
+#define MC_ENGINE_IDX_GSPLITE1 172
+#define MC_ENGINE_IDX_GSPLITE2 173
+#define MC_ENGINE_IDX_GSPLITE3 174
+#define MC_ENGINE_IDX_GSPLITE_MAX MC_ENGINE_IDX_GSPLITE3
+#define MC_ENGINE_IDX_DPAUX 175
+#define MC_ENGINE_IDX_DISP_LOW 176
+#define MC_ENGINE_IDX_MAX 177
+
+typedef enum
+{
+ RM_ENGINE_TYPE_NULL = (0x00000000),
+ RM_ENGINE_TYPE_GR0 = (0x00000001),
+ RM_ENGINE_TYPE_GR1 = (0x00000002),
+ RM_ENGINE_TYPE_GR2 = (0x00000003),
+ RM_ENGINE_TYPE_GR3 = (0x00000004),
+ RM_ENGINE_TYPE_GR4 = (0x00000005),
+ RM_ENGINE_TYPE_GR5 = (0x00000006),
+ RM_ENGINE_TYPE_GR6 = (0x00000007),
+ RM_ENGINE_TYPE_GR7 = (0x00000008),
+ RM_ENGINE_TYPE_COPY0 = (0x00000009),
+ RM_ENGINE_TYPE_COPY1 = (0x0000000a),
+ RM_ENGINE_TYPE_COPY2 = (0x0000000b),
+ RM_ENGINE_TYPE_COPY3 = (0x0000000c),
+ RM_ENGINE_TYPE_COPY4 = (0x0000000d),
+ RM_ENGINE_TYPE_COPY5 = (0x0000000e),
+ RM_ENGINE_TYPE_COPY6 = (0x0000000f),
+ RM_ENGINE_TYPE_COPY7 = (0x00000010),
+ RM_ENGINE_TYPE_COPY8 = (0x00000011),
+ RM_ENGINE_TYPE_COPY9 = (0x00000012),
+ RM_ENGINE_TYPE_COPY10 = (0x00000013),
+ RM_ENGINE_TYPE_COPY11 = (0x00000014),
+ RM_ENGINE_TYPE_COPY12 = (0x00000015),
+ RM_ENGINE_TYPE_COPY13 = (0x00000016),
+ RM_ENGINE_TYPE_COPY14 = (0x00000017),
+ RM_ENGINE_TYPE_COPY15 = (0x00000018),
+ RM_ENGINE_TYPE_COPY16 = (0x00000019),
+ RM_ENGINE_TYPE_COPY17 = (0x0000001a),
+ RM_ENGINE_TYPE_COPY18 = (0x0000001b),
+ RM_ENGINE_TYPE_COPY19 = (0x0000001c),
+ RM_ENGINE_TYPE_NVDEC0 = (0x0000001d),
+ RM_ENGINE_TYPE_NVDEC1 = (0x0000001e),
+ RM_ENGINE_TYPE_NVDEC2 = (0x0000001f),
+ RM_ENGINE_TYPE_NVDEC3 = (0x00000020),
+ RM_ENGINE_TYPE_NVDEC4 = (0x00000021),
+ RM_ENGINE_TYPE_NVDEC5 = (0x00000022),
+ RM_ENGINE_TYPE_NVDEC6 = (0x00000023),
+ RM_ENGINE_TYPE_NVDEC7 = (0x00000024),
+ RM_ENGINE_TYPE_NVENC0 = (0x00000025),
+ RM_ENGINE_TYPE_NVENC1 = (0x00000026),
+ RM_ENGINE_TYPE_NVENC2 = (0x00000027),
+ // Bug 4175886 - Use this new value for all chips once GB20X is released
+ RM_ENGINE_TYPE_NVENC3 = (0x00000028),
+ RM_ENGINE_TYPE_VP = (0x00000029),
+ RM_ENGINE_TYPE_ME = (0x0000002a),
+ RM_ENGINE_TYPE_PPP = (0x0000002b),
+ RM_ENGINE_TYPE_MPEG = (0x0000002c),
+ RM_ENGINE_TYPE_SW = (0x0000002d),
+ RM_ENGINE_TYPE_TSEC = (0x0000002e),
+ RM_ENGINE_TYPE_VIC = (0x0000002f),
+ RM_ENGINE_TYPE_MP = (0x00000030),
+ RM_ENGINE_TYPE_SEC2 = (0x00000031),
+ RM_ENGINE_TYPE_HOST = (0x00000032),
+ RM_ENGINE_TYPE_DPU = (0x00000033),
+ RM_ENGINE_TYPE_PMU = (0x00000034),
+ RM_ENGINE_TYPE_FBFLCN = (0x00000035),
+ RM_ENGINE_TYPE_NVJPEG0 = (0x00000036),
+ RM_ENGINE_TYPE_NVJPEG1 = (0x00000037),
+ RM_ENGINE_TYPE_NVJPEG2 = (0x00000038),
+ RM_ENGINE_TYPE_NVJPEG3 = (0x00000039),
+ RM_ENGINE_TYPE_NVJPEG4 = (0x0000003a),
+ RM_ENGINE_TYPE_NVJPEG5 = (0x0000003b),
+ RM_ENGINE_TYPE_NVJPEG6 = (0x0000003c),
+ RM_ENGINE_TYPE_NVJPEG7 = (0x0000003d),
+ RM_ENGINE_TYPE_OFA0 = (0x0000003e),
+ RM_ENGINE_TYPE_OFA1 = (0x0000003f),
+ RM_ENGINE_TYPE_RESERVED40 = (0x00000040),
+ RM_ENGINE_TYPE_RESERVED41 = (0x00000041),
+ RM_ENGINE_TYPE_RESERVED42 = (0x00000042),
+ RM_ENGINE_TYPE_RESERVED43 = (0x00000043),
+ RM_ENGINE_TYPE_RESERVED44 = (0x00000044),
+ RM_ENGINE_TYPE_RESERVED45 = (0x00000045),
+ RM_ENGINE_TYPE_RESERVED46 = (0x00000046),
+ RM_ENGINE_TYPE_RESERVED47 = (0x00000047),
+ RM_ENGINE_TYPE_RESERVED48 = (0x00000048),
+ RM_ENGINE_TYPE_RESERVED49 = (0x00000049),
+ RM_ENGINE_TYPE_RESERVED4a = (0x0000004a),
+ RM_ENGINE_TYPE_RESERVED4b = (0x0000004b),
+ RM_ENGINE_TYPE_RESERVED4c = (0x0000004c),
+ RM_ENGINE_TYPE_RESERVED4d = (0x0000004d),
+ RM_ENGINE_TYPE_RESERVED4e = (0x0000004e),
+ RM_ENGINE_TYPE_RESERVED4f = (0x0000004f),
+ RM_ENGINE_TYPE_RESERVED50 = (0x00000050),
+ RM_ENGINE_TYPE_RESERVED51 = (0x00000051),
+ RM_ENGINE_TYPE_RESERVED52 = (0x00000052),
+ RM_ENGINE_TYPE_RESERVED53 = (0x00000053),
+ RM_ENGINE_TYPE_LAST = (0x00000054),
+} RM_ENGINE_TYPE;
+
+#define NV2080_ENGINE_TYPE_NULL (0x00000000)
+#define NV2080_ENGINE_TYPE_GRAPHICS (0x00000001)
+#define NV2080_ENGINE_TYPE_GR0 NV2080_ENGINE_TYPE_GRAPHICS
+#define NV2080_ENGINE_TYPE_GR1 (0x00000002)
+#define NV2080_ENGINE_TYPE_GR2 (0x00000003)
+#define NV2080_ENGINE_TYPE_GR3 (0x00000004)
+#define NV2080_ENGINE_TYPE_GR4 (0x00000005)
+#define NV2080_ENGINE_TYPE_GR5 (0x00000006)
+#define NV2080_ENGINE_TYPE_GR6 (0x00000007)
+#define NV2080_ENGINE_TYPE_GR7 (0x00000008)
+#define NV2080_ENGINE_TYPE_COPY0 (0x00000009)
+#define NV2080_ENGINE_TYPE_COPY1 (0x0000000a)
+#define NV2080_ENGINE_TYPE_COPY2 (0x0000000b)
+#define NV2080_ENGINE_TYPE_COPY3 (0x0000000c)
+#define NV2080_ENGINE_TYPE_COPY4 (0x0000000d)
+#define NV2080_ENGINE_TYPE_COPY5 (0x0000000e)
+#define NV2080_ENGINE_TYPE_COPY6 (0x0000000f)
+#define NV2080_ENGINE_TYPE_COPY7 (0x00000010)
+#define NV2080_ENGINE_TYPE_COPY8 (0x00000011)
+#define NV2080_ENGINE_TYPE_COPY9 (0x00000012)
+#define NV2080_ENGINE_TYPE_BSP (0x00000013)
+#define NV2080_ENGINE_TYPE_NVDEC0 NV2080_ENGINE_TYPE_BSP
+#define NV2080_ENGINE_TYPE_NVDEC1 (0x00000014)
+#define NV2080_ENGINE_TYPE_NVDEC2 (0x00000015)
+#define NV2080_ENGINE_TYPE_NVDEC3 (0x00000016)
+#define NV2080_ENGINE_TYPE_NVDEC4 (0x00000017)
+#define NV2080_ENGINE_TYPE_NVDEC5 (0x00000018)
+#define NV2080_ENGINE_TYPE_NVDEC6 (0x00000019)
+#define NV2080_ENGINE_TYPE_NVDEC7 (0x0000001a)
+#define NV2080_ENGINE_TYPE_MSENC (0x0000001b)
+#define NV2080_ENGINE_TYPE_NVENC0 NV2080_ENGINE_TYPE_MSENC /* Mutually exclusive alias */
+#define NV2080_ENGINE_TYPE_NVENC1 (0x0000001c)
+#define NV2080_ENGINE_TYPE_NVENC2 (0x0000001d)
+#define NV2080_ENGINE_TYPE_VP (0x0000001e)
+#define NV2080_ENGINE_TYPE_ME (0x0000001f)
+#define NV2080_ENGINE_TYPE_PPP (0x00000020)
+#define NV2080_ENGINE_TYPE_MPEG (0x00000021)
+#define NV2080_ENGINE_TYPE_SW (0x00000022)
+#define NV2080_ENGINE_TYPE_CIPHER (0x00000023)
+#define NV2080_ENGINE_TYPE_TSEC NV2080_ENGINE_TYPE_CIPHER
+#define NV2080_ENGINE_TYPE_VIC (0x00000024)
+#define NV2080_ENGINE_TYPE_MP (0x00000025)
+#define NV2080_ENGINE_TYPE_SEC2 (0x00000026)
+#define NV2080_ENGINE_TYPE_HOST (0x00000027)
+#define NV2080_ENGINE_TYPE_DPU (0x00000028)
+#define NV2080_ENGINE_TYPE_PMU (0x00000029)
+#define NV2080_ENGINE_TYPE_FBFLCN (0x0000002a)
+#define NV2080_ENGINE_TYPE_NVJPG (0x0000002b)
+#define NV2080_ENGINE_TYPE_NVJPEG0 NV2080_ENGINE_TYPE_NVJPG
+#define NV2080_ENGINE_TYPE_NVJPEG1 (0x0000002c)
+#define NV2080_ENGINE_TYPE_NVJPEG2 (0x0000002d)
+#define NV2080_ENGINE_TYPE_NVJPEG3 (0x0000002e)
+#define NV2080_ENGINE_TYPE_NVJPEG4 (0x0000002f)
+#define NV2080_ENGINE_TYPE_NVJPEG5 (0x00000030)
+#define NV2080_ENGINE_TYPE_NVJPEG6 (0x00000031)
+#define NV2080_ENGINE_TYPE_NVJPEG7 (0x00000032)
+#define NV2080_ENGINE_TYPE_OFA (0x00000033)
+#define NV2080_ENGINE_TYPE_OFA0 NV2080_ENGINE_TYPE_OFA
+#define NV2080_ENGINE_TYPE_COPY10 (0x00000034)
+#define NV2080_ENGINE_TYPE_COPY11 (0x00000035)
+#define NV2080_ENGINE_TYPE_COPY12 (0x00000036)
+#define NV2080_ENGINE_TYPE_COPY13 (0x00000037)
+#define NV2080_ENGINE_TYPE_COPY14 (0x00000038)
+#define NV2080_ENGINE_TYPE_COPY15 (0x00000039)
+#define NV2080_ENGINE_TYPE_COPY16 (0x0000003a)
+#define NV2080_ENGINE_TYPE_COPY17 (0x0000003b)
+#define NV2080_ENGINE_TYPE_COPY18 (0x0000003c)
+#define NV2080_ENGINE_TYPE_COPY19 (0x0000003d)
+#define NV2080_ENGINE_TYPE_OFA1 (0x0000003e)
+#define NV2080_ENGINE_TYPE_NVENC3 (0x0000003f)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY0 (0x00000040)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY1 (0x00000041)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY2 (0x00000042)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY3 (0x00000043)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY4 (0x00000044)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY5 (0x00000045)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY6 (0x00000046)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY7 (0x00000047)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY8 (0x00000048)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY9 (0x00000049)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY10 (0x0000004a)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY11 (0x0000004b)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY12 (0x0000004c)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY13 (0x0000004d)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY14 (0x0000004e)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY15 (0x0000004f)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY16 (0x00000050)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY17 (0x00000051)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY18 (0x00000052)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY19 (0x00000053)
+#define NV2080_ENGINE_TYPE_LAST (0x00000054)
+#endif
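
Both tables keep per-instance values contiguous, so instance-indexed lookups
reduce to arithmetic rather than switch statements. Hypothetical helpers
illustrating that property, valid for the instance counts listed above:

    /* MC interrupt index of NVDEC unit n: NVDEC0 is 65 and NVDEC1..NVDEC7
     * follow consecutively, so the index is 65 + n for n = 0..7. */
    static inline int mc_engine_idx_nvdec(int inst)
    {
        return MC_ENGINE_IDX_NVDEC0 + inst;
    }

    /* RM engine type of copy engine n: COPY0 (0x09) through COPY19 (0x1c)
     * are consecutive, so this holds for n = 0..19. */
    static inline RM_ENGINE_TYPE rm_engine_type_ce(int inst)
    {
        return (RM_ENGINE_TYPE)(RM_ENGINE_TYPE_COPY0 + inst);
    }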
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h
new file mode 100644
index 000000000000..8af432375f7a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_FBSR_H__
+#define __NVRM_FBSR_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV2080_CTRL_CMD_INTERNAL_FBSR_INIT (0x20800ac2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS {
+ NvHandle hClient;
+ NvHandle hSysMem;
+ NvBool bEnteringGcoffState;
+ NV_DECLARE_ALIGNED(NvU64 sysmemAddrOfSuspendResumeData, 8);
+} NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS;
+
+#endif
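
This single control seeds GSP-RM's framebuffer suspend/resume machinery with
a system-memory staging buffer. A hedged sketch of filling it in; every
right-hand-side name is an assumption for illustration:

    NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS params = {0};

    params.hClient = internal_client; /* assumed: internal RMAPI client handle */
    params.hSysMem = sysmem_object;   /* assumed: handle of the sysmem backing object */
    params.bEnteringGcoffState = NV_FALSE; /* plain suspend, not GC-OFF */
    params.sysmemAddrOfSuspendResumeData = buf_addr; /* assumed: address of the staging buffer */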
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fifo.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fifo.h
new file mode 100644
index 000000000000..2b002ca64e0f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fifo.h
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_FIFO_H__
+#define __NVRM_FIFO_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV_MAX_SUBDEVICES 8
+
+typedef struct NV_MEMORY_DESC_PARAMS {
+ NV_DECLARE_ALIGNED(NvU64 base, 8);
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+ NvU32 addressSpace;
+ NvU32 cacheAttrib;
+} NV_MEMORY_DESC_PARAMS;
+
+#define CC_CHAN_ALLOC_IV_SIZE_DWORD 3U
+
+#define CC_CHAN_ALLOC_NONCE_SIZE_DWORD 8U
+
+typedef struct NV_CHANNEL_ALLOC_PARAMS {
+
+ NvHandle hObjectError; // error context DMA
+ NvHandle hObjectBuffer; // no longer used
+ NV_DECLARE_ALIGNED(NvU64 gpFifoOffset, 8); // offset to beginning of GP FIFO
+ NvU32 gpFifoEntries; // number of GP FIFO entries
+
+ NvU32 flags;
+
+
+ NvHandle hContextShare; // context share handle
+ NvHandle hVASpace; // VASpace for the channel
+
+ // handle to UserD memory object for channel, ignored if hUserdMemory[0]=0
+ NvHandle hUserdMemory[NV_MAX_SUBDEVICES];
+
+ // offset to beginning of UserD within hUserdMemory[x]
+ NV_DECLARE_ALIGNED(NvU64 userdOffset[NV_MAX_SUBDEVICES], 8);
+
+    // engine type (NV2080_ENGINE_TYPE_*) with which this channel is associated
+ NvU32 engineType;
+    // Channel identifier that is unique for the duration of an RM session
+ NvU32 cid;
+ // One-hot encoded bitmask to match SET_SUBDEVICE_MASK methods
+ NvU32 subDeviceId;
+ NvHandle hObjectEccError; // ECC error context DMA
+
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS instanceMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS userdMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS ramfcMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS mthdbufMem, 8);
+
+ NvHandle hPhysChannelGroup; // reserved
+ NvU32 internalFlags; // reserved
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS errorNotifierMem, 8); // reserved
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS eccErrorNotifierMem, 8); // reserved
+ NvU32 ProcessID; // reserved
+ NvU32 SubProcessID; // reserved
+
+ // IV used for CPU-side encryption / GPU-side decryption.
+ NvU32 encryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
+ // IV used for CPU-side decryption / GPU-side encryption.
+ NvU32 decryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
+    // Nonce used for CPU-side signing / GPU-side signature verification.
+ NvU32 hmacNonce[CC_CHAN_ALLOC_NONCE_SIZE_DWORD]; // reserved
+ NvU32 tpcConfigID; // TPC Configuration Id as supported by DTD-PG Feature
+} NV_CHANNEL_ALLOC_PARAMS;
+
+typedef NV_CHANNEL_ALLOC_PARAMS NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS;
+
+#define NVOS04_FLAGS_CHANNEL_TYPE 1:0
+#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL 0x00000000
+#define NVOS04_FLAGS_CHANNEL_TYPE_VIRTUAL 0x00000001 // OBSOLETE
+#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL_FOR_VIRTUAL 0x00000002 // OBSOLETE
+#define NVOS04_FLAGS_VPR 2:2
+#define NVOS04_FLAGS_VPR_FALSE 0x00000000
+#define NVOS04_FLAGS_VPR_TRUE 0x00000001
+#define NVOS04_FLAGS_CC_SECURE 2:2
+#define NVOS04_FLAGS_CC_SECURE_FALSE 0x00000000
+#define NVOS04_FLAGS_CC_SECURE_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING 3:3
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_TRUE 0x00000001
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE 4:4
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_DEFAULT 0x00000000
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_ONE 0x00000001
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL 5:5
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_FALSE 0x00000000
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_TRUE 0x00000001
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING 6:6
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_FALSE 0x00000000
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE 7:7
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE 10:8
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED 11:11
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_VALUE 20:12
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED 21:21
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV 22:22
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER 23:23
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO 24:24
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_TRUE 0x00000001
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL 25:25
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_FALSE 0x00000000
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT 26:26
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT 27:27
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_TRUE 0x00000001
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD 29:28
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_DEFAULT 0x00000000
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_ONE 0x00000001
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_TWO 0x00000002
+#define NVOS04_FLAGS_MAP_CHANNEL 30:30
+#define NVOS04_FLAGS_MAP_CHANNEL_FALSE 0x00000000
+#define NVOS04_FLAGS_MAP_CHANNEL_TRUE 0x00000001
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC 31:31
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_FALSE 0x00000000
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_TRUE 0x00000001
+
+typedef enum {
+ /*!
+ * Initial state as passed in NV_CHANNEL_ALLOC_PARAMS by
+ * kernel CPU-RM clients.
+ */
+ ERROR_NOTIFIER_TYPE_UNKNOWN = 0,
+ /*! @brief Error notifier is explicitly not set.
+ *
+ * The corresponding hErrorContext or hEccErrorContext must be
+ * NV01_NULL_OBJECT.
+ */
+ ERROR_NOTIFIER_TYPE_NONE,
+ /*! @brief Error notifier is a ContextDma */
+ ERROR_NOTIFIER_TYPE_CTXDMA,
+ /*! @brief Error notifier is a NvNotification array in sysmem/vidmem */
+ ERROR_NOTIFIER_TYPE_MEMORY
+} ErrorNotifierType;
+
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE 1:0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER 0x0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN 0x1
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL 0x2
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE 3:2
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE 5:4
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_GSP_OWNED 6:6
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_GSP_OWNED_NO 0x0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_GSP_OWNED_YES 0x1
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_UVM_OWNED 7:7
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_UVM_OWNED_NO 0x0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_UVM_OWNED_YES 0x1
+
+typedef struct rpc_rc_triggered_v17_02
+{
+ NvU32 nv2080EngineType;
+ NvU32 chid;
+ NvU32 gfid;
+ NvU32 exceptLevel;
+ NvU32 exceptType;
+ NvU32 scope;
+ NvU16 partitionAttributionId;
+ NvU32 mmuFaultAddrLo;
+ NvU32 mmuFaultAddrHi;
+ NvU32 mmuFaultType;
+ NvBool bCallbackNeeded;
+ NvU32 rcJournalBufferSize;
+ NvU8 rcJournalBuffer[];
+} rpc_rc_triggered_v17_02;
+
+#define NV2080_CTRL_GPU_MAX_CONSTRUCTED_FALCONS 0x40
+
+typedef struct NV2080_CTRL_GPU_CONSTRUCTED_FALCON_INFO {
+ NvU32 engDesc;
+ NvU32 ctxAttr;
+ NvU32 ctxBufferSize;
+ NvU32 addrSpaceList;
+ NvU32 registerBase;
+} NV2080_CTRL_GPU_CONSTRUCTED_FALCON_INFO;
+
+#define NV2080_CTRL_CMD_GPU_GET_CONSTRUCTED_FALCON_INFO (0x208001b0) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS {
+ NvU32 numConstructedFalcons;
+ NV2080_CTRL_GPU_CONSTRUCTED_FALCON_INFO constructedFalconsTable[NV2080_CTRL_GPU_MAX_CONSTRUCTED_FALCONS];
+} NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS {
+ NvBool bDisableActiveChannels;
+} NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING (0x20800ac3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS_MESSAGE_ID" */
+#endif
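
A channel allocation fills NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS with the
GP FIFO location and the target engine. A hedged sketch; the right-hand-side
variables are assumptions, and the real driver derives them from its channel
state:

    NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS args = {0};

    args.gpFifoOffset = gpfifo_addr;  /* assumed: GPU VA of the GP FIFO ring */
    args.gpFifoEntries = 0x400;       /* assumed ring size */
    args.engineType = NV2080_ENGINE_TYPE_GRAPHICS;
    args.hVASpace = vaspace_handle;   /* assumed: VA-space object of the channel */
    args.hObjectError = error_ctxdma; /* assumed: error-notifier context handle */

Kernel callers additionally pack privilege and notifier-type bits into
internalFlags using the NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_* fields above.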
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h
new file mode 100644
index 000000000000..feed1dabd9d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_GR_H__
+#define __NVRM_GR_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO (0x20800a32) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES 8
+
+#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT 0x1a
+
+typedef struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO {
+ NvU32 size;
+ NvU32 alignment;
+} NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO;
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO {
+ NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO engine[NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT];
+} NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO;
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS {
+ NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO engineContextBuffersInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES];
+} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS;
+
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID 4:0
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS (0x00000000)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VLD (0x00000001)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VIDEO (0x00000002)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_MPEG (0x00000003)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_CAPTURE (0x00000004)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_DISPLAY (0x00000005)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_ENCRYPTION (0x00000006)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_POSTPROCESS (0x00000007)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ZCULL (0x00000008)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PM (0x00000009)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COMPUTE_PREEMPT (0x0000000a)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PREEMPT (0x0000000b)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_SPILL (0x0000000c)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL (0x0000000d)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BETACB (0x0000000e)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV (0x0000000f)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PATCH (0x00000010)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BUNDLE_CB (0x00000011)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL_GLOBAL (0x00000012)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ATTRIBUTE_CB (0x00000013)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV_CB_GLOBAL (0x00000014)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_POOL (0x00000015)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_CTRL_BLK (0x00000016)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_FECS_EVENT (0x00000017)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PRIV_ACCESS_MAP (0x00000018)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_SETUP (0x00000019)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT (0x0000001a)
+
+#define NV2080_CTRL_CMD_GPU_GET_FERMI_GPC_INFO (0x20800137U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS {
+ NvU32 gpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS;
+
+#define NV2080_CTRL_CMD_GPU_GET_FERMI_TPC_INFO (0x20800138U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS {
+ NvU32 gpcId;
+ NvU32 tpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS;
+
+#define KGRAPHICS_SCRUBBER_HANDLE_VAS 0xdada0042
+#define KGRAPHICS_SCRUBBER_HANDLE_CHANNEL (KGRAPHICS_SCRUBBER_HANDLE_VAS + 3)
+#define KGRAPHICS_SCRUBBER_HANDLE_3DOBJ (KGRAPHICS_SCRUBBER_HANDLE_VAS + 4)
+
+typedef struct NV2080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS {
+ NvBool bTeardown;
+} NV2080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_KGR_INIT_BUG4208224_WAR (0x20800a46) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_KGR_INIT_BUG4208224_WAR_PARAMS_MESSAGE_ID" */
+#endif
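
These two controls back the GPC/TPC walk in r570_gsp_get_static_info()
earlier in this patch. A condensed sketch of the pattern; rm_ctrl() and the
subdevice variable are assumptions standing in for the driver's control-call
helper:

    NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS gpc = {0};
    NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS tpc = {0};

    rm_ctrl(subdevice, NV2080_CTRL_CMD_GPU_GET_FERMI_GPC_INFO, &gpc, sizeof(gpc));

    for (int i = 0; i < 32; i++) { /* 32 == MAX_GPC_COUNT */
        if (!(gpc.gpcMask & BIT(i)))
            continue;

        tpc.gpcId = i;
        rm_ctrl(subdevice, NV2080_CTRL_CMD_GPU_GET_FERMI_TPC_INFO, &tpc, sizeof(tpc));
        /* tpc.tpcMask now holds the enabled TPCs of GPC i */
    }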
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h
new file mode 100644
index 000000000000..b6075021e74f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h
@@ -0,0 +1,634 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_GSP_H__
+#define __NVRM_GSP_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES 16U
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES 17U
+
+typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES];
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO {
+ NV_DECLARE_ALIGNED(NvU64 base, 8);
+ NV_DECLARE_ALIGNED(NvU64 limit, 8);
+ NV_DECLARE_ALIGNED(NvU64 reserved, 8);
+ NvU32 performance;
+ NvBool supportCompressed;
+ NvBool supportISO;
+ NvBool bProtected;
+ NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG blackList;
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO;
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS {
+ NvU32 numFBRegions;
+ NV_DECLARE_ALIGNED(NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO fbRegion[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES], 8);
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS;
+
+#define NV0080_CTRL_GR_CAPS_TBL_SIZE 23
+
+#define NV2080_GPU_MAX_GID_LENGTH (0x000000100ULL)
+
+typedef struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
+ NvU32 index;
+ NvU32 flags;
+ NvU32 length;
+ NvU8 data[NV2080_GPU_MAX_GID_LENGTH];
+} NV2080_CTRL_GPU_GET_GID_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS {
+ NvU32 BoardID;
+ char chipSKU[9];
+ char chipSKUMod[5];
+ NvU32 skuConfigVersion;
+ char project[5];
+ char projectSKU[5];
+ char CDP[6];
+ char projectSKUMod[2];
+ NvU32 businessCycle;
+} NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS;
+
+#define MAX_GPC_COUNT 32
+
+typedef struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
+ NvU32 totalVFs;
+ NvU32 firstVfOffset;
+ NvU32 vfFeatureMask;
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar0Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar1Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar2Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar0Size, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar1Size, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar2Size, 8);
+ NvBool b64bitBar0;
+ NvBool b64bitBar1;
+ NvBool b64bitBar2;
+ NvBool bSriovEnabled;
+ NvBool bSriovHeavyEnabled;
+ NvBool bEmulateVFBar0TlbInvalidationRegister;
+ NvBool bClientRmAllocatedCtxBuffer;
+ NvBool bNonPowerOf2ChannelCountSupported;
+ NvBool bVfResizableBAR1Supported;
+} NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS;
+
+#include "engine.h"
+
+#define NVGPU_ENGINE_CAPS_MASK_BITS 32
+
+#define NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX ((RM_ENGINE_TYPE_LAST-1)/NVGPU_ENGINE_CAPS_MASK_BITS + 1)
+
+#define NV2080_GPU_MAX_NAME_STRING_LENGTH (0x0000040U)
+
+typedef struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS
+{
+ NvU32 numHeads;
+ NvU32 maxNumHeads;
+} VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS;
+
+typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS
+{
+ NvU32 headIndex;
+ NvU32 maxHResolution;
+ NvU32 maxVResolution;
+} VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS;
+
+#define MAX_GROUP_COUNT 2
+
+typedef struct
+{
+ NvU32 ecidLow;
+ NvU32 ecidHigh;
+ NvU32 ecidExtended;
+} EcidManufacturingInfo;
+
+typedef struct
+{
+ NvU64 nonWprHeapOffset;
+ NvU64 frtsOffset;
+} FW_WPR_LAYOUT_OFFSET;
+
+typedef struct GspStaticConfigInfo_t
+{
+ NvU8 grCapsBits[NV0080_CTRL_GR_CAPS_TBL_SIZE];
+ NV2080_CTRL_GPU_GET_GID_INFO_PARAMS gidInfo;
+ NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS SKUInfo;
+ NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS fbRegionInfoParams;
+
+ NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS sriovCaps;
+ NvU32 sriovMaxGfid;
+
+ NvU32 engineCaps[NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX];
+
+ NvBool poisonFuseEnabled;
+
+ NvU64 fb_length;
+ NvU64 fbio_mask;
+ NvU32 fb_bus_width;
+ NvU32 fb_ram_type;
+ NvU64 fbp_mask;
+ NvU32 l2_cache_size;
+
+ NvU8 gpuNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvU8 gpuShortNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvU16 gpuNameString_Unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvBool bGpuInternalSku;
+ NvBool bIsQuadroGeneric;
+ NvBool bIsQuadroAd;
+ NvBool bIsNvidiaNvs;
+ NvBool bIsVgx;
+ NvBool bGeforceSmb;
+ NvBool bIsTitan;
+ NvBool bIsTesla;
+ NvBool bIsMobile;
+ NvBool bIsGc6Rtd3Allowed;
+ NvBool bIsGc8Rtd3Allowed;
+ NvBool bIsGcOffRtd3Allowed;
+ NvBool bIsGcoffLegacyAllowed;
+ NvBool bIsMigSupported;
+
+    /* "Total Board Power" refers to the power requirement of the GPU
+     * while in the GC6 state. The majority of this power is used to
+     * keep the VRAM active and preserve its contents; some energy may
+     * be consumed by always-on components on the GPU chip. This power
+     * is supplied by the 3.3V voltage rail.
+     */
+ NvU16 RTD3GC6TotalBoardPower;
+
+    /* PERST# (PCI Express Reset) is a sideband signal generated by the
+     * PCIe host to indicate to PCIe devices that the power rails and
+     * the reference clock are stable. The endpoint device typically
+     * uses this signal as a global reset.
+     */
+ NvU16 RTD3GC6PerstDelay;
+
+ NvU64 bar1PdeBase;
+ NvU64 bar2PdeBase;
+
+ NvBool bVbiosValid;
+ NvU32 vbiosSubVendor;
+ NvU32 vbiosSubDevice;
+
+ NvBool bPageRetirementSupported;
+
+ NvBool bSplitVasBetweenServerClientRm;
+
+ NvBool bClRootportNeedsNosnoopWAR;
+
+ VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS displaylessMaxHeads;
+ VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS displaylessMaxResolution;
+ NvU64 displaylessMaxPixels;
+
+ // Client handle for internal RMAPI control.
+ NvHandle hInternalClient;
+
+ // Device handle for internal RMAPI control.
+ NvHandle hInternalDevice;
+
+ // Subdevice handle for internal RMAPI control.
+ NvHandle hInternalSubdevice;
+
+ NvBool bSelfHostedMode;
+ NvBool bAtsSupported;
+
+ NvBool bIsGpuUefi;
+ NvBool bIsEfiInit;
+
+ EcidManufacturingInfo ecidInfo[MAX_GROUP_COUNT];
+
+ FW_WPR_LAYOUT_OFFSET fwWprLayoutOffset;
+} GspStaticConfigInfo;
+
+typedef struct
+{
+ NvU16 deviceID; // deviceID
+ NvU16 vendorID; // vendorID
+ NvU16 subdeviceID; // subsystem deviceID
+ NvU16 subvendorID; // subsystem vendorID
+ NvU8 revisionID; // revision ID
+} BUSINFO;
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
+
+typedef struct DOD_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 acpiIdListLen;
+ NvU32 acpiIdList[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} DOD_METHOD_DATA;
+
+typedef struct JT_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 jtCaps;
+ NvU16 jtRevId;
+ NvBool bSBIOSCaps;
+} JT_METHOD_DATA;
+
+typedef struct MUX_METHOD_DATA_ELEMENT
+{
+ NvU32 acpiId;
+ NvU32 mode;
+ NV_STATUS status;
+} MUX_METHOD_DATA_ELEMENT;
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
+
+typedef struct MUX_METHOD_DATA
+{
+ NvU32 tableLen;
+ MUX_METHOD_DATA_ELEMENT acpiIdMuxModeTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+ MUX_METHOD_DATA_ELEMENT acpiIdMuxPartTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+ MUX_METHOD_DATA_ELEMENT acpiIdMuxStateTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} MUX_METHOD_DATA;
+
+typedef struct CAPS_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 optimusCaps;
+} CAPS_METHOD_DATA;
+
+typedef struct ACPI_METHOD_DATA
+{
+ NvBool bValid;
+ DOD_METHOD_DATA dodMethodData;
+ JT_METHOD_DATA jtMethodData;
+ MUX_METHOD_DATA muxMethodData;
+ CAPS_METHOD_DATA capsMethodData;
+} ACPI_METHOD_DATA;
+
+typedef struct GSP_VF_INFO
+{
+ NvU32 totalVFs;
+ NvU32 firstVFOffset;
+ NvU64 FirstVFBar0Address;
+ NvU64 FirstVFBar1Address;
+ NvU64 FirstVFBar2Address;
+ NvBool b64bitBar0;
+ NvBool b64bitBar1;
+ NvBool b64bitBar2;
+} GSP_VF_INFO;
+
+typedef struct
+{
+ // Link capabilities
+ NvU32 linkCap;
+} GSP_PCIE_CONFIG_REG;
+
+typedef struct GspSystemInfo
+{
+ NvU64 gpuPhysAddr;
+ NvU64 gpuPhysFbAddr;
+ NvU64 gpuPhysInstAddr;
+ NvU64 gpuPhysIoAddr;
+ NvU64 nvDomainBusDeviceFunc;
+ NvU64 simAccessBufPhysAddr;
+ NvU64 notifyOpSharedSurfacePhysAddr;
+ NvU64 pcieAtomicsOpMask;
+ NvU64 consoleMemSize;
+ NvU64 maxUserVa;
+ NvU32 pciConfigMirrorBase;
+ NvU32 pciConfigMirrorSize;
+ NvU32 PCIDeviceID;
+ NvU32 PCISubDeviceID;
+ NvU32 PCIRevisionID;
+ NvU32 pcieAtomicsCplDeviceCapMask;
+ NvU8 oorArch;
+ NvU64 clPdbProperties;
+ NvU32 Chipset;
+ NvBool bGpuBehindBridge;
+ NvBool bFlrSupported;
+ NvBool b64bBar0Supported;
+ NvBool bMnocAvailable;
+ NvU32 chipsetL1ssEnable;
+ NvBool bUpstreamL0sUnsupported;
+ NvBool bUpstreamL1Unsupported;
+ NvBool bUpstreamL1PorSupported;
+ NvBool bUpstreamL1PorMobileOnly;
+ NvBool bSystemHasMux;
+ NvU8 upstreamAddressValid;
+ BUSINFO FHBBusInfo;
+ BUSINFO chipsetIDInfo;
+ ACPI_METHOD_DATA acpiMethodData;
+ NvU32 hypervisorType;
+ NvBool bIsPassthru;
+ NvU64 sysTimerOffsetNs;
+ GSP_VF_INFO gspVFInfo;
+ NvBool bIsPrimary;
+ NvBool isGridBuild;
+ GSP_PCIE_CONFIG_REG pcieConfigReg;
+ NvU32 gridBuildCsp;
+ NvBool bPreserveVideoMemoryAllocations;
+ NvBool bTdrEventSupported;
+ NvBool bFeatureStretchVblankCapable;
+ NvBool bEnableDynamicGranularityPageArrays;
+ NvBool bClockBoostSupported;
+ NvBool bRouteDispIntrsToCPU;
+ NvU64 hostPageSize;
+} GspSystemInfo;
+
+typedef struct rpc_os_error_log_v17_00
+{
+ NvU32 exceptType;
+ NvU32 runlistId;
+ NvU32 chid;
+ char errString[0x100];
+ NvU32 preemptiveRemovalPreviousXid;
+} rpc_os_error_log_v17_00;
+
+typedef struct
+{
+ // Magic
+ // BL to use for verification (i.e. Booter locked it in WPR2)
+ NvU64 magic; // = 0xdc3aae21371a60b3;
+
+ // Revision number of Booter-BL-Sequencer handoff interface
+ // Bumped whenever this interface changes in a non-backward-compatible way.
+ // Also bumped when GSP-RM ucode is revoked.
+ NvU64 revision; // = 1;
+
+ // ---- Members regarding data in SYSMEM ----------------------------
+ // Consumed by Booter for DMA
+
+ NvU64 sysmemAddrOfRadix3Elf;
+ NvU64 sizeOfRadix3Elf;
+
+ NvU64 sysmemAddrOfBootloader;
+ NvU64 sizeOfBootloader;
+
+ // Offsets inside bootloader image needed by Booter
+ NvU64 bootloaderCodeOffset;
+ NvU64 bootloaderDataOffset;
+ NvU64 bootloaderManifestOffset;
+
+ union
+ {
+ // Used only at initial boot
+ struct
+ {
+ NvU64 sysmemAddrOfSignature;
+ NvU64 sizeOfSignature;
+ };
+
+ //
+ // Used at suspend/resume to read GspFwHeapFreeList
+ // Offset relative to GspFwWprMeta FBMEM PA (gspFwWprStart)
+ //
+ struct
+ {
+ NvU32 gspFwHeapFreeListWprOffset;
+ NvU32 unused0;
+ NvU64 unused1;
+ };
+ };
+
+ // ---- Members describing FB layout --------------------------------
+ NvU64 gspFwRsvdStart;
+
+ NvU64 nonWprHeapOffset;
+ NvU64 nonWprHeapSize;
+
+ NvU64 gspFwWprStart;
+
+ // GSP-RM to use to set up the heap.
+ NvU64 gspFwHeapOffset;
+ NvU64 gspFwHeapSize;
+
+ // BL to use to find ELF for jump
+ NvU64 gspFwOffset;
+ // Size is sizeOfRadix3Elf above.
+
+ NvU64 bootBinOffset;
+ // Size is sizeOfBootloader above.
+
+ NvU64 frtsOffset;
+ NvU64 frtsSize;
+
+ NvU64 gspFwWprEnd;
+
+ // GSP-RM to use for fbRegionInfo?
+ NvU64 fbSize;
+
+ // ---- Other members -----------------------------------------------
+
+ // GSP-RM to use for fbRegionInfo?
+ NvU64 vgaWorkspaceOffset;
+ NvU64 vgaWorkspaceSize;
+
+ // Boot count. Used to determine whether to load the firmware image.
+ NvU64 bootCount;
+
+ // This union is organized the way it is to start at an 8-byte boundary and achieve natural
+ // packing of the internal struct fields.
+ union
+ {
+ struct
+ {
+ // TODO: the partitionRpc* fields below do not really belong in this
+ // structure. The values are patched in by the partition bootstrapper
+ // when GSP-RM is booted in a partition, and this structure was a
+ // convenient place for the bootstrapper to access them. These should
+ // be moved to a different comm. mechanism between the bootstrapper
+ // and the GSP-RM tasks.
+
+ // Shared partition RPC memory (physical address)
+ NvU64 partitionRpcAddr;
+
+ // Offsets relative to partitionRpcAddr
+ NvU16 partitionRpcRequestOffset;
+ NvU16 partitionRpcReplyOffset;
+
+ // Code section and dataSection offset and size.
+ NvU32 elfCodeOffset;
+ NvU32 elfDataOffset;
+ NvU32 elfCodeSize;
+ NvU32 elfDataSize;
+
+ // Used during GSP-RM resume to check for revocation
+ NvU32 lsUcodeVersion;
+ };
+
+ struct
+ {
+ // Pad for the partitionRpc* fields, plus 4 bytes
+ NvU32 partitionRpcPadding[4];
+
+ // CrashCat (contiguous) buffer size/location - occupies same bytes as the
+ // elf(Code|Data)(Offset|Size) fields above.
+ // TODO: move to GSP_FMC_INIT_PARAMS
+ NvU64 sysmemAddrOfCrashReportQueue;
+ NvU32 sizeOfCrashReportQueue;
+
+ // Pad for the lsUcodeVersion field
+ NvU32 lsUcodeVersionPadding[1];
+ };
+ };
+
+ // Number of VF partitions allocating sub-heaps from the WPR heap
+ // Used during boot to ensure the heap is adequately sized
+ NvU8 gspFwHeapVfPartitionCount;
+
+ // Flags to help decide GSP-FW flow.
+ NvU8 flags;
+
+ // Pad structure to exactly 256 bytes. Can replace padding with additional
+ // fields without incrementing revision. Padding initialized to 0.
+ NvU8 padding[2];
+
+ //
+ // Starts at gspFwWprEnd+frtsSize because FRTS is positioned
+ // to end where this allocation starts (when RM requests that FSP
+ // create FRTS).
+ //
+ NvU32 pmuReservedSize;
+
+ // BL to use for verification (i.e. Booter says OK to boot)
+ NvU64 verified; // 0x0 -> unverified, 0xa0a0a0a0a0a0a0a0 -> verified
+} GspFwWprMeta;
+
+#define GSP_FW_WPR_META_MAGIC 0xdc3aae21371a60b3ULL
+
+#define GSP_FW_WPR_META_REVISION 1
+
+typedef struct {
+ NvU64 sharedMemPhysAddr;
+ NvU32 pageTableEntryCount;
+ NvLength cmdQueueOffset;
+ NvLength statQueueOffset;
+} MESSAGE_QUEUE_INIT_ARGUMENTS;
+
+typedef struct {
+ NvU32 oldLevel;
+ NvU32 flags;
+ NvBool bInPMTransition;
+} GSP_SR_INIT_ARGUMENTS;
+
+typedef struct
+{
+ MESSAGE_QUEUE_INIT_ARGUMENTS messageQueueInitArguments;
+ GSP_SR_INIT_ARGUMENTS srInitArguments;
+ NvU32 gpuInstance;
+ NvBool bDmemStack;
+
+ struct
+ {
+ NvU64 pa;
+ NvU64 size;
+ } profilerArgs;
+} GSP_ARGUMENTS_CACHED;
+
+#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3 (0x00000003U)
+
+typedef struct
+{
+ // Magic for verification by secure ucode
+ NvU64 magic; // = GSP_FW_SR_META_MAGIC;
+
+ //
+ // Revision number
+ // Bumped whenever this interface changes in a non-backward-compatible way.
+ //
+ NvU64 revision; // = GSP_FW_SR_META_MAGIC_REVISION;
+
+ // Members regarding data in SYSMEM
+ NvU64 sysmemAddrOfSuspendResumeData;
+ NvU64 sizeOfSuspendResumeData;
+
+ //
+ // Internal members for use by secure ucode
+ // Must be exactly GSP_FW_SR_META_INTERNAL_SIZE bytes.
+ //
+ NvU32 internal[32];
+
+ // Same as flags of GspFwWprMeta
+ NvU32 flags;
+
+ // Subrevision number used by secure ucode
+ NvU32 subrevision;
+
+ //
+ // Pad structure to exactly 256 bytes (1 DMA chunk).
+ // Padding initialized to zero.
+ //
+ NvU32 padding[22];
+} GspFwSRMeta;
+
+#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2 (0 << 20) // No FB heap usage
+
+#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL (22 << 20)
+
+#define GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X (8 << 20) // Turing through Ada
+
+#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB (64u)
+
+#define BULLSEYE_ROOT_HEAP_ALLOC_RM_DATA_SECTION_SIZE_DELTA (12u)
+
+#define BULLSEYE_ROOT_HEAP_ALLOC_BAREMETAL_LIBOS_HEAP_SIZE_DELTA (70u)
+
+#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB \
+ (88u + (BULLSEYE_ROOT_HEAP_ALLOC_RM_DATA_SECTION_SIZE_DELTA) + \
+ (BULLSEYE_ROOT_HEAP_ALLOC_BAREMETAL_LIBOS_HEAP_SIZE_DELTA))
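+/* With the Bullseye deltas above, this resolves to 88 + 12 + 70 = 170 MB. */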
+
+typedef struct GSP_FMC_INIT_PARAMS
+{
+ // CC initialization "registry keys"
+ NvU32 regkeys;
+} GSP_FMC_INIT_PARAMS;
+
+typedef enum {
+ GSP_DMA_TARGET_LOCAL_FB,
+ GSP_DMA_TARGET_COHERENT_SYSTEM,
+ GSP_DMA_TARGET_NONCOHERENT_SYSTEM,
+ GSP_DMA_TARGET_COUNT
+} GSP_DMA_TARGET;
+
+typedef struct GSP_ACR_BOOT_GSP_RM_PARAMS
+{
+ // Physical memory aperture through which gspRmDescPa is accessed
+ GSP_DMA_TARGET target;
+ // Size in bytes of the GSP-RM descriptor structure
+ NvU32 gspRmDescSize;
+ // Physical offset in the target aperture of the GSP-RM descriptor structure
+ NvU64 gspRmDescOffset;
+ // Physical offset in FB to set the start of the WPR containing GSP-RM
+ NvU64 wprCarveoutOffset;
+ // Size in bytes of the WPR containing GSP-RM
+ NvU32 wprCarveoutSize;
+ // Whether to boot GSP-RM or GSP-Proxy through ACR
+ NvBool bIsGspRmBoot;
+} GSP_ACR_BOOT_GSP_RM_PARAMS;
+
+typedef struct GSP_RM_PARAMS
+{
+ // Physical memory aperture through which bootArgsOffset is accessed
+ GSP_DMA_TARGET target;
+ // Physical offset in the memory aperture that will be passed to GSP-RM
+ NvU64 bootArgsOffset;
+} GSP_RM_PARAMS;
+
+typedef struct GSP_SPDM_PARAMS
+{
+ // Physical Memory Aperture through which all addresses are accessed
+ GSP_DMA_TARGET target;
+
+ // Physical offset in the memory aperture where SPDM payload is stored
+ NvU64 payloadBufferOffset;
+
+ // Size of the above payload buffer
+ NvU32 payloadBufferSize;
+} GSP_SPDM_PARAMS;
+
+typedef struct GSP_FMC_BOOT_PARAMS
+{
+ GSP_FMC_INIT_PARAMS initParams;
+ GSP_ACR_BOOT_GSP_RM_PARAMS bootGspRmParams;
+ GSP_RM_PARAMS gspRmParams;
+ GSP_SPDM_PARAMS gspSpdmParams;
+} GSP_FMC_BOOT_PARAMS;
+
+#define GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100 (14 << 20) // Hopper+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h
new file mode 100644
index 000000000000..e06643f57695
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_MSGFN_H__
+#define __NVRM_MSGFN_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
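+/*
+ * E() is an X-macro hook.  Compiled as-is, the list below defines the
+ * NV_VGPU_MSG_EVENT_* enum; a consumer may instead pre-define E before the
+ * first inclusion to expand the same list differently.  A hypothetical
+ * sketch (illustrative only, not nouveau code) that builds a name lookup:
+ *
+ *   #define E(RPC, VAL) case VAL: return #RPC;
+ *   static const char *event_name(u32 fn)
+ *   {
+ *           switch (fn) {
+ *   #include "msgfn.h"
+ *           }
+ *           return "UNKNOWN";
+ *   }
+ *   #undef E
+ */
+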
+#ifndef E
+# define E(RPC, VAL) NV_VGPU_MSG_EVENT_##RPC = VAL,
+# define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+enum {
+#endif
+ E(FIRST_EVENT, 0x1000)
+ E(GSP_INIT_DONE, 0x1001)
+ E(GSP_RUN_CPU_SEQUENCER, 0x1002)
+ E(POST_EVENT, 0x1003)
+ E(RC_TRIGGERED, 0x1004)
+ E(MMU_FAULT_QUEUED, 0x1005)
+ E(OS_ERROR_LOG, 0x1006)
+ E(RG_LINE_INTR, 0x1007)
+ E(GPUACCT_PERFMON_UTIL_SAMPLES, 0x1008)
+ E(SIM_READ, 0x1009)
+ E(SIM_WRITE, 0x100a)
+ E(SEMAPHORE_SCHEDULE_CALLBACK, 0x100b)
+ E(UCODE_LIBOS_PRINT, 0x100c)
+ E(VGPU_GSP_PLUGIN_TRIGGERED, 0x100d)
+ E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK, 0x100e)
+ E(PERF_BRIDGELESS_INFO_UPDATE, 0x100f)
+ E(VGPU_CONFIG, 0x1010)
+ E(DISPLAY_MODESET, 0x1011)
+ E(EXTDEV_INTR_SERVICE, 0x1012)
+ E(NVLINK_INBAND_RECEIVED_DATA_256, 0x1013)
+ E(NVLINK_INBAND_RECEIVED_DATA_512, 0x1014)
+ E(NVLINK_INBAND_RECEIVED_DATA_1024, 0x1015)
+ E(NVLINK_INBAND_RECEIVED_DATA_2048, 0x1016)
+ E(NVLINK_INBAND_RECEIVED_DATA_4096, 0x1017)
+ E(TIMED_SEMAPHORE_RELEASE, 0x1018)
+ E(NVLINK_IS_GPU_DEGRADED, 0x1019)
+ E(PFM_REQ_HNDLR_STATE_SYNC_CALLBACK, 0x101a)
+ E(NVLINK_FAULT_UP, 0x101b)
+ E(GSP_LOCKDOWN_NOTICE, 0x101c)
+ E(MIG_CI_CONFIG_UPDATE, 0x101d)
+ E(UPDATE_GSP_TRACE, 0x101e)
+ E(NVLINK_FATAL_ERROR_RECOVERY, 0x101f)
+ E(GSP_POST_NOCAT_RECORD, 0x1020)
+ E(FECS_ERROR, 0x1021)
+ E(RECOVERY_ACTION, 0x1022)
+ E(NUM_EVENTS, 0x1023)
+#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+};
+# undef E
+# undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+#endif
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h
new file mode 100644
index 000000000000..fcaef7f553a6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_OFA_H__
+#define __NVRM_OFA_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of OFA?
+ NvU32 engineInstance;
+} NV_OFA_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h
new file mode 100644
index 000000000000..2d67b598c58b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_RPCFN_H__
+#define __NVRM_RPCFN_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
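+/*
+ * X() follows the same X-macro pattern as E() in msgfn.h: compiled as-is it
+ * defines the NV_VGPU_MSG_FUNCTION_* enum, while pre-defining X before the
+ * first inclusion expands the list differently (e.g. into a function-name
+ * table for RPC tracing; see the sketch in msgfn.h).
+ */
+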
+#ifndef X
+# define X(UNIT, RPC, VAL) NV_VGPU_MSG_FUNCTION_##RPC = VAL,
+# define DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
+enum {
+#endif
+ X(RM, NOP, 0)
+ X(RM, SET_GUEST_SYSTEM_INFO, 1)
+ X(RM, ALLOC_ROOT, 2)
+ X(RM, ALLOC_DEVICE, 3) // deprecated
+ X(RM, ALLOC_MEMORY, 4)
+ X(RM, ALLOC_CTX_DMA, 5)
+ X(RM, ALLOC_CHANNEL_DMA, 6)
+ X(RM, MAP_MEMORY, 7)
+ X(RM, BIND_CTX_DMA, 8) // deprecated
+ X(RM, ALLOC_OBJECT, 9)
+ X(RM, FREE, 10)
+ X(RM, LOG, 11)
+ X(RM, ALLOC_VIDMEM, 12)
+ X(RM, UNMAP_MEMORY, 13)
+ X(RM, MAP_MEMORY_DMA, 14)
+ X(RM, UNMAP_MEMORY_DMA, 15)
+ X(RM, GET_EDID, 16) // deprecated
+ X(RM, ALLOC_DISP_CHANNEL, 17)
+ X(RM, ALLOC_DISP_OBJECT, 18)
+ X(RM, ALLOC_SUBDEVICE, 19)
+ X(RM, ALLOC_DYNAMIC_MEMORY, 20)
+ X(RM, DUP_OBJECT, 21)
+ X(RM, IDLE_CHANNELS, 22)
+ X(RM, ALLOC_EVENT, 23)
+ X(RM, SEND_EVENT, 24) // deprecated
+ X(RM, REMAPPER_CONTROL, 25) // deprecated
+ X(RM, DMA_CONTROL, 26) // deprecated
+ X(RM, DMA_FILL_PTE_MEM, 27)
+ X(RM, MANAGE_HW_RESOURCE, 28)
+ X(RM, BIND_ARBITRARY_CTX_DMA, 29) // deprecated
+ X(RM, CREATE_FB_SEGMENT, 30)
+ X(RM, DESTROY_FB_SEGMENT, 31)
+ X(RM, ALLOC_SHARE_DEVICE, 32)
+ X(RM, DEFERRED_API_CONTROL, 33)
+ X(RM, REMOVE_DEFERRED_API, 34)
+ X(RM, SIM_ESCAPE_READ, 35)
+ X(RM, SIM_ESCAPE_WRITE, 36)
+ X(RM, SIM_MANAGE_DISPLAY_CONTEXT_DMA, 37)
+ X(RM, FREE_VIDMEM_VIRT, 38)
+ X(RM, PERF_GET_PSTATE_INFO, 39) // deprecated
+ X(RM, PERF_GET_PERFMON_SAMPLE, 40)
+ X(RM, PERF_GET_VIRTUAL_PSTATE_INFO, 41) // deprecated
+ X(RM, PERF_GET_LEVEL_INFO, 42)
+ X(RM, MAP_SEMA_MEMORY, 43)
+ X(RM, UNMAP_SEMA_MEMORY, 44)
+ X(RM, SET_SURFACE_PROPERTIES, 45)
+ X(RM, CLEANUP_SURFACE, 46)
+ X(RM, UNLOADING_GUEST_DRIVER, 47)
+ X(RM, TDR_SET_TIMEOUT_STATE, 48)
+ X(RM, SWITCH_TO_VGA, 49)
+ X(RM, GPU_EXEC_REG_OPS, 50)
+ X(RM, GET_STATIC_INFO, 51)
+ X(RM, ALLOC_VIRTMEM, 52)
+ X(RM, UPDATE_PDE_2, 53)
+ X(RM, SET_PAGE_DIRECTORY, 54)
+ X(RM, GET_STATIC_PSTATE_INFO, 55)
+ X(RM, TRANSLATE_GUEST_GPU_PTES, 56)
+ X(RM, RESERVED_57, 57)
+ X(RM, RESET_CURRENT_GR_CONTEXT, 58)
+ X(RM, SET_SEMA_MEM_VALIDATION_STATE, 59)
+ X(RM, GET_ENGINE_UTILIZATION, 60)
+ X(RM, UPDATE_GPU_PDES, 61)
+ X(RM, GET_ENCODER_CAPACITY, 62)
+ X(RM, VGPU_PF_REG_READ32, 63) // deprecated
+ X(RM, SET_GUEST_SYSTEM_INFO_EXT, 64)
+ X(GSP, GET_GSP_STATIC_INFO, 65)
+ X(RM, RMFS_INIT, 66) // deprecated
+ X(RM, RMFS_CLOSE_QUEUE, 67) // deprecated
+ X(RM, RMFS_CLEANUP, 68) // deprecated
+ X(RM, RMFS_TEST, 69) // deprecated
+ X(RM, UPDATE_BAR_PDE, 70)
+ X(RM, CONTINUATION_RECORD, 71)
+ X(RM, GSP_SET_SYSTEM_INFO, 72)
+ X(RM, SET_REGISTRY, 73)
+ X(GSP, GSP_INIT_POST_OBJGPU, 74) // deprecated
+ X(RM, SUBDEV_EVENT_SET_NOTIFICATION, 75) // deprecated
+ X(GSP, GSP_RM_CONTROL, 76)
+ X(RM, GET_STATIC_INFO2, 77)
+ X(RM, DUMP_PROTOBUF_COMPONENT, 78)
+ X(RM, UNSET_PAGE_DIRECTORY, 79)
+ X(RM, GET_CONSOLIDATED_STATIC_INFO, 80) // deprecated
+ X(RM, GMMU_REGISTER_FAULT_BUFFER, 81) // deprecated
+ X(RM, GMMU_UNREGISTER_FAULT_BUFFER, 82) // deprecated
+ X(RM, GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER, 83) // deprecated
+ X(RM, GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER, 84) // deprecated
+ X(RM, CTRL_SET_VGPU_FB_USAGE, 85)
+ X(RM, CTRL_NVFBC_SW_SESSION_UPDATE_INFO, 86)
+ X(RM, CTRL_NVENC_SW_SESSION_UPDATE_INFO, 87)
+ X(RM, CTRL_RESET_CHANNEL, 88)
+ X(RM, CTRL_RESET_ISOLATED_CHANNEL, 89)
+ X(RM, CTRL_GPU_HANDLE_VF_PRI_FAULT, 90)
+ X(RM, CTRL_CLK_GET_EXTENDED_INFO, 91)
+ X(RM, CTRL_PERF_BOOST, 92)
+ X(RM, CTRL_PERF_VPSTATES_GET_CONTROL, 93)
+ X(RM, CTRL_GET_ZBC_CLEAR_TABLE, 94)
+ X(RM, CTRL_SET_ZBC_COLOR_CLEAR, 95)
+ X(RM, CTRL_SET_ZBC_DEPTH_CLEAR, 96)
+ X(RM, CTRL_GPFIFO_SCHEDULE, 97)
+ X(RM, CTRL_SET_TIMESLICE, 98)
+ X(RM, CTRL_PREEMPT, 99)
+ X(RM, CTRL_FIFO_DISABLE_CHANNELS, 100)
+ X(RM, CTRL_SET_TSG_INTERLEAVE_LEVEL, 101)
+ X(RM, CTRL_SET_CHANNEL_INTERLEAVE_LEVEL, 102)
+ X(GSP, GSP_RM_ALLOC, 103)
+ X(RM, CTRL_GET_P2P_CAPS_V2, 104)
+ X(RM, CTRL_CIPHER_AES_ENCRYPT, 105)
+ X(RM, CTRL_CIPHER_SESSION_KEY, 106)
+ X(RM, CTRL_CIPHER_SESSION_KEY_STATUS, 107)
+ X(RM, CTRL_DBG_CLEAR_ALL_SM_ERROR_STATES, 108)
+ X(RM, CTRL_DBG_READ_ALL_SM_ERROR_STATES, 109)
+ X(RM, CTRL_DBG_SET_EXCEPTION_MASK, 110)
+ X(RM, CTRL_GPU_PROMOTE_CTX, 111)
+ X(RM, CTRL_GR_CTXSW_PREEMPTION_BIND, 112)
+ X(RM, CTRL_GR_SET_CTXSW_PREEMPTION_MODE, 113)
+ X(RM, CTRL_GR_CTXSW_ZCULL_BIND, 114)
+ X(RM, CTRL_GPU_INITIALIZE_CTX, 115)
+ X(RM, CTRL_VASPACE_COPY_SERVER_RESERVED_PDES, 116)
+ X(RM, CTRL_FIFO_CLEAR_FAULTED_BIT, 117)
+ X(RM, CTRL_GET_LATEST_ECC_ADDRESSES, 118)
+ X(RM, CTRL_MC_SERVICE_INTERRUPTS, 119)
+ X(RM, CTRL_DMA_SET_DEFAULT_VASPACE, 120)
+ X(RM, CTRL_GET_CE_PCE_MASK, 121)
+ X(RM, CTRL_GET_ZBC_CLEAR_TABLE_ENTRY, 122)
+ X(RM, CTRL_GET_NVLINK_PEER_ID_MASK, 123) // deprecated
+ X(RM, CTRL_GET_NVLINK_STATUS, 124)
+ X(RM, CTRL_GET_P2P_CAPS, 125)
+ X(RM, CTRL_GET_P2P_CAPS_MATRIX, 126)
+ X(RM, RESERVED_0, 127)
+ X(RM, CTRL_RESERVE_PM_AREA_SMPC, 128)
+ X(RM, CTRL_RESERVE_HWPM_LEGACY, 129)
+ X(RM, CTRL_B0CC_EXEC_REG_OPS, 130)
+ X(RM, CTRL_BIND_PM_RESOURCES, 131)
+ X(RM, CTRL_DBG_SUSPEND_CONTEXT, 132)
+ X(RM, CTRL_DBG_RESUME_CONTEXT, 133)
+ X(RM, CTRL_DBG_EXEC_REG_OPS, 134)
+ X(RM, CTRL_DBG_SET_MODE_MMU_DEBUG, 135)
+ X(RM, CTRL_DBG_READ_SINGLE_SM_ERROR_STATE, 136)
+ X(RM, CTRL_DBG_CLEAR_SINGLE_SM_ERROR_STATE, 137)
+ X(RM, CTRL_DBG_SET_MODE_ERRBAR_DEBUG, 138)
+ X(RM, CTRL_DBG_SET_NEXT_STOP_TRIGGER_TYPE, 139)
+ X(RM, CTRL_ALLOC_PMA_STREAM, 140)
+ X(RM, CTRL_PMA_STREAM_UPDATE_GET_PUT, 141)
+ X(RM, CTRL_FB_GET_INFO_V2, 142)
+ X(RM, CTRL_FIFO_SET_CHANNEL_PROPERTIES, 143)
+ X(RM, CTRL_GR_GET_CTX_BUFFER_INFO, 144)
+ X(RM, CTRL_KGR_GET_CTX_BUFFER_PTES, 145)
+ X(RM, CTRL_GPU_EVICT_CTX, 146)
+ X(RM, CTRL_FB_GET_FS_INFO, 147)
+ X(RM, CTRL_GRMGR_GET_GR_FS_INFO, 148)
+ X(RM, CTRL_STOP_CHANNEL, 149)
+ X(RM, CTRL_GR_PC_SAMPLING_MODE, 150)
+ X(RM, CTRL_PERF_RATED_TDP_GET_STATUS, 151)
+ X(RM, CTRL_PERF_RATED_TDP_SET_CONTROL, 152)
+ X(RM, CTRL_FREE_PMA_STREAM, 153)
+ X(RM, CTRL_TIMER_SET_GR_TICK_FREQ, 154)
+ X(RM, CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB, 155)
+ X(RM, GET_CONSOLIDATED_GR_STATIC_INFO, 156)
+ X(RM, CTRL_DBG_SET_SINGLE_SM_SINGLE_STEP, 157)
+ X(RM, CTRL_GR_GET_TPC_PARTITION_MODE, 158)
+ X(RM, CTRL_GR_SET_TPC_PARTITION_MODE, 159)
+ X(UVM, UVM_PAGING_CHANNEL_ALLOCATE, 160)
+ X(UVM, UVM_PAGING_CHANNEL_DESTROY, 161)
+ X(UVM, UVM_PAGING_CHANNEL_MAP, 162)
+ X(UVM, UVM_PAGING_CHANNEL_UNMAP, 163)
+ X(UVM, UVM_PAGING_CHANNEL_PUSH_STREAM, 164)
+ X(UVM, UVM_PAGING_CHANNEL_SET_HANDLES, 165)
+ X(UVM, UVM_METHOD_STREAM_GUEST_PAGES_OPERATION, 166)
+ X(RM, CTRL_INTERNAL_QUIESCE_PMA_CHANNEL, 167)
+ X(RM, DCE_RM_INIT, 168)
+ X(RM, REGISTER_VIRTUAL_EVENT_BUFFER, 169)
+ X(RM, CTRL_EVENT_BUFFER_UPDATE_GET, 170)
+ X(RM, GET_PLCABLE_ADDRESS_KIND, 171)
+ X(RM, CTRL_PERF_LIMITS_SET_STATUS_V2, 172)
+ X(RM, CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM, 173)
+ X(RM, CTRL_GET_MMU_DEBUG_MODE, 174)
+ X(RM, CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS, 175)
+ X(RM, CTRL_FLCN_GET_CTX_BUFFER_SIZE, 176)
+ X(RM, CTRL_FLCN_GET_CTX_BUFFER_INFO, 177)
+ X(RM, DISABLE_CHANNELS, 178)
+ X(RM, CTRL_FABRIC_MEMORY_DESCRIBE, 179)
+ X(RM, CTRL_FABRIC_MEM_STATS, 180)
+ X(RM, SAVE_HIBERNATION_DATA, 181)
+ X(RM, RESTORE_HIBERNATION_DATA, 182)
+ X(RM, CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED, 183)
+ X(RM, CTRL_EXEC_PARTITIONS_CREATE, 184)
+ X(RM, CTRL_EXEC_PARTITIONS_DELETE, 185)
+ X(RM, CTRL_GPFIFO_GET_WORK_SUBMIT_TOKEN, 186)
+ X(RM, CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX, 187)
+ X(RM, PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION, 188)
+ X(RM, CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK, 189)
+ X(RM, SET_SYSMEM_DIRTY_PAGE_TRACKING_BUFFER, 190)
+ X(RM, CTRL_SUBDEVICE_GET_P2P_CAPS, 191)
+ X(RM, CTRL_BUS_SET_P2P_MAPPING, 192)
+ X(RM, CTRL_BUS_UNSET_P2P_MAPPING, 193)
+ X(RM, CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK, 194)
+ X(RM, CTRL_GPU_MIGRATABLE_OPS, 195)
+ X(RM, CTRL_GET_TOTAL_HS_CREDITS, 196)
+ X(RM, CTRL_GET_HS_CREDITS, 197)
+ X(RM, CTRL_SET_HS_CREDITS, 198)
+ X(RM, CTRL_PM_AREA_PC_SAMPLER, 199)
+ X(RM, INVALIDATE_TLB, 200)
+ X(RM, CTRL_GPU_QUERY_ECC_STATUS, 201) // deprecated
+ X(RM, ECC_NOTIFIER_WRITE_ACK, 202)
+ X(RM, CTRL_DBG_GET_MODE_MMU_DEBUG, 203)
+ X(RM, RM_API_CONTROL, 204)
+ X(RM, CTRL_CMD_INTERNAL_GPU_START_FABRIC_PROBE, 205)
+ X(RM, CTRL_NVLINK_GET_INBAND_RECEIVED_DATA, 206)
+ X(RM, GET_STATIC_DATA, 207)
+ X(RM, RESERVED_208, 208)
+ X(RM, CTRL_GPU_GET_INFO_V2, 209)
+ X(RM, GET_BRAND_CAPS, 210)
+ X(RM, CTRL_CMD_NVLINK_INBAND_SEND_DATA, 211)
+ X(RM, UPDATE_GPM_GUEST_BUFFER_INFO, 212)
+ X(RM, CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE, 213)
+ X(RM, CTRL_SET_ZBC_STENCIL_CLEAR, 214)
+ X(RM, CTRL_SUBDEVICE_GET_VGPU_HEAP_STATS, 215)
+ X(RM, CTRL_SUBDEVICE_GET_LIBOS_HEAP_STATS, 216)
+ X(RM, CTRL_DBG_SET_MODE_MMU_GCC_DEBUG, 217)
+ X(RM, CTRL_DBG_GET_MODE_MMU_GCC_DEBUG, 218)
+ X(RM, CTRL_RESERVE_HES, 219)
+ X(RM, CTRL_RELEASE_HES, 220)
+ X(RM, CTRL_RESERVE_CCU_PROF, 221)
+ X(RM, CTRL_RELEASE_CCU_PROF, 222)
+ X(RM, RESERVED, 223)
+ X(RM, CTRL_CMD_GET_CHIPLET_HS_CREDIT_POOL, 224)
+ X(RM, CTRL_CMD_GET_HS_CREDITS_MAPPING, 225)
+ X(RM, CTRL_EXEC_PARTITIONS_EXPORT, 226)
+ X(RM, NUM_FUNCTIONS, 227)
+#ifdef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
+};
+# undef X
+# undef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
+#endif
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c
new file mode 100644
index 000000000000..6fb3083edde3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/engine.h>
+
+#include "nvrm/ofa.h"
+
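+/*
+ * Allocate an OFA (Optical Flow Accelerator) engine object via GSP-RM:
+ * fetch an allocation-parameter buffer, fill in the structure size and
+ * engine instance, then push the allocation RPC.
+ */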
+static int
+r570_ofa_alloc(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, int inst,
+ struct nvkm_gsp_object *ofa)
+{
+ NV_OFA_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(parent, handle, oclass, sizeof(*args), ofa);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->size = sizeof(*args);
+ args->engineInstance = inst;
+
+ return nvkm_gsp_rm_alloc_wr(ofa, args);
+}
+
+const struct nvkm_rm_api_engine
+r570_ofa = {
+ .alloc = r570_ofa_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c
new file mode 100644
index 000000000000..498658d0c60c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+
+#include "nvrm/gsp.h"
+
+static const struct nvkm_rm_wpr
+r570_wpr_libos2 = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB,
+};
+
+static const struct nvkm_rm_wpr
+r570_wpr_libos3 = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
+};
+
+static const struct nvkm_rm_wpr
+r570_wpr_libos3_gh100 = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
+ .heap_size_non_wpr = 0x200000,
+ .offset_set_by_acr = true,
+};
+
+static const struct nvkm_rm_wpr
+r570_wpr_libos3_gb10x = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
+ .heap_size_non_wpr = 0x200000,
+ .rsvd_size_pmu = ALIGN(0x0800000 + 0x1000000 + 0x0001000, 0x20000),
+ .offset_set_by_acr = true,
+};
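+
+/*
+ * The PMU reservation used here and in the gb20x layout below works out to
+ * 8 MiB + 16 MiB + 4 KiB, rounded up to 128 KiB alignment: 0x1820000 bytes.
+ */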
+
+static const struct nvkm_rm_wpr
+r570_wpr_libos3_gb20x = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
+ .heap_size_non_wpr = 0x220000,
+ .rsvd_size_pmu = ALIGN(0x0800000 + 0x1000000 + 0x0001000, 0x20000),
+ .offset_set_by_acr = true,
+};
+
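+/*
+ * r570 API table: entries whose wire format is unchanged since r535 reuse
+ * the r535 ops.
+ */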
+static const struct nvkm_rm_api
+r570_api = {
+ .gsp = &r570_gsp,
+ .rpc = &r535_rpc,
+ .ctrl = &r535_ctrl,
+ .alloc = &r535_alloc,
+ .client = &r570_client,
+ .device = &r535_device,
+ .fbsr = &r570_fbsr,
+ .disp = &r570_disp,
+ .fifo = &r570_fifo,
+ .ce = &r535_ce,
+ .gr = &r570_gr,
+ .nvdec = &r535_nvdec,
+ .nvenc = &r535_nvenc,
+ .nvjpg = &r535_nvjpg,
+ .ofa = &r570_ofa,
+};
+
+const struct nvkm_rm_impl
+r570_rm_tu102 = {
+ .wpr = &r570_wpr_libos2,
+ .api = &r570_api,
+};
+
+const struct nvkm_rm_impl
+r570_rm_ga102 = {
+ .wpr = &r570_wpr_libos3,
+ .api = &r570_api,
+};
+
+const struct nvkm_rm_impl
+r570_rm_gh100 = {
+ .wpr = &r570_wpr_libos3_gh100,
+ .api = &r570_api,
+};
+
+const struct nvkm_rm_impl
+r570_rm_gb10x = {
+ .wpr = &r570_wpr_libos3_gb10x,
+ .api = &r570_api,
+};
+
+const struct nvkm_rm_impl
+r570_rm_gb20x = {
+ .wpr = &r570_wpr_libos3_gb20x,
+ .api = &r570_api,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h
new file mode 100644
index 000000000000..393ea775941f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <subdev/gsp.h>
+#ifndef __NVKM_RM_H__
+#define __NVKM_RM_H__
+#include "handles.h"
+struct nvkm_outp;
+struct r535_gr;
+
+struct nvkm_rm_impl {
+ const struct nvkm_rm_wpr *wpr;
+ const struct nvkm_rm_api *api;
+};
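+
+/*
+ * Each supported GSP-RM release supplies one nvkm_rm_impl: the WPR layout
+ * parameters it boots with plus a vtable of RM API calls.  Ops whose wire
+ * format did not change between releases are shared, so a new release only
+ * implements the calls that actually changed (see r570/rm.c).
+ */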
+
+struct nvkm_rm {
+ struct nvkm_device *device;
+ const struct nvkm_rm_gpu *gpu;
+ const struct nvkm_rm_wpr *wpr;
+ const struct nvkm_rm_api *api;
+};
+
+struct nvkm_rm_wpr {
+ u32 os_carveout_size;
+ u32 base_size;
+ u64 heap_size_min;
+ u32 heap_size_non_wpr;
+ u32 rsvd_size_pmu;
+ bool offset_set_by_acr;
+};
+
+struct nvkm_rm_api {
+ const struct nvkm_rm_api_gsp {
+ void (*set_rmargs)(struct nvkm_gsp *, bool resume);
+ int (*set_system_info)(struct nvkm_gsp *);
+ int (*get_static_info)(struct nvkm_gsp *);
+ bool (*xlat_mc_engine_idx)(u32 mc_engine_idx, enum nvkm_subdev_type *, int *inst);
+ void (*drop_send_user_shared_data)(struct nvkm_gsp *);
+ void (*drop_post_nocat_record)(struct nvkm_gsp *);
+ u32 (*sr_data_size)(struct nvkm_gsp *);
+ } *gsp;
+
+ const struct nvkm_rm_api_rpc {
+ void *(*get)(struct nvkm_gsp *, u32 fn, u32 argc);
+ void *(*push)(struct nvkm_gsp *gsp, void *argv,
+ enum nvkm_gsp_rpc_reply_policy policy, u32 repc);
+ void (*done)(struct nvkm_gsp *gsp, void *repv);
+ } *rpc;
+
+ const struct nvkm_rm_api_ctrl {
+ void *(*get)(struct nvkm_gsp_object *, u32 cmd, u32 params_size);
+ int (*push)(struct nvkm_gsp_object *, void **params, u32 repc);
+ void (*done)(struct nvkm_gsp_object *, void *params);
+ } *ctrl;
+
+ const struct nvkm_rm_api_alloc {
+ void *(*get)(struct nvkm_gsp_object *, u32 oclass, u32 params_size);
+ void *(*push)(struct nvkm_gsp_object *, void *params);
+ void (*done)(struct nvkm_gsp_object *, void *params);
+
+ int (*free)(struct nvkm_gsp_object *);
+ } *alloc;
+
+ const struct nvkm_rm_api_client {
+ int (*ctor)(struct nvkm_gsp_client *, u32 handle);
+ } *client;
+
+ const struct nvkm_rm_api_device {
+ int (*ctor)(struct nvkm_gsp_client *, struct nvkm_gsp_device *);
+ void (*dtor)(struct nvkm_gsp_device *);
+
+ struct {
+ int (*ctor)(struct nvkm_gsp_device *, u32 handle, u32 id,
+ nvkm_gsp_event_func, struct nvkm_gsp_event *);
+ void (*dtor)(struct nvkm_gsp_event *);
+ } event;
+ } *device;
+
+ const struct nvkm_rm_api_fbsr {
+ int (*suspend)(struct nvkm_gsp *);
+ void (*resume)(struct nvkm_gsp *);
+ } *fbsr;
+
+ const struct nvkm_rm_api_disp {
+ int (*get_static_info)(struct nvkm_disp *);
+ int (*get_supported)(struct nvkm_disp *, unsigned long *display_mask);
+ int (*get_connect_state)(struct nvkm_disp *, unsigned display_id);
+ int (*get_active)(struct nvkm_disp *, unsigned head, u32 *display_id);
+
+ int (*bl_ctrl)(struct nvkm_disp *, unsigned display_id, bool set, int *val);
+
+ struct {
+ int (*get_caps)(struct nvkm_disp *, int *link_bw, bool *mst, bool *wm);
+ int (*set_indexed_link_rates)(struct nvkm_outp *);
+ } dp;
+
+ struct {
+ int (*set_pushbuf)(struct nvkm_disp *, s32 oclass, int inst,
+ struct nvkm_memory *);
+ int (*dmac_alloc)(struct nvkm_disp *, u32 oclass, int inst, u32 put_offset,
+ struct nvkm_gsp_object *);
+ } chan;
+ } *disp;
+
+ const struct nvkm_rm_api_fifo {
+ int (*xlat_rm_engine_type)(u32 rm_engine_type,
+ enum nvkm_subdev_type *, int *nv2080_type);
+ int (*ectx_size)(struct nvkm_fifo *);
+ unsigned rsvd_chids;
+ int (*rc_triggered)(void *priv, u32 fn, void *repv, u32 repc);
+ struct {
+ int (*alloc)(struct nvkm_gsp_device *, u32 handle,
+ u32 nv2080_engine_type, u8 runq, bool priv, int chid,
+ u64 inst_addr, u64 userd_addr, u64 mthdbuf_addr,
+ struct nvkm_vmm *, u64 gpfifo_offset, u32 gpfifo_length,
+ struct nvkm_gsp_object *);
+ } chan;
+ } *fifo;
+
+ const struct nvkm_rm_api_engine {
+ int (*alloc)(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *);
+ } *ce, *nvdec, *nvenc, *nvjpg, *ofa;
+
+ const struct nvkm_rm_api_gr {
+ int (*get_ctxbufs_info)(struct r535_gr *);
+ struct {
+ int (*init)(struct r535_gr *);
+ void (*fini)(struct r535_gr *);
+ } scrubber;
+ } *gr;
+};
+
+extern const struct nvkm_rm_impl r535_rm_tu102;
+extern const struct nvkm_rm_impl r535_rm_ga102;
+extern const struct nvkm_rm_api_gsp r535_gsp;
+typedef struct DOD_METHOD_DATA DOD_METHOD_DATA;
+typedef struct JT_METHOD_DATA JT_METHOD_DATA;
+typedef struct CAPS_METHOD_DATA CAPS_METHOD_DATA;
+void r535_gsp_acpi_dod(acpi_handle, DOD_METHOD_DATA *);
+void r535_gsp_acpi_jt(acpi_handle, JT_METHOD_DATA *);
+void r535_gsp_acpi_caps(acpi_handle, CAPS_METHOD_DATA *);
+struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS;
+void r535_gsp_get_static_info_fb(struct nvkm_gsp *,
+ const struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS *);
+extern const struct nvkm_rm_api_rpc r535_rpc;
+extern const struct nvkm_rm_api_ctrl r535_ctrl;
+extern const struct nvkm_rm_api_alloc r535_alloc;
+extern const struct nvkm_rm_api_client r535_client;
+void r535_gsp_client_dtor(struct nvkm_gsp_client *);
+extern const struct nvkm_rm_api_device r535_device;
+int r535_mmu_vaspace_new(struct nvkm_vmm *, u32 handle, bool external);
+void r535_mmu_vaspace_del(struct nvkm_vmm *);
+extern const struct nvkm_rm_api_fbsr r535_fbsr;
+void r535_fbsr_resume(struct nvkm_gsp *);
+int r535_fbsr_memlist(struct nvkm_gsp_device *, u32 handle, enum nvkm_memory_target,
+ u64 phys, u64 size, struct sg_table *, struct nvkm_gsp_object *);
+extern const struct nvkm_rm_api_disp r535_disp;
+extern const struct nvkm_rm_api_fifo r535_fifo;
+void r535_fifo_rc_chid(struct nvkm_fifo *, int chid);
+extern const struct nvkm_rm_api_engine r535_ce;
+extern const struct nvkm_rm_api_gr r535_gr;
+void *r535_gr_dtor(struct nvkm_gr *);
+int r535_gr_oneinit(struct nvkm_gr *);
+u64 r535_gr_units(struct nvkm_gr *);
+int r535_gr_chan_new(struct nvkm_gr *, struct nvkm_chan *, const struct nvkm_oclass *,
+ struct nvkm_object **);
+int r535_gr_promote_ctx(struct r535_gr *, bool golden, struct nvkm_vmm *,
+ struct nvkm_memory **pctxbuf_mem, struct nvkm_vma **pctxbuf_vma,
+ struct nvkm_gsp_object *chan);
+extern const struct nvkm_rm_api_engine r535_nvdec;
+extern const struct nvkm_rm_api_engine r535_nvenc;
+extern const struct nvkm_rm_api_engine r535_nvjpg;
+extern const struct nvkm_rm_api_engine r535_ofa;
+
+extern const struct nvkm_rm_impl r570_rm_tu102;
+extern const struct nvkm_rm_impl r570_rm_ga102;
+extern const struct nvkm_rm_impl r570_rm_gh100;
+extern const struct nvkm_rm_impl r570_rm_gb10x;
+extern const struct nvkm_rm_impl r570_rm_gb20x;
+extern const struct nvkm_rm_api_gsp r570_gsp;
+extern const struct nvkm_rm_api_client r570_client;
+extern const struct nvkm_rm_api_fbsr r570_fbsr;
+extern const struct nvkm_rm_api_disp r570_disp;
+extern const struct nvkm_rm_api_fifo r570_fifo;
+extern const struct nvkm_rm_api_gr r570_gr;
+int r570_gr_gpc_mask(struct nvkm_gsp *, u32 *mask);
+int r570_gr_tpc_mask(struct nvkm_gsp *, int gpc, u32 *mask);
+extern const struct nvkm_rm_api_engine r570_ofa;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h
new file mode 100644
index 000000000000..4431e33b3304
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_RM_RPC_H__
+#define __NVKM_RM_RPC_H__
+#include "rm.h"
+
+#define to_payload_hdr(p, header) \
+ container_of((void *)p, typeof(*header), params)
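+/* Recovers the RPC header in which the payload pointer p is the ->params member. */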
+
+int r535_gsp_rpc_poll(struct nvkm_gsp *, u32 fn);
+
+struct nvfw_gsp_rpc *r535_gsp_msg_recv(struct nvkm_gsp *, int fn, u32 gsp_rpc_len);
+int r535_gsp_msg_ntfy_add(struct nvkm_gsp *, u32 fn, nvkm_gsp_msg_ntfy_func, void *priv);
+
+int r535_rpc_status_to_errno(uint32_t rpc_status);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c
new file mode 100644
index 000000000000..423502f870db
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+tu1xx_gpu = {
+ .disp.class = {
+ .root = TU102_DISP,
+ .caps = GV100_DISP_CAPS,
+ .core = TU102_DISP_CORE_CHANNEL_DMA,
+ .wndw = TU102_DISP_WINDOW_CHANNEL_DMA,
+ .wimm = TU102_DISP_WINDOW_IMM_CHANNEL_DMA,
+ .curs = TU102_DISP_CURSOR,
+ },
+
+ .usermode.class = TURING_USERMODE_A,
+
+ .fifo.chan = {
+ .class = TURING_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = TURING_DMA_COPY_A,
+ .gr.class = {
+ .i2m = KEPLER_INLINE_TO_MEMORY_B,
+ .twod = FERMI_TWOD_A,
+ .threed = TURING_A,
+ .compute = TURING_COMPUTE_A,
+ },
+ .nvdec.class = NVC4B0_VIDEO_DECODER,
+ .nvenc.class = NVC4B7_VIDEO_ENCODER,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
index 59c5f2b9172a..58e233bc53b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
@@ -22,11 +22,45 @@
#include "priv.h"
#include <subdev/fb.h>
+#include <engine/sec2.h>
+
+#include <rm/r535/nvrm/gsp.h>
#include <nvfw/flcn.h>
#include <nvfw/fw.h>
#include <nvfw/hs.h>
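+
+/*
+ * Tear down WPR2 via the booter-unload HS firmware.  A zero WPR2-high
+ * register means no write-protected region was ever set up, so the unload
+ * can be skipped; after a successful unload the register must read zero
+ * again.
+ */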
+static int
+tu102_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 wpr2_hi;
+ int ret;
+
+ wpr2_hi = nvkm_rd32(device, 0x1fa828);
+ if (!wpr2_hi) {
+ nvkm_debug(subdev, "WPR2 not set - skipping booter unload\n");
+ return 0;
+ }
+
+ ret = nvkm_falcon_fw_boot(&gsp->booter.unload, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
+ if (WARN_ON(ret))
+ return ret;
+
+ wpr2_hi = nvkm_rd32(device, 0x1fa828);
+ if (WARN_ON(wpr2_hi))
+ return -EIO;
+
+ return 0;
+}
+
+static int
+tu102_gsp_booter_load(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
+{
+ return nvkm_falcon_fw_boot(&gsp->booter.load, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
+}
+
int
tu102_gsp_booter_ctor(struct nvkm_gsp *gsp, const char *name, const struct firmware *blob,
struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw)
@@ -114,6 +148,118 @@ tu102_gsp_reset(struct nvkm_gsp *gsp)
return gsp->falcon.func->reset_eng(&gsp->falcon);
}
+int
+tu102_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
+{
+ u32 mbox0 = 0xff, mbox1 = 0xff;
+ int ret;
+
+ ret = r535_gsp_fini(gsp, suspend);
+ if (ret && suspend)
+ return ret;
+
+ nvkm_falcon_reset(&gsp->falcon);
+
+ ret = nvkm_gsp_fwsec_sb(gsp);
+ WARN_ON(ret);
+
+ if (suspend) {
+ mbox0 = lower_32_bits(gsp->sr.meta.addr);
+ mbox1 = upper_32_bits(gsp->sr.meta.addr);
+ }
+
+ ret = tu102_gsp_booter_unload(gsp, mbox0, mbox1);
+ WARN_ON(ret);
+ return 0;
+}
+
+int
+tu102_gsp_init(struct nvkm_gsp *gsp)
+{
+ u32 mbox0, mbox1;
+ int ret;
+
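+ /*
+ * Cold boot hands booter the WprMeta address; resume hands it the
+ * suspend/resume metadata set up during tu102_gsp_fini().
+ */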
+ if (!gsp->sr.meta.data) {
+ mbox0 = lower_32_bits(gsp->wpr_meta.addr);
+ mbox1 = upper_32_bits(gsp->wpr_meta.addr);
+ } else {
+ gsp->rm->api->gsp->set_rmargs(gsp, true);
+
+ mbox0 = lower_32_bits(gsp->sr.meta.addr);
+ mbox1 = upper_32_bits(gsp->sr.meta.addr);
+ }
+
+ /* Execute booter to handle (eventually...) booting GSP-RM. */
+ ret = tu102_gsp_booter_load(gsp, mbox0, mbox1);
+ if (WARN_ON(ret))
+ return ret;
+
+ return r535_gsp_init(gsp);
+}
+
+static int
+tu102_gsp_wpr_meta_init(struct nvkm_gsp *gsp)
+{
+ GspFwWprMeta *meta;
+ int ret;
+
+ ret = nvkm_gsp_mem_ctor(gsp, sizeof(*meta), &gsp->wpr_meta);
+ if (ret)
+ return ret;
+
+ meta = gsp->wpr_meta.data;
+
+ meta->magic = GSP_FW_WPR_META_MAGIC;
+ meta->revision = GSP_FW_WPR_META_REVISION;
+
+ meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr;
+ meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size;
+
+ meta->sysmemAddrOfBootloader = gsp->boot.fw.addr;
+ meta->sizeOfBootloader = gsp->boot.fw.size;
+ meta->bootloaderCodeOffset = gsp->boot.code_offset;
+ meta->bootloaderDataOffset = gsp->boot.data_offset;
+ meta->bootloaderManifestOffset = gsp->boot.manifest_offset;
+
+ meta->sysmemAddrOfSignature = gsp->sig.addr;
+ meta->sizeOfSignature = gsp->sig.size;
+
+ meta->gspFwRsvdStart = gsp->fb.heap.addr;
+ meta->nonWprHeapOffset = gsp->fb.heap.addr;
+ meta->nonWprHeapSize = gsp->fb.heap.size;
+ meta->gspFwWprStart = gsp->fb.wpr2.addr;
+ meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr;
+ meta->gspFwHeapSize = gsp->fb.wpr2.heap.size;
+ meta->gspFwOffset = gsp->fb.wpr2.elf.addr;
+ meta->bootBinOffset = gsp->fb.wpr2.boot.addr;
+ meta->frtsOffset = gsp->fb.wpr2.frts.addr;
+ meta->frtsSize = gsp->fb.wpr2.frts.size;
+ meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000);
+ meta->fbSize = gsp->fb.size;
+ meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr;
+ meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size;
+ meta->bootCount = 0;
+ meta->partitionRpcAddr = 0;
+ meta->partitionRpcRequestOffset = 0;
+ meta->partitionRpcReplyOffset = 0;
+ meta->verified = 0;
+ return 0;
+}
+
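+/*
+ * Size the heap GSP-RM receives inside WPR2: a fixed OS carveout plus the
+ * base RM footprint, a per-GiB-of-VRAM term, and client-allocation
+ * headroom, clamped to the per-release minimum.
+ */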
+u64
+tu102_gsp_wpr_heap_size(struct nvkm_gsp *gsp)
+{
+ u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30);
+ u64 heap_size;
+
+ heap_size = gsp->rm->wpr->os_carveout_size +
+ gsp->rm->wpr->base_size +
+ ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) +
+ ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20);
+
+ return max(heap_size, gsp->rm->wpr->heap_size_min);
+}
+
static u64
tu102_gsp_vga_workspace_addr(struct nvkm_gsp *gsp, u64 fb_size)
{
@@ -136,14 +282,67 @@ tu102_gsp_vga_workspace_addr(struct nvkm_gsp *gsp, u64 fb_size)
int
tu102_gsp_oneinit(struct nvkm_gsp *gsp)
{
- gsp->fb.size = nvkm_fb_vidmem_size(gsp->subdev.device);
+ struct nvkm_device *device = gsp->subdev.device;
+ int ret;
+
+ gsp->fb.size = nvkm_fb_vidmem_size(device);
gsp->fb.bios.vga_workspace.addr = tu102_gsp_vga_workspace_addr(gsp, gsp->fb.size);
gsp->fb.bios.vga_workspace.size = gsp->fb.size - gsp->fb.bios.vga_workspace.addr;
gsp->fb.bios.addr = gsp->fb.bios.vga_workspace.addr;
gsp->fb.bios.size = gsp->fb.bios.vga_workspace.size;
- return r535_gsp_oneinit(gsp);
+ ret = gsp->func->booter.ctor(gsp, "booter-load", gsp->fws.booter.load,
+ &device->sec2->falcon, &gsp->booter.load);
+ if (ret)
+ return ret;
+
+ ret = gsp->func->booter.ctor(gsp, "booter-unload", gsp->fws.booter.unload,
+ &device->sec2->falcon, &gsp->booter.unload);
+ if (ret)
+ return ret;
+
+ ret = r535_gsp_oneinit(gsp);
+ if (ret)
+ return ret;
+
+ /* Calculate FB layout. */
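+ /*
+ * WPR2 is laid out top-down from just below the VBIOS VGA workspace:
+ * FRTS, then the bootloader image, the GSP-RM ELF, the GSP heap, and
+ * the WprMeta page at the bottom.  A 1MiB non-WPR heap sits directly
+ * below WPR2.
+ */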
+ gsp->fb.wpr2.frts.size = 0x100000;
+ gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size;
+
+ gsp->fb.wpr2.boot.size = gsp->boot.fw.size;
+ gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000);
+
+ gsp->fb.wpr2.elf.size = gsp->fw.len;
+ gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000);
+
+ gsp->fb.wpr2.heap.size = tu102_gsp_wpr_heap_size(gsp);
+
+ gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000);
+ gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000);
+
+ gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000);
+ gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr;
+
+ gsp->fb.heap.size = 0x100000;
+ gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size;
+
+ ret = tu102_gsp_wpr_meta_init(gsp);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gsp_fwsec_frts(gsp);
+ if (WARN_ON(ret))
+ return ret;
+
+ /* Reset GSP into RISC-V mode. */
+ ret = gsp->func->reset(gsp);
+ if (ret)
+ return ret;
+
+ nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr));
+ nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr));
+ return 0;
}
const struct nvkm_falcon_func
@@ -163,29 +362,73 @@ tu102_gsp_flcn = {
};
static const struct nvkm_gsp_func
-tu102_gsp_r535_113_01 = {
+tu102_gsp = {
.flcn = &tu102_gsp_flcn,
.fwsec = &tu102_gsp_fwsec,
.sig_section = ".fwsignature_tu10x",
- .wpr_heap.base_size = 8 << 20,
- .wpr_heap.min_size = 64 << 20,
-
.booter.ctor = tu102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
- .init = r535_gsp_init,
- .fini = r535_gsp_fini,
+ .init = tu102_gsp_init,
+ .fini = tu102_gsp_fini,
.reset = tu102_gsp_reset,
- .rm = &r535_gsp_rm,
+ .rm.gpu = &tu1xx_gpu,
};
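+
+/*
+ * Fetch the GSP-RM and bootloader firmware images common to all GSP-RM
+ * parts; boards that boot via the booter, like Turing, additionally load
+ * the SEC2-hosted booter images in tu102_gsp_load() below.
+ */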
+int
+tu102_gsp_load_rm(struct nvkm_gsp *gsp, const struct nvkm_gsp_fwif *fwif)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ bool enable_gsp = fwif->enable;
+ int ret;
+
+#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT)
+ enable_gsp = true;
+#endif
+ if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp))
+ return -EINVAL;
+
+ ret = nvkm_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gsp_load_fw(gsp, "bootloader", fwif->ver, &gsp->fws.bl);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int
+tu102_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
+{
+ int ret;
+
+ ret = tu102_gsp_load_rm(gsp, fwif);
+ if (ret)
+ goto done;
+
+ ret = nvkm_gsp_load_fw(gsp, "booter_load", fwif->ver, &gsp->fws.booter.load);
+ if (ret)
+ goto done;
+
+ ret = nvkm_gsp_load_fw(gsp, "booter_unload", fwif->ver, &gsp->fws.booter.unload);
+
+done:
+ if (ret)
+ nvkm_gsp_dtor_fws(gsp);
+
+ return ret;
+}
+
static struct nvkm_gsp_fwif
tu102_gsps[] = {
- { 0, r535_gsp_load, &tu102_gsp_r535_113_01, "535.113.01" },
+ { 1, tu102_gsp_load, &tu102_gsp, &r570_rm_tu102, "570.144" },
+ { 0, tu102_gsp_load, &tu102_gsp, &r535_rm_tu102, "535.113.01" },
{ -1, gv100_gsp_nofw, &gv100_gsp },
{}
};
@@ -196,3 +439,11 @@ tu102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
{
return nvkm_gsp_new_(tu102_gsps, device, type, inst, pgsp);
}
+
+NVKM_GSP_FIRMWARE_BOOTER(tu102, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(tu104, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(tu106, 535.113.01);
+
+NVKM_GSP_FIRMWARE_BOOTER(tu102, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(tu104, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(tu106, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
index 04fbd9ed28b1..97eb046c25d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
@@ -22,29 +22,27 @@
#include "priv.h"
static const struct nvkm_gsp_func
-tu116_gsp_r535_113_01 = {
+tu116_gsp = {
.flcn = &tu102_gsp_flcn,
.fwsec = &tu102_gsp_fwsec,
.sig_section = ".fwsignature_tu11x",
- .wpr_heap.base_size = 8 << 20,
- .wpr_heap.min_size = 64 << 20,
-
.booter.ctor = tu102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
- .init = r535_gsp_init,
- .fini = r535_gsp_fini,
+ .init = tu102_gsp_init,
+ .fini = tu102_gsp_fini,
.reset = tu102_gsp_reset,
- .rm = &r535_gsp_rm,
+ .rm.gpu = &tu1xx_gpu,
};
static struct nvkm_gsp_fwif
tu116_gsps[] = {
- { 0, r535_gsp_load, &tu116_gsp_r535_113_01, "535.113.01" },
+ { 1, tu102_gsp_load, &tu116_gsp, &r570_rm_tu102, "570.144" },
+ { 0, tu102_gsp_load, &tu116_gsp, &r535_rm_tu102, "535.113.01" },
{ -1, gv100_gsp_nofw, &gv100_gsp },
{}
};
@@ -55,3 +53,9 @@ tu116_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
{
return nvkm_gsp_new_(tu116_gsps, device, type, inst, pgsp);
}
+
+NVKM_GSP_FIRMWARE_BOOTER(tu116, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(tu117, 535.113.01);
+
+NVKM_GSP_FIRMWARE_BOOTER(tu116, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(tu117, 570.144);