author    Ben Skeggs <bskeggs@redhat.com>  2017-10-31 20:56:19 +0300
committer Ben Skeggs <bskeggs@redhat.com>  2017-11-02 06:32:31 +0300
commit    51645eb71485dd3d72d9fe2acd0298057afdf437 (patch)
tree      425a3b72924ee3ebb2d93cb70a422c150bb3ad12 /drivers
parent    3a314f747ba5b4cca22a36603768c176d1761afd (diff)
download  linux-51645eb71485dd3d72d9fe2acd0298057afdf437.tar.xz
drm/nouveau/mmu: build up information on available memory types
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h |  20
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c    | 120
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c     |   1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c   |   1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c   |   1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c   |   1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c   |   2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c   |   2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c   |   1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c   |   1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h    |   1
11 files changed, 151 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 7fa60d79ec4c..11a169d3ed8c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -108,6 +108,26 @@ struct nvkm_mmu {
u8 dma_bits;
u8 lpg_shift;
+ int heap_nr;
+ struct {
+#define NVKM_MEM_VRAM 0x01
+#define NVKM_MEM_HOST 0x02
+#define NVKM_MEM_COMP 0x04
+#define NVKM_MEM_DISP 0x08
+ u8 type;
+ u64 size;
+ } heap[4];
+
+ int type_nr;
+ struct {
+#define NVKM_MEM_KIND 0x10
+#define NVKM_MEM_MAPPABLE 0x20
+#define NVKM_MEM_COHERENT 0x40
+#define NVKM_MEM_UNCACHED 0x80
+ u8 type;
+ u8 heap;
+ } type[16];
+
struct nvkm_vmm *vmm;
struct {
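
For reference, the flag macros above split into heap properties (low nibble: VRAM/HOST/COMP/DISP, fixed per heap) and per-type mapping properties (high nibble: KIND/MAPPABLE/COHERENT/UNCACHED); each type[] entry ORs its heap's bits with its own. Below is a minimal standalone sketch, not part of the patch, that decodes a type byte using the same values:

/* Standalone sketch: decode the NVKM_MEM_* bitfield layout added above. */
#include <stdint.h>
#include <stdio.h>

#define NVKM_MEM_VRAM     0x01
#define NVKM_MEM_HOST     0x02
#define NVKM_MEM_COMP     0x04
#define NVKM_MEM_DISP     0x08
#define NVKM_MEM_KIND     0x10
#define NVKM_MEM_MAPPABLE 0x20
#define NVKM_MEM_COHERENT 0x40
#define NVKM_MEM_UNCACHED 0x80

static void
decode(uint8_t type)
{
	printf("0x%02x:%s%s%s%s%s%s%s%s\n", type,
	       (type & NVKM_MEM_VRAM)     ? " vram"     : "",
	       (type & NVKM_MEM_HOST)     ? " host"     : "",
	       (type & NVKM_MEM_COMP)     ? " comp"     : "",
	       (type & NVKM_MEM_DISP)     ? " disp"     : "",
	       (type & NVKM_MEM_KIND)     ? " kind"     : "",
	       (type & NVKM_MEM_MAPPABLE) ? " mappable" : "",
	       (type & NVKM_MEM_COHERENT) ? " coherent" : "",
	       (type & NVKM_MEM_UNCACHED) ? " uncached" : "");
}

int
main(void)
{
	/* e.g. a compression/display-capable VRAM type, mapped write-combined */
	decode(NVKM_MEM_VRAM | NVKM_MEM_COMP | NVKM_MEM_DISP |
	       NVKM_MEM_KIND | NVKM_MEM_MAPPABLE);
	/* e.g. coherent, cached, CPU-mappable system memory */
	decode(NVKM_MEM_HOST | NVKM_MEM_MAPPABLE | NVKM_MEM_COHERENT);
	return 0;
}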
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index 9bf688df24f0..be600049f221 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -24,6 +24,7 @@
#include "priv.h"
#include "vmm.h"
+#include <subdev/bar.h>
#include <subdev/fb.h>
#include <nvif/if500d.h>
@@ -443,11 +444,130 @@ nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_memory *inst)
return 0;
}
+static void
+nvkm_mmu_type(struct nvkm_mmu *mmu, int heap, u8 type)
+{
+ if (heap >= 0 && !WARN_ON(mmu->type_nr == ARRAY_SIZE(mmu->type))) {
+ mmu->type[mmu->type_nr].type = type | mmu->heap[heap].type;
+ mmu->type[mmu->type_nr].heap = heap;
+ mmu->type_nr++;
+ }
+}
+
+static int
+nvkm_mmu_heap(struct nvkm_mmu *mmu, u8 type, u64 size)
+{
+ if (size) {
+ if (!WARN_ON(mmu->heap_nr == ARRAY_SIZE(mmu->heap))) {
+ mmu->heap[mmu->heap_nr].type = type;
+ mmu->heap[mmu->heap_nr].size = size;
+ return mmu->heap_nr++;
+ }
+ }
+ return -EINVAL;
+}
+
+static void
+nvkm_mmu_host(struct nvkm_mmu *mmu)
+{
+ struct nvkm_device *device = mmu->subdev.device;
+ u8 type = NVKM_MEM_KIND * !!mmu->func->kind_sys;
+ int heap;
+
+ /* Non-mappable system memory. */
+ heap = nvkm_mmu_heap(mmu, NVKM_MEM_HOST, ~0ULL);
+ nvkm_mmu_type(mmu, heap, type);
+
+ /* Non-coherent, cached, system memory.
+ *
+ * Block-linear mappings of system memory must be done through
+ * BAR1, and cannot be supported on systems where we're unable
+ * to map BAR1 with write-combining.
+ */
+ type |= NVKM_MEM_MAPPABLE;
+ if (!device->bar || device->bar->iomap_uncached)
+ nvkm_mmu_type(mmu, heap, type & ~NVKM_MEM_KIND);
+ else
+ nvkm_mmu_type(mmu, heap, type);
+
+ /* Coherent, cached, system memory.
+ *
+ * Unsupported on systems that aren't able to support snooped
+ * mappings, and also for block-linear mappings which must be
+ * done through BAR1.
+ */
+ type |= NVKM_MEM_COHERENT;
+ if (device->func->cpu_coherent)
+ nvkm_mmu_type(mmu, heap, type & ~NVKM_MEM_KIND);
+
+ /* Uncached system memory. */
+ nvkm_mmu_type(mmu, heap, type |= NVKM_MEM_UNCACHED);
+}
+
+static void
+nvkm_mmu_vram(struct nvkm_mmu *mmu)
+{
+ struct nvkm_device *device = mmu->subdev.device;
+ struct nvkm_mm *mm = &device->fb->ram->vram;
+ const u32 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
+ const u32 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
+ const u32 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
+ u8 type = NVKM_MEM_KIND * !!mmu->func->kind;
+ u8 heap = NVKM_MEM_VRAM;
+ int heapM, heapN, heapU;
+
+ /* Mixed-memory doesn't support compression or display. */
+ heapM = nvkm_mmu_heap(mmu, heap, sizeM << NVKM_RAM_MM_SHIFT);
+
+ heap |= NVKM_MEM_COMP;
+ heap |= NVKM_MEM_DISP;
+ heapN = nvkm_mmu_heap(mmu, heap, sizeN << NVKM_RAM_MM_SHIFT);
+ heapU = nvkm_mmu_heap(mmu, heap, sizeU << NVKM_RAM_MM_SHIFT);
+
+ /* Add non-mappable VRAM types first so that they're preferred
+ * over anything else. Mixed-memory will be slower than other
+ * heaps, so it's prioritised last.
+ */
+ nvkm_mmu_type(mmu, heapU, type);
+ nvkm_mmu_type(mmu, heapN, type);
+ nvkm_mmu_type(mmu, heapM, type);
+
+ /* Add host memory types next, under the assumption that users
+ * wanting mappable memory want to use them as staging buffers
+ * or the like.
+ */
+ nvkm_mmu_host(mmu);
+
+ /* Mappable VRAM types go last, as they're basically the worst
+ * possible type to ask for unless there's no other choice.
+ */
+ if (device->bar) {
+ /* Write-combined BAR1 access. */
+ type |= NVKM_MEM_MAPPABLE;
+ if (!device->bar->iomap_uncached) {
+ nvkm_mmu_type(mmu, heapN, type);
+ nvkm_mmu_type(mmu, heapM, type);
+ }
+
+ /* Uncached BAR1 access. */
+ type |= NVKM_MEM_COHERENT;
+ type |= NVKM_MEM_UNCACHED;
+ nvkm_mmu_type(mmu, heapN, type);
+ nvkm_mmu_type(mmu, heapM, type);
+ }
+}
+
static int
nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
{
struct nvkm_mmu *mmu = nvkm_mmu(subdev);
+ /* Determine available memory types. */
+ if (mmu->subdev.device->fb && mmu->subdev.device->fb->ram)
+ nvkm_mmu_vram(mmu);
+ else
+ nvkm_mmu_host(mmu);
+
if (mmu->func->vmm.global) {
int ret = nvkm_vmm_new(subdev->device, 0, 0, NULL, 0, NULL,
"gart", &mmu->vmm);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c
index c90c9980478b..0f4c9533c967 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c
@@ -30,6 +30,7 @@ g84_mmu = {
.lpg_shift = 16,
.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, nv50_vmm_new, false, 0x0200 },
.kind = nv50_mmu_kind,
+ .kind_sys = true,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
index ca5ca27188cd..81e9145f96a1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
@@ -78,6 +78,7 @@ gf100_mmu = {
.lpg_shift = 17,
.vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gf100_vmm_new },
.kind = gf100_mmu_kind,
+ .kind_sys = true,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c
index db7353aa25df..95f07acaed2f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c
@@ -30,6 +30,7 @@ gk104_mmu = {
.lpg_shift = 17,
.vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gk104_vmm_new },
.kind = gf100_mmu_kind,
+ .kind_sys = true,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c
index 2dfe8b1ba109..18d4a321bd64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c
@@ -30,6 +30,7 @@ gk20a_mmu = {
.lpg_shift = 17,
.vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gk20a_vmm_new },
.kind = gf100_mmu_kind,
+ .kind_sys = true,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
index a9628baf5372..f961f782ac9b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
@@ -74,6 +74,7 @@ gm200_mmu = {
.lpg_shift = 17,
.vmm = {{ -1, 0, NVIF_CLASS_VMM_GM200}, gm200_vmm_new },
.kind = gm200_mmu_kind,
+ .kind_sys = true,
};
static const struct nvkm_mmu_func
@@ -83,6 +84,7 @@ gm200_mmu_fixed = {
.lpg_shift = 17,
.vmm = {{ -1, -1, NVIF_CLASS_VMM_GM200}, gm200_vmm_new_fixed },
.kind = gm200_mmu_kind,
+ .kind_sys = true,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c
index 5d14c138af02..585fe80f87af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c
@@ -32,6 +32,7 @@ gm20b_mmu = {
.lpg_shift = 17,
.vmm = {{ -1, 0, NVIF_CLASS_VMM_GM200}, gm20b_vmm_new },
.kind = gm200_mmu_kind,
+ .kind_sys = true,
};
static const struct nvkm_mmu_func
@@ -41,6 +42,7 @@ gm20b_mmu_fixed = {
.lpg_shift = 17,
.vmm = {{ -1, -1, NVIF_CLASS_VMM_GM200}, gm20b_vmm_new_fixed },
.kind = gm200_mmu_kind,
+ .kind_sys = true,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
index 0cc9c89c0e73..4ebb45a275ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
@@ -32,6 +32,7 @@ gp100_mmu = {
.lpg_shift = 16,
.vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gp100_vmm_new },
.kind = gm200_mmu_kind,
+ .kind_sys = true,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
index 5c7217697760..e864cfe57ae6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
@@ -32,6 +32,7 @@ gp10b_mmu = {
.lpg_shift = 16,
.vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gp10b_vmm_new },
.kind = gm200_mmu_kind,
+ .kind_sys = true,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
index bc4571a20690..d8d63ae7e0f7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -25,6 +25,7 @@ struct nvkm_mmu_func {
} vmm;
const u8 *(*kind)(struct nvkm_mmu *, int *count);
+ bool kind_sys;
};
extern const struct nvkm_mmu_func nv04_mmu;
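
The kind_sys flag added to the per-GPU function tables (and declared here in priv.h) is what nvkm_mmu_host() consults when deciding whether host memory types advertise NVKM_MEM_KIND. Below is a standalone sketch of that derivation, using made-up capability booleans in place of the real nvkm_device/nvkm_bar state:

/* Standalone sketch: mirror nvkm_mmu_host()'s flag derivation with made-up
 * capabilities.  kind_sys gates NVKM_MEM_KIND overall, write-combined BAR1
 * gates it on the cached mappable type, cpu_coherent gates the coherent type.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NVKM_MEM_HOST     0x02
#define NVKM_MEM_KIND     0x10
#define NVKM_MEM_MAPPABLE 0x20
#define NVKM_MEM_COHERENT 0x40
#define NVKM_MEM_UNCACHED 0x80

static void
host_types(bool kind_sys, bool bar_wc, bool cpu_coherent)
{
	uint8_t type = NVKM_MEM_KIND * kind_sys;

	printf("kind_sys=%d bar_wc=%d cpu_coherent=%d:\n",
	       kind_sys, bar_wc, cpu_coherent);

	/* Non-mappable system memory. */
	printf("  0x%02x\n", NVKM_MEM_HOST | type);

	/* Non-coherent, cached; kind only survives with write-combined BAR1. */
	type |= NVKM_MEM_MAPPABLE;
	printf("  0x%02x\n",
	       NVKM_MEM_HOST | (bar_wc ? type : (type & ~NVKM_MEM_KIND)));

	/* Coherent, cached; never kind, and only on snoop-capable systems. */
	type |= NVKM_MEM_COHERENT;
	if (cpu_coherent)
		printf("  0x%02x\n", NVKM_MEM_HOST | (type & ~NVKM_MEM_KIND));

	/* Uncached system memory. */
	printf("  0x%02x\n", NVKM_MEM_HOST | (type |= NVKM_MEM_UNCACHED));
}

int
main(void)
{
	host_types(true, true, true);	/* hypothetical discrete GPU */
	host_types(false, false, false);	/* hypothetical limited setup */
	return 0;
}

MMUs whose function table leaves kind_sys unset therefore drop NVKM_MEM_KIND from every host type they advertise.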