author    Ben Skeggs <bskeggs@redhat.com>  2017-10-31 20:56:19 +0300
committer Ben Skeggs <bskeggs@redhat.com>  2017-11-02 06:32:28 +0300
commit    6ce513529aa57a8c4f61e588142643a9252037ae
tree      07d13795c75fb4e13ae6eff32bdec434d79f3676 /drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
parent    473f9aca6c1063bf77cd61c2683fc496850d63b3
drm/nouveau/mmu/nv44: implement new vmm backend
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c | 165 +++++++++++++
1 file changed, 165 insertions(+)
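The scheme at the heart of this backend: the NV44 MMU stores four 27-bit
page addresses in each naturally-aligned group of four 32-bit PTE words, so
every update is split into an unaligned head, a packed-group middle, and an
unaligned tail. As a rough stand-alone sketch (hypothetical pack4() and
unpack1() helpers, not part of the patch; the shift and mask values are
taken from the code below), the packing round-trips like so:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Pack four 27-bit page frame numbers into four 32-bit words, using the
 * same shifts as the aligned fast path in nv44_vmm_pgt_pte() below. */
static void
pack4(uint32_t out[4], const uint32_t pfn[4])
{
	out[0] = pfn[0] >>  0 | pfn[1] << 27;
	out[1] = pfn[1] >>  5 | pfn[2] << 22;
	out[2] = pfn[2] >> 10 | pfn[3] << 17;
	out[3] = pfn[3] >> 15 | 0x40000000; /* marker bit, as in the patch */
}

/* Recover entry i of a group, using the masks from nv44_vmm_pgt_fill(). */
static uint32_t
unpack1(const uint32_t w[4], unsigned int i)
{
	switch (i & 3) {
	case 0:  return   w[0] & 0x07ffffff;
	case 1:  return  (w[0] >> 27) | ((w[1] & 0x003fffff) <<  5);
	case 2:  return  (w[1] >> 22) | ((w[2] & 0x0001ffff) << 10);
	default: return  (w[2] >> 17) | ((w[3] & 0x00000fff) << 15);
	}
}

int
main(void)
{
	const uint32_t pfn[4] = { 0x0000001, 0x7ffffff, 0x1234567, 0x0abcdef };
	uint32_t w[4];
	unsigned int i;

	pack4(w, pfn);
	for (i = 0; i < 4; i++)
		assert(unpack1(w, i) == pfn[i]);
	printf("4 x 27-bit PFNs round-trip through 4 x 32-bit words\n");
	return 0;
}

Building this with any C99 compiler and running it exercises the same
shift/mask pairs that the head/tail path in nv44_vmm_pgt_fill() relies on.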
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
index 2b5704269ac9..b834e4352334 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
@@ -21,8 +21,158 @@
*/
#include "vmm.h"
+#include <subdev/timer.h>
+
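+/*
+ * Read-modify-write one naturally-aligned group of four PTEs.  NV44 packs
+ * four 27-bit page addresses into four 32-bit words, so a partial update
+ * must read the group back, splice the new addresses in at the per-entry
+ * bit offsets below, and rewrite all four words, ORing 0x40000000 into
+ * the last one as the aligned fast path also does.  A NULL list means
+ * "unmap": entries are pointed at the scratch page (vmm->null) rather
+ * than zeroed.
+ */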
+static void
+nv44_vmm_pgt_fill(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ dma_addr_t *list, u32 ptei, u32 ptes)
+{
+ u32 pteo = (ptei << 2) & ~0x0000000f;
+ u32 tmp[4];
+
+ tmp[0] = nvkm_ro32(pt->memory, pteo + 0x0);
+ tmp[1] = nvkm_ro32(pt->memory, pteo + 0x4);
+ tmp[2] = nvkm_ro32(pt->memory, pteo + 0x8);
+ tmp[3] = nvkm_ro32(pt->memory, pteo + 0xc);
+
+ while (ptes--) {
+ u32 addr = (list ? *list++ : vmm->null) >> 12;
+ switch (ptei++ & 0x3) {
+ case 0:
+ tmp[0] &= ~0x07ffffff;
+ tmp[0] |= addr;
+ break;
+ case 1:
+ tmp[0] &= ~0xf8000000;
+ tmp[0] |= addr << 27;
+ tmp[1] &= ~0x003fffff;
+ tmp[1] |= addr >> 5;
+ break;
+ case 2:
+ tmp[1] &= ~0xffc00000;
+ tmp[1] |= addr << 22;
+ tmp[2] &= ~0x0001ffff;
+ tmp[2] |= addr >> 10;
+ break;
+ case 3:
+ tmp[2] &= ~0xfffe0000;
+ tmp[2] |= addr << 17;
+ tmp[3] &= ~0x00000fff;
+ tmp[3] |= addr >> 15;
+ break;
+ }
+ }
+
+ VMM_WO032(pt, vmm, pteo + 0x0, tmp[0]);
+ VMM_WO032(pt, vmm, pteo + 0x4, tmp[1]);
+ VMM_WO032(pt, vmm, pteo + 0x8, tmp[2]);
+ VMM_WO032(pt, vmm, pteo + 0xc, tmp[3] | 0x40000000);
+}
+
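+/*
+ * Write PTEs for a linear range of pages.  Unaligned head and tail
+ * entries go through nv44_vmm_pgt_fill(); each aligned group of four in
+ * the middle is packed and written directly as four 32-bit words.
+ */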
+static void
+nv44_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+ dma_addr_t tmp[4], i;
+
+ if (ptei & 3) {
+ const u32 pten = min(ptes, 4 - (ptei & 3));
+ for (i = 0; i < pten; i++, addr += 0x1000)
+ tmp[i] = addr;
+ nv44_vmm_pgt_fill(vmm, pt, tmp, ptei, pten);
+ ptei += pten;
+ ptes -= pten;
+ }
+
+ while (ptes >= 4) {
+ for (i = 0; i < 4; i++, addr += 0x1000)
+ tmp[i] = addr >> 12;
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[0] >> 0 | tmp[1] << 27);
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[1] >> 5 | tmp[2] << 22);
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[2] >> 10 | tmp[3] << 17);
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[3] >> 15 | 0x40000000);
+ ptes -= 4;
+ }
+
+ if (ptes) {
+ for (i = 0; i < ptes; i++, addr += 0x1000)
+ tmp[i] = addr;
+ nv44_vmm_pgt_fill(vmm, pt, tmp, ptei, ptes);
+ }
+}
+
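+/* Scatterlist path: the generic iterator invokes nv44_vmm_pgt_pte() once
+ * per physically-contiguous run.
+ */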
+static void
+nv44_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv44_vmm_pgt_pte);
+}
+
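+/*
+ * DMA-list path.  When the kernel page size matches the 4KiB PTE size,
+ * consume map->dma directly with the same head/middle/tail split as
+ * nv44_vmm_pgt_pte(); otherwise fall back to the generic iterator.
+ */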
+static void
+nv44_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+#if PAGE_SHIFT == 12
+ nvkm_kmap(pt->memory);
+ if (ptei & 3) {
+ const u32 pten = min(ptes, 4 - (ptei & 3));
+ nv44_vmm_pgt_fill(vmm, pt, map->dma, ptei, pten);
+ ptei += pten;
+ ptes -= pten;
+ map->dma += pten;
+ }
+
+ while (ptes >= 4) {
+ u32 tmp[4], i;
+ for (i = 0; i < 4; i++)
+ tmp[i] = *map->dma++ >> 12;
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[0] >> 0 | tmp[1] << 27);
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[1] >> 5 | tmp[2] << 22);
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[2] >> 10 | tmp[3] << 17);
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[3] >> 15 | 0x40000000);
+ ptes -= 4;
+ }
+
+ if (ptes) {
+ nv44_vmm_pgt_fill(vmm, pt, map->dma, ptei, ptes);
+ map->dma += ptes;
+ }
+ nvkm_done(pt->memory);
+#else
+ VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv44_vmm_pgt_pte);
+#endif
+}
+
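+/*
+ * Unmap a range: partial groups are refilled through nv44_vmm_pgt_fill()
+ * with a NULL list, pointing them at the scratch page, while whole
+ * groups are simply zeroed.
+ */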
+static void
+nv44_vmm_pgt_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ nvkm_kmap(pt->memory);
+ if (ptei & 3) {
+ const u32 pten = min(ptes, 4 - (ptei & 3));
+ nv44_vmm_pgt_fill(vmm, pt, NULL, ptei, pten);
+ ptei += pten;
+ ptes -= pten;
+ }
+
+ while (ptes > 4) {
+ VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
+ VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
+ VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
+ VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
+ ptes -= 4;
+ }
+
+ if (ptes)
+ nv44_vmm_pgt_fill(vmm, pt, NULL, ptei, ptes);
+ nvkm_done(pt->memory);
+}
+
static const struct nvkm_vmm_desc_func
nv44_vmm_desc_pgt = {
+ .unmap = nv44_vmm_pgt_unmap,
+ .dma = nv44_vmm_pgt_dma,
+ .sgl = nv44_vmm_pgt_sgl,
};
static const struct nvkm_vmm_desc
@@ -31,8 +181,23 @@ nv44_vmm_desc_12[] = {
{}
};
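+/*
+ * TLB flush: write the last valid address to 0x100814, poke 0x100808 to
+ * kick the flush, poll up to 2ms for the done bit, then clear the
+ * trigger.  (Register semantics inferred from the code, not from docs.)
+ */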
+static void
+nv44_vmm_flush(struct nvkm_vmm *vmm, int level)
+{
+ struct nvkm_device *device = vmm->mmu->subdev.device;
+ nvkm_wr32(device, 0x100814, vmm->limit - 4096);
+ nvkm_wr32(device, 0x100808, 0x00000020);
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x100808) & 0x00000001)
+ break;
+ );
+ nvkm_wr32(device, 0x100808, 0x00000000);
+}
+
static const struct nvkm_vmm_func
nv44_vmm = {
+ .valid = nv04_vmm_valid,
+ .flush = nv44_vmm_flush,
.page = {
{ 12, &nv44_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
{}