Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_bo.c')
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_bo.c | 391 |
1 file changed, 219 insertions, 172 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index e427f80344c4..2615912430cc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -37,6 +37,12 @@
 #include "nouveau_bo.h"
 #include "nouveau_ttm.h"
 #include "nouveau_gem.h"
+#include "nouveau_mem.h"
+#include "nouveau_vmm.h"
+
+#include <nvif/class.h>
+#include <nvif/if500b.h>
+#include <nvif/if900b.h>
 
 /*
  * NV10-NV40 tiling helpers
@@ -48,8 +54,7 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	int i = reg - drm->tile.reg;
-	struct nvkm_device *device = nvxx_device(&drm->client.device);
-	struct nvkm_fb *fb = device->fb;
+	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
 	struct nvkm_fb_tile *tile = &fb->tile.region[i];
 
 	nouveau_fence_unref(&reg->fence);
@@ -97,7 +102,7 @@ nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
 
 static struct nouveau_drm_tile *
 nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
-		   u32 size, u32 pitch, u32 flags)
+		   u32 size, u32 pitch, u32 zeta)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
@@ -120,8 +125,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
 	}
 
 	if (found)
-		nv10_bo_update_tile_region(dev, found, addr, size,
-					   pitch, flags);
+		nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta);
 	return found;
 }
 
@@ -155,27 +159,27 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
 	struct nvif_device *device = &drm->client.device;
 
 	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
-		if (nvbo->tile_mode) {
+		if (nvbo->mode) {
 			if (device->info.chipset >= 0x40) {
 				*align = 65536;
-				*size = roundup_64(*size, 64 * nvbo->tile_mode);
+				*size = roundup_64(*size, 64 * nvbo->mode);
 			} else
 			if (device->info.chipset >= 0x30) {
 				*align = 32768;
-				*size = roundup_64(*size, 64 * nvbo->tile_mode);
+				*size = roundup_64(*size, 64 * nvbo->mode);
 			} else
 			if (device->info.chipset >= 0x20) {
 				*align = 16384;
-				*size = roundup_64(*size, 64 * nvbo->tile_mode);
+				*size = roundup_64(*size, 64 * nvbo->mode);
 			} else
 			if (device->info.chipset >= 0x10) {
 				*align = 16384;
-				*size = roundup_64(*size, 32 * nvbo->tile_mode);
+				*size = roundup_64(*size, 32 * nvbo->mode);
 			}
 		}
 	} else {
-		*size = roundup_64(*size, (1 << nvbo->page_shift));
-		*align = max((1 << nvbo->page_shift), *align);
+		*size = roundup_64(*size, (1 << nvbo->page));
+		*align = max((1 << nvbo->page), *align);
 	}
 
 	*size = roundup_64(*size, PAGE_SIZE);
@@ -187,11 +191,13 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 	       struct sg_table *sg, struct reservation_object *robj,
 	       struct nouveau_bo **pnvbo)
 {
-	struct nouveau_drm *drm = nouveau_drm(cli->dev);
+	struct nouveau_drm *drm = cli->drm;
 	struct nouveau_bo *nvbo;
+	struct nvif_mmu *mmu = &cli->mmu;
+	struct nvif_vmm *vmm = &cli->vmm.vmm;
 	size_t acc_size;
-	int ret;
 	int type = ttm_bo_type_device;
+	int ret, i, pi = -1;
 
 	if (!size) {
 		NV_WARN(drm, "skipped size %016llx\n", size);
@@ -207,19 +213,80 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 	INIT_LIST_HEAD(&nvbo->head);
 	INIT_LIST_HEAD(&nvbo->entry);
 	INIT_LIST_HEAD(&nvbo->vma_list);
-	nvbo->tile_mode = tile_mode;
-	nvbo->tile_flags = tile_flags;
 	nvbo->bo.bdev = &drm->ttm.bdev;
 	nvbo->cli = cli;
 
-	if (!nvxx_device(&drm->client.device)->func->cpu_coherent)
-		nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
+	/* This is confusing, and doesn't actually mean we want an uncached
+	 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
+	 * into in nouveau_gem_new().
+	 */
+	if (flags & TTM_PL_FLAG_UNCACHED) {
+		/* Determine if we can get a cache-coherent map, forcing
+		 * uncached mapping if we can't.
+		 */
+		if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
+			nvbo->force_coherent = true;
+	}
+
+	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
+		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
+		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+			kfree(nvbo);
+			return -EINVAL;
+		}
+
+		nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
+	} else
+	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+		nvbo->kind = (tile_flags & 0x00007f00) >> 8;
+		nvbo->comp = (tile_flags & 0x00030000) >> 16;
+		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+			kfree(nvbo);
+			return -EINVAL;
+		}
+	} else {
+		nvbo->zeta = (tile_flags & 0x00000007);
+	}
+	nvbo->mode = tile_mode;
+	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
+
+	/* Determine the desirable target GPU page size for the buffer. */
+	for (i = 0; i < vmm->page_nr; i++) {
+		/* Because we cannot currently allow VMM maps to fail
+		 * during buffer migration, we need to determine page
+		 * size for the buffer up-front, and pre-allocate its
+		 * page tables.
+		 *
+		 * Skip page sizes that can't support needed domains.
+		 */
+		if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
+		    (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
+			continue;
+		if ((flags & TTM_PL_FLAG_TT ) && !vmm->page[i].host)
+			continue;
+
+		/* Select this page size if it's the first that supports
+		 * the potential memory domains, or when it's compatible
+		 * with the requested compression settings.
+		 */
+		if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
+			pi = i;
+
+		/* Stop once the buffer is larger than the current page size. */
+		if (size >= 1ULL << vmm->page[i].shift)
+			break;
+	}
+
+	if (WARN_ON(pi < 0))
+		return -EINVAL;
 
-	nvbo->page_shift = 12;
-	if (drm->client.vm) {
-		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
-			nvbo->page_shift = drm->client.vm->mmu->lpg_shift;
+	/* Disable compression if suitable settings couldn't be found. */
+	if (nvbo->comp && !vmm->page[pi].comp) {
+		if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
+			nvbo->kind = mmu->kind[nvbo->kind];
+		nvbo->comp = 0;
 	}
+	nvbo->page = vmm->page[pi].shift;
 
 	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
 	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
@@ -262,7 +329,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 	unsigned i, fpfn, lpfn;
 
 	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
-	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
+	    nvbo->mode && (type & TTM_PL_FLAG_VRAM) &&
 	    nvbo->bo.mem.num_pages < vram_pages / 4) {
 		/*
 		 * Make sure that the color and depth buffers are handled
@@ -270,7 +337,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 		 * speed up when alpha-blending and depth-test are enabled
 		 * at the same time.
 		 */
-		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
+		if (nvbo->zeta) {
 			fpfn = vram_pages / 2;
 			lpfn = ~0;
 		} else {
@@ -321,14 +388,10 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
 
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
 	    memtype == TTM_PL_FLAG_VRAM && contig) {
-		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
-			if (bo->mem.mem_type == TTM_PL_VRAM) {
-				struct nvkm_mem *mem = bo->mem.mm_node;
-				if (!nvkm_mm_contiguous(mem->mem))
-					evict = true;
-			}
-			nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG;
+		if (!nvbo->contig) {
+			nvbo->contig = true;
 			force = true;
+			evict = true;
 		}
 	}
 
@@ -376,7 +439,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
 
 out:
 	if (force && ret)
-		nvbo->tile_flags |= NOUVEAU_GEM_TILE_NONCONTIG;
+		nvbo->contig = false;
 	ttm_bo_unreserve(bo);
 	return ret;
 }
@@ -446,7 +509,6 @@ void
 nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-	struct nvkm_device *device = nvxx_device(&drm->client.device);
 	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
 	int i;
 
@@ -458,7 +520,8 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
 		return;
 
 	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
-		dma_sync_single_for_device(device->dev, ttm_dma->dma_address[i],
+		dma_sync_single_for_device(drm->dev->dev,
+					   ttm_dma->dma_address[i],
 					   PAGE_SIZE, DMA_TO_DEVICE);
 }
 
@@ -466,7 +529,6 @@ void
 nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-	struct nvkm_device *device = nvxx_device(&drm->client.device);
 	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
 	int i;
 
@@ -478,7 +540,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 		return;
 
 	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
-		dma_sync_single_for_cpu(device->dev, ttm_dma->dma_address[i],
+		dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
 					PAGE_SIZE, DMA_FROM_DEVICE);
 }
 
@@ -568,6 +630,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 			 struct ttm_mem_type_manager *man)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
+	struct nvif_mmu *mmu = &drm->client.mmu;
 
 	switch (type) {
 	case TTM_PL_SYSTEM:
@@ -584,7 +647,8 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 			/* Some BARs do not support being ioremapped WC */
-			if (nvxx_bar(&drm->client.device)->iomap_uncached) {
+			const u8 type = mmu->type[drm->ttm.type_vram].type;
+			if (type & NVIF_MEM_UNCACHED) {
 				man->available_caching = TTM_PL_FLAG_UNCACHED;
 				man->default_caching = TTM_PL_FLAG_UNCACHED;
 			}
@@ -659,14 +723,14 @@ static int
 nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
 	int ret = RING_SPACE(chan, 10);
 	if (ret == 0) {
 		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
-		OUT_RING (chan, upper_32_bits(mem->vma[0].offset));
-		OUT_RING (chan, lower_32_bits(mem->vma[0].offset));
-		OUT_RING (chan, upper_32_bits(mem->vma[1].offset));
-		OUT_RING (chan, lower_32_bits(mem->vma[1].offset));
+		OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
+		OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
+		OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
+		OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
 		OUT_RING (chan, PAGE_SIZE);
 		OUT_RING (chan, PAGE_SIZE);
 		OUT_RING (chan, PAGE_SIZE);
@@ -691,9 +755,9 @@ static int
 nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
-	u64 src_offset = mem->vma[0].offset;
-	u64 dst_offset = mem->vma[1].offset;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
+	u64 src_offset = mem->vma[0].addr;
+	u64 dst_offset = mem->vma[1].addr;
 	u32 page_count = new_reg->num_pages;
 	int ret;
 
@@ -729,9 +793,9 @@ static int
 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
-	u64 src_offset = mem->vma[0].offset;
-	u64 dst_offset = mem->vma[1].offset;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
+	u64 src_offset = mem->vma[0].addr;
+	u64 dst_offset = mem->vma[1].addr;
 	u32 page_count = new_reg->num_pages;
 	int ret;
 
@@ -768,9 +832,9 @@ static int
 nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
-	u64 src_offset = mem->vma[0].offset;
-	u64 dst_offset = mem->vma[1].offset;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
+	u64 src_offset = mem->vma[0].addr;
+	u64 dst_offset = mem->vma[1].addr;
 	u32 page_count = new_reg->num_pages;
 	int ret;
 
@@ -806,14 +870,14 @@ static int
 nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
 	int ret = RING_SPACE(chan, 7);
 	if (ret == 0) {
 		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
-		OUT_RING (chan, upper_32_bits(mem->vma[0].offset));
-		OUT_RING (chan, lower_32_bits(mem->vma[0].offset));
-		OUT_RING (chan, upper_32_bits(mem->vma[1].offset));
-		OUT_RING (chan, lower_32_bits(mem->vma[1].offset));
+		OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
+		OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
+		OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
+		OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
 		OUT_RING (chan, 0x00000000 /* COPY */);
 		OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
 	}
@@ -824,15 +888,15 @@ static int
 nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
 	int ret = RING_SPACE(chan, 7);
 	if (ret == 0) {
 		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
 		OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
-		OUT_RING (chan, upper_32_bits(mem->vma[0].offset));
-		OUT_RING (chan, lower_32_bits(mem->vma[0].offset));
-		OUT_RING (chan, upper_32_bits(mem->vma[1].offset));
-		OUT_RING (chan, lower_32_bits(mem->vma[1].offset));
+		OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
+		OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
+		OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
+		OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
 		OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
 	}
 	return ret;
@@ -858,12 +922,12 @@ static int
 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
 	u64 length = (new_reg->num_pages << PAGE_SHIFT);
-	u64 src_offset = mem->vma[0].offset;
-	u64 dst_offset = mem->vma[1].offset;
-	int src_tiled = !!mem->memtype;
-	int dst_tiled = !!((struct nvkm_mem *)new_reg->mm_node)->memtype;
+	u64 src_offset = mem->vma[0].addr;
+	u64 dst_offset = mem->vma[1].addr;
+	int src_tiled = !!mem->kind;
+	int dst_tiled = !!nouveau_mem(new_reg)->kind;
 	int ret;
 
 	while (length) {
@@ -1000,25 +1064,31 @@ static int
 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
 		     struct ttm_mem_reg *reg)
 {
-	struct nvkm_mem *old_mem = bo->mem.mm_node;
-	struct nvkm_mem *new_mem = reg->mm_node;
-	u64 size = (u64)reg->num_pages << PAGE_SHIFT;
+	struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
+	struct nouveau_mem *new_mem = nouveau_mem(reg);
+	struct nvif_vmm *vmm = &drm->client.vmm.vmm;
 	int ret;
 
-	ret = nvkm_vm_get(drm->client.vm, size, old_mem->page_shift,
-			  NV_MEM_ACCESS_RW, &old_mem->vma[0]);
+	ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0,
+			   old_mem->mem.size, &old_mem->vma[0]);
 	if (ret)
 		return ret;
 
-	ret = nvkm_vm_get(drm->client.vm, size, new_mem->page_shift,
-			  NV_MEM_ACCESS_RW, &old_mem->vma[1]);
+	ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0,
+			   new_mem->mem.size, &old_mem->vma[1]);
+	if (ret)
+		goto done;
+
+	ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
+	if (ret)
+		goto done;
+
+	ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
+done:
 	if (ret) {
-		nvkm_vm_put(&old_mem->vma[0]);
-		return ret;
+		nvif_vmm_put(vmm, &old_mem->vma[1]);
+		nvif_vmm_put(vmm, &old_mem->vma[0]);
 	}
-
-	nvkm_vm_map(&old_mem->vma[0], old_mem);
-	nvkm_vm_map(&old_mem->vma[1], new_mem);
 	return 0;
 }
 
@@ -1200,21 +1270,23 @@ static void
 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
 		     struct ttm_mem_reg *new_reg)
 {
+	struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct nvkm_vma *vma;
+	struct nouveau_vma *vma;
 
 	/* ttm can now (stupidly) pass the driver bos it didn't create... */
 	if (bo->destroy != nouveau_bo_del_ttm)
 		return;
 
-	list_for_each_entry(vma, &nvbo->vma_list, head) {
-		if (new_reg && new_reg->mem_type != TTM_PL_SYSTEM &&
-			      (new_reg->mem_type == TTM_PL_VRAM ||
-			       nvbo->page_shift != vma->vm->mmu->lpg_shift)) {
-			nvkm_vm_map(vma, new_reg->mm_node);
-		} else {
+	if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
+	    mem->mem.page == nvbo->page) {
+		list_for_each_entry(vma, &nvbo->vma_list, head) {
+			nouveau_vma_map(vma, mem);
+		}
+	} else {
+		list_for_each_entry(vma, &nvbo->vma_list, head) {
 			WARN_ON(ttm_bo_wait(bo, false, false));
-			nvkm_vm_unmap(vma);
+			nouveau_vma_unmap(vma);
 		}
 	}
 }
@@ -1234,8 +1306,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg,
 
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
 		*new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
-					       nvbo->tile_mode,
-					       nvbo->tile_flags);
+					       nvbo->mode, nvbo->zeta);
 	}
 
 	return 0;
@@ -1331,8 +1402,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
 	struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type];
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
 	struct nvkm_device *device = nvxx_device(&drm->client.device);
-	struct nvkm_mem *mem = reg->mm_node;
-	int ret;
+	struct nouveau_mem *mem = nouveau_mem(reg);
 
 	reg->bus.addr = NULL;
 	reg->bus.offset = 0;
@@ -1353,7 +1423,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
 			reg->bus.is_iomem = !drm->agp.cma;
 		}
 #endif
-		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || !mem->memtype)
+		if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || !mem->kind)
 			/* untiled */
 			break;
 		/* fallthrough, tiled memory */
@@ -1361,19 +1431,40 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
 		reg->bus.offset = reg->start << PAGE_SHIFT;
 		reg->bus.base = device->func->resource_addr(device, 1);
 		reg->bus.is_iomem = true;
-		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
-			struct nvkm_bar *bar = nvxx_bar(&drm->client.device);
-			int page_shift = 12;
-			if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
-				page_shift = mem->page_shift;
+		if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
+			union {
+				struct nv50_mem_map_v0 nv50;
+				struct gf100_mem_map_v0 gf100;
+			} args;
+			u64 handle, length;
+			u32 argc = 0;
+			int ret;
+
+			switch (mem->mem.object.oclass) {
+			case NVIF_CLASS_MEM_NV50:
+				args.nv50.version = 0;
+				args.nv50.ro = 0;
+				args.nv50.kind = mem->kind;
+				args.nv50.comp = mem->comp;
+				break;
+			case NVIF_CLASS_MEM_GF100:
+				args.gf100.version = 0;
+				args.gf100.ro = 0;
+				args.gf100.kind = mem->kind;
+				break;
+			default:
+				WARN_ON(1);
+				break;
+			}
 
-			ret = nvkm_bar_umap(bar, mem->size << 12, page_shift,
-					    &mem->bar_vma);
-			if (ret)
-				return ret;
+			ret = nvif_object_map_handle(&mem->mem.object,
						     &args, argc,
+						     &handle, &length);
+			if (ret != 1)
+				return ret ? ret : -EINVAL;
 
-			nvkm_vm_map(&mem->bar_vma, mem);
-			reg->bus.offset = mem->bar_vma.offset;
+			reg->bus.base = 0;
+			reg->bus.offset = handle;
 		}
 		break;
 	default:
@@ -1385,13 +1476,22 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
 static void
 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
 {
-	struct nvkm_mem *mem = reg->mm_node;
-
-	if (!mem->bar_vma.node)
-		return;
+	struct nouveau_drm *drm = nouveau_bdev(bdev);
+	struct nouveau_mem *mem = nouveau_mem(reg);
 
-	nvkm_vm_unmap(&mem->bar_vma);
-	nvkm_vm_put(&mem->bar_vma);
+	if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
+		switch (reg->mem_type) {
+		case TTM_PL_TT:
+			if (mem->kind)
+				nvif_object_unmap_handle(&mem->mem.object);
+			break;
+		case TTM_PL_VRAM:
+			nvif_object_unmap_handle(&mem->mem.object);
+			break;
+		default:
+			break;
+		}
+	}
 }
 
 static int
@@ -1408,7 +1508,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 	 */
 	if (bo->mem.mem_type != TTM_PL_VRAM) {
 		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
-		    !nouveau_bo_tile_layout(nvbo))
+		    !nvbo->kind)
 			return 0;
 
 		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
@@ -1445,9 +1545,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 {
 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
 	struct nouveau_drm *drm;
-	struct nvkm_device *device;
-	struct drm_device *dev;
-	struct device *pdev;
+	struct device *dev;
 	unsigned i;
 	int r;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1464,9 +1562,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 	}
 
 	drm = nouveau_bdev(ttm->bdev);
-	device = nvxx_device(&drm->client.device);
-	dev = drm->dev;
-	pdev = device->dev;
+	dev = drm->dev->dev;
 
 #if IS_ENABLED(CONFIG_AGP)
 	if (drm->agp.bridge) {
@@ -1476,7 +1572,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 
 #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
 	if (swiotlb_nr_tbl()) {
-		return ttm_dma_populate((void *)ttm, dev->dev);
+		return ttm_dma_populate((void *)ttm, dev);
 	}
 #endif
 
@@ -1488,12 +1584,12 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 	for (i = 0; i < ttm->num_pages; i++) {
 		dma_addr_t addr;
 
-		addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
+		addr = dma_map_page(dev, ttm->pages[i], 0, PAGE_SIZE,
 				    DMA_BIDIRECTIONAL);
 
-		if (dma_mapping_error(pdev, addr)) {
+		if (dma_mapping_error(dev, addr)) {
 			while (i--) {
-				dma_unmap_page(pdev, ttm_dma->dma_address[i],
+				dma_unmap_page(dev, ttm_dma->dma_address[i],
 					       PAGE_SIZE, DMA_BIDIRECTIONAL);
 				ttm_dma->dma_address[i] = 0;
 			}
@@ -1511,9 +1607,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 {
 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
 	struct nouveau_drm *drm;
-	struct nvkm_device *device;
-	struct drm_device *dev;
-	struct device *pdev;
+	struct device *dev;
 	unsigned i;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
 
@@ -1521,9 +1615,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 		return;
 
 	drm = nouveau_bdev(ttm->bdev);
-	device = nvxx_device(&drm->client.device);
-	dev = drm->dev;
-	pdev = device->dev;
+	dev = drm->dev->dev;
 
 #if IS_ENABLED(CONFIG_AGP)
 	if (drm->agp.bridge) {
@@ -1534,14 +1626,14 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 
 #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
 	if (swiotlb_nr_tbl()) {
-		ttm_dma_unpopulate((void *)ttm, dev->dev);
+		ttm_dma_unpopulate((void *)ttm, dev);
 		return;
 	}
 #endif
 
 	for (i = 0; i < ttm->num_pages; i++) {
 		if (ttm_dma->dma_address[i]) {
-			dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
+			dma_unmap_page(dev, ttm_dma->dma_address[i], PAGE_SIZE,
 				       DMA_BIDIRECTIONAL);
 		}
 	}
@@ -1576,48 +1668,3 @@ struct ttm_bo_driver nouveau_bo_driver = {
 	.io_mem_free = &nouveau_ttm_io_mem_free,
 	.io_mem_pfn = ttm_bo_default_io_mem_pfn,
 };
-
-struct nvkm_vma *
-nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nvkm_vm *vm)
-{
-	struct nvkm_vma *vma;
-	list_for_each_entry(vma, &nvbo->vma_list, head) {
-		if (vma->vm == vm)
-			return vma;
-	}
-
-	return NULL;
-}
-
-int
-nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm,
-		   struct nvkm_vma *vma)
-{
-	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
-	int ret;
-
-	ret = nvkm_vm_get(vm, size, nvbo->page_shift,
-			  NV_MEM_ACCESS_RW, vma);
-	if (ret)
-		return ret;
-
-	if ( nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
-	    (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
-	     nvbo->page_shift != vma->vm->mmu->lpg_shift))
-		nvkm_vm_map(vma, nvbo->bo.mem.mm_node);
-
-	list_add_tail(&vma->head, &nvbo->vma_list);
-	vma->refcount = 1;
-	return 0;
-}
-
-void
-nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
-{
-	if (vma->node) {
-		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
-			nvkm_vm_unmap(vma);
-		nvkm_vm_put(vma);
-		list_del(&vma->head);
-	}
-}