Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_bo.c')
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_bo.c | 262
1 files changed, 128 insertions, 134 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index ba29a701ca1d..eea74b127b03 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -27,13 +27,9 @@
  * Jeremy Kolb <jkolb@brandeis.edu>
  */
 
-#include <core/engine.h>
+#include <linux/dma-mapping.h>
 #include <linux/swiotlb.h>
 
-#include <subdev/fb.h>
-#include <subdev/vm.h>
-#include <subdev/bar.h>
-
 #include "nouveau_drm.h"
 #include "nouveau_dma.h"
 #include "nouveau_fence.h"
@@ -52,7 +48,7 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	int i = reg - drm->tile.reg;
-	struct nouveau_fb *pfb = nouveau_fb(drm->device);
+	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
 	struct nouveau_fb_tile *tile = &pfb->tile.region[i];
 	struct nouveau_engine *engine;
 
@@ -92,13 +88,13 @@ nv10_bo_get_tile_region(struct drm_device *dev, int i)
 static void
 nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
-			struct nouveau_fence *fence)
+			struct fence *fence)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	if (tile) {
 		spin_lock(&drm->tile.lock);
-		tile->fence = nouveau_fence_ref(fence);
+		tile->fence = (struct nouveau_fence *)fence_get(fence);
 		tile->used = false;
 		spin_unlock(&drm->tile.lock);
 	}
 }
@@ -109,7 +105,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
 		   u32 size, u32 pitch, u32 flags)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_fb *pfb = nouveau_fb(drm->device);
+	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
 	struct nouveau_drm_tile *tile, *found = NULL;
 	int i;
 
@@ -153,23 +149,23 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
 		       int *align, int *size)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-	struct nouveau_device *device = nv_device(drm->device);
+	struct nvif_device *device = &drm->device;
 
-	if (device->card_type < NV_50) {
+	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
 		if (nvbo->tile_mode) {
-			if (device->chipset >= 0x40) {
+			if (device->info.chipset >= 0x40) {
 				*align = 65536;
 				*size = roundup(*size, 64 * nvbo->tile_mode);
 
-			} else if (device->chipset >= 0x30) {
+			} else if (device->info.chipset >= 0x30) {
 				*align = 32768;
 				*size = roundup(*size, 64 * nvbo->tile_mode);
 
-			} else if (device->chipset >= 0x20) {
+			} else if (device->info.chipset >= 0x20) {
 				*align = 16384;
 				*size = roundup(*size, 64 * nvbo->tile_mode);
 
-			} else if (device->chipset >= 0x10) {
+			} else if (device->info.chipset >= 0x10) {
 				*align = 16384;
 				*size = roundup(*size, 32 * nvbo->tile_mode);
 			}
@@ -196,12 +192,12 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 	int lpg_shift = 12;
 	int max_size;
 
-	if (drm->client.base.vm)
-		lpg_shift = drm->client.base.vm->vmm->lpg_shift;
+	if (drm->client.vm)
+		lpg_shift = drm->client.vm->vmm->lpg_shift;
 	max_size = INT_MAX & ~((1 << lpg_shift) - 1);
 
 	if (size <= 0 || size > max_size) {
-		nv_warn(drm, "skipped size %x\n", (u32)size);
+		NV_WARN(drm, "skipped size %x\n", (u32)size);
 		return -EINVAL;
 	}
 
@@ -219,9 +215,9 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 	nvbo->bo.bdev = &drm->ttm.bdev;
 
 	nvbo->page_shift = 12;
-	if (drm->client.base.vm) {
+	if (drm->client.vm) {
 		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
-			nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
+			nvbo->page_shift = drm->client.vm->vmm->lpg_shift;
 	}
 
 	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
@@ -245,27 +241,26 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 }
 
 static void
-set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
+set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
 {
 	*n = 0;
 
 	if (type & TTM_PL_FLAG_VRAM)
-		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
+		pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
 	if (type & TTM_PL_FLAG_TT)
-		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
+		pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
 	if (type & TTM_PL_FLAG_SYSTEM)
-		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
+		pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
 }
 
 static void
 set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-	struct nouveau_fb *pfb = nouveau_fb(drm->device);
-	u32 vram_pages = pfb->ram->size >> PAGE_SHIFT;
+	u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT;
+	unsigned i, fpfn, lpfn;
 
-	if ((nv_device(drm->device)->card_type == NV_10 ||
-	     nv_device(drm->device)->card_type == NV_11) &&
+	if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
 	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
 	    nvbo->bo.mem.num_pages < vram_pages / 4) {
 		/*
@@ -275,11 +270,19 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 		 * at the same time.
 		 */
 		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
-			nvbo->placement.fpfn = vram_pages / 2;
-			nvbo->placement.lpfn = ~0;
+			fpfn = vram_pages / 2;
+			lpfn = ~0;
 		} else {
-			nvbo->placement.fpfn = 0;
-			nvbo->placement.lpfn = vram_pages / 2;
+			fpfn = 0;
+			lpfn = vram_pages / 2;
+		}
+		for (i = 0; i < nvbo->placement.num_placement; ++i) {
+			nvbo->placements[i].fpfn = fpfn;
+			nvbo->placements[i].lpfn = lpfn;
+		}
+		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
+			nvbo->busy_placements[i].fpfn = fpfn;
+			nvbo->busy_placements[i].lpfn = lpfn;
 		}
 	}
 }
@@ -500,21 +503,28 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_VRAM:
-		if (nv_device(drm->device)->card_type >= NV_50) {
+		man->flags = TTM_MEMTYPE_FLAG_FIXED |
+			     TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_FLAG_UNCACHED |
+					 TTM_PL_FLAG_WC;
+		man->default_caching = TTM_PL_FLAG_WC;
+
+		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+			/* Some BARs do not support being ioremapped WC */
+			if (nvkm_bar(&drm->device)->iomap_uncached) {
+				man->available_caching = TTM_PL_FLAG_UNCACHED;
+				man->default_caching = TTM_PL_FLAG_UNCACHED;
+			}
+
 			man->func = &nouveau_vram_manager;
 			man->io_reserve_fastpath = false;
 			man->use_io_reserve_lru = true;
 		} else {
 			man->func = &ttm_bo_manager_func;
 		}
-		man->flags = TTM_MEMTYPE_FLAG_FIXED |
-			     TTM_MEMTYPE_FLAG_MAPPABLE;
-		man->available_caching = TTM_PL_FLAG_UNCACHED |
-					 TTM_PL_FLAG_WC;
-		man->default_caching = TTM_PL_FLAG_WC;
 		break;
 	case TTM_PL_TT:
-		if (nv_device(drm->device)->card_type >= NV_50)
+		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
 			man->func = &nouveau_gart_manager;
 		else
 		if (drm->agp.stat != ENABLED)
@@ -763,9 +773,9 @@ nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
 		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
 		OUT_RING (chan, handle);
 		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
-		OUT_RING (chan, NvNotify0);
-		OUT_RING (chan, NvDmaFB);
-		OUT_RING (chan, NvDmaFB);
+		OUT_RING (chan, chan->drm->ntfy.handle);
+		OUT_RING (chan, chan->vram.handle);
+		OUT_RING (chan, chan->vram.handle);
 	}
 
 	return ret;
@@ -852,7 +862,7 @@ nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
 		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
 		OUT_RING (chan, handle);
 		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
-		OUT_RING (chan, NvNotify0);
+		OUT_RING (chan, chan->drm->ntfy.handle);
 	}
 
 	return ret;
@@ -864,7 +874,7 @@ nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
 {
 	if (mem->mem_type == TTM_PL_TT)
 		return NvDmaTT;
-	return NvDmaFB;
+	return chan->vram.handle;
 }
 
 static int
@@ -922,12 +932,12 @@ nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
 	u64 size = (u64)mem->num_pages << PAGE_SHIFT;
 	int ret;
 
-	ret = nouveau_vm_get(nv_client(drm)->vm, size, old_node->page_shift,
+	ret = nouveau_vm_get(drm->client.vm, size, old_node->page_shift,
 			     NV_MEM_ACCESS_RW, &old_node->vma[0]);
 	if (ret)
 		return ret;
 
-	ret = nouveau_vm_get(nv_client(drm)->vm, size, new_node->page_shift,
+	ret = nouveau_vm_get(drm->client.vm, size, new_node->page_shift,
 			     NV_MEM_ACCESS_RW, &old_node->vma[1]);
 	if (ret) {
 		nouveau_vm_put(&old_node->vma[0]);
@@ -945,6 +955,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_channel *chan = drm->ttm.chan;
+	struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
 	struct nouveau_fence *fence;
 	int ret;
 
@@ -952,20 +963,21 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 	 * old nouveau_mem node, these will get cleaned up after ttm has
 	 * destroyed the ttm_mem_reg
 	 */
-	if (nv_device(drm->device)->card_type >= NV_50) {
+	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 		ret = nouveau_bo_move_prep(drm, bo, new_mem);
 		if (ret)
 			return ret;
 	}
 
-	mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING);
-	ret = nouveau_fence_sync(bo->sync_obj, chan);
+	mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
+	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true);
 	if (ret == 0) {
 		ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
 		if (ret == 0) {
 			ret = nouveau_fence_new(chan, false, &fence);
 			if (ret == 0) {
-				ret = ttm_bo_move_accel_cleanup(bo, fence,
+				ret = ttm_bo_move_accel_cleanup(bo,
+								&fence->base,
 								evict,
 								no_wait_gpu,
 								new_mem);
@@ -973,7 +985,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 			}
 		}
 	}
-	mutex_unlock(&chan->cli->mutex);
+	mutex_unlock(&cli->mutex);
 	return ret;
 }
 
@@ -1005,9 +1017,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
 	int ret;
 
 	do {
-		struct nouveau_object *object;
 		struct nouveau_channel *chan;
-		u32 handle = (mthd->engine << 16) | mthd->oclass;
 
 		if (mthd->engine)
 			chan = drm->cechan;
@@ -1016,13 +1026,14 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
 		if (chan == NULL)
 			continue;
 
-		ret = nouveau_object_new(nv_object(drm), chan->handle, handle,
-					 mthd->oclass, NULL, 0, &object);
+		ret = nvif_object_init(chan->object, NULL,
+				       mthd->oclass | (mthd->engine << 16),
+				       mthd->oclass, NULL, 0,
+				       &drm->ttm.copy);
 		if (ret == 0) {
-			ret = mthd->init(chan, handle);
+			ret = mthd->init(chan, drm->ttm.copy.handle);
 			if (ret) {
-				nouveau_object_del(nv_object(drm),
-						   chan->handle, handle);
+				nvif_object_fini(&drm->ttm.copy);
 				continue;
 			}
 
@@ -1040,12 +1051,15 @@ static int
 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
-	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
+	struct ttm_place placement_memtype = {
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
+	};
 	struct ttm_placement placement;
 	struct ttm_mem_reg tmp_mem;
 	int ret;
 
-	placement.fpfn = placement.lpfn = 0;
 	placement.num_placement = placement.num_busy_placement = 1;
 	placement.placement = placement.busy_placement = &placement_memtype;
 
@@ -1073,12 +1087,15 @@ static int
 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
 		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
-	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
+	struct ttm_place placement_memtype = {
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
+	};
 	struct ttm_placement placement;
 	struct ttm_mem_reg tmp_mem;
 	int ret;
 
-	placement.fpfn = placement.lpfn = 0;
 	placement.num_placement = placement.num_busy_placement = 1;
 	placement.placement = placement.busy_placement = &placement_memtype;
 
@@ -1135,7 +1152,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
 	if (new_mem->mem_type != TTM_PL_VRAM)
 		return 0;
 
-	if (nv_device(drm->device)->card_type >= NV_10) {
+	if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
 		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
 						nvbo->tile_mode, nvbo->tile_flags);
 	}
@@ -1151,8 +1168,9 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct drm_device *dev = drm->dev;
+	struct fence *fence = reservation_object_get_excl(bo->resv);
 
-	nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
+	nv10_bo_put_tile_region(dev, *old_tile, fence);
 	*old_tile = new_tile;
 }
@@ -1166,7 +1184,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 	struct nouveau_drm_tile *new_tile = NULL;
 	int ret = 0;
 
-	if (nv_device(drm->device)->card_type < NV_50) {
+	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
 		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
 		if (ret)
 			return ret;
@@ -1196,14 +1214,12 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 	}
 
 	/* Fallback to software copy. */
-	spin_lock(&bo->bdev->fence_lock);
 	ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
-	spin_unlock(&bo->bdev->fence_lock);
 	if (ret == 0)
 		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 
 out:
-	if (nv_device(drm->device)->card_type < NV_50) {
+	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
 		if (ret)
 			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
 		else
@@ -1227,7 +1243,6 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
 	struct nouveau_mem *node = mem->mm_node;
-	struct drm_device *dev = drm->dev;
 	int ret;
 
 	mem->bus.addr = NULL;
@@ -1246,19 +1261,19 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 		if (drm->agp.stat == ENABLED) {
 			mem->bus.offset = mem->start << PAGE_SHIFT;
 			mem->bus.base = drm->agp.base;
-			mem->bus.is_iomem = !dev->agp->cant_use_aperture;
+			mem->bus.is_iomem = !drm->dev->agp->cant_use_aperture;
 		}
 #endif
-		if (nv_device(drm->device)->card_type < NV_50 || !node->memtype)
+		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
 			/* untiled */
 			break;
 		/* fallthrough, tiled memory */
 	case TTM_PL_VRAM:
 		mem->bus.offset = mem->start << PAGE_SHIFT;
-		mem->bus.base = nv_device_resource_start(nouveau_dev(dev), 1);
+		mem->bus.base = nv_device_resource_start(nvkm_device(&drm->device), 1);
 		mem->bus.is_iomem = true;
-		if (nv_device(drm->device)->card_type >= NV_50) {
-			struct nouveau_bar *bar = nouveau_bar(drm->device);
+		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+			struct nouveau_bar *bar = nvkm_bar(&drm->device);
 
 			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
 					&node->bar_vma);
@@ -1278,7 +1293,7 @@ static void
 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
-	struct nouveau_bar *bar = nouveau_bar(drm->device);
+	struct nouveau_bar *bar = nvkm_bar(&drm->device);
 	struct nouveau_mem *node = mem->mm_node;
 
 	if (!node->bar_vma.node)
 		return;
@@ -1292,15 +1307,15 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct nouveau_device *device = nv_device(drm->device);
-	u32 mappable = nv_device_resource_len(device, 1) >> PAGE_SHIFT;
-	int ret;
+	struct nvif_device *device = &drm->device;
+	u32 mappable = nv_device_resource_len(nvkm_device(device), 1) >> PAGE_SHIFT;
+	int i, ret;
 
 	/* as long as the bo isn't in vram, and isn't tiled, we've got
 	 * nothing to do here.
 	 */
 	if (bo->mem.mem_type != TTM_PL_VRAM) {
-		if (nv_device(drm->device)->card_type < NV_50 ||
+		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA ||
 		    !nouveau_bo_tile_layout(nvbo))
 			return 0;
 
@@ -1315,13 +1330,20 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 	}
 
 	/* make sure bo is in mappable vram */
-	if (nv_device(drm->device)->card_type >= NV_50 ||
+	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
 	    bo->mem.start + bo->mem.num_pages < mappable)
 		return 0;
 
+	for (i = 0; i < nvbo->placement.num_placement; ++i) {
+		nvbo->placements[i].fpfn = 0;
+		nvbo->placements[i].lpfn = mappable;
+	}
+
+	for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
+		nvbo->busy_placements[i].fpfn = 0;
+		nvbo->busy_placements[i].lpfn = mappable;
+	}
-	nvbo->placement.fpfn = 0;
-	nvbo->placement.lpfn = mappable;
 	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
 	return nouveau_bo_validate(nvbo, false, false);
 }
 
@@ -1333,6 +1355,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 	struct nouveau_drm *drm;
 	struct nouveau_device *device;
 	struct drm_device *dev;
+	struct device *pdev;
 	unsigned i;
 	int r;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1349,8 +1372,9 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 	}
 
 	drm = nouveau_bdev(ttm->bdev);
-	device = nv_device(drm->device);
+	device = nvkm_device(&drm->device);
 	dev = drm->dev;
+	pdev = nv_device_base(device);
 
 #if __OS_HAS_AGP
 	if (drm->agp.stat == ENABLED) {
@@ -1370,17 +1394,22 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 	}
 
 	for (i = 0; i < ttm->num_pages; i++) {
-		ttm_dma->dma_address[i] = nv_device_map_page(device,
-							     ttm->pages[i]);
-		if (!ttm_dma->dma_address[i]) {
+		dma_addr_t addr;
+
+		addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
+				    DMA_BIDIRECTIONAL);
+
+		if (dma_mapping_error(pdev, addr)) {
 			while (--i) {
-				nv_device_unmap_page(device,
-						     ttm_dma->dma_address[i]);
+				dma_unmap_page(pdev, ttm_dma->dma_address[i],
+					       PAGE_SIZE, DMA_BIDIRECTIONAL);
 				ttm_dma->dma_address[i] = 0;
 			}
 			ttm_pool_unpopulate(ttm);
 			return -EFAULT;
 		}
+
+		ttm_dma->dma_address[i] = addr;
 	}
 	return 0;
 }
@@ -1392,6 +1421,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	struct nouveau_drm *drm;
 	struct nouveau_device *device;
 	struct drm_device *dev;
+	struct device *pdev;
 	unsigned i;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
 
@@ -1399,8 +1429,9 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 		return;
 
 	drm = nouveau_bdev(ttm->bdev);
-	device = nv_device(drm->device);
+	device = nvkm_device(&drm->device);
 	dev = drm->dev;
+	pdev = nv_device_base(device);
 
 #if __OS_HAS_AGP
 	if (drm->agp.stat == ENABLED) {
@@ -1418,7 +1449,8 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 
 	for (i = 0; i < ttm->num_pages; i++) {
 		if (ttm_dma->dma_address[i]) {
-			nv_device_unmap_page(device, ttm_dma->dma_address[i]);
+			dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
+				       DMA_BIDIRECTIONAL);
 		}
 	}
 
@@ -1426,47 +1458,14 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	ttm_pool_unpopulate(ttm);
 }
 
 void
-nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
-{
-	struct nouveau_fence *new_fence = nouveau_fence_ref(fence);
-	struct nouveau_fence *old_fence = NULL;
-
-	spin_lock(&nvbo->bo.bdev->fence_lock);
-	old_fence = nvbo->bo.sync_obj;
-	nvbo->bo.sync_obj = new_fence;
-	spin_unlock(&nvbo->bo.bdev->fence_lock);
-
-	nouveau_fence_unref(&old_fence);
-}
-
-static void
-nouveau_bo_fence_unref(void **sync_obj)
-{
-	nouveau_fence_unref((struct nouveau_fence **)sync_obj);
-}
-
-static void *
-nouveau_bo_fence_ref(void *sync_obj)
+nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
 {
-	return nouveau_fence_ref(sync_obj);
-}
-
-static bool
-nouveau_bo_fence_signalled(void *sync_obj)
-{
-	return nouveau_fence_done(sync_obj);
-}
+	struct reservation_object *resv = nvbo->bo.resv;
 
-static int
-nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
-{
-	return nouveau_fence_wait(sync_obj, lazy, intr);
-}
-
-static int
-nouveau_bo_fence_flush(void *sync_obj)
-{
-	return 0;
+	if (exclusive)
+		reservation_object_add_excl_fence(resv, &fence->base);
+	else if (fence)
+		reservation_object_add_shared_fence(resv, &fence->base);
 }
 
@@ -1479,11 +1478,6 @@ struct ttm_bo_driver nouveau_bo_driver = {
 	.move_notify = nouveau_bo_move_ntfy,
 	.move = nouveau_bo_move,
 	.verify_access = nouveau_bo_verify_access,
-	.sync_obj_signaled = nouveau_bo_fence_signalled,
-	.sync_obj_wait = nouveau_bo_fence_wait,
-	.sync_obj_flush = nouveau_bo_fence_flush,
-	.sync_obj_unref = nouveau_bo_fence_unref,
-	.sync_obj_ref = nouveau_bo_fence_ref,
 	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
 	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
 	.io_mem_free = &nouveau_ttm_io_mem_free,
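Note on the nouveau_ttm_tt_populate()/nouveau_ttm_tt_unpopulate() hunks above: the patch drops nouveau's private nv_device_map_page()/nv_device_unmap_page() wrappers in favour of the generic DMA-mapping API (dma_map_page(), dma_mapping_error(), dma_unmap_page()). Below is a minimal, self-contained sketch of that map-with-unwind pattern; the helper name example_map_pages() is hypothetical and not part of this patch. One difference worth flagging: the sketch unwinds with `while (i--)`, which also unmaps index 0, whereas the hunk above keeps the pre-existing `while (--i)` loop, which stops before the first page.

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Hypothetical helper: map an array of pages for bidirectional streaming
 * DMA, unwinding every mapping made so far if any single map fails. */
static int example_map_pages(struct device *dev, struct page **pages,
			     dma_addr_t *dma_addrs, unsigned int npages)
{
	unsigned int i;

	for (i = 0; i < npages; i++) {
		dma_addrs[i] = dma_map_page(dev, pages[i], 0, PAGE_SIZE,
					    DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma_addrs[i])) {
			/* i-- revisits every successfully mapped index,
			 * including 0, before reporting the failure. */
			while (i--)
				dma_unmap_page(dev, dma_addrs[i], PAGE_SIZE,
					       DMA_BIDIRECTIONAL);
			return -EFAULT;
		}
	}
	return 0;
}

The returned handle must be checked with dma_mapping_error() rather than compared against zero, since 0 can be a valid DMA address on some platforms; that is why the patch also switches the populate path's error test.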