Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c')
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c | 287
1 file changed, 116 insertions, 171 deletions
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
index 8548adb91dcc..370dcd8ff7b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
@@ -21,251 +21,196 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "nv50.h"
 
-#include <core/device.h>
 #include <core/gpuobj.h>
 #include <subdev/fb.h>
 #include <subdev/mmu.h>
 #include <subdev/timer.h>
 
-struct nv50_bar_priv {
-	struct nvkm_bar base;
-	spinlock_t lock;
-	struct nvkm_gpuobj *mem;
-	struct nvkm_gpuobj *pad;
-	struct nvkm_gpuobj *pgd;
-	struct nvkm_vm *bar1_vm;
-	struct nvkm_gpuobj *bar1;
-	struct nvkm_vm *bar3_vm;
-	struct nvkm_gpuobj *bar3;
-};
-
-static int
-nv50_bar_kmap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
-	      struct nvkm_vma *vma)
-{
-	struct nv50_bar_priv *priv = (void *)bar;
-	int ret;
-
-	ret = nvkm_vm_get(priv->bar3_vm, mem->size << 12, 12, flags, vma);
-	if (ret)
-		return ret;
-
-	nvkm_vm_map(vma, mem);
-	return 0;
-}
-
-static int
-nv50_bar_umap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
-	      struct nvkm_vma *vma)
+struct nvkm_vm *
+nv50_bar_kmap(struct nvkm_bar *base)
 {
-	struct nv50_bar_priv *priv = (void *)bar;
-	int ret;
-
-	ret = nvkm_vm_get(priv->bar1_vm, mem->size << 12, 12, flags, vma);
-	if (ret)
-		return ret;
-
-	nvkm_vm_map(vma, mem);
-	return 0;
+	return nv50_bar(base)->bar3_vm;
 }
 
-static void
-nv50_bar_unmap(struct nvkm_bar *bar, struct nvkm_vma *vma)
+int
+nv50_bar_umap(struct nvkm_bar *base, u64 size, int type, struct nvkm_vma *vma)
 {
-	nvkm_vm_unmap(vma);
-	nvkm_vm_put(vma);
+	struct nv50_bar *bar = nv50_bar(base);
+	return nvkm_vm_get(bar->bar1_vm, size, type, NV_MEM_ACCESS_RW, vma);
 }
 
 static void
-nv50_bar_flush(struct nvkm_bar *bar)
-{
-	struct nv50_bar_priv *priv = (void *)bar;
-	unsigned long flags;
-	spin_lock_irqsave(&priv->lock, flags);
-	nv_wr32(priv, 0x00330c, 0x00000001);
-	if (!nv_wait(priv, 0x00330c, 0x00000002, 0x00000000))
-		nv_warn(priv, "flush timeout\n");
-	spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-void
-g84_bar_flush(struct nvkm_bar *bar)
+nv50_bar_flush(struct nvkm_bar *base)
 {
-	struct nv50_bar_priv *priv = (void *)bar;
+	struct nv50_bar *bar = nv50_bar(base);
+	struct nvkm_device *device = bar->base.subdev.device;
 	unsigned long flags;
-	spin_lock_irqsave(&priv->lock, flags);
-	nv_wr32(bar, 0x070000, 0x00000001);
-	if (!nv_wait(priv, 0x070000, 0x00000002, 0x00000000))
-		nv_warn(priv, "flush timeout\n");
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_lock_irqsave(&bar->base.lock, flags);
+	nvkm_wr32(device, 0x00330c, 0x00000001);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x00330c) & 0x00000002))
+			break;
+	);
+	spin_unlock_irqrestore(&bar->base.lock, flags);
 }
 
-static int
-nv50_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+int
+nv50_bar_oneinit(struct nvkm_bar *base)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_object *heap;
+	struct nv50_bar *bar = nv50_bar(base);
+	struct nvkm_device *device = bar->base.subdev.device;
+	static struct lock_class_key bar1_lock;
+	static struct lock_class_key bar3_lock;
 	struct nvkm_vm *vm;
-	struct nv50_bar_priv *priv;
 	u64 start, limit;
 	int ret;
 
-	ret = nvkm_bar_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
+	ret = nvkm_gpuobj_new(device, 0x20000, 0, false, NULL, &bar->mem);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
-			      NVOBJ_FLAG_HEAP, &priv->mem);
-	heap = nv_object(priv->mem);
+	ret = nvkm_gpuobj_new(device, bar->pgd_addr, 0, false, bar->mem,
+			      &bar->pad);
	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(priv), heap,
-			      (device->chipset == 0x50) ? 0x1400 : 0x0200,
-			      0, 0, &priv->pad);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), heap, 0x4000, 0, 0, &priv->pgd);
+	ret = nvkm_gpuobj_new(device, 0x4000, 0, false, bar->mem, &bar->pgd);
 	if (ret)
 		return ret;
 
 	/* BAR3 */
 	start = 0x0100000000ULL;
-	limit = start + nv_device_resource_len(device, 3);
+	limit = start + device->func->resource_size(device, 3);
 
-	ret = nvkm_vm_new(device, start, limit, start, &vm);
+	ret = nvkm_vm_new(device, start, limit, start, &bar3_lock, &vm);
 	if (ret)
 		return ret;
 
-	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);
+	atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
 
-	ret = nvkm_gpuobj_new(nv_object(priv), heap,
-			      ((limit-- - start) >> 12) * 8, 0x1000,
-			      NVOBJ_FLAG_ZERO_ALLOC, &vm->pgt[0].obj[0]);
-	vm->pgt[0].refcount[0] = 1;
+	ret = nvkm_vm_boot(vm, limit-- - start);
 	if (ret)
 		return ret;
 
-	ret = nvkm_vm_ref(vm, &priv->bar3_vm, priv->pgd);
+	ret = nvkm_vm_ref(vm, &bar->bar3_vm, bar->pgd);
 	nvkm_vm_ref(NULL, &vm, NULL);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar3);
+	ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar3);
 	if (ret)
 		return ret;
 
-	nv_wo32(priv->bar3, 0x00, 0x7fc00000);
-	nv_wo32(priv->bar3, 0x04, lower_32_bits(limit));
-	nv_wo32(priv->bar3, 0x08, lower_32_bits(start));
-	nv_wo32(priv->bar3, 0x0c, upper_32_bits(limit) << 24 |
-				  upper_32_bits(start));
-	nv_wo32(priv->bar3, 0x10, 0x00000000);
-	nv_wo32(priv->bar3, 0x14, 0x00000000);
+	nvkm_kmap(bar->bar3);
+	nvkm_wo32(bar->bar3, 0x00, 0x7fc00000);
+	nvkm_wo32(bar->bar3, 0x04, lower_32_bits(limit));
+	nvkm_wo32(bar->bar3, 0x08, lower_32_bits(start));
+	nvkm_wo32(bar->bar3, 0x0c, upper_32_bits(limit) << 24 |
+				   upper_32_bits(start));
+	nvkm_wo32(bar->bar3, 0x10, 0x00000000);
+	nvkm_wo32(bar->bar3, 0x14, 0x00000000);
+	nvkm_done(bar->bar3);
 
 	/* BAR1 */
 	start = 0x0000000000ULL;
-	limit = start + nv_device_resource_len(device, 1);
+	limit = start + device->func->resource_size(device, 1);
 
-	ret = nvkm_vm_new(device, start, limit--, start, &vm);
+	ret = nvkm_vm_new(device, start, limit--, start, &bar1_lock, &vm);
 	if (ret)
 		return ret;
 
-	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);
+	atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
 
-	ret = nvkm_vm_ref(vm, &priv->bar1_vm, priv->pgd);
+	ret = nvkm_vm_ref(vm, &bar->bar1_vm, bar->pgd);
 	nvkm_vm_ref(NULL, &vm, NULL);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar1);
+	ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar1);
 	if (ret)
 		return ret;
 
-	nv_wo32(priv->bar1, 0x00, 0x7fc00000);
-	nv_wo32(priv->bar1, 0x04, lower_32_bits(limit));
-	nv_wo32(priv->bar1, 0x08, lower_32_bits(start));
-	nv_wo32(priv->bar1, 0x0c, upper_32_bits(limit) << 24 |
-				  upper_32_bits(start));
-	nv_wo32(priv->bar1, 0x10, 0x00000000);
-	nv_wo32(priv->bar1, 0x14, 0x00000000);
-
-	priv->base.alloc = nvkm_bar_alloc;
-	priv->base.kmap = nv50_bar_kmap;
-	priv->base.umap = nv50_bar_umap;
-	priv->base.unmap = nv50_bar_unmap;
-	if (device->chipset == 0x50)
-		priv->base.flush = nv50_bar_flush;
-	else
-		priv->base.flush = g84_bar_flush;
-	spin_lock_init(&priv->lock);
+	nvkm_kmap(bar->bar1);
+	nvkm_wo32(bar->bar1, 0x00, 0x7fc00000);
+	nvkm_wo32(bar->bar1, 0x04, lower_32_bits(limit));
+	nvkm_wo32(bar->bar1, 0x08, lower_32_bits(start));
+	nvkm_wo32(bar->bar1, 0x0c, upper_32_bits(limit) << 24 |
+				   upper_32_bits(start));
+	nvkm_wo32(bar->bar1, 0x10, 0x00000000);
+	nvkm_wo32(bar->bar1, 0x14, 0x00000000);
+	nvkm_done(bar->bar1);
 	return 0;
 }
 
-static void
-nv50_bar_dtor(struct nvkm_object *object)
+int
+nv50_bar_init(struct nvkm_bar *base)
 {
-	struct nv50_bar_priv *priv = (void *)object;
-	nvkm_gpuobj_ref(NULL, &priv->bar1);
-	nvkm_vm_ref(NULL, &priv->bar1_vm, priv->pgd);
-	nvkm_gpuobj_ref(NULL, &priv->bar3);
-	if (priv->bar3_vm) {
-		nvkm_gpuobj_ref(NULL, &priv->bar3_vm->pgt[0].obj[0]);
-		nvkm_vm_ref(NULL, &priv->bar3_vm, priv->pgd);
-	}
-	nvkm_gpuobj_ref(NULL, &priv->pgd);
-	nvkm_gpuobj_ref(NULL, &priv->pad);
-	nvkm_gpuobj_ref(NULL, &priv->mem);
-	nvkm_bar_destroy(&priv->base);
-}
-
-static int
-nv50_bar_init(struct nvkm_object *object)
-{
-	struct nv50_bar_priv *priv = (void *)object;
-	int ret, i;
-
-	ret = nvkm_bar_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
-	nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
-	nv_wr32(priv, 0x100c80, 0x00060001);
-	if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000)) {
-		nv_error(priv, "vm flush timeout\n");
+	struct nv50_bar *bar = nv50_bar(base);
+	struct nvkm_device *device = bar->base.subdev.device;
+	int i;
+
+	nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
+	nvkm_wr32(device, 0x100c80, 0x00060001);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
+			break;
+	) < 0)
 		return -EBUSY;
-	}
 
-	nv_wr32(priv, 0x001704, 0x00000000 | priv->mem->addr >> 12);
-	nv_wr32(priv, 0x001704, 0x40000000 | priv->mem->addr >> 12);
-	nv_wr32(priv, 0x001708, 0x80000000 | priv->bar1->node->offset >> 4);
-	nv_wr32(priv, 0x00170c, 0x80000000 | priv->bar3->node->offset >> 4);
+	nvkm_wr32(device, 0x001704, 0x00000000 | bar->mem->addr >> 12);
+	nvkm_wr32(device, 0x001704, 0x40000000 | bar->mem->addr >> 12);
+	nvkm_wr32(device, 0x001708, 0x80000000 | bar->bar1->node->offset >> 4);
+	nvkm_wr32(device, 0x00170c, 0x80000000 | bar->bar3->node->offset >> 4);
 	for (i = 0; i < 8; i++)
-		nv_wr32(priv, 0x001900 + (i * 4), 0x00000000);
+		nvkm_wr32(device, 0x001900 + (i * 4), 0x00000000);
 	return 0;
 }
 
-static int
-nv50_bar_fini(struct nvkm_object *object, bool suspend)
+void *
+nv50_bar_dtor(struct nvkm_bar *base)
 {
-	struct nv50_bar_priv *priv = (void *)object;
-	return nvkm_bar_fini(&priv->base, suspend);
+	struct nv50_bar *bar = nv50_bar(base);
+	nvkm_gpuobj_del(&bar->bar1);
+	nvkm_vm_ref(NULL, &bar->bar1_vm, bar->pgd);
+	nvkm_gpuobj_del(&bar->bar3);
+	if (bar->bar3_vm) {
+		nvkm_memory_del(&bar->bar3_vm->pgt[0].mem[0]);
+		nvkm_vm_ref(NULL, &bar->bar3_vm, bar->pgd);
+	}
+	nvkm_gpuobj_del(&bar->pgd);
+	nvkm_gpuobj_del(&bar->pad);
+	nvkm_gpuobj_del(&bar->mem);
+	return bar;
 }
 
-struct nvkm_oclass
-nv50_bar_oclass = {
-	.handle = NV_SUBDEV(BAR, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_bar_ctor,
-		.dtor = nv50_bar_dtor,
-		.init = nv50_bar_init,
-		.fini = nv50_bar_fini,
-	},
+int
+nv50_bar_new_(const struct nvkm_bar_func *func, struct nvkm_device *device,
+	      int index, u32 pgd_addr, struct nvkm_bar **pbar)
+{
+	struct nv50_bar *bar;
+	if (!(bar = kzalloc(sizeof(*bar), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_bar_ctor(func, device, index, &bar->base);
+	bar->pgd_addr = pgd_addr;
+	*pbar = &bar->base;
+	return 0;
+}
+
+static const struct nvkm_bar_func
+nv50_bar_func = {
+	.dtor = nv50_bar_dtor,
+	.oneinit = nv50_bar_oneinit,
+	.init = nv50_bar_init,
+	.kmap = nv50_bar_kmap,
+	.umap = nv50_bar_umap,
+	.flush = nv50_bar_flush,
 };
+
+int
+nv50_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+{
+	return nv50_bar_new_(&nv50_bar_func, device, index, 0x1400, pbar);
+}