author    Ben Skeggs <bskeggs@redhat.com>  2015-08-20 07:54:19 +0300
committer Ben Skeggs <bskeggs@redhat.com>  2015-08-28 05:40:40 +0300
commit    8f0649b5c6e70ec18122255690e39f010c12a614 (patch)
tree      c6d001b9db9fdc0db86488d79dec2a25d0d2bf0f /drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
parent    9a65a38c456ebac97f0498e85fe26f6d26fe3936 (diff)
drm/nouveau/fifo: convert user classes to new-style nvkm_object
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c  407
1 file changed, 189 insertions(+), 218 deletions(-)
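
The core of the conversion is visible in the hunks below: the old per-object nvkm_ofuncs/nvkm_oclass tables (gk104_fifo_chan_ofuncs, gk104_fifo_cclass) are replaced by a single const nvkm_fifo_chan_func table that gk104_fifo_gpfifo_new() hands to nvkm_fifo_chan_ctor(). As a rough, self-contained illustration of that pattern only (generic names, ordinary user-space C, not the kernel structures themselves):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for nvkm_fifo_chan_func: one const table of
 * function pointers per channel class, bound at construction time. */
struct chan_func {
	void *(*dtor)(void *chan);
	void (*init)(void *chan);
	void (*fini)(void *chan);
};

struct chan {
	const struct chan_func *func;	/* set once by the constructor */
	int chid;
};

static void *demo_dtor(void *chan) { return chan; }
static void demo_init(void *chan) { printf("init chid %d\n", ((struct chan *)chan)->chid); }
static void demo_fini(void *chan) { printf("fini chid %d\n", ((struct chan *)chan)->chid); }

static const struct chan_func demo_func = {
	.dtor = demo_dtor,
	.init = demo_init,
	.fini = demo_fini,
};

/* The constructor takes the func table up front, the way the patched
 * gk104_fifo_gpfifo_new() passes gk104_fifo_gpfifo_func to
 * nvkm_fifo_chan_ctor() in the diff below. */
static struct chan *chan_new(const struct chan_func *func, int chid)
{
	struct chan *chan = calloc(1, sizeof(*chan));
	if (!chan)
		return NULL;
	chan->func = func;
	chan->chid = chid;
	return chan;
}

int main(void)
{
	struct chan *chan = chan_new(&demo_func, 3);
	if (!chan)
		return 1;
	chan->func->init(chan);
	chan->func->fini(chan);
	free(chan->func->dtor(chan));
	return 0;
}
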
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index 264c9705bccc..2595cf92ff80 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -31,30 +31,13 @@
#include <nvif/class.h>
#include <nvif/unpack.h>
-#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
-static const struct {
- u64 subdev;
- u64 mask;
-} fifo_engine[] = {
- _(NVDEV_ENGINE_GR , (1ULL << NVDEV_ENGINE_SW) |
- (1ULL << NVDEV_ENGINE_CE2)),
- _(NVDEV_ENGINE_MSPDEC , 0),
- _(NVDEV_ENGINE_MSPPP , 0),
- _(NVDEV_ENGINE_MSVLD , 0),
- _(NVDEV_ENGINE_CE0 , 0),
- _(NVDEV_ENGINE_CE1 , 0),
- _(NVDEV_ENGINE_MSENC , 0),
-};
-#undef _
-#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
-
static int
-gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
+gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
{
- struct nvkm_object *obj = (void *)chan;
- struct gk104_fifo *fifo = (void *)obj->engine;
+ struct gk104_fifo *fifo = chan->fifo;
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
+ struct nvkm_client *client = chan->base.object.client;
nvkm_wr32(device, 0x002634, chan->base.chid);
if (nvkm_msec(device, 2000,
@@ -62,198 +45,249 @@ gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
break;
) < 0) {
nvkm_error(subdev, "channel %d [%s] kick timeout\n",
- chan->base.chid, nvkm_client_name(chan));
+ chan->base.chid, client->name);
return -EBUSY;
}
return 0;
}
-static int
-gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
- struct nvkm_object *object)
+static u32
+gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
{
- struct gk104_fifo_base *base = (void *)parent->parent;
- struct gk104_fifo_chan *chan = (void *)parent;
- struct nvkm_gpuobj *engn = &base->base.gpuobj;
- u32 addr;
- int ret;
-
- switch (nv_engidx(object->engine)) {
- case NVDEV_ENGINE_SW : return 0;
+ switch (engine->subdev.index) {
+ case NVDEV_ENGINE_SW :
case NVDEV_ENGINE_CE0 :
case NVDEV_ENGINE_CE1 :
- case NVDEV_ENGINE_CE2 : addr = 0x0000; break;
- case NVDEV_ENGINE_GR : addr = 0x0210; break;
- case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
- case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
- case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
+ case NVDEV_ENGINE_CE2 : return 0x0000;
+ case NVDEV_ENGINE_GR : return 0x0210;
+ case NVDEV_ENGINE_MSPDEC: return 0x0250;
+ case NVDEV_ENGINE_MSPPP : return 0x0260;
+ case NVDEV_ENGINE_MSVLD : return 0x0270;
default:
- return -EINVAL;
+ WARN_ON(1);
+ return 0;
}
+}
- ret = gk104_fifo_chan_kick(chan);
+static int
+gk104_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine, bool suspend)
+{
+ const u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
+ struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ struct nvkm_gpuobj *inst = chan->base.inst;
+ int ret;
+
+ ret = gk104_fifo_gpfifo_kick(chan);
if (ret && suspend)
return ret;
- if (addr) {
- nvkm_kmap(engn);
- nvkm_wo32(engn, addr + 0x00, 0x00000000);
- nvkm_wo32(engn, addr + 0x04, 0x00000000);
- nvkm_done(engn);
+ if (offset) {
+ nvkm_kmap(inst);
+ nvkm_wo32(inst, offset + 0x00, 0x00000000);
+ nvkm_wo32(inst, offset + 0x04, 0x00000000);
+ nvkm_done(inst);
+ }
+
+ return ret;
+}
+
+static int
+gk104_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine)
+{
+ const u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
+ struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ struct nvkm_gpuobj *inst = chan->base.inst;
+
+ if (offset) {
+ u64 addr = chan->engn[engine->subdev.index].vma.offset;
+ nvkm_kmap(inst);
+ nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4);
+ nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr));
+ nvkm_done(inst);
}
return 0;
}
+static void
+gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine)
+{
+ struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
+ nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
+}
+
static int
-gk104_fifo_context_attach(struct nvkm_object *parent,
- struct nvkm_object *object)
+gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine,
+ struct nvkm_object *object)
{
- struct gk104_fifo_base *base = (void *)parent->parent;
- struct nvkm_gpuobj *engn = &base->base.gpuobj;
- struct nvkm_engctx *ectx = (void *)object;
- u32 addr;
+ struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ int engn = engine->subdev.index;
int ret;
- switch (nv_engidx(object->engine)) {
- case NVDEV_ENGINE_SW :
- return 0;
- case NVDEV_ENGINE_CE0:
- case NVDEV_ENGINE_CE1:
- case NVDEV_ENGINE_CE2:
- nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
+ if (!gk104_fifo_gpfifo_engine_addr(engine))
return 0;
- case NVDEV_ENGINE_GR : addr = 0x0210; break;
- case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
- case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
- case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
- default:
- return -EINVAL;
- }
- if (!ectx->vma.node) {
- ret = nvkm_gpuobj_map(nv_gpuobj(ectx), base->vm,
- NV_MEM_ACCESS_RW, &ectx->vma);
- if (ret)
- return ret;
-
- nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
+ if (object->oclass) {
+ return nvkm_gpuobj_map(nv_gpuobj(object), chan->vm,
+ NV_MEM_ACCESS_RW,
+ &chan->engn[engn].vma);
}
- nvkm_kmap(engn);
- nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
- nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset));
- nvkm_done(engn);
- return 0;
+ ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst);
+ if (ret)
+ return ret;
+
+ return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
+ NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
}
-static int
-gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
+static void
+gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
{
- struct gk104_fifo *fifo = (void *)object->engine;
- struct gk104_fifo_chan *chan = (void *)object;
+ struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ struct gk104_fifo *fifo = chan->fifo;
struct nvkm_device *device = fifo->base.engine.subdev.device;
- u32 chid = chan->base.chid;
+ u32 coff = chan->base.chid * 8;
- if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
- nvkm_mask(device, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
+ if (!list_empty(&chan->head)) {
+ list_del_init(&chan->head);
+ nvkm_mask(device, 0x800004 + coff, 0x00000800, 0x00000800);
gk104_fifo_runlist_update(fifo, chan->engine);
}
- nvkm_wr32(device, 0x800000 + (chid * 8), 0x00000000);
- return nvkm_fifo_channel_fini(&chan->base, suspend);
+ nvkm_wr32(device, 0x800000 + coff, 0x00000000);
}
-static int
-gk104_fifo_chan_init(struct nvkm_object *object)
+static void
+gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
{
- struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
- struct gk104_fifo *fifo = (void *)object->engine;
- struct gk104_fifo_chan *chan = (void *)object;
+ struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ struct gk104_fifo *fifo = chan->fifo;
struct nvkm_device *device = fifo->base.engine.subdev.device;
- u32 chid = chan->base.chid;
- int ret;
+ u32 addr = chan->base.inst->addr >> 12;
+ u32 coff = chan->base.chid * 8;
- ret = nvkm_fifo_channel_init(&chan->base);
- if (ret)
- return ret;
+ nvkm_mask(device, 0x800004 + coff, 0x000f0000, chan->engine << 16);
+ nvkm_wr32(device, 0x800000 + coff, 0x80000000 | addr);
- nvkm_mask(device, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
- nvkm_wr32(device, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
-
- if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
- nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
+ if (list_empty(&chan->head) && !chan->killed) {
+ list_add_tail(&chan->head, &fifo->engine[chan->engine].chan);
+ nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
gk104_fifo_runlist_update(fifo, chan->engine);
- nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
+ nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
}
+}
- return 0;
+static void *
+gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
+{
+ struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
+ nvkm_gpuobj_del(&chan->pgd);
+ return chan;
}
-static int
-gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+static const struct nvkm_fifo_chan_func
+gk104_fifo_gpfifo_func = {
+ .dtor = gk104_fifo_gpfifo_dtor,
+ .init = gk104_fifo_gpfifo_init,
+ .fini = gk104_fifo_gpfifo_fini,
+ .ntfy = g84_fifo_chan_ntfy,
+ .engine_ctor = gk104_fifo_gpfifo_engine_ctor,
+ .engine_dtor = gk104_fifo_gpfifo_engine_dtor,
+ .engine_init = gk104_fifo_gpfifo_engine_init,
+ .engine_fini = gk104_fifo_gpfifo_engine_fini,
+};
+
+int
+gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
{
union {
struct kepler_channel_gpfifo_a_v0 v0;
} *args = data;
- struct gk104_fifo *fifo = (void *)engine;
- struct gk104_fifo_base *base = (void *)parent;
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_object *parent = oclass->parent;
struct gk104_fifo_chan *chan;
- struct nvkm_gpuobj *ramfc = &base->base.gpuobj;
u64 usermem, ioffset, ilength;
u32 engines;
int ret, i;
nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, false)) {
- nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx"
+ nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
"ioffset %016llx ilength %08x engine %08x\n",
args->v0.version, args->v0.vm, args->v0.ioffset,
args->v0.ilength, args->v0.engine);
- if (args->v0.vm)
- return -ENOENT;
} else
return ret;
- for (i = 0, engines = 0; i < FIFO_ENGINE_NR; i++) {
- if (!nvkm_engine(parent, fifo_engine[i].subdev))
+ /* determine which downstream engines are present */
+ for (i = 0, engines = 0; i < ARRAY_SIZE(fifo->engine); i++) {
+ u64 subdevs = gk104_fifo_engine_subdev(i);
+ if (!nvkm_device_engine(device, __ffs64(subdevs)))
continue;
engines |= (1 << i);
}
+ /* if this is an engine mask query, we're done */
if (!args->v0.engine) {
- static struct nvkm_oclass oclass = {
- .ofuncs = &nvkm_object_ofuncs,
- };
args->v0.engine = engines;
- return nvkm_object_old(parent, engine, &oclass, NULL, 0, pobject);
+ return nvkm_object_new(oclass, NULL, 0, pobject);
}
- engines &= args->v0.engine;
- if (!engines) {
- nvif_ioctl(parent, "unsupported engines %08x\n",
- args->v0.engine);
+ /* check that we support a requested engine - note that the user
+ * argument is a mask in order to allow the user to request (for
+ * example) *any* copy engine, but doesn't matter which.
+ */
+ args->v0.engine &= engines;
+ if (!args->v0.engine) {
+ nvif_ioctl(parent, "no supported engine\n");
return -ENODEV;
}
- i = __ffs(engines);
- ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
- fifo->user.bar.offset, 0x200, 0,
- fifo_engine[i].mask, &chan);
- *pobject = nv_object(chan);
+ /* allocate the channel */
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &chan->base.object;
+ chan->fifo = fifo;
+ chan->engine = __ffs(args->v0.engine);
+ INIT_LIST_HEAD(&chan->head);
+
+ ret = nvkm_fifo_chan_ctor(&gk104_fifo_gpfifo_func, &fifo->base,
+ 0x1000, 0x1000, true, args->v0.vm, 0,
+ gk104_fifo_engine_subdev(chan->engine),
+ 1, fifo->user.bar.offset, 0x200,
+ oclass, &chan->base);
if (ret)
return ret;
- chan->base.inst = base->base.gpuobj.addr;
args->v0.chid = chan->base.chid;
- nv_parent(chan)->context_attach = gk104_fifo_context_attach;
- nv_parent(chan)->context_detach = gk104_fifo_context_detach;
- chan->engine = i;
+ /* page directory */
+ ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
+ if (ret)
+ return ret;
+
+ nvkm_kmap(chan->base.inst);
+ nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr));
+ nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr));
+ nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff);
+ nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff);
+ nvkm_done(chan->base.inst);
+ ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+ if (ret)
+ return ret;
+
+ /* clear channel control registers */
usermem = chan->base.chid * 0x200;
ioffset = args->v0.ioffset;
ilength = order_base_2(args->v0.ilength / 8);
@@ -264,94 +298,31 @@ gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
nvkm_done(fifo->user.mem);
usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
- nvkm_kmap(ramfc);
- nvkm_wo32(ramfc, 0x08, lower_32_bits(usermem));
- nvkm_wo32(ramfc, 0x0c, upper_32_bits(usermem));
- nvkm_wo32(ramfc, 0x10, 0x0000face);
- nvkm_wo32(ramfc, 0x30, 0xfffff902);
- nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset));
- nvkm_wo32(ramfc, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
- nvkm_wo32(ramfc, 0x84, 0x20400000);
- nvkm_wo32(ramfc, 0x94, 0x30000001);
- nvkm_wo32(ramfc, 0x9c, 0x00000100);
- nvkm_wo32(ramfc, 0xac, 0x0000001f);
- nvkm_wo32(ramfc, 0xe8, chan->base.chid);
- nvkm_wo32(ramfc, 0xb8, 0xf8000000);
- nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */
- nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */
- nvkm_done(ramfc);
- return 0;
-}
-
-struct nvkm_ofuncs
-gk104_fifo_chan_ofuncs = {
- .ctor = gk104_fifo_chan_ctor,
- .dtor = _nvkm_fifo_channel_dtor,
- .init = gk104_fifo_chan_init,
- .fini = gk104_fifo_chan_fini,
- .map = _nvkm_fifo_channel_map,
- .rd32 = _nvkm_fifo_channel_rd32,
- .wr32 = _nvkm_fifo_channel_wr32,
- .ntfy = _nvkm_fifo_channel_ntfy
-};
-
-struct nvkm_oclass
-gk104_fifo_sclass[] = {
- { KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
- {}
-};
-
-static int
-gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nvkm_device *device = nv_engine(engine)->subdev.device;
- struct gk104_fifo_base *base;
- int ret;
-
- ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
- 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
- *pobject = nv_object(base);
- if (ret)
- return ret;
-
- ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &base->pgd);
- if (ret)
- return ret;
-
- nvkm_kmap(&base->base.gpuobj);
- nvkm_wo32(&base->base.gpuobj, 0x0200, lower_32_bits(base->pgd->addr));
- nvkm_wo32(&base->base.gpuobj, 0x0204, upper_32_bits(base->pgd->addr));
- nvkm_wo32(&base->base.gpuobj, 0x0208, 0xffffffff);
- nvkm_wo32(&base->base.gpuobj, 0x020c, 0x000000ff);
- nvkm_done(&base->base.gpuobj);
-
- ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
- if (ret)
- return ret;
-
+ /* RAMFC */
+ nvkm_kmap(chan->base.inst);
+ nvkm_wo32(chan->base.inst, 0x08, lower_32_bits(usermem));
+ nvkm_wo32(chan->base.inst, 0x0c, upper_32_bits(usermem));
+ nvkm_wo32(chan->base.inst, 0x10, 0x0000face);
+ nvkm_wo32(chan->base.inst, 0x30, 0xfffff902);
+ nvkm_wo32(chan->base.inst, 0x48, lower_32_bits(ioffset));
+ nvkm_wo32(chan->base.inst, 0x4c, upper_32_bits(ioffset) |
+ (ilength << 16));
+ nvkm_wo32(chan->base.inst, 0x84, 0x20400000);
+ nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
+ nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
+ nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
+ nvkm_wo32(chan->base.inst, 0xe8, chan->base.chid);
+ nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
+ nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */
+ nvkm_wo32(chan->base.inst, 0xfc, 0x10000010); /* 0x002350 */
+ nvkm_done(chan->base.inst);
return 0;
}
-static void
-gk104_fifo_context_dtor(struct nvkm_object *object)
-{
- struct gk104_fifo_base *base = (void *)object;
- nvkm_vm_ref(NULL, &base->vm, base->pgd);
- nvkm_gpuobj_del(&base->pgd);
- nvkm_fifo_context_destroy(&base->base);
-}
-
-struct nvkm_oclass
-gk104_fifo_cclass = {
- .handle = NV_ENGCTX(FIFO, 0xe0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk104_fifo_context_ctor,
- .dtor = gk104_fifo_context_dtor,
- .init = _nvkm_fifo_context_init,
- .fini = _nvkm_fifo_context_fini,
- .rd32 = _nvkm_fifo_context_rd32,
- .wr32 = _nvkm_fifo_context_wr32,
- },
+const struct nvkm_fifo_chan_oclass
+gk104_fifo_gpfifo_oclass = {
+ .base.oclass = KEPLER_CHANNEL_GPFIFO_A,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = gk104_fifo_gpfifo_new,
};
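
One detail worth calling out from gk104_fifo_gpfifo_new() above: args->v0.engine is treated as a mask, intersected with the engines the device actually exposes, and the lowest set bit of the result selects the runlist (chan->engine = __ffs(args->v0.engine)). A small stand-alone sketch of that selection logic, using a hypothetical helper in place of the kernel's __ffs() and example mask values:

#include <stdio.h>
#include <stdint.h>

/* Portable stand-in for the kernel's __ffs(): index of the lowest set bit.
 * Caller must pass a non-zero mask. */
static unsigned int lowest_bit(uint32_t mask)
{
	unsigned int i = 0;
	while (!(mask & 1)) {
		mask >>= 1;
		i++;
	}
	return i;
}

int main(void)
{
	uint32_t present   = 0x0000003f;	/* engines the device has (example value) */
	uint32_t requested = 0x00000030;	/* user asks for "either of two engines" */
	uint32_t usable    = requested & present;

	if (!usable) {
		fprintf(stderr, "no supported engine\n");
		return 1;
	}
	printf("selected engine index %u\n", lowest_bit(usable));
	return 0;
}
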