Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c')
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c | 266
1 file changed, 96 insertions(+), 170 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index 10c987a654ec..364ea4492acc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -23,181 +23,90 @@
  */
 #include "priv.h"
 
-#include <core/memory.h>
 #include <subdev/bar.h>
 
 /******************************************************************************
  * instmem object base implementation
  *****************************************************************************/
-#define nvkm_instobj(p) container_of((p), struct nvkm_instobj, memory)
-
-struct nvkm_instobj {
-	struct nvkm_memory memory;
-	struct nvkm_memory *parent;
-	struct nvkm_instmem *imem;
-	struct list_head head;
-	u32 *suspend;
-	void __iomem *map;
-};
-
-static enum nvkm_memory_target
-nvkm_instobj_target(struct nvkm_memory *memory)
-{
-	memory = nvkm_instobj(memory)->parent;
-	return nvkm_memory_target(memory);
-}
-
-static u64
-nvkm_instobj_addr(struct nvkm_memory *memory)
-{
-	memory = nvkm_instobj(memory)->parent;
-	return nvkm_memory_addr(memory);
-}
-
-static u64
-nvkm_instobj_size(struct nvkm_memory *memory)
-{
-	memory = nvkm_instobj(memory)->parent;
-	return nvkm_memory_size(memory);
-}
-
 static void
-nvkm_instobj_release(struct nvkm_memory *memory)
+nvkm_instobj_load(struct nvkm_instobj *iobj)
 {
-	struct nvkm_instobj *iobj = nvkm_instobj(memory);
-	nvkm_bar_flush(iobj->imem->subdev.device->bar);
-}
-
-static void __iomem *
-nvkm_instobj_acquire(struct nvkm_memory *memory)
-{
-	return nvkm_instobj(memory)->map;
-}
-
-static u32
-nvkm_instobj_rd32(struct nvkm_memory *memory, u64 offset)
-{
-	return ioread32_native(nvkm_instobj(memory)->map + offset);
-}
-
-static void
-nvkm_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
-{
-	iowrite32_native(data, nvkm_instobj(memory)->map + offset);
-}
+	struct nvkm_memory *memory = &iobj->memory;
+	const u64 size = nvkm_memory_size(memory);
+	void __iomem *map;
+	int i;
 
-static void
-nvkm_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
-{
-	memory = nvkm_instobj(memory)->parent;
-	nvkm_memory_map(memory, vma, offset);
-}
+	if (!(map = nvkm_kmap(memory))) {
+		for (i = 0; i < size; i += 4)
+			nvkm_wo32(memory, i, iobj->suspend[i / 4]);
+	} else {
+		memcpy_toio(map, iobj->suspend, size);
+	}
+	nvkm_done(memory);
 
-static void *
-nvkm_instobj_dtor(struct nvkm_memory *memory)
-{
-	struct nvkm_instobj *iobj = nvkm_instobj(memory);
-	spin_lock(&iobj->imem->lock);
-	list_del(&iobj->head);
-	spin_unlock(&iobj->imem->lock);
-	nvkm_memory_del(&iobj->parent);
-	return iobj;
+	kvfree(iobj->suspend);
+	iobj->suspend = NULL;
 }
 
-static const struct nvkm_memory_func
-nvkm_instobj_func = {
-	.dtor = nvkm_instobj_dtor,
-	.target = nvkm_instobj_target,
-	.addr = nvkm_instobj_addr,
-	.size = nvkm_instobj_size,
-	.acquire = nvkm_instobj_acquire,
-	.release = nvkm_instobj_release,
-	.rd32 = nvkm_instobj_rd32,
-	.wr32 = nvkm_instobj_wr32,
-	.map = nvkm_instobj_map,
-};
-
-static void
-nvkm_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
+static int
+nvkm_instobj_save(struct nvkm_instobj *iobj)
 {
-	memory = nvkm_instobj(memory)->parent;
-	nvkm_memory_boot(memory, vm);
-}
+	struct nvkm_memory *memory = &iobj->memory;
+	const u64 size = nvkm_memory_size(memory);
+	void __iomem *map;
+	int i;
 
-static void
-nvkm_instobj_release_slow(struct nvkm_memory *memory)
-{
-	struct nvkm_instobj *iobj = nvkm_instobj(memory);
-	nvkm_instobj_release(memory);
-	nvkm_done(iobj->parent);
-}
+	iobj->suspend = kvmalloc(size, GFP_KERNEL);
+	if (!iobj->suspend)
+		return -ENOMEM;
 
-static void __iomem *
-nvkm_instobj_acquire_slow(struct nvkm_memory *memory)
-{
-	struct nvkm_instobj *iobj = nvkm_instobj(memory);
-	iobj->map = nvkm_kmap(iobj->parent);
-	if (iobj->map)
-		memory->func = &nvkm_instobj_func;
-	return iobj->map;
+	if (!(map = nvkm_kmap(memory))) {
+		for (i = 0; i < size; i += 4)
+			iobj->suspend[i / 4] = nvkm_ro32(memory, i);
+	} else {
+		memcpy_fromio(iobj->suspend, map, size);
+	}
+	nvkm_done(memory);
+	return 0;
 }
 
-static u32
-nvkm_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)
+void
+nvkm_instobj_dtor(struct nvkm_instmem *imem, struct nvkm_instobj *iobj)
 {
-	struct nvkm_instobj *iobj = nvkm_instobj(memory);
-	return nvkm_ro32(iobj->parent, offset);
+	spin_lock(&imem->lock);
+	list_del(&iobj->head);
+	spin_unlock(&imem->lock);
 }
 
-static void
-nvkm_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
+void
+nvkm_instobj_ctor(const struct nvkm_memory_func *func,
+		  struct nvkm_instmem *imem, struct nvkm_instobj *iobj)
 {
-	struct nvkm_instobj *iobj = nvkm_instobj(memory);
-	return nvkm_wo32(iobj->parent, offset, data);
+	nvkm_memory_ctor(func, &iobj->memory);
+	iobj->suspend = NULL;
+	spin_lock(&imem->lock);
+	list_add_tail(&iobj->head, &imem->list);
+	spin_unlock(&imem->lock);
 }
 
-static const struct nvkm_memory_func
-nvkm_instobj_func_slow = {
-	.dtor = nvkm_instobj_dtor,
-	.target = nvkm_instobj_target,
-	.addr = nvkm_instobj_addr,
-	.size = nvkm_instobj_size,
-	.boot = nvkm_instobj_boot,
-	.acquire = nvkm_instobj_acquire_slow,
-	.release = nvkm_instobj_release_slow,
-	.rd32 = nvkm_instobj_rd32_slow,
-	.wr32 = nvkm_instobj_wr32_slow,
-	.map = nvkm_instobj_map,
-};
-
 int
 nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
 		 struct nvkm_memory **pmemory)
 {
+	struct nvkm_subdev *subdev = &imem->subdev;
 	struct nvkm_memory *memory = NULL;
-	struct nvkm_instobj *iobj;
 	u32 offset;
 	int ret;
 
 	ret = imem->func->memory_new(imem, size, align, zero, &memory);
-	if (ret)
+	if (ret) {
+		nvkm_error(subdev, "OOM: %08x %08x %d\n", size, align, ret);
 		goto done;
-
-	if (!imem->func->persistent) {
-		if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL))) {
-			ret = -ENOMEM;
-			goto done;
-		}
-
-		nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory);
-		iobj->parent = memory;
-		iobj->imem = imem;
-		spin_lock(&iobj->imem->lock);
-		list_add_tail(&iobj->head, &imem->list);
-		spin_unlock(&iobj->imem->lock);
-		memory = &iobj->memory;
 	}
 
+	nvkm_trace(subdev, "new %08x %08x %d: %010llx %010llx\n", size, align,
+		   zero, nvkm_memory_addr(memory), nvkm_memory_size(memory));
+
 	if (!imem->func->zero && zero) {
 		void __iomem *map = nvkm_kmap(memory);
 		if (unlikely(!map)) {
@@ -211,7 +120,7 @@ nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
 
 done:
 	if (ret)
-		nvkm_memory_del(&memory);
+		nvkm_memory_unref(&memory);
 	*pmemory = memory;
 	return ret;
 }
@@ -232,39 +141,46 @@ nvkm_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
 	return imem->func->wr32(imem, addr, data);
 }
 
+void
+nvkm_instmem_boot(struct nvkm_instmem *imem)
+{
+	/* Separate bootstrapped objects from normal list, as we need
+	 * to make sure they're accessed with the slowpath on suspend
+	 * and resume.
+	 */
+	struct nvkm_instobj *iobj, *itmp;
+	spin_lock(&imem->lock);
+	list_for_each_entry_safe(iobj, itmp, &imem->list, head) {
+		list_move_tail(&iobj->head, &imem->boot);
+	}
+	spin_unlock(&imem->lock);
+}
+
 static int
 nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
 {
 	struct nvkm_instmem *imem = nvkm_instmem(subdev);
 	struct nvkm_instobj *iobj;
-	int i;
-
-	if (imem->func->fini)
-		imem->func->fini(imem);
 
 	if (suspend) {
 		list_for_each_entry(iobj, &imem->list, head) {
-			struct nvkm_memory *memory = iobj->parent;
-			u64 size = nvkm_memory_size(memory);
+			int ret = nvkm_instobj_save(iobj);
+			if (ret)
+				return ret;
+		}
 
-			iobj->suspend = vmalloc(size);
-			if (!iobj->suspend)
-				return -ENOMEM;
+		nvkm_bar_bar2_fini(subdev->device);
 
-			for (i = 0; i < size; i += 4)
-				iobj->suspend[i / 4] = nvkm_ro32(memory, i);
+		list_for_each_entry(iobj, &imem->boot, head) {
+			int ret = nvkm_instobj_save(iobj);
+			if (ret)
+				return ret;
 		}
 	}
 
-	return 0;
-}
+	if (imem->func->fini)
+		imem->func->fini(imem);
 
-static int
-nvkm_instmem_oneinit(struct nvkm_subdev *subdev)
-{
-	struct nvkm_instmem *imem = nvkm_instmem(subdev);
-	if (imem->func->oneinit)
-		return imem->func->oneinit(imem);
 	return 0;
 }
 
@@ -273,22 +189,31 @@ nvkm_instmem_init(struct nvkm_subdev *subdev)
 {
 	struct nvkm_instmem *imem = nvkm_instmem(subdev);
 	struct nvkm_instobj *iobj;
-	int i;
+
+	list_for_each_entry(iobj, &imem->boot, head) {
+		if (iobj->suspend)
+			nvkm_instobj_load(iobj);
+	}
+
+	nvkm_bar_bar2_init(subdev->device);
 
 	list_for_each_entry(iobj, &imem->list, head) {
-		if (iobj->suspend) {
-			struct nvkm_memory *memory = iobj->parent;
-			u64 size = nvkm_memory_size(memory);
-			for (i = 0; i < size; i += 4)
-				nvkm_wo32(memory, i, iobj->suspend[i / 4]);
-			vfree(iobj->suspend);
-			iobj->suspend = NULL;
-		}
+		if (iobj->suspend)
+			nvkm_instobj_load(iobj);
 	}
 
 	return 0;
 }
 
+static int
+nvkm_instmem_oneinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_instmem *imem = nvkm_instmem(subdev);
+	if (imem->func->oneinit)
+		return imem->func->oneinit(imem);
+	return 0;
+}
+
 static void *
 nvkm_instmem_dtor(struct nvkm_subdev *subdev)
 {
@@ -315,4 +240,5 @@ nvkm_instmem_ctor(const struct nvkm_instmem_func *func,
 	imem->func = func;
 	spin_lock_init(&imem->lock);
 	INIT_LIST_HEAD(&imem->list);
+	INIT_LIST_HEAD(&imem->boot);
 }
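Taken together, the patch turns struct nvkm_instobj into a base object that chipset backends embed directly and register with instmem through the new nvkm_instobj_ctor()/nvkm_instobj_dtor() helpers, instead of wrapping every allocation in the removed nvkm_instobj_func_slow proxy; the structure's definition itself now lives in a header outside this file (the diffstat above is limited to base.c). As a rough illustration only, the sketch below shows how a hypothetical backend might plug into that interface. The foo_* names, the imem back-pointer, and the abbreviated nvkm_memory_func are assumptions, not code from the patch; real backends carry additional allocation state and implement the full set of memory ops.

/* Hypothetical backend sketch (not from this patch): how a chipset
 * implementation might embed struct nvkm_instobj and use the new
 * nvkm_instobj_ctor()/nvkm_instobj_dtor() helpers.  The foo_* names and
 * the abbreviated nvkm_memory_func are illustrative assumptions.
 */
#include "priv.h"

struct foo_instobj {
	struct nvkm_instobj base;	/* tracked on imem->list / imem->boot */
	struct nvkm_instmem *imem;	/* back-pointer for the dtor */
	/* backend-specific allocation state would live here */
};

#define foo_instobj(p) container_of((p), struct foo_instobj, base.memory)

static void *
foo_instobj_dtor(struct nvkm_memory *memory)
{
	struct foo_instobj *iobj = foo_instobj(memory);

	/* Unlink from instmem's tracking list under imem->lock. */
	nvkm_instobj_dtor(iobj->imem, &iobj->base);
	/* ...release the backing memory here... */
	return iobj;
}

static const struct nvkm_memory_func
foo_instobj_func = {
	.dtor = foo_instobj_dtor,
	/* .target/.addr/.size/.acquire/.release/.rd32/.wr32/.map would be
	 * filled in with real backend implementations. */
};

static int
foo_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
		struct nvkm_memory **pmemory)
{
	struct foo_instobj *iobj;

	if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
		return -ENOMEM;
	*pmemory = &iobj->base.memory;

	/* Registers the object on imem->list and clears ->suspend, so the
	 * base code above can save/restore it across suspend/resume. */
	nvkm_instobj_ctor(&foo_instobj_func, imem, &iobj->base);
	iobj->imem = imem;

	/* ...allocate the actual instance memory here... */
	return 0;
}

With objects registered this way, nvkm_instmem_fini() saves everything on imem->list, calls nvkm_bar_bar2_fini(), and then saves imem->boot, whose objects fall back to the nvkm_ro32() slow path once the BAR2 mapping is unavailable; nvkm_instmem_init() mirrors this by reloading the boot list before nvkm_bar_bar2_init() and the regular list afterwards.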