author    Ben Skeggs <bskeggs@redhat.com>    2015-01-14 08:36:34 +0300
committer Ben Skeggs <bskeggs@redhat.com>    2015-01-22 05:18:07 +0300
commit    be83cd4ef9a2a56bd35550bf96146b7b837daf02 (patch)
tree      7cd92cdbb6c9fa473f4b896e8bd22e9dc8cbc28e /drivers/gpu/drm/nouveau/nouveau_ttm.c
parent    9719047b4d985ca7a46819956047cca04213d63e (diff)
drm/nouveau: finalise nvkm namespace switch (no binary change)
The namespace of NVKM is being changed to nvkm_ instead of nouveau_, which will be used for the DRM part of the driver. This is being done in order to make it very clear as to what part of the driver a given symbol belongs to, and as a minor step towards splitting the DRM driver out to be able to stand on its own (for virt).

Because there's already a large amount of churn here anyway, this is as good a time as any to also switch to NVIDIA's device and chipset naming to ease collaboration with them.

A comparison of objdump disassemblies proves no code changes.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
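The rename is purely mechanical, as the hunks below show: NVKM core types and functions (struct nouveau_fb, struct nouveau_mem, nouveau_vm_*) drop the nouveau_ prefix in favour of nvkm_, while DRM-side objects (struct nouveau_drm, struct nouveau_bo) keep theirs. A minimal before/after sketch of the pattern, using a hypothetical helper name (demo_node_cleanup) rather than anything from this patch:

/* Before: NVKM core symbols carried the DRM driver's nouveau_ prefix. */
static void demo_node_cleanup(struct nouveau_mem *node)
{
	if (node->vma[0].node) {
		nouveau_vm_unmap(&node->vma[0]);	/* tear down the mapping */
		nouveau_vm_put(&node->vma[0]);		/* release the VM space */
	}
}

/* After: the same logic, with core types and calls renamed to nvkm_;
 * DRM-side structures such as struct nouveau_drm are left untouched.
 */
static void demo_node_cleanup(struct nvkm_mem *node)
{
	if (node->vma[0].node) {
		nvkm_vm_unmap(&node->vma[0]);
		nvkm_vm_put(&node->vma[0]);
	}
}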
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_ttm.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c  |  52
1 file changed, 26 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 7149f6d1fb43..273e50110ec3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -33,7 +33,7 @@ static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nouveau_fb *pfb = nvxx_fb(&drm->device);
+ struct nvkm_fb *pfb = nvxx_fb(&drm->device);
man->priv = pfb;
return 0;
}
@@ -46,16 +46,16 @@ nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
}
static inline void
-nouveau_mem_node_cleanup(struct nouveau_mem *node)
+nvkm_mem_node_cleanup(struct nvkm_mem *node)
{
if (node->vma[0].node) {
- nouveau_vm_unmap(&node->vma[0]);
- nouveau_vm_put(&node->vma[0]);
+ nvkm_vm_unmap(&node->vma[0]);
+ nvkm_vm_put(&node->vma[0]);
}
if (node->vma[1].node) {
- nouveau_vm_unmap(&node->vma[1]);
- nouveau_vm_put(&node->vma[1]);
+ nvkm_vm_unmap(&node->vma[1]);
+ nvkm_vm_put(&node->vma[1]);
}
}
@@ -64,9 +64,9 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nouveau_fb *pfb = nvxx_fb(&drm->device);
- nouveau_mem_node_cleanup(mem->mm_node);
- pfb->ram->put(pfb, (struct nouveau_mem **)&mem->mm_node);
+ struct nvkm_fb *pfb = nvxx_fb(&drm->device);
+ nvkm_mem_node_cleanup(mem->mm_node);
+ pfb->ram->put(pfb, (struct nvkm_mem **)&mem->mm_node);
}
static int
@@ -76,9 +76,9 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nouveau_fb *pfb = nvxx_fb(&drm->device);
+ struct nvkm_fb *pfb = nvxx_fb(&drm->device);
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nouveau_mem *node;
+ struct nvkm_mem *node;
u32 size_nc = 0;
int ret;
@@ -103,9 +103,9 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
static void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
- struct nouveau_fb *pfb = man->priv;
- struct nouveau_mm *mm = &pfb->vram;
- struct nouveau_mm_node *r;
+ struct nvkm_fb *pfb = man->priv;
+ struct nvkm_mm *mm = &pfb->vram;
+ struct nvkm_mm_node *r;
u32 total = 0, free = 0;
mutex_lock(&nv_subdev(pfb)->mutex);
@@ -150,7 +150,7 @@ static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
- nouveau_mem_node_cleanup(mem->mm_node);
+ nvkm_mem_node_cleanup(mem->mm_node);
kfree(mem->mm_node);
mem->mm_node = NULL;
}
@@ -163,7 +163,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nouveau_mem *node;
+ struct nvkm_mem *node;
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
@@ -208,10 +208,10 @@ static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nouveau_mmu *mmu = nvxx_mmu(&drm->device);
+ struct nvkm_mmu *mmu = nvxx_mmu(&drm->device);
struct nv04_mmu_priv *priv = (void *)mmu;
- struct nouveau_vm *vm = NULL;
- nouveau_vm_ref(priv->vm, &vm, NULL);
+ struct nvkm_vm *vm = NULL;
+ nvkm_vm_ref(priv->vm, &vm, NULL);
man->priv = vm;
return 0;
}
@@ -219,8 +219,8 @@ nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
- struct nouveau_vm *vm = man->priv;
- nouveau_vm_ref(NULL, &vm, NULL);
+ struct nvkm_vm *vm = man->priv;
+ nvkm_vm_ref(NULL, &vm, NULL);
man->priv = NULL;
return 0;
}
@@ -228,9 +228,9 @@ nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
- struct nouveau_mem *node = mem->mm_node;
+ struct nvkm_mem *node = mem->mm_node;
if (node->vma[0].node)
- nouveau_vm_put(&node->vma[0]);
+ nvkm_vm_put(&node->vma[0]);
kfree(mem->mm_node);
mem->mm_node = NULL;
}
@@ -241,7 +241,7 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man,
const struct ttm_place *place,
struct ttm_mem_reg *mem)
{
- struct nouveau_mem *node;
+ struct nvkm_mem *node;
int ret;
node = kzalloc(sizeof(*node), GFP_KERNEL);
@@ -250,8 +250,8 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man,
node->page_shift = 12;
- ret = nouveau_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
- NV_MEM_ACCESS_RW, &node->vma[0]);
+ ret = nvkm_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
+ NV_MEM_ACCESS_RW, &node->vma[0]);
if (ret) {
kfree(node);
return ret;