Diffstat (limited to 'drivers/gpu/drm/panthor/panthor_gem.c')
-rw-r--r--  drivers/gpu/drm/panthor/panthor_gem.c | 227
1 file changed, 213 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c
index 8244a4e6c2a2..7c00fd77758b 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.c
+++ b/drivers/gpu/drm/panthor/panthor_gem.c
@@ -2,6 +2,7 @@
 /* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
 /* Copyright 2023 Collabora ltd. */
 
+#include <linux/cleanup.h>
 #include <linux/dma-buf.h>
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
@@ -10,14 +11,64 @@
 #include <drm/panthor_drm.h>
 
 #include "panthor_device.h"
+#include "panthor_fw.h"
 #include "panthor_gem.h"
 #include "panthor_mmu.h"
 
+#ifdef CONFIG_DEBUG_FS
+static void panthor_gem_debugfs_bo_add(struct panthor_device *ptdev,
+				       struct panthor_gem_object *bo)
+{
+	INIT_LIST_HEAD(&bo->debugfs.node);
+
+	bo->debugfs.creator.tgid = current->group_leader->pid;
+	get_task_comm(bo->debugfs.creator.process_name, current->group_leader);
+
+	mutex_lock(&ptdev->gems.lock);
+	list_add_tail(&bo->debugfs.node, &ptdev->gems.node);
+	mutex_unlock(&ptdev->gems.lock);
+}
+
+static void panthor_gem_debugfs_bo_rm(struct panthor_gem_object *bo)
+{
+	struct panthor_device *ptdev = container_of(bo->base.base.dev,
+						    struct panthor_device, base);
+
+	if (list_empty(&bo->debugfs.node))
+		return;
+
+	mutex_lock(&ptdev->gems.lock);
+	list_del_init(&bo->debugfs.node);
+	mutex_unlock(&ptdev->gems.lock);
+}
+
+static void panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags)
+{
+	bo->debugfs.flags = usage_flags | PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED;
+}
+#else
+static void panthor_gem_debugfs_bo_add(struct panthor_device *ptdev,
+				       struct panthor_gem_object *bo)
+{}
+static void panthor_gem_debugfs_bo_rm(struct panthor_gem_object *bo) {}
+static void panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags) {}
+#endif
+
 static void panthor_gem_free_object(struct drm_gem_object *obj)
 {
 	struct panthor_gem_object *bo = to_panthor_bo(obj);
 	struct drm_gem_object *vm_root_gem = bo->exclusive_vm_root_gem;
 
+	panthor_gem_debugfs_bo_rm(bo);
+
+	/*
+	 * Label might have been allocated with kstrdup_const(),
+	 * we need to take that into account when freeing the memory
+	 */
+	kfree_const(bo->label.str);
+
+	mutex_destroy(&bo->label.lock);
+
 	drm_gem_free_mmap_offset(&bo->base.base);
 	mutex_destroy(&bo->gpuva_list_lock);
 	drm_gem_shmem_free(&bo->base);
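A note on the kfree_const() call added to the free path above: BO labels are duplicated with kstrdup_const(), which returns the original pointer when the source string lives in .rodata (for example a string literal) and only allocates a heap copy otherwise. The matching release must therefore be kfree_const(), never plain kfree(). A minimal standalone sketch of the pattern; the helper names are illustrative, not from the patch:

#include <linux/slab.h>		/* GFP_KERNEL */
#include <linux/string.h>	/* kstrdup_const(), kfree_const() */

/* Illustrative helpers showing the const-string ownership pattern. */
static const char *save_label(const char *label)
{
	/*
	 * If 'label' points into .rodata, kstrdup_const() returns it
	 * unchanged; otherwise it kmalloc()s a copy. Either way the
	 * result must be released with kfree_const().
	 */
	return kstrdup_const(label, GFP_KERNEL);
}

static void drop_label(const char *label)
{
	kfree_const(label);	/* no-op for .rodata strings */
}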
@@ -67,17 +118,19 @@ out_free_bo:
  * @gpu_va: GPU address assigned when mapping to the VM.
  * If gpu_va == PANTHOR_VM_KERNEL_AUTO_VA, the virtual address will be
  * automatically allocated.
+ * @name: Descriptive label of the BO's contents
  *
  * Return: A valid pointer in case of success, an ERR_PTR() otherwise.
  */
 struct panthor_kernel_bo *
 panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
 			 size_t size, u32 bo_flags, u32 vm_map_flags,
-			 u64 gpu_va)
+			 u64 gpu_va, const char *name)
 {
 	struct drm_gem_shmem_object *obj;
 	struct panthor_kernel_bo *kbo;
 	struct panthor_gem_object *bo;
+	u32 debug_flags = PANTHOR_DEBUGFS_GEM_USAGE_FLAG_KERNEL;
 	int ret;
 
 	if (drm_WARN_ON(&ptdev->base, !vm))
@@ -97,6 +150,12 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
 	kbo->obj = &obj->base;
 	bo->flags = bo_flags;
 
+	if (vm == panthor_fw_vm(ptdev))
+		debug_flags |= PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED;
+
+	panthor_gem_kernel_bo_set_label(kbo, name);
+	panthor_gem_debugfs_set_usage_flags(to_panthor_bo(kbo->obj), debug_flags);
+
 	/* The system and GPU MMU page size might differ, which becomes a
 	 * problem for FW sections that need to be mapped at explicit address
 	 * since our PAGE_SIZE alignment might cover a VA range that's
@@ -129,17 +188,6 @@ err_free_bo:
 	return ERR_PTR(ret);
 }
 
-static int panthor_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
-{
-	struct panthor_gem_object *bo = to_panthor_bo(obj);
-
-	/* Don't allow mmap on objects that have the NO_MMAP flag set. */
-	if (bo->flags & DRM_PANTHOR_BO_NO_MMAP)
-		return -EINVAL;
-
-	return drm_gem_shmem_object_mmap(obj, vma);
-}
-
 static struct dma_buf *
 panthor_gem_prime_export(struct drm_gem_object *obj, int flags)
 {
@@ -155,7 +203,7 @@ static enum drm_gem_object_status panthor_gem_status(struct drm_gem_object *obj)
 	struct panthor_gem_object *bo = to_panthor_bo(obj);
 	enum drm_gem_object_status res = 0;
 
-	if (bo->base.base.import_attach || bo->base.pages)
+	if (drm_gem_is_imported(&bo->base.base) || bo->base.pages)
 		res |= DRM_GEM_OBJECT_RESIDENT;
 
 	return res;
@@ -169,7 +217,7 @@ static const struct drm_gem_object_funcs panthor_gem_funcs = {
 	.get_sg_table = drm_gem_shmem_object_get_sg_table,
 	.vmap = drm_gem_shmem_object_vmap,
 	.vunmap = drm_gem_shmem_object_vunmap,
-	.mmap = panthor_gem_mmap,
+	.mmap = drm_gem_shmem_object_mmap,
 	.status = panthor_gem_status,
 	.export = panthor_gem_prime_export,
 	.vm_ops = &drm_gem_shmem_vm_ops,
@@ -196,6 +244,9 @@ struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t
 	obj->base.map_wc = !ptdev->coherent;
 	mutex_init(&obj->gpuva_list_lock);
 	drm_gem_gpuva_set_lock(&obj->base.base, &obj->gpuva_list_lock);
+	mutex_init(&obj->label.lock);
+
+	panthor_gem_debugfs_bo_add(ptdev, obj);
 
 	return &obj->base.base;
 }
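With the extended panthor_kernel_bo_create() signature above, every kernel BO allocation now names its contents. A hypothetical call site might look as follows; the VM, size, flag choices and label string are illustrative, only the signature and the PANTHOR_VM_KERNEL_AUTO_VA / DRM_PANTHOR_BO_NO_MMAP symbols come from the driver:

	struct panthor_kernel_bo *kbo;

	/* Hypothetical call site: a 4 KiB, non-mmapable, non-executable
	 * kernel BO at an automatically chosen GPU VA, labelled for
	 * debugfs. Flag and label values are examples only. */
	kbo = panthor_kernel_bo_create(ptdev, vm, SZ_4K,
				       DRM_PANTHOR_BO_NO_MMAP,
				       DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
				       PANTHOR_VM_KERNEL_AUTO_VA,
				       "ring-buffer");
	if (IS_ERR(kbo))
		return PTR_ERR(kbo);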
@@ -245,5 +296,153 @@ panthor_gem_create_with_handle(struct drm_file *file,
 	/* drop reference from allocate - handle holds it now. */
 	drm_gem_object_put(&shmem->base);
 
+	/*
+	 * No explicit flags are needed in the call below, since the
+	 * function internally sets the INITIALIZED bit for us.
+	 */
+	panthor_gem_debugfs_set_usage_flags(bo, 0);
+
 	return ret;
 }
+
+void
+panthor_gem_bo_set_label(struct drm_gem_object *obj, const char *label)
+{
+	struct panthor_gem_object *bo = to_panthor_bo(obj);
+	const char *old_label;
+
+	scoped_guard(mutex, &bo->label.lock) {
+		old_label = bo->label.str;
+		bo->label.str = label;
+	}
+
+	kfree_const(old_label);
+}
+
+void
+panthor_gem_kernel_bo_set_label(struct panthor_kernel_bo *bo, const char *label)
+{
+	const char *str;
+
+	/* We should never attempt labelling a UM-exposed GEM object */
+	if (drm_WARN_ON(bo->obj->dev, bo->obj->handle_count > 0))
+		return;
+
+	if (!label)
+		return;
+
+	str = kstrdup_const(label, GFP_KERNEL);
+	if (!str) {
+		/* Failing to allocate memory for a label isn't a fatal condition */
+		drm_warn(bo->obj->dev, "Not enough memory to allocate BO label");
+		return;
+	}
+
+	panthor_gem_bo_set_label(bo->obj, str);
+}
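Both label setters rely on scoped_guard() from the newly included <linux/cleanup.h>. The guard acquires the mutex on entry to the block and releases it automatically on every exit path, which is why no explicit mutex_unlock() appears above. A minimal self-contained sketch of the equivalence; the demo names are made up:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static const char *demo_str;

static const char *swap_str(const char *new_str)
{
	const char *old;

	/* Behaves like mutex_lock(&demo_lock); ... mutex_unlock(&demo_lock);
	 * except the unlock also runs on early exits from the block. */
	scoped_guard(mutex, &demo_lock) {
		old = demo_str;
		demo_str = new_str;
	}

	return old;
}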
+
+#ifdef CONFIG_DEBUG_FS
+struct gem_size_totals {
+	size_t size;
+	size_t resident;
+	size_t reclaimable;
+};
+
+static void panthor_gem_debugfs_print_flag_names(struct seq_file *m)
+{
+	int len;
+	int i;
+
+	static const char * const gem_state_flags_names[] = {
+		[PANTHOR_DEBUGFS_GEM_STATE_IMPORTED_BIT] = "imported",
+		[PANTHOR_DEBUGFS_GEM_STATE_EXPORTED_BIT] = "exported",
+	};
+
+	static const char * const gem_usage_flags_names[] = {
+		[PANTHOR_DEBUGFS_GEM_USAGE_KERNEL_BIT] = "kernel",
+		[PANTHOR_DEBUGFS_GEM_USAGE_FW_MAPPED_BIT] = "fw-mapped",
+	};
+
+	seq_puts(m, "GEM state flags: ");
+	for (i = 0, len = ARRAY_SIZE(gem_state_flags_names); i < len; i++) {
+		if (!gem_state_flags_names[i])
+			continue;
+		seq_printf(m, "%s (0x%x)%s", gem_state_flags_names[i],
+			   (u32)BIT(i), (i < len - 1) ? ", " : "\n");
+	}
+
+	seq_puts(m, "GEM usage flags: ");
+	for (i = 0, len = ARRAY_SIZE(gem_usage_flags_names); i < len; i++) {
+		if (!gem_usage_flags_names[i])
+			continue;
+		seq_printf(m, "%s (0x%x)%s", gem_usage_flags_names[i],
+			   (u32)BIT(i), (i < len - 1) ? ", " : "\n\n");
+	}
+}
+
+static void panthor_gem_debugfs_bo_print(struct panthor_gem_object *bo,
+					 struct seq_file *m,
+					 struct gem_size_totals *totals)
+{
+	unsigned int refcount = kref_read(&bo->base.base.refcount);
+	char creator_info[32] = {};
+	size_t resident_size;
+	u32 gem_usage_flags = bo->debugfs.flags & (u32)~PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED;
+	u32 gem_state_flags = 0;
+
+	/* Skip BOs being destroyed. */
+	if (!refcount)
+		return;
+
+	resident_size = bo->base.pages ? bo->base.base.size : 0;
+
+	snprintf(creator_info, sizeof(creator_info),
+		 "%s/%d", bo->debugfs.creator.process_name, bo->debugfs.creator.tgid);
+	seq_printf(m, "%-32s%-16d%-16d%-16zd%-16zd0x%-16lx",
+		   creator_info,
+		   bo->base.base.name,
+		   refcount,
+		   bo->base.base.size,
+		   resident_size,
+		   drm_vma_node_start(&bo->base.base.vma_node));
+
+	if (bo->base.base.import_attach)
+		gem_state_flags |= PANTHOR_DEBUGFS_GEM_STATE_FLAG_IMPORTED;
+	if (bo->base.base.dma_buf)
+		gem_state_flags |= PANTHOR_DEBUGFS_GEM_STATE_FLAG_EXPORTED;
+
+	seq_printf(m, "0x%-8x 0x%-10x", gem_state_flags, gem_usage_flags);
+
+	scoped_guard(mutex, &bo->label.lock) {
+		seq_printf(m, "%s\n", bo->label.str ? : "");
+	}
+
+	totals->size += bo->base.base.size;
+	totals->resident += resident_size;
+	if (bo->base.madv > 0)
+		totals->reclaimable += resident_size;
+}
+
+void panthor_gem_debugfs_print_bos(struct panthor_device *ptdev,
+				   struct seq_file *m)
+{
+	struct gem_size_totals totals = {0};
+	struct panthor_gem_object *bo;
+
+	panthor_gem_debugfs_print_flag_names(m);
+
+	seq_puts(m, "created-by global-name refcount size resident-size file-offset state usage label\n");
+	seq_puts(m, "----------------------------------------------------------------------------------------------------------------------------------------------\n");
+
+	scoped_guard(mutex, &ptdev->gems.lock) {
+		list_for_each_entry(bo, &ptdev->gems.node, debugfs.node) {
+			if (bo->debugfs.flags & PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED)
+				panthor_gem_debugfs_bo_print(bo, m, &totals);
+		}
+	}
+
+	seq_puts(m, "==============================================================================================================================================\n");
+	seq_printf(m, "Total size: %zd, Total resident: %zd, Total reclaimable: %zd\n",
+		   totals.size, totals.resident, totals.reclaimable);
+}
+#endif
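panthor_gem_debugfs_print_bos() only formats the BO list into a seq_file; exposing it through an actual debugfs entry happens outside this file. One plausible wiring, sketched with the generic debugfs helpers: the file name "gems", the registration point, and panthor_gems_show() are assumptions, not part of this patch.

#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* Hypothetical show callback; single_open() passes the data pointer
 * given to debugfs_create_file() through to m->private. */
static int panthor_gems_show(struct seq_file *m, void *unused)
{
	struct panthor_device *ptdev = m->private;

	panthor_gem_debugfs_print_bos(ptdev, m);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(panthor_gems);

/* During device init, with 'root' being the driver's debugfs dir: */
debugfs_create_file("gems", 0444, root, ptdev, &panthor_gems_fops);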