Diffstat (limited to 'drivers/gpu/drm/etnaviv')
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_buffer.c        3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c        7
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c          36
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c          42
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.h           7
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c    12
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c    1
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c         107
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h           3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_mmu.c          62
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_mmu.h           1
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_perfmon.c       4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_sched.c        46
-rw-r--r--  drivers/gpu/drm/etnaviv/state_hi.xml.h         23
14 files changed, 221 insertions(+), 133 deletions(-)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index 384df1659be6..b13a17276d07 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -482,7 +482,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
 	} else {
 		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
 				       VIVS_GL_FLUSH_CACHE_DEPTH |
-				       VIVS_GL_FLUSH_CACHE_COLOR);
+				       VIVS_GL_FLUSH_CACHE_COLOR |
+				       VIVS_GL_FLUSH_CACHE_SHADER_L1);
 		if (has_blt) {
 			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
 			CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
index 721d633aece9..3a221923f15d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
@@ -5,13 +5,10 @@
 
 #include <linux/dma-mapping.h>
 
-#include <drm/drm_mm.h>
-
 #include "etnaviv_cmdbuf.h"
 #include "etnaviv_gem.h"
 #include "etnaviv_gpu.h"
 #include "etnaviv_mmu.h"
-#include "etnaviv_perfmon.h"
 
 #define SUBALLOC_SIZE		SZ_512K
 #define SUBALLOC_GRANULE	SZ_4K
@@ -55,6 +52,7 @@ etnaviv_cmdbuf_suballoc_new(struct device *dev)
 	return suballoc;
 
 free_suballoc:
+	mutex_destroy(&suballoc->lock);
 	kfree(suballoc);
 
 	return ERR_PTR(ret);
@@ -79,6 +77,7 @@ void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc)
 {
 	dma_free_wc(suballoc->dev, SUBALLOC_SIZE, suballoc->vaddr,
 		    suballoc->paddr);
+	mutex_destroy(&suballoc->lock);
 	kfree(suballoc);
 }
 
@@ -100,7 +99,7 @@ retry:
 		mutex_unlock(&suballoc->lock);
 		ret = wait_event_interruptible_timeout(suballoc->free_event,
 						       suballoc->free_space,
-						       msecs_to_jiffies(10 * 1000));
+						       secs_to_jiffies(10));
 		if (!ret) {
 			dev_err(suballoc->dev,
 				"Timeout waiting for cmdbuf space\n");
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 6500f3999c5f..3e91747ed339 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -488,7 +488,16 @@ static const struct drm_ioctl_desc etnaviv_ioctls[] = {
 	ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW),
 };
 
-DEFINE_DRM_GEM_FOPS(fops);
+static void etnaviv_show_fdinfo(struct drm_printer *p, struct drm_file *file)
+{
+	drm_show_memory_stats(p, file);
+}
+
+static const struct file_operations fops = {
+	.owner = THIS_MODULE,
+	DRM_GEM_FOPS,
+	.show_fdinfo = drm_show_fdinfo,
+};
 
 static const struct drm_driver etnaviv_drm_driver = {
 	.driver_features    = DRIVER_GEM | DRIVER_RENDER,
@@ -498,12 +507,12 @@ static const struct drm_driver etnaviv_drm_driver = {
 #ifdef CONFIG_DEBUG_FS
 	.debugfs_init       = etnaviv_debugfs_init,
 #endif
+	.show_fdinfo        = etnaviv_show_fdinfo,
 	.ioctls             = etnaviv_ioctls,
 	.num_ioctls         = DRM_ETNAVIV_NUM_IOCTLS,
 	.fops               = &fops,
 	.name               = "etnaviv",
 	.desc               = "etnaviv DRM",
-	.date               = "20151214",
 	.major              = 1,
 	.minor              = 4,
 };
@@ -538,6 +547,16 @@ static int etnaviv_bind(struct device *dev)
 	priv->num_gpus = 0;
 	priv->shm_gfp_mask = GFP_HIGHUSER | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
 
+	/*
+	 * If the GPU is part of a system with DMA addressing limitations,
+	 * request pages for our SHM backend buffers from the DMA32 zone to
+	 * hopefully avoid performance killing SWIOTLB bounce buffering.
+	 */
+	if (dma_addressing_limited(dev)) {
+		priv->shm_gfp_mask |= GFP_DMA32;
+		priv->shm_gfp_mask &= ~__GFP_HIGHMEM;
+	}
+
 	priv->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(drm->dev);
 	if (IS_ERR(priv->cmdbuf_suballoc)) {
 		dev_err(drm->dev, "Failed to create cmdbuf suballocator\n");
@@ -564,6 +583,7 @@ out_unbind:
 out_destroy_suballoc:
 	etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);
 out_free_priv:
+	mutex_destroy(&priv->gem_lock);
 	kfree(priv);
 out_put:
 	drm_dev_put(drm);
@@ -608,7 +628,7 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
 			if (!of_device_is_available(core_node))
 				continue;
 
-			drm_of_component_match_add(&pdev->dev, &match,
+			drm_of_component_match_add(dev, &match,
 						   component_compare_of, core_node);
 		}
 	} else {
@@ -631,9 +651,9 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
 	 * bit to make sure we are allocating the command buffers and
 	 * TLBs in the lower 4 GiB address space.
 	 */
-	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)) ||
-	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
-		dev_dbg(&pdev->dev, "No suitable DMA available\n");
+	if (dma_set_mask(dev, DMA_BIT_MASK(40)) ||
+	    dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
+		dev_dbg(dev, "No suitable DMA available\n");
 		return -ENODEV;
 	}
 
@@ -644,7 +664,7 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
 	 */
 	first_node = etnaviv_of_first_available_node();
 	if (first_node) {
-		of_dma_configure(&pdev->dev, first_node, true);
+		of_dma_configure(dev, first_node, true);
 		of_node_put(first_node);
 	}
 
@@ -658,7 +678,7 @@ static void etnaviv_pdev_remove(struct platform_device *pdev)
 
 static struct platform_driver etnaviv_platform_driver = {
 	.probe = etnaviv_pdev_probe,
-	.remove_new = etnaviv_pdev_remove,
+	.remove = etnaviv_pdev_remove,
 	.driver = {
 		.name = "etnaviv",
 	},
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 5c0c9d4e3be1..2f844e82bc46 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -342,6 +342,7 @@ void *etnaviv_gem_vmap(struct drm_gem_object *obj)
 static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
 {
 	struct page **pages;
+	pgprot_t prot;
 
 	lockdep_assert_held(&obj->lock);
 
@@ -349,8 +350,19 @@ static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
 	if (IS_ERR(pages))
 		return NULL;
 
-	return vmap(pages, obj->base.size >> PAGE_SHIFT,
-			VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+	switch (obj->flags & ETNA_BO_CACHE_MASK) {
+	case ETNA_BO_CACHED:
+		prot = PAGE_KERNEL;
+		break;
+	case ETNA_BO_UNCACHED:
+		prot = pgprot_noncached(PAGE_KERNEL);
+		break;
+	case ETNA_BO_WC:
+	default:
+		prot = pgprot_writecombine(PAGE_KERNEL);
+	}
+
+	return vmap(pages, obj->base.size >> PAGE_SHIFT, VM_MAP, prot);
 }
 
 static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
@@ -514,6 +526,7 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj)
 	etnaviv_obj->ops->release(etnaviv_obj);
 	drm_gem_object_release(obj);
 
+	mutex_destroy(&etnaviv_obj->lock);
 	kfree(etnaviv_obj);
 }
 
@@ -527,6 +540,17 @@ void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
 	mutex_unlock(&priv->gem_lock);
 }
 
+static enum drm_gem_object_status etnaviv_gem_status(struct drm_gem_object *obj)
+{
+	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+	enum drm_gem_object_status status = 0;
+
+	if (etnaviv_obj->pages)
+		status |= DRM_GEM_OBJECT_RESIDENT;
+
+	return status;
+}
+
 static const struct vm_operations_struct vm_ops = {
 	.fault = etnaviv_gem_fault,
 	.open = drm_gem_vm_open,
@@ -540,10 +564,11 @@ static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
 	.get_sg_table = etnaviv_gem_prime_get_sg_table,
 	.vmap = etnaviv_gem_prime_vmap,
 	.mmap = etnaviv_gem_mmap,
+	.status = etnaviv_gem_status,
 	.vm_ops = &vm_ops,
 };
 
-static int etnaviv_gem_new_impl(struct drm_device *dev, u32 flags,
+static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
 	const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
 {
 	struct etnaviv_gem_object *etnaviv_obj;
@@ -570,6 +595,7 @@ static int etnaviv_gem_new_impl(struct drm_device *dev, u32 flags,
 	if (!etnaviv_obj)
 		return -ENOMEM;
 
+	etnaviv_obj->size = ALIGN(size, SZ_4K);
 	etnaviv_obj->flags = flags;
 	etnaviv_obj->ops = ops;
 
@@ -590,15 +616,13 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 	struct drm_gem_object *obj = NULL;
 	int ret;
 
-	size = PAGE_ALIGN(size);
-
-	ret = etnaviv_gem_new_impl(dev, flags, &etnaviv_gem_shmem_ops, &obj);
+	ret = etnaviv_gem_new_impl(dev, size, flags, &etnaviv_gem_shmem_ops, &obj);
 	if (ret)
 		goto fail;
 
 	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);
 
-	ret = drm_gem_object_init(dev, obj, size);
+	ret = drm_gem_object_init(dev, obj, PAGE_ALIGN(size));
 	if (ret)
 		goto fail;
 
@@ -627,7 +651,7 @@ int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
 	struct drm_gem_object *obj;
 	int ret;
 
-	ret = etnaviv_gem_new_impl(dev, flags, ops, &obj);
+	ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
 	if (ret)
 		return ret;
 
@@ -686,7 +710,7 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
 		kfree(etnaviv_obj->sgt);
 	}
 	if (etnaviv_obj->pages) {
-		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
+		unsigned int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
 
 		unpin_user_pages(etnaviv_obj->pages, npages);
 		kvfree(etnaviv_obj->pages);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index a42d260cac2c..e5ee82a0674c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -36,12 +36,15 @@ struct etnaviv_gem_object {
 	const struct etnaviv_gem_ops *ops;
 	struct mutex lock;
 
+	/*
+	 * The actual size that is visible to the GPU, not necessarily
+	 * PAGE_SIZE aligned, but should be aligned to GPU page size.
+	 */
+	u32 size;
 	u32 flags;
 
 	struct list_head gem_node;
-	struct etnaviv_gpu *gpu;     /* non-null if active */
 	atomic_t gpu_active;
-	u32 access;
 
 	struct page **pages;
 	struct sg_table *sgt;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 3524b5811682..917ad527c961 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -10,14 +10,14 @@
 #include "etnaviv_drv.h"
 #include "etnaviv_gem.h"
 
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
 
 static struct lock_class_key etnaviv_prime_lock_class;
 
 struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
 	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
-	int npages = obj->size >> PAGE_SHIFT;
+	unsigned int npages = obj->size >> PAGE_SHIFT;
 
 	if (WARN_ON(!etnaviv_obj->pages))  /* should have already pinned! */
 		return ERR_PTR(-EINVAL);
@@ -39,7 +39,7 @@ int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
 
 int etnaviv_gem_prime_pin(struct drm_gem_object *obj)
 {
-	if (!obj->import_attach) {
+	if (!drm_gem_is_imported(obj)) {
 		struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 
 		mutex_lock(&etnaviv_obj->lock);
@@ -51,7 +51,7 @@ int etnaviv_gem_prime_pin(struct drm_gem_object *obj)
 
 void etnaviv_gem_prime_unpin(struct drm_gem_object *obj)
 {
-	if (!obj->import_attach) {
+	if (!drm_gem_is_imported(obj)) {
 		struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 
 		mutex_lock(&etnaviv_obj->lock);
@@ -65,7 +65,7 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
 	struct iosys_map map = IOSYS_MAP_INIT_VADDR(etnaviv_obj->vaddr);
 
 	if (etnaviv_obj->vaddr)
-		dma_buf_vunmap_unlocked(etnaviv_obj->base.import_attach->dmabuf, &map);
+		dma_buf_vunmap_unlocked(etnaviv_obj->base.dma_buf, &map);
 
 	/* Don't drop the pages for imported dmabuf, as they are not
 	 * ours, just free the array we allocated:
@@ -82,7 +82,7 @@ static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
 
 	lockdep_assert_held(&etnaviv_obj->lock);
 
-	ret = dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf, &map);
+	ret = dma_buf_vmap(etnaviv_obj->base.dma_buf, &map);
 	if (ret)
 		return NULL;
 	return map.vaddr;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 3d0f8d182506..3c0a5c3e0e3d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -6,7 +6,6 @@
 #include <drm/drm_file.h>
 #include <linux/dma-fence-array.h>
 #include <linux/file.h>
-#include <linux/pm_runtime.h>
 #include <linux/dma-resv.h>
 #include <linux/sync_file.h>
 #include <linux/uaccess.h>
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 7c7f97793ddd..cf0d9049bcf1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -13,6 +13,7 @@
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
+#include <linux/reset.h>
 #include <linux/thermal.h>
 
 #include "etnaviv_cmdbuf.h"
@@ -172,6 +173,29 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
 	return 0;
 }
 
+static int etnaviv_gpu_reset_deassert(struct etnaviv_gpu *gpu)
+{
+	int ret;
+
+	/*
+	 * 32 core clock cycles (slowest clock) required before deassertion
+	 * 1 microsecond might match all implementations without computation
+	 */
+	usleep_range(1, 2);
+
+	ret = reset_control_deassert(gpu->rst);
+	if (ret)
+		return ret;
+
+	/*
+	 * 128 core clock cycles (slowest clock) required before any activity on AHB
+	 * 1 microsecond might match all implementations without computation
+	 */
+	usleep_range(1, 2);
+
+	return 0;
+}
+
 static inline bool etnaviv_is_model_rev(struct etnaviv_gpu *gpu, u32 model, u32 revision)
 {
 	return gpu->identity.model == model &&
@@ -574,8 +598,8 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
 			continue;
 		}
 
-		/* disable debug registers, as they are not normally needed */
-		control |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
+		/* enable debug register access */
+		control &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
 		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
 
 		failed = false;
@@ -799,6 +823,12 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 		goto pm_put;
 	}
 
+	ret = etnaviv_gpu_reset_deassert(gpu);
+	if (ret) {
+		dev_err(gpu->dev, "GPU reset deassert failed\n");
+		goto fail;
+	}
+
 	etnaviv_hw_identify(gpu);
 
 	if (gpu->identity.model == 0) {
@@ -839,17 +869,8 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 	if (ret)
 		goto fail;
 
-	/*
-	 * If the GPU is part of a system with DMA addressing limitations,
-	 * request pages for our SHM backend buffers from the DMA32 zone to
-	 * hopefully avoid performance killing SWIOTLB bounce buffering.
-	 */
-	if (dma_addressing_limited(gpu->dev))
-		priv->shm_gfp_mask |= GFP_DMA32;
-
 	/* Create buffer: */
-	ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer,
-				  PAGE_SIZE);
+	ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer, SZ_4K);
 	if (ret) {
 		dev_err(gpu->dev, "could not create command buffer\n");
 		goto fail;
 	}
@@ -1330,17 +1351,16 @@ static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
 {
 	u32 val;
 
+	mutex_lock(&gpu->lock);
+
 	/* disable clock gating */
 	val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
 	val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
 	gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val);
 
-	/* enable debug register */
-	val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
-	val &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
-	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
-
 	sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE);
+
+	mutex_unlock(&gpu->lock);
 }
 
 static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
@@ -1350,23 +1370,22 @@ static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
 	unsigned int i;
 	u32 val;
 
+	mutex_lock(&gpu->lock);
+
 	sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);
 
+	/* enable clock gating */
+	val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
+	val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
+	gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val);
+
+	mutex_unlock(&gpu->lock);
+
 	for (i = 0; i < submit->nr_pmrs; i++) {
 		const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
 
 		*pmr->bo_vma = pmr->sequence;
 	}
-
-	/* disable debug register */
-	val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
-	val |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
-	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
-
-	/* enable clock gating */
-	val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
-	val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
-	gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val);
 }
@@ -1862,7 +1881,7 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
 	if (!gpu)
 		return -ENOMEM;
 
-	gpu->dev = &pdev->dev;
+	gpu->dev = dev;
 	mutex_init(&gpu->lock);
 	mutex_init(&gpu->sched_lock);
 
@@ -1871,13 +1890,24 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
 	if (IS_ERR(gpu->mmio))
 		return PTR_ERR(gpu->mmio);
 
+	/* Get Reset: */
+	gpu->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+	if (IS_ERR(gpu->rst))
+		return dev_err_probe(dev, PTR_ERR(gpu->rst),
+				     "failed to get reset\n");
+
+	err = reset_control_assert(gpu->rst);
+	if (err)
+		return dev_err_probe(dev, err, "failed to assert reset\n");
+
 	/* Get Interrupt: */
 	gpu->irq = platform_get_irq(pdev, 0);
 	if (gpu->irq < 0)
 		return gpu->irq;
 
-	err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
-			       dev_name(gpu->dev), gpu);
+	err = devm_request_irq(dev, gpu->irq, irq_handler, 0,
+			       dev_name(dev), gpu);
 	if (err) {
 		dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
 		return err;
@@ -1914,13 +1944,13 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
 	 * autosuspend delay is rather arbitary: no measurements have
 	 * yet been performed to determine an appropriate value.
 	 */
-	pm_runtime_use_autosuspend(gpu->dev);
-	pm_runtime_set_autosuspend_delay(gpu->dev, 200);
-	pm_runtime_enable(gpu->dev);
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_set_autosuspend_delay(dev, 200);
+	pm_runtime_enable(dev);
 
-	err = component_add(&pdev->dev, &gpu_ops);
+	err = component_add(dev, &gpu_ops);
 	if (err < 0) {
-		dev_err(&pdev->dev, "failed to register component: %d\n", err);
+		dev_err(dev, "failed to register component: %d\n", err);
 		return err;
 	}
@@ -1929,8 +1959,13 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
 
 static void etnaviv_gpu_platform_remove(struct platform_device *pdev)
 {
+	struct etnaviv_gpu *gpu = dev_get_drvdata(&pdev->dev);
+
 	component_del(&pdev->dev, &gpu_ops);
 	pm_runtime_disable(&pdev->dev);
+
+	mutex_destroy(&gpu->lock);
+	mutex_destroy(&gpu->sched_lock);
 }
 
 static int etnaviv_gpu_rpm_suspend(struct device *dev)
@@ -1991,6 +2026,6 @@ struct platform_driver etnaviv_gpu_driver = {
 		.of_match_table = etnaviv_gpu_match,
 	},
 	.probe = etnaviv_gpu_platform_probe,
-	.remove_new = etnaviv_gpu_platform_remove,
+	.remove = etnaviv_gpu_platform_remove,
 	.id_table = gpu_ids,
 };
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 31322195b9e4..5cb46c84e03a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -93,6 +93,7 @@ struct etnaviv_event {
 struct etnaviv_cmdbuf_suballoc;
 struct regulator;
 struct clk;
+struct reset_control;
 
 #define ETNA_NR_EVENTS 30
 
@@ -144,6 +145,7 @@ struct etnaviv_gpu {
 
 	/* hang detection */
 	u32 hangcheck_dma_addr;
+	u32 hangcheck_primid;
 	u32 hangcheck_fence;
 
 	void __iomem *mmio;
@@ -157,6 +159,7 @@ struct etnaviv_gpu {
 	struct clk *clk_reg;
 	struct clk *clk_core;
 	struct clk *clk_shader;
+	struct reset_control *rst;
 
 	unsigned int freq_scale;
 	unsigned int fe_waitcycles;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 1661d589bf3e..df5192083b20 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -19,12 +19,6 @@ static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
 	size_t unmapped_page, unmapped = 0;
 	size_t pgsize = SZ_4K;
 
-	if (!IS_ALIGNED(iova | size, pgsize)) {
-		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
-		       iova, size, pgsize);
-		return;
-	}
-
 	while (unmapped < size) {
 		unmapped_page = context->global->ops->unmap(context, iova,
 							    pgsize);
@@ -45,12 +39,6 @@ static int etnaviv_context_map(struct etnaviv_iommu_context *context,
 	size_t orig_size = size;
 	int ret = 0;
 
-	if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
-		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
-		       iova, &paddr, size, pgsize);
-		return -EINVAL;
-	}
-
 	while (size) {
 		ret = context->global->ops->map(context, iova, paddr, pgsize,
 						prot);
@@ -69,9 +57,11 @@ static int etnaviv_context_map(struct etnaviv_iommu_context *context,
 	return ret;
 }
 
-static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
+static int etnaviv_iommu_map(struct etnaviv_iommu_context *context,
+			     u32 iova, unsigned int va_len,
 			     struct sg_table *sgt, int prot)
-{	struct scatterlist *sg;
+{
+	struct scatterlist *sg;
 	unsigned int da = iova;
 	unsigned int i;
 	int ret;
@@ -80,15 +70,25 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
 		return -EINVAL;
 
 	for_each_sgtable_dma_sg(sgt, sg, i) {
-		phys_addr_t pa = sg_dma_address(sg) - sg->offset;
-		size_t bytes = sg_dma_len(sg) + sg->offset;
+		phys_addr_t pa = sg_dma_address(sg);
+		unsigned int da_len = sg_dma_len(sg);
+		unsigned int bytes = min_t(unsigned int, da_len, va_len);
+
+		VERB("map[%d]: %08x %pap(%x)", i, da, &pa, bytes);
 
-		VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes);
+		if (!IS_ALIGNED(iova | pa | bytes, SZ_4K)) {
+			dev_err(context->global->dev,
+				"unaligned: iova 0x%x pa %pa size 0x%x\n",
+				iova, &pa, bytes);
+			ret = -EINVAL;
+			goto fail;
+		}
 
 		ret = etnaviv_context_map(context, da, pa, bytes, prot);
 		if (ret)
 			goto fail;
 
+		va_len -= bytes;
 		da += bytes;
 	}
@@ -104,21 +104,7 @@ fail:
 static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
 			     struct sg_table *sgt, unsigned len)
 {
-	struct scatterlist *sg;
-	unsigned int da = iova;
-	int i;
-
-	for_each_sgtable_dma_sg(sgt, sg, i) {
-		size_t bytes = sg_dma_len(sg) + sg->offset;
-
-		etnaviv_context_unmap(context, da, bytes);
-
-		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
-
-		BUG_ON(!PAGE_ALIGNED(bytes));
-
-		da += bytes;
-	}
+	etnaviv_context_unmap(context, iova, len);
 
 	context->flush_seq++;
 }
@@ -131,7 +117,7 @@ static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
 	lockdep_assert_held(&context->lock);
 
 	etnaviv_iommu_unmap(context, mapping->vram_node.start,
-			    etnaviv_obj->sgt, etnaviv_obj->base.size);
+			    etnaviv_obj->sgt, etnaviv_obj->size);
 	drm_mm_remove_node(&mapping->vram_node);
 }
 
@@ -305,16 +291,14 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
 	node = &mapping->vram_node;
 
 	if (va)
-		ret = etnaviv_iommu_insert_exact(context, node,
-						 etnaviv_obj->base.size, va);
+		ret = etnaviv_iommu_insert_exact(context, node, etnaviv_obj->size, va);
 	else
-		ret = etnaviv_iommu_find_iova(context, node,
-					      etnaviv_obj->base.size);
+		ret = etnaviv_iommu_find_iova(context, node, etnaviv_obj->size);
 	if (ret < 0)
 		goto unlock;
 
 	mapping->iova = node->start;
-	ret = etnaviv_iommu_map(context, node->start, sgt,
+	ret = etnaviv_iommu_map(context, node->start, etnaviv_obj->size, sgt,
 				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
 
 	if (ret < 0) {
@@ -358,7 +342,7 @@ static void etnaviv_iommu_context_free(struct kref *kref)
 		container_of(kref, struct etnaviv_iommu_context, refcount);
 
 	etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);
-
+	mutex_destroy(&context->lock);
 	context->global->ops->free(context);
 }
 void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
index c01a147f0dfd..7f8ac0178547 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -61,7 +61,6 @@ struct etnaviv_iommu_global {
 		/* P(age) T(able) A(rray) */
 		u64 *pta_cpu;
 		dma_addr_t pta_dma;
-		struct spinlock pta_lock;
 		DECLARE_BITMAP(pta_alloc, ETNAVIV_PTA_ENTRIES);
 	} v2;
 };
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
index dc9dea664a28..d53a5c293373 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
@@ -62,6 +62,8 @@ static u32 pipe_perf_reg_read(struct etnaviv_gpu *gpu,
 	u32 value = 0;
 	unsigned i;
 
+	lockdep_assert_held(&gpu->lock);
+
 	for (i = 0; i < gpu->identity.pixel_pipes; i++) {
 		pipe_select(gpu, clock, i);
 		value += perf_reg_read(gpu, domain, signal);
@@ -81,6 +83,8 @@ static u32 pipe_reg_read(struct etnaviv_gpu *gpu,
 	u32 value = 0;
 	unsigned i;
 
+	lockdep_assert_held(&gpu->lock);
+
 	for (i = 0; i < gpu->identity.pixel_pipes; i++) {
 		pipe_select(gpu, clock, i);
 		value += gpu_read(gpu, signal->data);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index ab9ca4824b62..71e2e6b9d713 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -11,6 +11,7 @@
 #include "etnaviv_gpu.h"
 #include "etnaviv_sched.h"
 #include "state.xml.h"
+#include "state_hi.xml.h"
 
 static int etnaviv_job_hang_limit = 0;
 module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444);
@@ -34,8 +35,9 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
 							  *sched_job)
 {
 	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
+	struct drm_gpu_scheduler *sched = sched_job->sched;
 	struct etnaviv_gpu *gpu = submit->gpu;
-	u32 dma_addr;
+	u32 dma_addr, primid = 0;
 	int change;
 
 	/*
@@ -52,10 +54,22 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
 	 */
 	dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
 	change = dma_addr - gpu->hangcheck_dma_addr;
+	if (submit->exec_state == ETNA_PIPE_3D) {
+		/* guard against concurrent usage from perfmon_sample */
+		mutex_lock(&gpu->lock);
+		gpu_write(gpu, VIVS_MC_PROFILE_CONFIG0,
+			  VIVS_MC_PROFILE_CONFIG0_FE_CURRENT_PRIM <<
+			  VIVS_MC_PROFILE_CONFIG0_FE__SHIFT);
+		primid = gpu_read(gpu, VIVS_MC_PROFILE_FE_READ);
+		mutex_unlock(&gpu->lock);
+	}
 	if (gpu->state == ETNA_GPU_STATE_RUNNING &&
 	    (gpu->completed_fence != gpu->hangcheck_fence ||
-	     change < 0 || change > 16)) {
+	     change < 0 || change > 16 ||
+	     (submit->exec_state == ETNA_PIPE_3D &&
+	      gpu->hangcheck_primid != primid))) {
 		gpu->hangcheck_dma_addr = dma_addr;
+		gpu->hangcheck_primid = primid;
 		gpu->hangcheck_fence = gpu->completed_fence;
 		goto out_no_timeout;
 	}
@@ -72,11 +86,13 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
 
 	drm_sched_resubmit_jobs(&gpu->sched);
 
-	drm_sched_start(&gpu->sched);
+	drm_sched_start(&gpu->sched, 0);
 	return DRM_GPU_SCHED_STAT_NOMINAL;
 
 out_no_timeout:
-	list_add(&sched_job->list, &sched_job->sched->pending_list);
+	spin_lock(&sched->job_list_lock);
+	list_add(&sched_job->list, &sched->pending_list);
+	spin_unlock(&sched->job_list_lock);
 	return DRM_GPU_SCHED_STAT_NOMINAL;
 }
 
@@ -131,17 +147,17 @@ out_unlock:
 
 int etnaviv_sched_init(struct etnaviv_gpu *gpu)
 {
-	int ret;
-
-	ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops, NULL,
-			     DRM_SCHED_PRIORITY_COUNT,
-			     etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
-			     msecs_to_jiffies(500), NULL, NULL,
-			     dev_name(gpu->dev), gpu->dev);
-	if (ret)
-		return ret;
-
-	return 0;
+	const struct drm_sched_init_args args = {
+		.ops = &etnaviv_sched_ops,
+		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
+		.credit_limit = etnaviv_hw_jobs_limit,
+		.hang_limit = etnaviv_job_hang_limit,
+		.timeout = msecs_to_jiffies(500),
+		.name = dev_name(gpu->dev),
+		.dev = gpu->dev,
+	};
+
+	return drm_sched_init(&gpu->sched, &args);
 }
 
 void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h
index 829bc528e618..f7bc5f6e20ff 100644
--- a/drivers/gpu/drm/etnaviv/state_hi.xml.h
+++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h
@@ -8,17 +8,17 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng
 git clone git://0x04.net/rules-ng-ng
 
 The rules-ng-ng source files this header was generated from are:
-- state.xml     (  29355 bytes, from 2024-01-19 10:18:54)
-- common.xml    (  35664 bytes, from 2023-12-06 10:55:32)
-- common_3d.xml (  15069 bytes, from 2023-11-22 10:05:24)
-- state_hi.xml  (  35854 bytes, from 2023-12-11 15:50:17)
-- copyright.xml (   1597 bytes, from 2016-11-10 13:58:32)
-- state_2d.xml  (  52271 bytes, from 2023-06-02 12:35:03)
-- state_3d.xml  (  89522 bytes, from 2024-01-19 10:18:54)
-- state_blt.xml (  14592 bytes, from 2023-11-22 10:05:09)
-- state_vg.xml  (   5975 bytes, from 2016-11-10 13:58:32)
-
-Copyright (C) 2012-2023 by the following authors:
+- state.xml     (  30729 bytes, from 2024-06-21 11:31:54)
+- common.xml    (  35664 bytes, from 2023-12-13 09:33:18)
+- common_3d.xml (  15069 bytes, from 2023-12-13 09:33:18)
+- state_hi.xml  (  35909 bytes, from 2024-06-21 11:31:54)
+- copyright.xml (   1597 bytes, from 2020-10-28 12:56:03)
+- state_2d.xml  (  52271 bytes, from 2023-05-30 20:50:02)
+- state_3d.xml  (  89626 bytes, from 2024-06-21 11:32:57)
+- state_blt.xml (  14592 bytes, from 2023-12-13 09:33:18)
+- state_vg.xml  (   5975 bytes, from 2020-10-28 12:56:03)
+
+Copyright (C) 2012-2024 by the following authors:
 - Wladimir J. van der Laan <laanwj@gmail.com>
 - Christian Gmeiner <christian.gmeiner@gmail.com>
 - Lucas Stach <l.stach@pengutronix.de>
@@ -467,6 +467,7 @@ DEALINGS IN THE SOFTWARE.
 #define VIVS_MC_PROFILE_CONFIG0				0x00000470
 #define VIVS_MC_PROFILE_CONFIG0_FE__MASK		0x000000ff
 #define VIVS_MC_PROFILE_CONFIG0_FE__SHIFT		0
+#define VIVS_MC_PROFILE_CONFIG0_FE_CURRENT_PRIM		0x00000009
 #define VIVS_MC_PROFILE_CONFIG0_FE_DRAW_COUNT		0x0000000a
 #define VIVS_MC_PROFILE_CONFIG0_FE_OUT_VERTEX_COUNT	0x0000000b
 #define VIVS_MC_PROFILE_CONFIG0_FE_CACHE_MISS_COUNT	0x0000000c