Diffstat (limited to 'drivers/gpu/drm/panfrost')
-rw-r--r--   drivers/gpu/drm/panfrost/panfrost_drv.c            | 18
-rw-r--r--   drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c   |  6
-rw-r--r--   drivers/gpu/drm/panfrost/panfrost_job.c            | 16
-rw-r--r--   drivers/gpu/drm/panfrost/panfrost_mmu.c            | 19
4 files changed, 39 insertions, 20 deletions
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index bbada731bbbd..a2ab99698ca8 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -4,8 +4,9 @@
 /* Copyright 2019 Collabora ltd. */
 
 #include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
 #include <linux/pagemap.h>
+#include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <drm/panfrost_drm.h>
 #include <drm/drm_drv.h>
@@ -407,6 +408,10 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
 
         bo = to_panfrost_bo(gem_obj);
 
+        ret = dma_resv_lock_interruptible(bo->base.base.resv, NULL);
+        if (ret)
+                goto out_put_object;
+
         mutex_lock(&pfdev->shrinker_lock);
         mutex_lock(&bo->mappings.lock);
         if (args->madv == PANFROST_MADV_DONTNEED) {
@@ -444,7 +449,8 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
 out_unlock_mappings:
         mutex_unlock(&bo->mappings.lock);
         mutex_unlock(&pfdev->shrinker_lock);
-
+        dma_resv_unlock(bo->base.base.resv);
+out_put_object:
         drm_gem_object_put(gem_obj);
         return ret;
 }
@@ -539,10 +545,7 @@ static const struct drm_driver panfrost_drm_driver = {
         .minor = 2,
 
         .gem_create_object = panfrost_gem_create_object,
-        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
         .gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
-        .gem_prime_mmap = drm_gem_prime_mmap,
 };
 
 static int panfrost_probe(struct platform_device *pdev)
@@ -611,7 +614,7 @@ err_out0:
         return err;
 }
 
-static int panfrost_remove(struct platform_device *pdev)
+static void panfrost_remove(struct platform_device *pdev)
 {
         struct panfrost_device *pfdev = platform_get_drvdata(pdev);
         struct drm_device *ddev = pfdev->ddev;
@@ -625,7 +628,6 @@ static int panfrost_remove(struct platform_device *pdev)
         pm_runtime_set_suspended(pfdev->dev);
 
         drm_dev_put(ddev);
-        return 0;
 }
 
 /*
@@ -717,7 +719,7 @@ MODULE_DEVICE_TABLE(of, dt_match);
 
 static struct platform_driver panfrost_driver = {
         .probe = panfrost_probe,
-        .remove = panfrost_remove,
+        .remove_new = panfrost_remove,
         .driver = {
                 .name = "panfrost",
                 .pm = pm_ptr(&panfrost_pm_ops),
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index bf0170782f25..6a71a2555f85 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -48,14 +48,14 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
         if (!mutex_trylock(&bo->mappings.lock))
                 return false;
 
-        if (!mutex_trylock(&shmem->pages_lock))
+        if (!dma_resv_trylock(shmem->base.resv))
                 goto unlock_mappings;
 
         panfrost_gem_teardown_mappings_locked(bo);
-        drm_gem_shmem_purge_locked(&bo->base);
+        drm_gem_shmem_purge(&bo->base);
         ret = true;
 
-        mutex_unlock(&shmem->pages_lock);
+        dma_resv_unlock(shmem->base.resv);
 
 unlock_mappings:
         mutex_unlock(&bo->mappings.lock);
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index dbc597ab46fb..a8b4827dc425 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -720,6 +720,22 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
         if (dma_fence_is_signaled(job->done_fence))
                 return DRM_GPU_SCHED_STAT_NOMINAL;
 
+        /*
+         * Panfrost IRQ handler may take a long time to process an interrupt
+         * if there is another IRQ handler hogging the processing.
+         * For example, the HDMI encoder driver might be stuck in the IRQ
+         * handler for a significant time in a case of bad cable connection.
+         * In order to catch such cases and not report spurious Panfrost
+         * job timeouts, synchronize the IRQ handler and re-check the fence
+         * status.
+         */
+        synchronize_irq(pfdev->js->irq);
+
+        if (dma_fence_is_signaled(job->done_fence)) {
+                dev_warn(pfdev->dev, "unexpectedly high interrupt latency\n");
+                return DRM_GPU_SCHED_STAT_NOMINAL;
+        }
+
         dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
                 js,
                 job_read(pfdev, JS_CONFIG(js)),
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index e961fa27702c..c0123d09f699 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -443,6 +443,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
         struct panfrost_gem_mapping *bomapping;
         struct panfrost_gem_object *bo;
         struct address_space *mapping;
+        struct drm_gem_object *obj;
         pgoff_t page_offset;
         struct sg_table *sgt;
         struct page **pages;
@@ -465,15 +466,16 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
         page_offset = addr >> PAGE_SHIFT;
         page_offset -= bomapping->mmnode.start;
 
-        mutex_lock(&bo->base.pages_lock);
+        obj = &bo->base.base;
+
+        dma_resv_lock(obj->resv, NULL);
 
         if (!bo->base.pages) {
                 bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
                                      sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
                 if (!bo->sgts) {
-                        mutex_unlock(&bo->base.pages_lock);
                         ret = -ENOMEM;
-                        goto err_bo;
+                        goto err_unlock;
                 }
 
                 pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
@@ -481,9 +483,8 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
                 if (!pages) {
                         kvfree(bo->sgts);
                         bo->sgts = NULL;
-                        mutex_unlock(&bo->base.pages_lock);
                         ret = -ENOMEM;
-                        goto err_bo;
+                        goto err_unlock;
                 }
                 bo->base.pages = pages;
                 bo->base.pages_use_count = 1;
@@ -491,7 +492,6 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
                 pages = bo->base.pages;
                 if (pages[page_offset]) {
                         /* Pages are already mapped, bail out. */
-                        mutex_unlock(&bo->base.pages_lock);
                         goto out;
                 }
         }
@@ -502,15 +502,12 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
         for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
                 pages[i] = shmem_read_mapping_page(mapping, i);
                 if (IS_ERR(pages[i])) {
-                        mutex_unlock(&bo->base.pages_lock);
                         ret = PTR_ERR(pages[i]);
                         pages[i] = NULL;
                         goto err_pages;
                 }
         }
 
-        mutex_unlock(&bo->base.pages_lock);
-
         sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
         ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
                                         NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
@@ -529,6 +526,8 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
         dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
 
 out:
+        dma_resv_unlock(obj->resv);
+
         panfrost_gem_mapping_put(bomapping);
 
         return 0;
@@ -537,6 +536,8 @@ err_map:
         sg_free_table(sgt);
 err_pages:
         drm_gem_shmem_put_pages(&bo->base);
+err_unlock:
+        dma_resv_unlock(obj->resv);
 err_bo:
         panfrost_gem_mapping_put(bomapping);
         return ret;
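Note (not part of the patch): after dropping the shmem pages_lock, the madvise ioctl, the shrinker and the MMU fault handler above all serialize on the GEM object's dma_resv lock before touching the shmem pages state. A minimal sketch of that rule, using the made-up helper name example_touch_pages purely for illustration:

/*
 * Hedged sketch, not driver code: shows the locking pattern the hunks
 * above converge on. example_touch_pages() is a hypothetical name.
 */
#include <linux/dma-resv.h>
#include <drm/drm_gem_shmem_helper.h>

static int example_touch_pages(struct drm_gem_shmem_object *shmem)
{
        struct drm_gem_object *obj = &shmem->base;
        int ret;

        /*
         * Sleeping contexts (ioctl, MMU fault handler) may block here.
         * The shrinker instead uses dma_resv_trylock(), since it runs
         * under memory reclaim and must not wait.
         */
        ret = dma_resv_lock_interruptible(obj->resv, NULL);
        if (ret)
                return ret;

        /* ... inspect or populate shmem->pages under the resv lock ... */

        dma_resv_unlock(obj->resv);
        return 0;
}

The madvise and fault-handler hunks follow this order (resv lock first, then the driver's own locks), which is what allows drm_gem_shmem_purge() and the shrinker to rely on the same dma_resv lock.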