author     Lucas Stach <l.stach@pengutronix.de>    2017-12-06 12:53:27 +0300
committer  Lucas Stach <l.stach@pengutronix.de>    2018-02-12 18:31:01 +0300
commit     6d7a20c0776036115c6e22bc673d645d524c4b8a (patch)
tree       133109c3465b8db06286f8c7a24a24e6e291c3e7 /drivers/gpu/drm/etnaviv/etnaviv_dump.c
parent     e0580254ae5c70f9fad3e24c20b92287ad817d8e (diff)
download   linux-6d7a20c0776036115c6e22bc673d645d524c4b8a.tar.xz
drm/etnaviv: replace hangcheck with scheduler timeout
This replaces the etnaviv-internal hangcheck logic with the job timeout handling provided by the DRM scheduler. This simplifies the driver further and allows jobs to be replayed after a GPU reset, so only minimal state is lost.

This introduces a user-visible change: jobs are no longer allowed to run indefinitely as long as they make progress, since that causes quality-of-service issues when multiple processes are using the GPU. Userspace is now responsible for flushing jobs in a way that they finish in a reasonable time, where reasonable is currently defined as less than 500 ms.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
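The 500 ms bound is the timeout the driver hands to the DRM scheduler when it sets up its run queue; when a job exceeds it, the scheduler calls the driver's timedout_job hook instead of a driver-private hangcheck timer firing. A minimal sketch of that wiring follows; the drm_sched_init() signature shown matches the v4.16-era scheduler, and the callback names and limit values are placeholder assumptions, not the literal etnaviv_sched.c code.

/*
 * Minimal sketch, not the literal etnaviv code: handing a 500 ms job
 * timeout to the DRM scheduler. Names and limits are placeholders.
 */
#include <linux/device.h>
#include <linux/jiffies.h>
#include <drm/gpu_scheduler.h>

static void example_timedout_job(struct drm_sched_job *sched_job)
{
	/* Capture a dump and reset/replay here, instead of a private
	 * hangcheck timer doing it. */
}

static const struct drm_sched_backend_ops example_sched_ops = {
	/* .dependency, .run_job and .free_job omitted in this sketch */
	.timedout_job = example_timedout_job,
};

static int example_sched_init(struct drm_gpu_scheduler *sched,
			      struct device *dev)
{
	/* Jobs running longer than 500 ms are reported via .timedout_job */
	return drm_sched_init(sched, &example_sched_ops,
			      4,	/* hw submission limit (placeholder) */
			      0,	/* job hang limit (placeholder) */
			      msecs_to_jiffies(500), dev_name(dev));
}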
Diffstat (limited to 'drivers/gpu/drm/etnaviv/etnaviv_dump.c')
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_dump.c  21
1 file changed, 19 insertions, 2 deletions
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 6d0909c589d1..48aef6cf6a42 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -20,9 +20,13 @@
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
+#include "etnaviv_sched.h"
#include "state.xml.h"
#include "state_hi.xml.h"
+static bool etnaviv_dump_core = true;
+module_param_named(dump_core, etnaviv_dump_core, bool, 0600);
+
struct core_dump_iterator {
void *start;
struct etnaviv_dump_object_header *hdr;
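The new dump_core parameter is a one-shot arm bit: the first dump clears it, and because it is registered writable (0600) it can be re-armed from userspace via /sys/module/etnaviv/parameters/dump_core. A stand-alone sketch of the same pattern is below; the module and identifier names are made up for illustration.

/*
 * Stand-alone sketch (not etnaviv code) of the one-shot dump_core
 * pattern: a writable bool module parameter that userspace can re-arm
 * under /sys/module/<module>/parameters/dump_core after a dump cleared it.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static bool example_dump_core = true;
module_param_named(dump_core, example_dump_core, bool, 0600);
MODULE_PARM_DESC(dump_core, "Arm a one-shot core dump on the next hang");

static void example_handle_hang(void)
{
	/* Mirrors the logic added above: dump only while armed. */
	if (!example_dump_core)
		return;
	example_dump_core = false;
	/* ... write the dump ... */
}

static int __init example_init(void)
{
	example_handle_hang();
	return 0;
}
module_init(example_init);

static void __exit example_exit(void) { }
module_exit(example_exit);

MODULE_LICENSE("GPL");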
@@ -121,10 +125,16 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
struct etnaviv_vram_mapping *vram;
struct etnaviv_gem_object *obj;
struct etnaviv_gem_submit *submit;
+ struct drm_sched_job *s_job;
unsigned int n_obj, n_bomap_pages;
size_t file_size, mmu_size;
__le64 *bomap, *bomap_start;
+ /* Only catch the first event, or when manually re-armed */
+ if (!etnaviv_dump_core)
+ return;
+ etnaviv_dump_core = false;
+
mmu_size = etnaviv_iommu_dump_size(gpu->mmu);
/* We always dump registers, mmu, ring and end marker */
@@ -135,10 +145,13 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
mmu_size + gpu->buffer.size;
/* Add in the active command buffers */
- list_for_each_entry(submit, &gpu->active_submit_list, node) {
+ spin_lock(&gpu->sched.job_list_lock);
+ list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
+ submit = to_etnaviv_submit(s_job);
file_size += submit->cmdbuf.size;
n_obj++;
}
+ spin_unlock(&gpu->sched.job_list_lock);
/* Add in the active buffer objects */
list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
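The accounting loop above no longer walks a driver-private active_submit_list; it walks the scheduler's ring_mirror_list under job_list_lock and maps each drm_sched_job back to the etnaviv submit. The to_etnaviv_submit() helper is most plausibly a container_of() wrapper, sketched below under the assumption that struct etnaviv_gem_submit embeds its scheduler job in a member named sched_job (added elsewhere in this series); check etnaviv_sched.h for the real definition.

/*
 * Sketch of the helper assumed by the new loops: map the scheduler job
 * back to the etnaviv submit. Member name sched_job is an assumption.
 */
#include <linux/kernel.h>
#include <drm/gpu_scheduler.h>
#include "etnaviv_gem.h"

static inline struct etnaviv_gem_submit *
to_etnaviv_submit(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct etnaviv_gem_submit, sched_job);
}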
@@ -180,10 +193,14 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
gpu->buffer.size,
etnaviv_cmdbuf_get_va(&gpu->buffer));
- list_for_each_entry(submit, &gpu->active_submit_list, node)
+ spin_lock(&gpu->sched.job_list_lock);
+ list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
+ submit = to_etnaviv_submit(s_job);
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
submit->cmdbuf.vaddr, submit->cmdbuf.size,
etnaviv_cmdbuf_get_va(&submit->cmdbuf));
+ }
+ spin_unlock(&gpu->sched.job_list_lock);
/* Reserve space for the bomap */
if (n_bomap_pages) {