// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
*/

#include <linux/types.h>
#include <linux/debugfs.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>

#include "a5xx_gpu.h"
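
/*
 * Dump the PFP state: select each of the 36 status slots via
 * CP_PFP_STAT_ADDR and read the value back through CP_PFP_STAT_DATA.
 */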
static void pfp_print(struct msm_gpu *gpu, struct drm_printer *p)
{
	int i;

	drm_printf(p, "PFP state:\n");

	for (i = 0; i < 36; i++) {
		gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, i);
		drm_printf(p, "  %02x: %08x\n", i,
			gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA));
	}
}
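
/*
 * Dump the ME state: same select/read pattern as pfp_print(), with 29
 * status slots.
 */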
static void me_print(struct msm_gpu *gpu, struct drm_printer *p)
{
	int i;

	drm_printf(p, "ME state:\n");

	for (i = 0; i < 29; i++) {
		gpu_write(gpu, REG_A5XX_CP_ME_STAT_ADDR, i);
		drm_printf(p, "  %02x: %08x\n", i,
			gpu_read(gpu, REG_A5XX_CP_ME_STAT_DATA));
	}
}
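
/*
 * Dump the MEQ (PFP-to-ME queue). The debug address auto-increments on
 * each read, so reset it to zero once and read CP_MEQ_DBG_DATA 64
 * times.
 */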
static void meq_print(struct msm_gpu *gpu, struct drm_printer *p)
{
	int i;

	drm_printf(p, "MEQ state:\n");
	gpu_write(gpu, REG_A5XX_CP_MEQ_DBG_ADDR, 0);

	for (i = 0; i < 64; i++) {
		drm_printf(p, "  %02x: %08x\n", i,
			gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA));
	}
}
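
/*
 * Dump the ROQ: 512 dwords total, printed four per line. As with the
 * MEQ, the debug address auto-increments on each read of
 * CP_ROQ_DBG_DATA.
 */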
static void roq_print(struct msm_gpu *gpu, struct drm_printer *p)
{
	int i;

	drm_printf(p, "ROQ state:\n");
	gpu_write(gpu, REG_A5XX_CP_ROQ_DBG_ADDR, 0);

	for (i = 0; i < 512 / 4; i++) {
		uint32_t val[4];
		int j;

		for (j = 0; j < 4; j++)
			val[j] = gpu_read(gpu, REG_A5XX_CP_ROQ_DBG_DATA);

		drm_printf(p, "  %02x: %08x %08x %08x %08x\n", i,
			val[0], val[1], val[2], val[3]);
	}
}
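
/*
 * Common seq_file show() trampoline: each entry in a5xx_debugfs_list
 * stashes its dump function in drm_info_node.data, so one show()
 * implementation serves all four files.
 */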
static int show(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_printer p = drm_seq_file_printer(m);
	void (*show)(struct msm_gpu *gpu, struct drm_printer *p) =
		node->info_ent->data;

	show(priv->gpu, &p);

	return 0;
}
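
/* Generate a drm_info_list entry named after its dump function */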
#define ENT(n) { .name = #n, .show = show, .data = n ##_print }

static struct drm_info_list a5xx_debugfs_list[] = {
	ENT(pfp),
	ENT(me),
	ENT(meq),
	ENT(roq),
};

/* for debugfs files that can be written to, we can't use drm helper: */
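/*
 * Writing any value to "reset" drops the cached PM4/PFP firmware and
 * the backing BOs, then forces a recover, so the microcode is
 * re-requested and reloaded the next time the GPU is initialized.
 */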
static int
reset_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

	if (!capable(CAP_SYS_ADMIN))
		return -EINVAL;

	/* TODO do we care about trying to make sure the GPU is idle?
	 * Since this is just a debug feature limited to CAP_SYS_ADMIN,
	 * maybe it is fine to let the user keep both pieces if they
	 * try to reset an active GPU.
	 */

	mutex_lock(&gpu->lock);

	release_firmware(adreno_gpu->fw[ADRENO_FW_PM4]);
	adreno_gpu->fw[ADRENO_FW_PM4] = NULL;
	release_firmware(adreno_gpu->fw[ADRENO_FW_PFP]);
	adreno_gpu->fw[ADRENO_FW_PFP] = NULL;

	if (a5xx_gpu->pm4_bo) {
		msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
		drm_gem_object_put(a5xx_gpu->pm4_bo);
		a5xx_gpu->pm4_bo = NULL;
	}

	if (a5xx_gpu->pfp_bo) {
		msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
		drm_gem_object_put(a5xx_gpu->pfp_bo);
		a5xx_gpu->pfp_bo = NULL;
	}

	gpu->needs_hw_init = true;

	pm_runtime_get_sync(&gpu->pdev->dev);
	gpu->funcs->recover(gpu);
	pm_runtime_put_sync(&gpu->pdev->dev);

	mutex_unlock(&gpu->lock);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n");
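
/*
 * Register the four read-only state dumps plus the write-only "reset"
 * file under the drm minor's debugfs root (typically
 * /sys/kernel/debug/dri/<minor>/).
 */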
void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
{
	struct drm_device *dev;

	if (!minor)
		return;

	dev = minor->dev;

	drm_debugfs_create_files(a5xx_debugfs_list,
				 ARRAY_SIZE(a5xx_debugfs_list),
				 minor->debugfs_root, minor);

	debugfs_create_file_unsafe("reset", S_IWUGO, minor->debugfs_root, dev,
				   &reset_fops);
}