author    Ben Skeggs <bskeggs@redhat.com>    2015-01-13 17:04:21 +0300
committer Ben Skeggs <bskeggs@redhat.com>    2015-01-22 05:17:43 +0300
commit    ebb58dc2ef8c62d1affa28160f57faa7b0e1dc02 (patch)
tree      9611cc71197328f8c9887bce0fafd0ed4633c012 /drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
parent    f3867f439fd610db0cbcf1bb739001e95b7b25c6 (diff)
download  linux-ebb58dc2ef8c62d1affa28160f57faa7b0e1dc02.tar.xz
drm/nouveau/pmu: rename from pwr (no binary change)
Switch to NVIDIA's name for the device.

The namespace of NVKM is being changed to nvkm_ instead of nouveau_, which will be used for the DRM part of the driver. This is being done in order to make it very clear as to what part of the driver a given symbol belongs to, and as a minor step towards splitting the DRM driver out to be able to stand on its own (for virt).

Because there's already a large amount of churn here anyway, this is as good a time as any to also switch to NVIDIA's device and chipset naming to ease collaboration with them.

A comparison of objdump disassemblies proves no code changes.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
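For orientation, here is a minimal sketch of the interface as it stands after this commit; the prototypes mirror the definitions in the new memx.c below, with the pre-rename form noted in a comment (the old "pwr" name is recalled from the previous tree layout and is illustrative, not quoted from a header):

#include <linux/types.h>

struct nouveau_pmu;	/* this commit; previously "struct nouveau_pwr" */
struct nouveau_memx;	/* script-builder handle, unchanged by the rename */

int  nouveau_memx_init(struct nouveau_pmu *pmu, struct nouveau_memx **pmemx);
int  nouveau_memx_fini(struct nouveau_memx **pmemx, bool exec);
void nouveau_memx_wr32(struct nouveau_memx *memx, u32 addr, u32 data);
void nouveau_memx_wait(struct nouveau_memx *memx,
		       u32 addr, u32 mask, u32 data, u32 nsec);
void nouveau_memx_nsec(struct nouveau_memx *memx, u32 nsec);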
Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c')
-rw-r--r--   drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c   201
1 files changed, 201 insertions, 0 deletions
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
new file mode 100644
index 000000000000..671c7177d3af
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
@@ -0,0 +1,201 @@
+#ifndef __NVKM_PMU_MEMX_H__
+#define __NVKM_PMU_MEMX_H__
+
+#include "priv.h"
+
+struct nouveau_memx {
+ struct nouveau_pmu *pmu;
+ u32 base;
+ u32 size;
+ struct {
+ u32 mthd;
+ u32 size;
+ u32 data[64];
+ } c;
+};
+
+static void
+memx_out(struct nouveau_memx *memx)
+{
+ struct nouveau_pmu *pmu = memx->pmu;
+ int i;
+
+ if (memx->c.mthd) {
+ nv_wr32(pmu, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd);
+ for (i = 0; i < memx->c.size; i++)
+ nv_wr32(pmu, 0x10a1c4, memx->c.data[i]);
+ memx->c.mthd = 0;
+ memx->c.size = 0;
+ }
+}
+
+static void
+memx_cmd(struct nouveau_memx *memx, u32 mthd, u32 size, u32 data[])
+{
+ if ((memx->c.size + size >= ARRAY_SIZE(memx->c.data)) ||
+ (memx->c.mthd && memx->c.mthd != mthd))
+ memx_out(memx);
+ memcpy(&memx->c.data[memx->c.size], data, size * sizeof(data[0]));
+ memx->c.size += size;
+ memx->c.mthd = mthd;
+}
+
+int
+nouveau_memx_init(struct nouveau_pmu *pmu, struct nouveau_memx **pmemx)
+{
+ struct nouveau_memx *memx;
+ u32 reply[2];
+ int ret;
+
+ ret = pmu->message(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
+ MEMX_INFO_DATA, 0);
+ if (ret)
+ return ret;
+
+ memx = *pmemx = kzalloc(sizeof(*memx), GFP_KERNEL);
+ if (!memx)
+ return -ENOMEM;
+ memx->pmu = pmu;
+ memx->base = reply[0];
+ memx->size = reply[1];
+
+ /* acquire data segment access */
+ do {
+ nv_wr32(pmu, 0x10a580, 0x00000003);
+ } while (nv_rd32(pmu, 0x10a580) != 0x00000003);
+ nv_wr32(pmu, 0x10a1c0, 0x01000000 | memx->base);
+
+ return 0;
+}
+
+int
+nouveau_memx_fini(struct nouveau_memx **pmemx, bool exec)
+{
+ struct nouveau_memx *memx = *pmemx;
+ struct nouveau_pmu *pmu = memx->pmu;
+ u32 finish, reply[2];
+
+ /* flush the cache... */
+ memx_out(memx);
+
+ /* release data segment access */
+ finish = nv_rd32(pmu, 0x10a1c0) & 0x00ffffff;
+ nv_wr32(pmu, 0x10a580, 0x00000000);
+
+ /* call MEMX process to execute the script, and wait for reply */
+ if (exec) {
+ pmu->message(pmu, reply, PROC_MEMX, MEMX_MSG_EXEC,
+ memx->base, finish);
+ }
+
+ nv_debug(memx->pmu, "Exec took %uns, PMU_IN %08x\n",
+ reply[0], reply[1]);
+ kfree(memx);
+ return 0;
+}
+
+void
+nouveau_memx_wr32(struct nouveau_memx *memx, u32 addr, u32 data)
+{
+ nv_debug(memx->pmu, "R[%06x] = 0x%08x\n", addr, data);
+ memx_cmd(memx, MEMX_WR32, 2, (u32[]){ addr, data });
+}
+
+void
+nouveau_memx_wait(struct nouveau_memx *memx,
+ u32 addr, u32 mask, u32 data, u32 nsec)
+{
+ nv_debug(memx->pmu, "R[%06x] & 0x%08x == 0x%08x, %d us\n",
+ addr, mask, data, nsec);
+ memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, mask, data, nsec });
+ memx_out(memx); /* fuc can't handle multiple */
+}
+
+void
+nouveau_memx_nsec(struct nouveau_memx *memx, u32 nsec)
+{
+ nv_debug(memx->pmu, " DELAY = %d ns\n", nsec);
+ memx_cmd(memx, MEMX_DELAY, 1, (u32[]){ nsec });
+ memx_out(memx); /* fuc can't handle multiple */
+}
+
+void
+nouveau_memx_wait_vblank(struct nouveau_memx *memx)
+{
+ struct nouveau_pmu *pmu = memx->pmu;
+ u32 heads, x, y, px = 0;
+ int i, head_sync;
+
+ if (nv_device(pmu)->chipset < 0xd0) {
+ heads = nv_rd32(pmu, 0x610050);
+ for (i = 0; i < 2; i++) {
+ /* Heuristic: sync to head with biggest resolution */
+ if (heads & (2 << (i << 3))) {
+ x = nv_rd32(pmu, 0x610b40 + (0x540 * i));
+ y = (x & 0xffff0000) >> 16;
+ x &= 0x0000ffff;
+ if ((x * y) > px) {
+ px = (x * y);
+ head_sync = i;
+ }
+ }
+ }
+ }
+
+ if (px == 0) {
+ nv_debug(memx->pmu, "WAIT VBLANK !NO ACTIVE HEAD\n");
+ return;
+ }
+
+ nv_debug(memx->pmu, "WAIT VBLANK HEAD%d\n", head_sync);
+ memx_cmd(memx, MEMX_VBLANK, 1, (u32[]){ head_sync });
+ memx_out(memx); /* fuc can't handle multiple */
+}
+
+void
+nouveau_memx_train(struct nouveau_memx *memx)
+{
+ nv_debug(memx->pmu, " MEM TRAIN\n");
+ memx_cmd(memx, MEMX_TRAIN, 0, NULL);
+}
+
+int
+nouveau_memx_train_result(struct nouveau_pmu *pmu, u32 *res, int rsize)
+{
+ u32 reply[2], base, size, i;
+ int ret;
+
+ ret = pmu->message(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
+ MEMX_INFO_TRAIN, 0);
+ if (ret)
+ return ret;
+
+ base = reply[0];
+ size = reply[1] >> 2;
+ if (size > rsize)
+ return -ENOMEM;
+
+ /* read the packet */
+ nv_wr32(pmu, 0x10a1c0, 0x02000000 | base);
+
+ for (i = 0; i < size; i++)
+ res[i] = nv_rd32(pmu, 0x10a1c4);
+
+ return 0;
+}
+
+void
+nouveau_memx_block(struct nouveau_memx *memx)
+{
+ nv_debug(memx->pmu, " HOST BLOCKED\n");
+ memx_cmd(memx, MEMX_ENTER, 0, NULL);
+}
+
+void
+nouveau_memx_unblock(struct nouveau_memx *memx)
+{
+ nv_debug(memx->pmu, " HOST UNBLOCKED\n");
+ memx_cmd(memx, MEMX_LEAVE, 0, NULL);
+}
+
+#endif
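
For context, a minimal sketch of how a caller might drive the script builder added above; the register offset, timeouts and the wrapper function are hypothetical placeholders, and the nouveau_memx_* declarations from this file (e.g. via "priv.h") are assumed to be in scope:

#include <linux/types.h>

/* hypothetical consumer, e.g. something along the lines of the memory
 * reclocking code, recording a script and handing it to PROC_MEMX */
static int
example_memx_script(struct nouveau_pmu *pmu)
{
	struct nouveau_memx *memx;
	int ret;

	/* acquire the PMU data segment and start recording a script */
	ret = nouveau_memx_init(pmu, &memx);
	if (ret)
		return ret;

	nouveau_memx_block(memx);                       /* MEMX_ENTER: block host access */
	nouveau_memx_wr32(memx, 0x100200, 0x00000001);  /* placeholder register write */
	nouveau_memx_wait(memx, 0x100200, 0x00000001,   /* poll until bit 0 reads back set, */
			  0x00000001, 1000);            /* with a placeholder timeout */
	nouveau_memx_nsec(memx, 2000);                  /* fixed delay between steps */
	nouveau_memx_unblock(memx);                     /* MEMX_LEAVE: unblock host access */

	/* flush any staged commands and ask PROC_MEMX to execute the script */
	return nouveau_memx_fini(&memx, true);
}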