author     Ben Skeggs <bskeggs@redhat.com>    2015-08-20 07:54:12 +0300
committer  Ben Skeggs <bskeggs@redhat.com>    2015-08-28 05:40:23 +0300
commit     c19e329d663715014b367c4fedb217e0378342bf (patch)
tree       da0812b75b4d709105256a7f46cb3fa8f67654bf /drivers/gpu/drm
parent     27cc60a17512055c63f05a27eb6687051ade5257 (diff)
download   linux-c19e329d663715014b367c4fedb217e0378342bf.tar.xz
drm/nouveau/pmu: switch to subdev printk macros
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
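
The pattern is the same in every hunk below: cache &pmu->subdev in a local subdev, take the device pointer from it where register accesses are needed, and replace the object-based nv_warn()/nv_error()/nv_info()/nv_trace()/nv_debug() calls with the corresponding nvkm_*(subdev, ...) subdev macros, dropping the "0x" prefixes from the format strings. A minimal sketch of the conversion follows; example_intr() is a hypothetical helper for illustration, not a function touched by this commit, while the types and macros (nvkm_subdev, nvkm_error, nvkm_wr32) are the ones used in the hunks below.

    /* Hypothetical helper showing the old -> new logging style. */
    static void
    example_intr(struct nvkm_pmu *pmu, u32 intr)
    {
            struct nvkm_subdev *subdev = &pmu->subdev;   /* cached once */
            struct nvkm_device *device = subdev->device;

            if (intr) {
                    /* was: nv_error(pmu, "intr 0x%08x\n", intr); */
                    nvkm_error(subdev, "intr %08x\n", intr);
                    nvkm_wr32(device, 0x10a004, intr);   /* ack, as in base.c below */
            }
    }
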
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--   drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c    27
-rw-r--r--   drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c    2
-rw-r--r--   drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c   19
-rw-r--r--   drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c    28
4 files changed, 42 insertions, 34 deletions
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
index 29c692c661da..247994017965 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -92,7 +92,8 @@ static void
 nvkm_pmu_recv(struct work_struct *work)
 {
         struct nvkm_pmu *pmu = container_of(work, struct nvkm_pmu, recv.work);
-        struct nvkm_device *device = pmu->subdev.device;
+        struct nvkm_subdev *subdev = &pmu->subdev;
+        struct nvkm_device *device = subdev->device;
         u32 process, message, data0, data1;
 
         /* nothing to do if GET == PUT */
@@ -132,12 +133,12 @@ nvkm_pmu_recv(struct work_struct *work)
         /* right now there's no other expected responses from the engine,
          * so assume that any unexpected message is an error.
          */
-        nv_warn(pmu, "%c%c%c%c 0x%08x 0x%08x 0x%08x 0x%08x\n",
-                (char)((process & 0x000000ff) >> 0),
-                (char)((process & 0x0000ff00) >> 8),
-                (char)((process & 0x00ff0000) >> 16),
-                (char)((process & 0xff000000) >> 24),
-                process, message, data0, data1);
+        nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n",
+                  (char)((process & 0x000000ff) >> 0),
+                  (char)((process & 0x0000ff00) >> 8),
+                  (char)((process & 0x00ff0000) >> 16),
+                  (char)((process & 0xff000000) >> 24),
+                  process, message, data0, data1);
 }
 
 static void
@@ -151,8 +152,9 @@ nvkm_pmu_intr(struct nvkm_subdev *subdev)
         if (intr & 0x00000020) {
                 u32 stat = nvkm_rd32(device, 0x10a16c);
                 if (stat & 0x80000000) {
-                        nv_error(pmu, "UAS fault at 0x%06x addr 0x%08x\n",
-                                 stat & 0x00ffffff, nvkm_rd32(device, 0x10a168));
+                        nvkm_error(subdev, "UAS fault at %06x addr %08x\n",
+                                   stat & 0x00ffffff,
+                                   nvkm_rd32(device, 0x10a168));
                         nvkm_wr32(device, 0x10a16c, 0x00000000);
                         intr &= ~0x00000020;
                 }
@@ -165,14 +167,15 @@ nvkm_pmu_intr(struct nvkm_subdev *subdev)
         }
 
         if (intr & 0x00000080) {
-                nv_info(pmu, "wr32 0x%06x 0x%08x\n", nvkm_rd32(device, 0x10a7a0),
-                        nvkm_rd32(device, 0x10a7a4));
+                nvkm_info(subdev, "wr32 %06x %08x\n",
+                          nvkm_rd32(device, 0x10a7a0),
+                          nvkm_rd32(device, 0x10a7a4));
                 nvkm_wr32(device, 0x10a004, 0x00000080);
                 intr &= ~0x00000080;
         }
 
         if (intr) {
-                nv_error(pmu, "intr 0x%08x\n", intr);
+                nvkm_error(subdev, "intr %08x\n", intr);
                 nvkm_wr32(device, 0x10a004, intr);
         }
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
index 469177e34563..bea283314240 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
@@ -81,7 +81,7 @@ gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
         if (nv_device_match(device, 0x11fc, 0x17aa, 0x2211) /* Lenovo W541 */
          || nv_device_match(device, 0x11fc, 0x17aa, 0x221e) /* Lenovo W541 */
          || nvkm_boolopt(device->cfgopt, "War00C800_0", false)) {
-                nv_info(pmu, "hw bug workaround enabled\n");
+                nvkm_info(&pmu->subdev, "hw bug workaround enabled\n");
                 switch (device->chipset) {
                 case 0xe4:
                         magic(device, 0x04000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
index 20087d53fb9c..1003013666a9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
@@ -84,7 +84,8 @@ gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu *pmu,
                 level = min(clk->state_nr - 1, level);
         }
 
-        nv_trace(pmu, "cur level = %d, new level = %d\n", cur_level, level);
+        nvkm_trace(&pmu->base.subdev, "cur level = %d, new level = %d\n",
+                   cur_level, level);
 
         *state = level;
 
@@ -119,8 +120,10 @@ gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
                 container_of(alarm, struct gk20a_pmu, alarm);
         struct gk20a_pmu_dvfs_data *data = pmu->data;
         struct gk20a_pmu_dvfs_dev_status status;
-        struct nvkm_clk *clk = nvkm_clk(pmu);
-        struct nvkm_volt *volt = nvkm_volt(pmu);
+        struct nvkm_subdev *subdev = &pmu->base.subdev;
+        struct nvkm_device *device = subdev->device;
+        struct nvkm_clk *clk = device->clk;
+        struct nvkm_volt *volt = device->volt;
         u32 utilization = 0;
         int state, ret;
 
@@ -133,7 +136,7 @@ gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
 
         ret = gk20a_pmu_dvfs_get_dev_status(pmu, &status);
         if (ret) {
-                nv_warn(pmu, "failed to get device status\n");
+                nvkm_warn(subdev, "failed to get device status\n");
                 goto resched;
         }
 
@@ -142,17 +145,17 @@ gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
 
         data->avg_load = (data->p_smooth * data->avg_load) + utilization;
         data->avg_load /= data->p_smooth + 1;
-        nv_trace(pmu, "utilization = %d %%, avg_load = %d %%\n",
-                 utilization, data->avg_load);
+        nvkm_trace(subdev, "utilization = %d %%, avg_load = %d %%\n",
+                   utilization, data->avg_load);
 
         ret = gk20a_pmu_dvfs_get_cur_state(pmu, &state);
         if (ret) {
-                nv_warn(pmu, "failed to get current state\n");
+                nvkm_warn(subdev, "failed to get current state\n");
                 goto resched;
         }
 
         if (gk20a_pmu_dvfs_get_target_state(pmu, &state, data->avg_load)) {
-                nv_trace(pmu, "set new state to %d\n", state);
+                nvkm_trace(subdev, "set new state to %d\n", state);
                 gk20a_pmu_dvfs_target(pmu, &state);
         }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
index 846969884796..556aefe3614f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
@@ -72,7 +72,8 @@ nvkm_memx_fini(struct nvkm_memx **pmemx, bool exec)
 {
         struct nvkm_memx *memx = *pmemx;
         struct nvkm_pmu *pmu = memx->pmu;
-        struct nvkm_device *device = pmu->subdev.device;
+        struct nvkm_subdev *subdev = &pmu->subdev;
+        struct nvkm_device *device = subdev->device;
         u32 finish, reply[2];
 
         /* flush the cache... */
@@ -88,8 +89,8 @@ nvkm_memx_fini(struct nvkm_memx **pmemx, bool exec)
                               memx->base, finish);
         }
 
-        nv_debug(memx->pmu, "Exec took %uns, PMU_IN %08x\n",
-                 reply[0], reply[1]);
+        nvkm_debug(subdev, "Exec took %uns, PMU_IN %08x\n",
+                   reply[0], reply[1]);
         kfree(memx);
         return 0;
 }
@@ -97,7 +98,7 @@ nvkm_memx_fini(struct nvkm_memx **pmemx, bool exec)
 void
 nvkm_memx_wr32(struct nvkm_memx *memx, u32 addr, u32 data)
 {
-        nv_debug(memx->pmu, "R[%06x] = 0x%08x\n", addr, data);
+        nvkm_debug(&memx->pmu->subdev, "R[%06x] = %08x\n", addr, data);
         memx_cmd(memx, MEMX_WR32, 2, (u32[]){ addr, data });
 }
 
@@ -105,8 +106,8 @@ void
 nvkm_memx_wait(struct nvkm_memx *memx,
                u32 addr, u32 mask, u32 data, u32 nsec)
 {
-        nv_debug(memx->pmu, "R[%06x] & 0x%08x == 0x%08x, %d us\n",
-                 addr, mask, data, nsec);
+        nvkm_debug(&memx->pmu->subdev, "R[%06x] & %08x == %08x, %d us\n",
+                   addr, mask, data, nsec);
         memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, mask, data, nsec });
         memx_out(memx); /* fuc can't handle multiple */
 }
@@ -114,7 +115,7 @@ nvkm_memx_wait(struct nvkm_memx *memx,
 void
 nvkm_memx_nsec(struct nvkm_memx *memx, u32 nsec)
 {
-        nv_debug(memx->pmu, " DELAY = %d ns\n", nsec);
+        nvkm_debug(&memx->pmu->subdev, " DELAY = %d ns\n", nsec);
         memx_cmd(memx, MEMX_DELAY, 1, (u32[]){ nsec });
         memx_out(memx); /* fuc can't handle multiple */
 }
@@ -122,7 +123,8 @@ nvkm_memx_nsec(struct nvkm_memx *memx, u32 nsec)
 void
 nvkm_memx_wait_vblank(struct nvkm_memx *memx)
 {
-        struct nvkm_device *device = memx->pmu->subdev.device;
+        struct nvkm_subdev *subdev = &memx->pmu->subdev;
+        struct nvkm_device *device = subdev->device;
         u32 heads, x, y, px = 0;
         int i, head_sync;
 
@@ -143,11 +145,11 @@ nvkm_memx_wait_vblank(struct nvkm_memx *memx)
         }
 
         if (px == 0) {
-                nv_debug(memx->pmu, "WAIT VBLANK !NO ACTIVE HEAD\n");
+                nvkm_debug(subdev, "WAIT VBLANK !NO ACTIVE HEAD\n");
                 return;
         }
 
-        nv_debug(memx->pmu, "WAIT VBLANK HEAD%d\n", head_sync);
+        nvkm_debug(subdev, "WAIT VBLANK HEAD%d\n", head_sync);
         memx_cmd(memx, MEMX_VBLANK, 1, (u32[]){ head_sync });
         memx_out(memx); /* fuc can't handle multiple */
 }
@@ -155,7 +157,7 @@ nvkm_memx_wait_vblank(struct nvkm_memx *memx)
 void
 nvkm_memx_train(struct nvkm_memx *memx)
 {
-        nv_debug(memx->pmu, " MEM TRAIN\n");
+        nvkm_debug(&memx->pmu->subdev, " MEM TRAIN\n");
         memx_cmd(memx, MEMX_TRAIN, 0, NULL);
 }
 
@@ -188,14 +190,14 @@ nvkm_memx_train_result(struct nvkm_pmu *pmu, u32 *res, int rsize)
 void
 nvkm_memx_block(struct nvkm_memx *memx)
 {
-        nv_debug(memx->pmu, " HOST BLOCKED\n");
+        nvkm_debug(&memx->pmu->subdev, " HOST BLOCKED\n");
         memx_cmd(memx, MEMX_ENTER, 0, NULL);
 }
 
 void
 nvkm_memx_unblock(struct nvkm_memx *memx)
 {
-        nv_debug(memx->pmu, " HOST UNBLOCKED\n");
+        nvkm_debug(&memx->pmu->subdev, " HOST UNBLOCKED\n");
         memx_cmd(memx, MEMX_LEAVE, 0, NULL);
 }
 #endif