| author | Ben Skeggs <bskeggs@redhat.com> | 2018-05-08 13:39:47 +0300 |
|---|---|---|
| committer | Ben Skeggs <bskeggs@redhat.com> | 2018-05-18 08:01:28 +0300 |
| commit | 1590700d94ac53772491ed3103a4e8b8de01640a (patch) | |
| tree | 1e95b729248ad4fb377e72f9dc8d4013572a62e9 /drivers/gpu/drm/nouveau/dispnv50 | |
| parent | 0a3687716bb0a53a363b63cb5ba2bddc14c3bd2a (diff) | |
| download | linux-1590700d94ac53772491ed3103a4e8b8de01640a.tar.xz | |
drm/nouveau/kms/nv50-: split each resource type into their own source files
There should be no code changes here, just shuffling stuff around.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
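The per-resource constructors this commit introduces (nv50_base_new(), nv50_core_new(), nv50_curs_new(), and friends, shown in the diff below) all follow the same shape: a table of { oclass, version, constructor } entries ordered newest-class-first, probed with nvif_mclass() so the first class the display object actually supports is the one used. The following is a minimal, self-contained userspace sketch of that selection pattern only; pick_class() and hw_supports() are hypothetical stand-ins for nvif_mclass() and the hardware query, and the class numbers are illustrative placeholders, not guaranteed to match the real NVIF class IDs.

```c
#include <stdio.h>

/* One row of the selection table, mirroring the bases[]/cores[]/curses[]
 * arrays added by this commit: newest classes first, zero-terminated. */
struct class_entry {
	int oclass;
	int version;
	int (*ctor)(int oclass);
};

/* Stand-in for the hardware capability check nvif_mclass() performs. */
static int hw_supports(int oclass)
{
	return oclass <= 0x827c;	/* pretend the GPU is a G82-era part */
}

/* Stand-in for nvif_mclass(): index of the first supported entry, or -1. */
static int pick_class(const struct class_entry *tbl)
{
	for (int i = 0; tbl[i].oclass; i++) {
		if (hw_supports(tbl[i].oclass))
			return i;
	}
	return -1;
}

static int base507c_ctor(int oclass)
{
	printf("creating base channel with class %04x\n", oclass);
	return 0;
}

int main(void)
{
	const struct class_entry bases[] = {
		{ 0x917c, 0, base507c_ctor },	/* placeholder newer class */
		{ 0x827c, 0, base507c_ctor },	/* placeholder G82-era class */
		{ 0x507c, 0, base507c_ctor },	/* placeholder NV50 class */
		{}
	};
	int cid = pick_class(bases);

	if (cid < 0) {
		fprintf(stderr, "No supported base class\n");
		return 1;
	}
	return bases[cid].ctor(bases[cid].oclass);
}
```

Because every table falls through to older classes, a single constructor (e.g. base507c_new) can back several hardware generations while newer generations can later be slotted in at the top of the table without touching the callers.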
Diffstat (limited to 'drivers/gpu/drm/nouveau/dispnv50')
27 files changed, 2967 insertions, 2482 deletions
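Most of the code being shuffled below drives the hardware through the EVO push-buffer helpers that stay behind in disp.c: evo_wait() reserves space in the channel's DMA push buffer (taking the channel lock), evo_mthd() writes a header word encoding the method offset and word count, evo_data() appends the data words, and evo_kick() publishes the new PUT offset and drops the lock. The standalone sketch below only mirrors the word encoding visible in the diff (header = count << 18 | method, PUT = word offset << 2) to make pushes such as base507c_update() easier to read; the pushbuf array and the *_sketch helpers are illustrative, not the driver's API.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative push buffer; in the driver this is dmac->ptr (mapped memory). */
static uint32_t pushbuf[64];

/* Header word layout used by the evo_mthd() macro: word count in bits 18+,
 * method offset in the low bits. */
static uint32_t *evo_mthd_sketch(uint32_t *p, uint32_t mthd, uint32_t count)
{
	*p++ = (count << 18) | mthd;
	return p;
}

/* evo_data() simply appends a raw data word. */
static uint32_t *evo_data_sketch(uint32_t *p, uint32_t data)
{
	*p++ = data;
	return p;
}

int main(void)
{
	uint32_t *push = pushbuf;
	uint32_t interlock = 0x00000002;	/* example interlock flags */

	/* Equivalent of base507c_update(): one UPDATE method (0x0080) carrying
	 * the interlock flags as its single data word. */
	push = evo_mthd_sketch(push, 0x0080, 1);
	push = evo_data_sketch(push, interlock);

	/* evo_kick() would now write the byte offset of 'push' into the PUT
	 * register: (push - pushbuf) << 2. */
	printf("PUT = 0x%02lx, header = 0x%08x, data = 0x%08x\n",
	       (unsigned long)(push - pushbuf) << 2, pushbuf[0], pushbuf[1]);
	return 0;
}
```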
diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild index 43fc8be49391..f3877d2d8840 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/Kbuild +++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild @@ -1 +1,27 @@ nouveau-y += dispnv50/disp.o + +nouveau-y += dispnv50/core.o +nouveau-y += dispnv50/core507d.o + +nouveau-y += dispnv50/dac507d.o + +nouveau-y += dispnv50/pior507d.o + +nouveau-y += dispnv50/sor507d.o + +nouveau-y += dispnv50/head.o +nouveau-y += dispnv50/head507d.o + +nouveau-y += dispnv50/wndw.o + +nouveau-y += dispnv50/base.o +nouveau-y += dispnv50/base507c.o + +nouveau-y += dispnv50/curs.o +nouveau-y += dispnv50/curs507a.o + +nouveau-y += dispnv50/oimm.o +nouveau-y += dispnv50/oimm507b.o + +nouveau-y += dispnv50/ovly.o +nouveau-y += dispnv50/ovly507e.o diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h new file mode 100644 index 000000000000..8c97e25c881f --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h @@ -0,0 +1,207 @@ +#ifndef __NV50_KMS_ATOM_H__ +#define __NV50_KMS_ATOM_H__ +#define nv50_atom(p) container_of((p), struct nv50_atom, state) +#include <drm/drm_atomic.h> + +struct nv50_atom { + struct drm_atomic_state state; + + struct list_head outp; + bool lock_core; + bool flush_disable; +}; + +#define nv50_head_atom(p) container_of((p), struct nv50_head_atom, state) + +struct nv50_head_atom { + struct drm_crtc_state state; + + struct { + u16 iW; + u16 iH; + u16 oW; + u16 oH; + } view; + + struct nv50_head_mode { + bool interlace; + u32 clock; + struct { + u16 active; + u16 synce; + u16 blanke; + u16 blanks; + } h; + struct { + u32 active; + u16 synce; + u16 blanke; + u16 blanks; + u16 blank2s; + u16 blank2e; + u16 blankus; + } v; + } mode; + + struct { + bool visible; + u32 handle; + u64 offset:40; + u8 mode:4; + } ilut; + + struct { + bool visible; + u32 handle; + u64 offset:40; + u8 format; + u8 kind:7; + u8 layout:1; + u8 block:4; + u32 pitch:20; + u16 x; + u16 y; + u16 w; + u16 h; + } core; + + struct { + bool visible; + u32 handle; + u64 offset:40; + u8 layout:1; + u8 format:1; + } curs; + + struct { + u8 depth; + u8 cpp; + u16 x; + u16 y; + u16 w; + u16 h; + } base; + + struct { + u8 cpp; + } ovly; + + struct { + bool enable:1; + u8 bits:2; + u8 mode:4; + } dither; + + struct { + struct { + u16 cos:12; + u16 sin:12; + } sat; + } procamp; + + struct { + u8 nhsync:1; + u8 nvsync:1; + u8 depth:4; + } or; + + union { + struct { + bool ilut:1; + bool core:1; + bool curs:1; + }; + u8 mask; + } clr; + + union { + struct { + bool ilut:1; + bool core:1; + bool curs:1; + bool view:1; + bool mode:1; + bool base:1; + bool ovly:1; + bool dither:1; + bool procamp:1; + bool or:1; + }; + u16 mask; + } set; +}; + +static inline struct nv50_head_atom * +nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc) +{ + struct drm_crtc_state *statec = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(statec)) + return (void *)statec; + return nv50_head_atom(statec); +} + +#define nv50_wndw_atom(p) container_of((p), struct nv50_wndw_atom, state) + +struct nv50_wndw_atom { + struct drm_plane_state state; + u8 interval; + + struct { + u32 handle; + u16 offset:12; + bool awaken:1; + } ntfy; + + struct { + u32 handle; + u16 offset:12; + u32 acquire; + u32 release; + } sema; + + struct { + u8 enable:2; + } lut; + + struct { + u8 mode:2; + u8 interval:4; + + u8 format; + u8 kind:7; + u8 layout:1; + u8 block:4; + u32 pitch:20; + u16 w; + u16 h; + + u32 handle; + u64 offset; + } image; + + struct { 
+ u16 x; + u16 y; + } point; + + union { + struct { + bool ntfy:1; + bool sema:1; + bool image:1; + }; + u8 mask; + } clr; + + union { + struct { + bool ntfy:1; + bool sema:1; + bool image:1; + bool lut:1; + bool point:1; + }; + u8 mask; + } set; +}; +#endif diff --git a/drivers/gpu/drm/nouveau/dispnv50/base.c b/drivers/gpu/drm/nouveau/dispnv50/base.c new file mode 100644 index 000000000000..12ca5d70509c --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/base.c @@ -0,0 +1,53 @@ +/* + * Copyright 2018 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "base.h" + +#include <nvif/class.h> + +int +nv50_base_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw) +{ + struct { + s32 oclass; + int version; + int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **); + } bases[] = { + { GK110_DISP_BASE_CHANNEL_DMA, 0, base507c_new }, + { GK104_DISP_BASE_CHANNEL_DMA, 0, base507c_new }, + { GF110_DISP_BASE_CHANNEL_DMA, 0, base507c_new }, + { GT214_DISP_BASE_CHANNEL_DMA, 0, base507c_new }, + { GT200_DISP_BASE_CHANNEL_DMA, 0, base507c_new }, + { G82_DISP_BASE_CHANNEL_DMA, 0, base507c_new }, + { NV50_DISP_BASE_CHANNEL_DMA, 0, base507c_new }, + {} + }; + struct nv50_disp *disp = nv50_disp(drm->dev); + int cid; + + cid = nvif_mclass(&disp->disp->object, bases); + if (cid < 0) { + NV_ERROR(drm, "No supported base class\n"); + return cid; + } + + return bases[cid].new(drm, head, bases[cid].oclass, pwndw); +} diff --git a/drivers/gpu/drm/nouveau/dispnv50/base.h b/drivers/gpu/drm/nouveau/dispnv50/base.h new file mode 100644 index 000000000000..1daba7319ba9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/base.h @@ -0,0 +1,8 @@ +#ifndef __NV50_KMS_BASE_H__ +#define __NV50_KMS_BASE_H__ +#include "wndw.h" + +int base507c_new(struct nouveau_drm *, int, s32, struct nv50_wndw **); + +int nv50_base_new(struct nouveau_drm *, int head, struct nv50_wndw **); +#endif diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c new file mode 100644 index 000000000000..b73e7b4d86a5 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c @@ -0,0 +1,307 @@ +/* + * Copyright 2018 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "base.h" + +#include <nvif/class.h> +#include <nvif/cl507c.h> +#include <nvif/event.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_plane_helper.h> +#include "nouveau_bo.h" + +static u32 +base507c_update(struct nv50_wndw *wndw, u32 interlock) +{ + u32 *push; + + if (!(push = evo_wait(&wndw->wndw, 2))) + return 0; + evo_mthd(push, 0x0080, 1); + evo_data(push, interlock); + evo_kick(push, &wndw->wndw); + + if (wndw->wndw.base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA) + return interlock ? 2 << (wndw->id * 8) : 0; + return interlock ? 2 << (wndw->id * 4) : 0; +} + +static void +base507c_lut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) +{ + u32 *push; + if ((push = evo_wait(&wndw->wndw, 2))) { + evo_mthd(push, 0x00e0, 1); + evo_data(push, asyw->lut.enable << 30); + evo_kick(push, &wndw->wndw); + } +} + +static void +base507c_image_clr(struct nv50_wndw *wndw) +{ + u32 *push; + if ((push = evo_wait(&wndw->wndw, 4))) { + evo_mthd(push, 0x0084, 1); + evo_data(push, 0x00000000); + evo_mthd(push, 0x00c0, 1); + evo_data(push, 0x00000000); + evo_kick(push, &wndw->wndw); + } +} + +static void +base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) +{ + const s32 oclass = wndw->wndw.base.user.oclass; + u32 *push; + if ((push = evo_wait(&wndw->wndw, 10))) { + evo_mthd(push, 0x0084, 1); + evo_data(push, asyw->image.mode << 8 | + asyw->image.interval << 4); + evo_mthd(push, 0x00c0, 1); + evo_data(push, asyw->image.handle); + if (oclass < G82_DISP_BASE_CHANNEL_DMA) { + evo_mthd(push, 0x0800, 5); + evo_data(push, asyw->image.offset >> 8); + evo_data(push, 0x00000000); + evo_data(push, (asyw->image.h << 16) | asyw->image.w); + evo_data(push, (asyw->image.layout << 20) | + asyw->image.pitch | + asyw->image.block); + evo_data(push, (asyw->image.kind << 16) | + (asyw->image.format << 8)); + } else + if (oclass < GF110_DISP_BASE_CHANNEL_DMA) { + evo_mthd(push, 0x0800, 5); + evo_data(push, asyw->image.offset >> 8); + evo_data(push, 0x00000000); + evo_data(push, (asyw->image.h << 16) | asyw->image.w); + evo_data(push, (asyw->image.layout << 20) | + asyw->image.pitch | + asyw->image.block); + evo_data(push, asyw->image.format << 8); + } else { + evo_mthd(push, 0x0400, 5); + evo_data(push, asyw->image.offset >> 8); + evo_data(push, 0x00000000); + evo_data(push, (asyw->image.h << 16) | asyw->image.w); + evo_data(push, (asyw->image.layout << 24) | + asyw->image.pitch | + asyw->image.block); + 
evo_data(push, asyw->image.format << 8); + } + evo_kick(push, &wndw->wndw); + } +} + +static int +base507c_ntfy_wait_begun(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) +{ + struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev); + struct nv50_disp *disp = nv50_disp(wndw->plane.dev); + if (nvif_msec(&drm->client.device, 2000ULL, + u32 data = nouveau_bo_rd32(disp->sync, asyw->ntfy.offset / 4); + if ((data & 0xc0000000) == 0x40000000) + break; + usleep_range(1, 2); + ) < 0) + return -ETIMEDOUT; + return 0; +} + +static void +base507c_ntfy_clr(struct nv50_wndw *wndw) +{ + u32 *push; + if ((push = evo_wait(&wndw->wndw, 2))) { + evo_mthd(push, 0x00a4, 1); + evo_data(push, 0x00000000); + evo_kick(push, &wndw->wndw); + } +} + +static void +base507c_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) +{ + u32 *push; + if ((push = evo_wait(&wndw->wndw, 3))) { + evo_mthd(push, 0x00a0, 2); + evo_data(push, asyw->ntfy.awaken << 30 | asyw->ntfy.offset); + evo_data(push, asyw->ntfy.handle); + evo_kick(push, &wndw->wndw); + } +} + +static void +base507c_sema_clr(struct nv50_wndw *wndw) +{ + u32 *push; + if ((push = evo_wait(&wndw->wndw, 2))) { + evo_mthd(push, 0x0094, 1); + evo_data(push, 0x00000000); + evo_kick(push, &wndw->wndw); + } +} + +static void +base507c_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) +{ + u32 *push; + if ((push = evo_wait(&wndw->wndw, 5))) { + evo_mthd(push, 0x0088, 4); + evo_data(push, asyw->sema.offset); + evo_data(push, asyw->sema.acquire); + evo_data(push, asyw->sema.release); + evo_data(push, asyw->sema.handle); + evo_kick(push, &wndw->wndw); + } +} + +static void +base507c_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, + struct nv50_head_atom *asyh) +{ + asyh->base.cpp = 0; +} + +static int +base507c_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, + struct nv50_head_atom *asyh) +{ + const struct drm_framebuffer *fb = asyw->state.fb; + int ret; + + if (!fb->format->depth) + return -EINVAL; + + ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + false, true); + if (ret) + return ret; + + asyh->base.depth = fb->format->depth; + asyh->base.cpp = fb->format->cpp[0]; + asyh->base.x = asyw->state.src.x1 >> 16; + asyh->base.y = asyw->state.src.y1 >> 16; + asyh->base.w = asyw->state.fb->width; + asyh->base.h = asyw->state.fb->height; + + switch (fb->format->format) { + case DRM_FORMAT_C8 : asyw->image.format = 0x1e; break; + case DRM_FORMAT_RGB565 : asyw->image.format = 0xe8; break; + case DRM_FORMAT_XRGB1555 : + case DRM_FORMAT_ARGB1555 : asyw->image.format = 0xe9; break; + case DRM_FORMAT_XRGB8888 : + case DRM_FORMAT_ARGB8888 : asyw->image.format = 0xcf; break; + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_ABGR2101010: asyw->image.format = 0xd1; break; + case DRM_FORMAT_XBGR8888 : + case DRM_FORMAT_ABGR8888 : asyw->image.format = 0xd5; break; + default: + WARN_ON(1); + return -EINVAL; + } + + asyw->lut.enable = 1; + asyw->set.image = true; + return 0; +} + +static const u32 +base507c_format[] = { + DRM_FORMAT_C8, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ABGR8888, + 0 +}; + +static const struct nv50_wndw_func +base507c = { + .acquire = base507c_acquire, + .release = base507c_release, + .sema_set = base507c_sema_set, + .sema_clr = base507c_sema_clr, + .ntfy_set = base507c_ntfy_set, + 
.ntfy_clr = base507c_ntfy_clr, + .ntfy_wait_begun = base507c_ntfy_wait_begun, + .image_set = base507c_image_set, + .image_clr = base507c_image_clr, + .lut = base507c_lut, + .update = base507c_update, +}; + +static int +base507c_new_(const struct nv50_wndw_func *func, const u32 *format, + struct nouveau_drm *drm, int head, s32 oclass, + struct nv50_wndw **pwndw) +{ + struct nv50_disp_base_channel_dma_v0 args = { + .head = head, + }; + struct nv50_disp *disp = nv50_disp(drm->dev); + struct nv50_wndw *wndw; + int ret; + + ret = nv50_wndw_new_(func, drm->dev, DRM_PLANE_TYPE_PRIMARY, + "base", head, format, &wndw); + if (*pwndw = wndw, ret) + return ret; + + ret = nv50_dmac_create(&drm->client.device, &disp->disp->object, + &oclass, head, &args, sizeof(args), + disp->sync->bo.offset, &wndw->wndw); + if (ret) { + NV_ERROR(drm, "base%04x allocation failed: %d\n", oclass, ret); + return ret; + } + + ret = nvif_notify_init(&wndw->wndw.base.user, wndw->notify.func, + false, NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT, + &(struct nvif_notify_uevent_req) {}, + sizeof(struct nvif_notify_uevent_req), + sizeof(struct nvif_notify_uevent_rep), + &wndw->notify); + if (ret) + return ret; + + wndw->ntfy = NV50_DISP_BASE_NTFY(wndw->id); + wndw->sema = NV50_DISP_BASE_SEM0(wndw->id); + wndw->data = 0x00000000; + return 0; +} + +int +base507c_new(struct nouveau_drm *drm, int head, s32 oclass, + struct nv50_wndw **pwndw) +{ + return base507c_new_(&base507c, base507c_format, drm, head, oclass, pwndw); +} diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.c b/drivers/gpu/drm/nouveau/dispnv50/core.c new file mode 100644 index 000000000000..b12899fe052a --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/core.c @@ -0,0 +1,69 @@ +/* + * Copyright 2018 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "core.h" + +#include <nvif/class.h> + +void +nv50_core_del(struct nv50_core **pcore) +{ + struct nv50_core *core = *pcore; + if (core) { + nv50_dmac_destroy(&core->chan); + kfree(*pcore); + *pcore = NULL; + } +} + +int +nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore) +{ + struct { + s32 oclass; + int version; + int (*new)(struct nouveau_drm *, s32, struct nv50_core **); + } cores[] = { + { GP102_DISP_CORE_CHANNEL_DMA, 0, core507d_new }, + { GP100_DISP_CORE_CHANNEL_DMA, 0, core507d_new }, + { GM200_DISP_CORE_CHANNEL_DMA, 0, core507d_new }, + { GM107_DISP_CORE_CHANNEL_DMA, 0, core507d_new }, + { GK110_DISP_CORE_CHANNEL_DMA, 0, core507d_new }, + { GK104_DISP_CORE_CHANNEL_DMA, 0, core507d_new }, + { GF110_DISP_CORE_CHANNEL_DMA, 0, core507d_new }, + { GT214_DISP_CORE_CHANNEL_DMA, 0, core507d_new }, + { GT206_DISP_CORE_CHANNEL_DMA, 0, core507d_new }, + { GT200_DISP_CORE_CHANNEL_DMA, 0, core507d_new }, + { G82_DISP_CORE_CHANNEL_DMA, 0, core507d_new }, + { NV50_DISP_CORE_CHANNEL_DMA, 0, core507d_new }, + {} + }; + struct nv50_disp *disp = nv50_disp(drm->dev); + int cid; + + cid = nvif_mclass(&disp->disp->object, cores); + if (cid < 0) { + NV_ERROR(drm, "No supported core channel class\n"); + return cid; + } + + return cores[cid].new(drm, cores[cid].oclass, pcore); +} diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h new file mode 100644 index 000000000000..3cd54469311a --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/core.h @@ -0,0 +1,26 @@ +#ifndef __NV50_KMS_CORE_H__ +#define __NV50_KMS_CORE_H__ +#include "disp.h" +#include "atom.h" + +struct nv50_core { + const struct nv50_core_func *func; + struct nv50_dmac chan; +}; + +int nv50_core_new(struct nouveau_drm *, struct nv50_core **); +void nv50_core_del(struct nv50_core **); + +struct nv50_core_func { + const struct nv50_head_func *head; + const struct nv50_outp_func { + void (*ctrl)(struct nv50_core *, int or, u32 ctrl, + struct nv50_head_atom *); + } *dac, *pior, *sor; +}; + +int core507d_new(struct nouveau_drm *, s32, struct nv50_core **); +extern const struct nv50_outp_func dac507d; +extern const struct nv50_outp_func sor507d; +extern const struct nv50_outp_func pior507d; +#endif diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c new file mode 100644 index 000000000000..b0325f69a26f --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c @@ -0,0 +1,65 @@ +/* + * Copyright 2018 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "core.h" +#include "head.h" + +#include <nvif/cl507d.h> + +#include "nouveau_bo.h" + +static const struct nv50_core_func +core507d = { + .head = &head507d, + .dac = &dac507d, + .sor = &sor507d, + .pior = &pior507d, +}; + +static int +core507d_new_(const struct nv50_core_func *func, struct nouveau_drm *drm, + s32 oclass, struct nv50_core **pcore) +{ + struct nv50_disp_core_channel_dma_v0 args = {}; + struct nv50_disp *disp = nv50_disp(drm->dev); + struct nv50_core *core; + int ret; + + if (!(core = *pcore = kzalloc(sizeof(*core), GFP_KERNEL))) + return -ENOMEM; + core->func = func; + + ret = nv50_dmac_create(&drm->client.device, &disp->disp->object, + &oclass, 0, &args, sizeof(args), + disp->sync->bo.offset, &core->chan); + if (ret) { + NV_ERROR(drm, "core%04x allocation failed: %d\n", oclass, ret); + return ret; + } + + return 0; +} + +int +core507d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore) +{ + return core507d_new_(&core507d, drm, oclass, pcore); +} diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs.c b/drivers/gpu/drm/nouveau/dispnv50/curs.c new file mode 100644 index 000000000000..6d60e978db69 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/curs.c @@ -0,0 +1,51 @@ +/* + * Copyright 2018 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "curs.h" + +#include <nvif/class.h> + +int +nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw) +{ + struct { + s32 oclass; + int version; + int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **); + } curses[] = { + { GK104_DISP_CURSOR, 0, curs507a_new }, + { GF110_DISP_CURSOR, 0, curs507a_new }, + { GT214_DISP_CURSOR, 0, curs507a_new }, + { G82_DISP_CURSOR, 0, curs507a_new }, + { NV50_DISP_CURSOR, 0, curs507a_new }, + {} + }; + struct nv50_disp *disp = nv50_disp(drm->dev); + int cid; + + cid = nvif_mclass(&disp->disp->object, curses); + if (cid < 0) { + NV_ERROR(drm, "No supported cursor immediate class\n"); + return cid; + } + + return curses[cid].new(drm, head, curses[cid].oclass, pwndw); +} diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs.h b/drivers/gpu/drm/nouveau/dispnv50/curs.h new file mode 100644 index 000000000000..b85ca9fa419c --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/curs.h @@ -0,0 +1,8 @@ +#ifndef __NV50_KMS_CURS_H__ +#define __NV50_KMS_CURS_H__ +#include "wndw.h" + +int curs507a_new(struct nouveau_drm *, int, s32, struct nv50_wndw **); + +int nv50_curs_new(struct nouveau_drm *, int head, struct nv50_wndw **); +#endif diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c new file mode 100644 index 000000000000..1a3e199b5b45 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c @@ -0,0 +1,151 @@ +/* + * Copyright 2018 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "curs.h" +#include "core.h" + +#include <nvif/cl507a.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_plane_helper.h> + +static u32 +curs507a_update(struct nv50_wndw *wndw, u32 interlock) +{ + nvif_wr32(&wndw->wimm.base.user, 0x0080, 0x00000000); + return 0; +} + +static void +curs507a_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) +{ + nvif_wr32(&wndw->wimm.base.user, 0x0084, asyw->point.y << 16 | + asyw->point.x); +} + +static const struct nv50_wimm_func +curs507a = { + .point = curs507a_point, + .update = curs507a_update, +}; + +static void +curs507a_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh, + struct nv50_wndw_atom *asyw) +{ + u32 handle = nv50_disp(wndw->plane.dev)->core->chan.vram.handle; + u32 offset = asyw->image.offset; + if (asyh->curs.handle != handle || asyh->curs.offset != offset) { + asyh->curs.handle = handle; + asyh->curs.offset = offset; + asyh->set.curs = asyh->curs.visible; + } +} + +static void +curs507a_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, + struct nv50_head_atom *asyh) +{ + asyh->curs.visible = false; +} + +static int +curs507a_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, + struct nv50_head_atom *asyh) +{ + int ret; + + ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + true, true); + asyh->curs.visible = asyw->state.visible; + if (ret || !asyh->curs.visible) + return ret; + + switch (asyw->state.fb->width) { + case 32: asyh->curs.layout = 0; break; + case 64: asyh->curs.layout = 1; break; + default: + return -EINVAL; + } + + if (asyw->state.fb->width != asyw->state.fb->height) + return -EINVAL; + + switch (asyw->state.fb->format->format) { + case DRM_FORMAT_ARGB8888: asyh->curs.format = 1; break; + default: + WARN_ON(1); + return -EINVAL; + } + + return 0; +} + +static const u32 +curs507a_format[] = { + DRM_FORMAT_ARGB8888, + 0 +}; + +static const struct nv50_wndw_func +curs507a_wndw = { + .acquire = curs507a_acquire, + .release = curs507a_release, + .prepare = curs507a_prepare, +}; + +static int +curs507a_new_(const struct nv50_wimm_func *func, struct nouveau_drm *drm, + int head, s32 oclass, struct nv50_wndw **pwndw) +{ + struct nv50_disp_cursor_v0 args = { + .head = head, + }; + struct nv50_disp *disp = nv50_disp(drm->dev); + struct nv50_wndw *wndw; + int ret; + + ret = nv50_wndw_new_(&curs507a_wndw, drm->dev, DRM_PLANE_TYPE_CURSOR, + "curs", head, curs507a_format, &wndw); + if (*pwndw = wndw, ret) + return ret; + + ret = nvif_object_init(&disp->disp->object, 0, oclass, &args, + sizeof(args), &wndw->wimm.base.user); + if (ret) { + NV_ERROR(drm, "curs%04x allocation failed: %d\n", oclass, ret); + return ret; + } + + nvif_object_map(&wndw->wimm.base.user, NULL, 0); + wndw->immd = func; + wndw->ctxdma.parent = &disp->core->chan.base.user; + return 0; +} + +int +curs507a_new(struct nouveau_drm *drm, int head, s32 oclass, + struct nv50_wndw **pwndw) +{ + return curs507a_new_(&curs507a, drm, head, oclass, pwndw); +} diff --git a/drivers/gpu/drm/nouveau/dispnv50/dac507d.c b/drivers/gpu/drm/nouveau/dispnv50/dac507d.c new file mode 100644 index 000000000000..28b6025a80f3 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/dac507d.c @@ -0,0 +1,51 @@ +/* + * Copyright 2018 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "core.h" + +#include <nvif/class.h> + +static void +dac507d_ctrl(struct nv50_core *core, int or, u32 ctrl, + struct nv50_head_atom *asyh) +{ + u32 *push, sync = 0; + if ((push = evo_wait(&core->chan, 3))) { + if (core->chan.base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) { + if (asyh) { + sync |= asyh->or.nvsync << 1; + sync |= asyh->or.nhsync; + } + evo_mthd(push, 0x0400 + (or * 0x080), 2); + evo_data(push, ctrl); + evo_data(push, sync); + } else { + evo_mthd(push, 0x0180 + (or * 0x020), 1); + evo_data(push, ctrl); + } + evo_kick(push, &core->chan); + } +} + +const struct nv50_outp_func +dac507d = { + .ctrl = dac507d_ctrl, +}; diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 995109ee5762..a8367c5d4691 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -21,12 +21,16 @@ * * Authors: Ben Skeggs */ +#include "disp.h" +#include "atom.h" +#include "core.h" +#include "head.h" +#include "wndw.h" #include <linux/dma-mapping.h> #include <linux/hdmi.h> #include <drm/drmP.h> -#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_dp_helper.h> @@ -34,16 +38,10 @@ #include <drm/drm_plane_helper.h> #include <drm/drm_edid.h> -#include <nvif/mem.h> - #include <nvif/class.h> #include <nvif/cl0002.h> #include <nvif/cl5070.h> -#include <nvif/cl507a.h> -#include <nvif/cl507b.h> -#include <nvif/cl507c.h> #include <nvif/cl507d.h> -#include <nvif/cl507e.h> #include <nvif/event.h> #include "nouveau_drv.h" @@ -51,39 +49,12 @@ #include "nouveau_gem.h" #include "nouveau_connector.h" #include "nouveau_encoder.h" -#include "nouveau_crtc.h" #include "nouveau_fence.h" #include "nouveau_fbcon.h" -#include "nv50_display.h" - -#define EVO_DMA_NR 9 - -#define EVO_MASTER (0x00) -#define EVO_FLIP(c) (0x01 + (c)) -#define EVO_OVLY(c) (0x05 + (c)) -#define EVO_OIMM(c) (0x09 + (c)) -#define EVO_CURS(c) (0x0d + (c)) - -/* offsets in shared sync bo of various structures */ -#define EVO_SYNC(c, o) ((c) * 0x0100 + (o)) -#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00) -#define EVO_FLIP_SEM0(c) EVO_SYNC((c) + 1, 0x00) -#define EVO_FLIP_SEM1(c) EVO_SYNC((c) + 1, 0x10) -#define EVO_FLIP_NTFY0(c) EVO_SYNC((c) + 1, 0x20) -#define EVO_FLIP_NTFY1(c) EVO_SYNC((c) + 1, 0x30) /****************************************************************************** * Atomic state 
*****************************************************************************/ -#define nv50_atom(p) container_of((p), struct nv50_atom, state) - -struct nv50_atom { - struct drm_atomic_state state; - - struct list_head outp; - bool lock_core; - bool flush_disable; -}; struct nv50_outp_atom { struct list_head head; @@ -106,209 +77,10 @@ struct nv50_outp_atom { } set; }; -#define nv50_head_atom(p) container_of((p), struct nv50_head_atom, state) - -struct nv50_head_atom { - struct drm_crtc_state state; - - struct { - u16 iW; - u16 iH; - u16 oW; - u16 oH; - } view; - - struct nv50_head_mode { - bool interlace; - u32 clock; - struct { - u16 active; - u16 synce; - u16 blanke; - u16 blanks; - } h; - struct { - u32 active; - u16 synce; - u16 blanke; - u16 blanks; - u16 blank2s; - u16 blank2e; - u16 blankus; - } v; - } mode; - - struct { - bool visible; - u32 handle; - u64 offset:40; - u8 mode:4; - } lut; - - struct { - bool visible; - u32 handle; - u64 offset:40; - u8 format; - u8 kind:7; - u8 layout:1; - u8 block:4; - u32 pitch:20; - u16 x; - u16 y; - u16 w; - u16 h; - } core; - - struct { - bool visible; - u32 handle; - u64 offset:40; - u8 layout:1; - u8 format:1; - } curs; - - struct { - u8 depth; - u8 cpp; - u16 x; - u16 y; - u16 w; - u16 h; - } base; - - struct { - u8 cpp; - } ovly; - - struct { - bool enable:1; - u8 bits:2; - u8 mode:4; - } dither; - - struct { - struct { - u16 cos:12; - u16 sin:12; - } sat; - } procamp; - - struct { - u8 nhsync:1; - u8 nvsync:1; - u8 depth:4; - } or; - - union { - struct { - bool ilut:1; - bool core:1; - bool curs:1; - }; - u8 mask; - } clr; - - union { - struct { - bool ilut:1; - bool core:1; - bool curs:1; - bool view:1; - bool mode:1; - bool base:1; - bool ovly:1; - bool dither:1; - bool procamp:1; - bool or:1; - }; - u16 mask; - } set; -}; - -static inline struct nv50_head_atom * -nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc) -{ - struct drm_crtc_state *statec = drm_atomic_get_crtc_state(state, crtc); - if (IS_ERR(statec)) - return (void *)statec; - return nv50_head_atom(statec); -} - -#define nv50_wndw_atom(p) container_of((p), struct nv50_wndw_atom, state) - -struct nv50_wndw_atom { - struct drm_plane_state state; - u8 interval; - - struct { - u32 handle; - u16 offset:12; - bool awaken:1; - } ntfy; - - struct { - u32 handle; - u16 offset:12; - u32 acquire; - u32 release; - } sema; - - struct { - u8 enable:2; - } lut; - - struct { - u8 mode:2; - u8 interval:4; - - u8 format; - u8 kind:7; - u8 layout:1; - u8 block:4; - u32 pitch:20; - u16 w; - u16 h; - - u32 handle; - u64 offset; - } image; - - struct { - u16 x; - u16 y; - } point; - - union { - struct { - bool ntfy:1; - bool sema:1; - bool image:1; - }; - u8 mask; - } clr; - - union { - struct { - bool ntfy:1; - bool sema:1; - bool image:1; - bool lut:1; - bool point:1; - }; - u8 mask; - } set; -}; - /****************************************************************************** * EVO channel *****************************************************************************/ -struct nv50_chan { - struct nvif_object user; - struct nvif_device *device; -}; - static int nv50_chan_create(struct nvif_device *device, struct nvif_object *disp, const s32 *oclass, u8 head, void *data, u32 size, @@ -351,27 +123,7 @@ nv50_chan_destroy(struct nv50_chan *chan) * DMA EVO channel *****************************************************************************/ -struct nv50_wndw_ctxdma { - struct list_head head; - struct nvif_object object; -}; - -struct nv50_dmac { - struct nv50_chan base; - - 
struct nvif_mem push; - u32 *ptr; - - struct nvif_object sync; - struct nvif_object vram; - - /* Protects against concurrent pushbuf access to this channel, lock is - * grabbed by evo_wait (if the pushbuf reservation is successful) and - * dropped again by evo_kick. */ - struct mutex lock; -}; - -static void +void nv50_dmac_destroy(struct nv50_dmac *dmac) { nvif_object_fini(&dmac->vram); @@ -382,7 +134,7 @@ nv50_dmac_destroy(struct nv50_dmac *dmac) nvif_mem_fini(&dmac->push); } -static int +int nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp, const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf, struct nv50_dmac *dmac) @@ -433,107 +185,10 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp, } /****************************************************************************** - * Base - *****************************************************************************/ - -struct nv50_sync { - struct nv50_dmac base; - u32 addr; - u32 data; -}; - -struct nv50_head { - const struct nv50_head_func *func; - struct nouveau_crtc base; - struct { - struct nouveau_bo *nvbo[2]; - int next; - } lut; -}; - -struct nv50_head_func { - void (*view)(struct nv50_head *, struct nv50_head_atom *); - void (*mode)(struct nv50_head *, struct nv50_head_atom *); - void (*ilut_set)(struct nv50_head *, struct nv50_head_atom *); - void (*ilut_clr)(struct nv50_head *); - void (*core_set)(struct nv50_head *, struct nv50_head_atom *); - void (*core_clr)(struct nv50_head *); - void (*curs_set)(struct nv50_head *, struct nv50_head_atom *); - void (*curs_clr)(struct nv50_head *); - void (*base)(struct nv50_head *, struct nv50_head_atom *); - void (*ovly)(struct nv50_head *, struct nv50_head_atom *); - void (*dither)(struct nv50_head *, struct nv50_head_atom *); - void (*procamp)(struct nv50_head *, struct nv50_head_atom *); - void (*or)(struct nv50_head *, struct nv50_head_atom *); -}; - -#define nv50_head(c) container_of((c), struct nv50_head, base.base) - -struct nv50_disp { - struct nvif_disp *disp; - struct nv50_core *core; - - struct nouveau_bo *sync; - - struct mutex mutex; -}; - -static struct nv50_disp * -nv50_disp(struct drm_device *dev) -{ - return nouveau_display(dev)->priv; -} - -/****************************************************************************** - * Core - *****************************************************************************/ - -struct nv50_core { - const struct nv50_core_func *func; - struct nv50_dmac chan; -}; - -struct nv50_core_func { - const struct nv50_head_func *head; - const struct nv50_outp_func *dac; - const struct nv50_outp_func *sor; - const struct nv50_outp_func *pior; -}; - -struct nv50_outp_func { - void (*ctrl)(struct nv50_core *, int or, u32 ctrl, - struct nv50_head_atom *); -}; - -static int -core507d_new_(const struct nv50_core_func *func, struct nouveau_drm *drm, - s32 oclass, struct nv50_core **pcore) -{ - struct nv50_disp_core_channel_dma_v0 args = {}; - struct nv50_disp *disp = nv50_disp(drm->dev); - struct nv50_core *core; - int ret; - - if (!(core = *pcore = kzalloc(sizeof(*core), GFP_KERNEL))) - return -ENOMEM; - core->func = func; - - ret = nv50_dmac_create(&drm->client.device, &disp->disp->object, - &oclass, 0, &args, sizeof(args), - disp->sync->bo.offset, &core->chan); - if (ret) { - NV_ERROR(drm, "core%04x allocation failed: %d\n", oclass, ret); - return ret; - } - - return 0; -} - -/****************************************************************************** * EVO channel helpers 
*****************************************************************************/ -static u32 * -evo_wait(void *evoc, int nr) +u32 * +evo_wait(struct nv50_dmac *evoc, int nr) { struct nv50_dmac *dmac = evoc; struct nvif_device *device = dmac->base.device; @@ -559,2063 +214,14 @@ evo_wait(void *evoc, int nr) return dmac->ptr + put; } -static void -evo_kick(u32 *push, void *evoc) +void +evo_kick(u32 *push, struct nv50_dmac *evoc) { struct nv50_dmac *dmac = evoc; nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2); mutex_unlock(&dmac->lock); } -#define evo_mthd(p, m, s) do { \ - const u32 _m = (m), _s = (s); \ - if (drm_debug & DRM_UT_KMS) \ - pr_err("%04x %d %s\n", _m, _s, __func__); \ - *((p)++) = ((_s << 18) | _m); \ -} while(0) - -#define evo_data(p, d) do { \ - const u32 _d = (d); \ - if (drm_debug & DRM_UT_KMS) \ - pr_err("\t%08x\n", _d); \ - *((p)++) = _d; \ -} while(0) - -/****************************************************************************** - * Plane - *****************************************************************************/ -#define nv50_wndw(p) container_of((p), struct nv50_wndw, plane) - -struct nv50_wndw { - const struct nv50_wndw_func *func; - const struct nv50_wimm_func *immd; - int id; - - struct { - struct nvif_object *parent; - struct list_head list; - } ctxdma; - - struct drm_plane plane; - - struct nv50_dmac wndw; - struct nv50_dmac wimm; - - struct nvif_notify notify; - u16 ntfy; - u16 sema; - u32 data; -}; - -struct nv50_wndw_func { - int (*acquire)(struct nv50_wndw *, struct nv50_wndw_atom *asyw, - struct nv50_head_atom *asyh); - void (*release)(struct nv50_wndw *, struct nv50_wndw_atom *asyw, - struct nv50_head_atom *asyh); - void (*prepare)(struct nv50_wndw *, struct nv50_head_atom *asyh, - struct nv50_wndw_atom *asyw); - - void (*sema_set)(struct nv50_wndw *, struct nv50_wndw_atom *); - void (*sema_clr)(struct nv50_wndw *); - void (*ntfy_set)(struct nv50_wndw *, struct nv50_wndw_atom *); - void (*ntfy_clr)(struct nv50_wndw *); - int (*ntfy_wait_begun)(struct nv50_wndw *, struct nv50_wndw_atom *); - void (*image_set)(struct nv50_wndw *, struct nv50_wndw_atom *); - void (*image_clr)(struct nv50_wndw *); - void (*lut)(struct nv50_wndw *, struct nv50_wndw_atom *); - - u32 (*update)(struct nv50_wndw *, u32 interlock); -}; - -struct nv50_wimm_func { - void (*point)(struct nv50_wndw *, struct nv50_wndw_atom *); - - u32 (*update)(struct nv50_wndw *, u32 interlock); -}; - -static void -nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma) -{ - nvif_object_fini(&ctxdma->object); - list_del(&ctxdma->head); - kfree(ctxdma); -} - -static struct nv50_wndw_ctxdma * -nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb) -{ - struct nouveau_drm *drm = nouveau_drm(fb->base.dev); - struct nv50_wndw_ctxdma *ctxdma; - const u8 kind = fb->nvbo->kind; - const u32 handle = 0xfb000000 | kind; - struct { - struct nv_dma_v0 base; - union { - struct nv50_dma_v0 nv50; - struct gf100_dma_v0 gf100; - struct gf119_dma_v0 gf119; - }; - } args = {}; - u32 argc = sizeof(args.base); - int ret; - - list_for_each_entry(ctxdma, &wndw->ctxdma.list, head) { - if (ctxdma->object.handle == handle) - return ctxdma; - } - - if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL))) - return ERR_PTR(-ENOMEM); - list_add(&ctxdma->head, &wndw->ctxdma.list); - - args.base.target = NV_DMA_V0_TARGET_VRAM; - args.base.access = NV_DMA_V0_ACCESS_RDWR; - args.base.start = 0; - args.base.limit = drm->client.device.info.ram_user - 1; - - if (drm->client.device.info.chipset < 0x80) { - 
args.nv50.part = NV50_DMA_V0_PART_256; - argc += sizeof(args.nv50); - } else - if (drm->client.device.info.chipset < 0xc0) { - args.nv50.part = NV50_DMA_V0_PART_256; - args.nv50.kind = kind; - argc += sizeof(args.nv50); - } else - if (drm->client.device.info.chipset < 0xd0) { - args.gf100.kind = kind; - argc += sizeof(args.gf100); - } else { - args.gf119.page = GF119_DMA_V0_PAGE_LP; - args.gf119.kind = kind; - argc += sizeof(args.gf119); - } - - ret = nvif_object_init(wndw->ctxdma.parent, handle, NV_DMA_IN_MEMORY, - &args, argc, &ctxdma->object); - if (ret) { - nv50_wndw_ctxdma_del(ctxdma); - return ERR_PTR(ret); - } - - return ctxdma; -} - -static int -nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) -{ - if (asyw->set.ntfy) - return wndw->func->ntfy_wait_begun(wndw, asyw); - return 0; -} - -static u32 -nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 interlock, bool flush, - struct nv50_wndw_atom *asyw) -{ - if (asyw->clr.sema && (!asyw->set.sema || flush)) - wndw->func->sema_clr(wndw); - if (asyw->clr.ntfy && (!asyw->set.ntfy || flush)) - wndw->func->ntfy_clr(wndw); - if (asyw->clr.image && (!asyw->set.image || flush)) - wndw->func->image_clr(wndw); - - return flush ? wndw->func->update(wndw, interlock) : 0; -} - -static u32 -nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 interlock, - struct nv50_wndw_atom *asyw) -{ - if (interlock) { - asyw->image.mode = 0; - asyw->image.interval = 1; - } - - if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw); - if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw); - if (asyw->set.image) wndw->func->image_set(wndw, asyw); - if (asyw->set.lut ) wndw->func->lut (wndw, asyw); - if (asyw->set.point) { - wndw->immd->point(wndw, asyw); - wndw->immd->update(wndw, interlock); - } - - return wndw->func->update ? wndw->func->update(wndw, interlock) : 0; -} - -static void -nv50_wndw_atomic_check_release(struct nv50_wndw *wndw, - struct nv50_wndw_atom *asyw, - struct nv50_head_atom *asyh) -{ - struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev); - NV_ATOMIC(drm, "%s release\n", wndw->plane.name); - wndw->func->release(wndw, asyw, asyh); - asyw->ntfy.handle = 0; - asyw->sema.handle = 0; -} - -static int -nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, - struct nv50_wndw_atom *asyw, - struct nv50_head_atom *asyh) -{ - struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb); - struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev); - int ret; - - NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name); - - asyw->image.w = fb->base.width; - asyw->image.h = fb->base.height; - asyw->image.kind = fb->nvbo->kind; - - if (asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) - asyw->interval = 0; - else - asyw->interval = 1; - - if (asyw->image.kind) { - asyw->image.layout = 0; - if (drm->client.device.info.chipset >= 0xc0) - asyw->image.block = fb->nvbo->mode >> 4; - else - asyw->image.block = fb->nvbo->mode; - asyw->image.pitch = (fb->base.pitches[0] / 4) << 4; - } else { - asyw->image.layout = 1; - asyw->image.block = 0; - asyw->image.pitch = fb->base.pitches[0]; - } - - ret = wndw->func->acquire(wndw, asyw, asyh); - if (ret) - return ret; - - if (asyw->set.image) { - if (!(asyw->image.mode = asyw->interval ? 
0 : 1)) - asyw->image.interval = asyw->interval; - else - asyw->image.interval = 0; - } - - return 0; -} - -static int -nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) -{ - struct nouveau_drm *drm = nouveau_drm(plane->dev); - struct nv50_wndw *wndw = nv50_wndw(plane); - struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state); - struct nv50_wndw_atom *asyw = nv50_wndw_atom(state); - struct nv50_head_atom *harm = NULL, *asyh = NULL; - bool varm = false, asyv = false, asym = false; - int ret; - - NV_ATOMIC(drm, "%s atomic_check\n", plane->name); - if (asyw->state.crtc) { - asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc); - if (IS_ERR(asyh)) - return PTR_ERR(asyh); - asym = drm_atomic_crtc_needs_modeset(&asyh->state); - asyv = asyh->state.active; - } - - if (armw->state.crtc) { - harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc); - if (IS_ERR(harm)) - return PTR_ERR(harm); - varm = harm->state.crtc->state->active; - } - - if (asyv) { - asyw->point.x = asyw->state.crtc_x; - asyw->point.y = asyw->state.crtc_y; - if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point))) - asyw->set.point = true; - - ret = nv50_wndw_atomic_check_acquire(wndw, asyw, asyh); - if (ret) - return ret; - } else - if (varm) { - nv50_wndw_atomic_check_release(wndw, asyw, harm); - } else { - return 0; - } - - if (!asyv || asym) { - asyw->clr.ntfy = armw->ntfy.handle != 0; - asyw->clr.sema = armw->sema.handle != 0; - if (wndw->func->image_clr) - asyw->clr.image = armw->image.handle != 0; - asyw->set.lut = wndw->func->lut && asyv; - } - - return 0; -} - -static void -nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state) -{ - struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb); - struct nouveau_drm *drm = nouveau_drm(plane->dev); - - NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb); - if (!old_state->fb) - return; - - nouveau_bo_unpin(fb->nvbo); -} - -static int -nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) -{ - struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb); - struct nouveau_drm *drm = nouveau_drm(plane->dev); - struct nv50_wndw *wndw = nv50_wndw(plane); - struct nv50_wndw_atom *asyw = nv50_wndw_atom(state); - struct nv50_head_atom *asyh; - struct nv50_wndw_ctxdma *ctxdma; - int ret; - - NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb); - if (!asyw->state.fb) - return 0; - - ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true); - if (ret) - return ret; - - ctxdma = nv50_wndw_ctxdma_new(wndw, fb); - if (IS_ERR(ctxdma)) { - nouveau_bo_unpin(fb->nvbo); - return PTR_ERR(ctxdma); - } - - asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv); - asyw->image.handle = ctxdma->object.handle; - asyw->image.offset = fb->nvbo->bo.offset; - - if (wndw->func->prepare) { - asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc); - if (IS_ERR(asyh)) - return PTR_ERR(asyh); - - wndw->func->prepare(wndw, asyh, asyw); - } - - return 0; -} - -static const struct drm_plane_helper_funcs -nv50_wndw_helper = { - .prepare_fb = nv50_wndw_prepare_fb, - .cleanup_fb = nv50_wndw_cleanup_fb, - .atomic_check = nv50_wndw_atomic_check, -}; - -static void -nv50_wndw_atomic_destroy_state(struct drm_plane *plane, - struct drm_plane_state *state) -{ - struct nv50_wndw_atom *asyw = nv50_wndw_atom(state); - __drm_atomic_helper_plane_destroy_state(&asyw->state); - kfree(asyw); -} - -static struct drm_plane_state * -nv50_wndw_atomic_duplicate_state(struct 
drm_plane *plane) -{ - struct nv50_wndw_atom *armw = nv50_wndw_atom(plane->state); - struct nv50_wndw_atom *asyw; - if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL))) - return NULL; - __drm_atomic_helper_plane_duplicate_state(plane, &asyw->state); - asyw->interval = 1; - asyw->sema = armw->sema; - asyw->ntfy = armw->ntfy; - asyw->image = armw->image; - asyw->point = armw->point; - asyw->lut = armw->lut; - asyw->clr.mask = 0; - asyw->set.mask = 0; - return &asyw->state; -} - -static void -nv50_wndw_reset(struct drm_plane *plane) -{ - struct nv50_wndw_atom *asyw; - - if (WARN_ON(!(asyw = kzalloc(sizeof(*asyw), GFP_KERNEL)))) - return; - - if (plane->state) - plane->funcs->atomic_destroy_state(plane, plane->state); - plane->state = &asyw->state; - plane->state->plane = plane; - plane->state->rotation = DRM_MODE_ROTATE_0; -} - -static void -nv50_wndw_destroy(struct drm_plane *plane) -{ - struct nv50_wndw *wndw = nv50_wndw(plane); - struct nv50_wndw_ctxdma *ctxdma, *ctxtmp; - - list_for_each_entry_safe(ctxdma, ctxtmp, &wndw->ctxdma.list, head) { - nv50_wndw_ctxdma_del(ctxdma); - } - - nvif_notify_fini(&wndw->notify); - nv50_dmac_destroy(&wndw->wimm); - nv50_dmac_destroy(&wndw->wndw); - drm_plane_cleanup(&wndw->plane); - kfree(wndw); -} - -static const struct drm_plane_funcs -nv50_wndw = { - .update_plane = drm_atomic_helper_update_plane, - .disable_plane = drm_atomic_helper_disable_plane, - .destroy = nv50_wndw_destroy, - .reset = nv50_wndw_reset, - .atomic_duplicate_state = nv50_wndw_atomic_duplicate_state, - .atomic_destroy_state = nv50_wndw_atomic_destroy_state, -}; - -static int -nv50_wndw_notify(struct nvif_notify *notify) -{ - return NVIF_NOTIFY_KEEP; -} - -static void -nv50_wndw_fini(struct nv50_wndw *wndw) -{ - nvif_notify_put(&wndw->notify); -} - -static void -nv50_wndw_init(struct nv50_wndw *wndw) -{ - nvif_notify_get(&wndw->notify); -} - -static int -nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev, - enum drm_plane_type type, const char *name, int index, - const u32 *format, struct nv50_wndw **pwndw) -{ - struct nv50_wndw *wndw; - int nformat; - int ret; - - if (!(wndw = *pwndw = kzalloc(sizeof(*wndw), GFP_KERNEL))) - return -ENOMEM; - wndw->func = func; - wndw->id = index; - - wndw->ctxdma.parent = &wndw->wndw.base.user; - INIT_LIST_HEAD(&wndw->ctxdma.list); - - for (nformat = 0; format[nformat]; nformat++); - - ret = drm_universal_plane_init(dev, &wndw->plane, 0, &nv50_wndw, - format, nformat, NULL, - type, "%s-%d", name, index); - if (ret) { - kfree(*pwndw); - *pwndw = NULL; - return ret; - } - - drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper); - - wndw->notify.func = nv50_wndw_notify; - return 0; -} - -/****************************************************************************** - * Overlay - *****************************************************************************/ - -static const struct nv50_wimm_func -oimm507b = { -}; - -static int -oimm507b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm, - s32 oclass, struct nv50_wndw *wndw) -{ - struct nv50_disp_overlay_v0 args = { - .head = wndw->id, - }; - struct nv50_disp *disp = nv50_disp(drm->dev); - int ret; - - ret = nvif_object_init(&disp->disp->object, 0, oclass, &args, - sizeof(args), &wndw->wimm.base.user); - if (ret) { - NV_ERROR(drm, "oimm%04x allocation failed: %d\n", oclass, ret); - return ret; - } - - nvif_object_map(&wndw->wimm.base.user, NULL, 0); - wndw->immd = func; - return 0; -} - -static int -oimm507b_init(struct nouveau_drm *drm, s32 oclass, struct nv50_wndw *wndw) 
-{ - return oimm507b_init_(&oimm507b, drm, oclass, wndw); -} - -static int -nv50_oimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw) -{ - static const struct { - s32 oclass; - int version; - int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *); - } oimms[] = { - { GK104_DISP_OVERLAY, 0, oimm507b_init }, - { GF110_DISP_OVERLAY, 0, oimm507b_init }, - { GT214_DISP_OVERLAY, 0, oimm507b_init }, - { G82_DISP_OVERLAY, 0, oimm507b_init }, - { NV50_DISP_OVERLAY, 0, oimm507b_init }, - {} - }; - struct nv50_disp *disp = nv50_disp(drm->dev); - int cid; - - cid = nvif_mclass(&disp->disp->object, oimms); - if (cid < 0) { - NV_ERROR(drm, "No supported overlay immediate class\n"); - return cid; - } - - return oimms[cid].init(drm, oimms[cid].oclass, wndw); -} - -static const struct nv50_wndw_func -ovly507e = { -}; - -static const u32 -ovly507e_format[] = { - 0 -}; - -static int -ovly507e_new_(const struct nv50_wndw_func *func, const u32 *format, - struct nouveau_drm *drm, int head, s32 oclass, - struct nv50_wndw **pwndw) -{ - struct nv50_disp_overlay_channel_dma_v0 args = { - .head = head, - }; - struct nv50_disp *disp = nv50_disp(drm->dev); - struct nv50_wndw *wndw; - int ret; - - ret = nv50_wndw_new_(func, drm->dev, DRM_PLANE_TYPE_OVERLAY, - "ovly", head, format, &wndw); - if (*pwndw = wndw, ret) - return ret; - - ret = nv50_dmac_create(&drm->client.device, &disp->disp->object, - &oclass, 0, &args, sizeof(args), - disp->sync->bo.offset, &wndw->wndw); - if (ret) { - NV_ERROR(drm, "ovly%04x allocation failed: %d\n", oclass, ret); - return ret; - } - - return 0; -} - -static int -ovly507e_new(struct nouveau_drm *drm, int head, s32 oclass, - struct nv50_wndw **pwndw) -{ - return ovly507e_new_(&ovly507e, ovly507e_format, drm, head, oclass, pwndw); -} - -static int -nv50_ovly_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw) -{ - static const struct { - s32 oclass; - int version; - int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **); - } ovlys[] = { - { GK104_DISP_OVERLAY_CONTROL_DMA, 0, ovly507e_new }, - { GF110_DISP_OVERLAY_CONTROL_DMA, 0, ovly507e_new }, - { GT214_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new }, - { GT200_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new }, - { G82_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new }, - { NV50_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new }, - {} - }; - struct nv50_disp *disp = nv50_disp(drm->dev); - int cid, ret; - - cid = nvif_mclass(&disp->disp->object, ovlys); - if (cid < 0) { - NV_ERROR(drm, "No supported overlay class\n"); - return cid; - } - - ret = ovlys[cid].new(drm, head, ovlys[cid].oclass, pwndw); - if (ret) - return ret; - - return nv50_oimm_init(drm, *pwndw); -} - -/****************************************************************************** - * Cursor plane - *****************************************************************************/ -static u32 -nv50_curs_update(struct nv50_wndw *wndw, u32 interlock) -{ - nvif_wr32(&wndw->wimm.base.user, 0x0080, 0x00000000); - return 0; -} - -static void -nv50_curs_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) -{ - nvif_wr32(&wndw->wimm.base.user, 0x0084, (asyw->point.y << 16) | - asyw->point.x); -} - -static const struct nv50_wimm_func -curs507a = { - .point = nv50_curs_point, - .update = nv50_curs_update, -}; - -static void -nv50_curs_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh, - struct nv50_wndw_atom *asyw) -{ - u32 handle = nv50_disp(wndw->plane.dev)->core->chan.vram.handle; - u32 offset = asyw->image.offset; - if (asyh->curs.handle != handle || 
asyh->curs.offset != offset) { - asyh->curs.handle = handle; - asyh->curs.offset = offset; - asyh->set.curs = asyh->curs.visible; - } -} - -static void -nv50_curs_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, - struct nv50_head_atom *asyh) -{ - asyh->curs.visible = false; -} - -static int -nv50_curs_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, - struct nv50_head_atom *asyh) -{ - int ret; - - ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state, - DRM_PLANE_HELPER_NO_SCALING, - DRM_PLANE_HELPER_NO_SCALING, - true, true); - asyh->curs.visible = asyw->state.visible; - if (ret || !asyh->curs.visible) - return ret; - - switch (asyw->state.fb->width) { - case 32: asyh->curs.layout = 0; break; - case 64: asyh->curs.layout = 1; break; - default: - return -EINVAL; - } - - if (asyw->state.fb->width != asyw->state.fb->height) - return -EINVAL; - - switch (asyw->state.fb->format->format) { - case DRM_FORMAT_ARGB8888: asyh->curs.format = 1; break; - default: - WARN_ON(1); - return -EINVAL; - } - - return 0; -} - -static const u32 -nv50_curs_format[] = { - DRM_FORMAT_ARGB8888, - 0 -}; - -static const struct nv50_wndw_func -nv50_curs = { - .acquire = nv50_curs_acquire, - .release = nv50_curs_release, - .prepare = nv50_curs_prepare, -}; - -static int -curs507a_new_(const struct nv50_wimm_func *func, struct nouveau_drm *drm, - int head, s32 oclass, struct nv50_wndw **pwndw) -{ - struct nv50_disp_cursor_v0 args = { - .head = head, - }; - struct nv50_disp *disp = nv50_disp(drm->dev); - struct nv50_wndw *wndw; - int ret; - - ret = nv50_wndw_new_(&nv50_curs, drm->dev, DRM_PLANE_TYPE_CURSOR, - "curs", head, nv50_curs_format, &wndw); - if (*pwndw = wndw, ret) - return ret; - - ret = nvif_object_init(&disp->disp->object, 0, oclass, &args, - sizeof(args), &wndw->wimm.base.user); - if (ret) { - NV_ERROR(drm, "curs%04x allocation failed: %d\n", oclass, ret); - return ret; - } - - nvif_object_map(&wndw->wimm.base.user, NULL, 0); - wndw->immd = func; - wndw->ctxdma.parent = &disp->core->chan.base.user; - return 0; -} - -static int -curs507a_new(struct nouveau_drm *drm, int head, s32 oclass, - struct nv50_wndw **pwndw) -{ - return curs507a_new_(&curs507a, drm, head, oclass, pwndw); -} - -static int -nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw) -{ - struct { - s32 oclass; - int version; - int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **); - } curses[] = { - { GK104_DISP_CURSOR, 0, curs507a_new }, - { GF110_DISP_CURSOR, 0, curs507a_new }, - { GT214_DISP_CURSOR, 0, curs507a_new }, - { G82_DISP_CURSOR, 0, curs507a_new }, - { NV50_DISP_CURSOR, 0, curs507a_new }, - {} - }; - struct nv50_disp *disp = nv50_disp(drm->dev); - int cid; - - cid = nvif_mclass(&disp->disp->object, curses); - if (cid < 0) { - NV_ERROR(drm, "No supported cursor immediate class\n"); - return cid; - } - - return curses[cid].new(drm, head, curses[cid].oclass, pwndw); -} - -/****************************************************************************** - * Primary plane - *****************************************************************************/ -static void -nv50_base_lut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) -{ - u32 *push; - if ((push = evo_wait(&wndw->wndw, 2))) { - evo_mthd(push, 0x00e0, 1); - evo_data(push, asyw->lut.enable << 30); - evo_kick(push, &wndw->wndw); - } -} - -static void -nv50_base_image_clr(struct nv50_wndw *wndw) -{ - u32 *push; - if ((push = evo_wait(&wndw->wndw, 4))) { - evo_mthd(push, 0x0084, 1); - evo_data(push, 
0x00000000); - evo_mthd(push, 0x00c0, 1); - evo_data(push, 0x00000000); - evo_kick(push, &wndw->wndw); - } -} - -static void -nv50_base_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) -{ - const s32 oclass = wndw->wndw.base.user.oclass; - u32 *push; - if ((push = evo_wait(&wndw->wndw, 10))) { - evo_mthd(push, 0x0084, 1); - evo_data(push, (asyw->image.mode << 8) | - (asyw->image.interval << 4)); - evo_mthd(push, 0x00c0, 1); - evo_data(push, asyw->image.handle); - if (oclass < G82_DISP_BASE_CHANNEL_DMA) { - evo_mthd(push, 0x0800, 5); - evo_data(push, asyw->image.offset >> 8); - evo_data(push, 0x00000000); - evo_data(push, (asyw->image.h << 16) | asyw->image.w); - evo_data(push, (asyw->image.layout << 20) | - asyw->image.pitch | - asyw->image.block); - evo_data(push, (asyw->image.kind << 16) | - (asyw->image.format << 8)); - } else - if (oclass < GF110_DISP_BASE_CHANNEL_DMA) { - evo_mthd(push, 0x0800, 5); - evo_data(push, asyw->image.offset >> 8); - evo_data(push, 0x00000000); - evo_data(push, (asyw->image.h << 16) | asyw->image.w); - evo_data(push, (asyw->image.layout << 20) | - asyw->image.pitch | - asyw->image.block); - evo_data(push, asyw->image.format << 8); - } else { - evo_mthd(push, 0x0400, 5); - evo_data(push, asyw->image.offset >> 8); - evo_data(push, 0x00000000); - evo_data(push, (asyw->image.h << 16) | asyw->image.w); - evo_data(push, (asyw->image.layout << 24) | - asyw->image.pitch | - asyw->image.block); - evo_data(push, asyw->image.format << 8); - } - evo_kick(push, &wndw->wndw); - } -} - -static void -nv50_base_ntfy_clr(struct nv50_wndw *wndw) -{ - u32 *push; - if ((push = evo_wait(&wndw->wndw, 2))) { - evo_mthd(push, 0x00a4, 1); - evo_data(push, 0x00000000); - evo_kick(push, &wndw->wndw); - } -} - -static void -nv50_base_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) -{ - u32 *push; - if ((push = evo_wait(&wndw->wndw, 3))) { - evo_mthd(push, 0x00a0, 2); - evo_data(push, (asyw->ntfy.awaken << 30) | asyw->ntfy.offset); - evo_data(push, asyw->ntfy.handle); - evo_kick(push, &wndw->wndw); - } -} - -static void -nv50_base_sema_clr(struct nv50_wndw *wndw) -{ - u32 *push; - if ((push = evo_wait(&wndw->wndw, 2))) { - evo_mthd(push, 0x0094, 1); - evo_data(push, 0x00000000); - evo_kick(push, &wndw->wndw); - } -} - -static void -nv50_base_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) -{ - u32 *push; - if ((push = evo_wait(&wndw->wndw, 5))) { - evo_mthd(push, 0x0088, 4); - evo_data(push, asyw->sema.offset); - evo_data(push, asyw->sema.acquire); - evo_data(push, asyw->sema.release); - evo_data(push, asyw->sema.handle); - evo_kick(push, &wndw->wndw); - } -} - -static u32 -nv50_base_update(struct nv50_wndw *wndw, u32 interlock) -{ - u32 *push; - - if (!(push = evo_wait(&wndw->wndw, 2))) - return 0; - evo_mthd(push, 0x0080, 1); - evo_data(push, interlock); - evo_kick(push, &wndw->wndw); - - if (wndw->wndw.base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA) - return interlock ? 2 << (wndw->id * 8) : 0; - return interlock ? 
2 << (wndw->id * 4) : 0; -} - -static int -nv50_base_ntfy_wait_begun(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) -{ - struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev); - struct nv50_disp *disp = nv50_disp(wndw->plane.dev); - if (nvif_msec(&drm->client.device, 2000ULL, - u32 data = nouveau_bo_rd32(disp->sync, asyw->ntfy.offset / 4); - if ((data & 0xc0000000) == 0x40000000) - break; - usleep_range(1, 2); - ) < 0) - return -ETIMEDOUT; - return 0; -} - -static void -nv50_base_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, - struct nv50_head_atom *asyh) -{ - asyh->base.cpp = 0; -} - -static int -nv50_base_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, - struct nv50_head_atom *asyh) -{ - const struct drm_framebuffer *fb = asyw->state.fb; - int ret; - - if (!fb->format->depth) - return -EINVAL; - - ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state, - DRM_PLANE_HELPER_NO_SCALING, - DRM_PLANE_HELPER_NO_SCALING, - false, true); - if (ret) - return ret; - - asyh->base.depth = fb->format->depth; - asyh->base.cpp = fb->format->cpp[0]; - asyh->base.x = asyw->state.src.x1 >> 16; - asyh->base.y = asyw->state.src.y1 >> 16; - asyh->base.w = asyw->state.fb->width; - asyh->base.h = asyw->state.fb->height; - - switch (fb->format->format) { - case DRM_FORMAT_C8 : asyw->image.format = 0x1e; break; - case DRM_FORMAT_RGB565 : asyw->image.format = 0xe8; break; - case DRM_FORMAT_XRGB1555 : - case DRM_FORMAT_ARGB1555 : asyw->image.format = 0xe9; break; - case DRM_FORMAT_XRGB8888 : - case DRM_FORMAT_ARGB8888 : asyw->image.format = 0xcf; break; - case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_ABGR2101010: asyw->image.format = 0xd1; break; - case DRM_FORMAT_XBGR8888 : - case DRM_FORMAT_ABGR8888 : asyw->image.format = 0xd5; break; - default: - WARN_ON(1); - return -EINVAL; - } - - asyw->lut.enable = 1; - asyw->set.image = true; - return 0; -} - -static const u32 -nv50_base_format[] = { - DRM_FORMAT_C8, - DRM_FORMAT_RGB565, - DRM_FORMAT_XRGB1555, - DRM_FORMAT_ARGB1555, - DRM_FORMAT_XRGB8888, - DRM_FORMAT_ARGB8888, - DRM_FORMAT_XBGR2101010, - DRM_FORMAT_ABGR2101010, - DRM_FORMAT_XBGR8888, - DRM_FORMAT_ABGR8888, - 0 -}; - -static const struct nv50_wndw_func -nv50_base = { - .acquire = nv50_base_acquire, - .release = nv50_base_release, - .sema_set = nv50_base_sema_set, - .sema_clr = nv50_base_sema_clr, - .ntfy_set = nv50_base_ntfy_set, - .ntfy_clr = nv50_base_ntfy_clr, - .ntfy_wait_begun = nv50_base_ntfy_wait_begun, - .image_set = nv50_base_image_set, - .image_clr = nv50_base_image_clr, - .lut = nv50_base_lut, - .update = nv50_base_update, -}; - -static int -base507c_new_(const struct nv50_wndw_func *func, const u32 *format, - struct nouveau_drm *drm, int head, s32 oclass, - struct nv50_wndw **pwndw) -{ - struct nv50_disp_base_channel_dma_v0 args = { - .head = head, - }; - struct nv50_disp *disp = nv50_disp(drm->dev); - struct nv50_wndw *wndw; - int ret; - - ret = nv50_wndw_new_(func, drm->dev, DRM_PLANE_TYPE_PRIMARY, - "base", head, format, &wndw); - if (*pwndw = wndw, ret) - return ret; - - ret = nv50_dmac_create(&drm->client.device, &disp->disp->object, - &oclass, head, &args, sizeof(args), - disp->sync->bo.offset, &wndw->wndw); - if (ret) { - NV_ERROR(drm, "base%04x allocation failed: %d\n", oclass, ret); - return ret; - } - - ret = nvif_notify_init(&wndw->wndw.base.user, wndw->notify.func, - false, NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT, - &(struct nvif_notify_uevent_req) {}, - sizeof(struct nvif_notify_uevent_req), - sizeof(struct nvif_notify_uevent_rep), 
- &wndw->notify); - if (ret) - return ret; - - wndw->ntfy = EVO_FLIP_NTFY0(wndw->id); - wndw->sema = EVO_FLIP_SEM0(wndw->id); - wndw->data = 0x00000000; - return 0; -} - -static int -base507c_new(struct nouveau_drm *drm, int head, s32 oclass, - struct nv50_wndw **pwndw) -{ - return base507c_new_(&nv50_base, nv50_base_format, drm, head, oclass, pwndw); -} - -static int -nv50_base_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw) -{ - struct { - s32 oclass; - int version; - int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **); - } bases[] = { - { GK110_DISP_BASE_CHANNEL_DMA, 0, base507c_new }, - { GK104_DISP_BASE_CHANNEL_DMA, 0, base507c_new }, - { GF110_DISP_BASE_CHANNEL_DMA, 0, base507c_new }, - { GT214_DISP_BASE_CHANNEL_DMA, 0, base507c_new }, - { GT200_DISP_BASE_CHANNEL_DMA, 0, base507c_new }, - { G82_DISP_BASE_CHANNEL_DMA, 0, base507c_new }, - { NV50_DISP_BASE_CHANNEL_DMA, 0, base507c_new }, - {} - }; - struct nv50_disp *disp = nv50_disp(drm->dev); - int cid; - - cid = nvif_mclass(&disp->disp->object, bases); - if (cid < 0) { - NV_ERROR(drm, "No supported base class\n"); - return cid; - } - - return bases[cid].new(drm, head, bases[cid].oclass, pwndw); -} - -/****************************************************************************** - * Head - *****************************************************************************/ -static void -head907d_or(struct nv50_head *head, struct nv50_head_atom *asyh) -{ - struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; - u32 *push; - if (core->base.user.oclass >= GF110_DISP_CORE_CHANNEL_DMA && - (push = evo_wait(core, 3))) { - evo_mthd(push, 0x0404 + (head->base.index * 0x300), 2); - evo_data(push, 0x00000001 | (asyh->or.depth << 6) | - (asyh->or.nvsync << 4) | - (asyh->or.nhsync << 3)); - evo_data(push, 0x31ec6000 | (head->base.index << 25) | - asyh->mode.interlace); - evo_kick(push, core); - } -} - -static void -nv50_head_procamp(struct nv50_head *head, struct nv50_head_atom *asyh) -{ - struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; - u32 *push; - if ((push = evo_wait(core, 2))) { - if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) - evo_mthd(push, 0x08a8 + (head->base.index * 0x400), 1); - else - evo_mthd(push, 0x0498 + (head->base.index * 0x300), 1); - evo_data(push, (asyh->procamp.sat.sin << 20) | - (asyh->procamp.sat.cos << 8)); - evo_kick(push, core); - } -} - -static void -nv50_head_dither(struct nv50_head *head, struct nv50_head_atom *asyh) -{ - struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; - u32 *push; - if ((push = evo_wait(core, 2))) { - if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) - evo_mthd(push, 0x08a0 + (head->base.index * 0x0400), 1); - else - if (core->base.user.oclass < GK104_DISP_CORE_CHANNEL_DMA) - evo_mthd(push, 0x0490 + (head->base.index * 0x0300), 1); - else - evo_mthd(push, 0x04a0 + (head->base.index * 0x0300), 1); - evo_data(push, (asyh->dither.mode << 3) | - (asyh->dither.bits << 1) | - asyh->dither.enable); - evo_kick(push, core); - } -} - -static void -nv50_head_ovly(struct nv50_head *head, struct nv50_head_atom *asyh) -{ - struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; - u32 bounds = 0; - u32 *push; - - if (asyh->base.cpp) { - switch (asyh->base.cpp) { - case 8: bounds |= 0x00000500; break; - case 4: bounds |= 0x00000300; break; - case 2: bounds |= 0x00000100; break; - default: - WARN_ON(1); - break; - } - bounds |= 0x00000001; - } - - if ((push = evo_wait(core, 2))) { - if 
(core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) - evo_mthd(push, 0x0904 + head->base.index * 0x400, 1); - else - evo_mthd(push, 0x04d4 + head->base.index * 0x300, 1); - evo_data(push, bounds); - evo_kick(push, core); - } -} - -static void -nv50_head_base(struct nv50_head *head, struct nv50_head_atom *asyh) -{ - struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; - u32 bounds = 0; - u32 *push; - - if (asyh->base.cpp) { - switch (asyh->base.cpp) { - case 8: bounds |= 0x00000500; break; - case 4: bounds |= 0x00000300; break; - case 2: bounds |= 0x00000100; break; - case 1: bounds |= 0x00000000; break; - default: - WARN_ON(1); - break; - } - bounds |= 0x00000001; - } - - if ((push = evo_wait(core, 2))) { - if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) - evo_mthd(push, 0x0900 + head->base.index * 0x400, 1); - else - evo_mthd(push, 0x04d0 + head->base.index * 0x300, 1); - evo_data(push, bounds); - evo_kick(push, core); - } -} - -static void -nv50_head_curs_clr(struct nv50_head *head) -{ - struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; - u32 *push; - if ((push = evo_wait(core, 4))) { - if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) { - evo_mthd(push, 0x0880 + head->base.index * 0x400, 1); - evo_data(push, 0x05000000); - } else - if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) { - evo_mthd(push, 0x0880 + head->base.index * 0x400, 1); - evo_data(push, 0x05000000); - evo_mthd(push, 0x089c + head->base.index * 0x400, 1); - evo_data(push, 0x00000000); - } else { - evo_mthd(push, 0x0480 + head->base.index * 0x300, 1); - evo_data(push, 0x05000000); - evo_mthd(push, 0x048c + head->base.index * 0x300, 1); - evo_data(push, 0x00000000); - } - evo_kick(push, core); - } -} - -static void -nv50_head_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh) -{ - struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; - u32 *push; - if ((push = evo_wait(core, 5))) { - if (core->base.user.oclass < G82_DISP_BASE_CHANNEL_DMA) { - evo_mthd(push, 0x0880 + head->base.index * 0x400, 2); - evo_data(push, 0x80000000 | (asyh->curs.layout << 26) | - (asyh->curs.format << 24)); - evo_data(push, asyh->curs.offset >> 8); - } else - if (core->base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA) { - evo_mthd(push, 0x0880 + head->base.index * 0x400, 2); - evo_data(push, 0x80000000 | (asyh->curs.layout << 26) | - (asyh->curs.format << 24)); - evo_data(push, asyh->curs.offset >> 8); - evo_mthd(push, 0x089c + head->base.index * 0x400, 1); - evo_data(push, asyh->curs.handle); - } else { - evo_mthd(push, 0x0480 + head->base.index * 0x300, 2); - evo_data(push, 0x80000000 | (asyh->curs.layout << 26) | - (asyh->curs.format << 24)); - evo_data(push, asyh->curs.offset >> 8); - evo_mthd(push, 0x048c + head->base.index * 0x300, 1); - evo_data(push, asyh->curs.handle); - } - evo_kick(push, core); - } -} - -static void -nv50_head_core_clr(struct nv50_head *head) -{ - struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; - u32 *push; - if ((push = evo_wait(core, 2))) { - if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) - evo_mthd(push, 0x0874 + head->base.index * 0x400, 1); - else - evo_mthd(push, 0x0474 + head->base.index * 0x300, 1); - evo_data(push, 0x00000000); - evo_kick(push, core); - } -} - -static void -nv50_head_core_set(struct nv50_head *head, struct nv50_head_atom *asyh) -{ - struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; - u32 *push; - if ((push = evo_wait(core, 9))) { - if 
(core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) { - evo_mthd(push, 0x0860 + head->base.index * 0x400, 1); - evo_data(push, asyh->core.offset >> 8); - evo_mthd(push, 0x0868 + head->base.index * 0x400, 4); - evo_data(push, (asyh->core.h << 16) | asyh->core.w); - evo_data(push, asyh->core.layout << 20 | - (asyh->core.pitch >> 8) << 8 | - asyh->core.block); - evo_data(push, asyh->core.kind << 16 | - asyh->core.format << 8); - evo_data(push, asyh->core.handle); - evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1); - evo_data(push, (asyh->core.y << 16) | asyh->core.x); - /* EVO will complain with INVALID_STATE if we have an - * active cursor and (re)specify HeadSetContextDmaIso - * without also updating HeadSetOffsetCursor. - */ - asyh->set.curs = asyh->curs.visible; - } else - if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) { - evo_mthd(push, 0x0860 + head->base.index * 0x400, 1); - evo_data(push, asyh->core.offset >> 8); - evo_mthd(push, 0x0868 + head->base.index * 0x400, 4); - evo_data(push, (asyh->core.h << 16) | asyh->core.w); - evo_data(push, asyh->core.layout << 20 | - (asyh->core.pitch >> 8) << 8 | - asyh->core.block); - evo_data(push, asyh->core.format << 8); - evo_data(push, asyh->core.handle); - evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1); - evo_data(push, (asyh->core.y << 16) | asyh->core.x); - } else { - evo_mthd(push, 0x0460 + head->base.index * 0x300, 1); - evo_data(push, asyh->core.offset >> 8); - evo_mthd(push, 0x0468 + head->base.index * 0x300, 4); - evo_data(push, (asyh->core.h << 16) | asyh->core.w); - evo_data(push, asyh->core.layout << 24 | - (asyh->core.pitch >> 8) << 8 | - asyh->core.block); - evo_data(push, asyh->core.format << 8); - evo_data(push, asyh->core.handle); - evo_mthd(push, 0x04b0 + head->base.index * 0x300, 1); - evo_data(push, (asyh->core.y << 16) | asyh->core.x); - } - evo_kick(push, core); - } -} - -static void -nv50_head_lut_clr(struct nv50_head *head) -{ - struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; - u32 *push; - if ((push = evo_wait(core, 4))) { - if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) { - evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1); - evo_data(push, 0x40000000); - } else - if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) { - evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1); - evo_data(push, 0x40000000); - evo_mthd(push, 0x085c + (head->base.index * 0x400), 1); - evo_data(push, 0x00000000); - } else { - evo_mthd(push, 0x0440 + (head->base.index * 0x300), 1); - evo_data(push, 0x03000000); - evo_mthd(push, 0x045c + (head->base.index * 0x300), 1); - evo_data(push, 0x00000000); - } - evo_kick(push, core); - } -} - -static void -nv50_head_lut_load(struct drm_property_blob *blob, int mode, - struct nouveau_bo *nvbo) -{ - struct drm_color_lut *in = (struct drm_color_lut *)blob->data; - void __iomem *lut = (u8 *)nvbo_kmap_obj_iovirtual(nvbo); - const int size = blob->length / sizeof(*in); - int bits, shift, i; - u16 zero, r, g, b; - - /* This can't happen.. But it shuts the compiler up. */ - if (WARN_ON(size != 256)) - return; - - switch (mode) { - case 0: /* LORES. */ - case 1: /* HIRES. */ - bits = 11; - shift = 3; - zero = 0x0000; - break; - case 7: /* INTERPOLATE_257_UNITY_RANGE. */ - bits = 14; - shift = 0; - zero = 0x6000; - break; - default: - WARN_ON(1); - return; - } - - for (i = 0; i < size; i++) { - r = (drm_color_lut_extract(in[i]. 
red, bits) + zero) << shift; - g = (drm_color_lut_extract(in[i].green, bits) + zero) << shift; - b = (drm_color_lut_extract(in[i]. blue, bits) + zero) << shift; - writew(r, lut + (i * 0x08) + 0); - writew(g, lut + (i * 0x08) + 2); - writew(b, lut + (i * 0x08) + 4); - } - - /* INTERPOLATE modes require a "next" entry to interpolate with, - * so we replicate the last entry to deal with this for now. - */ - writew(r, lut + (i * 0x08) + 0); - writew(g, lut + (i * 0x08) + 2); - writew(b, lut + (i * 0x08) + 4); -} - -static void -nv50_head_lut_set(struct nv50_head *head, struct nv50_head_atom *asyh) -{ - struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; - u32 *push; - if ((push = evo_wait(core, 7))) { - if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) { - evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2); - evo_data(push, 0x80000000 | asyh->lut.mode << 30); - evo_data(push, asyh->lut.offset >> 8); - } else - if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) { - evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2); - evo_data(push, 0x80000000 | asyh->lut.mode << 30); - evo_data(push, asyh->lut.offset >> 8); - evo_mthd(push, 0x085c + (head->base.index * 0x400), 1); - evo_data(push, asyh->lut.handle); - } else { - evo_mthd(push, 0x0440 + (head->base.index * 0x300), 4); - evo_data(push, 0x80000000 | asyh->lut.mode << 24); - evo_data(push, asyh->lut.offset >> 8); - evo_data(push, 0x00000000); - evo_data(push, 0x00000000); - evo_mthd(push, 0x045c + (head->base.index * 0x300), 1); - evo_data(push, asyh->lut.handle); - } - evo_kick(push, core); - } -} - -static void -nv50_head_mode(struct nv50_head *head, struct nv50_head_atom *asyh) -{ - struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; - struct nv50_head_mode *m = &asyh->mode; - u32 *push; - if ((push = evo_wait(core, 14))) { - if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) { - evo_mthd(push, 0x0804 + (head->base.index * 0x400), 2); - evo_data(push, 0x00800000 | m->clock); - evo_data(push, m->interlace ? 0x00000002 : 0x00000000); - evo_mthd(push, 0x0810 + (head->base.index * 0x400), 7); - evo_data(push, 0x00000000); - evo_data(push, (m->v.active << 16) | m->h.active ); - evo_data(push, (m->v.synce << 16) | m->h.synce ); - evo_data(push, (m->v.blanke << 16) | m->h.blanke ); - evo_data(push, (m->v.blanks << 16) | m->h.blanks ); - evo_data(push, (m->v.blank2e << 16) | m->v.blank2s); - evo_data(push, asyh->mode.v.blankus); - evo_mthd(push, 0x082c + (head->base.index * 0x400), 1); - evo_data(push, 0x00000000); - } else { - evo_mthd(push, 0x0410 + (head->base.index * 0x300), 6); - evo_data(push, 0x00000000); - evo_data(push, (m->v.active << 16) | m->h.active ); - evo_data(push, (m->v.synce << 16) | m->h.synce ); - evo_data(push, (m->v.blanke << 16) | m->h.blanke ); - evo_data(push, (m->v.blanks << 16) | m->h.blanks ); - evo_data(push, (m->v.blank2e << 16) | m->v.blank2s); - evo_mthd(push, 0x042c + (head->base.index * 0x300), 2); - evo_data(push, 0x00000000); /* ??? */ - evo_data(push, 0xffffff00); - evo_mthd(push, 0x0450 + (head->base.index * 0x300), 3); - evo_data(push, m->clock * 1000); - evo_data(push, 0x00200000); /* ??? 
*/ - evo_data(push, m->clock * 1000); - } - evo_kick(push, core); - } -} - -static void -nv50_head_view(struct nv50_head *head, struct nv50_head_atom *asyh) -{ - struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; - u32 *push; - if ((push = evo_wait(core, 10))) { - if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) { - evo_mthd(push, 0x08a4 + (head->base.index * 0x400), 1); - evo_data(push, 0x00000000); - evo_mthd(push, 0x08c8 + (head->base.index * 0x400), 1); - evo_data(push, (asyh->view.iH << 16) | asyh->view.iW); - evo_mthd(push, 0x08d8 + (head->base.index * 0x400), 2); - evo_data(push, (asyh->view.oH << 16) | asyh->view.oW); - evo_data(push, (asyh->view.oH << 16) | asyh->view.oW); - } else { - evo_mthd(push, 0x0494 + (head->base.index * 0x300), 1); - evo_data(push, 0x00000000); - evo_mthd(push, 0x04b8 + (head->base.index * 0x300), 1); - evo_data(push, (asyh->view.iH << 16) | asyh->view.iW); - evo_mthd(push, 0x04c0 + (head->base.index * 0x300), 3); - evo_data(push, (asyh->view.oH << 16) | asyh->view.oW); - evo_data(push, (asyh->view.oH << 16) | asyh->view.oW); - evo_data(push, (asyh->view.oH << 16) | asyh->view.oW); - } - evo_kick(push, core); - } -} - -static const struct nv50_head_func -head507d = { - .view = nv50_head_view, - .mode = nv50_head_mode, - .ilut_set = nv50_head_lut_set, - .ilut_clr = nv50_head_lut_clr, - .core_set = nv50_head_core_set, - .core_clr = nv50_head_core_clr, - .curs_set = nv50_head_curs_set, - .curs_clr = nv50_head_curs_clr, - .base = nv50_head_base, - .ovly = nv50_head_ovly, - .dither = nv50_head_dither, - .procamp = nv50_head_procamp, - .or = head907d_or, -}; - -static void -nv50_head_flush_clr(struct nv50_head *head, struct nv50_head_atom *asyh, bool y) -{ - if (asyh->clr.ilut && (!asyh->set.ilut || y)) - head->func->ilut_clr(head); - if (asyh->clr.core && (!asyh->set.core || y)) - head->func->core_clr(head); - if (asyh->clr.curs && (!asyh->set.curs || y)) - head->func->curs_clr(head); -} - -static void -nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh) -{ - if (asyh->set.view ) head->func->view (head, asyh); - if (asyh->set.mode ) head->func->mode (head, asyh); - if (asyh->set.ilut ) { - struct nouveau_bo *nvbo = head->lut.nvbo[head->lut.next]; - struct drm_property_blob *blob = asyh->state.gamma_lut; - if (blob) - nv50_head_lut_load(blob, asyh->lut.mode, nvbo); - asyh->lut.offset = nvbo->bo.offset; - head->lut.next ^= 1; - head->func->ilut_set(head, asyh); - } - if (asyh->set.core ) head->func->core_set(head, asyh); - if (asyh->set.curs ) head->func->curs_set(head, asyh); - if (asyh->set.base ) head->func->base (head, asyh); - if (asyh->set.ovly ) head->func->ovly (head, asyh); - if (asyh->set.dither ) head->func->dither (head, asyh); - if (asyh->set.procamp) head->func->procamp (head, asyh); - if (asyh->set.or ) head->func->or (head, asyh); -} - -static void -nv50_head_atomic_check_procamp(struct nv50_head_atom *armh, - struct nv50_head_atom *asyh, - struct nouveau_conn_atom *asyc) -{ - const int vib = asyc->procamp.color_vibrance - 100; - const int hue = asyc->procamp.vibrant_hue - 90; - const int adj = (vib > 0) ? 
50 : 0; - asyh->procamp.sat.cos = ((vib * 2047 + adj) / 100) & 0xfff; - asyh->procamp.sat.sin = ((hue * 2047) / 100) & 0xfff; - asyh->set.procamp = true; -} - -static void -nv50_head_atomic_check_dither(struct nv50_head_atom *armh, - struct nv50_head_atom *asyh, - struct nouveau_conn_atom *asyc) -{ - struct drm_connector *connector = asyc->state.connector; - u32 mode = 0x00; - - if (asyc->dither.mode == DITHERING_MODE_AUTO) { - if (asyh->base.depth > connector->display_info.bpc * 3) - mode = DITHERING_MODE_DYNAMIC2X2; - } else { - mode = asyc->dither.mode; - } - - if (asyc->dither.depth == DITHERING_DEPTH_AUTO) { - if (connector->display_info.bpc >= 8) - mode |= DITHERING_DEPTH_8BPC; - } else { - mode |= asyc->dither.depth; - } - - asyh->dither.enable = mode; - asyh->dither.bits = mode >> 1; - asyh->dither.mode = mode >> 3; - asyh->set.dither = true; -} - -static void -nv50_head_atomic_check_view(struct nv50_head_atom *armh, - struct nv50_head_atom *asyh, - struct nouveau_conn_atom *asyc) -{ - struct drm_connector *connector = asyc->state.connector; - struct drm_display_mode *omode = &asyh->state.adjusted_mode; - struct drm_display_mode *umode = &asyh->state.mode; - int mode = asyc->scaler.mode; - struct edid *edid; - int umode_vdisplay, omode_hdisplay, omode_vdisplay; - - if (connector->edid_blob_ptr) - edid = (struct edid *)connector->edid_blob_ptr->data; - else - edid = NULL; - - if (!asyc->scaler.full) { - if (mode == DRM_MODE_SCALE_NONE) - omode = umode; - } else { - /* Non-EDID LVDS/eDP mode. */ - mode = DRM_MODE_SCALE_FULLSCREEN; - } - - /* For the user-specified mode, we must ignore doublescan and - * the like, but honor frame packing. - */ - umode_vdisplay = umode->vdisplay; - if ((umode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING) - umode_vdisplay += umode->vtotal; - asyh->view.iW = umode->hdisplay; - asyh->view.iH = umode_vdisplay; - /* For the output mode, we can just use the stock helper. */ - drm_mode_get_hv_timing(omode, &omode_hdisplay, &omode_vdisplay); - asyh->view.oW = omode_hdisplay; - asyh->view.oH = omode_vdisplay; - - /* Add overscan compensation if necessary, will keep the aspect - * ratio the same as the backend mode unless overridden by the - * user setting both hborder and vborder properties. - */ - if ((asyc->scaler.underscan.mode == UNDERSCAN_ON || - (asyc->scaler.underscan.mode == UNDERSCAN_AUTO && - drm_detect_hdmi_monitor(edid)))) { - u32 bX = asyc->scaler.underscan.hborder; - u32 bY = asyc->scaler.underscan.vborder; - u32 r = (asyh->view.oH << 19) / asyh->view.oW; - - if (bX) { - asyh->view.oW -= (bX * 2); - if (bY) asyh->view.oH -= (bY * 2); - else asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19; - } else { - asyh->view.oW -= (asyh->view.oW >> 4) + 32; - if (bY) asyh->view.oH -= (bY * 2); - else asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19; - } - } - - /* Handle CENTER/ASPECT scaling, taking into account the areas - * removed already for overscan compensation. 
- */ - switch (mode) { - case DRM_MODE_SCALE_CENTER: - asyh->view.oW = min((u16)umode->hdisplay, asyh->view.oW); - asyh->view.oH = min((u16)umode_vdisplay, asyh->view.oH); - /* fall-through */ - case DRM_MODE_SCALE_ASPECT: - if (asyh->view.oH < asyh->view.oW) { - u32 r = (asyh->view.iW << 19) / asyh->view.iH; - asyh->view.oW = ((asyh->view.oH * r) + (r / 2)) >> 19; - } else { - u32 r = (asyh->view.iH << 19) / asyh->view.iW; - asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19; - } - break; - default: - break; - } - - asyh->set.view = true; -} - -static void -nv50_head_atomic_check_lut(struct nv50_head *head, - struct nv50_head_atom *armh, - struct nv50_head_atom *asyh) -{ - struct nv50_disp *disp = nv50_disp(head->base.base.dev); - - /* An I8 surface without an input LUT makes no sense, and - * EVO will throw an error if you try. - * - * Legacy clients actually cause this due to the order in - * which they call ioctls, so we will enable the LUT with - * whatever contents the buffer already contains to avoid - * triggering the error check. - */ - if (!asyh->state.gamma_lut && asyh->base.cpp != 1) { - asyh->lut.handle = 0; - asyh->clr.ilut = armh->lut.visible; - return; - } - - if (disp->disp->object.oclass < GF110_DISP) { - asyh->lut.mode = (asyh->base.cpp == 1) ? 0 : 1; - asyh->set.ilut = true; - } else { - asyh->lut.mode = 7; - asyh->set.ilut = asyh->state.color_mgmt_changed; - } - asyh->lut.handle = disp->core->chan.vram.handle; -} - -static void -nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh) -{ - struct drm_display_mode *mode = &asyh->state.adjusted_mode; - struct nv50_head_mode *m = &asyh->mode; - u32 blankus; - - drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V | CRTC_STEREO_DOUBLE); - - /* - * DRM modes are defined in terms of a repeating interval - * starting with the active display area. The hardware modes - * are defined in terms of a repeating interval starting one - * unit (pixel or line) into the sync pulse. So, add bias. 
- */ - - m->h.active = mode->crtc_htotal; - m->h.synce = mode->crtc_hsync_end - mode->crtc_hsync_start - 1; - m->h.blanke = mode->crtc_hblank_end - mode->crtc_hsync_start - 1; - m->h.blanks = m->h.blanke + mode->crtc_hdisplay; - - m->v.active = mode->crtc_vtotal; - m->v.synce = mode->crtc_vsync_end - mode->crtc_vsync_start - 1; - m->v.blanke = mode->crtc_vblank_end - mode->crtc_vsync_start - 1; - m->v.blanks = m->v.blanke + mode->crtc_vdisplay; - - /*XXX: Safe underestimate, even "0" works */ - blankus = (m->v.active - mode->crtc_vdisplay - 2) * m->h.active; - blankus *= 1000; - blankus /= mode->crtc_clock; - m->v.blankus = blankus; - - if (mode->flags & DRM_MODE_FLAG_INTERLACE) { - m->v.blank2e = m->v.active + m->v.blanke; - m->v.blank2s = m->v.blank2e + mode->crtc_vdisplay; - m->v.active = (m->v.active * 2) + 1; - m->interlace = true; - } else { - m->v.blank2e = 0; - m->v.blank2s = 1; - m->interlace = false; - } - m->clock = mode->crtc_clock; - - asyh->or.nhsync = !!(mode->flags & DRM_MODE_FLAG_NHSYNC); - asyh->or.nvsync = !!(mode->flags & DRM_MODE_FLAG_NVSYNC); - asyh->set.or = head->func->or != NULL; - asyh->set.mode = true; -} - -static int -nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state) -{ - struct nouveau_drm *drm = nouveau_drm(crtc->dev); - struct nv50_disp *disp = nv50_disp(crtc->dev); - struct nv50_head *head = nv50_head(crtc); - struct nv50_head_atom *armh = nv50_head_atom(crtc->state); - struct nv50_head_atom *asyh = nv50_head_atom(state); - struct nouveau_conn_atom *asyc = NULL; - struct drm_connector_state *conns; - struct drm_connector *conn; - int i; - - NV_ATOMIC(drm, "%s atomic_check %d\n", crtc->name, asyh->state.active); - if (asyh->state.active) { - for_each_new_connector_in_state(asyh->state.state, conn, conns, i) { - if (conns->crtc == crtc) { - asyc = nouveau_conn_atom(conns); - break; - } - } - - if (armh->state.active) { - if (asyc) { - if (asyh->state.mode_changed) - asyc->set.scaler = true; - if (armh->base.depth != asyh->base.depth) - asyc->set.dither = true; - } - } else { - if (asyc) - asyc->set.mask = ~0; - asyh->set.mask = ~0; - asyh->set.or = head->func->or != NULL; - } - - if (asyh->state.mode_changed) - nv50_head_atomic_check_mode(head, asyh); - - if (asyh->state.color_mgmt_changed || - asyh->base.cpp != armh->base.cpp) - nv50_head_atomic_check_lut(head, armh, asyh); - asyh->lut.visible = asyh->lut.handle != 0; - - if (asyc) { - if (asyc->set.scaler) - nv50_head_atomic_check_view(armh, asyh, asyc); - if (asyc->set.dither) - nv50_head_atomic_check_dither(armh, asyh, asyc); - if (asyc->set.procamp) - nv50_head_atomic_check_procamp(armh, asyh, asyc); - } - - if ((asyh->core.visible = (asyh->base.cpp != 0))) { - asyh->core.x = asyh->base.x; - asyh->core.y = asyh->base.y; - asyh->core.w = asyh->base.w; - asyh->core.h = asyh->base.h; - } else - if ((asyh->core.visible = asyh->curs.visible) || - (asyh->core.visible = asyh->lut.visible)) { - /*XXX: We need to either find some way of having the - * primary base layer appear black, while still - * being able to display the other layers, or we - * need to allocate a dummy black surface here. 
- */ - asyh->core.x = 0; - asyh->core.y = 0; - asyh->core.w = asyh->state.mode.hdisplay; - asyh->core.h = asyh->state.mode.vdisplay; - } - asyh->core.handle = disp->core->chan.vram.handle; - asyh->core.offset = 0; - asyh->core.format = 0xcf; - asyh->core.kind = 0; - asyh->core.layout = 1; - asyh->core.block = 0; - asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4; - asyh->set.base = armh->base.cpp != asyh->base.cpp; - asyh->set.ovly = armh->ovly.cpp != asyh->ovly.cpp; - } else { - asyh->lut.visible = false; - asyh->core.visible = false; - asyh->curs.visible = false; - asyh->base.cpp = 0; - asyh->ovly.cpp = 0; - } - - if (!drm_atomic_crtc_needs_modeset(&asyh->state)) { - if (asyh->core.visible) { - if (memcmp(&armh->core, &asyh->core, sizeof(asyh->core))) - asyh->set.core = true; - } else - if (armh->core.visible) { - asyh->clr.core = true; - } - - if (asyh->curs.visible) { - if (memcmp(&armh->curs, &asyh->curs, sizeof(asyh->curs))) - asyh->set.curs = true; - } else - if (armh->curs.visible) { - asyh->clr.curs = true; - } - } else { - asyh->clr.ilut = armh->lut.visible; - asyh->clr.core = armh->core.visible; - asyh->clr.curs = armh->curs.visible; - asyh->set.ilut = asyh->lut.visible; - asyh->set.core = asyh->core.visible; - asyh->set.curs = asyh->curs.visible; - } - - if (asyh->clr.mask || asyh->set.mask) - nv50_atom(asyh->state.state)->lock_core = true; - return 0; -} - -static const struct drm_crtc_helper_funcs -nv50_head_help = { - .atomic_check = nv50_head_atomic_check, -}; - -static void -nv50_head_atomic_destroy_state(struct drm_crtc *crtc, - struct drm_crtc_state *state) -{ - struct nv50_head_atom *asyh = nv50_head_atom(state); - __drm_atomic_helper_crtc_destroy_state(&asyh->state); - kfree(asyh); -} - -static struct drm_crtc_state * -nv50_head_atomic_duplicate_state(struct drm_crtc *crtc) -{ - struct nv50_head_atom *armh = nv50_head_atom(crtc->state); - struct nv50_head_atom *asyh; - if (!(asyh = kmalloc(sizeof(*asyh), GFP_KERNEL))) - return NULL; - __drm_atomic_helper_crtc_duplicate_state(crtc, &asyh->state); - asyh->view = armh->view; - asyh->mode = armh->mode; - asyh->lut = armh->lut; - asyh->core = armh->core; - asyh->curs = armh->curs; - asyh->base = armh->base; - asyh->ovly = armh->ovly; - asyh->dither = armh->dither; - asyh->procamp = armh->procamp; - asyh->clr.mask = 0; - asyh->set.mask = 0; - return &asyh->state; -} - -static void -__drm_atomic_helper_crtc_reset(struct drm_crtc *crtc, - struct drm_crtc_state *state) -{ - if (crtc->state) - crtc->funcs->atomic_destroy_state(crtc, crtc->state); - crtc->state = state; - crtc->state->crtc = crtc; -} - -static void -nv50_head_reset(struct drm_crtc *crtc) -{ - struct nv50_head_atom *asyh; - - if (WARN_ON(!(asyh = kzalloc(sizeof(*asyh), GFP_KERNEL)))) - return; - - __drm_atomic_helper_crtc_reset(crtc, &asyh->state); -} - -static void -nv50_head_destroy(struct drm_crtc *crtc) -{ - struct nv50_head *head = nv50_head(crtc); - int i; - - for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++) - nouveau_bo_unmap_unpin_unref(&head->lut.nvbo[i]); - - drm_crtc_cleanup(crtc); - kfree(head); -} - -static const struct drm_crtc_funcs -nv50_head_func = { - .reset = nv50_head_reset, - .gamma_set = drm_atomic_helper_legacy_gamma_set, - .destroy = nv50_head_destroy, - .set_config = drm_atomic_helper_set_config, - .page_flip = drm_atomic_helper_page_flip, - .atomic_duplicate_state = nv50_head_atomic_duplicate_state, - .atomic_destroy_state = nv50_head_atomic_destroy_state, -}; - -static int -nv50_head_create(struct drm_device *dev, int index) -{ - struct 
nouveau_drm *drm = nouveau_drm(dev); - struct nv50_disp *disp = nv50_disp(dev); - struct nv50_head *head; - struct nv50_wndw *curs, *wndw; - struct drm_crtc *crtc; - int ret, i; - - head = kzalloc(sizeof(*head), GFP_KERNEL); - if (!head) - return -ENOMEM; - - head->func = disp->core->func->head; - head->base.index = index; - ret = nv50_base_new(drm, head->base.index, &wndw); - if (ret == 0) - ret = nv50_curs_new(drm, head->base.index, &curs); - if (ret) { - kfree(head); - return ret; - } - - crtc = &head->base.base; - drm_crtc_init_with_planes(dev, crtc, &wndw->plane, &curs->plane, - &nv50_head_func, "head-%d", head->base.index); - drm_crtc_helper_add(crtc, &nv50_head_help); - drm_mode_crtc_set_gamma_size(crtc, 256); - - for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++) { - ret = nouveau_bo_new_pin_map(&drm->client, 1025 * 8, 0x100, - TTM_PL_FLAG_VRAM, - &head->lut.nvbo[i]); - if (ret) - goto out; - } - - /* allocate overlay resources */ - ret = nv50_ovly_new(drm, head->base.index, &wndw); -out: - if (ret) - nv50_head_destroy(crtc); - return ret; -} - -static const struct nv50_outp_func dac507d; -static const struct nv50_outp_func sor507d; -static const struct nv50_outp_func pior507d; -static const struct nv50_core_func -core507d = { - .head = &head507d, - .dac = &dac507d, - .sor = &sor507d, - .pior = &pior507d, -}; - -static int -core507d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore) -{ - return core507d_new_(&core507d, drm, oclass, pcore); -} - -static void -nv50_core_del(struct nv50_core **pcore) -{ - struct nv50_core *core = *pcore; - if (core) { - nv50_dmac_destroy(&core->chan); - kfree(*pcore); - *pcore = NULL; - } -} - -static int -nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore) -{ - struct { - s32 oclass; - int version; - int (*new)(struct nouveau_drm *, s32, struct nv50_core **); - } cores[] = { - { GP102_DISP_CORE_CHANNEL_DMA, 0, core507d_new }, - { GP100_DISP_CORE_CHANNEL_DMA, 0, core507d_new }, - { GM200_DISP_CORE_CHANNEL_DMA, 0, core507d_new }, - { GM107_DISP_CORE_CHANNEL_DMA, 0, core507d_new }, - { GK110_DISP_CORE_CHANNEL_DMA, 0, core507d_new }, - { GK104_DISP_CORE_CHANNEL_DMA, 0, core507d_new }, - { GF110_DISP_CORE_CHANNEL_DMA, 0, core507d_new }, - { GT214_DISP_CORE_CHANNEL_DMA, 0, core507d_new }, - { GT206_DISP_CORE_CHANNEL_DMA, 0, core507d_new }, - { GT200_DISP_CORE_CHANNEL_DMA, 0, core507d_new }, - { G82_DISP_CORE_CHANNEL_DMA, 0, core507d_new }, - { NV50_DISP_CORE_CHANNEL_DMA, 0, core507d_new }, - {} - }; - struct nv50_disp *disp = nv50_disp(drm->dev); - int cid; - - cid = nvif_mclass(&disp->disp->object, cores); - if (cid < 0) { - NV_ERROR(drm, "No supported core channel class\n"); - return cid; - } - - return cores[cid].new(drm, cores[cid].oclass, pcore); -} - /****************************************************************************** * Output path helpers *****************************************************************************/ @@ -2721,33 +327,6 @@ nv50_outp_atomic_check(struct drm_encoder *encoder, * DAC *****************************************************************************/ static void -dac507d_ctrl(struct nv50_core *core, int or, u32 ctrl, - struct nv50_head_atom *asyh) -{ - u32 *push, sync = 0; - if ((push = evo_wait(&core->chan, 3))) { - if (core->chan.base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) { - if (asyh) { - sync |= asyh->or.nvsync << 1; - sync |= asyh->or.nhsync; - } - evo_mthd(push, 0x0400 + (or * 0x080), 2); - evo_data(push, ctrl); - evo_data(push, sync); - } else { - evo_mthd(push, 0x0180 + (or * 
0x020), 1); - evo_data(push, ctrl); - } - evo_kick(push, &core->chan); - } -} - -static const struct nv50_outp_func -dac507d = { - .ctrl = dac507d_ctrl, -}; - -static void nv50_dac_disable(struct drm_encoder *encoder) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); @@ -3635,32 +1214,6 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max, * SOR *****************************************************************************/ static void -sor507d_ctrl(struct nv50_core *core, int or, u32 ctrl, - struct nv50_head_atom *asyh) -{ - u32 *push; - if ((push = evo_wait(&core->chan, 6))) { - if (core->chan.base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) { - if (asyh) { - ctrl |= asyh->or.depth << 16; - ctrl |= asyh->or.nvsync << 13; - ctrl |= asyh->or.nhsync << 12; - } - evo_mthd(push, 0x0600 + (or * 0x40), 1); - } else { - evo_mthd(push, 0x0200 + (or * 0x20), 1); - } - evo_data(push, ctrl); - evo_kick(push, &core->chan); - } -} - -static const struct nv50_outp_func -sor507d = { - .ctrl = sor507d_ctrl, -}; - -static void nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head, struct nv50_head_atom *asyh, u8 proto, u8 depth) { @@ -3904,30 +1457,6 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe) /****************************************************************************** * PIOR *****************************************************************************/ -static void -pior507d_ctrl(struct nv50_core *core, int or, u32 ctrl, - struct nv50_head_atom *asyh) -{ - u32 *push; - if ((push = evo_wait(&core->chan, 8))) { - if (core->chan.base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) { - if (asyh) { - ctrl |= asyh->or.depth << 16; - ctrl |= asyh->or.nvsync << 13; - ctrl |= asyh->or.nhsync << 12; - } - evo_mthd(push, 0x0700 + (or * 0x040), 1); - evo_data(push, ctrl); - } - evo_kick(push, &core->chan); - } -} - -static const struct nv50_outp_func -pior507d = { - .ctrl = pior507d_ctrl, -}; - static int nv50_pior_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h new file mode 100644 index 000000000000..7cbd66849743 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h @@ -0,0 +1,71 @@ +#ifndef __NV50_KMS_H__ +#define __NV50_KMS_H__ +#include <nvif/mem.h> + +#include "nouveau_display.h" + +struct nv50_disp { + struct nvif_disp *disp; + struct nv50_core *core; + +#define NV50_DISP_SYNC(c, o) ((c) * 0x040 + (o)) +#define NV50_DISP_CORE_NTFY NV50_DISP_SYNC(0 , 0x00) +#define NV50_DISP_WNDW_SEM0(c) NV50_DISP_SYNC(1 + (c), 0x00) +#define NV50_DISP_WNDW_SEM1(c) NV50_DISP_SYNC(1 + (c), 0x10) +#define NV50_DISP_WNDW_NTFY(c) NV50_DISP_SYNC(1 + (c), 0x20) +#define NV50_DISP_BASE_SEM0(c) NV50_DISP_WNDW_SEM0(0 + (c)) +#define NV50_DISP_BASE_SEM1(c) NV50_DISP_WNDW_SEM1(0 + (c)) +#define NV50_DISP_BASE_NTFY(c) NV50_DISP_WNDW_NTFY(0 + (c)) + struct nouveau_bo *sync; + + struct mutex mutex; +}; + +static inline struct nv50_disp * +nv50_disp(struct drm_device *dev) +{ + return nouveau_display(dev)->priv; +} + +struct nv50_chan { + struct nvif_object user; + struct nvif_device *device; +}; + +struct nv50_dmac { + struct nv50_chan base; + + struct nvif_mem push; + u32 *ptr; + + struct nvif_object sync; + struct nvif_object vram; + + /* Protects against concurrent pushbuf access to this channel, lock is + * grabbed by evo_wait (if the pushbuf reservation is successful) and + * dropped again by evo_kick. 
*/ + struct mutex lock; +}; + +int nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp, + const s32 *oclass, u8 head, void *data, u32 size, + u64 syncbuf, struct nv50_dmac *dmac); +void nv50_dmac_destroy(struct nv50_dmac *); + +u32 *evo_wait(struct nv50_dmac *, int nr); +void evo_kick(u32 *, struct nv50_dmac *); + +#define evo_mthd(p, m, s) do { \ + const u32 _m = (m), _s = (s); \ + if (drm_debug & DRM_UT_KMS) \ + pr_err("%04x %d %s\n", _m, _s, __func__); \ + *((p)++) = ((_s << 18) | _m); \ +} while(0) + +#define evo_data(p, d) do { \ + const u32 _d = (d); \ + if (drm_debug & DRM_UT_KMS) \ + pr_err("\t%08x\n", _d); \ + *((p)++) = _d; \ +} while(0) +#endif diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c new file mode 100644 index 000000000000..6a809ff24e14 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/head.c @@ -0,0 +1,566 @@ +/* + * Copyright 2018 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "head.h" +#include "base.h" +#include "core.h" +#include "curs.h" +#include "ovly.h" + +#include <nvif/class.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc_helper.h> +#include "nouveau_connector.h" +#include "nouveau_bo.h" + +static void +nv50_head_lut_load(struct drm_property_blob *blob, int mode, + struct nouveau_bo *nvbo) +{ + struct drm_color_lut *in = (struct drm_color_lut *)blob->data; + void __iomem *lut = (u8 *)nvbo_kmap_obj_iovirtual(nvbo); + const int size = blob->length / sizeof(*in); + int bits, shift, i; + u16 zero, r, g, b; + + /* This can't happen.. But it shuts the compiler up. */ + if (WARN_ON(size != 256)) + return; + + switch (mode) { + case 0: /* LORES. */ + case 1: /* HIRES. */ + bits = 11; + shift = 3; + zero = 0x0000; + break; + case 7: /* INTERPOLATE_257_UNITY_RANGE. */ + bits = 14; + shift = 0; + zero = 0x6000; + break; + default: + WARN_ON(1); + return; + } + + for (i = 0; i < size; i++) { + r = (drm_color_lut_extract(in[i]. red, bits) + zero) << shift; + g = (drm_color_lut_extract(in[i].green, bits) + zero) << shift; + b = (drm_color_lut_extract(in[i]. blue, bits) + zero) << shift; + writew(r, lut + (i * 0x08) + 0); + writew(g, lut + (i * 0x08) + 2); + writew(b, lut + (i * 0x08) + 4); + } + + /* INTERPOLATE modes require a "next" entry to interpolate with, + * so we replicate the last entry to deal with this for now. 
+ */ + writew(r, lut + (i * 0x08) + 0); + writew(g, lut + (i * 0x08) + 2); + writew(b, lut + (i * 0x08) + 4); +} + +void +nv50_head_flush_clr(struct nv50_head *head, struct nv50_head_atom *asyh, bool y) +{ + if (asyh->clr.ilut && (!asyh->set.ilut || y)) + head->func->ilut_clr(head); + if (asyh->clr.core && (!asyh->set.core || y)) + head->func->core_clr(head); + if (asyh->clr.curs && (!asyh->set.curs || y)) + head->func->curs_clr(head); +} + +void +nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh) +{ + if (asyh->set.view ) head->func->view (head, asyh); + if (asyh->set.mode ) head->func->mode (head, asyh); + if (asyh->set.ilut ) { + struct nouveau_bo *nvbo = head->ilut.nvbo[head->ilut.next]; + struct drm_property_blob *blob = asyh->state.gamma_lut; + if (blob) + nv50_head_lut_load(blob, asyh->ilut.mode, nvbo); + asyh->ilut.offset = nvbo->bo.offset; + head->ilut.next ^= 1; + head->func->ilut_set(head, asyh); + } + if (asyh->set.core ) head->func->core_set(head, asyh); + if (asyh->set.curs ) head->func->curs_set(head, asyh); + if (asyh->set.base ) head->func->base (head, asyh); + if (asyh->set.ovly ) head->func->ovly (head, asyh); + if (asyh->set.dither ) head->func->dither (head, asyh); + if (asyh->set.procamp) head->func->procamp (head, asyh); + if (asyh->set.or ) head->func->or (head, asyh); +} + +static void +nv50_head_atomic_check_procamp(struct nv50_head_atom *armh, + struct nv50_head_atom *asyh, + struct nouveau_conn_atom *asyc) +{ + const int vib = asyc->procamp.color_vibrance - 100; + const int hue = asyc->procamp.vibrant_hue - 90; + const int adj = (vib > 0) ? 50 : 0; + asyh->procamp.sat.cos = ((vib * 2047 + adj) / 100) & 0xfff; + asyh->procamp.sat.sin = ((hue * 2047) / 100) & 0xfff; + asyh->set.procamp = true; +} + +static void +nv50_head_atomic_check_dither(struct nv50_head_atom *armh, + struct nv50_head_atom *asyh, + struct nouveau_conn_atom *asyc) +{ + struct drm_connector *connector = asyc->state.connector; + u32 mode = 0x00; + + if (asyc->dither.mode == DITHERING_MODE_AUTO) { + if (asyh->base.depth > connector->display_info.bpc * 3) + mode = DITHERING_MODE_DYNAMIC2X2; + } else { + mode = asyc->dither.mode; + } + + if (asyc->dither.depth == DITHERING_DEPTH_AUTO) { + if (connector->display_info.bpc >= 8) + mode |= DITHERING_DEPTH_8BPC; + } else { + mode |= asyc->dither.depth; + } + + asyh->dither.enable = mode; + asyh->dither.bits = mode >> 1; + asyh->dither.mode = mode >> 3; + asyh->set.dither = true; +} + +static void +nv50_head_atomic_check_view(struct nv50_head_atom *armh, + struct nv50_head_atom *asyh, + struct nouveau_conn_atom *asyc) +{ + struct drm_connector *connector = asyc->state.connector; + struct drm_display_mode *omode = &asyh->state.adjusted_mode; + struct drm_display_mode *umode = &asyh->state.mode; + int mode = asyc->scaler.mode; + struct edid *edid; + int umode_vdisplay, omode_hdisplay, omode_vdisplay; + + if (connector->edid_blob_ptr) + edid = (struct edid *)connector->edid_blob_ptr->data; + else + edid = NULL; + + if (!asyc->scaler.full) { + if (mode == DRM_MODE_SCALE_NONE) + omode = umode; + } else { + /* Non-EDID LVDS/eDP mode. */ + mode = DRM_MODE_SCALE_FULLSCREEN; + } + + /* For the user-specified mode, we must ignore doublescan and + * the like, but honor frame packing. 
+ */ + umode_vdisplay = umode->vdisplay; + if ((umode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING) + umode_vdisplay += umode->vtotal; + asyh->view.iW = umode->hdisplay; + asyh->view.iH = umode_vdisplay; + /* For the output mode, we can just use the stock helper. */ + drm_mode_get_hv_timing(omode, &omode_hdisplay, &omode_vdisplay); + asyh->view.oW = omode_hdisplay; + asyh->view.oH = omode_vdisplay; + + /* Add overscan compensation if necessary, will keep the aspect + * ratio the same as the backend mode unless overridden by the + * user setting both hborder and vborder properties. + */ + if ((asyc->scaler.underscan.mode == UNDERSCAN_ON || + (asyc->scaler.underscan.mode == UNDERSCAN_AUTO && + drm_detect_hdmi_monitor(edid)))) { + u32 bX = asyc->scaler.underscan.hborder; + u32 bY = asyc->scaler.underscan.vborder; + u32 r = (asyh->view.oH << 19) / asyh->view.oW; + + if (bX) { + asyh->view.oW -= (bX * 2); + if (bY) asyh->view.oH -= (bY * 2); + else asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19; + } else { + asyh->view.oW -= (asyh->view.oW >> 4) + 32; + if (bY) asyh->view.oH -= (bY * 2); + else asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19; + } + } + + /* Handle CENTER/ASPECT scaling, taking into account the areas + * removed already for overscan compensation. + */ + switch (mode) { + case DRM_MODE_SCALE_CENTER: + asyh->view.oW = min((u16)umode->hdisplay, asyh->view.oW); + asyh->view.oH = min((u16)umode_vdisplay, asyh->view.oH); + /* fall-through */ + case DRM_MODE_SCALE_ASPECT: + if (asyh->view.oH < asyh->view.oW) { + u32 r = (asyh->view.iW << 19) / asyh->view.iH; + asyh->view.oW = ((asyh->view.oH * r) + (r / 2)) >> 19; + } else { + u32 r = (asyh->view.iH << 19) / asyh->view.iW; + asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19; + } + break; + default: + break; + } + + asyh->set.view = true; +} + +static void +nv50_head_atomic_check_lut(struct nv50_head *head, + struct nv50_head_atom *armh, + struct nv50_head_atom *asyh) +{ + struct nv50_disp *disp = nv50_disp(head->base.base.dev); + + /* An I8 surface without an input LUT makes no sense, and + * EVO will throw an error if you try. + * + * Legacy clients actually cause this due to the order in + * which they call ioctls, so we will enable the LUT with + * whatever contents the buffer already contains to avoid + * triggering the error check. + */ + if (!asyh->state.gamma_lut && asyh->base.cpp != 1) { + asyh->ilut.handle = 0; + asyh->clr.ilut = armh->ilut.visible; + return; + } + + if (disp->disp->object.oclass < GF110_DISP) { + asyh->ilut.mode = (asyh->base.cpp == 1) ? 0 : 1; + asyh->set.ilut = true; + } else { + asyh->ilut.mode = 7; + asyh->set.ilut = asyh->state.color_mgmt_changed; + } + asyh->ilut.handle = disp->core->chan.vram.handle; +} + +static void +nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh) +{ + struct drm_display_mode *mode = &asyh->state.adjusted_mode; + struct nv50_head_mode *m = &asyh->mode; + u32 blankus; + + drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V | CRTC_STEREO_DOUBLE); + + /* + * DRM modes are defined in terms of a repeating interval + * starting with the active display area. The hardware modes + * are defined in terms of a repeating interval starting one + * unit (pixel or line) into the sync pulse. So, add bias. 
+ */ + + m->h.active = mode->crtc_htotal; + m->h.synce = mode->crtc_hsync_end - mode->crtc_hsync_start - 1; + m->h.blanke = mode->crtc_hblank_end - mode->crtc_hsync_start - 1; + m->h.blanks = m->h.blanke + mode->crtc_hdisplay; + + m->v.active = mode->crtc_vtotal; + m->v.synce = mode->crtc_vsync_end - mode->crtc_vsync_start - 1; + m->v.blanke = mode->crtc_vblank_end - mode->crtc_vsync_start - 1; + m->v.blanks = m->v.blanke + mode->crtc_vdisplay; + + /*XXX: Safe underestimate, even "0" works */ + blankus = (m->v.active - mode->crtc_vdisplay - 2) * m->h.active; + blankus *= 1000; + blankus /= mode->crtc_clock; + m->v.blankus = blankus; + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) { + m->v.blank2e = m->v.active + m->v.blanke; + m->v.blank2s = m->v.blank2e + mode->crtc_vdisplay; + m->v.active = (m->v.active * 2) + 1; + m->interlace = true; + } else { + m->v.blank2e = 0; + m->v.blank2s = 1; + m->interlace = false; + } + m->clock = mode->crtc_clock; + + asyh->or.nhsync = !!(mode->flags & DRM_MODE_FLAG_NHSYNC); + asyh->or.nvsync = !!(mode->flags & DRM_MODE_FLAG_NVSYNC); + asyh->set.or = head->func->or != NULL; + asyh->set.mode = true; +} + +static int +nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state) +{ + struct nouveau_drm *drm = nouveau_drm(crtc->dev); + struct nv50_disp *disp = nv50_disp(crtc->dev); + struct nv50_head *head = nv50_head(crtc); + struct nv50_head_atom *armh = nv50_head_atom(crtc->state); + struct nv50_head_atom *asyh = nv50_head_atom(state); + struct nouveau_conn_atom *asyc = NULL; + struct drm_connector_state *conns; + struct drm_connector *conn; + int i; + + NV_ATOMIC(drm, "%s atomic_check %d\n", crtc->name, asyh->state.active); + if (asyh->state.active) { + for_each_new_connector_in_state(asyh->state.state, conn, conns, i) { + if (conns->crtc == crtc) { + asyc = nouveau_conn_atom(conns); + break; + } + } + + if (armh->state.active) { + if (asyc) { + if (asyh->state.mode_changed) + asyc->set.scaler = true; + if (armh->base.depth != asyh->base.depth) + asyc->set.dither = true; + } + } else { + if (asyc) + asyc->set.mask = ~0; + asyh->set.mask = ~0; + asyh->set.or = head->func->or != NULL; + } + + if (asyh->state.mode_changed) + nv50_head_atomic_check_mode(head, asyh); + + if (asyh->state.color_mgmt_changed || + asyh->base.cpp != armh->base.cpp) + nv50_head_atomic_check_lut(head, armh, asyh); + asyh->ilut.visible = asyh->ilut.handle != 0; + + if (asyc) { + if (asyc->set.scaler) + nv50_head_atomic_check_view(armh, asyh, asyc); + if (asyc->set.dither) + nv50_head_atomic_check_dither(armh, asyh, asyc); + if (asyc->set.procamp) + nv50_head_atomic_check_procamp(armh, asyh, asyc); + } + + if ((asyh->core.visible = (asyh->base.cpp != 0))) { + asyh->core.x = asyh->base.x; + asyh->core.y = asyh->base.y; + asyh->core.w = asyh->base.w; + asyh->core.h = asyh->base.h; + } else + if ((asyh->core.visible = asyh->curs.visible) || + (asyh->core.visible = asyh->ilut.visible)) { + /*XXX: We need to either find some way of having the + * primary base layer appear black, while still + * being able to display the other layers, or we + * need to allocate a dummy black surface here. 
+ */ + asyh->core.x = 0; + asyh->core.y = 0; + asyh->core.w = asyh->state.mode.hdisplay; + asyh->core.h = asyh->state.mode.vdisplay; + } + asyh->core.handle = disp->core->chan.vram.handle; + asyh->core.offset = 0; + asyh->core.format = 0xcf; + asyh->core.kind = 0; + asyh->core.layout = 1; + asyh->core.block = 0; + asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4; + asyh->set.base = armh->base.cpp != asyh->base.cpp; + asyh->set.ovly = armh->ovly.cpp != asyh->ovly.cpp; + } else { + asyh->ilut.visible = false; + asyh->core.visible = false; + asyh->curs.visible = false; + asyh->base.cpp = 0; + asyh->ovly.cpp = 0; + } + + if (!drm_atomic_crtc_needs_modeset(&asyh->state)) { + if (asyh->core.visible) { + if (memcmp(&armh->core, &asyh->core, sizeof(asyh->core))) + asyh->set.core = true; + } else + if (armh->core.visible) { + asyh->clr.core = true; + } + + if (asyh->curs.visible) { + if (memcmp(&armh->curs, &asyh->curs, sizeof(asyh->curs))) + asyh->set.curs = true; + } else + if (armh->curs.visible) { + asyh->clr.curs = true; + } + } else { + asyh->clr.ilut = armh->ilut.visible; + asyh->clr.core = armh->core.visible; + asyh->clr.curs = armh->curs.visible; + asyh->set.ilut = asyh->ilut.visible; + asyh->set.core = asyh->core.visible; + asyh->set.curs = asyh->curs.visible; + } + + if (asyh->clr.mask || asyh->set.mask) + nv50_atom(asyh->state.state)->lock_core = true; + return 0; +} + +static const struct drm_crtc_helper_funcs +nv50_head_help = { + .atomic_check = nv50_head_atomic_check, +}; + +static void +nv50_head_atomic_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct nv50_head_atom *asyh = nv50_head_atom(state); + __drm_atomic_helper_crtc_destroy_state(&asyh->state); + kfree(asyh); +} + +static struct drm_crtc_state * +nv50_head_atomic_duplicate_state(struct drm_crtc *crtc) +{ + struct nv50_head_atom *armh = nv50_head_atom(crtc->state); + struct nv50_head_atom *asyh; + if (!(asyh = kmalloc(sizeof(*asyh), GFP_KERNEL))) + return NULL; + __drm_atomic_helper_crtc_duplicate_state(crtc, &asyh->state); + asyh->view = armh->view; + asyh->mode = armh->mode; + asyh->ilut = armh->ilut; + asyh->core = armh->core; + asyh->curs = armh->curs; + asyh->base = armh->base; + asyh->ovly = armh->ovly; + asyh->dither = armh->dither; + asyh->procamp = armh->procamp; + asyh->clr.mask = 0; + asyh->set.mask = 0; + return &asyh->state; +} + +static void +__drm_atomic_helper_crtc_reset(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + if (crtc->state) + crtc->funcs->atomic_destroy_state(crtc, crtc->state); + crtc->state = state; + crtc->state->crtc = crtc; +} + +static void +nv50_head_reset(struct drm_crtc *crtc) +{ + struct nv50_head_atom *asyh; + + if (WARN_ON(!(asyh = kzalloc(sizeof(*asyh), GFP_KERNEL)))) + return; + + __drm_atomic_helper_crtc_reset(crtc, &asyh->state); +} + +static void +nv50_head_destroy(struct drm_crtc *crtc) +{ + struct nv50_head *head = nv50_head(crtc); + int i; + + for (i = 0; i < ARRAY_SIZE(head->ilut.nvbo); i++) + nouveau_bo_unmap_unpin_unref(&head->ilut.nvbo[i]); + + drm_crtc_cleanup(crtc); + kfree(head); +} + +static const struct drm_crtc_funcs +nv50_head_func = { + .reset = nv50_head_reset, + .gamma_set = drm_atomic_helper_legacy_gamma_set, + .destroy = nv50_head_destroy, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .atomic_duplicate_state = nv50_head_atomic_duplicate_state, + .atomic_destroy_state = nv50_head_atomic_destroy_state, +}; + +int +nv50_head_create(struct drm_device *dev, int index) +{ + struct 
nouveau_drm *drm = nouveau_drm(dev); + struct nv50_disp *disp = nv50_disp(dev); + struct nv50_head *head; + struct nv50_wndw *curs, *wndw; + struct drm_crtc *crtc; + int ret, i; + + head = kzalloc(sizeof(*head), GFP_KERNEL); + if (!head) + return -ENOMEM; + + head->func = disp->core->func->head; + head->base.index = index; + ret = nv50_base_new(drm, head->base.index, &wndw); + if (ret == 0) + ret = nv50_curs_new(drm, head->base.index, &curs); + if (ret) { + kfree(head); + return ret; + } + + crtc = &head->base.base; + drm_crtc_init_with_planes(dev, crtc, &wndw->plane, &curs->plane, + &nv50_head_func, "head-%d", head->base.index); + drm_crtc_helper_add(crtc, &nv50_head_help); + drm_mode_crtc_set_gamma_size(crtc, 256); + + for (i = 0; i < ARRAY_SIZE(head->ilut.nvbo); i++) { + ret = nouveau_bo_new_pin_map(&drm->client, 1025 * 8, 0x100, + TTM_PL_FLAG_VRAM, + &head->ilut.nvbo[i]); + if (ret) + goto out; + } + + /* allocate overlay resources */ + ret = nv50_ovly_new(drm, head->base.index, &wndw); +out: + if (ret) + nv50_head_destroy(crtc); + return ret; +} diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h new file mode 100644 index 000000000000..23099a82883b --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/head.h @@ -0,0 +1,39 @@ +#ifndef __NV50_KMS_HEAD_H__ +#define __NV50_KMS_HEAD_H__ +#define nv50_head(c) container_of((c), struct nv50_head, base.base) +#include "disp.h" +#include "atom.h" + +#include "nouveau_crtc.h" + +struct nv50_head { + const struct nv50_head_func *func; + struct nouveau_crtc base; + struct { + struct nouveau_bo *nvbo[2]; + int next; + } ilut; +}; + +int nv50_head_create(struct drm_device *, int index); +void nv50_head_flush_set(struct nv50_head *, struct nv50_head_atom *); +void nv50_head_flush_clr(struct nv50_head *, struct nv50_head_atom *, bool y); + +struct nv50_head_func { + void (*view)(struct nv50_head *, struct nv50_head_atom *); + void (*mode)(struct nv50_head *, struct nv50_head_atom *); + void (*ilut_set)(struct nv50_head *, struct nv50_head_atom *); + void (*ilut_clr)(struct nv50_head *); + void (*core_set)(struct nv50_head *, struct nv50_head_atom *); + void (*core_clr)(struct nv50_head *); + void (*curs_set)(struct nv50_head *, struct nv50_head_atom *); + void (*curs_clr)(struct nv50_head *); + void (*base)(struct nv50_head *, struct nv50_head_atom *); + void (*ovly)(struct nv50_head *, struct nv50_head_atom *); + void (*dither)(struct nv50_head *, struct nv50_head_atom *); + void (*procamp)(struct nv50_head *, struct nv50_head_atom *); + void (*or)(struct nv50_head *, struct nv50_head_atom *); +}; + +extern const struct nv50_head_func head507d; +#endif diff --git a/drivers/gpu/drm/nouveau/dispnv50/head507d.c b/drivers/gpu/drm/nouveau/dispnv50/head507d.c new file mode 100644 index 000000000000..92fa249ba72f --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/head507d.c @@ -0,0 +1,403 @@ +/* + * Copyright 2018 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "head.h" +#include "core.h" + +#include <nvif/class.h> + +static void +head907d_or(struct nv50_head *head, struct nv50_head_atom *asyh) +{ + struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; + u32 *push; + if (core->base.user.oclass >= GF110_DISP_CORE_CHANNEL_DMA && + (push = evo_wait(core, 3))) { + evo_mthd(push, 0x0404 + (head->base.index * 0x300), 2); + evo_data(push, 0x00000001 | (asyh->or.depth << 6) | + (asyh->or.nvsync << 4) | + (asyh->or.nhsync << 3)); + evo_data(push, 0x31ec6000 | (head->base.index << 25) | + asyh->mode.interlace); + evo_kick(push, core); + } +} + +static void +head507d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh) +{ + struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; + u32 *push; + if ((push = evo_wait(core, 2))) { + if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) + evo_mthd(push, 0x08a8 + (head->base.index * 0x400), 1); + else + evo_mthd(push, 0x0498 + (head->base.index * 0x300), 1); + evo_data(push, (asyh->procamp.sat.sin << 20) | + (asyh->procamp.sat.cos << 8)); + evo_kick(push, core); + } +} + +static void +head507d_dither(struct nv50_head *head, struct nv50_head_atom *asyh) +{ + struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; + u32 *push; + if ((push = evo_wait(core, 2))) { + if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) + evo_mthd(push, 0x08a0 + (head->base.index * 0x0400), 1); + else + if (core->base.user.oclass < GK104_DISP_CORE_CHANNEL_DMA) + evo_mthd(push, 0x0490 + (head->base.index * 0x0300), 1); + else + evo_mthd(push, 0x04a0 + (head->base.index * 0x0300), 1); + evo_data(push, (asyh->dither.mode << 3) | + (asyh->dither.bits << 1) | + asyh->dither.enable); + evo_kick(push, core); + } +} + +static void +head507d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh) +{ + struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; + u32 bounds = 0; + u32 *push; + + if (asyh->ovly.cpp) { + switch (asyh->ovly.cpp) { + case 8: bounds |= 0x00000500; break; + case 4: bounds |= 0x00000300; break; + case 2: bounds |= 0x00000100; break; + default: + WARN_ON(1); + break; + } + bounds |= 0x00000001; + } + + if ((push = evo_wait(core, 2))) { + if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) + evo_mthd(push, 0x0904 + head->base.index * 0x400, 1); + else + evo_mthd(push, 0x04d4 + head->base.index * 0x300, 1); + evo_data(push, bounds); + evo_kick(push, 
core); + } +} + +static void +head507d_base(struct nv50_head *head, struct nv50_head_atom *asyh) +{ + struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; + u32 bounds = 0; + u32 *push; + + if (asyh->base.cpp) { + switch (asyh->base.cpp) { + case 8: bounds |= 0x00000500; break; + case 4: bounds |= 0x00000300; break; + case 2: bounds |= 0x00000100; break; + case 1: bounds |= 0x00000000; break; + default: + WARN_ON(1); + break; + } + bounds |= 0x00000001; + } + + if ((push = evo_wait(core, 2))) { + if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) + evo_mthd(push, 0x0900 + head->base.index * 0x400, 1); + else + evo_mthd(push, 0x04d0 + head->base.index * 0x300, 1); + evo_data(push, bounds); + evo_kick(push, core); + } +} + +static void +head507d_curs_clr(struct nv50_head *head) +{ + struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; + u32 *push; + if ((push = evo_wait(core, 4))) { + if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) { + evo_mthd(push, 0x0880 + head->base.index * 0x400, 1); + evo_data(push, 0x05000000); + } else + if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) { + evo_mthd(push, 0x0880 + head->base.index * 0x400, 1); + evo_data(push, 0x05000000); + evo_mthd(push, 0x089c + head->base.index * 0x400, 1); + evo_data(push, 0x00000000); + } else { + evo_mthd(push, 0x0480 + head->base.index * 0x300, 1); + evo_data(push, 0x05000000); + evo_mthd(push, 0x048c + head->base.index * 0x300, 1); + evo_data(push, 0x00000000); + } + evo_kick(push, core); + } +} + +static void +head507d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh) +{ + struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; + u32 *push; + if ((push = evo_wait(core, 5))) { + if (core->base.user.oclass < G82_DISP_BASE_CHANNEL_DMA) { + evo_mthd(push, 0x0880 + head->base.index * 0x400, 2); + evo_data(push, 0x80000000 | (asyh->curs.layout << 26) | + (asyh->curs.format << 24)); + evo_data(push, asyh->curs.offset >> 8); + } else + if (core->base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA) { + evo_mthd(push, 0x0880 + head->base.index * 0x400, 2); + evo_data(push, 0x80000000 | (asyh->curs.layout << 26) | + (asyh->curs.format << 24)); + evo_data(push, asyh->curs.offset >> 8); + evo_mthd(push, 0x089c + head->base.index * 0x400, 1); + evo_data(push, asyh->curs.handle); + } else { + evo_mthd(push, 0x0480 + head->base.index * 0x300, 2); + evo_data(push, 0x80000000 | (asyh->curs.layout << 26) | + (asyh->curs.format << 24)); + evo_data(push, asyh->curs.offset >> 8); + evo_mthd(push, 0x048c + head->base.index * 0x300, 1); + evo_data(push, asyh->curs.handle); + } + evo_kick(push, core); + } +} + +static void +head507d_core_clr(struct nv50_head *head) +{ + struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; + u32 *push; + if ((push = evo_wait(core, 2))) { + if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) + evo_mthd(push, 0x0874 + head->base.index * 0x400, 1); + else + evo_mthd(push, 0x0474 + head->base.index * 0x300, 1); + evo_data(push, 0x00000000); + evo_kick(push, core); + } +} + +static void +head507d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh) +{ + struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; + u32 *push; + if ((push = evo_wait(core, 9))) { + if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) { + evo_mthd(push, 0x0860 + head->base.index * 0x400, 1); + evo_data(push, asyh->core.offset >> 8); + evo_mthd(push, 0x0868 + head->base.index * 0x400, 4); + evo_data(push, (asyh->core.h << 
16) | asyh->core.w); + evo_data(push, asyh->core.layout << 20 | + (asyh->core.pitch >> 8) << 8 | + asyh->core.block); + evo_data(push, asyh->core.kind << 16 | + asyh->core.format << 8); + evo_data(push, asyh->core.handle); + evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1); + evo_data(push, (asyh->core.y << 16) | asyh->core.x); + /* EVO will complain with INVALID_STATE if we have an + * active cursor and (re)specify HeadSetContextDmaIso + * without also updating HeadSetOffsetCursor. + */ + asyh->set.curs = asyh->curs.visible; + } else + if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) { + evo_mthd(push, 0x0860 + head->base.index * 0x400, 1); + evo_data(push, asyh->core.offset >> 8); + evo_mthd(push, 0x0868 + head->base.index * 0x400, 4); + evo_data(push, (asyh->core.h << 16) | asyh->core.w); + evo_data(push, asyh->core.layout << 20 | + (asyh->core.pitch >> 8) << 8 | + asyh->core.block); + evo_data(push, asyh->core.format << 8); + evo_data(push, asyh->core.handle); + evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1); + evo_data(push, (asyh->core.y << 16) | asyh->core.x); + } else { + evo_mthd(push, 0x0460 + head->base.index * 0x300, 1); + evo_data(push, asyh->core.offset >> 8); + evo_mthd(push, 0x0468 + head->base.index * 0x300, 4); + evo_data(push, (asyh->core.h << 16) | asyh->core.w); + evo_data(push, asyh->core.layout << 24 | + (asyh->core.pitch >> 8) << 8 | + asyh->core.block); + evo_data(push, asyh->core.format << 8); + evo_data(push, asyh->core.handle); + evo_mthd(push, 0x04b0 + head->base.index * 0x300, 1); + evo_data(push, (asyh->core.y << 16) | asyh->core.x); + } + evo_kick(push, core); + } +} + +static void +head507d_ilut_clr(struct nv50_head *head) +{ + struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; + u32 *push; + if ((push = evo_wait(core, 4))) { + if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) { + evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1); + evo_data(push, 0x40000000); + } else + if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) { + evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1); + evo_data(push, 0x40000000); + evo_mthd(push, 0x085c + (head->base.index * 0x400), 1); + evo_data(push, 0x00000000); + } else { + evo_mthd(push, 0x0440 + (head->base.index * 0x300), 1); + evo_data(push, 0x03000000); + evo_mthd(push, 0x045c + (head->base.index * 0x300), 1); + evo_data(push, 0x00000000); + } + evo_kick(push, core); + } +} + +static void +head507d_ilut_set(struct nv50_head *head, struct nv50_head_atom *asyh) +{ + struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; + u32 *push; + if ((push = evo_wait(core, 7))) { + if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) { + evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2); + evo_data(push, 0x80000000 | asyh->ilut.mode << 30); + evo_data(push, asyh->ilut.offset >> 8); + } else + if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) { + evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2); + evo_data(push, 0x80000000 | asyh->ilut.mode << 30); + evo_data(push, asyh->ilut.offset >> 8); + evo_mthd(push, 0x085c + (head->base.index * 0x400), 1); + evo_data(push, asyh->ilut.handle); + } else { + evo_mthd(push, 0x0440 + (head->base.index * 0x300), 4); + evo_data(push, 0x80000000 | asyh->ilut.mode << 24); + evo_data(push, asyh->ilut.offset >> 8); + evo_data(push, 0x00000000); + evo_data(push, 0x00000000); + evo_mthd(push, 0x045c + (head->base.index * 0x300), 1); + evo_data(push, asyh->ilut.handle); + } + evo_kick(push, core); + } +} + +static 
void +head507d_mode(struct nv50_head *head, struct nv50_head_atom *asyh) +{ + struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; + struct nv50_head_mode *m = &asyh->mode; + u32 *push; + if ((push = evo_wait(core, 14))) { + if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) { + evo_mthd(push, 0x0804 + (head->base.index * 0x400), 2); + evo_data(push, 0x00800000 | m->clock); + evo_data(push, m->interlace ? 0x00000002 : 0x00000000); + evo_mthd(push, 0x0810 + (head->base.index * 0x400), 7); + evo_data(push, 0x00000000); + evo_data(push, (m->v.active << 16) | m->h.active ); + evo_data(push, (m->v.synce << 16) | m->h.synce ); + evo_data(push, (m->v.blanke << 16) | m->h.blanke ); + evo_data(push, (m->v.blanks << 16) | m->h.blanks ); + evo_data(push, (m->v.blank2e << 16) | m->v.blank2s); + evo_data(push, asyh->mode.v.blankus); + evo_mthd(push, 0x082c + (head->base.index * 0x400), 1); + evo_data(push, 0x00000000); + } else { + evo_mthd(push, 0x0410 + (head->base.index * 0x300), 6); + evo_data(push, 0x00000000); + evo_data(push, (m->v.active << 16) | m->h.active ); + evo_data(push, (m->v.synce << 16) | m->h.synce ); + evo_data(push, (m->v.blanke << 16) | m->h.blanke ); + evo_data(push, (m->v.blanks << 16) | m->h.blanks ); + evo_data(push, (m->v.blank2e << 16) | m->v.blank2s); + evo_mthd(push, 0x042c + (head->base.index * 0x300), 2); + evo_data(push, 0x00000000); /* ??? */ + evo_data(push, 0xffffff00); + evo_mthd(push, 0x0450 + (head->base.index * 0x300), 3); + evo_data(push, m->clock * 1000); + evo_data(push, 0x00200000); /* ??? */ + evo_data(push, m->clock * 1000); + } + evo_kick(push, core); + } +} + +static void +head507d_view(struct nv50_head *head, struct nv50_head_atom *asyh) +{ + struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; + u32 *push; + if ((push = evo_wait(core, 10))) { + if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) { + evo_mthd(push, 0x08a4 + (head->base.index * 0x400), 1); + evo_data(push, 0x00000000); + evo_mthd(push, 0x08c8 + (head->base.index * 0x400), 1); + evo_data(push, (asyh->view.iH << 16) | asyh->view.iW); + evo_mthd(push, 0x08d8 + (head->base.index * 0x400), 2); + evo_data(push, (asyh->view.oH << 16) | asyh->view.oW); + evo_data(push, (asyh->view.oH << 16) | asyh->view.oW); + } else { + evo_mthd(push, 0x0494 + (head->base.index * 0x300), 1); + evo_data(push, 0x00000000); + evo_mthd(push, 0x04b8 + (head->base.index * 0x300), 1); + evo_data(push, (asyh->view.iH << 16) | asyh->view.iW); + evo_mthd(push, 0x04c0 + (head->base.index * 0x300), 3); + evo_data(push, (asyh->view.oH << 16) | asyh->view.oW); + evo_data(push, (asyh->view.oH << 16) | asyh->view.oW); + evo_data(push, (asyh->view.oH << 16) | asyh->view.oW); + } + evo_kick(push, core); + } +} + +const struct nv50_head_func +head507d = { + .view = head507d_view, + .mode = head507d_mode, + .ilut_set = head507d_ilut_set, + .ilut_clr = head507d_ilut_clr, + .core_set = head507d_core_set, + .core_clr = head507d_core_clr, + .curs_set = head507d_curs_set, + .curs_clr = head507d_curs_clr, + .base = head507d_base, + .ovly = head507d_ovly, + .dither = head507d_dither, + .procamp = head507d_procamp, + .or = head907d_or, +}; diff --git a/drivers/gpu/drm/nouveau/dispnv50/oimm.c b/drivers/gpu/drm/nouveau/dispnv50/oimm.c new file mode 100644 index 000000000000..2a2841d344c8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/oimm.c @@ -0,0 +1,51 @@ +/* + * Copyright 2018 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "oimm.h" + +#include <nvif/class.h> + +int +nv50_oimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw) +{ + static const struct { + s32 oclass; + int version; + int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *); + } oimms[] = { + { GK104_DISP_OVERLAY, 0, oimm507b_init }, + { GF110_DISP_OVERLAY, 0, oimm507b_init }, + { GT214_DISP_OVERLAY, 0, oimm507b_init }, + { G82_DISP_OVERLAY, 0, oimm507b_init }, + { NV50_DISP_OVERLAY, 0, oimm507b_init }, + {} + }; + struct nv50_disp *disp = nv50_disp(drm->dev); + int cid; + + cid = nvif_mclass(&disp->disp->object, oimms); + if (cid < 0) { + NV_ERROR(drm, "No supported overlay immediate class\n"); + return cid; + } + + return oimms[cid].init(drm, oimms[cid].oclass, wndw); +} diff --git a/drivers/gpu/drm/nouveau/dispnv50/oimm.h b/drivers/gpu/drm/nouveau/dispnv50/oimm.h new file mode 100644 index 000000000000..6fa51f101e94 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/oimm.h @@ -0,0 +1,8 @@ +#ifndef __NV50_KMS_OIMM_H__ +#define __NV50_KMS_OIMM_H__ +#include "wndw.h" + +int oimm507b_init(struct nouveau_drm *, s32, struct nv50_wndw *); + +int nv50_oimm_init(struct nouveau_drm *, struct nv50_wndw *); +#endif diff --git a/drivers/gpu/drm/nouveau/dispnv50/oimm507b.c b/drivers/gpu/drm/nouveau/dispnv50/oimm507b.c new file mode 100644 index 000000000000..c4baca82de14 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/oimm507b.c @@ -0,0 +1,56 @@ +/* + * Copyright 2018 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "oimm.h" + +#include <nvif/cl507b.h> + +static const struct nv50_wimm_func +oimm507b = { +}; + +static int +oimm507b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm, + s32 oclass, struct nv50_wndw *wndw) +{ + struct nv50_disp_overlay_v0 args = { + .head = wndw->id, + }; + struct nv50_disp *disp = nv50_disp(drm->dev); + int ret; + + ret = nvif_object_init(&disp->disp->object, 0, oclass, &args, + sizeof(args), &wndw->wimm.base.user); + if (ret) { + NV_ERROR(drm, "oimm%04x allocation failed: %d\n", oclass, ret); + return ret; + } + + nvif_object_map(&wndw->wimm.base.user, NULL, 0); + wndw->immd = func; + return 0; +} + +int +oimm507b_init(struct nouveau_drm *drm, s32 oclass, struct nv50_wndw *wndw) +{ + return oimm507b_init_(&oimm507b, drm, oclass, wndw); +} diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly.c b/drivers/gpu/drm/nouveau/dispnv50/ovly.c new file mode 100644 index 000000000000..ac2d3b64f186 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/ovly.c @@ -0,0 +1,57 @@ +/* + * Copyright 2018 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "ovly.h" +#include "oimm.h" + +#include <nvif/class.h> + +int +nv50_ovly_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw) +{ + static const struct { + s32 oclass; + int version; + int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **); + } ovlys[] = { + { GK104_DISP_OVERLAY_CONTROL_DMA, 0, ovly507e_new }, + { GF110_DISP_OVERLAY_CONTROL_DMA, 0, ovly507e_new }, + { GT214_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new }, + { GT200_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new }, + { G82_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new }, + { NV50_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new }, + {} + }; + struct nv50_disp *disp = nv50_disp(drm->dev); + int cid, ret; + + cid = nvif_mclass(&disp->disp->object, ovlys); + if (cid < 0) { + NV_ERROR(drm, "No supported overlay class\n"); + return cid; + } + + ret = ovlys[cid].new(drm, head, ovlys[cid].oclass, pwndw); + if (ret) + return ret; + + return nv50_oimm_init(drm, *pwndw); +} diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly.h b/drivers/gpu/drm/nouveau/dispnv50/ovly.h new file mode 100644 index 000000000000..90af1f2f0aa0 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/ovly.h @@ -0,0 +1,8 @@ +#ifndef __NV50_KMS_OVLY_H__ +#define __NV50_KMS_OVLY_H__ +#include "wndw.h" + +int ovly507e_new(struct nouveau_drm *, int, s32, struct nv50_wndw **); + +int nv50_ovly_new(struct nouveau_drm *, int head, struct nv50_wndw **); +#endif diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c new file mode 100644 index 000000000000..ceec5127a17d --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c @@ -0,0 +1,70 @@ +/* + * Copyright 2018 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "ovly.h" + +#include <nvif/cl507e.h> + +#include "nouveau_bo.h" + +static const struct nv50_wndw_func +ovly507e = { +}; + +static const u32 +ovly507e_format[] = { + 0 +}; + +static int +ovly507e_new_(const struct nv50_wndw_func *func, const u32 *format, + struct nouveau_drm *drm, int head, s32 oclass, + struct nv50_wndw **pwndw) +{ + struct nv50_disp_overlay_channel_dma_v0 args = { + .head = head, + }; + struct nv50_disp *disp = nv50_disp(drm->dev); + struct nv50_wndw *wndw; + int ret; + + ret = nv50_wndw_new_(func, drm->dev, DRM_PLANE_TYPE_OVERLAY, + "ovly", head, format, &wndw); + if (*pwndw = wndw, ret) + return ret; + + ret = nv50_dmac_create(&drm->client.device, &disp->disp->object, + &oclass, 0, &args, sizeof(args), + disp->sync->bo.offset, &wndw->wndw); + if (ret) { + NV_ERROR(drm, "ovly%04x allocation failed: %d\n", oclass, ret); + return ret; + } + + return 0; +} + +int +ovly507e_new(struct nouveau_drm *drm, int head, s32 oclass, + struct nv50_wndw **pwndw) +{ + return ovly507e_new_(&ovly507e, ovly507e_format, drm, head, oclass, pwndw); +} diff --git a/drivers/gpu/drm/nouveau/dispnv50/pior507d.c b/drivers/gpu/drm/nouveau/dispnv50/pior507d.c new file mode 100644 index 000000000000..a99ba6a7216f --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/pior507d.c @@ -0,0 +1,48 @@ +/* + * Copyright 2018 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "core.h" + +#include <nvif/class.h> + +static void +pior507d_ctrl(struct nv50_core *core, int or, u32 ctrl, + struct nv50_head_atom *asyh) +{ + u32 *push; + if ((push = evo_wait(&core->chan, 8))) { + if (core->chan.base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) { + if (asyh) { + ctrl |= asyh->or.depth << 16; + ctrl |= asyh->or.nvsync << 13; + ctrl |= asyh->or.nhsync << 12; + } + evo_mthd(push, 0x0700 + (or * 0x040), 1); + evo_data(push, ctrl); + } + evo_kick(push, &core->chan); + } +} + +const struct nv50_outp_func +pior507d = { + .ctrl = pior507d_ctrl, +}; diff --git a/drivers/gpu/drm/nouveau/dispnv50/sor507d.c b/drivers/gpu/drm/nouveau/dispnv50/sor507d.c new file mode 100644 index 000000000000..2d540de27f59 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/sor507d.c @@ -0,0 +1,50 @@ +/* + * Copyright 2018 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "core.h" + +#include <nvif/class.h> + +static void +sor507d_ctrl(struct nv50_core *core, int or, u32 ctrl, + struct nv50_head_atom *asyh) +{ + u32 *push; + if ((push = evo_wait(&core->chan, 6))) { + if (core->chan.base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) { + if (asyh) { + ctrl |= asyh->or.depth << 16; + ctrl |= asyh->or.nvsync << 13; + ctrl |= asyh->or.nhsync << 12; + } + evo_mthd(push, 0x0600 + (or * 0x40), 1); + } else { + evo_mthd(push, 0x0200 + (or * 0x20), 1); + } + evo_data(push, ctrl); + evo_kick(push, &core->chan); + } +} + +const struct nv50_outp_func +sor507d = { + .ctrl = sor507d_ctrl, +}; diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c new file mode 100644 index 000000000000..71a4c50af8ec --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -0,0 +1,434 @@ +/* + * Copyright 2018 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "wndw.h" + +#include <nvif/class.h> +#include <nvif/cl0002.h> + +#include <drm/drm_atomic_helper.h> +#include "nouveau_bo.h" + +static void +nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma) +{ + nvif_object_fini(&ctxdma->object); + list_del(&ctxdma->head); + kfree(ctxdma); +} + +static struct nv50_wndw_ctxdma * +nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb) +{ + struct nouveau_drm *drm = nouveau_drm(fb->base.dev); + struct nv50_wndw_ctxdma *ctxdma; + const u8 kind = fb->nvbo->kind; + const u32 handle = 0xfb000000 | kind; + struct { + struct nv_dma_v0 base; + union { + struct nv50_dma_v0 nv50; + struct gf100_dma_v0 gf100; + struct gf119_dma_v0 gf119; + }; + } args = {}; + u32 argc = sizeof(args.base); + int ret; + + list_for_each_entry(ctxdma, &wndw->ctxdma.list, head) { + if (ctxdma->object.handle == handle) + return ctxdma; + } + + if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL))) + return ERR_PTR(-ENOMEM); + list_add(&ctxdma->head, &wndw->ctxdma.list); + + args.base.target = NV_DMA_V0_TARGET_VRAM; + args.base.access = NV_DMA_V0_ACCESS_RDWR; + args.base.start = 0; + args.base.limit = drm->client.device.info.ram_user - 1; + + if (drm->client.device.info.chipset < 0x80) { + args.nv50.part = NV50_DMA_V0_PART_256; + argc += sizeof(args.nv50); + } else + if (drm->client.device.info.chipset < 0xc0) { + args.nv50.part = NV50_DMA_V0_PART_256; + args.nv50.kind = kind; + argc += sizeof(args.nv50); + } else + if (drm->client.device.info.chipset < 0xd0) { + args.gf100.kind = kind; + argc += sizeof(args.gf100); + } else { + args.gf119.page = GF119_DMA_V0_PAGE_LP; + args.gf119.kind = kind; + argc += sizeof(args.gf119); + } + + ret = nvif_object_init(wndw->ctxdma.parent, handle, NV_DMA_IN_MEMORY, + &args, argc, &ctxdma->object); + if (ret) { + nv50_wndw_ctxdma_del(ctxdma); + return ERR_PTR(ret); + } + + return ctxdma; +} + +int +nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) +{ + if (asyw->set.ntfy) + return wndw->func->ntfy_wait_begun(wndw, asyw); + return 0; +} + +u32 +nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 interlock, bool flush, + struct nv50_wndw_atom *asyw) +{ + if (asyw->clr.sema && (!asyw->set.sema || flush)) + wndw->func->sema_clr(wndw); + if (asyw->clr.ntfy && (!asyw->set.ntfy || flush)) + wndw->func->ntfy_clr(wndw); + if (asyw->clr.image && (!asyw->set.image || flush)) + wndw->func->image_clr(wndw); + + return flush ? wndw->func->update(wndw, interlock) : 0; +} + +u32 +nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 interlock, + struct nv50_wndw_atom *asyw) +{ + if (interlock) { + asyw->image.mode = 0; + asyw->image.interval = 1; + } + + if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw); + if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw); + if (asyw->set.image) wndw->func->image_set(wndw, asyw); + if (asyw->set.lut ) wndw->func->lut (wndw, asyw); + if (asyw->set.point) { + wndw->immd->point(wndw, asyw); + wndw->immd->update(wndw, interlock); + } + + return wndw->func->update ? 
wndw->func->update(wndw, interlock) : 0; +} + +static void +nv50_wndw_atomic_check_release(struct nv50_wndw *wndw, + struct nv50_wndw_atom *asyw, + struct nv50_head_atom *asyh) +{ + struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev); + NV_ATOMIC(drm, "%s release\n", wndw->plane.name); + wndw->func->release(wndw, asyw, asyh); + asyw->ntfy.handle = 0; + asyw->sema.handle = 0; +} + +static int +nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, + struct nv50_wndw_atom *asyw, + struct nv50_head_atom *asyh) +{ + struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb); + struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev); + int ret; + + NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name); + + asyw->image.w = fb->base.width; + asyw->image.h = fb->base.height; + asyw->image.kind = fb->nvbo->kind; + + if (asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) + asyw->interval = 0; + else + asyw->interval = 1; + + if (asyw->image.kind) { + asyw->image.layout = 0; + if (drm->client.device.info.chipset >= 0xc0) + asyw->image.block = fb->nvbo->mode >> 4; + else + asyw->image.block = fb->nvbo->mode; + asyw->image.pitch = (fb->base.pitches[0] / 4) << 4; + } else { + asyw->image.layout = 1; + asyw->image.block = 0; + asyw->image.pitch = fb->base.pitches[0]; + } + + ret = wndw->func->acquire(wndw, asyw, asyh); + if (ret) + return ret; + + if (asyw->set.image) { + if (!(asyw->image.mode = asyw->interval ? 0 : 1)) + asyw->image.interval = asyw->interval; + else + asyw->image.interval = 0; + } + + return 0; +} + +int +nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) +{ + struct nouveau_drm *drm = nouveau_drm(plane->dev); + struct nv50_wndw *wndw = nv50_wndw(plane); + struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state); + struct nv50_wndw_atom *asyw = nv50_wndw_atom(state); + struct nv50_head_atom *harm = NULL, *asyh = NULL; + bool varm = false, asyv = false, asym = false; + int ret; + + NV_ATOMIC(drm, "%s atomic_check\n", plane->name); + if (asyw->state.crtc) { + asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc); + if (IS_ERR(asyh)) + return PTR_ERR(asyh); + asym = drm_atomic_crtc_needs_modeset(&asyh->state); + asyv = asyh->state.active; + } + + if (armw->state.crtc) { + harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc); + if (IS_ERR(harm)) + return PTR_ERR(harm); + varm = harm->state.crtc->state->active; + } + + if (asyv) { + asyw->point.x = asyw->state.crtc_x; + asyw->point.y = asyw->state.crtc_y; + if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point))) + asyw->set.point = true; + + ret = nv50_wndw_atomic_check_acquire(wndw, asyw, asyh); + if (ret) + return ret; + } else + if (varm) { + nv50_wndw_atomic_check_release(wndw, asyw, harm); + } else { + return 0; + } + + if (!asyv || asym) { + asyw->clr.ntfy = armw->ntfy.handle != 0; + asyw->clr.sema = armw->sema.handle != 0; + if (wndw->func->image_clr) + asyw->clr.image = armw->image.handle != 0; + asyw->set.lut = wndw->func->lut && asyv; + } + + return 0; +} + +static void +nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state) +{ + struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb); + struct nouveau_drm *drm = nouveau_drm(plane->dev); + + NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb); + if (!old_state->fb) + return; + + nouveau_bo_unpin(fb->nvbo); +} + +static int +nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) +{ + struct nouveau_framebuffer *fb = 
nouveau_framebuffer(state->fb); + struct nouveau_drm *drm = nouveau_drm(plane->dev); + struct nv50_wndw *wndw = nv50_wndw(plane); + struct nv50_wndw_atom *asyw = nv50_wndw_atom(state); + struct nv50_head_atom *asyh; + struct nv50_wndw_ctxdma *ctxdma; + int ret; + + NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb); + if (!asyw->state.fb) + return 0; + + ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true); + if (ret) + return ret; + + ctxdma = nv50_wndw_ctxdma_new(wndw, fb); + if (IS_ERR(ctxdma)) { + nouveau_bo_unpin(fb->nvbo); + return PTR_ERR(ctxdma); + } + + asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv); + asyw->image.handle = ctxdma->object.handle; + asyw->image.offset = fb->nvbo->bo.offset; + + if (wndw->func->prepare) { + asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc); + if (IS_ERR(asyh)) + return PTR_ERR(asyh); + + wndw->func->prepare(wndw, asyh, asyw); + } + + return 0; +} + +static const struct drm_plane_helper_funcs +nv50_wndw_helper = { + .prepare_fb = nv50_wndw_prepare_fb, + .cleanup_fb = nv50_wndw_cleanup_fb, + .atomic_check = nv50_wndw_atomic_check, +}; + +static void +nv50_wndw_atomic_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct nv50_wndw_atom *asyw = nv50_wndw_atom(state); + __drm_atomic_helper_plane_destroy_state(&asyw->state); + kfree(asyw); +} + +static struct drm_plane_state * +nv50_wndw_atomic_duplicate_state(struct drm_plane *plane) +{ + struct nv50_wndw_atom *armw = nv50_wndw_atom(plane->state); + struct nv50_wndw_atom *asyw; + if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL))) + return NULL; + __drm_atomic_helper_plane_duplicate_state(plane, &asyw->state); + asyw->interval = 1; + asyw->sema = armw->sema; + asyw->ntfy = armw->ntfy; + asyw->image = armw->image; + asyw->point = armw->point; + asyw->lut = armw->lut; + asyw->clr.mask = 0; + asyw->set.mask = 0; + return &asyw->state; +} + +static void +nv50_wndw_reset(struct drm_plane *plane) +{ + struct nv50_wndw_atom *asyw; + + if (WARN_ON(!(asyw = kzalloc(sizeof(*asyw), GFP_KERNEL)))) + return; + + if (plane->state) + plane->funcs->atomic_destroy_state(plane, plane->state); + plane->state = &asyw->state; + plane->state->plane = plane; + plane->state->rotation = DRM_MODE_ROTATE_0; +} + +static void +nv50_wndw_destroy(struct drm_plane *plane) +{ + struct nv50_wndw *wndw = nv50_wndw(plane); + struct nv50_wndw_ctxdma *ctxdma, *ctxtmp; + + list_for_each_entry_safe(ctxdma, ctxtmp, &wndw->ctxdma.list, head) { + nv50_wndw_ctxdma_del(ctxdma); + } + + nvif_notify_fini(&wndw->notify); + nv50_dmac_destroy(&wndw->wimm); + nv50_dmac_destroy(&wndw->wndw); + drm_plane_cleanup(&wndw->plane); + kfree(wndw); +} + +const struct drm_plane_funcs +nv50_wndw = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = nv50_wndw_destroy, + .reset = nv50_wndw_reset, + .atomic_duplicate_state = nv50_wndw_atomic_duplicate_state, + .atomic_destroy_state = nv50_wndw_atomic_destroy_state, +}; + +static int +nv50_wndw_notify(struct nvif_notify *notify) +{ + return NVIF_NOTIFY_KEEP; +} + +void +nv50_wndw_fini(struct nv50_wndw *wndw) +{ + nvif_notify_put(&wndw->notify); +} + +void +nv50_wndw_init(struct nv50_wndw *wndw) +{ + nvif_notify_get(&wndw->notify); +} + +int +nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev, + enum drm_plane_type type, const char *name, int index, + const u32 *format, struct nv50_wndw **pwndw) +{ + struct nv50_wndw *wndw; + int nformat; + int ret; + + if 
(!(wndw = *pwndw = kzalloc(sizeof(*wndw), GFP_KERNEL))) + return -ENOMEM; + wndw->func = func; + wndw->id = index; + + wndw->ctxdma.parent = &wndw->wndw.base.user; + INIT_LIST_HEAD(&wndw->ctxdma.list); + + for (nformat = 0; format[nformat]; nformat++); + + ret = drm_universal_plane_init(dev, &wndw->plane, 0, &nv50_wndw, + format, nformat, NULL, + type, "%s-%d", name, index); + if (ret) { + kfree(*pwndw); + *pwndw = NULL; + return ret; + } + + drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper); + + wndw->notify.func = nv50_wndw_notify; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h new file mode 100644 index 000000000000..1931e3068115 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h @@ -0,0 +1,73 @@ +#ifndef __NV50_KMS_WNDW_H__ +#define __NV50_KMS_WNDW_H__ +#define nv50_wndw(p) container_of((p), struct nv50_wndw, plane) +#include "disp.h" +#include "atom.h" + +#include <nvif/notify.h> + +struct nv50_wndw_ctxdma { + struct list_head head; + struct nvif_object object; +}; + +struct nv50_wndw { + const struct nv50_wndw_func *func; + const struct nv50_wimm_func *immd; + int id; + + struct { + struct nvif_object *parent; + struct list_head list; + } ctxdma; + + struct drm_plane plane; + + struct nv50_dmac wndw; + struct nv50_dmac wimm; + + struct nvif_notify notify; + u16 ntfy; + u16 sema; + u32 data; +}; + +int nv50_wndw_new_(const struct nv50_wndw_func *, struct drm_device *, + enum drm_plane_type, const char *name, int index, + const u32 *format, struct nv50_wndw **); +void nv50_wndw_init(struct nv50_wndw *); +void nv50_wndw_fini(struct nv50_wndw *); +u32 nv50_wndw_flush_set(struct nv50_wndw *, u32 interlock, + struct nv50_wndw_atom *); +u32 nv50_wndw_flush_clr(struct nv50_wndw *, u32 interlock, bool flush, + struct nv50_wndw_atom *); +int nv50_wndw_wait_armed(struct nv50_wndw *, struct nv50_wndw_atom *); + +struct nv50_wndw_func { + int (*acquire)(struct nv50_wndw *, struct nv50_wndw_atom *asyw, + struct nv50_head_atom *asyh); + void (*release)(struct nv50_wndw *, struct nv50_wndw_atom *asyw, + struct nv50_head_atom *asyh); + void (*prepare)(struct nv50_wndw *, struct nv50_head_atom *asyh, + struct nv50_wndw_atom *asyw); + + void (*sema_set)(struct nv50_wndw *, struct nv50_wndw_atom *); + void (*sema_clr)(struct nv50_wndw *); + void (*ntfy_set)(struct nv50_wndw *, struct nv50_wndw_atom *); + void (*ntfy_clr)(struct nv50_wndw *); + int (*ntfy_wait_begun)(struct nv50_wndw *, struct nv50_wndw_atom *); + void (*image_set)(struct nv50_wndw *, struct nv50_wndw_atom *); + void (*image_clr)(struct nv50_wndw *); + void (*lut)(struct nv50_wndw *, struct nv50_wndw_atom *); + + u32 (*update)(struct nv50_wndw *, u32 interlock); +}; + +extern const struct drm_plane_funcs nv50_wndw; + +struct nv50_wimm_func { + void (*point)(struct nv50_wndw *, struct nv50_wndw_atom *); + + u32 (*update)(struct nv50_wndw *, u32 interlock); +}; +#endif |