67 files changed, 3009 insertions, 377 deletions
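The new driver is wired into the HiSilicon DRM build by the Kconfig and Makefile hunks below: DRM_HISI_HIBMC depends on DRM and PCI, selects DRM_KMS_HELPER and DRM_TTM, and, per the help text, builds as the hibmc-drm module when set to M. As a hedged sketch (not part of the commit itself), a kernel .config fragment enabling it as a module could look like:

    CONFIG_DRM=y
    CONFIG_PCI=y
    CONFIG_DRM_HISI_HIBMC=m
    # DRM_KMS_HELPER and DRM_TTM are pulled in automatically via "select"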
diff --git a/MAINTAINERS b/MAINTAINERS index 3862af19b9a3..520002480058 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4153,6 +4153,7 @@ F: drivers/gpu/drm/gma500/ DRM DRIVERS FOR HISILICON M: Xinliang Liu <z.liuxinliang@hisilicon.com> +M: Rongrong Zou <zourongrong@gmail.com> R: Xinwei Kong <kong.kongxinwei@hisilicon.com> R: Chen Feng <puck.chen@hisilicon.com> L: dri-devel@lists.freedesktop.org diff --git a/drivers/gpu/drm/hisilicon/Kconfig b/drivers/gpu/drm/hisilicon/Kconfig index 558c61b1b8e8..2fd2724b7a7d 100644 --- a/drivers/gpu/drm/hisilicon/Kconfig +++ b/drivers/gpu/drm/hisilicon/Kconfig @@ -2,4 +2,5 @@ # hisilicon drm device configuration. # Please keep this list sorted alphabetically +source "drivers/gpu/drm/hisilicon/hibmc/Kconfig" source "drivers/gpu/drm/hisilicon/kirin/Kconfig" diff --git a/drivers/gpu/drm/hisilicon/Makefile b/drivers/gpu/drm/hisilicon/Makefile index e3f6d493c996..c8155bfb1ff1 100644 --- a/drivers/gpu/drm/hisilicon/Makefile +++ b/drivers/gpu/drm/hisilicon/Makefile @@ -2,4 +2,5 @@ # Makefile for hisilicon drm drivers. # Please keep this list sorted alphabetically +obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc/ obj-$(CONFIG_DRM_HISI_KIRIN) += kirin/ diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig new file mode 100644 index 000000000000..380622a0da35 --- /dev/null +++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig @@ -0,0 +1,9 @@ +config DRM_HISI_HIBMC + tristate "DRM Support for Hisilicon Hibmc" + depends on DRM && PCI + select DRM_KMS_HELPER + select DRM_TTM + + help + Choose this option if you have a Hisilicon Hibmc soc chipset. + If M is selected the module will be called hibmc-drm. diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile new file mode 100644 index 000000000000..f2e04c035673 --- /dev/null +++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile @@ -0,0 +1,4 @@ +ccflags-y := -Iinclude/drm +hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_fbdev.o hibmc_ttm.o + +obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c new file mode 100644 index 000000000000..2a1386e33126 --- /dev/null +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c @@ -0,0 +1,477 @@ +/* Hisilicon Hibmc SoC drm driver + * + * Based on the bochs drm driver. + * + * Copyright (c) 2016 Huawei Limited. + * + * Author: + * Rongrong Zou <zourongrong@huawei.com> + * Rongrong Zou <zourongrong@gmail.com> + * Jianhua Li <lijianhua@huawei.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + */ + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_plane_helper.h> + +#include "hibmc_drm_drv.h" +#include "hibmc_drm_regs.h" + +struct hibmc_display_panel_pll { + unsigned long M; + unsigned long N; + unsigned long OD; + unsigned long POD; +}; + +struct hibmc_dislay_pll_config { + unsigned long hdisplay; + unsigned long vdisplay; + u32 pll1_config_value; + u32 pll2_config_value; +}; + +static const struct hibmc_dislay_pll_config hibmc_pll_table[] = { + {800, 600, CRT_PLL1_HS_40MHZ, CRT_PLL2_HS_40MHZ}, + {1024, 768, CRT_PLL1_HS_65MHZ, CRT_PLL2_HS_65MHZ}, + {1152, 864, CRT_PLL1_HS_80MHZ_1152, CRT_PLL2_HS_80MHZ}, + {1280, 768, CRT_PLL1_HS_80MHZ, CRT_PLL2_HS_80MHZ}, + {1280, 720, CRT_PLL1_HS_74MHZ, CRT_PLL2_HS_74MHZ}, + {1280, 960, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ}, + {1280, 1024, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ}, + {1600, 1200, CRT_PLL1_HS_162MHZ, CRT_PLL2_HS_162MHZ}, + {1920, 1080, CRT_PLL1_HS_148MHZ, CRT_PLL2_HS_148MHZ}, + {1920, 1200, CRT_PLL1_HS_193MHZ, CRT_PLL2_HS_193MHZ}, +}; + +#define PADDING(align, data) (((data) + (align) - 1) & (~((align) - 1))) + +static int hibmc_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct drm_framebuffer *fb = state->fb; + struct drm_crtc *crtc = state->crtc; + struct drm_crtc_state *crtc_state; + u32 src_w = state->src_w >> 16; + u32 src_h = state->src_h >> 16; + + if (!crtc || !fb) + return 0; + + crtc_state = drm_atomic_get_crtc_state(state->state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + if (src_w != state->crtc_w || src_h != state->crtc_h) { + DRM_DEBUG_ATOMIC("scale not support\n"); + return -EINVAL; + } + + if (state->crtc_x < 0 || state->crtc_y < 0) { + DRM_DEBUG_ATOMIC("crtc_x/y of drm_plane state is invalid\n"); + return -EINVAL; + } + + if (state->crtc_x + state->crtc_w > + crtc_state->adjusted_mode.hdisplay || + state->crtc_y + state->crtc_h > + crtc_state->adjusted_mode.vdisplay) { + DRM_DEBUG_ATOMIC("visible portion of plane is invalid\n"); + return -EINVAL; + } + + return 0; +} + +static void hibmc_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct drm_plane_state *state = plane->state; + u32 reg; + int ret; + u64 gpu_addr = 0; + unsigned int line_l; + struct hibmc_drm_private *priv = plane->dev->dev_private; + struct hibmc_framebuffer *hibmc_fb; + struct hibmc_bo *bo; + + if (!state->fb) + return; + + hibmc_fb = to_hibmc_framebuffer(state->fb); + bo = gem_to_hibmc_bo(hibmc_fb->obj); + ret = ttm_bo_reserve(&bo->bo, true, false, NULL); + if (ret) { + DRM_ERROR("failed to reserve ttm_bo: %d", ret); + return; + } + + ret = hibmc_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr); + ttm_bo_unreserve(&bo->bo); + if (ret) { + DRM_ERROR("failed to pin hibmc_bo: %d", ret); + return; + } + + writel(gpu_addr, priv->mmio + HIBMC_CRT_FB_ADDRESS); + + reg = state->fb->width * (state->fb->bits_per_pixel / 8); + /* now line_pad is 16 */ + reg = PADDING(16, reg); + + line_l = state->fb->width * state->fb->bits_per_pixel / 8; + line_l = PADDING(16, line_l); + writel(HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_WIDTH, reg) | + HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_OFFS, line_l), + priv->mmio + HIBMC_CRT_FB_WIDTH); + + /* SET PIXEL FORMAT */ + reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL); + reg &= ~HIBMC_CRT_DISP_CTL_FORMAT_MASK; + reg |= HIBMC_FIELD(HIBMC_CRT_DISP_CTL_FORMAT, + state->fb->bits_per_pixel / 16); + writel(reg, priv->mmio + HIBMC_CRT_DISP_CTL); +} + +static const u32 
channel_formats1[] = { + DRM_FORMAT_RGB565, DRM_FORMAT_BGR565, DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBA8888, DRM_FORMAT_BGRA8888, DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888 +}; + +static struct drm_plane_funcs hibmc_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .set_property = drm_atomic_helper_plane_set_property, + .destroy = drm_plane_cleanup, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; + +static const struct drm_plane_helper_funcs hibmc_plane_helper_funcs = { + .atomic_check = hibmc_plane_atomic_check, + .atomic_update = hibmc_plane_atomic_update, +}; + +static struct drm_plane *hibmc_plane_init(struct hibmc_drm_private *priv) +{ + struct drm_device *dev = priv->dev; + struct drm_plane *plane; + int ret = 0; + + plane = devm_kzalloc(dev->dev, sizeof(*plane), GFP_KERNEL); + if (!plane) { + DRM_ERROR("failed to alloc memory when init plane\n"); + return ERR_PTR(-ENOMEM); + } + /* + * plane init + * TODO: Now only support primary plane, overlay planes + * need to do. + */ + ret = drm_universal_plane_init(dev, plane, 1, &hibmc_plane_funcs, + channel_formats1, + ARRAY_SIZE(channel_formats1), + DRM_PLANE_TYPE_PRIMARY, + NULL); + if (ret) { + DRM_ERROR("failed to init plane: %d\n", ret); + return ERR_PTR(ret); + } + + drm_plane_helper_add(plane, &hibmc_plane_helper_funcs); + return plane; +} + +static void hibmc_crtc_enable(struct drm_crtc *crtc) +{ + unsigned int reg; + struct hibmc_drm_private *priv = crtc->dev->dev_private; + + hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0); + + /* Enable display power gate & LOCALMEM power gate*/ + reg = readl(priv->mmio + HIBMC_CURRENT_GATE); + reg &= ~HIBMC_CURR_GATE_LOCALMEM_MASK; + reg &= ~HIBMC_CURR_GATE_DISPLAY_MASK; + reg |= HIBMC_CURR_GATE_LOCALMEM(1); + reg |= HIBMC_CURR_GATE_DISPLAY(1); + hibmc_set_current_gate(priv, reg); + drm_crtc_vblank_on(crtc); +} + +static void hibmc_crtc_disable(struct drm_crtc *crtc) +{ + unsigned int reg; + struct hibmc_drm_private *priv = crtc->dev->dev_private; + + drm_crtc_vblank_off(crtc); + + hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_SLEEP); + + /* Enable display power gate & LOCALMEM power gate*/ + reg = readl(priv->mmio + HIBMC_CURRENT_GATE); + reg &= ~HIBMC_CURR_GATE_LOCALMEM_MASK; + reg &= ~HIBMC_CURR_GATE_DISPLAY_MASK; + reg |= HIBMC_CURR_GATE_LOCALMEM(0); + reg |= HIBMC_CURR_GATE_DISPLAY(0); + hibmc_set_current_gate(priv, reg); +} + +static unsigned int format_pll_reg(void) +{ + unsigned int pllreg = 0; + struct hibmc_display_panel_pll pll = {0}; + + /* + * Note that all PLL's have the same format. Here, + * we just use Panel PLL parameter to work out the bit + * fields in the register.On returning a 32 bit number, the value can + * be applied to any PLL in the calling function. 
+ */ + pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_BYPASS, 0); + pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_POWER, 1); + pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_INPUT, 0); + pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_POD, pll.POD); + pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_OD, pll.OD); + pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_N, pll.N); + pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_M, pll.M); + + return pllreg; +} + +static void set_vclock_hisilicon(struct drm_device *dev, unsigned long pll) +{ + u32 val; + struct hibmc_drm_private *priv = dev->dev_private; + + val = readl(priv->mmio + CRT_PLL1_HS); + val &= ~(CRT_PLL1_HS_OUTER_BYPASS(1)); + writel(val, priv->mmio + CRT_PLL1_HS); + + val = CRT_PLL1_HS_INTER_BYPASS(1) | CRT_PLL1_HS_POWERON(1); + writel(val, priv->mmio + CRT_PLL1_HS); + + writel(pll, priv->mmio + CRT_PLL1_HS); + + usleep_range(1000, 2000); + + val = pll & ~(CRT_PLL1_HS_POWERON(1)); + writel(val, priv->mmio + CRT_PLL1_HS); + + usleep_range(1000, 2000); + + val &= ~(CRT_PLL1_HS_INTER_BYPASS(1)); + writel(val, priv->mmio + CRT_PLL1_HS); + + usleep_range(1000, 2000); + + val |= CRT_PLL1_HS_OUTER_BYPASS(1); + writel(val, priv->mmio + CRT_PLL1_HS); +} + +static void get_pll_config(unsigned long x, unsigned long y, + u32 *pll1, u32 *pll2) +{ + int i; + int count = ARRAY_SIZE(hibmc_pll_table); + + for (i = 0; i < count; i++) { + if (hibmc_pll_table[i].hdisplay == x && + hibmc_pll_table[i].vdisplay == y) { + *pll1 = hibmc_pll_table[i].pll1_config_value; + *pll2 = hibmc_pll_table[i].pll2_config_value; + return; + } + } + + /* if found none, we use default value */ + *pll1 = CRT_PLL1_HS_25MHZ; + *pll2 = CRT_PLL2_HS_25MHZ; +} + +/* + * This function takes care the extra registers and bit fields required to + * setup a mode in board. + * Explanation about Display Control register: + * FPGA only supports 7 predefined pixel clocks, and clock select is + * in bit 4:0 of new register 0x802a8. + */ +static unsigned int display_ctrl_adjust(struct drm_device *dev, + struct drm_display_mode *mode, + unsigned int ctrl) +{ + unsigned long x, y; + u32 pll1; /* bit[31:0] of PLL */ + u32 pll2; /* bit[63:32] of PLL */ + struct hibmc_drm_private *priv = dev->dev_private; + + x = mode->hdisplay; + y = mode->vdisplay; + + get_pll_config(x, y, &pll1, &pll2); + writel(pll2, priv->mmio + CRT_PLL2_HS); + set_vclock_hisilicon(dev, pll1); + + /* + * Hisilicon has to set up the top-left and bottom-right + * registers as well. + * Note that normal chip only use those two register for + * auto-centering mode. + */ + writel(HIBMC_FIELD(HIBMC_CRT_AUTO_CENTERING_TL_TOP, 0) | + HIBMC_FIELD(HIBMC_CRT_AUTO_CENTERING_TL_LEFT, 0), + priv->mmio + HIBMC_CRT_AUTO_CENTERING_TL); + + writel(HIBMC_FIELD(HIBMC_CRT_AUTO_CENTERING_BR_BOTTOM, y - 1) | + HIBMC_FIELD(HIBMC_CRT_AUTO_CENTERING_BR_RIGHT, x - 1), + priv->mmio + HIBMC_CRT_AUTO_CENTERING_BR); + + /* + * Assume common fields in ctrl have been properly set before + * calling this function. + * This function only sets the extra fields in ctrl. 
+ */ + + /* Set bit 25 of display controller: Select CRT or VGA clock */ + ctrl &= ~HIBMC_CRT_DISP_CTL_CRTSELECT_MASK; + ctrl &= ~HIBMC_CRT_DISP_CTL_CLOCK_PHASE_MASK; + + ctrl |= HIBMC_CRT_DISP_CTL_CRTSELECT(HIBMC_CRTSELECT_CRT); + + /* clock_phase_polarity is 0 */ + ctrl |= HIBMC_CRT_DISP_CTL_CLOCK_PHASE(0); + + writel(ctrl, priv->mmio + HIBMC_CRT_DISP_CTL); + + return ctrl; +} + +static void hibmc_crtc_mode_set_nofb(struct drm_crtc *crtc) +{ + unsigned int val; + struct drm_display_mode *mode = &crtc->state->mode; + struct drm_device *dev = crtc->dev; + struct hibmc_drm_private *priv = dev->dev_private; + int width = mode->hsync_end - mode->hsync_start; + int height = mode->vsync_end - mode->vsync_start; + + writel(format_pll_reg(), priv->mmio + HIBMC_CRT_PLL_CTRL); + writel(HIBMC_FIELD(HIBMC_CRT_HORZ_TOTAL_TOTAL, mode->htotal - 1) | + HIBMC_FIELD(HIBMC_CRT_HORZ_TOTAL_DISP_END, mode->hdisplay - 1), + priv->mmio + HIBMC_CRT_HORZ_TOTAL); + + writel(HIBMC_FIELD(HIBMC_CRT_HORZ_SYNC_WIDTH, width) | + HIBMC_FIELD(HIBMC_CRT_HORZ_SYNC_START, mode->hsync_start - 1), + priv->mmio + HIBMC_CRT_HORZ_SYNC); + + writel(HIBMC_FIELD(HIBMC_CRT_VERT_TOTAL_TOTAL, mode->vtotal - 1) | + HIBMC_FIELD(HIBMC_CRT_VERT_TOTAL_DISP_END, mode->vdisplay - 1), + priv->mmio + HIBMC_CRT_VERT_TOTAL); + + writel(HIBMC_FIELD(HIBMC_CRT_VERT_SYNC_HEIGHT, height) | + HIBMC_FIELD(HIBMC_CRT_VERT_SYNC_START, mode->vsync_start - 1), + priv->mmio + HIBMC_CRT_VERT_SYNC); + + val = HIBMC_FIELD(HIBMC_CRT_DISP_CTL_VSYNC_PHASE, 0); + val |= HIBMC_FIELD(HIBMC_CRT_DISP_CTL_HSYNC_PHASE, 0); + val |= HIBMC_CRT_DISP_CTL_TIMING(1); + val |= HIBMC_CRT_DISP_CTL_PLANE(1); + + display_ctrl_adjust(dev, mode, val); +} + +static void hibmc_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + unsigned int reg; + struct drm_device *dev = crtc->dev; + struct hibmc_drm_private *priv = dev->dev_private; + + hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0); + + /* Enable display power gate & LOCALMEM power gate*/ + reg = readl(priv->mmio + HIBMC_CURRENT_GATE); + reg &= ~HIBMC_CURR_GATE_DISPLAY_MASK; + reg &= ~HIBMC_CURR_GATE_LOCALMEM_MASK; + reg |= HIBMC_CURR_GATE_DISPLAY(1); + reg |= HIBMC_CURR_GATE_LOCALMEM(1); + hibmc_set_current_gate(priv, reg); + + /* We can add more initialization as needed. 
*/ +} + +static void hibmc_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) + +{ + unsigned long flags; + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + if (crtc->state->event) + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); +} + +static const struct drm_crtc_funcs hibmc_crtc_funcs = { + .page_flip = drm_atomic_helper_page_flip, + .set_config = drm_atomic_helper_set_config, + .destroy = drm_crtc_cleanup, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, +}; + +static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = { + .enable = hibmc_crtc_enable, + .disable = hibmc_crtc_disable, + .mode_set_nofb = hibmc_crtc_mode_set_nofb, + .atomic_begin = hibmc_crtc_atomic_begin, + .atomic_flush = hibmc_crtc_atomic_flush, +}; + +int hibmc_de_init(struct hibmc_drm_private *priv) +{ + struct drm_device *dev = priv->dev; + struct drm_crtc *crtc; + struct drm_plane *plane; + int ret; + + plane = hibmc_plane_init(priv); + if (IS_ERR(plane)) { + DRM_ERROR("failed to create plane: %ld\n", PTR_ERR(plane)); + return PTR_ERR(plane); + } + + crtc = devm_kzalloc(dev->dev, sizeof(*crtc), GFP_KERNEL); + if (!crtc) { + DRM_ERROR("failed to alloc memory when init crtc\n"); + return -ENOMEM; + } + + ret = drm_crtc_init_with_planes(dev, crtc, plane, + NULL, &hibmc_crtc_funcs, NULL); + if (ret) { + DRM_ERROR("failed to init crtc: %d\n", ret); + return ret; + } + + ret = drm_mode_crtc_set_gamma_size(crtc, 256); + if (ret) { + DRM_ERROR("failed to set gamma size: %d\n", ret); + return ret; + } + drm_crtc_helper_add(crtc, &hibmc_crtc_helper_funcs); + + return 0; +} diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c new file mode 100644 index 000000000000..73ba8b05f1da --- /dev/null +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -0,0 +1,456 @@ +/* Hisilicon Hibmc SoC drm driver + * + * Based on the bochs drm driver. + * + * Copyright (c) 2016 Huawei Limited. + * + * Author: + * Rongrong Zou <zourongrong@huawei.com> + * Rongrong Zou <zourongrong@gmail.com> + * Jianhua Li <lijianhua@huawei.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + */ + +#include <linux/console.h> +#include <linux/module.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc_helper.h> + +#include "hibmc_drm_drv.h" +#include "hibmc_drm_regs.h" + +static const struct file_operations hibmc_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, + .compat_ioctl = drm_compat_ioctl, + .mmap = hibmc_mmap, + .poll = drm_poll, + .read = drm_read, + .llseek = no_llseek, +}; + +static int hibmc_enable_vblank(struct drm_device *dev, unsigned int pipe) +{ + struct hibmc_drm_private *priv = + (struct hibmc_drm_private *)dev->dev_private; + + writel(HIBMC_RAW_INTERRUPT_EN_VBLANK(1), + priv->mmio + HIBMC_RAW_INTERRUPT_EN); + + return 0; +} + +static void hibmc_disable_vblank(struct drm_device *dev, unsigned int pipe) +{ + struct hibmc_drm_private *priv = + (struct hibmc_drm_private *)dev->dev_private; + + writel(HIBMC_RAW_INTERRUPT_EN_VBLANK(0), + priv->mmio + HIBMC_RAW_INTERRUPT_EN); +} + +irqreturn_t hibmc_drm_interrupt(int irq, void *arg) +{ + struct drm_device *dev = (struct drm_device *)arg; + struct hibmc_drm_private *priv = + (struct hibmc_drm_private *)dev->dev_private; + u32 status; + + status = readl(priv->mmio + HIBMC_RAW_INTERRUPT); + + if (status & HIBMC_RAW_INTERRUPT_VBLANK(1)) { + writel(HIBMC_RAW_INTERRUPT_VBLANK(1), + priv->mmio + HIBMC_RAW_INTERRUPT); + drm_handle_vblank(dev, 0); + } + + return IRQ_HANDLED; +} + +static struct drm_driver hibmc_driver = { + .driver_features = DRIVER_GEM | DRIVER_MODESET | + DRIVER_ATOMIC | DRIVER_HAVE_IRQ, + .fops = &hibmc_fops, + .name = "hibmc", + .date = "20160828", + .desc = "hibmc drm driver", + .major = 1, + .minor = 0, + .get_vblank_counter = drm_vblank_no_hw_counter, + .enable_vblank = hibmc_enable_vblank, + .disable_vblank = hibmc_disable_vblank, + .gem_free_object_unlocked = hibmc_gem_free_object, + .dumb_create = hibmc_dumb_create, + .dumb_map_offset = hibmc_dumb_mmap_offset, + .dumb_destroy = drm_gem_dumb_destroy, + .irq_handler = hibmc_drm_interrupt, +}; + +static int hibmc_pm_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct hibmc_drm_private *priv = drm_dev->dev_private; + + drm_kms_helper_poll_disable(drm_dev); + priv->suspend_state = drm_atomic_helper_suspend(drm_dev); + if (IS_ERR(priv->suspend_state)) { + DRM_ERROR("drm_atomic_helper_suspend failed: %ld\n", + PTR_ERR(priv->suspend_state)); + drm_kms_helper_poll_enable(drm_dev); + return PTR_ERR(priv->suspend_state); + } + + return 0; +} + +static int hibmc_pm_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct hibmc_drm_private *priv = drm_dev->dev_private; + + drm_atomic_helper_resume(drm_dev, priv->suspend_state); + drm_kms_helper_poll_enable(drm_dev); + + return 0; +} + +static const struct dev_pm_ops hibmc_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(hibmc_pm_suspend, + hibmc_pm_resume) +}; + +static int hibmc_kms_init(struct hibmc_drm_private *priv) +{ + int ret; + + drm_mode_config_init(priv->dev); + priv->mode_config_initialized = true; + + priv->dev->mode_config.min_width = 0; + priv->dev->mode_config.min_height = 0; + priv->dev->mode_config.max_width = 1920; + priv->dev->mode_config.max_height = 1440; + + priv->dev->mode_config.fb_base = priv->fb_base; + priv->dev->mode_config.preferred_depth = 24; + priv->dev->mode_config.prefer_shadow = 0; + + priv->dev->mode_config.funcs = (void *)&hibmc_mode_funcs; + + ret = 
hibmc_de_init(priv); + if (ret) { + DRM_ERROR("failed to init de: %d\n", ret); + return ret; + } + + ret = hibmc_vdac_init(priv); + if (ret) { + DRM_ERROR("failed to init vdac: %d\n", ret); + return ret; + } + + return 0; +} + +static void hibmc_kms_fini(struct hibmc_drm_private *priv) +{ + if (priv->mode_config_initialized) { + drm_mode_config_cleanup(priv->dev); + priv->mode_config_initialized = false; + } +} + +/* + * It can operate in one of three modes: 0, 1 or Sleep. + */ +void hibmc_set_power_mode(struct hibmc_drm_private *priv, + unsigned int power_mode) +{ + unsigned int control_value = 0; + void __iomem *mmio = priv->mmio; + unsigned int input = 1; + + if (power_mode > HIBMC_PW_MODE_CTL_MODE_SLEEP) + return; + + if (power_mode == HIBMC_PW_MODE_CTL_MODE_SLEEP) + input = 0; + + control_value = readl(mmio + HIBMC_POWER_MODE_CTRL); + control_value &= ~(HIBMC_PW_MODE_CTL_MODE_MASK | + HIBMC_PW_MODE_CTL_OSC_INPUT_MASK); + control_value |= HIBMC_FIELD(HIBMC_PW_MODE_CTL_MODE, power_mode); + control_value |= HIBMC_FIELD(HIBMC_PW_MODE_CTL_OSC_INPUT, input); + writel(control_value, mmio + HIBMC_POWER_MODE_CTRL); +} + +void hibmc_set_current_gate(struct hibmc_drm_private *priv, unsigned int gate) +{ + unsigned int gate_reg; + unsigned int mode; + void __iomem *mmio = priv->mmio; + + /* Get current power mode. */ + mode = (readl(mmio + HIBMC_POWER_MODE_CTRL) & + HIBMC_PW_MODE_CTL_MODE_MASK) >> HIBMC_PW_MODE_CTL_MODE_SHIFT; + + switch (mode) { + case HIBMC_PW_MODE_CTL_MODE_MODE0: + gate_reg = HIBMC_MODE0_GATE; + break; + + case HIBMC_PW_MODE_CTL_MODE_MODE1: + gate_reg = HIBMC_MODE1_GATE; + break; + + default: + gate_reg = HIBMC_MODE0_GATE; + break; + } + writel(gate, mmio + gate_reg); +} + +static void hibmc_hw_config(struct hibmc_drm_private *priv) +{ + unsigned int reg; + + /* On hardware reset, power mode 0 is default. */ + hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0); + + /* Enable display power gate & LOCALMEM power gate*/ + reg = readl(priv->mmio + HIBMC_CURRENT_GATE); + reg &= ~HIBMC_CURR_GATE_DISPLAY_MASK; + reg &= ~HIBMC_CURR_GATE_LOCALMEM_MASK; + reg |= HIBMC_CURR_GATE_DISPLAY(1); + reg |= HIBMC_CURR_GATE_LOCALMEM(1); + + hibmc_set_current_gate(priv, reg); + + /* + * Reset the memory controller. If the memory controller + * is not reset in chip,the system might hang when sw accesses + * the memory.The memory should be resetted after + * changing the MXCLK. 
+ */ + reg = readl(priv->mmio + HIBMC_MISC_CTRL); + reg &= ~HIBMC_MSCCTL_LOCALMEM_RESET_MASK; + reg |= HIBMC_MSCCTL_LOCALMEM_RESET(0); + writel(reg, priv->mmio + HIBMC_MISC_CTRL); + + reg &= ~HIBMC_MSCCTL_LOCALMEM_RESET_MASK; + reg |= HIBMC_MSCCTL_LOCALMEM_RESET(1); + + writel(reg, priv->mmio + HIBMC_MISC_CTRL); +} + +static int hibmc_hw_map(struct hibmc_drm_private *priv) +{ + struct drm_device *dev = priv->dev; + struct pci_dev *pdev = dev->pdev; + resource_size_t addr, size, ioaddr, iosize; + + ioaddr = pci_resource_start(pdev, 1); + iosize = pci_resource_len(pdev, 1); + priv->mmio = devm_ioremap_nocache(dev->dev, ioaddr, iosize); + if (!priv->mmio) { + DRM_ERROR("Cannot map mmio region\n"); + return -ENOMEM; + } + + addr = pci_resource_start(pdev, 0); + size = pci_resource_len(pdev, 0); + priv->fb_map = devm_ioremap(dev->dev, addr, size); + if (!priv->fb_map) { + DRM_ERROR("Cannot map framebuffer\n"); + return -ENOMEM; + } + priv->fb_base = addr; + priv->fb_size = size; + + return 0; +} + +static int hibmc_hw_init(struct hibmc_drm_private *priv) +{ + int ret; + + ret = hibmc_hw_map(priv); + if (ret) + return ret; + + hibmc_hw_config(priv); + + return 0; +} + +static int hibmc_unload(struct drm_device *dev) +{ + struct hibmc_drm_private *priv = dev->dev_private; + + hibmc_fbdev_fini(priv); + + if (dev->irq_enabled) + drm_irq_uninstall(dev); + if (priv->msi_enabled) + pci_disable_msi(dev->pdev); + drm_vblank_cleanup(dev); + + hibmc_kms_fini(priv); + hibmc_mm_fini(priv); + dev->dev_private = NULL; + return 0; +} + +static int hibmc_load(struct drm_device *dev) +{ + struct hibmc_drm_private *priv; + int ret; + + priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + DRM_ERROR("no memory to allocate for hibmc_drm_private\n"); + return -ENOMEM; + } + dev->dev_private = priv; + priv->dev = dev; + + ret = hibmc_hw_init(priv); + if (ret) + goto err; + + ret = hibmc_mm_init(priv); + if (ret) + goto err; + + ret = hibmc_kms_init(priv); + if (ret) + goto err; + + ret = drm_vblank_init(dev, dev->mode_config.num_crtc); + if (ret) { + DRM_ERROR("failed to initialize vblank: %d\n", ret); + goto err; + } + + priv->msi_enabled = 0; + ret = pci_enable_msi(dev->pdev); + if (ret) { + DRM_WARN("enabling MSI failed: %d\n", ret); + } else { + priv->msi_enabled = 1; + ret = drm_irq_install(dev, dev->pdev->irq); + if (ret) + DRM_WARN("install irq failed: %d\n", ret); + } + + /* reset all the states of crtc/plane/encoder/connector */ + drm_mode_config_reset(dev); + + ret = hibmc_fbdev_init(priv); + if (ret) { + DRM_ERROR("failed to initialize fbdev: %d\n", ret); + goto err; + } + + return 0; + +err: + hibmc_unload(dev); + DRM_ERROR("failed to initialize drm driver: %d\n", ret); + return ret; +} + +static int hibmc_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct drm_device *dev; + int ret; + + dev = drm_dev_alloc(&hibmc_driver, &pdev->dev); + if (!dev) { + DRM_ERROR("failed to allocate drm_device\n"); + return -ENOMEM; + } + + dev->pdev = pdev; + pci_set_drvdata(pdev, dev); + + ret = pci_enable_device(pdev); + if (ret) { + DRM_ERROR("failed to enable pci device: %d\n", ret); + goto err_free; + } + + ret = hibmc_load(dev); + if (ret) { + DRM_ERROR("failed to load hibmc: %d\n", ret); + goto err_disable; + } + + ret = drm_dev_register(dev, 0); + if (ret) { + DRM_ERROR("failed to register drv for userspace access: %d\n", + ret); + goto err_unload; + } + return 0; + +err_unload: + hibmc_unload(dev); +err_disable: + pci_disable_device(pdev); +err_free: + 
drm_dev_unref(dev); + + return ret; +} + +static void hibmc_pci_remove(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + + drm_dev_unregister(dev); + hibmc_unload(dev); + drm_dev_unref(dev); +} + +static struct pci_device_id hibmc_pci_table[] = { + {0x19e5, 0x1711, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {0,} +}; + +static struct pci_driver hibmc_pci_driver = { + .name = "hibmc-drm", + .id_table = hibmc_pci_table, + .probe = hibmc_pci_probe, + .remove = hibmc_pci_remove, + .driver.pm = &hibmc_pm_ops, +}; + +static int __init hibmc_init(void) +{ + return pci_register_driver(&hibmc_pci_driver); +} + +static void __exit hibmc_exit(void) +{ + return pci_unregister_driver(&hibmc_pci_driver); +} + +module_init(hibmc_init); +module_exit(hibmc_exit); + +MODULE_DEVICE_TABLE(pci, hibmc_pci_table); +MODULE_AUTHOR("RongrongZou <zourongrong@huawei.com>"); +MODULE_DESCRIPTION("DRM Driver for Hisilicon Hibmc"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h new file mode 100644 index 000000000000..e195521eb41e --- /dev/null +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h @@ -0,0 +1,114 @@ +/* Hisilicon Hibmc SoC drm driver + * + * Based on the bochs drm driver. + * + * Copyright (c) 2016 Huawei Limited. + * + * Author: + * Rongrong Zou <zourongrong@huawei.com> + * Rongrong Zou <zourongrong@gmail.com> + * Jianhua Li <lijianhua@huawei.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#ifndef HIBMC_DRM_DRV_H +#define HIBMC_DRM_DRV_H + +#include <drm/drmP.h> +#include <drm/drm_atomic.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_gem.h> +#include <drm/ttm/ttm_bo_driver.h> + +struct hibmc_framebuffer { + struct drm_framebuffer fb; + struct drm_gem_object *obj; +}; + +struct hibmc_fbdev { + struct drm_fb_helper helper; + struct hibmc_framebuffer *fb; + int size; +}; + +struct hibmc_drm_private { + /* hw */ + void __iomem *mmio; + void __iomem *fb_map; + unsigned long fb_base; + unsigned long fb_size; + bool msi_enabled; + + /* drm */ + struct drm_device *dev; + bool mode_config_initialized; + struct drm_atomic_state *suspend_state; + + /* ttm */ + struct drm_global_reference mem_global_ref; + struct ttm_bo_global_ref bo_global_ref; + struct ttm_bo_device bdev; + bool initialized; + + /* fbdev */ + struct hibmc_fbdev *fbdev; + bool mm_inited; +}; + +#define to_hibmc_framebuffer(x) container_of(x, struct hibmc_framebuffer, fb) + +struct hibmc_bo { + struct ttm_buffer_object bo; + struct ttm_placement placement; + struct ttm_bo_kmap_obj kmap; + struct drm_gem_object gem; + struct ttm_place placements[3]; + int pin_count; +}; + +static inline struct hibmc_bo *hibmc_bo(struct ttm_buffer_object *bo) +{ + return container_of(bo, struct hibmc_bo, bo); +} + +static inline struct hibmc_bo *gem_to_hibmc_bo(struct drm_gem_object *gem) +{ + return container_of(gem, struct hibmc_bo, gem); +} + +void hibmc_set_power_mode(struct hibmc_drm_private *priv, + unsigned int power_mode); +void hibmc_set_current_gate(struct hibmc_drm_private *priv, + unsigned int gate); + +int hibmc_de_init(struct hibmc_drm_private *priv); +int hibmc_vdac_init(struct hibmc_drm_private *priv); +int hibmc_fbdev_init(struct hibmc_drm_private *priv); +void hibmc_fbdev_fini(struct hibmc_drm_private *priv); + 
+int hibmc_gem_create(struct drm_device *dev, u32 size, bool iskernel, + struct drm_gem_object **obj); +struct hibmc_framebuffer * +hibmc_framebuffer_init(struct drm_device *dev, + const struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_gem_object *obj); + +int hibmc_mm_init(struct hibmc_drm_private *hibmc); +void hibmc_mm_fini(struct hibmc_drm_private *hibmc); +int hibmc_bo_pin(struct hibmc_bo *bo, u32 pl_flag, u64 *gpu_addr); +int hibmc_bo_unpin(struct hibmc_bo *bo); +void hibmc_gem_free_object(struct drm_gem_object *obj); +int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args); +int hibmc_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev, + u32 handle, u64 *offset); +int hibmc_mmap(struct file *filp, struct vm_area_struct *vma); + +extern const struct drm_mode_config_funcs hibmc_mode_funcs; + +#endif diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c new file mode 100644 index 000000000000..9b0696735ba1 --- /dev/null +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c @@ -0,0 +1,267 @@ +/* Hisilicon Hibmc SoC drm driver + * + * Based on the bochs drm driver. + * + * Copyright (c) 2016 Huawei Limited. + * + * Author: + * Rongrong Zou <zourongrong@huawei.com> + * Rongrong Zou <zourongrong@gmail.com> + * Jianhua Li <lijianhua@huawei.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_fb_helper.h> + +#include "hibmc_drm_drv.h" + +static int hibmcfb_create_object( + struct hibmc_drm_private *priv, + const struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_gem_object **gobj_p) +{ + struct drm_gem_object *gobj; + struct drm_device *dev = priv->dev; + u32 size; + int ret = 0; + + size = mode_cmd->pitches[0] * mode_cmd->height; + ret = hibmc_gem_create(dev, size, true, &gobj); + if (ret) + return ret; + + *gobj_p = gobj; + return ret; +} + +static struct fb_ops hibmc_drm_fb_ops = { + .owner = THIS_MODULE, + .fb_check_var = drm_fb_helper_check_var, + .fb_set_par = drm_fb_helper_set_par, + .fb_fillrect = drm_fb_helper_sys_fillrect, + .fb_copyarea = drm_fb_helper_sys_copyarea, + .fb_imageblit = drm_fb_helper_sys_imageblit, + .fb_pan_display = drm_fb_helper_pan_display, + .fb_blank = drm_fb_helper_blank, + .fb_setcmap = drm_fb_helper_setcmap, +}; + +static int hibmc_drm_fb_create(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) +{ + struct hibmc_fbdev *hi_fbdev = + container_of(helper, struct hibmc_fbdev, helper); + struct hibmc_drm_private *priv = helper->dev->dev_private; + struct fb_info *info; + struct drm_mode_fb_cmd2 mode_cmd; + struct drm_gem_object *gobj = NULL; + int ret = 0; + int ret1; + size_t size; + unsigned int bytes_per_pixel; + struct hibmc_bo *bo = NULL; + + DRM_DEBUG_DRIVER("surface width(%d), height(%d) and bpp(%d)\n", + sizes->surface_width, sizes->surface_height, + sizes->surface_bpp); + sizes->surface_depth = 32; + + bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8); + + mode_cmd.width = sizes->surface_width; + mode_cmd.height = sizes->surface_height; + mode_cmd.pitches[0] = mode_cmd.width * bytes_per_pixel; + mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, + sizes->surface_depth); + + size = 
PAGE_ALIGN(mode_cmd.pitches[0] * mode_cmd.height); + + ret = hibmcfb_create_object(priv, &mode_cmd, &gobj); + if (ret) { + DRM_ERROR("failed to create fbcon backing object: %d\n", ret); + return -ENOMEM; + } + + bo = gem_to_hibmc_bo(gobj); + + ret = ttm_bo_reserve(&bo->bo, true, false, NULL); + if (ret) { + DRM_ERROR("failed to reserve ttm_bo: %d\n", ret); + goto out_unref_gem; + } + + ret = hibmc_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL); + if (ret) { + DRM_ERROR("failed to pin fbcon: %d\n", ret); + goto out_unreserve_ttm_bo; + } + + ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); + if (ret) { + DRM_ERROR("failed to kmap fbcon: %d\n", ret); + goto out_unpin_bo; + } + ttm_bo_unreserve(&bo->bo); + + info = drm_fb_helper_alloc_fbi(helper); + if (IS_ERR(info)) { + ret = PTR_ERR(info); + DRM_ERROR("failed to allocate fbi: %d\n", ret); + goto out_release_fbi; + } + + info->par = hi_fbdev; + + hi_fbdev->fb = hibmc_framebuffer_init(priv->dev, &mode_cmd, gobj); + if (IS_ERR(hi_fbdev->fb)) { + ret = PTR_ERR(info); + DRM_ERROR("failed to initialize framebuffer: %d\n", ret); + goto out_release_fbi; + } + + priv->fbdev->size = size; + hi_fbdev->helper.fb = &hi_fbdev->fb->fb; + + strcpy(info->fix.id, "hibmcdrmfb"); + + info->flags = FBINFO_DEFAULT; + info->fbops = &hibmc_drm_fb_ops; + + drm_fb_helper_fill_fix(info, hi_fbdev->fb->fb.pitches[0], + hi_fbdev->fb->fb.depth); + drm_fb_helper_fill_var(info, &priv->fbdev->helper, sizes->fb_width, + sizes->fb_height); + + info->screen_base = bo->kmap.virtual; + info->screen_size = size; + + info->fix.smem_start = bo->bo.mem.bus.offset + bo->bo.mem.bus.base; + info->fix.smem_len = size; + return 0; + +out_release_fbi: + drm_fb_helper_release_fbi(helper); + ret1 = ttm_bo_reserve(&bo->bo, true, false, NULL); + if (ret1) { + DRM_ERROR("failed to rsv ttm_bo when release fbi: %d\n", ret1); + goto out_unref_gem; + } + ttm_bo_kunmap(&bo->kmap); +out_unpin_bo: + hibmc_bo_unpin(bo); +out_unreserve_ttm_bo: + ttm_bo_unreserve(&bo->bo); +out_unref_gem: + drm_gem_object_unreference_unlocked(gobj); + + return ret; +} + +static void hibmc_fbdev_destroy(struct hibmc_fbdev *fbdev) +{ + struct hibmc_framebuffer *gfb = fbdev->fb; + struct drm_fb_helper *fbh = &fbdev->helper; + + drm_fb_helper_unregister_fbi(fbh); + drm_fb_helper_release_fbi(fbh); + + drm_fb_helper_fini(fbh); + + if (gfb) + drm_framebuffer_unreference(&gfb->fb); +} + +static const struct drm_fb_helper_funcs hibmc_fbdev_helper_funcs = { + .fb_probe = hibmc_drm_fb_create, +}; + +int hibmc_fbdev_init(struct hibmc_drm_private *priv) +{ + int ret; + struct fb_var_screeninfo *var; + struct fb_fix_screeninfo *fix; + struct hibmc_fbdev *hifbdev; + + hifbdev = devm_kzalloc(priv->dev->dev, sizeof(*hifbdev), GFP_KERNEL); + if (!hifbdev) { + DRM_ERROR("failed to allocate hibmc_fbdev\n"); + return -ENOMEM; + } + + priv->fbdev = hifbdev; + drm_fb_helper_prepare(priv->dev, &hifbdev->helper, + &hibmc_fbdev_helper_funcs); + + /* Now just one crtc and one channel */ + ret = drm_fb_helper_init(priv->dev, + &hifbdev->helper, 1, 1); + if (ret) { + DRM_ERROR("failed to initialize fb helper: %d\n", ret); + return ret; + } + + ret = drm_fb_helper_single_add_all_connectors(&hifbdev->helper); + if (ret) { + DRM_ERROR("failed to add all connectors: %d\n", ret); + goto fini; + } + + ret = drm_fb_helper_initial_config(&hifbdev->helper, 16); + if (ret) { + DRM_ERROR("failed to setup initial conn config: %d\n", ret); + goto fini; + } + + var = &hifbdev->helper.fbdev->var; + fix = &hifbdev->helper.fbdev->fix; + + DRM_DEBUG_DRIVER("Member of 
info->var is :\n" + "xres=%d\n" + "yres=%d\n" + "xres_virtual=%d\n" + "yres_virtual=%d\n" + "xoffset=%d\n" + "yoffset=%d\n" + "bits_per_pixel=%d\n" + "...\n", var->xres, var->yres, var->xres_virtual, + var->yres_virtual, var->xoffset, var->yoffset, + var->bits_per_pixel); + DRM_DEBUG_DRIVER("Member of info->fix is :\n" + "smem_start=%lx\n" + "smem_len=%d\n" + "type=%d\n" + "type_aux=%d\n" + "visual=%d\n" + "xpanstep=%d\n" + "ypanstep=%d\n" + "ywrapstep=%d\n" + "line_length=%d\n" + "accel=%d\n" + "capabilities=%d\n" + "...\n", fix->smem_start, fix->smem_len, fix->type, + fix->type_aux, fix->visual, fix->xpanstep, + fix->ypanstep, fix->ywrapstep, fix->line_length, + fix->accel, fix->capabilities); + + return 0; + +fini: + drm_fb_helper_fini(&hifbdev->helper); + return ret; +} + +void hibmc_fbdev_fini(struct hibmc_drm_private *priv) +{ + if (!priv->fbdev) + return; + + hibmc_fbdev_destroy(priv->fbdev); + priv->fbdev = NULL; +} diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h new file mode 100644 index 000000000000..f7035bf3ec1f --- /dev/null +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h @@ -0,0 +1,196 @@ +/* Hisilicon Hibmc SoC drm driver + * + * Based on the bochs drm driver. + * + * Copyright (c) 2016 Huawei Limited. + * + * Author: + * Rongrong Zou <zourongrong@huawei.com> + * Rongrong Zou <zourongrong@gmail.com> + * Jianhua Li <lijianhua@huawei.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#ifndef HIBMC_DRM_HW_H +#define HIBMC_DRM_HW_H + +/* register definition */ +#define HIBMC_MISC_CTRL 0x4 + +#define HIBMC_MSCCTL_LOCALMEM_RESET(x) ((x) << 6) +#define HIBMC_MSCCTL_LOCALMEM_RESET_MASK 0x40 + +#define HIBMC_CURRENT_GATE 0x000040 +#define HIBMC_CURR_GATE_DISPLAY(x) ((x) << 2) +#define HIBMC_CURR_GATE_DISPLAY_MASK 0x4 + +#define HIBMC_CURR_GATE_LOCALMEM(x) ((x) << 1) +#define HIBMC_CURR_GATE_LOCALMEM_MASK 0x2 + +#define HIBMC_MODE0_GATE 0x000044 +#define HIBMC_MODE1_GATE 0x000048 +#define HIBMC_POWER_MODE_CTRL 0x00004C + +#define HIBMC_PW_MODE_CTL_OSC_INPUT(x) ((x) << 3) +#define HIBMC_PW_MODE_CTL_OSC_INPUT_MASK 0x8 + +#define HIBMC_PW_MODE_CTL_MODE(x) ((x) << 0) +#define HIBMC_PW_MODE_CTL_MODE_MASK 0x03 +#define HIBMC_PW_MODE_CTL_MODE_SHIFT 0 + +#define HIBMC_PW_MODE_CTL_MODE_MODE0 0 +#define HIBMC_PW_MODE_CTL_MODE_MODE1 1 +#define HIBMC_PW_MODE_CTL_MODE_SLEEP 2 + +#define HIBMC_PANEL_PLL_CTRL 0x00005C +#define HIBMC_CRT_PLL_CTRL 0x000060 + +#define HIBMC_PLL_CTRL_BYPASS(x) ((x) << 18) +#define HIBMC_PLL_CTRL_BYPASS_MASK 0x40000 + +#define HIBMC_PLL_CTRL_POWER(x) ((x) << 17) +#define HIBMC_PLL_CTRL_POWER_MASK 0x20000 + +#define HIBMC_PLL_CTRL_INPUT(x) ((x) << 16) +#define HIBMC_PLL_CTRL_INPUT_MASK 0x10000 + +#define HIBMC_PLL_CTRL_POD(x) ((x) << 14) +#define HIBMC_PLL_CTRL_POD_MASK 0xC000 + +#define HIBMC_PLL_CTRL_OD(x) ((x) << 12) +#define HIBMC_PLL_CTRL_OD_MASK 0x3000 + +#define HIBMC_PLL_CTRL_N(x) ((x) << 8) +#define HIBMC_PLL_CTRL_N_MASK 0xF00 + +#define HIBMC_PLL_CTRL_M(x) ((x) << 0) +#define HIBMC_PLL_CTRL_M_MASK 0xFF + +#define HIBMC_CRT_DISP_CTL 0x80200 + +#define HIBMC_CRT_DISP_CTL_CRTSELECT(x) ((x) << 25) +#define HIBMC_CRT_DISP_CTL_CRTSELECT_MASK 0x2000000 + +#define HIBMC_CRTSELECT_CRT 1 + +#define HIBMC_CRT_DISP_CTL_CLOCK_PHASE(x) ((x) << 14) +#define 
HIBMC_CRT_DISP_CTL_CLOCK_PHASE_MASK 0x4000 + +#define HIBMC_CRT_DISP_CTL_VSYNC_PHASE(x) ((x) << 13) +#define HIBMC_CRT_DISP_CTL_VSYNC_PHASE_MASK 0x2000 + +#define HIBMC_CRT_DISP_CTL_HSYNC_PHASE(x) ((x) << 12) +#define HIBMC_CRT_DISP_CTL_HSYNC_PHASE_MASK 0x1000 + +#define HIBMC_CRT_DISP_CTL_TIMING(x) ((x) << 8) +#define HIBMC_CRT_DISP_CTL_TIMING_MASK 0x100 + +#define HIBMC_CRT_DISP_CTL_PLANE(x) ((x) << 2) +#define HIBMC_CRT_DISP_CTL_PLANE_MASK 4 + +#define HIBMC_CRT_DISP_CTL_FORMAT(x) ((x) << 0) +#define HIBMC_CRT_DISP_CTL_FORMAT_MASK 0x03 + +#define HIBMC_CRT_FB_ADDRESS 0x080204 + +#define HIBMC_CRT_FB_WIDTH 0x080208 +#define HIBMC_CRT_FB_WIDTH_WIDTH(x) ((x) << 16) +#define HIBMC_CRT_FB_WIDTH_WIDTH_MASK 0x3FFF0000 +#define HIBMC_CRT_FB_WIDTH_OFFS(x) ((x) << 0) +#define HIBMC_CRT_FB_WIDTH_OFFS_MASK 0x3FFF + +#define HIBMC_CRT_HORZ_TOTAL 0x08020C +#define HIBMC_CRT_HORZ_TOTAL_TOTAL(x) ((x) << 16) +#define HIBMC_CRT_HORZ_TOTAL_TOTAL_MASK 0xFFF0000 + +#define HIBMC_CRT_HORZ_TOTAL_DISP_END(x) ((x) << 0) +#define HIBMC_CRT_HORZ_TOTAL_DISP_END_MASK 0xFFF + +#define HIBMC_CRT_HORZ_SYNC 0x080210 +#define HIBMC_CRT_HORZ_SYNC_WIDTH(x) ((x) << 16) +#define HIBMC_CRT_HORZ_SYNC_WIDTH_MASK 0xFF0000 + +#define HIBMC_CRT_HORZ_SYNC_START(x) ((x) << 0) +#define HIBMC_CRT_HORZ_SYNC_START_MASK 0xFFF + +#define HIBMC_CRT_VERT_TOTAL 0x080214 +#define HIBMC_CRT_VERT_TOTAL_TOTAL(x) ((x) << 16) +#define HIBMC_CRT_VERT_TOTAL_TOTAL_MASK 0x7FFF0000 + +#define HIBMC_CRT_VERT_TOTAL_DISP_END(x) ((x) << 0) +#define HIBMC_CRT_VERT_TOTAL_DISP_END_MASK 0x7FF + +#define HIBMC_CRT_VERT_SYNC 0x080218 +#define HIBMC_CRT_VERT_SYNC_HEIGHT(x) ((x) << 16) +#define HIBMC_CRT_VERT_SYNC_HEIGHT_MASK 0x3F0000 + +#define HIBMC_CRT_VERT_SYNC_START(x) ((x) << 0) +#define HIBMC_CRT_VERT_SYNC_START_MASK 0x7FF + +/* Auto Centering */ +#define HIBMC_CRT_AUTO_CENTERING_TL 0x080280 +#define HIBMC_CRT_AUTO_CENTERING_TL_TOP(x) ((x) << 16) +#define HIBMC_CRT_AUTO_CENTERING_TL_TOP_MASK 0x7FF0000 + +#define HIBMC_CRT_AUTO_CENTERING_TL_LEFT(x) ((x) << 0) +#define HIBMC_CRT_AUTO_CENTERING_TL_LEFT_MASK 0x7FF + +#define HIBMC_CRT_AUTO_CENTERING_BR 0x080284 +#define HIBMC_CRT_AUTO_CENTERING_BR_BOTTOM(x) ((x) << 16) +#define HIBMC_CRT_AUTO_CENTERING_BR_BOTTOM_MASK 0x7FF0000 + +#define HIBMC_CRT_AUTO_CENTERING_BR_RIGHT(x) ((x) << 0) +#define HIBMC_CRT_AUTO_CENTERING_BR_RIGHT_MASK 0x7FF + +/* register to control panel output */ +#define HIBMC_DISPLAY_CONTROL_HISILE 0x80288 +#define HIBMC_DISPLAY_CONTROL_FPVDDEN(x) ((x) << 0) +#define HIBMC_DISPLAY_CONTROL_PANELDATE(x) ((x) << 1) +#define HIBMC_DISPLAY_CONTROL_FPEN(x) ((x) << 2) +#define HIBMC_DISPLAY_CONTROL_VBIASEN(x) ((x) << 3) + +#define HIBMC_RAW_INTERRUPT 0x80290 +#define HIBMC_RAW_INTERRUPT_VBLANK(x) ((x) << 2) +#define HIBMC_RAW_INTERRUPT_VBLANK_MASK 0x4 + +#define HIBMC_RAW_INTERRUPT_EN 0x80298 +#define HIBMC_RAW_INTERRUPT_EN_VBLANK(x) ((x) << 2) +#define HIBMC_RAW_INTERRUPT_EN_VBLANK_MASK 0x4 + +/* register and values for PLL control */ +#define CRT_PLL1_HS 0x802a8 +#define CRT_PLL1_HS_OUTER_BYPASS(x) ((x) << 30) +#define CRT_PLL1_HS_INTER_BYPASS(x) ((x) << 29) +#define CRT_PLL1_HS_POWERON(x) ((x) << 24) + +#define CRT_PLL1_HS_25MHZ 0x23d40f02 +#define CRT_PLL1_HS_40MHZ 0x23940801 +#define CRT_PLL1_HS_65MHZ 0x23940d01 +#define CRT_PLL1_HS_78MHZ 0x23540F82 +#define CRT_PLL1_HS_74MHZ 0x23941dc2 +#define CRT_PLL1_HS_80MHZ 0x23941001 +#define CRT_PLL1_HS_80MHZ_1152 0x23540fc2 +#define CRT_PLL1_HS_108MHZ 0x23b41b01 +#define CRT_PLL1_HS_162MHZ 0x23480681 +#define CRT_PLL1_HS_148MHZ 0x23541dc2 +#define 
CRT_PLL1_HS_193MHZ 0x234807c1 + +#define CRT_PLL2_HS 0x802ac +#define CRT_PLL2_HS_25MHZ 0x206B851E +#define CRT_PLL2_HS_40MHZ 0x30000000 +#define CRT_PLL2_HS_65MHZ 0x40000000 +#define CRT_PLL2_HS_78MHZ 0x50E147AE +#define CRT_PLL2_HS_74MHZ 0x602B6AE7 +#define CRT_PLL2_HS_80MHZ 0x70000000 +#define CRT_PLL2_HS_108MHZ 0x80000000 +#define CRT_PLL2_HS_162MHZ 0xA0000000 +#define CRT_PLL2_HS_148MHZ 0xB0CCCCCD +#define CRT_PLL2_HS_193MHZ 0xC0872B02 + +#define HIBMC_FIELD(field, value) (field(value) & field##_MASK) +#endif diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c new file mode 100644 index 000000000000..d1f67a9d4d86 --- /dev/null +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c @@ -0,0 +1,147 @@ +/* Hisilicon Hibmc SoC drm driver + * + * Based on the bochs drm driver. + * + * Copyright (c) 2016 Huawei Limited. + * + * Author: + * Rongrong Zou <zourongrong@huawei.com> + * Rongrong Zou <zourongrong@gmail.com> + * Jianhua Li <lijianhua@huawei.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc_helper.h> + +#include "hibmc_drm_drv.h" +#include "hibmc_drm_regs.h" + +static int hibmc_connector_get_modes(struct drm_connector *connector) +{ + return drm_add_modes_noedid(connector, 800, 600); +} + +static int hibmc_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + return MODE_OK; +} + +static struct drm_encoder * +hibmc_connector_best_encoder(struct drm_connector *connector) +{ + return drm_encoder_find(connector->dev, connector->encoder_ids[0]); +} + +static enum drm_connector_status hibmc_connector_detect(struct drm_connector + *connector, bool force) +{ + return connector_status_connected; +} + +static const struct drm_connector_helper_funcs + hibmc_connector_helper_funcs = { + .get_modes = hibmc_connector_get_modes, + .mode_valid = hibmc_connector_mode_valid, + .best_encoder = hibmc_connector_best_encoder, +}; + +static const struct drm_connector_funcs hibmc_connector_funcs = { + .dpms = drm_atomic_helper_connector_dpms, + .detect = hibmc_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static struct drm_connector * +hibmc_connector_init(struct hibmc_drm_private *priv) +{ + struct drm_device *dev = priv->dev; + struct drm_connector *connector; + int ret; + + connector = devm_kzalloc(dev->dev, sizeof(*connector), GFP_KERNEL); + if (!connector) { + DRM_ERROR("failed to alloc memory when init connector\n"); + return ERR_PTR(-ENOMEM); + } + + ret = drm_connector_init(dev, connector, + &hibmc_connector_funcs, + DRM_MODE_CONNECTOR_VGA); + if (ret) { + DRM_ERROR("failed to init connector: %d\n", ret); + return ERR_PTR(ret); + } + drm_connector_helper_add(connector, + &hibmc_connector_helper_funcs); + + return connector; +} + +static void hibmc_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adj_mode) +{ + u32 reg; + struct drm_device *dev = encoder->dev; + struct hibmc_drm_private *priv = 
dev->dev_private; + + reg = readl(priv->mmio + HIBMC_DISPLAY_CONTROL_HISILE); + reg |= HIBMC_DISPLAY_CONTROL_FPVDDEN(1); + reg |= HIBMC_DISPLAY_CONTROL_PANELDATE(1); + reg |= HIBMC_DISPLAY_CONTROL_FPEN(1); + reg |= HIBMC_DISPLAY_CONTROL_VBIASEN(1); + writel(reg, priv->mmio + HIBMC_DISPLAY_CONTROL_HISILE); +} + +static const struct drm_encoder_helper_funcs hibmc_encoder_helper_funcs = { + .mode_set = hibmc_encoder_mode_set, +}; + +static const struct drm_encoder_funcs hibmc_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +int hibmc_vdac_init(struct hibmc_drm_private *priv) +{ + struct drm_device *dev = priv->dev; + struct drm_encoder *encoder; + struct drm_connector *connector; + int ret; + + connector = hibmc_connector_init(priv); + if (IS_ERR(connector)) { + DRM_ERROR("failed to create connector: %ld\n", + PTR_ERR(connector)); + return PTR_ERR(connector); + } + + encoder = devm_kzalloc(dev->dev, sizeof(*encoder), GFP_KERNEL); + if (!encoder) { + DRM_ERROR("failed to alloc memory when init encoder\n"); + return -ENOMEM; + } + + encoder->possible_crtcs = 0x1; + ret = drm_encoder_init(dev, encoder, &hibmc_encoder_funcs, + DRM_MODE_ENCODER_DAC, NULL); + if (ret) { + DRM_ERROR("failed to init encoder: %d\n", ret); + return ret; + } + + drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs); + drm_mode_connector_attach_encoder(connector, encoder); + + return 0; +} diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c new file mode 100644 index 000000000000..e76abf61edae --- /dev/null +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c @@ -0,0 +1,558 @@ +/* Hisilicon Hibmc SoC drm driver + * + * Based on the bochs drm driver. + * + * Copyright (c) 2016 Huawei Limited. + * + * Author: + * Rongrong Zou <zourongrong@huawei.com> + * Rongrong Zou <zourongrong@gmail.com> + * Jianhua Li <lijianhua@huawei.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ *
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <ttm/ttm_page_alloc.h>
+
+#include "hibmc_drm_drv.h"
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+static inline struct hibmc_drm_private *
+hibmc_bdev(struct ttm_bo_device *bd)
+{
+	return container_of(bd, struct hibmc_drm_private, bdev);
+}
+
+static int
+hibmc_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+	return ttm_mem_global_init(ref->object);
+}
+
+static void
+hibmc_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+	ttm_mem_global_release(ref->object);
+}
+
+static int hibmc_ttm_global_init(struct hibmc_drm_private *hibmc)
+{
+	int ret;
+
+	hibmc->mem_global_ref.global_type = DRM_GLOBAL_TTM_MEM;
+	hibmc->mem_global_ref.size = sizeof(struct ttm_mem_global);
+	hibmc->mem_global_ref.init = &hibmc_ttm_mem_global_init;
+	hibmc->mem_global_ref.release = &hibmc_ttm_mem_global_release;
+	ret = drm_global_item_ref(&hibmc->mem_global_ref);
+	if (ret) {
+		DRM_ERROR("could not get ref on ttm global: %d\n", ret);
+		return ret;
+	}
+
+	hibmc->bo_global_ref.mem_glob =
+		hibmc->mem_global_ref.object;
+	hibmc->bo_global_ref.ref.global_type = DRM_GLOBAL_TTM_BO;
+	hibmc->bo_global_ref.ref.size = sizeof(struct ttm_bo_global);
+	hibmc->bo_global_ref.ref.init = &ttm_bo_global_init;
+	hibmc->bo_global_ref.ref.release = &ttm_bo_global_release;
+	ret = drm_global_item_ref(&hibmc->bo_global_ref.ref);
+	if (ret) {
+		DRM_ERROR("failed setting up TTM BO subsystem: %d\n", ret);
+		drm_global_item_unref(&hibmc->mem_global_ref);
+		return ret;
+	}
+	return 0;
+}
+
+static void
+hibmc_ttm_global_release(struct hibmc_drm_private *hibmc)
+{
+	drm_global_item_unref(&hibmc->bo_global_ref.ref);
+	drm_global_item_unref(&hibmc->mem_global_ref);
+	hibmc->mem_global_ref.release = NULL;
+}
+
+static void hibmc_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+	struct hibmc_bo *bo = container_of(tbo, struct hibmc_bo, bo);
+
+	drm_gem_object_release(&bo->gem);
+	kfree(bo);
+}
+
+static bool hibmc_ttm_bo_is_hibmc_bo(struct ttm_buffer_object *bo)
+{
+	return bo->destroy == &hibmc_bo_ttm_destroy;
+}
+
+static int
+hibmc_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
+		       struct ttm_mem_type_manager *man)
+{
+	switch (type) {
+	case TTM_PL_SYSTEM:
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case TTM_PL_VRAM:
+		man->func = &ttm_bo_manager_func;
+		man->flags = TTM_MEMTYPE_FLAG_FIXED |
+			TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_FLAG_UNCACHED |
+			TTM_PL_FLAG_WC;
+		man->default_caching = TTM_PL_FLAG_WC;
+		break;
+	default:
+		DRM_ERROR("unsupported memory type %u\n", type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+void hibmc_ttm_placement(struct hibmc_bo *bo, int domain)
+{
+	u32 count = 0;
+	u32 i;
+
+	bo->placement.placement = bo->placements;
+	bo->placement.busy_placement = bo->placements;
+	if (domain & TTM_PL_FLAG_VRAM)
+		bo->placements[count++].flags = TTM_PL_FLAG_WC |
+			TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+	if (domain & TTM_PL_FLAG_SYSTEM)
+		bo->placements[count++].flags = TTM_PL_MASK_CACHING |
+			TTM_PL_FLAG_SYSTEM;
+	if (!count)
+		bo->placements[count++].flags = TTM_PL_MASK_CACHING |
+			TTM_PL_FLAG_SYSTEM;
+
+	bo->placement.num_placement = count;
+	bo->placement.num_busy_placement = count;
+	for (i = 0; i < count; i++) {
+		bo->placements[i].fpfn = 0;
+		bo->placements[i].lpfn = 0;
+	}
+}
+
+static void
+hibmc_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+	struct hibmc_bo *hibmcbo = hibmc_bo(bo);
+
+	if (!hibmc_ttm_bo_is_hibmc_bo(bo))
+		return;
+
+	hibmc_ttm_placement(hibmcbo, TTM_PL_FLAG_SYSTEM);
+	*pl = hibmcbo->placement;
+}
+
+static int hibmc_bo_verify_access(struct ttm_buffer_object *bo,
+				  struct file *filp)
+{
+	struct hibmc_bo *hibmcbo = hibmc_bo(bo);
+
+	return drm_vma_node_verify_access(&hibmcbo->gem.vma_node,
+					  filp->private_data);
+}
+
+static int hibmc_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+				    struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct hibmc_drm_private *hibmc = hibmc_bdev(bdev);
+
+	mem->bus.addr = NULL;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	mem->bus.is_iomem = false;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* system memory */
+		return 0;
+	case TTM_PL_VRAM:
+		mem->bus.offset = mem->start << PAGE_SHIFT;
+		mem->bus.base = pci_resource_start(hibmc->dev->pdev, 0);
+		mem->bus.is_iomem = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void hibmc_ttm_backend_destroy(struct ttm_tt *tt)
+{
+	ttm_tt_fini(tt);
+	kfree(tt);
+}
+
+static struct ttm_backend_func hibmc_tt_backend_func = {
+	.destroy = &hibmc_ttm_backend_destroy,
+};
+
+static struct ttm_tt *hibmc_ttm_tt_create(struct ttm_bo_device *bdev,
+					  unsigned long size,
+					  u32 page_flags,
+					  struct page *dummy_read_page)
+{
+	struct ttm_tt *tt;
+	int ret;
+
+	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+	if (!tt) {
+		DRM_ERROR("failed to allocate ttm_tt\n");
+		return NULL;
+	}
+	tt->func = &hibmc_tt_backend_func;
+	ret = ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page);
+	if (ret) {
+		DRM_ERROR("failed to initialize ttm_tt: %d\n", ret);
+		kfree(tt);
+		return NULL;
+	}
+	return tt;
+}
+
+static int hibmc_ttm_tt_populate(struct ttm_tt *ttm)
+{
+	return ttm_pool_populate(ttm);
+}
+
+static void hibmc_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+	ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver hibmc_bo_driver = {
+	.ttm_tt_create = hibmc_ttm_tt_create,
+	.ttm_tt_populate = hibmc_ttm_tt_populate,
+	.ttm_tt_unpopulate = hibmc_ttm_tt_unpopulate,
+	.init_mem_type = hibmc_bo_init_mem_type,
+	.evict_flags = hibmc_bo_evict_flags,
+	.move = NULL,
+	.verify_access = hibmc_bo_verify_access,
+	.io_mem_reserve = &hibmc_ttm_io_mem_reserve,
+	.io_mem_free = NULL,
+	.lru_tail = &ttm_bo_default_lru_tail,
+	.swap_lru_tail = &ttm_bo_default_swap_lru_tail,
+};
+
+int hibmc_mm_init(struct hibmc_drm_private *hibmc)
+{
+	int ret;
+	struct drm_device *dev = hibmc->dev;
+	struct ttm_bo_device *bdev = &hibmc->bdev;
+
+	ret = hibmc_ttm_global_init(hibmc);
+	if (ret)
+		return ret;
+
+	ret = ttm_bo_device_init(&hibmc->bdev,
+				 hibmc->bo_global_ref.ref.object,
+				 &hibmc_bo_driver,
+				 dev->anon_inode->i_mapping,
+				 DRM_FILE_PAGE_OFFSET,
+				 true);
+	if (ret) {
+		hibmc_ttm_global_release(hibmc);
+		DRM_ERROR("error initializing bo driver: %d\n", ret);
+		return ret;
+	}
+
+	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+			     hibmc->fb_size >> PAGE_SHIFT);
+	if (ret) {
+		hibmc_ttm_global_release(hibmc);
+		DRM_ERROR("failed ttm VRAM init: %d\n", ret);
+		return ret;
+	}
+
+	hibmc->mm_inited = true;
+	return 0;
+}
+
+void hibmc_mm_fini(struct hibmc_drm_private *hibmc)
+{
+	if (!hibmc->mm_inited)
+		return;
+
+	ttm_bo_device_release(&hibmc->bdev);
+	hibmc_ttm_global_release(hibmc);
+	hibmc->mm_inited = false;
+}
+
+static void hibmc_bo_unref(struct hibmc_bo **bo)
+{
+	struct ttm_buffer_object *tbo;
+
+	if ((*bo) == NULL)
+		return;
+
+	tbo = &((*bo)->bo);
+	ttm_bo_unref(&tbo);
+	*bo = NULL;
+}
+
+int hibmc_bo_create(struct drm_device *dev, int size, int align,
+		    u32 flags, struct hibmc_bo **phibmcbo)
+{
+	struct hibmc_drm_private *hibmc = dev->dev_private;
+	struct hibmc_bo *hibmcbo;
+	size_t acc_size;
+	int ret;
+
+	hibmcbo = kzalloc(sizeof(*hibmcbo), GFP_KERNEL);
+	if (!hibmcbo) {
+		DRM_ERROR("failed to allocate hibmcbo\n");
+		return -ENOMEM;
+	}
+	ret = drm_gem_object_init(dev, &hibmcbo->gem, size);
+	if (ret) {
+		DRM_ERROR("failed to initialize drm gem object: %d\n", ret);
+		kfree(hibmcbo);
+		return ret;
+	}
+
+	hibmcbo->bo.bdev = &hibmc->bdev;
+
+	hibmc_ttm_placement(hibmcbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+	acc_size = ttm_bo_dma_acc_size(&hibmc->bdev, size,
+				       sizeof(struct hibmc_bo));
+
+	ret = ttm_bo_init(&hibmc->bdev, &hibmcbo->bo, size,
+			  ttm_bo_type_device, &hibmcbo->placement,
+			  align >> PAGE_SHIFT, false, NULL, acc_size,
+			  NULL, NULL, hibmc_bo_ttm_destroy);
+	if (ret) {
+		hibmc_bo_unref(&hibmcbo);
+		DRM_ERROR("failed to initialize ttm_bo: %d\n", ret);
+		return ret;
+	}
+
+	*phibmcbo = hibmcbo;
+	return 0;
+}
+
+int hibmc_bo_pin(struct hibmc_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+	int i, ret;
+
+	if (bo->pin_count) {
+		bo->pin_count++;
+		if (gpu_addr)
+			*gpu_addr = bo->bo.offset;
+		return 0;
+	}
+
+	hibmc_ttm_placement(bo, pl_flag);
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	if (ret)
+		return ret;
+
+	bo->pin_count = 1;
+	if (gpu_addr)
+		*gpu_addr = bo->bo.offset;
+	return 0;
+}
+
+int hibmc_bo_unpin(struct hibmc_bo *bo)
+{
+	int i, ret;
+
+	if (!bo->pin_count) {
+		DRM_ERROR("unpin bad %p\n", bo);
+		return 0;
+	}
+	bo->pin_count--;
+	if (bo->pin_count)
+		return 0;
+
+	for (i = 0; i < bo->placement.num_placement ; i++)
+		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	if (ret) {
+		DRM_ERROR("validate failed for unpin: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+int hibmc_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *file_priv;
+	struct hibmc_drm_private *hibmc;
+
+	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+		return -EINVAL;
+
+	file_priv = filp->private_data;
+	hibmc = file_priv->minor->dev->dev_private;
+	return ttm_bo_mmap(filp, vma, &hibmc->bdev);
+}
+
+int hibmc_gem_create(struct drm_device *dev, u32 size, bool iskernel,
+		     struct drm_gem_object **obj)
+{
+	struct hibmc_bo *hibmcbo;
+	int ret;
+
+	*obj = NULL;
+
+	size = PAGE_ALIGN(size);
+	if (size == 0) {
+		DRM_ERROR("error: zero size\n");
+		return -EINVAL;
+	}
+
+	ret = hibmc_bo_create(dev, size, 0, 0, &hibmcbo);
+	if (ret) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("failed to allocate GEM object: %d\n", ret);
+		return ret;
+	}
+	*obj = &hibmcbo->gem;
+	return 0;
+}
+
+int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
+		      struct drm_mode_create_dumb *args)
+{
+	struct drm_gem_object *gobj;
+	u32 handle;
+	int ret;
+
+	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 16);
+	args->size = args->pitch * args->height;
+
+	ret = hibmc_gem_create(dev, args->size, false,
+			       &gobj);
+	if (ret) {
+		DRM_ERROR("failed to create GEM object: %d\n", ret);
+		return ret;
+	}
+
+	ret = drm_gem_handle_create(file, gobj, &handle);
+	drm_gem_object_unreference_unlocked(gobj);
+	if (ret) {
+		DRM_ERROR("failed to unreference GEM object: %d\n", ret);
+		return ret;
+	}
+
+	args->handle = handle;
+	return 0;
+}
+
+void hibmc_gem_free_object(struct drm_gem_object *obj)
+{
+	struct hibmc_bo *hibmcbo = gem_to_hibmc_bo(obj);
+
+	hibmc_bo_unref(&hibmcbo);
+}
+
+static u64 hibmc_bo_mmap_offset(struct hibmc_bo *bo)
+{
+	return drm_vma_node_offset_addr(&bo->bo.vma_node);
+}
+
+int hibmc_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
+			   u32 handle, u64 *offset)
+{
+	struct drm_gem_object *obj;
+	struct hibmc_bo *bo;
+
+	obj = drm_gem_object_lookup(file, handle);
+	if (!obj)
+		return -ENOENT;
+
+	bo = gem_to_hibmc_bo(obj);
+	*offset = hibmc_bo_mmap_offset(bo);
+
+	drm_gem_object_unreference_unlocked(obj);
+	return 0;
+}
+
+static void hibmc_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct hibmc_framebuffer *hibmc_fb = to_hibmc_framebuffer(fb);
+
+	drm_gem_object_unreference_unlocked(hibmc_fb->obj);
+	drm_framebuffer_cleanup(fb);
+	kfree(hibmc_fb);
+}
+
+static const struct drm_framebuffer_funcs hibmc_fb_funcs = {
+	.destroy = hibmc_user_framebuffer_destroy,
+};
+
+struct hibmc_framebuffer *
+hibmc_framebuffer_init(struct drm_device *dev,
+		       const struct drm_mode_fb_cmd2 *mode_cmd,
+		       struct drm_gem_object *obj)
+{
+	struct hibmc_framebuffer *hibmc_fb;
+	int ret;
+
+	hibmc_fb = kzalloc(sizeof(*hibmc_fb), GFP_KERNEL);
+	if (!hibmc_fb) {
+		DRM_ERROR("failed to allocate hibmc_fb\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	drm_helper_mode_fill_fb_struct(&hibmc_fb->fb, mode_cmd);
+	hibmc_fb->obj = obj;
+	ret = drm_framebuffer_init(dev, &hibmc_fb->fb, &hibmc_fb_funcs);
+	if (ret) {
+		DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
+		kfree(hibmc_fb);
+		return ERR_PTR(ret);
+	}
+
+	return hibmc_fb;
+}
+
+static struct drm_framebuffer *
+hibmc_user_framebuffer_create(struct drm_device *dev,
+			      struct drm_file *filp,
+			      const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_gem_object *obj;
+	struct hibmc_framebuffer *hibmc_fb;
+
+	DRM_DEBUG_DRIVER("%dx%d, format %c%c%c%c\n",
+			 mode_cmd->width, mode_cmd->height,
+			 (mode_cmd->pixel_format) & 0xff,
+			 (mode_cmd->pixel_format >> 8) & 0xff,
+			 (mode_cmd->pixel_format >> 16) & 0xff,
+			 (mode_cmd->pixel_format >> 24) & 0xff);
+
+	obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
+	if (!obj)
+		return ERR_PTR(-ENOENT);
+
+	hibmc_fb = hibmc_framebuffer_init(dev, mode_cmd, obj);
+	if (IS_ERR(hibmc_fb)) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ERR_PTR((long)hibmc_fb);
+	}
+	return &hibmc_fb->fb;
+}
+
+const struct drm_mode_config_funcs hibmc_mode_funcs = {
+	.atomic_check = drm_atomic_helper_check,
+	.atomic_commit = drm_atomic_helper_commit,
+	.fb_create = hibmc_user_framebuffer_create,
+};
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index e6e9537537cf..82235f30277c 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -52,7 +52,7 @@
 #define GM107_DISP /* cl5070.h */ 0x00009470
 #define GM200_DISP /* cl5070.h */ 0x00009570
 #define GP100_DISP /* cl5070.h */ 0x00009770
-#define GP104_DISP /* cl5070.h */ 0x00009870
+#define GP102_DISP /* cl5070.h */ 0x00009870
 
 #define NV31_MPEG 0x00003174
 #define G82_MPEG 0x00008274
@@ -90,7 +90,7 @@
 #define GM107_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000947d
 #define GM200_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000957d
 #define GP100_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000977d
-#define GP104_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000987d
+#define GP102_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000987d
 
 #define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e
 #define 
G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h index d3d26a1e215d..b93f4c1a95e5 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h @@ -8,5 +8,5 @@ int gk104_ce_new(struct nvkm_device *, int, struct nvkm_engine **); int gm107_ce_new(struct nvkm_device *, int, struct nvkm_engine **); int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **); int gp100_ce_new(struct nvkm_device *, int, struct nvkm_engine **); -int gp104_ce_new(struct nvkm_device *, int, struct nvkm_engine **); +int gp102_ce_new(struct nvkm_device *, int, struct nvkm_engine **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h index e82049667ce4..970ae753968a 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h @@ -33,5 +33,5 @@ int gk110_disp_new(struct nvkm_device *, int, struct nvkm_disp **); int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **); int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **); int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **); -int gp104_disp_new(struct nvkm_device *, int, struct nvkm_disp **); +int gp102_disp_new(struct nvkm_device *, int, struct nvkm_disp **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h index 65ce79a85d37..794e432578b2 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h @@ -95,7 +95,7 @@ int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **); int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **); int gm20b_fb_new(struct nvkm_device *, int, struct nvkm_fb **); int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int gp104_fb_new(struct nvkm_device *, int, struct nvkm_fb **); +int gp102_fb_new(struct nvkm_device *, int, struct nvkm_fb **); #include <subdev/bios.h> #include <subdev/bios/ramcfg.h> diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h index e61923d5e49c..f37538eb1fe5 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h @@ -35,6 +35,8 @@ int gk110_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); int gk208_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); int gk20a_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); +int gp100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); +int gp102_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); /* interface to MEMX process running on PMU */ struct nvkm_memx; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 5698687bc197..bd37ae127582 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -24,6 +24,7 @@ * */ +#include <acpi/video.h> #include <drm/drmP.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> @@ -348,6 +349,55 @@ static struct nouveau_drm_prop_enum_list dither_depth[] = { } \ } while(0) +#ifdef CONFIG_ACPI + +/* + * Hans de Goede: This define belongs in acpi/video.h, I've submitted a patch + * to the acpi subsys to move it there from 
drivers/acpi/acpi_video.c . + * This should be dropped once that is merged. + */ +#ifndef ACPI_VIDEO_NOTIFY_PROBE +#define ACPI_VIDEO_NOTIFY_PROBE 0x81 +#endif + +static void +nouveau_display_acpi_work(struct work_struct *work) +{ + struct nouveau_drm *drm = container_of(work, typeof(*drm), acpi_work); + + pm_runtime_get_sync(drm->dev->dev); + + drm_helper_hpd_irq_event(drm->dev); + + pm_runtime_mark_last_busy(drm->dev->dev); + pm_runtime_put_sync(drm->dev->dev); +} + +static int +nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb); + struct acpi_bus_event *info = data; + + if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) { + if (info->type == ACPI_VIDEO_NOTIFY_PROBE) { + /* + * This may be the only indication we receive of a + * connector hotplug on a runtime suspended GPU, + * schedule acpi_work to check. + */ + schedule_work(&drm->acpi_work); + + /* acpi-video should not generate keypresses for this */ + return NOTIFY_BAD; + } + } + + return NOTIFY_DONE; +} +#endif + int nouveau_display_init(struct drm_device *dev) { @@ -488,7 +538,7 @@ nouveau_display_create(struct drm_device *dev) if (nouveau_modeset != 2 && drm->vbios.dcb.entries) { static const u16 oclass[] = { - GP104_DISP, + GP102_DISP, GP100_DISP, GM200_DISP, GM107_DISP, @@ -532,6 +582,12 @@ nouveau_display_create(struct drm_device *dev) } nouveau_backlight_init(dev); +#ifdef CONFIG_ACPI + INIT_WORK(&drm->acpi_work, nouveau_display_acpi_work); + drm->acpi_nb.notifier_call = nouveau_display_acpi_ntfy; + register_acpi_notifier(&drm->acpi_nb); +#endif + return 0; vblank_err: @@ -547,6 +603,9 @@ nouveau_display_destroy(struct drm_device *dev) { struct nouveau_display *disp = nouveau_display(dev); +#ifdef CONFIG_ACPI + unregister_acpi_notifier(&nouveau_drm(dev)->acpi_nb); +#endif nouveau_backlight_exit(dev); nouveau_display_vblank_fini(dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 4cd47bae73c7..ae1fd641c96e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -37,6 +37,8 @@ * - implemented limited ABI16/NVIF interop */ +#include <linux/notifier.h> + #include <nvif/client.h> #include <nvif/device.h> #include <nvif/ioctl.h> @@ -161,6 +163,10 @@ struct nouveau_drm { struct nvbios vbios; struct nouveau_display *display; struct backlight_device *backlight; +#ifdef CONFIG_ACPI + struct notifier_block acpi_nb; + struct work_struct acpi_work; +#endif /* power management */ struct nouveau_hwmon *hwmon; diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index a9855a4ec532..22a8b70a4d1e 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -574,7 +574,7 @@ nv50_core_create(struct nvif_device *device, struct nvif_object *disp, .pushbuf = 0xb0007d00, }; static const s32 oclass[] = { - GP104_DISP_CORE_CHANNEL_DMA, + GP102_DISP_CORE_CHANNEL_DMA, GP100_DISP_CORE_CHANNEL_DMA, GM200_DISP_CORE_CHANNEL_DMA, GM107_DISP_CORE_CHANNEL_DMA, @@ -3343,12 +3343,15 @@ nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow) if (!mstm) return 0; - if (dpcd[0] >= 0x12 && allow) { + if (dpcd[0] >= 0x12) { ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]); if (ret < 0) return ret; - state = dpcd[1] & DP_MST_CAP; + if (!(dpcd[1] & DP_MST_CAP)) + dpcd[0] = 0x11; + else + state = allow; } ret = nv50_mstm_enable(mstm, dpcd[0], state); diff --git 
a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild index a4458a8eb30a..255d81ccf916 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild @@ -4,4 +4,4 @@ nvkm-y += nvkm/engine/ce/gk104.o nvkm-y += nvkm/engine/ce/gm107.o nvkm-y += nvkm/engine/ce/gm200.o nvkm-y += nvkm/engine/ce/gp100.o -nvkm-y += nvkm/engine/ce/gp104.o +nvkm-y += nvkm/engine/ce/gp102.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c index 20e019788a53..985c8f653874 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c @@ -27,7 +27,7 @@ #include <nvif/class.h> static const struct nvkm_engine_func -gp104_ce = { +gp102_ce = { .intr = gp100_ce_intr, .sclass = { { -1, -1, PASCAL_DMA_COPY_B }, @@ -37,8 +37,8 @@ gp104_ce = { }; int -gp104_ce_new(struct nvkm_device *device, int index, +gp102_ce_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine) { - return nvkm_engine_new_(&gp104_ce, device, index, true, pengine); + return nvkm_engine_new_(&gp102_ce, device, index, true, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index bd22526edb0b..2cbcffe78c3e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -2167,6 +2167,7 @@ nv130_chipset = { .mmu = gf100_mmu_new, .secboot = gm200_secboot_new, .pci = gp100_pci_new, + .pmu = gp100_pmu_new, .timer = gk20a_timer_new, .top = gk104_top_new, .ce[0] = gp100_ce_new, @@ -2183,13 +2184,42 @@ nv130_chipset = { }; static const struct nvkm_device_chip +nv132_chipset = { + .name = "GP102", + .bar = gf100_bar_new, + .bios = nvkm_bios_new, + .bus = gf100_bus_new, + .devinit = gm200_devinit_new, + .fb = gp102_fb_new, + .fuse = gm107_fuse_new, + .gpio = gk104_gpio_new, + .i2c = gm200_i2c_new, + .ibus = gm200_ibus_new, + .imem = nv50_instmem_new, + .ltc = gp100_ltc_new, + .mc = gp100_mc_new, + .mmu = gf100_mmu_new, + .pci = gp100_pci_new, + .pmu = gp102_pmu_new, + .timer = gk20a_timer_new, + .top = gk104_top_new, + .ce[0] = gp102_ce_new, + .ce[1] = gp102_ce_new, + .ce[2] = gp102_ce_new, + .ce[3] = gp102_ce_new, + .disp = gp102_disp_new, + .dma = gf119_dma_new, + .fifo = gp100_fifo_new, +}; + +static const struct nvkm_device_chip nv134_chipset = { .name = "GP104", .bar = gf100_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, .devinit = gm200_devinit_new, - .fb = gp104_fb_new, + .fb = gp102_fb_new, .fuse = gm107_fuse_new, .gpio = gk104_gpio_new, .i2c = gm200_i2c_new, @@ -2199,13 +2229,14 @@ nv134_chipset = { .mc = gp100_mc_new, .mmu = gf100_mmu_new, .pci = gp100_pci_new, + .pmu = gp102_pmu_new, .timer = gk20a_timer_new, .top = gk104_top_new, - .ce[0] = gp104_ce_new, - .ce[1] = gp104_ce_new, - .ce[2] = gp104_ce_new, - .ce[3] = gp104_ce_new, - .disp = gp104_disp_new, + .ce[0] = gp102_ce_new, + .ce[1] = gp102_ce_new, + .ce[2] = gp102_ce_new, + .ce[3] = gp102_ce_new, + .disp = gp102_disp_new, .dma = gf119_dma_new, .fifo = gp100_fifo_new, }; @@ -2644,6 +2675,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, case 0x126: device->chip = &nv126_chipset; break; case 0x12b: device->chip = &nv12b_chipset; break; case 0x130: device->chip = &nv130_chipset; break; + case 0x132: device->chip = &nv132_chipset; break; case 0x134: device->chip = &nv134_chipset; break; default: nvdev_error(device, "unknown chipset (%08x)\n", 
boot0); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c index 0030cd9543b2..74a1ffa425f7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c @@ -1687,7 +1687,7 @@ nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg, * This is necessary for platforms where the default DMA mask of 32 * does not cover any system memory, i.e., when all RAM is > 4 GB. */ - if (subdev_mask & BIT(NVKM_SUBDEV_MMU)) + if (pdev->device.mmu) dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(pdev->device.mmu->dma_bits)); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild index 77a52b54a31e..fa05d16ae948 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild @@ -11,7 +11,7 @@ nvkm-y += nvkm/engine/disp/gk110.o nvkm-y += nvkm/engine/disp/gm107.o nvkm-y += nvkm/engine/disp/gm200.o nvkm-y += nvkm/engine/disp/gp100.o -nvkm-y += nvkm/engine/disp/gp104.o +nvkm-y += nvkm/engine/disp/gp102.o nvkm-y += nvkm/engine/disp/outp.o nvkm-y += nvkm/engine/disp/outpdp.o @@ -48,14 +48,14 @@ nvkm-y += nvkm/engine/disp/rootgk110.o nvkm-y += nvkm/engine/disp/rootgm107.o nvkm-y += nvkm/engine/disp/rootgm200.o nvkm-y += nvkm/engine/disp/rootgp100.o -nvkm-y += nvkm/engine/disp/rootgp104.o +nvkm-y += nvkm/engine/disp/rootgp102.o nvkm-y += nvkm/engine/disp/channv50.o nvkm-y += nvkm/engine/disp/changf119.o nvkm-y += nvkm/engine/disp/dmacnv50.o nvkm-y += nvkm/engine/disp/dmacgf119.o -nvkm-y += nvkm/engine/disp/dmacgp104.o +nvkm-y += nvkm/engine/disp/dmacgp102.o nvkm-y += nvkm/engine/disp/basenv50.o nvkm-y += nvkm/engine/disp/baseg84.o @@ -64,7 +64,7 @@ nvkm-y += nvkm/engine/disp/basegt215.o nvkm-y += nvkm/engine/disp/basegf119.o nvkm-y += nvkm/engine/disp/basegk104.o nvkm-y += nvkm/engine/disp/basegk110.o -nvkm-y += nvkm/engine/disp/basegp104.o +nvkm-y += nvkm/engine/disp/basegp102.o nvkm-y += nvkm/engine/disp/corenv50.o nvkm-y += nvkm/engine/disp/coreg84.o @@ -77,7 +77,7 @@ nvkm-y += nvkm/engine/disp/coregk110.o nvkm-y += nvkm/engine/disp/coregm107.o nvkm-y += nvkm/engine/disp/coregm200.o nvkm-y += nvkm/engine/disp/coregp100.o -nvkm-y += nvkm/engine/disp/coregp104.o +nvkm-y += nvkm/engine/disp/coregp102.o nvkm-y += nvkm/engine/disp/ovlynv50.o nvkm-y += nvkm/engine/disp/ovlyg84.o @@ -85,7 +85,7 @@ nvkm-y += nvkm/engine/disp/ovlygt200.o nvkm-y += nvkm/engine/disp/ovlygt215.o nvkm-y += nvkm/engine/disp/ovlygf119.o nvkm-y += nvkm/engine/disp/ovlygk104.o -nvkm-y += nvkm/engine/disp/ovlygp104.o +nvkm-y += nvkm/engine/disp/ovlygp102.o nvkm-y += nvkm/engine/disp/piocnv50.o nvkm-y += nvkm/engine/disp/piocgf119.o @@ -95,9 +95,11 @@ nvkm-y += nvkm/engine/disp/cursg84.o nvkm-y += nvkm/engine/disp/cursgt215.o nvkm-y += nvkm/engine/disp/cursgf119.o nvkm-y += nvkm/engine/disp/cursgk104.o +nvkm-y += nvkm/engine/disp/cursgp102.o nvkm-y += nvkm/engine/disp/oimmnv50.o nvkm-y += nvkm/engine/disp/oimmg84.o nvkm-y += nvkm/engine/disp/oimmgt215.o nvkm-y += nvkm/engine/disp/oimmgf119.o nvkm-y += nvkm/engine/disp/oimmgk104.o +nvkm-y += nvkm/engine/disp/oimmgp102.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp102.c index 51688e37c54e..8a3cdeef8d2c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp102.c @@ -27,12 +27,12 @@ #include <nvif/class.h> const struct 
nv50_disp_dmac_oclass -gp104_disp_base_oclass = { +gp102_disp_base_oclass = { .base.oclass = GK110_DISP_BASE_CHANNEL_DMA, .base.minver = 0, .base.maxver = 0, .ctor = nv50_disp_base_new, - .func = &gp104_disp_dmac_func, + .func = &gp102_disp_dmac_func, .mthd = &gf119_disp_base_chan_mthd, .chid = 1, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c index 26990d44ae75..524a24eae1a0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c @@ -82,7 +82,7 @@ nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug) if (mthd->addr) { snprintf(cname_, sizeof(cname_), "%s %d", - mthd->name, chan->chid); + mthd->name, chan->chid.user); cname = cname_; } @@ -139,7 +139,7 @@ nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size, if (!(ret = nvif_unvers(ret, &data, &size, args->none))) { notify->size = sizeof(struct nvif_notify_uevent_rep); notify->types = 1; - notify->index = chan->chid; + notify->index = chan->chid.user; return 0; } @@ -159,7 +159,7 @@ nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data) struct nv50_disp_chan *chan = nv50_disp_chan(object); struct nv50_disp *disp = chan->root->disp; struct nvkm_device *device = disp->base.engine.subdev.device; - *data = nvkm_rd32(device, 0x640000 + (chan->chid * 0x1000) + addr); + *data = nvkm_rd32(device, 0x640000 + (chan->chid.user * 0x1000) + addr); return 0; } @@ -169,7 +169,7 @@ nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data) struct nv50_disp_chan *chan = nv50_disp_chan(object); struct nv50_disp *disp = chan->root->disp; struct nvkm_device *device = disp->base.engine.subdev.device; - nvkm_wr32(device, 0x640000 + (chan->chid * 0x1000) + addr, data); + nvkm_wr32(device, 0x640000 + (chan->chid.user * 0x1000) + addr, data); return 0; } @@ -196,7 +196,7 @@ nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size) struct nv50_disp *disp = chan->root->disp; struct nvkm_device *device = disp->base.engine.subdev.device; *addr = device->func->resource_addr(device, 0) + - 0x640000 + (chan->chid * 0x1000); + 0x640000 + (chan->chid.user * 0x1000); *size = 0x001000; return 0; } @@ -243,8 +243,8 @@ nv50_disp_chan_dtor(struct nvkm_object *object) { struct nv50_disp_chan *chan = nv50_disp_chan(object); struct nv50_disp *disp = chan->root->disp; - if (chan->chid >= 0) - disp->chan[chan->chid] = NULL; + if (chan->chid.user >= 0) + disp->chan[chan->chid.user] = NULL; return chan->func->dtor ? 
chan->func->dtor(chan) : chan; } @@ -263,7 +263,7 @@ nv50_disp_chan = { int nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func, const struct nv50_disp_chan_mthd *mthd, - struct nv50_disp_root *root, int chid, int head, + struct nv50_disp_root *root, int ctrl, int user, int head, const struct nvkm_oclass *oclass, struct nv50_disp_chan *chan) { @@ -273,21 +273,22 @@ nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func, chan->func = func; chan->mthd = mthd; chan->root = root; - chan->chid = chid; + chan->chid.ctrl = ctrl; + chan->chid.user = user; chan->head = head; - if (disp->chan[chan->chid]) { - chan->chid = -1; + if (disp->chan[chan->chid.user]) { + chan->chid.user = -1; return -EBUSY; } - disp->chan[chan->chid] = chan; + disp->chan[chan->chid.user] = chan; return 0; } int nv50_disp_chan_new_(const struct nv50_disp_chan_func *func, const struct nv50_disp_chan_mthd *mthd, - struct nv50_disp_root *root, int chid, int head, + struct nv50_disp_root *root, int ctrl, int user, int head, const struct nvkm_oclass *oclass, struct nvkm_object **pobject) { @@ -297,5 +298,6 @@ nv50_disp_chan_new_(const struct nv50_disp_chan_func *func, return -ENOMEM; *pobject = &chan->object; - return nv50_disp_chan_ctor(func, mthd, root, chid, head, oclass, chan); + return nv50_disp_chan_ctor(func, mthd, root, ctrl, user, + head, oclass, chan); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h index f5f683d9fd20..737b38f6fbd2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h @@ -7,7 +7,11 @@ struct nv50_disp_chan { const struct nv50_disp_chan_func *func; const struct nv50_disp_chan_mthd *mthd; struct nv50_disp_root *root; - int chid; + + struct { + int ctrl; + int user; + } chid; int head; struct nvkm_object object; @@ -25,11 +29,11 @@ struct nv50_disp_chan_func { int nv50_disp_chan_ctor(const struct nv50_disp_chan_func *, const struct nv50_disp_chan_mthd *, - struct nv50_disp_root *, int chid, int head, + struct nv50_disp_root *, int ctrl, int user, int head, const struct nvkm_oclass *, struct nv50_disp_chan *); int nv50_disp_chan_new_(const struct nv50_disp_chan_func *, const struct nv50_disp_chan_mthd *, - struct nv50_disp_root *, int chid, int head, + struct nv50_disp_root *, int ctrl, int user, int head, const struct nvkm_oclass *, struct nvkm_object **); extern const struct nv50_disp_chan_func nv50_disp_pioc_func; @@ -90,13 +94,16 @@ extern const struct nv50_disp_chan_mthd gk104_disp_ovly_chan_mthd; struct nv50_disp_pioc_oclass { int (*ctor)(const struct nv50_disp_chan_func *, const struct nv50_disp_chan_mthd *, - struct nv50_disp_root *, int chid, + struct nv50_disp_root *, int ctrl, int user, const struct nvkm_oclass *, void *data, u32 size, struct nvkm_object **); struct nvkm_sclass base; const struct nv50_disp_chan_func *func; const struct nv50_disp_chan_mthd *mthd; - int chid; + struct { + int ctrl; + int user; + } chid; }; extern const struct nv50_disp_pioc_oclass nv50_disp_oimm_oclass; @@ -114,15 +121,17 @@ extern const struct nv50_disp_pioc_oclass gf119_disp_curs_oclass; extern const struct nv50_disp_pioc_oclass gk104_disp_oimm_oclass; extern const struct nv50_disp_pioc_oclass gk104_disp_curs_oclass; +extern const struct nv50_disp_pioc_oclass gp102_disp_oimm_oclass; +extern const struct nv50_disp_pioc_oclass gp102_disp_curs_oclass; int nv50_disp_curs_new(const struct nv50_disp_chan_func *, const struct nv50_disp_chan_mthd *, - struct 
nv50_disp_root *, int chid, + struct nv50_disp_root *, int ctrl, int user, const struct nvkm_oclass *, void *data, u32 size, struct nvkm_object **); int nv50_disp_oimm_new(const struct nv50_disp_chan_func *, const struct nv50_disp_chan_mthd *, - struct nv50_disp_root *, int chid, + struct nv50_disp_root *, int ctrl, int user, const struct nvkm_oclass *, void *data, u32 size, struct nvkm_object **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c index e356f87fbe60..b0df4b752b8c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c @@ -29,7 +29,7 @@ #include <nvif/class.h> static int -gp104_disp_core_init(struct nv50_disp_dmac *chan) +gp102_disp_core_init(struct nv50_disp_dmac *chan) { struct nv50_disp *disp = chan->base.root->disp; struct nvkm_subdev *subdev = &disp->base.engine.subdev; @@ -60,19 +60,19 @@ gp104_disp_core_init(struct nv50_disp_dmac *chan) } static const struct nv50_disp_dmac_func -gp104_disp_core_func = { - .init = gp104_disp_core_init, +gp102_disp_core_func = { + .init = gp102_disp_core_init, .fini = gf119_disp_core_fini, .bind = gf119_disp_dmac_bind, }; const struct nv50_disp_dmac_oclass -gp104_disp_core_oclass = { - .base.oclass = GP104_DISP_CORE_CHANNEL_DMA, +gp102_disp_core_oclass = { + .base.oclass = GP102_DISP_CORE_CHANNEL_DMA, .base.minver = 0, .base.maxver = 0, .ctor = nv50_disp_core_new, - .func = &gp104_disp_core_func, + .func = &gp102_disp_core_func, .mthd = &gk104_disp_core_chan_mthd, .chid = 0, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c index dd99fc7060b1..fa781b5a7e07 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c @@ -33,5 +33,5 @@ g84_disp_curs_oclass = { .base.maxver = 0, .ctor = nv50_disp_curs_new, .func = &nv50_disp_pioc_func, - .chid = 7, + .chid = { 7, 7 }, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c index 2a1574e06ad6..2be6fb052c65 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c @@ -33,5 +33,5 @@ gf119_disp_curs_oclass = { .base.maxver = 0, .ctor = nv50_disp_curs_new, .func = &gf119_disp_pioc_func, - .chid = 13, + .chid = { 13, 13 }, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c index 28e8f06c9472..2a99db4bf8f8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c @@ -33,5 +33,5 @@ gk104_disp_curs_oclass = { .base.maxver = 0, .ctor = nv50_disp_curs_new, .func = &gf119_disp_pioc_func, - .chid = 13, + .chid = { 13, 13 }, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c new file mode 100644 index 000000000000..e958210d8105 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c @@ -0,0 +1,37 @@ +/* + * Copyright 2016 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "channv50.h" +#include "rootnv50.h" + +#include <nvif/class.h> + +const struct nv50_disp_pioc_oclass +gp102_disp_curs_oclass = { + .base.oclass = GK104_DISP_CURSOR, + .base.minver = 0, + .base.maxver = 0, + .ctor = nv50_disp_curs_new, + .func = &gf119_disp_pioc_func, + .chid = { 13, 17 }, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c index d8a4b9ca139c..00a7f3564450 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c @@ -33,5 +33,5 @@ gt215_disp_curs_oclass = { .base.maxver = 0, .ctor = nv50_disp_curs_new, .func = &nv50_disp_pioc_func, - .chid = 7, + .chid = { 7, 7 }, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c index 8b1320499a0f..82ff82d8c1ab 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c @@ -33,7 +33,7 @@ int nv50_disp_curs_new(const struct nv50_disp_chan_func *func, const struct nv50_disp_chan_mthd *mthd, - struct nv50_disp_root *root, int chid, + struct nv50_disp_root *root, int ctrl, int user, const struct nvkm_oclass *oclass, void *data, u32 size, struct nvkm_object **pobject) { @@ -54,7 +54,7 @@ nv50_disp_curs_new(const struct nv50_disp_chan_func *func, } else return ret; - return nv50_disp_chan_new_(func, mthd, root, chid + head, + return nv50_disp_chan_new_(func, mthd, root, ctrl + head, user + head, head, oclass, pobject); } @@ -65,5 +65,5 @@ nv50_disp_curs_oclass = { .base.maxver = 0, .ctor = nv50_disp_curs_new, .func = &nv50_disp_pioc_func, - .chid = 7, + .chid = { 7, 7 }, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c index a57f7cef307a..ce7cd74fbd5d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c @@ -32,8 +32,8 @@ gf119_disp_dmac_bind(struct nv50_disp_dmac *chan, struct nvkm_object *object, u32 handle) { return nvkm_ramht_insert(chan->base.root->ramht, object, - chan->base.chid, -9, handle, - chan->base.chid << 27 | 0x00000001); + chan->base.chid.user, -9, handle, + chan->base.chid.user << 27 | 0x00000001); } void @@ -42,22 +42,23 @@ gf119_disp_dmac_fini(struct nv50_disp_dmac *chan) 
struct nv50_disp *disp = chan->base.root->disp; struct nvkm_subdev *subdev = &disp->base.engine.subdev; struct nvkm_device *device = subdev->device; - int chid = chan->base.chid; + int ctrl = chan->base.chid.ctrl; + int user = chan->base.chid.user; /* deactivate channel */ - nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000); - nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000); + nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00001010, 0x00001000); + nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000003, 0x00000000); if (nvkm_msec(device, 2000, - if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x001e0000)) + if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x001e0000)) break; ) < 0) { - nvkm_error(subdev, "ch %d fini: %08x\n", chid, - nvkm_rd32(device, 0x610490 + (chid * 0x10))); + nvkm_error(subdev, "ch %d fini: %08x\n", user, + nvkm_rd32(device, 0x610490 + (ctrl * 0x10))); } /* disable error reporting and completion notification */ - nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000); - nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000); + nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000); + nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000); } static int @@ -66,26 +67,27 @@ gf119_disp_dmac_init(struct nv50_disp_dmac *chan) struct nv50_disp *disp = chan->base.root->disp; struct nvkm_subdev *subdev = &disp->base.engine.subdev; struct nvkm_device *device = subdev->device; - int chid = chan->base.chid; + int ctrl = chan->base.chid.ctrl; + int user = chan->base.chid.user; /* enable error reporting */ - nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid); + nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user); /* initialise channel for dma command submission */ - nvkm_wr32(device, 0x610494 + (chid * 0x0010), chan->push); - nvkm_wr32(device, 0x610498 + (chid * 0x0010), 0x00010000); - nvkm_wr32(device, 0x61049c + (chid * 0x0010), 0x00000001); - nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010); - nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000); - nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013); + nvkm_wr32(device, 0x610494 + (ctrl * 0x0010), chan->push); + nvkm_wr32(device, 0x610498 + (ctrl * 0x0010), 0x00010000); + nvkm_wr32(device, 0x61049c + (ctrl * 0x0010), 0x00000001); + nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010); + nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000); + nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013); /* wait for it to go inactive */ if (nvkm_msec(device, 2000, - if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000)) + if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000)) break; ) < 0) { - nvkm_error(subdev, "ch %d init: %08x\n", chid, - nvkm_rd32(device, 0x610490 + (chid * 0x10))); + nvkm_error(subdev, "ch %d init: %08x\n", user, + nvkm_rd32(device, 0x610490 + (ctrl * 0x10))); return -EBUSY; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c index ad24c2c57696..cdead9500343 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c @@ -27,31 +27,32 @@ #include <subdev/timer.h> static int -gp104_disp_dmac_init(struct nv50_disp_dmac *chan) +gp102_disp_dmac_init(struct nv50_disp_dmac *chan) { struct nv50_disp *disp = chan->base.root->disp; struct nvkm_subdev *subdev = &disp->base.engine.subdev; struct nvkm_device *device 
= subdev->device; - int chid = chan->base.chid; + int ctrl = chan->base.chid.ctrl; + int user = chan->base.chid.user; /* enable error reporting */ - nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid); + nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user); /* initialise channel for dma command submission */ - nvkm_wr32(device, 0x611494 + (chid * 0x0010), chan->push); - nvkm_wr32(device, 0x611498 + (chid * 0x0010), 0x00010000); - nvkm_wr32(device, 0x61149c + (chid * 0x0010), 0x00000001); - nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010); - nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000); - nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013); + nvkm_wr32(device, 0x611494 + (ctrl * 0x0010), chan->push); + nvkm_wr32(device, 0x611498 + (ctrl * 0x0010), 0x00010000); + nvkm_wr32(device, 0x61149c + (ctrl * 0x0010), 0x00000001); + nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010); + nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000); + nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013); /* wait for it to go inactive */ if (nvkm_msec(device, 2000, - if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000)) + if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000)) break; ) < 0) { - nvkm_error(subdev, "ch %d init: %08x\n", chid, - nvkm_rd32(device, 0x610490 + (chid * 0x10))); + nvkm_error(subdev, "ch %d init: %08x\n", user, + nvkm_rd32(device, 0x610490 + (ctrl * 0x10))); return -EBUSY; } @@ -59,8 +60,8 @@ gp104_disp_dmac_init(struct nv50_disp_dmac *chan) } const struct nv50_disp_dmac_func -gp104_disp_dmac_func = { - .init = gp104_disp_dmac_init, +gp102_disp_dmac_func = { + .init = gp102_disp_dmac_init, .fini = gf119_disp_dmac_fini, .bind = gf119_disp_dmac_bind, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c index 9c6645a357b9..0a1381a84552 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c @@ -149,7 +149,7 @@ nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func, chan->func = func; ret = nv50_disp_chan_ctor(&nv50_disp_dmac_func_, mthd, root, - chid, head, oclass, &chan->base); + chid, chid, head, oclass, &chan->base); if (ret) return ret; @@ -179,9 +179,9 @@ nv50_disp_dmac_bind(struct nv50_disp_dmac *chan, struct nvkm_object *object, u32 handle) { return nvkm_ramht_insert(chan->base.root->ramht, object, - chan->base.chid, -10, handle, - chan->base.chid << 28 | - chan->base.chid); + chan->base.chid.user, -10, handle, + chan->base.chid.user << 28 | + chan->base.chid.user); } static void @@ -190,21 +190,22 @@ nv50_disp_dmac_fini(struct nv50_disp_dmac *chan) struct nv50_disp *disp = chan->base.root->disp; struct nvkm_subdev *subdev = &disp->base.engine.subdev; struct nvkm_device *device = subdev->device; - int chid = chan->base.chid; + int ctrl = chan->base.chid.ctrl; + int user = chan->base.chid.user; /* deactivate channel */ - nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000); - nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000); + nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00001010, 0x00001000); + nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000003, 0x00000000); if (nvkm_msec(device, 2000, - if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x001e0000)) + if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x001e0000)) break; ) < 0) { - nvkm_error(subdev, "ch %d fini timeout, %08x\n", 
chid, - nvkm_rd32(device, 0x610200 + (chid * 0x10))); + nvkm_error(subdev, "ch %d fini timeout, %08x\n", user, + nvkm_rd32(device, 0x610200 + (ctrl * 0x10))); } /* disable error reporting and completion notifications */ - nvkm_mask(device, 0x610028, 0x00010001 << chid, 0x00000000 << chid); + nvkm_mask(device, 0x610028, 0x00010001 << user, 0x00000000 << user); } static int @@ -213,26 +214,27 @@ nv50_disp_dmac_init(struct nv50_disp_dmac *chan) struct nv50_disp *disp = chan->base.root->disp; struct nvkm_subdev *subdev = &disp->base.engine.subdev; struct nvkm_device *device = subdev->device; - int chid = chan->base.chid; + int ctrl = chan->base.chid.ctrl; + int user = chan->base.chid.user; /* enable error reporting */ - nvkm_mask(device, 0x610028, 0x00010000 << chid, 0x00010000 << chid); + nvkm_mask(device, 0x610028, 0x00010000 << user, 0x00010000 << user); /* initialise channel for dma command submission */ - nvkm_wr32(device, 0x610204 + (chid * 0x0010), chan->push); - nvkm_wr32(device, 0x610208 + (chid * 0x0010), 0x00010000); - nvkm_wr32(device, 0x61020c + (chid * 0x0010), chid); - nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010); - nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000); - nvkm_wr32(device, 0x610200 + (chid * 0x0010), 0x00000013); + nvkm_wr32(device, 0x610204 + (ctrl * 0x0010), chan->push); + nvkm_wr32(device, 0x610208 + (ctrl * 0x0010), 0x00010000); + nvkm_wr32(device, 0x61020c + (ctrl * 0x0010), ctrl); + nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000010, 0x00000010); + nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000); + nvkm_wr32(device, 0x610200 + (ctrl * 0x0010), 0x00000013); /* wait for it to go inactive */ if (nvkm_msec(device, 2000, - if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x80000000)) + if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x80000000)) break; ) < 0) { - nvkm_error(subdev, "ch %d init timeout, %08x\n", chid, - nvkm_rd32(device, 0x610200 + (chid * 0x10))); + nvkm_error(subdev, "ch %d init timeout, %08x\n", user, + nvkm_rd32(device, 0x610200 + (ctrl * 0x10))); return -EBUSY; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h index 43ac05857853..ea4a0d062e31 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h @@ -30,7 +30,7 @@ int gf119_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32); extern const struct nv50_disp_dmac_func gf119_disp_core_func; void gf119_disp_core_fini(struct nv50_disp_dmac *); -extern const struct nv50_disp_dmac_func gp104_disp_dmac_func; +extern const struct nv50_disp_dmac_func gp102_disp_dmac_func; struct nv50_disp_dmac_oclass { int (*ctor)(const struct nv50_disp_dmac_func *, @@ -95,7 +95,7 @@ extern const struct nv50_disp_dmac_oclass gm200_disp_core_oclass; extern const struct nv50_disp_dmac_oclass gp100_disp_core_oclass; -extern const struct nv50_disp_dmac_oclass gp104_disp_core_oclass; -extern const struct nv50_disp_dmac_oclass gp104_disp_base_oclass; -extern const struct nv50_disp_dmac_oclass gp104_disp_ovly_oclass; +extern const struct nv50_disp_dmac_oclass gp102_disp_core_oclass; +extern const struct nv50_disp_dmac_oclass gp102_disp_base_oclass; +extern const struct nv50_disp_dmac_oclass gp102_disp_ovly_oclass; #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c index 3bf3380336e4..f5d613f82709 100644 --- 
a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c @@ -25,7 +25,7 @@ #include "rootnv50.h" static void -gp104_disp_intr_error(struct nv50_disp *disp, int chid) +gp102_disp_intr_error(struct nv50_disp *disp, int chid) { struct nvkm_subdev *subdev = &disp->base.engine.subdev; struct nvkm_device *device = subdev->device; @@ -51,12 +51,12 @@ gp104_disp_intr_error(struct nv50_disp *disp, int chid) } static const struct nv50_disp_func -gp104_disp = { +gp102_disp = { .intr = gf119_disp_intr, - .intr_error = gp104_disp_intr_error, + .intr_error = gp102_disp_intr_error, .uevent = &gf119_disp_chan_uevent, .super = gf119_disp_intr_supervisor, - .root = &gp104_disp_root_oclass, + .root = &gp102_disp_root_oclass, .head.vblank_init = gf119_disp_vblank_init, .head.vblank_fini = gf119_disp_vblank_fini, .head.scanoutpos = gf119_disp_root_scanoutpos, @@ -75,7 +75,7 @@ gp104_disp = { }; int -gp104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) +gp102_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) { - return gf119_disp_new_(&gp104_disp, device, index, pdisp); + return gf119_disp_new_(&gp102_disp, device, index, pdisp); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c index 54a4ae8d66c6..5ad5d0f5db05 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c @@ -33,5 +33,5 @@ g84_disp_oimm_oclass = { .base.maxver = 0, .ctor = nv50_disp_oimm_new, .func = &nv50_disp_pioc_func, - .chid = 5, + .chid = { 5, 5 }, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c index c658db54afc5..1f9fd3403f07 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c @@ -33,5 +33,5 @@ gf119_disp_oimm_oclass = { .base.maxver = 0, .ctor = nv50_disp_oimm_new, .func = &gf119_disp_pioc_func, - .chid = 9, + .chid = { 9, 9 }, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c index b1fde8c125d6..0c09fe85e952 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c @@ -33,5 +33,5 @@ gk104_disp_oimm_oclass = { .base.maxver = 0, .ctor = nv50_disp_oimm_new, .func = &gf119_disp_pioc_func, - .chid = 9, + .chid = { 9, 9 }, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c new file mode 100644 index 000000000000..abf82365c671 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c @@ -0,0 +1,37 @@ +/* + * Copyright 2016 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "channv50.h" +#include "rootnv50.h" + +#include <nvif/class.h> + +const struct nv50_disp_pioc_oclass +gp102_disp_oimm_oclass = { + .base.oclass = GK104_DISP_OVERLAY, + .base.minver = 0, + .base.maxver = 0, + .ctor = nv50_disp_oimm_new, + .func = &gf119_disp_pioc_func, + .chid = { 9, 13 }, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c index f4e7eb3d1177..1281db28aebd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c @@ -33,5 +33,5 @@ gt215_disp_oimm_oclass = { .base.maxver = 0, .ctor = nv50_disp_oimm_new, .func = &nv50_disp_pioc_func, - .chid = 5, + .chid = { 5, 5 }, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c index 3940b9c966ec..07540f3d32dc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c @@ -33,7 +33,7 @@ int nv50_disp_oimm_new(const struct nv50_disp_chan_func *func, const struct nv50_disp_chan_mthd *mthd, - struct nv50_disp_root *root, int chid, + struct nv50_disp_root *root, int ctrl, int user, const struct nvkm_oclass *oclass, void *data, u32 size, struct nvkm_object **pobject) { @@ -54,7 +54,7 @@ nv50_disp_oimm_new(const struct nv50_disp_chan_func *func, } else return ret; - return nv50_disp_chan_new_(func, mthd, root, chid + head, + return nv50_disp_chan_new_(func, mthd, root, ctrl + head, user + head, head, oclass, pobject); } @@ -65,5 +65,5 @@ nv50_disp_oimm_oclass = { .base.maxver = 0, .ctor = nv50_disp_oimm_new, .func = &nv50_disp_pioc_func, - .chid = 5, + .chid = { 5, 5 }, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp102.c index 97e2dd2d908e..589bd2f12b41 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp102.c @@ -27,12 +27,12 @@ #include <nvif/class.h> const struct nv50_disp_dmac_oclass -gp104_disp_ovly_oclass = { +gp102_disp_ovly_oclass = { .base.oclass = GK104_DISP_OVERLAY_CONTROL_DMA, .base.minver = 0, .base.maxver = 0, .ctor = nv50_disp_ovly_new, - .func = &gp104_disp_dmac_func, + .func = &gp102_disp_dmac_func, .mthd = &gk104_disp_ovly_chan_mthd, .chid = 5, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c index a625a9876e34..0abaa6431943 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c @@ -32,20 +32,21 @@ gf119_disp_pioc_fini(struct nv50_disp_chan *chan) struct nv50_disp *disp = chan->root->disp; struct nvkm_subdev *subdev = &disp->base.engine.subdev; struct nvkm_device *device = subdev->device; - int chid = chan->chid; + int ctrl = chan->chid.ctrl; + int user = chan->chid.user; - nvkm_mask(device, 0x610490 + (chid * 0x10), 0x00000001, 
0x00000000); + nvkm_mask(device, 0x610490 + (ctrl * 0x10), 0x00000001, 0x00000000); if (nvkm_msec(device, 2000, - if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x00030000)) + if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x00030000)) break; ) < 0) { - nvkm_error(subdev, "ch %d fini: %08x\n", chid, - nvkm_rd32(device, 0x610490 + (chid * 0x10))); + nvkm_error(subdev, "ch %d fini: %08x\n", user, + nvkm_rd32(device, 0x610490 + (ctrl * 0x10))); } /* disable error reporting and completion notification */ - nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000); - nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000); + nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000); + nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000); } static int @@ -54,20 +55,21 @@ gf119_disp_pioc_init(struct nv50_disp_chan *chan) struct nv50_disp *disp = chan->root->disp; struct nvkm_subdev *subdev = &disp->base.engine.subdev; struct nvkm_device *device = subdev->device; - int chid = chan->chid; + int ctrl = chan->chid.ctrl; + int user = chan->chid.user; /* enable error reporting */ - nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid); + nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user); /* activate channel */ - nvkm_wr32(device, 0x610490 + (chid * 0x10), 0x00000001); + nvkm_wr32(device, 0x610490 + (ctrl * 0x10), 0x00000001); if (nvkm_msec(device, 2000, - u32 tmp = nvkm_rd32(device, 0x610490 + (chid * 0x10)); + u32 tmp = nvkm_rd32(device, 0x610490 + (ctrl * 0x10)); if ((tmp & 0x00030000) == 0x00010000) break; ) < 0) { - nvkm_error(subdev, "ch %d init: %08x\n", chid, - nvkm_rd32(device, 0x610490 + (chid * 0x10))); + nvkm_error(subdev, "ch %d init: %08x\n", user, + nvkm_rd32(device, 0x610490 + (ctrl * 0x10))); return -EBUSY; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c index 9d2618dacf20..0211e0e8a35f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c @@ -32,15 +32,16 @@ nv50_disp_pioc_fini(struct nv50_disp_chan *chan) struct nv50_disp *disp = chan->root->disp; struct nvkm_subdev *subdev = &disp->base.engine.subdev; struct nvkm_device *device = subdev->device; - int chid = chan->chid; + int ctrl = chan->chid.ctrl; + int user = chan->chid.user; - nvkm_mask(device, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000); + nvkm_mask(device, 0x610200 + (ctrl * 0x10), 0x00000001, 0x00000000); if (nvkm_msec(device, 2000, - if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000)) + if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000)) break; ) < 0) { - nvkm_error(subdev, "ch %d timeout: %08x\n", chid, - nvkm_rd32(device, 0x610200 + (chid * 0x10))); + nvkm_error(subdev, "ch %d timeout: %08x\n", user, + nvkm_rd32(device, 0x610200 + (ctrl * 0x10))); } } @@ -50,26 +51,27 @@ nv50_disp_pioc_init(struct nv50_disp_chan *chan) struct nv50_disp *disp = chan->root->disp; struct nvkm_subdev *subdev = &disp->base.engine.subdev; struct nvkm_device *device = subdev->device; - int chid = chan->chid; + int ctrl = chan->chid.ctrl; + int user = chan->chid.user; - nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00002000); + nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00002000); if (nvkm_msec(device, 2000, - if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000)) + if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000)) break; ) < 0) { - nvkm_error(subdev, "ch %d timeout0: %08x\n", chid, - 
nvkm_rd32(device, 0x610200 + (chid * 0x10))); + nvkm_error(subdev, "ch %d timeout0: %08x\n", user, + nvkm_rd32(device, 0x610200 + (ctrl * 0x10))); return -EBUSY; } - nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00000001); + nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00000001); if (nvkm_msec(device, 2000, - u32 tmp = nvkm_rd32(device, 0x610200 + (chid * 0x10)); + u32 tmp = nvkm_rd32(device, 0x610200 + (ctrl * 0x10)); if ((tmp & 0x00030000) == 0x00010000) break; ) < 0) { - nvkm_error(subdev, "ch %d timeout1: %08x\n", chid, - nvkm_rd32(device, 0x610200 + (chid * 0x10))); + nvkm_error(subdev, "ch %d timeout1: %08x\n", user, + nvkm_rd32(device, 0x610200 + (ctrl * 0x10))); return -EBUSY; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c index 8443e04dc626..37122ca579ad 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c @@ -27,32 +27,32 @@ #include <nvif/class.h> static const struct nv50_disp_root_func -gp104_disp_root = { +gp102_disp_root = { .init = gf119_disp_root_init, .fini = gf119_disp_root_fini, .dmac = { - &gp104_disp_core_oclass, - &gp104_disp_base_oclass, - &gp104_disp_ovly_oclass, + &gp102_disp_core_oclass, + &gp102_disp_base_oclass, + &gp102_disp_ovly_oclass, }, .pioc = { - &gk104_disp_oimm_oclass, - &gk104_disp_curs_oclass, + &gp102_disp_oimm_oclass, + &gp102_disp_curs_oclass, }, }; static int -gp104_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass, +gp102_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass, void *data, u32 size, struct nvkm_object **pobject) { - return nv50_disp_root_new_(&gp104_disp_root, disp, oclass, + return nv50_disp_root_new_(&gp102_disp_root, disp, oclass, data, size, pobject); } const struct nvkm_disp_oclass -gp104_disp_root_oclass = { - .base.oclass = GP104_DISP, +gp102_disp_root_oclass = { + .base.oclass = GP102_DISP, .base.minver = -1, .base.maxver = -1, - .ctor = gp104_disp_root_new, + .ctor = gp102_disp_root_new, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c index c1158b22a721..e70dc6a9ff7d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c @@ -250,8 +250,8 @@ nv50_disp_root_pioc_new_(const struct nvkm_oclass *oclass, { const struct nv50_disp_pioc_oclass *sclass = oclass->priv; struct nv50_disp_root *root = nv50_disp_root(oclass->parent); - return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid, - oclass, data, size, pobject); + return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid.ctrl, + sclass->chid.user, oclass, data, size, pobject); } static int diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h index ad00f1724b72..b147cf5b3518 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h @@ -41,5 +41,5 @@ extern const struct nvkm_disp_oclass gk110_disp_root_oclass; extern const struct nvkm_disp_oclass gm107_disp_root_oclass; extern const struct nvkm_disp_oclass gm200_disp_root_oclass; extern const struct nvkm_disp_oclass gp100_disp_root_oclass; -extern const struct nvkm_disp_oclass gp104_disp_root_oclass; +extern const struct nvkm_disp_oclass gp102_disp_root_oclass; #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c 
b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c index a410c0db8a08..42d94731655a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c @@ -26,6 +26,7 @@ #include <subdev/bios.h> #include <subdev/bios/bit.h> #include <subdev/bios/pmu.h> +#include <subdev/timer.h> static void pmu_code(struct nv50_devinit *init, u32 pmu, u32 img, u32 len, bool sec) @@ -123,15 +124,6 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post) return -EINVAL; } - /* reset PMU and load init table parser ucode */ - if (post) { - nvkm_mask(device, 0x000200, 0x00002000, 0x00000000); - nvkm_mask(device, 0x000200, 0x00002000, 0x00002000); - nvkm_rd32(device, 0x000200); - while (nvkm_rd32(device, 0x10a10c) & 0x00000006) { - } - } - ret = pmu_load(init, 0x04, post, &exec, &args); if (ret) return ret; @@ -156,8 +148,11 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post) if (post) { nvkm_wr32(device, 0x10a040, 0x00005000); pmu_exec(init, exec); - while (!(nvkm_rd32(device, 0x10a040) & 0x00002000)) { - } + if (nvkm_msec(device, 2000, + if (nvkm_rd32(device, 0x10a040) & 0x00002000) + break; + ) < 0) + return -ETIMEDOUT; } /* load and execute some other ucode image (bios therm?) */ diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild index ef47d57fcb87..63566ba12fbb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild @@ -26,7 +26,7 @@ nvkm-y += nvkm/subdev/fb/gm107.o nvkm-y += nvkm/subdev/fb/gm200.o nvkm-y += nvkm/subdev/fb/gm20b.o nvkm-y += nvkm/subdev/fb/gp100.o -nvkm-y += nvkm/subdev/fb/gp104.o +nvkm-y += nvkm/subdev/fb/gp102.o nvkm-y += nvkm/subdev/fb/ram.o nvkm-y += nvkm/subdev/fb/ramnv04.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c index 92cb71861bec..73b4ae1c73dc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c @@ -27,7 +27,7 @@ #include <core/memory.h> static const struct nvkm_fb_func -gp104_fb = { +gp102_fb = { .dtor = gf100_fb_dtor, .oneinit = gf100_fb_oneinit, .init = gp100_fb_init, @@ -37,7 +37,7 @@ gp104_fb = { }; int -gp104_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +gp102_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) { - return gf100_fb_new_(&gp104_fb, device, index, pfb); + return gf100_fb_new_(&gp102_fb, device, index, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild index 88b643b8664e..51fb4bf94a44 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild @@ -8,3 +8,5 @@ nvkm-y += nvkm/subdev/pmu/gk110.o nvkm-y += nvkm/subdev/pmu/gk208.o nvkm-y += nvkm/subdev/pmu/gk20a.o nvkm-y += nvkm/subdev/pmu/gm107.o +nvkm-y += nvkm/subdev/pmu/gp100.o +nvkm-y += nvkm/subdev/pmu/gp102.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c index 8dd164d13043..e611ce80f8ef 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c @@ -32,225 +32,85 @@ nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable) pmu->func->pgob(pmu, enable); } -int -nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2], - u32 process, u32 message, u32 data0, u32 data1) -{ - struct nvkm_subdev *subdev = &pmu->subdev; - struct nvkm_device *device = subdev->device; 
- u32 addr; - - mutex_lock(&subdev->mutex); - /* wait for a free slot in the fifo */ - addr = nvkm_rd32(device, 0x10a4a0); - if (nvkm_msec(device, 2000, - u32 tmp = nvkm_rd32(device, 0x10a4b0); - if (tmp != (addr ^ 8)) - break; - ) < 0) { - mutex_unlock(&subdev->mutex); - return -EBUSY; - } - - /* we currently only support a single process at a time waiting - * on a synchronous reply, take the PMU mutex and tell the - * receive handler what we're waiting for - */ - if (reply) { - pmu->recv.message = message; - pmu->recv.process = process; - } - - /* acquire data segment access */ - do { - nvkm_wr32(device, 0x10a580, 0x00000001); - } while (nvkm_rd32(device, 0x10a580) != 0x00000001); - - /* write the packet */ - nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) + - pmu->send.base)); - nvkm_wr32(device, 0x10a1c4, process); - nvkm_wr32(device, 0x10a1c4, message); - nvkm_wr32(device, 0x10a1c4, data0); - nvkm_wr32(device, 0x10a1c4, data1); - nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f); - - /* release data segment access */ - nvkm_wr32(device, 0x10a580, 0x00000000); - - /* wait for reply, if requested */ - if (reply) { - wait_event(pmu->recv.wait, (pmu->recv.process == 0)); - reply[0] = pmu->recv.data[0]; - reply[1] = pmu->recv.data[1]; - } - - mutex_unlock(&subdev->mutex); - return 0; -} - static void nvkm_pmu_recv(struct work_struct *work) { - struct nvkm_pmu *pmu = container_of(work, struct nvkm_pmu, recv.work); - struct nvkm_subdev *subdev = &pmu->subdev; - struct nvkm_device *device = subdev->device; - u32 process, message, data0, data1; - - /* nothing to do if GET == PUT */ - u32 addr = nvkm_rd32(device, 0x10a4cc); - if (addr == nvkm_rd32(device, 0x10a4c8)) - return; - - /* acquire data segment access */ - do { - nvkm_wr32(device, 0x10a580, 0x00000002); - } while (nvkm_rd32(device, 0x10a580) != 0x00000002); - - /* read the packet */ - nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) + - pmu->recv.base)); - process = nvkm_rd32(device, 0x10a1c4); - message = nvkm_rd32(device, 0x10a1c4); - data0 = nvkm_rd32(device, 0x10a1c4); - data1 = nvkm_rd32(device, 0x10a1c4); - nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f); - - /* release data segment access */ - nvkm_wr32(device, 0x10a580, 0x00000000); - - /* wake process if it's waiting on a synchronous reply */ - if (pmu->recv.process) { - if (process == pmu->recv.process && - message == pmu->recv.message) { - pmu->recv.data[0] = data0; - pmu->recv.data[1] = data1; - pmu->recv.process = 0; - wake_up(&pmu->recv.wait); - return; - } - } + struct nvkm_pmu *pmu = container_of(work, typeof(*pmu), recv.work); + return pmu->func->recv(pmu); +} - /* right now there's no other expected responses from the engine, - * so assume that any unexpected message is an error. 
- */ - nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n", - (char)((process & 0x000000ff) >> 0), - (char)((process & 0x0000ff00) >> 8), - (char)((process & 0x00ff0000) >> 16), - (char)((process & 0xff000000) >> 24), - process, message, data0, data1); +int +nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2], + u32 process, u32 message, u32 data0, u32 data1) +{ + if (!pmu || !pmu->func->send) + return -ENODEV; + return pmu->func->send(pmu, reply, process, message, data0, data1); } static void nvkm_pmu_intr(struct nvkm_subdev *subdev) { struct nvkm_pmu *pmu = nvkm_pmu(subdev); - struct nvkm_device *device = pmu->subdev.device; - u32 disp = nvkm_rd32(device, 0x10a01c); - u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16); - - if (intr & 0x00000020) { - u32 stat = nvkm_rd32(device, 0x10a16c); - if (stat & 0x80000000) { - nvkm_error(subdev, "UAS fault at %06x addr %08x\n", - stat & 0x00ffffff, - nvkm_rd32(device, 0x10a168)); - nvkm_wr32(device, 0x10a16c, 0x00000000); - intr &= ~0x00000020; - } - } - - if (intr & 0x00000040) { - schedule_work(&pmu->recv.work); - nvkm_wr32(device, 0x10a004, 0x00000040); - intr &= ~0x00000040; - } - - if (intr & 0x00000080) { - nvkm_info(subdev, "wr32 %06x %08x\n", - nvkm_rd32(device, 0x10a7a0), - nvkm_rd32(device, 0x10a7a4)); - nvkm_wr32(device, 0x10a004, 0x00000080); - intr &= ~0x00000080; - } - - if (intr) { - nvkm_error(subdev, "intr %08x\n", intr); - nvkm_wr32(device, 0x10a004, intr); - } + if (!pmu->func->intr) + return; + pmu->func->intr(pmu); } static int nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend) { struct nvkm_pmu *pmu = nvkm_pmu(subdev); - struct nvkm_device *device = pmu->subdev.device; - nvkm_wr32(device, 0x10a014, 0x00000060); + if (pmu->func->fini) + pmu->func->fini(pmu); + flush_work(&pmu->recv.work); return 0; } static int -nvkm_pmu_init(struct nvkm_subdev *subdev) +nvkm_pmu_reset(struct nvkm_pmu *pmu) { - struct nvkm_pmu *pmu = nvkm_pmu(subdev); struct nvkm_device *device = pmu->subdev.device; - int i; - /* prevent previous ucode from running, wait for idle, reset */ - nvkm_wr32(device, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */ + if (!(nvkm_rd32(device, 0x000200) & 0x00002000)) + return 0; + + /* Inhibit interrupts, and wait for idle. */ + nvkm_wr32(device, 0x10a014, 0x0000ffff); nvkm_msec(device, 2000, if (!nvkm_rd32(device, 0x10a04c)) break; ); - nvkm_mask(device, 0x000200, 0x00002000, 0x00000000); - nvkm_mask(device, 0x000200, 0x00002000, 0x00002000); - nvkm_rd32(device, 0x000200); + + /* Reset. */ + pmu->func->reset(pmu); + + /* Wait for IMEM/DMEM scrubbing to be complete. 
*/ nvkm_msec(device, 2000, if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006)) break; ); - /* upload data segment */ - nvkm_wr32(device, 0x10a1c0, 0x01000000); - for (i = 0; i < pmu->func->data.size / 4; i++) - nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]); - - /* upload code segment */ - nvkm_wr32(device, 0x10a180, 0x01000000); - for (i = 0; i < pmu->func->code.size / 4; i++) { - if ((i & 0x3f) == 0) - nvkm_wr32(device, 0x10a188, i >> 6); - nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]); - } - - /* start it running */ - nvkm_wr32(device, 0x10a10c, 0x00000000); - nvkm_wr32(device, 0x10a104, 0x00000000); - nvkm_wr32(device, 0x10a100, 0x00000002); - - /* wait for valid host->pmu ring configuration */ - if (nvkm_msec(device, 2000, - if (nvkm_rd32(device, 0x10a4d0)) - break; - ) < 0) - return -EBUSY; - pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff; - pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16; + return 0; +} - /* wait for valid pmu->host ring configuration */ - if (nvkm_msec(device, 2000, - if (nvkm_rd32(device, 0x10a4dc)) - break; - ) < 0) - return -EBUSY; - pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff; - pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16; +static int +nvkm_pmu_preinit(struct nvkm_subdev *subdev) +{ + struct nvkm_pmu *pmu = nvkm_pmu(subdev); + return nvkm_pmu_reset(pmu); +} - nvkm_wr32(device, 0x10a010, 0x000000e0); - return 0; +static int +nvkm_pmu_init(struct nvkm_subdev *subdev) +{ + struct nvkm_pmu *pmu = nvkm_pmu(subdev); + int ret = nvkm_pmu_reset(pmu); + if (ret == 0 && pmu->func->init) + ret = pmu->func->init(pmu); + return ret; } static void * @@ -262,6 +122,7 @@ nvkm_pmu_dtor(struct nvkm_subdev *subdev) static const struct nvkm_subdev_func nvkm_pmu = { .dtor = nvkm_pmu_dtor, + .preinit = nvkm_pmu_preinit, .init = nvkm_pmu_init, .fini = nvkm_pmu_fini, .intr = nvkm_pmu_intr, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c index aeb8ccd891fc..0e36d4cb7201 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c @@ -30,6 +30,12 @@ gf100_pmu = { .code.size = sizeof(gf100_pmu_code), .data.data = gf100_pmu_data, .data.size = sizeof(gf100_pmu_data), + .reset = gt215_pmu_reset, + .init = gt215_pmu_init, + .fini = gt215_pmu_fini, + .intr = gt215_pmu_intr, + .send = gt215_pmu_send, + .recv = gt215_pmu_recv, }; int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c index fbc88d8ecd4d..0e4ba4248b15 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c @@ -30,6 +30,12 @@ gf119_pmu = { .code.size = sizeof(gf119_pmu_code), .data.data = gf119_pmu_data, .data.size = sizeof(gf119_pmu_data), + .reset = gt215_pmu_reset, + .init = gt215_pmu_init, + .fini = gt215_pmu_fini, + .intr = gt215_pmu_intr, + .send = gt215_pmu_send, + .recv = gt215_pmu_recv, }; int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c index 86f9f3b13f71..2ad858d825ac 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c @@ -109,6 +109,12 @@ gk104_pmu = { .code.size = sizeof(gk104_pmu_code), .data.data = gk104_pmu_data, .data.size = sizeof(gk104_pmu_data), + .reset = gt215_pmu_reset, + .init = gt215_pmu_init, + .fini = gt215_pmu_fini, + .intr = gt215_pmu_intr, + .send = gt215_pmu_send, + .recv = gt215_pmu_recv, .pgob = 
gk104_pmu_pgob, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c index ae255247c9d1..fc4b8ecfdaeb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c @@ -88,6 +88,12 @@ gk110_pmu = { .code.size = sizeof(gk110_pmu_code), .data.data = gk110_pmu_data, .data.size = sizeof(gk110_pmu_data), + .reset = gt215_pmu_reset, + .init = gt215_pmu_init, + .fini = gt215_pmu_fini, + .intr = gt215_pmu_intr, + .send = gt215_pmu_send, + .recv = gt215_pmu_recv, .pgob = gk110_pmu_pgob, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c index 3b4917637902..e9a91277683a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c @@ -30,6 +30,12 @@ gk208_pmu = { .code.size = sizeof(gk208_pmu_code), .data.data = gk208_pmu_data, .data.size = sizeof(gk208_pmu_data), + .reset = gt215_pmu_reset, + .init = gt215_pmu_init, + .fini = gt215_pmu_fini, + .intr = gt215_pmu_intr, + .send = gt215_pmu_send, + .recv = gt215_pmu_recv, .pgob = gk110_pmu_pgob, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c index 31b8692b4641..9a248ed75f09 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c @@ -32,6 +32,12 @@ gm107_pmu = { .code.size = sizeof(gm107_pmu_code), .data.data = gm107_pmu_data, .data.size = sizeof(gm107_pmu_data), + .reset = gt215_pmu_reset, + .init = gt215_pmu_init, + .fini = gt215_pmu_fini, + .intr = gt215_pmu_intr, + .send = gt215_pmu_send, + .recv = gt215_pmu_recv, }; int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c new file mode 100644 index 000000000000..6c41c20c85a7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c @@ -0,0 +1,35 @@ +/* + * Copyright 2016 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "priv.h" + +static const struct nvkm_pmu_func +gp100_pmu = { + .reset = gt215_pmu_reset, +}; + +int +gp100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) +{ + return nvkm_pmu_new_(&gp100_pmu, device, index, ppmu); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c new file mode 100644 index 000000000000..f017352206c9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c @@ -0,0 +1,43 @@ +/* + * Copyright 2016 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "priv.h" + +static void +gp102_pmu_reset(struct nvkm_pmu *pmu) +{ + struct nvkm_device *device = pmu->subdev.device; + nvkm_mask(device, 0x10a3c0, 0x00000001, 0x00000001); + nvkm_mask(device, 0x10a3c0, 0x00000001, 0x00000000); +} + +static const struct nvkm_pmu_func +gp102_pmu = { + .reset = gp102_pmu_reset, +}; + +int +gp102_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) +{ + return nvkm_pmu_new_(&gp102_pmu, device, index, ppmu); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c index dcf9eaf274aa..90d428b3be97 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c @@ -24,12 +24,229 @@ #include "priv.h" #include "fuc/gt215.fuc3.h" +#include <subdev/timer.h> + +int +gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2], + u32 process, u32 message, u32 data0, u32 data1) +{ + struct nvkm_subdev *subdev = &pmu->subdev; + struct nvkm_device *device = subdev->device; + u32 addr; + + mutex_lock(&subdev->mutex); + /* wait for a free slot in the fifo */ + addr = nvkm_rd32(device, 0x10a4a0); + if (nvkm_msec(device, 2000, + u32 tmp = nvkm_rd32(device, 0x10a4b0); + if (tmp != (addr ^ 8)) + break; + ) < 0) { + mutex_unlock(&subdev->mutex); + return -EBUSY; + } + + /* we currently only support a single process at a time waiting + * on a synchronous reply, take the PMU mutex and tell the + * receive handler what we're waiting for + */ + if (reply) { + pmu->recv.message = message; + pmu->recv.process = process; + } + + /* acquire data segment access */ + do { + nvkm_wr32(device, 0x10a580, 0x00000001); + } while (nvkm_rd32(device, 0x10a580) != 0x00000001); + + /* write the packet */ + nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) 
+ + pmu->send.base)); + nvkm_wr32(device, 0x10a1c4, process); + nvkm_wr32(device, 0x10a1c4, message); + nvkm_wr32(device, 0x10a1c4, data0); + nvkm_wr32(device, 0x10a1c4, data1); + nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f); + + /* release data segment access */ + nvkm_wr32(device, 0x10a580, 0x00000000); + + /* wait for reply, if requested */ + if (reply) { + wait_event(pmu->recv.wait, (pmu->recv.process == 0)); + reply[0] = pmu->recv.data[0]; + reply[1] = pmu->recv.data[1]; + } + + mutex_unlock(&subdev->mutex); + return 0; +} + +void +gt215_pmu_recv(struct nvkm_pmu *pmu) +{ + struct nvkm_subdev *subdev = &pmu->subdev; + struct nvkm_device *device = subdev->device; + u32 process, message, data0, data1; + + /* nothing to do if GET == PUT */ + u32 addr = nvkm_rd32(device, 0x10a4cc); + if (addr == nvkm_rd32(device, 0x10a4c8)) + return; + + /* acquire data segment access */ + do { + nvkm_wr32(device, 0x10a580, 0x00000002); + } while (nvkm_rd32(device, 0x10a580) != 0x00000002); + + /* read the packet */ + nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) + + pmu->recv.base)); + process = nvkm_rd32(device, 0x10a1c4); + message = nvkm_rd32(device, 0x10a1c4); + data0 = nvkm_rd32(device, 0x10a1c4); + data1 = nvkm_rd32(device, 0x10a1c4); + nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f); + + /* release data segment access */ + nvkm_wr32(device, 0x10a580, 0x00000000); + + /* wake process if it's waiting on a synchronous reply */ + if (pmu->recv.process) { + if (process == pmu->recv.process && + message == pmu->recv.message) { + pmu->recv.data[0] = data0; + pmu->recv.data[1] = data1; + pmu->recv.process = 0; + wake_up(&pmu->recv.wait); + return; + } + } + + /* right now there's no other expected responses from the engine, + * so assume that any unexpected message is an error. 
+ */ + nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n", + (char)((process & 0x000000ff) >> 0), + (char)((process & 0x0000ff00) >> 8), + (char)((process & 0x00ff0000) >> 16), + (char)((process & 0xff000000) >> 24), + process, message, data0, data1); +} + +void +gt215_pmu_intr(struct nvkm_pmu *pmu) +{ + struct nvkm_subdev *subdev = &pmu->subdev; + struct nvkm_device *device = subdev->device; + u32 disp = nvkm_rd32(device, 0x10a01c); + u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16); + + if (intr & 0x00000020) { + u32 stat = nvkm_rd32(device, 0x10a16c); + if (stat & 0x80000000) { + nvkm_error(subdev, "UAS fault at %06x addr %08x\n", + stat & 0x00ffffff, + nvkm_rd32(device, 0x10a168)); + nvkm_wr32(device, 0x10a16c, 0x00000000); + intr &= ~0x00000020; + } + } + + if (intr & 0x00000040) { + schedule_work(&pmu->recv.work); + nvkm_wr32(device, 0x10a004, 0x00000040); + intr &= ~0x00000040; + } + + if (intr & 0x00000080) { + nvkm_info(subdev, "wr32 %06x %08x\n", + nvkm_rd32(device, 0x10a7a0), + nvkm_rd32(device, 0x10a7a4)); + nvkm_wr32(device, 0x10a004, 0x00000080); + intr &= ~0x00000080; + } + + if (intr) { + nvkm_error(subdev, "intr %08x\n", intr); + nvkm_wr32(device, 0x10a004, intr); + } +} + +void +gt215_pmu_fini(struct nvkm_pmu *pmu) +{ + nvkm_wr32(pmu->subdev.device, 0x10a014, 0x00000060); +} + +void +gt215_pmu_reset(struct nvkm_pmu *pmu) +{ + struct nvkm_device *device = pmu->subdev.device; + nvkm_mask(device, 0x000200, 0x00002000, 0x00000000); + nvkm_mask(device, 0x000200, 0x00002000, 0x00002000); + nvkm_rd32(device, 0x000200); +} + +int +gt215_pmu_init(struct nvkm_pmu *pmu) +{ + struct nvkm_device *device = pmu->subdev.device; + int i; + + /* upload data segment */ + nvkm_wr32(device, 0x10a1c0, 0x01000000); + for (i = 0; i < pmu->func->data.size / 4; i++) + nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]); + + /* upload code segment */ + nvkm_wr32(device, 0x10a180, 0x01000000); + for (i = 0; i < pmu->func->code.size / 4; i++) { + if ((i & 0x3f) == 0) + nvkm_wr32(device, 0x10a188, i >> 6); + nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]); + } + + /* start it running */ + nvkm_wr32(device, 0x10a10c, 0x00000000); + nvkm_wr32(device, 0x10a104, 0x00000000); + nvkm_wr32(device, 0x10a100, 0x00000002); + + /* wait for valid host->pmu ring configuration */ + if (nvkm_msec(device, 2000, + if (nvkm_rd32(device, 0x10a4d0)) + break; + ) < 0) + return -EBUSY; + pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff; + pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16; + + /* wait for valid pmu->host ring configuration */ + if (nvkm_msec(device, 2000, + if (nvkm_rd32(device, 0x10a4dc)) + break; + ) < 0) + return -EBUSY; + pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff; + pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16; + + nvkm_wr32(device, 0x10a010, 0x000000e0); + return 0; +} + static const struct nvkm_pmu_func gt215_pmu = { .code.data = gt215_pmu_code, .code.size = sizeof(gt215_pmu_code), .data.data = gt215_pmu_data, .data.size = sizeof(gt215_pmu_data), + .reset = gt215_pmu_reset, + .init = gt215_pmu_init, + .fini = gt215_pmu_fini, + .intr = gt215_pmu_intr, + .send = gt215_pmu_send, + .recv = gt215_pmu_recv, }; int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h index 73b811ccc2d5..2e2179a4ad17 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h @@ -18,8 +18,22 @@ struct nvkm_pmu_func { u32 size; } data; + void (*reset)(struct nvkm_pmu 
*); + int (*init)(struct nvkm_pmu *); + void (*fini)(struct nvkm_pmu *); + void (*intr)(struct nvkm_pmu *); + int (*send)(struct nvkm_pmu *, u32 reply[2], u32 process, + u32 message, u32 data0, u32 data1); + void (*recv)(struct nvkm_pmu *); void (*pgob)(struct nvkm_pmu *, bool); }; +void gt215_pmu_reset(struct nvkm_pmu *); +int gt215_pmu_init(struct nvkm_pmu *); +void gt215_pmu_fini(struct nvkm_pmu *); +void gt215_pmu_intr(struct nvkm_pmu *); +void gt215_pmu_recv(struct nvkm_pmu *); +int gt215_pmu_send(struct nvkm_pmu *, u32[2], u32, u32, u32, u32); + void gk110_pmu_pgob(struct nvkm_pmu *, bool); #endif
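
The PMU rework in the hunks above follows a recurring nvkm pattern: the shared base code in subdev/pmu/base.c keeps only the generic life-cycle (reset before init, guard optional hooks, flush the receive work on fini), while everything chip-specific (reset, ucode upload, interrupt handling, host-to-PMU messaging) moves behind per-chip function pointers in struct nvkm_pmu_func. GT215-era implementations reuse the gt215_pmu_* hooks, whereas the new GP100 and GP102 files supply only .reset, so nvkm_pmu_send() now returns -ENODEV on parts that expose no message interface. The sketch below is an illustrative, self-contained model of that dispatch style, not nouveau code; all type and function names in it are hypothetical stand-ins, and only the register offsets mentioned in its comments come from the patch itself.

/* Minimal sketch of the per-chip function-table pattern used above.
 * Names are hypothetical; this is not nvkm code. */
#include <stdio.h>

struct pmu;

struct pmu_func {
	void (*reset)(struct pmu *);   /* mandatory */
	int  (*init)(struct pmu *);    /* optional: NULL means "nothing to load" */
};

struct pmu {
	const struct pmu_func *func;
	const char *name;
};

/* GT215-style parts reset via the 0x000200 engine bit, then load and start
 * the embedded ucode. */
static void gt215_reset(struct pmu *pmu) { printf("%s: engine reset\n", pmu->name); }
static int  gt215_init(struct pmu *pmu)  { printf("%s: upload ucode, start falcon\n", pmu->name); return 0; }
static const struct pmu_func gt215_func = { gt215_reset, gt215_init };

/* A GP102-style part only knows how to reset itself (0x10a3c0 in the patch);
 * it provides no init hook. */
static void gp102_reset(struct pmu *pmu) { printf("%s: falcon reset\n", pmu->name); }
static const struct pmu_func gp102_func = { gp102_reset, NULL };

/* Generic code calls the mandatory hook directly and guards the optional one,
 * mirroring how nvkm_pmu_init() checks pmu->func->init before calling it. */
static int pmu_init(struct pmu *pmu)
{
	pmu->func->reset(pmu);
	return pmu->func->init ? pmu->func->init(pmu) : 0;
}

int main(void)
{
	struct pmu a = { &gt215_func, "gt215" };
	struct pmu b = { &gp102_func, "gp102" };
	return pmu_init(&a) | pmu_init(&b);
}

Keeping the optional hooks NULL-checked in the base code is what lets the new gp100.c and gp102.c files stay at a single .reset assignment while older chips share the full gt215 implementation.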