Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/exynos/exynos_ddc.c              |   1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c         |   1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c         |   3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c          |  19
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c          |   1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.c         |   1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c          |  13
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c      |   1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c         |   1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c             |   1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmiphy.c          |   1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c            |   1
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c                  |   2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c           |  46
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c            |   1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c |   2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c |   2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c |   8
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c  |   9
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c  |   1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/xtensa.c     |   8
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/vm.h |   2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/priv.h    |   2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c |  22
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c |  14
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c  |  10
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c    |   2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/vm/base.c    |  27
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c             |   7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c          |   3
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_fence.c             |   2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fence.c             |  14
-rw-r--r--  drivers/gpu/drm/qxl/qxl_cmd.c                    |  42
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c                |  70
-rw-r--r--  drivers/gpu/drm/qxl/qxl_draw.c                   | 263
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h                    |  76
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fb.c                     | 184
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fence.c                  |  10
-rw-r--r--  drivers/gpu/drm/qxl/qxl_gem.c                    |   2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_image.c                  | 111
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ioctl.c                  | 319
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.c                 |  70
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.h                 |   6
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c                | 212
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c                    |   2
45 files changed, 1005 insertions(+), 590 deletions(-)
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
index 95c75edef01a..30ef41bcd7b8 100644
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/i2c.h>
-#include <linux/module.h>
#include "exynos_drm_drv.h"
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 61b094f689a7..6e047bd53e2f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -12,7 +12,6 @@
*
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 3e106beca5b6..1c263dac3c1c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -14,7 +14,6 @@
#include <drm/drmP.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/of_device.h>
@@ -130,7 +129,6 @@ static const struct of_device_id fimd_driver_dt_match[] = {
.data = &exynos5_fimd_driver_data },
{},
};
-MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
#endif
static inline struct fimd_driver_data *drm_fimd_get_driver_data(
@@ -1082,7 +1080,6 @@ static struct platform_device_id fimd_driver_ids[] = {
},
{},
};
-MODULE_DEVICE_TABLE(platform, fimd_driver_ids);
static const struct dev_pm_ops fimd_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 42a5a5466075..eddea4941483 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -8,7 +8,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -806,9 +805,20 @@ static void g2d_dma_start(struct g2d_data *g2d,
struct g2d_cmdlist_node *node =
list_first_entry(&runqueue_node->run_cmdlist,
struct g2d_cmdlist_node, list);
+ int ret;
+
+ ret = pm_runtime_get_sync(g2d->dev);
+ if (ret < 0) {
+ dev_warn(g2d->dev, "failed pm power on.\n");
+ return;
+ }
- pm_runtime_get_sync(g2d->dev);
- clk_enable(g2d->gate_clk);
+ ret = clk_prepare_enable(g2d->gate_clk);
+ if (ret < 0) {
+ dev_warn(g2d->dev, "failed to enable clock.\n");
+ pm_runtime_put_sync(g2d->dev);
+ return;
+ }
writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
@@ -861,7 +871,7 @@ static void g2d_runqueue_worker(struct work_struct *work)
runqueue_work);
mutex_lock(&g2d->runqueue_mutex);
- clk_disable(g2d->gate_clk);
+ clk_disable_unprepare(g2d->gate_clk);
pm_runtime_put_sync(g2d->dev);
complete(&g2d->runqueue_node->complete);
@@ -1521,7 +1531,6 @@ static const struct of_device_id exynos_g2d_match[] = {
{ .compatible = "samsung,exynos5250-g2d" },
{},
};
-MODULE_DEVICE_TABLE(of, exynos_g2d_match);
#endif
struct platform_driver g2d_driver = {
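[Note: a minimal sketch of the enable/unwind ordering the g2d hunk above adopts — power domain first, then clock, released in reverse on failure. struct my_dev and my_dev_start are hypothetical names, not the driver's actual symbols.]

#include <linux/clk.h>
#include <linux/pm_runtime.h>

struct my_dev {
	struct device *dev;
	struct clk *clk;
};

static int my_dev_start(struct my_dev *md)
{
	int ret;

	ret = pm_runtime_get_sync(md->dev);	/* runtime-resume the device */
	if (ret < 0)
		return ret;

	ret = clk_prepare_enable(md->clk);	/* prepare + enable in one call */
	if (ret < 0) {
		pm_runtime_put_sync(md->dev);	/* unwind in reverse order */
		return ret;
	}
	return 0;
}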
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 472e3b25e7f2..90b8a1a5344c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -12,7 +12,6 @@
*
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index aaa550d622f0..8d3bc01d6834 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/wait.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index b1ef8e7ff9c9..d2b6ab4def93 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -12,7 +12,6 @@
*
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/clk.h>
@@ -342,10 +341,10 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
*/
ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
prop_list->ipp_id);
- if (!ippdrv) {
+ if (IS_ERR(ippdrv)) {
DRM_ERROR("not found ipp%d driver.\n",
prop_list->ipp_id);
- return -EINVAL;
+ return PTR_ERR(ippdrv);
}
prop_list = ippdrv->prop_list;
@@ -970,9 +969,9 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
/* find command node */
c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
qbuf->prop_id);
- if (!c_node) {
+ if (IS_ERR(c_node)) {
DRM_ERROR("failed to get command node.\n");
- return -EFAULT;
+ return PTR_ERR(c_node);
}
/* buffer control */
@@ -1106,9 +1105,9 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
cmd_ctrl->prop_id);
- if (!c_node) {
+ if (IS_ERR(c_node)) {
DRM_ERROR("invalid command node list.\n");
- return -EINVAL;
+ return PTR_ERR(c_node);
}
if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
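[Note: a hedged sketch of the ERR_PTR convention the ipp hunks above switch to — the lookup encodes its errno in the returned pointer, so callers test IS_ERR() and propagate PTR_ERR() instead of inventing a new error code. thing_find/thing_use are illustrative names only.]

#include <linux/err.h>
#include <linux/errno.h>

struct thing { int id; };

/* Returns a valid pointer or an ERR_PTR()-encoded errno, never NULL. */
static struct thing *thing_find(int id)
{
	if (id < 0)
		return ERR_PTR(-EINVAL);
	/* ... a real lookup would go here ... */
	return ERR_PTR(-ENODEV);	/* not found */
}

static int thing_use(int id)
{
	struct thing *t = thing_find(id);

	if (IS_ERR(t))
		return PTR_ERR(t);	/* propagate the encoded errno */
	/* ... use t ... */
	return 0;
}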
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 427640aa5148..49669aa24c45 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -10,7 +10,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 41cc74d83e4e..c57c56519add 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -13,7 +13,6 @@
#include <drm/drmP.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <drm/exynos_drm.h>
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 62ef5971ac3c..2f5c6942c968 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -24,7 +24,6 @@
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/i2c.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
index ef04255076c7..6e320ae9afed 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/i2c.h>
-#include <linux/module.h>
#include "exynos_drm_drv.h"
#include "exynos_hdmi.h"
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 42ffb71c63bc..c9a137caea41 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -23,7 +23,6 @@
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/i2c.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index b1520682cb23..66c63808fa35 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1649,7 +1649,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (INTEL_INFO(dev)->num_pipes) {
/* Must be done after probing outputs */
intel_opregion_init(dev);
- acpi_video_register_with_quirks();
+ acpi_video_register();
}
if (IS_GEN5(dev))
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 251784aa2225..503a414cbdad 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -29,6 +29,7 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc)
struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct mga_device *mdev = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
int i;
if (!crtc->enabled)
@@ -36,6 +37,28 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc)
WREG8(DAC_INDEX + MGA1064_INDEX, 0);
+ if (fb && fb->bits_per_pixel == 16) {
+ int inc = (fb->depth == 15) ? 8 : 4;
+ u8 r, b;
+ for (i = 0; i < MGAG200_LUT_SIZE; i += inc) {
+ if (fb->depth == 16) {
+ if (i > (MGAG200_LUT_SIZE >> 1)) {
+ r = b = 0;
+ } else {
+ r = mga_crtc->lut_r[i << 1];
+ b = mga_crtc->lut_b[i << 1];
+ }
+ } else {
+ r = mga_crtc->lut_r[i];
+ b = mga_crtc->lut_b[i];
+ }
+ /* VGA registers */
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, r);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_g[i]);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, b);
+ }
+ return;
+ }
for (i = 0; i < MGAG200_LUT_SIZE; i++) {
/* VGA registers */
WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_r[i]);
@@ -877,7 +900,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
pitch = crtc->fb->pitches[0] / (crtc->fb->bits_per_pixel / 8);
if (crtc->fb->bits_per_pixel == 24)
- pitch = pitch >> (4 - bppshift);
+ pitch = (pitch * 3) >> (4 - bppshift);
else
pitch = pitch >> (4 - bppshift);
@@ -1251,6 +1274,24 @@ static void mga_crtc_destroy(struct drm_crtc *crtc)
kfree(mga_crtc);
}
+static void mga_crtc_disable(struct drm_crtc *crtc)
+{
+ int ret;
+ DRM_DEBUG_KMS("\n");
+ mga_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ if (crtc->fb) {
+ struct mga_framebuffer *mga_fb = to_mga_framebuffer(crtc->fb);
+ struct drm_gem_object *obj = mga_fb->obj;
+ struct mgag200_bo *bo = gem_to_mga_bo(obj);
+ ret = mgag200_bo_reserve(bo, false);
+ if (ret)
+ return;
+ mgag200_bo_push_sysram(bo);
+ mgag200_bo_unreserve(bo);
+ }
+ crtc->fb = NULL;
+}
+
/* These provide the minimum set of functions required to handle a CRTC */
static const struct drm_crtc_funcs mga_crtc_funcs = {
.cursor_set = mga_crtc_cursor_set,
@@ -1261,6 +1302,7 @@ static const struct drm_crtc_funcs mga_crtc_funcs = {
};
static const struct drm_crtc_helper_funcs mga_helper_funcs = {
+ .disable = mga_crtc_disable,
.dpms = mga_crtc_dpms,
.mode_fixup = mga_crtc_mode_fixup,
.mode_set = mga_crtc_mode_set,
@@ -1581,6 +1623,8 @@ static struct drm_connector *mga_vga_init(struct drm_device *dev)
drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
+ drm_sysfs_connector_add(connector);
+
mga_connector->i2c = mgag200_i2c_create(dev);
if (!mga_connector->i2c)
DRM_ERROR("failed to add ddc bus\n");
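[Note: a quick worked example for the 24bpp pitch fix above; the numbers are hypothetical and assume the usual MGA bppshift of 0 for 24bpp.]

/*
 * fb 1024 pixels wide at 24bpp  ->  pitches[0] = 3072 bytes
 * pitch = 3072 / (24 / 8)             = 1024 pixels
 * new:  (1024 * 3) >> (4 - 0) = 192   = 3072 / 16 bytes (correct)
 * old:   1024      >> (4 - 0) =  64   (three times too small)
 */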
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 3acb2b044c7b..13878d5de063 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -353,6 +353,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
bo->pin_count++;
if (gpu_addr)
*gpu_addr = mgag200_bo_gpu_offset(bo);
+ return 0;
}
mgag200_ttm_placement(bo, pl_flag);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
index 373dbcc523b2..a19e7d79b847 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
@@ -36,6 +36,8 @@ nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
if (data && data[0]) {
for (i = 0; i < size; i++)
nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]);
+ for (; i < 0x60; i++)
+ nv_wr32(priv, 0x61c440 + soff, (i << 8));
nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003);
} else
if (data) {
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
index dc57e24fc1df..717639386ced 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
@@ -41,6 +41,8 @@ nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
if (data && data[0]) {
for (i = 0; i < size; i++)
nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]);
+ for (; i < 0x60; i++)
+ nv_wr32(priv, 0x10ec00 + soff, (i << 8));
nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003);
} else
if (data) {
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
index ab1e918469a8..526b75242899 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -47,14 +47,8 @@ int
nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
{
struct nv50_disp_priv *priv = (void *)object->engine;
- struct nouveau_bios *bios = nouveau_bios(priv);
- const u16 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12;
const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3;
- const u8 link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2;
const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR);
- const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or);
- struct dcb_output outp;
- u8 ver, hdr;
u32 data;
int ret = -EINVAL;
@@ -62,8 +56,6 @@ nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
return -EINVAL;
data = *(u32 *)args;
- if (type && !dcb_outp_match(bios, type, mask, &ver, &hdr, &outp))
- return -ENODEV;
switch (mthd & ~0x3f) {
case NV50_DISP_SOR_PWR:
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index 49ecbb859b25..c19004301309 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -265,8 +265,8 @@ nv31_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int
nv31_mpeg_init(struct nouveau_object *object)
{
- struct nouveau_engine *engine = nv_engine(object->engine);
- struct nv31_mpeg_priv *priv = (void *)engine;
+ struct nouveau_engine *engine = nv_engine(object);
+ struct nv31_mpeg_priv *priv = (void *)object;
struct nouveau_fb *pfb = nouveau_fb(object);
int ret, i;
@@ -284,7 +284,10 @@ nv31_mpeg_init(struct nouveau_object *object)
/* PMPEG init */
nv_wr32(priv, 0x00b32c, 0x00000000);
nv_wr32(priv, 0x00b314, 0x00000100);
- nv_wr32(priv, 0x00b220, nv44_graph_class(priv) ? 0x00000044 : 0x00000031);
+ if (nv_device(priv)->chipset >= 0x40 && nv44_graph_class(priv))
+ nv_wr32(priv, 0x00b220, 0x00000044);
+ else
+ nv_wr32(priv, 0x00b220, 0x00000031);
nv_wr32(priv, 0x00b300, 0x02001ec1);
nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
index f7c581ad1991..dd6196072e9c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
@@ -61,6 +61,7 @@ nv40_mpeg_context_ctor(struct nouveau_object *parent,
if (ret)
return ret;
+ nv_wo32(&chan->base.base, 0x78, 0x02001ec1);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/xtensa.c b/drivers/gpu/drm/nouveau/core/engine/xtensa.c
index 0639bc59d0a5..5f6ede7c4892 100644
--- a/drivers/gpu/drm/nouveau/core/engine/xtensa.c
+++ b/drivers/gpu/drm/nouveau/core/engine/xtensa.c
@@ -118,7 +118,13 @@ _nouveau_xtensa_init(struct nouveau_object *object)
return ret;
}
- ret = nouveau_gpuobj_new(object, NULL, fw->size, 0x1000, 0,
+ if (fw->size > 0x40000) {
+ nv_warn(xtensa, "firmware %s too large\n", name);
+ release_firmware(fw);
+ return -EINVAL;
+ }
+
+ ret = nouveau_gpuobj_new(object, NULL, 0x40000, 0x1000, 0,
&xtensa->gpu_fw);
if (ret) {
release_firmware(fw);
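[Note: the xtensa change above is the usual request_firmware() size-validation pattern — reject oversized blobs before committing to a fixed-size buffer, and release the firmware on every exit path. A self-contained sketch; load_fw_checked is a hypothetical helper.]

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/string.h>

static int load_fw_checked(struct device *dev, const char *name,
			   void *dst, size_t max)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, name, dev);
	if (ret)
		return ret;

	if (fw->size > max) {			/* reject before touching dst */
		dev_warn(dev, "firmware %s too large\n", name);
		release_firmware(fw);
		return -EINVAL;
	}

	memcpy(dst, fw->data, fw->size);
	release_firmware(fw);			/* release on every exit path */
	return 0;
}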
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
index f2e87b105666..fcf57fa309bf 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
@@ -55,7 +55,7 @@ struct nouveau_vma {
struct nouveau_vm {
struct nouveau_vmmgr *vmm;
struct nouveau_mm mm;
- int refcount;
+ struct kref refcount;
struct list_head pgd_list;
atomic_t engref[NVDEV_SUBDEV_NR];
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
index 6c974dd83e8b..db9d6ddde52c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
@@ -81,7 +81,7 @@ void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
u32 pitch, u32 flags, struct nouveau_fb_tile *);
-void nv50_ram_put(struct nouveau_fb *, struct nouveau_mem **);
+void __nv50_ram_put(struct nouveau_fb *, struct nouveau_mem *);
extern int nv50_fb_memtype[0x80];
#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
index af5aa7ee8ad9..903baff77fdd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
@@ -27,17 +27,10 @@
#include "priv.h"
void
-nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
+__nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem *mem)
{
struct nouveau_mm_node *this;
- struct nouveau_mem *mem;
- mem = *pmem;
- *pmem = NULL;
- if (unlikely(mem == NULL))
- return;
-
- mutex_lock(&pfb->base.mutex);
while (!list_empty(&mem->regions)) {
this = list_first_entry(&mem->regions, typeof(*this), rl_entry);
@@ -46,6 +39,19 @@ nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
}
nouveau_mm_free(&pfb->tags, &mem->tag);
+}
+
+void
+nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
+{
+ struct nouveau_mem *mem = *pmem;
+
+ *pmem = NULL;
+ if (unlikely(mem == NULL))
+ return;
+
+ mutex_lock(&pfb->base.mutex);
+ __nv50_ram_put(pfb, mem);
mutex_unlock(&pfb->base.mutex);
kfree(mem);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
index 9c3634acbb9d..cf97c4de4a6b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
@@ -33,11 +33,19 @@ void
nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
{
struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb);
+ struct nouveau_mem *mem = *pmem;
- if ((*pmem)->tag)
- ltcg->tags_free(ltcg, &(*pmem)->tag);
+ *pmem = NULL;
+ if (unlikely(mem == NULL))
+ return;
- nv50_ram_put(pfb, pmem);
+ mutex_lock(&pfb->base.mutex);
+ if (mem->tag)
+ ltcg->tags_free(ltcg, &mem->tag);
+ __nv50_ram_put(pfb, mem);
+ mutex_unlock(&pfb->base.mutex);
+
+ kfree(mem);
}
int
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
index bf489dcf46e2..c4c1d415e7fe 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
@@ -103,7 +103,7 @@ nv50_gpio_intr(struct nouveau_subdev *subdev)
int i;
intr0 = nv_rd32(priv, 0xe054) & nv_rd32(priv, 0xe050);
- if (nv_device(priv)->chipset >= 0x90)
+ if (nv_device(priv)->chipset > 0x92)
intr1 = nv_rd32(priv, 0xe074) & nv_rd32(priv, 0xe070);
hi = (intr0 & 0x0000ffff) | (intr1 << 16);
@@ -115,7 +115,7 @@ nv50_gpio_intr(struct nouveau_subdev *subdev)
}
nv_wr32(priv, 0xe054, intr0);
- if (nv_device(priv)->chipset >= 0x90)
+ if (nv_device(priv)->chipset > 0x92)
nv_wr32(priv, 0xe074, intr1);
}
@@ -146,7 +146,7 @@ nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int ret;
ret = nouveau_gpio_create(parent, engine, oclass,
- nv_device(parent)->chipset >= 0x90 ? 32 : 16,
+ nv_device(parent)->chipset > 0x92 ? 32 : 16,
&priv);
*pobject = nv_object(priv);
if (ret)
@@ -182,7 +182,7 @@ nv50_gpio_init(struct nouveau_object *object)
/* disable, and ack any pending gpio interrupts */
nv_wr32(priv, 0xe050, 0x00000000);
nv_wr32(priv, 0xe054, 0xffffffff);
- if (nv_device(priv)->chipset >= 0x90) {
+ if (nv_device(priv)->chipset > 0x92) {
nv_wr32(priv, 0xe070, 0x00000000);
nv_wr32(priv, 0xe074, 0xffffffff);
}
@@ -195,7 +195,7 @@ nv50_gpio_fini(struct nouveau_object *object, bool suspend)
{
struct nv50_gpio_priv *priv = (void *)object;
nv_wr32(priv, 0xe050, 0x00000000);
- if (nv_device(priv)->chipset >= 0x90)
+ if (nv_device(priv)->chipset > 0x92)
nv_wr32(priv, 0xe070, 0x00000000);
return nouveau_gpio_fini(&priv->base, suspend);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index 0cb322a5e72c..f25fc5fc7dd1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -41,7 +41,7 @@ nv50_mc_intr[] = {
{ 0x04000000, NVDEV_ENGINE_DISP },
{ 0x10000000, NVDEV_SUBDEV_BUS },
{ 0x80000000, NVDEV_ENGINE_SW },
- { 0x0000d101, NVDEV_SUBDEV_FB },
+ { 0x0002d101, NVDEV_SUBDEV_FB },
{},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
index 67fcb6c852ac..ef3133e7575c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -361,7 +361,7 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
INIT_LIST_HEAD(&vm->pgd_list);
vm->vmm = vmm;
- vm->refcount = 1;
+ kref_init(&vm->refcount);
vm->fpde = offset >> (vmm->pgt_bits + 12);
vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12);
@@ -441,8 +441,9 @@ nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
}
static void
-nouveau_vm_del(struct nouveau_vm *vm)
+nouveau_vm_del(struct kref *kref)
{
+ struct nouveau_vm *vm = container_of(kref, typeof(*vm), refcount);
struct nouveau_vm_pgd *vpgd, *tmp;
list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
@@ -458,27 +459,19 @@ int
nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
struct nouveau_gpuobj *pgd)
{
- struct nouveau_vm *vm;
- int ret;
-
- vm = ref;
- if (vm) {
- ret = nouveau_vm_link(vm, pgd);
+ if (ref) {
+ int ret = nouveau_vm_link(ref, pgd);
if (ret)
return ret;
- vm->refcount++;
+ kref_get(&ref->refcount);
}
- vm = *ptr;
- *ptr = ref;
-
- if (vm) {
- nouveau_vm_unlink(vm, pgd);
-
- if (--vm->refcount == 0)
- nouveau_vm_del(vm);
+ if (*ptr) {
+ nouveau_vm_unlink(*ptr, pgd);
+ kref_put(&(*ptr)->refcount, nouveau_vm_del);
}
+ *ptr = ref;
return 0;
}
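[Note: the vm/base.c hunks above are a textbook conversion from an open-coded refcount to a kref; a minimal generic sketch follows — struct obj and its helpers are hypothetical.]

#include <linux/kref.h>
#include <linux/slab.h>

struct obj {
	struct kref refcount;
};

static void obj_del(struct kref *kref)		/* invoked when the count hits 0 */
{
	struct obj *o = container_of(kref, struct obj, refcount);
	kfree(o);
}

static struct obj *obj_new(void)
{
	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);
	if (o)
		kref_init(&o->refcount);	/* starts at 1 */
	return o;
}

static void obj_get(struct obj *o)
{
	kref_get(&o->refcount);
}

static void obj_put(struct obj *o)
{
	if (o)
		kref_put(&o->refcount, obj_del);
}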
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 4e7ee5f4155c..af20fba3a1a4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -198,7 +198,12 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
size_t acc_size;
int ret;
int type = ttm_bo_type_device;
- int max_size = INT_MAX & ~((1 << drm->client.base.vm->vmm->lpg_shift) - 1);
+ int lpg_shift = 12;
+ int max_size;
+
+ if (drm->client.base.vm)
+ lpg_shift = drm->client.base.vm->vmm->lpg_shift;
+ max_size = INT_MAX & ~((1 << lpg_shift) - 1);
if (size <= 0 || size > max_size) {
nv_warn(drm, "skipped size %x\n", (u32)size);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 4c1bc061fae2..8f6d63d7edd3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -398,7 +398,8 @@ void
nouveau_fbcon_output_poll_changed(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- drm_fb_helper_hotplug_event(&drm->fbcon->helper);
+ if (drm->fbcon)
+ drm_fb_helper_hotplug_event(&drm->fbcon->helper);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 8e47a9bae8c3..22aa9963ea6f 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -76,7 +76,7 @@ nv17_fence_context_new(struct nouveau_channel *chan)
struct ttm_mem_reg *mem = &priv->bo->bo.mem;
struct nouveau_object *object;
u32 start = mem->start * PAGE_SIZE;
- u32 limit = mem->start + mem->size - 1;
+ u32 limit = start + mem->size - 1;
int ret = 0;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index f9701e567db8..0ee363840035 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -39,6 +39,8 @@ nv50_fence_context_new(struct nouveau_channel *chan)
struct nv10_fence_chan *fctx;
struct ttm_mem_reg *mem = &priv->bo->bo.mem;
struct nouveau_object *object;
+ u32 start = mem->start * PAGE_SIZE;
+ u32 limit = start + mem->size - 1;
int ret, i;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
@@ -51,26 +53,28 @@ nv50_fence_context_new(struct nouveau_channel *chan)
fctx->base.sync = nv17_fence_sync;
ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
- NvSema, 0x0002,
+ NvSema, 0x003d,
&(struct nv_dma_class) {
.flags = NV_DMA_TARGET_VRAM |
NV_DMA_ACCESS_RDWR,
- .start = mem->start * PAGE_SIZE,
- .limit = mem->size - 1,
+ .start = start,
+ .limit = limit,
}, sizeof(struct nv_dma_class),
&object);
/* dma objects for display sync channel semaphore blocks */
for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+ u32 start = bo->bo.mem.start * PAGE_SIZE;
+ u32 limit = start + bo->bo.mem.size - 1;
ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
NvEvoSema0 + i, 0x003d,
&(struct nv_dma_class) {
.flags = NV_DMA_TARGET_VRAM |
NV_DMA_ACCESS_RDWR,
- .start = bo->bo.offset,
- .limit = bo->bo.offset + 0xfff,
+ .start = start,
+ .limit = limit,
}, sizeof(struct nv_dma_class),
&object);
}
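[Note: both fence fixes above come down to the same bounds arithmetic — ttm_mem_reg::start counts pages while size is in bytes, and a DMA window is the inclusive range [start, start + size - 1]. A hedged sketch; sema_window is illustrative, header paths per this era's TTM.]

#include <linux/mm.h>
#include <ttm/ttm_bo_api.h>

static void sema_window(struct ttm_mem_reg *mem, u32 *start, u32 *limit)
{
	*start = mem->start * PAGE_SIZE;	/* convert pages to bytes first */
	*limit = *start + mem->size - 1;	/* inclusive end of the window */
}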
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 93c2f2cceb51..eb89653a7a17 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -179,9 +179,10 @@ qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *relea
uint32_t type, bool interruptible)
{
struct qxl_command cmd;
+ struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
cmd.type = type;
- cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);
+ cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);
return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
}
@@ -191,9 +192,10 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas
uint32_t type, bool interruptible)
{
struct qxl_command cmd;
+ struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
cmd.type = type;
- cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);
+ cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);
return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
}
@@ -214,7 +216,6 @@ int qxl_garbage_collect(struct qxl_device *qdev)
struct qxl_release *release;
uint64_t id, next_id;
int i = 0;
- int ret;
union qxl_release_info *info;
while (qxl_ring_pop(qdev->release_ring, &id)) {
@@ -224,17 +225,10 @@ int qxl_garbage_collect(struct qxl_device *qdev)
if (release == NULL)
break;
- ret = qxl_release_reserve(qdev, release, false);
- if (ret) {
- qxl_io_log(qdev, "failed to reserve release on garbage collect %lld\n", id);
- DRM_ERROR("failed to reserve release %lld\n", id);
- }
-
info = qxl_release_map(qdev, release);
next_id = info->next;
qxl_release_unmap(qdev, release, info);
- qxl_release_unreserve(qdev, release);
QXL_INFO(qdev, "popped %lld, next %lld\n", id,
next_id);
@@ -259,27 +253,29 @@ int qxl_garbage_collect(struct qxl_device *qdev)
return i;
}
-int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
+int qxl_alloc_bo_reserved(struct qxl_device *qdev,
+ struct qxl_release *release,
+ unsigned long size,
struct qxl_bo **_bo)
{
struct qxl_bo *bo;
int ret;
ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
- QXL_GEM_DOMAIN_VRAM, NULL, &bo);
+ false, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
if (ret) {
DRM_ERROR("failed to allocate VRAM BO\n");
return ret;
}
- ret = qxl_bo_reserve(bo, false);
- if (unlikely(ret != 0))
+ ret = qxl_release_list_add(release, bo);
+ if (ret)
goto out_unref;
*_bo = bo;
return 0;
out_unref:
qxl_bo_unref(&bo);
- return 0;
+ return ret;
}
static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
@@ -503,6 +499,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
if (ret)
return ret;
+ ret = qxl_release_reserve_list(release, true);
+ if (ret)
+ return ret;
+
cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_SURFACE_CMD_CREATE;
cmd->u.surface_create.format = surf->surf.format;
@@ -524,14 +524,11 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
surf->surf_create = release;
- /* no need to add a release to the fence for this bo,
+ /* no need to add a release to the fence for this surface bo,
since it is only released when we ask to destroy the surface
and it would never signal otherwise */
- qxl_fence_releaseable(qdev, release);
-
qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
-
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
surf->hw_surf_alloc = true;
spin_lock(&qdev->surf_id_idr_lock);
@@ -573,12 +570,9 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
cmd->surface_id = id;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_fence_releaseable(qdev, release);
-
qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
- qxl_release_unreserve(qdev, release);
-
+ qxl_release_fence_buffer_objects(release);
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index f76f5dd7bfc4..835caba026d3 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -179,7 +179,7 @@ static void qxl_crtc_destroy(struct drm_crtc *crtc)
kfree(qxl_crtc);
}
-static void
+static int
qxl_hide_cursor(struct qxl_device *qdev)
{
struct qxl_release *release;
@@ -188,14 +188,22 @@ qxl_hide_cursor(struct qxl_device *qdev)
ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
&release, NULL);
+ if (ret)
+ return ret;
+
+ ret = qxl_release_reserve_list(release, true);
+ if (ret) {
+ qxl_release_free(qdev, release);
+ return ret;
+ }
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_HIDE;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_fence_releaseable(qdev, release);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
+ return 0;
}
static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
@@ -216,10 +224,8 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
int size = 64*64*4;
int ret = 0;
- if (!handle) {
- qxl_hide_cursor(qdev);
- return 0;
- }
+ if (!handle)
+ return qxl_hide_cursor(qdev);
obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
if (!obj) {
@@ -234,8 +240,9 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
goto out_unref;
ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL);
+ qxl_bo_unreserve(user_bo);
if (ret)
- goto out_unreserve;
+ goto out_unref;
ret = qxl_bo_kmap(user_bo, &user_ptr);
if (ret)
@@ -246,14 +253,20 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
&release, NULL);
if (ret)
goto out_kunmap;
- ret = qxl_alloc_bo_reserved(qdev, sizeof(struct qxl_cursor) + size,
- &cursor_bo);
+
+ ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_cursor) + size,
+ &cursor_bo);
if (ret)
goto out_free_release;
- ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
+
+ ret = qxl_release_reserve_list(release, false);
if (ret)
goto out_free_bo;
+ ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
+ if (ret)
+ goto out_backoff;
+
cursor->header.unique = 0;
cursor->header.type = SPICE_CURSOR_TYPE_ALPHA;
cursor->header.width = 64;
@@ -269,11 +282,7 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
qxl_bo_kunmap(cursor_bo);
- /* finish with the userspace bo */
qxl_bo_kunmap(user_bo);
- qxl_bo_unpin(user_bo);
- qxl_bo_unreserve(user_bo);
- drm_gem_object_unreference_unlocked(obj);
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_SET;
@@ -281,30 +290,35 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
cmd->u.set.position.y = qcrtc->cur_y;
cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
- qxl_release_add_res(qdev, release, cursor_bo);
cmd->u.set.visible = 1;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_fence_releaseable(qdev, release);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
+
+ /* finish with the userspace bo */
+ ret = qxl_bo_reserve(user_bo, false);
+ if (!ret) {
+ qxl_bo_unpin(user_bo);
+ qxl_bo_unreserve(user_bo);
+ }
+ drm_gem_object_unreference_unlocked(obj);
- qxl_bo_unreserve(cursor_bo);
qxl_bo_unref(&cursor_bo);
return ret;
+
+out_backoff:
+ qxl_release_backoff_reserve_list(release);
out_free_bo:
qxl_bo_unref(&cursor_bo);
out_free_release:
- qxl_release_unreserve(qdev, release);
qxl_release_free(qdev, release);
out_kunmap:
qxl_bo_kunmap(user_bo);
out_unpin:
qxl_bo_unpin(user_bo);
-out_unreserve:
- qxl_bo_unreserve(user_bo);
out_unref:
drm_gem_object_unreference_unlocked(obj);
return ret;
@@ -322,6 +336,14 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
&release, NULL);
+ if (ret)
+ return ret;
+
+ ret = qxl_release_reserve_list(release, true);
+ if (ret) {
+ qxl_release_free(qdev, release);
+ return ret;
+ }
qcrtc->cur_x = x;
qcrtc->cur_y = y;
@@ -332,9 +354,9 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
cmd->u.position.y = qcrtc->cur_y;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_fence_releaseable(qdev, release);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
+
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index 3c8c3dbf9378..56e1d633875e 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -23,25 +23,29 @@
#include "qxl_drv.h"
#include "qxl_object.h"
+static int alloc_clips(struct qxl_device *qdev,
+ struct qxl_release *release,
+ unsigned num_clips,
+ struct qxl_bo **clips_bo)
+{
+ int size = sizeof(struct qxl_clip_rects) + sizeof(struct qxl_rect) * num_clips;
+
+ return qxl_alloc_bo_reserved(qdev, release, size, clips_bo);
+}
+
/* returns a pointer to the already allocated qxl_rect array inside
* the qxl_clip_rects. This is *not* the same as the memory allocated
* on the device, it is offset to qxl_clip_rects.chunk.data */
static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
struct qxl_drawable *drawable,
unsigned num_clips,
- struct qxl_bo **clips_bo,
- struct qxl_release *release)
+ struct qxl_bo *clips_bo)
{
struct qxl_clip_rects *dev_clips;
int ret;
- int size = sizeof(*dev_clips) + sizeof(struct qxl_rect) * num_clips;
- ret = qxl_alloc_bo_reserved(qdev, size, clips_bo);
- if (ret)
- return NULL;
- ret = qxl_bo_kmap(*clips_bo, (void **)&dev_clips);
+ ret = qxl_bo_kmap(clips_bo, (void **)&dev_clips);
if (ret) {
- qxl_bo_unref(clips_bo);
return NULL;
}
dev_clips->num_rects = num_clips;
@@ -52,20 +56,34 @@ static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
}
static int
+alloc_drawable(struct qxl_device *qdev, struct qxl_release **release)
+{
+ int ret;
+ ret = qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable),
+ QXL_RELEASE_DRAWABLE, release,
+ NULL);
+ return ret;
+}
+
+static void
+free_drawable(struct qxl_device *qdev, struct qxl_release *release)
+{
+ qxl_release_free(qdev, release);
+}
+
+/* release needs to be reserved at this point */
+static int
make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
const struct qxl_rect *rect,
- struct qxl_release **release)
+ struct qxl_release *release)
{
struct qxl_drawable *drawable;
- int i, ret;
+ int i;
- ret = qxl_alloc_release_reserved(qdev, sizeof(*drawable),
- QXL_RELEASE_DRAWABLE, release,
- NULL);
- if (ret)
- return ret;
+ drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+ if (!drawable)
+ return -ENOMEM;
- drawable = (struct qxl_drawable *)qxl_release_map(qdev, *release);
drawable->type = type;
drawable->surface_id = surface; /* Only primary for now */
@@ -91,14 +109,23 @@ make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
drawable->bbox = *rect;
drawable->mm_time = qdev->rom->mm_clock;
- qxl_release_unmap(qdev, *release, &drawable->release_info);
+ qxl_release_unmap(qdev, release, &drawable->release_info);
return 0;
}
-static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
+static int alloc_palette_object(struct qxl_device *qdev,
+ struct qxl_release *release,
+ struct qxl_bo **palette_bo)
+{
+ return qxl_alloc_bo_reserved(qdev, release,
+ sizeof(struct qxl_palette) + sizeof(uint32_t) * 2,
+ palette_bo);
+}
+
+static int qxl_palette_create_1bit(struct qxl_bo *palette_bo,
+ struct qxl_release *release,
const struct qxl_fb_image *qxl_fb_image)
{
- struct qxl_device *qdev = qxl_fb_image->qdev;
const struct fb_image *fb_image = &qxl_fb_image->fb_image;
uint32_t visual = qxl_fb_image->visual;
const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette;
@@ -108,12 +135,7 @@ static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
static uint64_t unique; /* we make no attempt to actually set this
* correctly globaly, since that would require
* tracking all of our palettes. */
-
- ret = qxl_alloc_bo_reserved(qdev,
- sizeof(struct qxl_palette) + sizeof(uint32_t) * 2,
- palette_bo);
-
- ret = qxl_bo_kmap(*palette_bo, (void **)&pal);
+ ret = qxl_bo_kmap(palette_bo, (void **)&pal);
pal->num_ents = 2;
pal->unique = unique++;
if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
@@ -126,7 +148,7 @@ static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
}
pal->ents[0] = bgcolor;
pal->ents[1] = fgcolor;
- qxl_bo_kunmap(*palette_bo);
+ qxl_bo_kunmap(palette_bo);
return 0;
}
@@ -144,44 +166,63 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
const char *src = fb_image->data;
int depth = fb_image->depth;
struct qxl_release *release;
- struct qxl_bo *image_bo;
struct qxl_image *image;
int ret;
-
+ struct qxl_drm_image *dimage;
+ struct qxl_bo *palette_bo = NULL;
if (stride == 0)
stride = depth * width / 8;
+ ret = alloc_drawable(qdev, &release);
+ if (ret)
+ return;
+
+ ret = qxl_image_alloc_objects(qdev, release,
+ &dimage,
+ height, stride);
+ if (ret)
+ goto out_free_drawable;
+
+ if (depth == 1) {
+ ret = alloc_palette_object(qdev, release, &palette_bo);
+ if (ret)
+ goto out_free_image;
+ }
+
+ /* do a reservation run over all the objects we just allocated */
+ ret = qxl_release_reserve_list(release, true);
+ if (ret)
+ goto out_free_palette;
+
rect.left = x;
rect.right = x + width;
rect.top = y;
rect.bottom = y + height;
- ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, &release);
- if (ret)
- return;
+ ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, release);
+ if (ret) {
+ qxl_release_backoff_reserve_list(release);
+ goto out_free_palette;
+ }
- ret = qxl_image_create(qdev, release, &image_bo,
- (const uint8_t *)src, 0, 0,
- width, height, depth, stride);
+ ret = qxl_image_init(qdev, release, dimage,
+ (const uint8_t *)src, 0, 0,
+ width, height, depth, stride);
if (ret) {
- qxl_release_unreserve(qdev, release);
+ qxl_release_backoff_reserve_list(release);
qxl_release_free(qdev, release);
return;
}
if (depth == 1) {
- struct qxl_bo *palette_bo;
void *ptr;
- ret = qxl_palette_create_1bit(&palette_bo, qxl_fb_image);
- qxl_release_add_res(qdev, release, palette_bo);
+ ret = qxl_palette_create_1bit(palette_bo, release, qxl_fb_image);
- ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
+ ptr = qxl_bo_kmap_atomic_page(qdev, dimage->bo, 0);
image = ptr;
image->u.bitmap.palette =
qxl_bo_physical_address(qdev, palette_bo, 0);
- qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
- qxl_bo_unreserve(palette_bo);
- qxl_bo_unref(&palette_bo);
+ qxl_bo_kunmap_atomic_page(qdev, dimage->bo, ptr);
}
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
@@ -199,16 +240,20 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
drawable->u.copy.mask.bitmap = 0;
drawable->u.copy.src_bitmap =
- qxl_bo_physical_address(qdev, image_bo, 0);
+ qxl_bo_physical_address(qdev, dimage->bo, 0);
qxl_release_unmap(qdev, release, &drawable->release_info);
- qxl_release_add_res(qdev, release, image_bo);
- qxl_bo_unreserve(image_bo);
- qxl_bo_unref(&image_bo);
-
- qxl_fence_releaseable(qdev, release);
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
+
+out_free_palette:
+ if (palette_bo)
+ qxl_bo_unref(&palette_bo);
+out_free_image:
+ qxl_image_free_objects(qdev, dimage);
+out_free_drawable:
+ if (ret)
+ free_drawable(qdev, release);
}
/* push a draw command using the given clipping rectangles as
@@ -243,10 +288,14 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
int depth = qxl_fb->base.bits_per_pixel;
uint8_t *surface_base;
struct qxl_release *release;
- struct qxl_bo *image_bo;
struct qxl_bo *clips_bo;
+ struct qxl_drm_image *dimage;
int ret;
+ ret = alloc_drawable(qdev, &release);
+ if (ret)
+ return;
+
left = clips->x1;
right = clips->x2;
top = clips->y1;
@@ -263,36 +312,52 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
width = right - left;
height = bottom - top;
+
+ ret = alloc_clips(qdev, release, num_clips, &clips_bo);
+ if (ret)
+ goto out_free_drawable;
+
+ ret = qxl_image_alloc_objects(qdev, release,
+ &dimage,
+ height, stride);
+ if (ret)
+ goto out_free_clips;
+
+ /* do a reservation run over all the objects we just allocated */
+ ret = qxl_release_reserve_list(release, true);
+ if (ret)
+ goto out_free_image;
+
drawable_rect.left = left;
drawable_rect.right = right;
drawable_rect.top = top;
drawable_rect.bottom = bottom;
+
ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect,
- &release);
+ release);
if (ret)
- return;
+ goto out_release_backoff;
ret = qxl_bo_kmap(bo, (void **)&surface_base);
if (ret)
- goto out_unref;
+ goto out_release_backoff;
- ret = qxl_image_create(qdev, release, &image_bo, surface_base,
- left, top, width, height, depth, stride);
+
+ ret = qxl_image_init(qdev, release, dimage, surface_base,
+ left, top, width, height, depth, stride);
qxl_bo_kunmap(bo);
if (ret)
- goto out_unref;
+ goto out_release_backoff;
+
+ rects = drawable_set_clipping(qdev, drawable, num_clips, clips_bo);
+ if (!rects)
+ goto out_release_backoff;
- rects = drawable_set_clipping(qdev, drawable, num_clips, &clips_bo, release);
- if (!rects) {
- qxl_bo_unref(&image_bo);
- goto out_unref;
- }
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
drawable->clip.data = qxl_bo_physical_address(qdev,
clips_bo, 0);
- qxl_release_add_res(qdev, release, clips_bo);
drawable->u.copy.src_area.top = 0;
drawable->u.copy.src_area.bottom = height;
@@ -306,11 +371,9 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
drawable->u.copy.mask.pos.y = 0;
drawable->u.copy.mask.bitmap = 0;
- drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, image_bo, 0);
+ drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, dimage->bo, 0);
qxl_release_unmap(qdev, release, &drawable->release_info);
- qxl_release_add_res(qdev, release, image_bo);
- qxl_bo_unreserve(image_bo);
- qxl_bo_unref(&image_bo);
+
clips_ptr = clips;
for (i = 0; i < num_clips; i++, clips_ptr += inc) {
rects[i].left = clips_ptr->x1;
@@ -319,17 +382,22 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
rects[i].bottom = clips_ptr->y2;
}
qxl_bo_kunmap(clips_bo);
- qxl_bo_unreserve(clips_bo);
- qxl_bo_unref(&clips_bo);
- qxl_fence_releaseable(qdev, release);
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
- qxl_release_unreserve(qdev, release);
- return;
+ qxl_release_fence_buffer_objects(release);
+
+out_release_backoff:
+ if (ret)
+ qxl_release_backoff_reserve_list(release);
+out_free_image:
+ qxl_image_free_objects(qdev, dimage);
+out_free_clips:
+ qxl_bo_unref(&clips_bo);
+out_free_drawable:
+ /* only free drawable on error */
+ if (ret)
+ free_drawable(qdev, release);
-out_unref:
- qxl_release_unreserve(qdev, release);
- qxl_release_free(qdev, release);
}
void qxl_draw_copyarea(struct qxl_device *qdev,
@@ -342,22 +410,36 @@ void qxl_draw_copyarea(struct qxl_device *qdev,
struct qxl_release *release;
int ret;
+ ret = alloc_drawable(qdev, &release);
+ if (ret)
+ return;
+
+ /* do a reservation run over all the objects we just allocated */
+ ret = qxl_release_reserve_list(release, true);
+ if (ret)
+ goto out_free_release;
+
rect.left = dx;
rect.top = dy;
rect.right = dx + width;
rect.bottom = dy + height;
- ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, &release);
- if (ret)
- return;
+ ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, release);
+ if (ret) {
+ qxl_release_backoff_reserve_list(release);
+ goto out_free_release;
+ }
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
drawable->u.copy_bits.src_pos.x = sx;
drawable->u.copy_bits.src_pos.y = sy;
-
qxl_release_unmap(qdev, release, &drawable->release_info);
- qxl_fence_releaseable(qdev, release);
+
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
+
+out_free_release:
+ if (ret)
+ free_drawable(qdev, release);
}
void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
@@ -370,10 +452,21 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
struct qxl_release *release;
int ret;
- ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, &release);
+ ret = alloc_drawable(qdev, &release);
if (ret)
return;
+ /* do a reservation run over all the objects we just allocated */
+ ret = qxl_release_reserve_list(release, true);
+ if (ret)
+ goto out_free_release;
+
+ ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, release);
+ if (ret) {
+ qxl_release_backoff_reserve_list(release);
+ goto out_free_release;
+ }
+
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID;
drawable->u.fill.brush.u.color = color;
@@ -384,7 +477,11 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
drawable->u.fill.mask.bitmap = 0;
qxl_release_unmap(qdev, release, &drawable->release_info);
- qxl_fence_releaseable(qdev, release);
+
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
+
+out_free_release:
+ if (ret)
+ free_drawable(qdev, release);
}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index aacb791464a3..7e96f4f11738 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -42,6 +42,9 @@
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
+/* just for ttm_validate_buffer */
+#include <ttm/ttm_execbuf_util.h>
+
#include <drm/qxl_drm.h>
#include "qxl_dev.h"
@@ -118,9 +121,9 @@ struct qxl_bo {
uint32_t surface_id;
struct qxl_fence fence; /* per bo fence - list of releases */
struct qxl_release *surf_create;
- atomic_t reserve_count;
};
#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base)
+#define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo)
struct qxl_gem {
struct mutex mutex;
@@ -128,12 +131,7 @@ struct qxl_gem {
};
struct qxl_bo_list {
- struct list_head lhead;
- struct qxl_bo *bo;
-};
-
-struct qxl_reloc_list {
- struct list_head bos;
+ struct ttm_validate_buffer tv;
};
struct qxl_crtc {
@@ -195,10 +193,20 @@ enum {
struct qxl_release {
int id;
int type;
- int bo_count;
uint32_t release_offset;
uint32_t surface_release_id;
- struct qxl_bo *bos[QXL_MAX_RES];
+ struct ww_acquire_ctx ticket;
+ struct list_head bos;
+};
+
+struct qxl_drm_chunk {
+ struct list_head head;
+ struct qxl_bo *bo;
+};
+
+struct qxl_drm_image {
+ struct qxl_bo *bo;
+ struct list_head chunk_list;
};
struct qxl_fb_image {
@@ -314,6 +322,7 @@ struct qxl_device {
struct workqueue_struct *gc_queue;
struct work_struct gc_work;
+ struct work_struct fb_work;
};
/* forward declaration for QXL_INFO_IO */
@@ -433,12 +442,19 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma);
/* qxl image */
-int qxl_image_create(struct qxl_device *qdev,
- struct qxl_release *release,
- struct qxl_bo **image_bo,
- const uint8_t *data,
- int x, int y, int width, int height,
- int depth, int stride);
+int qxl_image_init(struct qxl_device *qdev,
+ struct qxl_release *release,
+ struct qxl_drm_image *dimage,
+ const uint8_t *data,
+ int x, int y, int width, int height,
+ int depth, int stride);
+int
+qxl_image_alloc_objects(struct qxl_device *qdev,
+ struct qxl_release *release,
+ struct qxl_drm_image **image_ptr,
+ int height, int stride);
+void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage);
+
void qxl_update_screen(struct qxl_device *qxl);
/* qxl io operations (qxl_cmd.c) */
@@ -459,20 +475,15 @@ int qxl_ring_push(struct qxl_ring *ring, const void *new_elt, bool interruptible
void qxl_io_flush_release(struct qxl_device *qdev);
void qxl_io_flush_surfaces(struct qxl_device *qdev);
-int qxl_release_reserve(struct qxl_device *qdev,
- struct qxl_release *release, bool no_wait);
-void qxl_release_unreserve(struct qxl_device *qdev,
- struct qxl_release *release);
union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
struct qxl_release *release);
void qxl_release_unmap(struct qxl_device *qdev,
struct qxl_release *release,
union qxl_release_info *info);
-/*
- * qxl_bo_add_resource.
- *
- */
-void qxl_bo_add_resource(struct qxl_bo *main_bo, struct qxl_bo *resource);
+int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo);
+int qxl_release_reserve_list(struct qxl_release *release, bool no_intr);
+void qxl_release_backoff_reserve_list(struct qxl_release *release);
+void qxl_release_fence_buffer_objects(struct qxl_release *release);
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
enum qxl_surface_cmd_type surface_cmd_type,
@@ -481,15 +492,16 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
int type, struct qxl_release **release,
struct qxl_bo **rbo);
-int qxl_fence_releaseable(struct qxl_device *qdev,
- struct qxl_release *release);
+
int
qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
uint32_t type, bool interruptible);
int
qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
uint32_t type, bool interruptible);
-int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
+int qxl_alloc_bo_reserved(struct qxl_device *qdev,
+ struct qxl_release *release,
+ unsigned long size,
struct qxl_bo **_bo);
/* qxl drawing commands */
@@ -510,15 +522,9 @@ void qxl_draw_copyarea(struct qxl_device *qdev,
u32 sx, u32 sy,
u32 dx, u32 dy);
-uint64_t
-qxl_release_alloc(struct qxl_device *qdev, int type,
- struct qxl_release **ret);
-
void qxl_release_free(struct qxl_device *qdev,
struct qxl_release *release);
-void qxl_release_add_res(struct qxl_device *qdev,
- struct qxl_release *release,
- struct qxl_bo *bo);
+
/* used by qxl_debugfs_release */
struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
uint64_t id);
@@ -561,7 +567,7 @@ void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freein
int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf);
/* qxl_fence.c */
-int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id);
+void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id);
int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id);
int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence);
void qxl_fence_fini(struct qxl_fence *qfence);
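[Note: a hedged sketch of the release lifecycle the reworked qxl API above implies, using only the functions declared in this header; error handling is trimmed, and qdev/bo/size/type come from the surrounding driver context.]

ret = qxl_alloc_release_reserved(qdev, size, type, &release, NULL);
ret = qxl_release_list_add(release, bo);        /* track each referenced bo */
ret = qxl_release_reserve_list(release, true);  /* ww-mutex reserve them all */
/* ... qxl_release_map(), fill in the command, qxl_release_unmap() ... */
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
qxl_release_fence_buffer_objects(release);      /* fence the bos + unreserve */

/* On failure after reserving: */
qxl_release_backoff_reserve_list(release);      /* drop the reservations */
qxl_release_free(qdev, release);                /* discard the release */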
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 76f39d88d684..88722f233430 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -37,12 +37,29 @@
#define QXL_DIRTY_DELAY (HZ / 30)
+#define QXL_FB_OP_FILLRECT 1
+#define QXL_FB_OP_COPYAREA 2
+#define QXL_FB_OP_IMAGEBLIT 3
+
+struct qxl_fb_op {
+ struct list_head head;
+ int op_type;
+ union {
+ struct fb_fillrect fr;
+ struct fb_copyarea ca;
+ struct fb_image ib;
+ } op;
+ void *img_data;
+};
+
struct qxl_fbdev {
struct drm_fb_helper helper;
struct qxl_framebuffer qfb;
struct list_head fbdev_list;
struct qxl_device *qdev;
+ spinlock_t delayed_ops_lock;
+ struct list_head delayed_ops;
void *shadow;
int size;
@@ -164,8 +181,69 @@ static struct fb_deferred_io qxl_defio = {
.deferred_io = qxl_deferred_io,
};
-static void qxl_fb_fillrect(struct fb_info *info,
- const struct fb_fillrect *fb_rect)
+static void qxl_fb_delayed_fillrect(struct qxl_fbdev *qfbdev,
+ const struct fb_fillrect *fb_rect)
+{
+ struct qxl_fb_op *op;
+ unsigned long flags;
+
+ op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN);
+ if (!op)
+ return;
+
+ op->op.fr = *fb_rect;
+ op->img_data = NULL;
+ op->op_type = QXL_FB_OP_FILLRECT;
+
+ spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
+ list_add_tail(&op->head, &qfbdev->delayed_ops);
+ spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
+}
+
+static void qxl_fb_delayed_copyarea(struct qxl_fbdev *qfbdev,
+ const struct fb_copyarea *fb_copy)
+{
+ struct qxl_fb_op *op;
+ unsigned long flags;
+
+ op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN);
+ if (!op)
+ return;
+
+ op->op.ca = *fb_copy;
+ op->img_data = NULL;
+ op->op_type = QXL_FB_OP_COPYAREA;
+
+ spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
+ list_add_tail(&op->head, &qfbdev->delayed_ops);
+ spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
+}
+
+static void qxl_fb_delayed_imageblit(struct qxl_fbdev *qfbdev,
+ const struct fb_image *fb_image)
+{
+ struct qxl_fb_op *op;
+ unsigned long flags;
+ uint32_t size = fb_image->width * fb_image->height * (fb_image->depth >= 8 ? fb_image->depth / 8 : 1);
+
+ op = kmalloc(sizeof(struct qxl_fb_op) + size, GFP_ATOMIC | __GFP_NOWARN);
+ if (!op)
+ return;
+
+ op->op.ib = *fb_image;
+ op->img_data = (void *)(op + 1);
+ op->op_type = QXL_FB_OP_IMAGEBLIT;
+
+ memcpy(op->img_data, fb_image->data, size);
+
+ op->op.ib.data = op->img_data;
+ spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
+ list_add_tail(&op->head, &qfbdev->delayed_ops);
+ spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
+}
+
+static void qxl_fb_fillrect_internal(struct fb_info *info,
+ const struct fb_fillrect *fb_rect)
{
struct qxl_fbdev *qfbdev = info->par;
struct qxl_device *qdev = qfbdev->qdev;
@@ -203,17 +281,28 @@ static void qxl_fb_fillrect(struct fb_info *info,
qxl_draw_fill_rec.rect = rect;
qxl_draw_fill_rec.color = color;
qxl_draw_fill_rec.rop = rop;
+
+ qxl_draw_fill(&qxl_draw_fill_rec);
+}
+
+static void qxl_fb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *fb_rect)
+{
+ struct qxl_fbdev *qfbdev = info->par;
+ struct qxl_device *qdev = qfbdev->qdev;
+
if (!drm_can_sleep()) {
- qxl_io_log(qdev,
- "%s: TODO use RCU, mysterious locks with spin_lock\n",
- __func__);
+ qxl_fb_delayed_fillrect(qfbdev, fb_rect);
+ schedule_work(&qdev->fb_work);
return;
}
- qxl_draw_fill(&qxl_draw_fill_rec);
+ /* make sure any previous work is done */
+ flush_work(&qdev->fb_work);
+ qxl_fb_fillrect_internal(info, fb_rect);
}
-static void qxl_fb_copyarea(struct fb_info *info,
- const struct fb_copyarea *region)
+static void qxl_fb_copyarea_internal(struct fb_info *info,
+ const struct fb_copyarea *region)
{
struct qxl_fbdev *qfbdev = info->par;
@@ -223,37 +312,89 @@ static void qxl_fb_copyarea(struct fb_info *info,
region->dx, region->dy);
}
+static void qxl_fb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *region)
+{
+ struct qxl_fbdev *qfbdev = info->par;
+ struct qxl_device *qdev = qfbdev->qdev;
+
+ if (!drm_can_sleep()) {
+ qxl_fb_delayed_copyarea(qfbdev, region);
+ schedule_work(&qdev->fb_work);
+ return;
+ }
+ /* make sure any previous work is done */
+ flush_work(&qdev->fb_work);
+ qxl_fb_copyarea_internal(info, region);
+}
+
static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image)
{
qxl_draw_opaque_fb(qxl_fb_image, 0);
}
+static void qxl_fb_imageblit_internal(struct fb_info *info,
+ const struct fb_image *image)
+{
+ struct qxl_fbdev *qfbdev = info->par;
+ struct qxl_fb_image qxl_fb_image;
+
+ /* ensure proper order of rendering operations - TODO: must do this
+ * for everything. */
+ qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image);
+ qxl_fb_imageblit_safe(&qxl_fb_image);
+}
+
static void qxl_fb_imageblit(struct fb_info *info,
const struct fb_image *image)
{
struct qxl_fbdev *qfbdev = info->par;
struct qxl_device *qdev = qfbdev->qdev;
- struct qxl_fb_image qxl_fb_image;
if (!drm_can_sleep()) {
- /* we cannot do any ttm_bo allocation since that will fail on
- * ioremap_wc..__get_vm_area_node, so queue the work item
- * instead This can happen from printk inside an interrupt
- * context, i.e.: smp_apic_timer_interrupt..check_cpu_stall */
- qxl_io_log(qdev,
- "%s: TODO use RCU, mysterious locks with spin_lock\n",
- __func__);
+ qxl_fb_delayed_imageblit(qfbdev, image);
+ schedule_work(&qdev->fb_work);
return;
}
+ /* make sure any previous work is done */
+ flush_work(&qdev->fb_work);
+ qxl_fb_imageblit_internal(info, image);
+}
- /* ensure proper order of rendering operations - TODO: must do this
- * for everything. */
- qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image);
- qxl_fb_imageblit_safe(&qxl_fb_image);
+static void qxl_fb_work(struct work_struct *work)
+{
+ struct qxl_device *qdev = container_of(work, struct qxl_device, fb_work);
+ unsigned long flags;
+ struct qxl_fb_op *entry, *tmp;
+ struct qxl_fbdev *qfbdev = qdev->mode_info.qfbdev;
+
+ /* since the irq context only adds entries to the tail of the
+ list, dropping the lock while an entry is replayed is safe: the
+ entry itself is not modified by the operation code */
+ spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
+ list_for_each_entry_safe(entry, tmp, &qfbdev->delayed_ops, head) {
+ spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
+ switch (entry->op_type) {
+ case QXL_FB_OP_FILLRECT:
+ qxl_fb_fillrect_internal(qfbdev->helper.fbdev, &entry->op.fr);
+ break;
+ case QXL_FB_OP_COPYAREA:
+ qxl_fb_copyarea_internal(qfbdev->helper.fbdev, &entry->op.ca);
+ break;
+ case QXL_FB_OP_IMAGEBLIT:
+ qxl_fb_imageblit_internal(qfbdev->helper.fbdev, &entry->op.ib);
+ break;
+ }
+ spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
+ list_del(&entry->head);
+ kfree(entry);
+ }
+ spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
}
int qxl_fb_init(struct qxl_device *qdev)
{
+ INIT_WORK(&qdev->fb_work, qxl_fb_work);
return 0;
}
@@ -536,7 +677,8 @@ int qxl_fbdev_init(struct qxl_device *qdev)
qfbdev->qdev = qdev;
qdev->mode_info.qfbdev = qfbdev;
qfbdev->helper.funcs = &qxl_fb_helper_funcs;
-
+ spin_lock_init(&qfbdev->delayed_ops_lock);
+ INIT_LIST_HEAD(&qfbdev->delayed_ops);
ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
qxl_num_crtc /* num_crtc - QXL supports just 1 */,
QXLFB_CONN_LIMIT);
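
A compact userspace analogue of the delayed-ops scheme above, with a pthread mutex standing in for the spinlock and a plain function call for the workqueue: atomic-context producers only append at the tail under the lock, and the worker drops the lock while replaying each entry, which is why the walk in qxl_fb_work stays valid.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int op_type; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head, **tail = &head;

/* producer: mirrors qxl_fb_delayed_*(): append-only, under the lock */
static void queue_op(int op_type)
{
	struct node *n = malloc(sizeof(*n));
	if (!n)
		return;			/* best effort, like GFP_ATOMIC */
	n->next = NULL;
	n->op_type = op_type;
	pthread_mutex_lock(&lock);
	*tail = n;
	tail = &n->next;
	pthread_mutex_unlock(&lock);
}

/* consumer: mirrors qxl_fb_work(): unlock around the actual drawing */
static void drain(void)
{
	pthread_mutex_lock(&lock);
	while (head) {
		struct node *n = head;
		pthread_mutex_unlock(&lock);
		printf("replay op %d\n", n->op_type);	/* the draw call */
		pthread_mutex_lock(&lock);
		head = n->next;		/* delete only after replaying */
		if (!head)
			tail = &head;
		free(n);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	queue_op(1);
	queue_op(2);
	drain();
	return 0;
}
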
diff --git a/drivers/gpu/drm/qxl/qxl_fence.c b/drivers/gpu/drm/qxl/qxl_fence.c
index 63c6715ad385..ae59e91cfb9a 100644
--- a/drivers/gpu/drm/qxl/qxl_fence.c
+++ b/drivers/gpu/drm/qxl/qxl_fence.c
@@ -49,17 +49,11 @@
For some reason every so often qxl hw fails to release, things go wrong.
*/
-
-
-int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id)
+/* must be called with the fence lock held */
+void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id)
{
- struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
-
- spin_lock(&bo->tbo.bdev->fence_lock);
radix_tree_insert(&qfence->tree, rel_id, qfence);
qfence->num_active_releases++;
- spin_unlock(&bo->tbo.bdev->fence_lock);
- return 0;
}
int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id)
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index a235693aabba..25e1777fb0a2 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -55,7 +55,7 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
/* At least align on page size */
if (alignment < PAGE_SIZE)
alignment = PAGE_SIZE;
- r = qxl_bo_create(qdev, size, kernel, initial_domain, surf, &qbo);
+ r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo);
if (r) {
if (r != -ERESTARTSYS)
DRM_ERROR(
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
index cf856206996b..7fbcc35e8ad3 100644
--- a/drivers/gpu/drm/qxl/qxl_image.c
+++ b/drivers/gpu/drm/qxl/qxl_image.c
@@ -30,31 +30,100 @@
#include "qxl_object.h"
static int
-qxl_image_create_helper(struct qxl_device *qdev,
+qxl_allocate_chunk(struct qxl_device *qdev,
+ struct qxl_release *release,
+ struct qxl_drm_image *image,
+ unsigned int chunk_size)
+{
+ struct qxl_drm_chunk *chunk;
+ int ret;
+
+ chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL);
+ if (!chunk)
+ return -ENOMEM;
+
+ ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo);
+ if (ret) {
+ kfree(chunk);
+ return ret;
+ }
+
+ list_add_tail(&chunk->head, &image->chunk_list);
+ return 0;
+}
+
+int
+qxl_image_alloc_objects(struct qxl_device *qdev,
struct qxl_release *release,
- struct qxl_bo **image_bo,
- const uint8_t *data,
- int width, int height,
- int depth, unsigned int hash,
- int stride)
+ struct qxl_drm_image **image_ptr,
+ int height, int stride)
+{
+ struct qxl_drm_image *image;
+ int ret;
+
+ image = kmalloc(sizeof(struct qxl_drm_image), GFP_KERNEL);
+ if (!image)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&image->chunk_list);
+
+ ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo);
+ if (ret) {
+ kfree(image);
+ return ret;
+ }
+
+ ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height);
+ if (ret) {
+ qxl_bo_unref(&image->bo);
+ kfree(image);
+ return ret;
+ }
+ *image_ptr = image;
+ return 0;
+}
+
+void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage)
{
+ struct qxl_drm_chunk *chunk, *tmp;
+
+ list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) {
+ qxl_bo_unref(&chunk->bo);
+ kfree(chunk);
+ }
+
+ qxl_bo_unref(&dimage->bo);
+ kfree(dimage);
+}
+
+static int
+qxl_image_init_helper(struct qxl_device *qdev,
+ struct qxl_release *release,
+ struct qxl_drm_image *dimage,
+ const uint8_t *data,
+ int width, int height,
+ int depth, unsigned int hash,
+ int stride)
+{
+ struct qxl_drm_chunk *drv_chunk;
struct qxl_image *image;
struct qxl_data_chunk *chunk;
int i;
int chunk_stride;
int linesize = width * depth / 8;
- struct qxl_bo *chunk_bo;
- int ret;
+ struct qxl_bo *chunk_bo, *image_bo;
void *ptr;
/* Chunk */
/* FIXME: Check integer overflow */
/* TODO: variable number of chunks */
+
+ drv_chunk = list_first_entry(&dimage->chunk_list, struct qxl_drm_chunk, head);
+
+ chunk_bo = drv_chunk->bo;
chunk_stride = stride; /* TODO: should use linesize, but it renders
wrong (check the bitmaps are sent correctly
first) */
- ret = qxl_alloc_bo_reserved(qdev, sizeof(*chunk) + height * chunk_stride,
- &chunk_bo);
-
+
ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
chunk = ptr;
chunk->data_size = height * chunk_stride;
@@ -102,7 +171,6 @@ qxl_image_create_helper(struct qxl_device *qdev,
while (remain > 0) {
page_base = out_offset & PAGE_MASK;
page_offset = offset_in_page(out_offset);
-
size = min((int)(PAGE_SIZE - page_offset), remain);
ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
@@ -116,14 +184,10 @@ qxl_image_create_helper(struct qxl_device *qdev,
}
}
}
-
-
qxl_bo_kunmap(chunk_bo);
- /* Image */
- ret = qxl_alloc_bo_reserved(qdev, sizeof(*image), image_bo);
-
- ptr = qxl_bo_kmap_atomic_page(qdev, *image_bo, 0);
+ image_bo = dimage->bo;
+ ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
image = ptr;
image->descriptor.id = 0;
@@ -154,23 +218,20 @@ qxl_image_create_helper(struct qxl_device *qdev,
image->u.bitmap.stride = chunk_stride;
image->u.bitmap.palette = 0;
image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);
- qxl_release_add_res(qdev, release, chunk_bo);
- qxl_bo_unreserve(chunk_bo);
- qxl_bo_unref(&chunk_bo);
- qxl_bo_kunmap_atomic_page(qdev, *image_bo, ptr);
+ qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
return 0;
}
-int qxl_image_create(struct qxl_device *qdev,
+int qxl_image_init(struct qxl_device *qdev,
struct qxl_release *release,
- struct qxl_bo **image_bo,
+ struct qxl_drm_image *dimage,
const uint8_t *data,
int x, int y, int width, int height,
int depth, int stride)
{
data += y * stride + x * (depth / 8);
- return qxl_image_create_helper(qdev, release, image_bo, data,
+ return qxl_image_init_helper(qdev, release, dimage, data,
width, height, depth, 0, stride);
}
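
A hedged sketch of the ownership model this split introduces (userspace stand-ins for the BOs): qxl_image_alloc_objects builds the container and its chunk list up front, so any later failure in qxl_image_init can be unwound with a single qxl_image_free_objects-style call.

#include <stdlib.h>

struct chunk { struct chunk *next; void *bo; };
struct image { void *bo; struct chunk *chunks; };

/* analogue of qxl_image_alloc_objects(): all BOs allocated up front */
static struct image *image_alloc(size_t chunk_size)
{
	struct image *img = malloc(sizeof(*img));
	struct chunk *c;

	if (!img)
		return NULL;
	img->bo = malloc(64);		/* stands in for the qxl_image bo */
	c = img->bo ? malloc(sizeof(*c)) : NULL;
	if (!c)
		goto fail;
	c->bo = malloc(chunk_size);	/* stands in for the data chunk bo */
	if (!c->bo) {
		free(c);
		goto fail;
	}
	c->next = NULL;
	img->chunks = c;
	return img;
fail:
	free(img->bo);
	free(img);
	return NULL;
}

/* analogue of qxl_image_free_objects(): walk the chunks, then the image */
static void image_free(struct image *img)
{
	struct chunk *c = img->chunks;

	while (c) {
		struct chunk *next = c->next;
		free(c->bo);
		free(c);
		c = next;
	}
	free(img->bo);
	free(img);
}

int main(void)
{
	struct image *img = image_alloc(4096);
	if (img)
		image_free(img);
	return 0;
}
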
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 27f45e49250d..6de33563d6f1 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -68,55 +68,60 @@ static int qxl_map_ioctl(struct drm_device *dev, void *data,
&qxl_map->offset);
}
+struct qxl_reloc_info {
+ int type;
+ struct qxl_bo *dst_bo;
+ uint32_t dst_offset;
+ struct qxl_bo *src_bo;
+ int src_offset;
+};
+
/*
* dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
* are on vram).
* *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
*/
static void
-apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
- struct qxl_bo *src, uint64_t src_off)
+apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
{
void *reloc_page;
-
- reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
- *(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
- src, src_off);
- qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
+ reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
+ *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
+ info->src_bo,
+ info->src_offset);
+ qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
}
static void
-apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
- struct qxl_bo *src)
+apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
{
uint32_t id = 0;
void *reloc_page;
- if (src && !src->is_primary)
- id = src->surface_id;
+ if (info->src_bo && !info->src_bo->is_primary)
+ id = info->src_bo->surface_id;
- reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
- *(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id;
- qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
+ reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
+ *(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
+ qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
}
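
The masking in apply_reloc/apply_surf_reloc splits a byte offset into a page base (the page to kmap atomically) and an in-page remainder. A standalone sketch, assuming the kernel's PAGE_MASK convention of ~(PAGE_SIZE - 1):

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define PAGE_SIZE ((uint64_t)4096)
#define PAGE_MASK (~(PAGE_SIZE - 1))	/* kernel convention */

int main(void)
{
	uint64_t dst_off = 3 * PAGE_SIZE + 40;
	uint64_t page_base = dst_off & PAGE_MASK;	/* page to map */
	uint64_t in_page   = dst_off & ~PAGE_MASK;	/* offset inside it */

	printf("page_base=%" PRIu64 " in_page=%" PRIu64 "\n",
	       page_base, in_page);
	/* apply_reloc() then writes the 64-bit address at reloc_page + in_page */
	return 0;
}
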
/* return holding the reference to this object */
static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
struct drm_file *file_priv, uint64_t handle,
- struct qxl_reloc_list *reloc_list)
+ struct qxl_release *release)
{
struct drm_gem_object *gobj;
struct qxl_bo *qobj;
int ret;
gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
- if (!gobj) {
- DRM_ERROR("bad bo handle %lld\n", handle);
+ if (!gobj)
return NULL;
- }
+
qobj = gem_to_qxl_bo(gobj);
- ret = qxl_bo_list_add(reloc_list, qobj);
+ ret = qxl_release_list_add(release, qobj);
if (ret)
return NULL;
@@ -129,151 +134,177 @@ static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
* However, the command as passed from user space must *not* contain the initial
* QXLReleaseInfo struct (first XXX bytes)
*/
-static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+static int qxl_process_single_command(struct qxl_device *qdev,
+ struct drm_qxl_command *cmd,
+ struct drm_file *file_priv)
{
- struct qxl_device *qdev = dev->dev_private;
- struct drm_qxl_execbuffer *execbuffer = data;
- struct drm_qxl_command user_cmd;
- int cmd_num;
- struct qxl_bo *reloc_src_bo;
- struct qxl_bo *reloc_dst_bo;
- struct drm_qxl_reloc reloc;
+ struct qxl_reloc_info *reloc_info;
+ int release_type;
+ struct qxl_release *release;
+ struct qxl_bo *cmd_bo;
void *fb_cmd;
- int i, ret;
- struct qxl_reloc_list reloc_list;
+ int i, j, ret, num_relocs;
int unwritten;
- uint32_t reloc_dst_offset;
- INIT_LIST_HEAD(&reloc_list.bos);
- for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
- struct qxl_release *release;
- struct qxl_bo *cmd_bo;
- int release_type;
- struct drm_qxl_command *commands =
- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
+ switch (cmd->type) {
+ case QXL_CMD_DRAW:
+ release_type = QXL_RELEASE_DRAWABLE;
+ break;
+ case QXL_CMD_SURFACE:
+ case QXL_CMD_CURSOR:
+ default:
+ DRM_DEBUG("Only draw commands in execbuffers\n");
+ return -EINVAL;
+ break;
+ }
- if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
- sizeof(user_cmd)))
- return -EFAULT;
- switch (user_cmd.type) {
- case QXL_CMD_DRAW:
- release_type = QXL_RELEASE_DRAWABLE;
- break;
- case QXL_CMD_SURFACE:
- case QXL_CMD_CURSOR:
- default:
- DRM_DEBUG("Only draw commands in execbuffers\n");
- return -EINVAL;
- break;
- }
+ if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
+ return -EINVAL;
- if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info))
- return -EINVAL;
+ if (!access_ok(VERIFY_READ,
+ (void *)(unsigned long)cmd->command,
+ cmd->command_size))
+ return -EFAULT;
- if (!access_ok(VERIFY_READ,
- (void *)(unsigned long)user_cmd.command,
- user_cmd.command_size))
- return -EFAULT;
+ reloc_info = kmalloc_array(cmd->relocs_num, sizeof(struct qxl_reloc_info), GFP_KERNEL);
+ if (!reloc_info)
+ return -ENOMEM;
- ret = qxl_alloc_release_reserved(qdev,
- sizeof(union qxl_release_info) +
- user_cmd.command_size,
- release_type,
- &release,
- &cmd_bo);
- if (ret)
- return ret;
+ ret = qxl_alloc_release_reserved(qdev,
+ sizeof(union qxl_release_info) +
+ cmd->command_size,
+ release_type,
+ &release,
+ &cmd_bo);
+ if (ret)
+ goto out_free_reloc;
- /* TODO copy slow path code from i915 */
- fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)user_cmd.command, user_cmd.command_size);
+ /* TODO copy slow path code from i915 */
+ fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
- {
- struct qxl_drawable *draw = fb_cmd;
+ {
+ struct qxl_drawable *draw = fb_cmd;
+ draw->mm_time = qdev->rom->mm_clock;
+ }
- draw->mm_time = qdev->rom->mm_clock;
- }
- qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
- if (unwritten) {
- DRM_ERROR("got unwritten %d\n", unwritten);
- qxl_release_unreserve(qdev, release);
- qxl_release_free(qdev, release);
- return -EFAULT;
+ qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
+ if (unwritten) {
+ DRM_ERROR("got unwritten %d\n", unwritten);
+ ret = -EFAULT;
+ goto out_free_release;
+ }
+
+ /* fill out reloc info structs */
+ num_relocs = 0;
+ for (i = 0; i < cmd->relocs_num; ++i) {
+ struct drm_qxl_reloc reloc;
+
+ if (DRM_COPY_FROM_USER(&reloc,
+ &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
+ sizeof(reloc))) {
+ ret = -EFAULT;
+ goto out_free_bos;
}
- for (i = 0 ; i < user_cmd.relocs_num; ++i) {
- if (DRM_COPY_FROM_USER(&reloc,
- &((struct drm_qxl_reloc *)(uintptr_t)user_cmd.relocs)[i],
- sizeof(reloc))) {
- qxl_bo_list_unreserve(&reloc_list, true);
- qxl_release_unreserve(qdev, release);
- qxl_release_free(qdev, release);
- return -EFAULT;
- }
+ /* add the bos to the list of bos to validate -
+ need to validate first then process relocs? */
+ if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
+ DRM_DEBUG("unknown reloc type %d\n", reloc.reloc_type);
- /* add the bos to the list of bos to validate -
- need to validate first then process relocs? */
- if (reloc.dst_handle) {
- reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
- reloc.dst_handle, &reloc_list);
- if (!reloc_dst_bo) {
- qxl_bo_list_unreserve(&reloc_list, true);
- qxl_release_unreserve(qdev, release);
- qxl_release_free(qdev, release);
- return -EINVAL;
- }
- reloc_dst_offset = 0;
- } else {
- reloc_dst_bo = cmd_bo;
- reloc_dst_offset = release->release_offset;
+ ret = -EINVAL;
+ goto out_free_bos;
+ }
+ reloc_info[i].type = reloc.reloc_type;
+
+ if (reloc.dst_handle) {
+ reloc_info[i].dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
+ reloc.dst_handle, release);
+ if (!reloc_info[i].dst_bo) {
+ ret = -EINVAL;
+ reloc_info[i].src_bo = NULL;
+ goto out_free_bos;
}
-
- /* reserve and validate the reloc dst bo */
- if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
- reloc_src_bo =
- qxlhw_handle_to_bo(qdev, file_priv,
- reloc.src_handle, &reloc_list);
- if (!reloc_src_bo) {
- if (reloc_dst_bo != cmd_bo)
- drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
- qxl_bo_list_unreserve(&reloc_list, true);
- qxl_release_unreserve(qdev, release);
- qxl_release_free(qdev, release);
- return -EINVAL;
- }
- } else
- reloc_src_bo = NULL;
- if (reloc.reloc_type == QXL_RELOC_TYPE_BO) {
- apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset,
- reloc_src_bo, reloc.src_offset);
- } else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) {
- apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo);
- } else {
- DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type);
- return -EINVAL;
+ reloc_info[i].dst_offset = reloc.dst_offset;
+ } else {
+ reloc_info[i].dst_bo = cmd_bo;
+ reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
+ }
+ num_relocs++;
+
+ /* reserve and validate the reloc dst bo */
+ if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
+ reloc_info[i].src_bo =
+ qxlhw_handle_to_bo(qdev, file_priv,
+ reloc.src_handle, release);
+ if (!reloc_info[i].src_bo) {
+ if (reloc_info[i].dst_bo != cmd_bo)
+ drm_gem_object_unreference_unlocked(&reloc_info[i].dst_bo->gem_base);
+ ret = -EINVAL;
+ goto out_free_bos;
}
+ reloc_info[i].src_offset = reloc.src_offset;
+ } else {
+ reloc_info[i].src_bo = NULL;
+ reloc_info[i].src_offset = 0;
+ }
+ }
- if (reloc_src_bo && reloc_src_bo != cmd_bo) {
- qxl_release_add_res(qdev, release, reloc_src_bo);
- drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base);
- }
+ /* validate all buffers */
+ ret = qxl_release_reserve_list(release, false);
+ if (ret)
+ goto out_free_bos;
- if (reloc_dst_bo != cmd_bo)
- drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
- }
- qxl_fence_releaseable(qdev, release);
+ for (i = 0; i < cmd->relocs_num; ++i) {
+ if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
+ apply_reloc(qdev, &reloc_info[i]);
+ else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
+ apply_surf_reloc(qdev, &reloc_info[i]);
+ }
- ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true);
- if (ret == -ERESTARTSYS) {
- qxl_release_unreserve(qdev, release);
- qxl_release_free(qdev, release);
- qxl_bo_list_unreserve(&reloc_list, true);
+ ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
+ if (ret)
+ qxl_release_backoff_reserve_list(release);
+ else
+ qxl_release_fence_buffer_objects(release);
+
+out_free_bos:
+ for (j = 0; j < num_relocs; j++) {
+ if (reloc_info[j].dst_bo != cmd_bo)
+ drm_gem_object_unreference_unlocked(&reloc_info[j].dst_bo->gem_base);
+ if (reloc_info[j].src_bo && reloc_info[j].src_bo != cmd_bo)
+ drm_gem_object_unreference_unlocked(&reloc_info[j].src_bo->gem_base);
+ }
+out_free_release:
+ if (ret)
+ qxl_release_free(qdev, release);
+out_free_reloc:
+ kfree(reloc_info);
+ return ret;
+}
+
+static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct qxl_device *qdev = dev->dev_private;
+ struct drm_qxl_execbuffer *execbuffer = data;
+ struct drm_qxl_command user_cmd;
+ int cmd_num;
+ int ret;
+
+ for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
+
+ struct drm_qxl_command *commands =
+ (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
+
+ if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
+ sizeof(user_cmd)))
+ return -EFAULT;
+
+ ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
+ if (ret)
return ret;
- }
- qxl_release_unreserve(qdev, release);
}
- qxl_bo_list_unreserve(&reloc_list, 0);
return 0;
}
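
In outline, the rewritten path is: copy the command, collect every reloc into reloc_info, reserve and validate the whole BO list once, apply the relocs, push the command, then either fence the objects or back off the reservations. A schematic sketch of that unwind structure (stub functions, not the driver's API; 0 means success):

#include <stdlib.h>

static int  collect_relocs(int n)  { (void)n; return 0; }
static int  reserve_validate(void) { return 0; }
static void apply_relocs(void)     { }
static int  push_command(void)     { return 0; }
static void backoff(void)          { }
static void fence_objects(void)    { }
static void put_reloc_bos(int n)   { (void)n; }
static void free_release(void)     { }

static int process_one(int relocs_num)
{
	int ret, num_relocs = 0;
	void *reloc_info = calloc(relocs_num, 16);	/* reloc_info array */

	if (!reloc_info)
		return -1;
	ret = collect_relocs(relocs_num);	/* phase 1: copy + look up BOs */
	if (ret)
		goto out_free_bos;
	num_relocs = relocs_num;
	ret = reserve_validate();		/* phase 2: whole list at once */
	if (ret)
		goto out_free_bos;
	apply_relocs();				/* phase 3: patch addresses */
	ret = push_command();			/* phase 4: submit */
	if (ret)
		backoff();			/* undo the reservations */
	else
		fence_objects();		/* fence + unreserve */
out_free_bos:
	put_reloc_bos(num_relocs);		/* drop gem references */
	if (ret)
		free_release();
	free(reloc_info);
	return ret;
}

int main(void)
{
	return process_one(2);
}
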
@@ -305,7 +336,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
goto out;
if (!qobj->pin_count) {
- qxl_ttm_placement_from_domain(qobj, qobj->type);
+ qxl_ttm_placement_from_domain(qobj, qobj->type, false);
ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
true, false);
if (unlikely(ret))
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 1191fe7788c9..aa161cddd87e 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -51,20 +51,21 @@ bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
return false;
}
-void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
+void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
u32 c = 0;
+ u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
qbo->placement.fpfn = 0;
qbo->placement.lpfn = 0;
qbo->placement.placement = qbo->placements;
qbo->placement.busy_placement = qbo->placements;
if (domain == QXL_GEM_DOMAIN_VRAM)
- qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM;
+ qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
if (domain == QXL_GEM_DOMAIN_SURFACE)
- qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0;
+ qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag;
if (domain == QXL_GEM_DOMAIN_CPU)
- qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
if (!c)
qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
qbo->placement.num_placement = c;
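
The new pinned parameter simply folds TTM_PL_FLAG_NO_EVICT into every placement as it is built, replacing the caller-side loop removed from qxl_bo_pin() below. A toy illustration of the flag composition (flag values made up for the sketch):

#include <stdio.h>
#include <stdint.h>

/* made-up stand-ins for the TTM placement flags */
#define PL_FLAG_CACHED   (1u << 0)
#define PL_FLAG_VRAM     (1u << 1)
#define PL_FLAG_NO_EVICT (1u << 2)

int main(void)
{
	int pinned = 1;
	uint32_t pflag = pinned ? PL_FLAG_NO_EVICT : 0;
	uint32_t placement = PL_FLAG_CACHED | PL_FLAG_VRAM | pflag;

	printf("placement=0x%x no_evict=%d\n",
	       placement, !!(placement & PL_FLAG_NO_EVICT));
	return 0;
}
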
@@ -73,7 +74,7 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
int qxl_bo_create(struct qxl_device *qdev,
- unsigned long size, bool kernel, u32 domain,
+ unsigned long size, bool kernel, bool pinned, u32 domain,
struct qxl_surface *surf,
struct qxl_bo **bo_ptr)
{
@@ -99,15 +100,15 @@ int qxl_bo_create(struct qxl_device *qdev,
}
bo->gem_base.driver_private = NULL;
bo->type = domain;
- bo->pin_count = 0;
+ bo->pin_count = pinned ? 1 : 0;
bo->surface_id = 0;
qxl_fence_init(qdev, &bo->fence);
INIT_LIST_HEAD(&bo->list);
- atomic_set(&bo->reserve_count, 0);
+
if (surf)
bo->surf = *surf;
- qxl_ttm_placement_from_domain(bo, domain);
+ qxl_ttm_placement_from_domain(bo, domain, pinned);
r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
&bo->placement, 0, !kernel, NULL, size,
@@ -228,7 +229,7 @@ struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
{
struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
- int r, i;
+ int r;
if (bo->pin_count) {
bo->pin_count++;
@@ -236,9 +237,7 @@ int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
*gpu_addr = qxl_bo_gpu_offset(bo);
return 0;
}
- qxl_ttm_placement_from_domain(bo, domain);
- for (i = 0; i < bo->placement.num_placement; i++)
- bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ qxl_ttm_placement_from_domain(bo, domain, true);
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (likely(r == 0)) {
bo->pin_count = 1;
@@ -317,53 +316,6 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
return 0;
}
-void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed)
-{
- struct qxl_bo_list *entry, *sf;
-
- list_for_each_entry_safe(entry, sf, &reloc_list->bos, lhead) {
- qxl_bo_unreserve(entry->bo);
- list_del(&entry->lhead);
- kfree(entry);
- }
-}
-
-int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo)
-{
- struct qxl_bo_list *entry;
- int ret;
-
- list_for_each_entry(entry, &reloc_list->bos, lhead) {
- if (entry->bo == bo)
- return 0;
- }
-
- entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- entry->bo = bo;
- list_add(&entry->lhead, &reloc_list->bos);
-
- ret = qxl_bo_reserve(bo, false);
- if (ret)
- return ret;
-
- if (!bo->pin_count) {
- qxl_ttm_placement_from_domain(bo, bo->type);
- ret = ttm_bo_validate(&bo->tbo, &bo->placement,
- true, false);
- if (ret)
- return ret;
- }
-
- /* allocate a surface for reserved + validated buffers */
- ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
- if (ret)
- return ret;
- return 0;
-}
-
int qxl_surf_evict(struct qxl_device *qdev)
{
return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index ee7ad79ce781..8cb6167038e5 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -88,7 +88,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
extern int qxl_bo_create(struct qxl_device *qdev,
unsigned long size,
- bool kernel, u32 domain,
+ bool kernel, bool pinned, u32 domain,
struct qxl_surface *surf,
struct qxl_bo **bo_ptr);
extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
@@ -99,9 +99,7 @@ extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
extern void qxl_bo_unref(struct qxl_bo **bo);
extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr);
extern int qxl_bo_unpin(struct qxl_bo *bo);
-extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain);
+extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned);
extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
-extern int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo);
-extern void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed);
#endif
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index b443d6751d5f..b61449e52cd5 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -38,7 +38,8 @@
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
-uint64_t
+
+static uint64_t
qxl_release_alloc(struct qxl_device *qdev, int type,
struct qxl_release **ret)
{
@@ -53,9 +54,9 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
return 0;
}
release->type = type;
- release->bo_count = 0;
release->release_offset = 0;
release->surface_release_id = 0;
+ INIT_LIST_HEAD(&release->bos);
idr_preload(GFP_KERNEL);
spin_lock(&qdev->release_idr_lock);
@@ -77,20 +78,20 @@ void
qxl_release_free(struct qxl_device *qdev,
struct qxl_release *release)
{
- int i;
-
- QXL_INFO(qdev, "release %d, type %d, %d bos\n", release->id,
- release->type, release->bo_count);
+ struct qxl_bo_list *entry, *tmp;
+ QXL_INFO(qdev, "release %d, type %d\n", release->id,
+ release->type);
if (release->surface_release_id)
qxl_surface_id_dealloc(qdev, release->surface_release_id);
- for (i = 0 ; i < release->bo_count; ++i) {
+ list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) {
+ struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
QXL_INFO(qdev, "release %llx\n",
- release->bos[i]->tbo.addr_space_offset
+ entry->tv.bo->addr_space_offset
- DRM_FILE_OFFSET);
- qxl_fence_remove_release(&release->bos[i]->fence, release->id);
- qxl_bo_unref(&release->bos[i]);
+ qxl_fence_remove_release(&bo->fence, release->id);
+ qxl_bo_unref(&bo);
}
spin_lock(&qdev->release_idr_lock);
idr_remove(&qdev->release_idr, release->id);
@@ -98,83 +99,117 @@ qxl_release_free(struct qxl_device *qdev,
kfree(release);
}
-void
-qxl_release_add_res(struct qxl_device *qdev, struct qxl_release *release,
- struct qxl_bo *bo)
-{
- int i;
- for (i = 0; i < release->bo_count; i++)
- if (release->bos[i] == bo)
- return;
-
- if (release->bo_count >= QXL_MAX_RES) {
- DRM_ERROR("exceeded max resource on a qxl_release item\n");
- return;
- }
- release->bos[release->bo_count++] = qxl_bo_ref(bo);
-}
-
static int qxl_release_bo_alloc(struct qxl_device *qdev,
struct qxl_bo **bo)
{
int ret;
- ret = qxl_bo_create(qdev, PAGE_SIZE, false, QXL_GEM_DOMAIN_VRAM, NULL,
+ /* pin release bo's; they are too messy to evict */
+ ret = qxl_bo_create(qdev, PAGE_SIZE, false, true,
+ QXL_GEM_DOMAIN_VRAM, NULL,
bo);
return ret;
}
-int qxl_release_reserve(struct qxl_device *qdev,
- struct qxl_release *release, bool no_wait)
+int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
+{
+ struct qxl_bo_list *entry;
+
+ list_for_each_entry(entry, &release->bos, tv.head) {
+ if (entry->tv.bo == &bo->tbo)
+ return 0;
+ }
+
+ entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ qxl_bo_ref(bo);
+ entry->tv.bo = &bo->tbo;
+ list_add_tail(&entry->tv.head, &release->bos);
+ return 0;
+}
+
+static int qxl_release_validate_bo(struct qxl_bo *bo)
{
int ret;
- if (atomic_inc_return(&release->bos[0]->reserve_count) == 1) {
- ret = qxl_bo_reserve(release->bos[0], no_wait);
+
+ if (!bo->pin_count) {
+ qxl_ttm_placement_from_domain(bo, bo->type, false);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement,
+ true, false);
if (ret)
return ret;
}
+
+ /* allocate a surface for reserved + validated buffers */
+ ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
+{
+ int ret;
+ struct qxl_bo_list *entry;
+
+ /* if there is only one object on the release, it's the release
+ itself; since release bo's are pinned there is no need to reserve */
+ if (list_is_singular(&release->bos))
+ return 0;
+
+ ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(entry, &release->bos, tv.head) {
+ struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
+
+ ret = qxl_release_validate_bo(bo);
+ if (ret) {
+ ttm_eu_backoff_reservation(&release->ticket, &release->bos);
+ return ret;
+ }
+ }
return 0;
}
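
A schematic of the reserve-then-validate loop just added (generic stubs, not the ttm_eu_* API): a single-entry list holds only the release's own pinned BO, so reservation is skipped entirely, and any validation failure backs off every reservation taken so far.

#include <stdio.h>

#define NBOS 3

/* stubs: 0 = success */
static int  reserve_all(void)  { return 0; }
static int  validate(int i)    { return i == 2 ? -1 : 0; }	/* bo 2 fails */
static void backoff_all(void)  { puts("backoff all reservations"); }

static int reserve_list(int nbos)
{
	int i, ret;

	if (nbos == 1)		/* only the release bo itself: pinned, skip */
		return 0;

	ret = reserve_all();	/* one acquire pass over the whole list */
	if (ret)
		return ret;

	for (i = 0; i < nbos; i++) {
		ret = validate(i);
		if (ret) {
			backoff_all();	/* undo every reservation */
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	printf("ret=%d\n", reserve_list(NBOS));
	return 0;
}
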
-void qxl_release_unreserve(struct qxl_device *qdev,
- struct qxl_release *release)
+void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
- if (atomic_dec_and_test(&release->bos[0]->reserve_count))
- qxl_bo_unreserve(release->bos[0]);
+ /* if there is only one object on the release, it's the release
+ itself; since release bo's are pinned there is no need to reserve */
+ if (list_is_singular(&release->bos))
+ return;
+
+ ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}
+
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
enum qxl_surface_cmd_type surface_cmd_type,
struct qxl_release *create_rel,
struct qxl_release **release)
{
- int ret;
-
if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
int idr_ret;
+ struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
struct qxl_bo *bo;
union qxl_release_info *info;
/* stash the release after the create command */
idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
- bo = qxl_bo_ref(create_rel->bos[0]);
+ bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo));
(*release)->release_offset = create_rel->release_offset + 64;
- qxl_release_add_res(qdev, *release, bo);
+ qxl_release_list_add(*release, bo);
- ret = qxl_release_reserve(qdev, *release, false);
- if (ret) {
- DRM_ERROR("release reserve failed\n");
- goto out_unref;
- }
info = qxl_release_map(qdev, *release);
info->id = idr_ret;
qxl_release_unmap(qdev, *release, info);
-
-out_unref:
qxl_bo_unref(&bo);
- return ret;
+ return 0;
}
return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
@@ -187,7 +222,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
{
struct qxl_bo *bo;
int idr_ret;
- int ret;
+ int ret = 0;
union qxl_release_info *info;
int cur_idx;
@@ -216,11 +251,6 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
mutex_unlock(&qdev->release_mutex);
return ret;
}
-
- /* pin releases bo's they are too messy to evict */
- ret = qxl_bo_reserve(qdev->current_release_bo[cur_idx], false);
- qxl_bo_pin(qdev->current_release_bo[cur_idx], QXL_GEM_DOMAIN_VRAM, NULL);
- qxl_bo_unreserve(qdev->current_release_bo[cur_idx]);
}
bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
@@ -231,36 +261,18 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
if (rbo)
*rbo = bo;
- qxl_release_add_res(qdev, *release, bo);
-
- ret = qxl_release_reserve(qdev, *release, false);
mutex_unlock(&qdev->release_mutex);
- if (ret)
- goto out_unref;
+
+ qxl_release_list_add(*release, bo);
info = qxl_release_map(qdev, *release);
info->id = idr_ret;
qxl_release_unmap(qdev, *release, info);
-out_unref:
qxl_bo_unref(&bo);
return ret;
}
-int qxl_fence_releaseable(struct qxl_device *qdev,
- struct qxl_release *release)
-{
- int i, ret;
- for (i = 0; i < release->bo_count; i++) {
- if (!release->bos[i]->tbo.sync_obj)
- release->bos[i]->tbo.sync_obj = &release->bos[i]->fence;
- ret = qxl_fence_add_release(&release->bos[i]->fence, release->id);
- if (ret)
- return ret;
- }
- return 0;
-}
-
struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
uint64_t id)
{
@@ -273,10 +285,7 @@ struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
DRM_ERROR("failed to find id in release_idr\n");
return NULL;
}
- if (release->bo_count < 1) {
- DRM_ERROR("read a released resource with 0 bos\n");
- return NULL;
- }
+
return release;
}
@@ -285,9 +294,12 @@ union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
{
void *ptr;
union qxl_release_info *info;
- struct qxl_bo *bo = release->bos[0];
+ struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
+ struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
+ if (!ptr)
+ return NULL;
info = ptr + (release->release_offset & ~PAGE_SIZE);
return info;
}
@@ -296,9 +308,51 @@ void qxl_release_unmap(struct qxl_device *qdev,
struct qxl_release *release,
union qxl_release_info *info)
{
- struct qxl_bo *bo = release->bos[0];
+ struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
+ struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
void *ptr;
ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}
+
+void qxl_release_fence_buffer_objects(struct qxl_release *release)
+{
+ struct ttm_validate_buffer *entry;
+ struct ttm_buffer_object *bo;
+ struct ttm_bo_global *glob;
+ struct ttm_bo_device *bdev;
+ struct ttm_bo_driver *driver;
+ struct qxl_bo *qbo;
+
+ /* if there is only one object on the release, it's the release
+ itself; since release bo's are pinned there is no need to reserve */
+ if (list_is_singular(&release->bos))
+ return;
+
+ bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
+ bdev = bo->bdev;
+ driver = bdev->driver;
+ glob = bo->glob;
+
+ spin_lock(&glob->lru_lock);
+ spin_lock(&bdev->fence_lock);
+
+ list_for_each_entry(entry, &release->bos, head) {
+ bo = entry->bo;
+ qbo = to_qxl_bo(bo);
+
+ if (!entry->bo->sync_obj)
+ entry->bo->sync_obj = &qbo->fence;
+
+ qxl_fence_add_release_locked(&qbo->fence, release->id);
+
+ ttm_bo_add_to_lru(bo);
+ ww_mutex_unlock(&bo->resv->lock);
+ entry->reserved = false;
+ }
+ spin_unlock(&bdev->fence_lock);
+ spin_unlock(&glob->lru_lock);
+ ww_acquire_fini(&release->ticket);
+}
+
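
The fencing helper above works entirely under lru_lock and fence_lock: it installs the fence as each BO's sync_obj, records the release id via the new _locked variant, re-queues the BO on the LRU, and only then drops each per-BO reservation. A rough ordering sketch with pthread mutexes as stand-ins (not the TTM API):

#include <pthread.h>
#include <stdio.h>

#define NBOS 2

static pthread_mutex_t lru_lock   = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t fence_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t resv[NBOS];	/* per-bo "reservations" */

static void fence_buffer_objects(int release_id)
{
	int i;

	pthread_mutex_lock(&lru_lock);
	pthread_mutex_lock(&fence_lock);
	for (i = 0; i < NBOS; i++) {
		/* add_release_locked(): safe, fence_lock is held */
		printf("bo %d fenced by release %d\n", i, release_id);
		pthread_mutex_unlock(&resv[i]);	/* drop the reservation */
	}
	pthread_mutex_unlock(&fence_lock);
	pthread_mutex_unlock(&lru_lock);
}

int main(void)
{
	int i;

	for (i = 0; i < NBOS; i++) {
		pthread_mutex_init(&resv[i], NULL);
		pthread_mutex_lock(&resv[i]);	/* "reserved" earlier */
	}
	fence_buffer_objects(7);
	return 0;
}
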
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 489cb8cece4d..1dfd84cda2a1 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -206,7 +206,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
return;
}
qbo = container_of(bo, struct qxl_bo, tbo);
- qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU);
+ qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false);
*placement = qbo->placement;
}