author    Chris Wilson <chris@chris-wilson.co.uk>    2016-10-28 15:58:35 +0300
committer Chris Wilson <chris@chris-wilson.co.uk>    2016-10-28 22:53:46 +0300
commit    a4f5ea64f0a818586b9de71803824b43dd01e517 (patch)
tree      9a786a5df5fc622a5a3824580e489f0e9ee601d6 /drivers/gpu/drm/i915/i915_gem_gtt.c
parent    d2a84a76a3b970fa32e6eda3d85e7782f831379e (diff)
drm/i915: Refactor object page API
The plan is to make obtaining the backing storage for the object avoid
struct_mutex (i.e. use its own locking). The first step is to update the
API so that normal users only call pin/unpin whilst working on the
backing storage.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-12-chris@chris-wilson.co.uk
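
As an illustration of the pin/unpin contract described in the commit message, here is a minimal sketch of how a caller is now expected to hold the backing storage while working on it. The helper names i915_gem_object_pin_pages()/i915_gem_object_unpin_pages() and the int error return are assumptions for illustration, not taken from this patch's hunks:

static int work_on_backing_store(struct drm_i915_gem_object *obj)
{
	int ret;

	/* Pin the backing storage; obj->mm.pages remains valid for as long
	 * as the pin is held (assumed helper, see lead-in above).
	 */
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	/* ... operate on obj->mm.pages (a struct sg_table) here ... */

	/* Drop the pin; once no pins remain, the pages may be released. */
	i915_gem_object_unpin_pages(obj);
	return 0;
}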
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 10
1 file changed, 5 insertions, 5 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index b3f341fe77bf..794ccc4cffaa 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -175,7 +175,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
{
u32 pte_flags = 0;
- vma->pages = vma->obj->pages;
+ vma->pages = vma->obj->mm.pages;
/* Currently applicable only to VLV */
if (vma->obj->gt_ro)
@@ -2373,7 +2373,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
if (!dma_map_sg(&obj->base.dev->pdev->dev,
- obj->pages->sgl, obj->pages->nents,
+ obj->mm.pages->sgl, obj->mm.pages->nents,
PCI_DMA_BIDIRECTIONAL))
return -ENOSPC;
@@ -2710,7 +2710,7 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
}
}
- dma_unmap_sg(kdev, obj->pages->sgl, obj->pages->nents,
+ dma_unmap_sg(kdev, obj->mm.pages->sgl, obj->mm.pages->nents,
PCI_DMA_BIDIRECTIONAL);
}
@@ -3548,7 +3548,7 @@ intel_rotate_fb_obj_pages(const struct intel_rotation_info *rot_info,
/* Populate source page list from the object. */
i = 0;
- for_each_sgt_dma(dma_addr, sgt_iter, obj->pages)
+ for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
page_addr_list[i++] = dma_addr;
GEM_BUG_ON(i != n_pages);
@@ -3641,7 +3641,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
return 0;
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
- vma->pages = vma->obj->pages;
+ vma->pages = vma->obj->mm.pages;
else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
vma->pages =
intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
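
The hunks above replace direct obj->pages dereferences with obj->mm.pages; the underlying sg_table is unchanged. As a rough sketch of the DMA-address walk seen in the intel_rotate_fb_obj_pages() hunk, assuming the caller already holds a page pin as the commit message requires (the counting helper is hypothetical; struct sgt_iter is the driver's iterator type used with for_each_sgt_dma):

static unsigned int count_dma_pages(struct drm_i915_gem_object *obj)
{
	struct sgt_iter sgt_iter;
	dma_addr_t dma_addr;
	unsigned int n = 0;

	/* Walk the backing store one PAGE_SIZE DMA address at a time;
	 * obj->mm.pages must stay pinned by the caller for the duration
	 * of the walk.
	 */
	for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
		n++;

	return n;
}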