author		Matthew Auld <matthew.auld@intel.com>	2017-10-07 01:18:17 +0300
committer	Chris Wilson <chris@chris-wilson.co.uk>	2017-10-07 12:11:45 +0300
commit		b91b09eea7a15ab417aa9ed6502b3be12f5283f8 (patch)
tree		a17eb01a664bca3e248231843c000e08be02e869 /drivers/gpu/drm/i915/i915_gem_userptr.c
parent		2a9654b2cdd8f9ef51b91dfd4448973a47284825 (diff)
download	linux-b91b09eea7a15ab417aa9ed6502b3be12f5283f8.tar.xz
drm/i915: push set_pages down to the callers
Each backend is now responsible for calling __i915_gem_object_set_pages upon successfully gathering its backing storage. This eliminates the inconsistency between the async and sync paths, which stands out even more when we start throwing around an sg_mask in a later patch.

Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171006145041.21673-6-matthew.auld@intel.com
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171006221833.32439-5-chris@chris-wilson.co.uk
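To make the new contract concrete, here is a minimal sketch of what a backend's get_pages hook looks like after this change. This is illustrative only, not code from the patch; my_backend_get_pages and my_backend_gather_storage are hypothetical names standing in for a real backend:

static int my_backend_get_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	/*
	 * Hypothetical helper standing in for the backend's own
	 * storage-gathering logic (shmem, userptr, stolen, ...).
	 * Returns a valid sg_table or an ERR_PTR-encoded error.
	 */
	pages = my_backend_gather_storage(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* The backend, not the common layer, now publishes its pages. */
	__i915_gem_object_set_pages(obj, pages);
	return 0;
}

The hook reports success or failure as a plain int, which is exactly the conversion applied to i915_gem_userptr_get_pages() in the diff below.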
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_userptr.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_userptr.c	12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 2d4996de7331..70ad7489827d 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -434,6 +434,8 @@ alloc_table:
return ERR_PTR(ret);
}
+ __i915_gem_object_set_pages(obj, st);
+
return st;
}
@@ -521,7 +523,6 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
pages = __i915_gem_userptr_alloc_pages(obj, pvec,
npages);
if (!IS_ERR(pages)) {
- __i915_gem_object_set_pages(obj, pages);
pinned = 0;
pages = NULL;
}
@@ -582,8 +583,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
return ERR_PTR(-EAGAIN);
}
-static struct sg_table *
-i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
+static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
const int num_pages = obj->base.size >> PAGE_SHIFT;
struct mm_struct *mm = obj->userptr.mm->mm;
@@ -612,9 +612,9 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
if (obj->userptr.work) {
/* active flag should still be held for the pending work */
if (IS_ERR(obj->userptr.work))
- return ERR_CAST(obj->userptr.work);
+ return PTR_ERR(obj->userptr.work);
else
- return ERR_PTR(-EAGAIN);
+ return -EAGAIN;
}
pvec = NULL;
@@ -650,7 +650,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
release_pages(pvec, pinned, 0);
kvfree(pvec);
- return pages;
+ return PTR_ERR_OR_ZERO(pages);
}
static void
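For context on the final hunk: with the hook now returning an int, the pointer-encoded error has to be folded into an errno at the return boundary. PTR_ERR_OR_ZERO() comes from include/linux/err.h, and its behavior matches the following sketch:

/*
 * Equivalent behavior of PTR_ERR_OR_ZERO() from include/linux/err.h:
 * fold an ERR_PTR-encoded pointer into the int error convention.
 */
static inline int PTR_ERR_OR_ZERO(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);	/* negative errno, e.g. -EFAULT */
	return 0;			/* valid pointer: success */
}

This is why "return pages;" could be replaced one-for-one by "return PTR_ERR_OR_ZERO(pages);" once the signature changed, and why the pending-work check drops ERR_CAST()/ERR_PTR() in favor of PTR_ERR() and a bare -EAGAIN.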