author     Chris Wilson <chris@chris-wilson.co.uk>    2015-10-08 15:39:54 +0300
committer  Daniel Vetter <daniel.vetter@ffwll.ch>     2015-10-19 13:12:02 +0300
commit     def0c5f6b0cd58cfc0b5702b1e1b1f5078debc35 (patch)
tree       fa8d4dc46cb69bca41d6697fb17ff754dce755f5
parent     1f9a99e0e75f29776d6f4062a03edc5e41c60596 (diff)
download   linux-def0c5f6b0cd58cfc0b5702b1e1b1f5078debc35.tar.xz
drm/i915: Map the ringbuffer using WB on LLC machines
If we have LLC coherency, we can write directly into the ringbuffer
using ordinary cached writes rather than forcing WC access.
v2: An important consequence is that we can forgo the mappable request
for WB ringbuffers, allowing for many more simultaneous contexts.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
-rw-r--r--   drivers/gpu/drm/i915/intel_ringbuffer.c   | 70
1 file changed, 56 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 0359736fe979..d6e12de82aaa 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2002,11 +2002,35 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
 
 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
-	iounmap(ringbuf->virtual_start);
+	if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
+		vunmap(ringbuf->virtual_start);
+	else
+		iounmap(ringbuf->virtual_start);
 	ringbuf->virtual_start = NULL;
 	i915_gem_object_ggtt_unpin(ringbuf->obj);
 }
 
+static u32 *vmap_obj(struct drm_i915_gem_object *obj)
+{
+	struct sg_page_iter sg_iter;
+	struct page **pages;
+	void *addr;
+	int i;
+
+	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
+	if (pages == NULL)
+		return NULL;
+
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
+		pages[i++] = sg_page_iter_page(&sg_iter);
+
+	addr = vmap(pages, i, 0, PAGE_KERNEL);
+	drm_free_large(pages);
+
+	return addr;
+}
+
 int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 				     struct intel_ringbuffer *ringbuf)
 {
@@ -2014,21 +2038,39 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 	struct drm_i915_gem_object *obj = ringbuf->obj;
 	int ret;
 
-	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
-	if (ret)
-		return ret;
+	if (HAS_LLC(dev_priv) && !obj->stolen) {
+		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
+		if (ret)
+			return ret;
 
-	ret = i915_gem_object_set_to_gtt_domain(obj, true);
-	if (ret) {
-		i915_gem_object_ggtt_unpin(obj);
-		return ret;
-	}
+		ret = i915_gem_object_set_to_cpu_domain(obj, true);
+		if (ret) {
+			i915_gem_object_ggtt_unpin(obj);
+			return ret;
+		}
+
+		ringbuf->virtual_start = vmap_obj(obj);
+		if (ringbuf->virtual_start == NULL) {
+			i915_gem_object_ggtt_unpin(obj);
+			return -ENOMEM;
+		}
+	} else {
+		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+		if (ret)
+			return ret;
 
-	ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
-					    i915_gem_obj_ggtt_offset(obj), ringbuf->size);
-	if (ringbuf->virtual_start == NULL) {
-		i915_gem_object_ggtt_unpin(obj);
-		return -EINVAL;
+		ret = i915_gem_object_set_to_gtt_domain(obj, true);
+		if (ret) {
+			i915_gem_object_ggtt_unpin(obj);
+			return ret;
+		}
+
+		ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
+						    i915_gem_obj_ggtt_offset(obj), ringbuf->size);
+		if (ringbuf->virtual_start == NULL) {
+			i915_gem_object_ggtt_unpin(obj);
+			return -EINVAL;
+		}
 	}
 
 	return 0;
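
The heart of the new write-back path is the vmap_obj() helper above: gather a
struct page pointer for every page of the object's backing store, then build an
ordinary cached kernel mapping with vmap(). Below is a minimal standalone
sketch of the same technique, not i915 code: the map_pages_wb() name, the
sg_table parameter, and the kcalloc() sizing are assumptions for illustration
(the patch itself uses drm_malloc_ab(), which falls back to vmalloc so large
page arrays need not be physically contiguous).

	/*
	 * Illustrative sketch, not part of the patch: build a write-back
	 * (cached) CPU mapping over a scatterlist of pages, mirroring what
	 * vmap_obj() does in the diff above.
	 */
	#include <linux/mm.h>
	#include <linux/scatterlist.h>
	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	static void *map_pages_wb(struct sg_table *st, size_t size)
	{
		struct sg_page_iter sg_iter;
		struct page **pages;
		void *addr;
		int i = 0;

		pages = kcalloc(size >> PAGE_SHIFT, sizeof(*pages), GFP_KERNEL);
		if (pages == NULL)
			return NULL;

		/* One struct page per PAGE_SIZE chunk of the backing store. */
		for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
			pages[i++] = sg_page_iter_page(&sg_iter);

		/*
		 * PAGE_KERNEL yields a normal cached (WB) mapping, in contrast
		 * to ioremap_wc(), which maps the GTT aperture with
		 * write-combining.
		 */
		addr = vmap(pages, i, 0, PAGE_KERNEL);
		kfree(pages); /* only needed while constructing the mapping */

		return addr;
	}

Two details of the patch follow directly from this. The teardown must match the
mapping type, so intel_unpin_ringbuffer_obj() re-checks HAS_LLC() and
obj->stolen to pick vunmap() over iounmap(); stolen memory is excluded because
it is not part of the kernel's memory map and has no struct pages to hand to
vmap(). And since the CPU no longer writes through the GTT aperture on the WB
path, the object can be pinned without PIN_MAPPABLE, which is what frees
aperture space for the additional contexts mentioned in the v2 note.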