author    | Dave Airlie <airlied@redhat.com> | 2016-11-11 02:25:32 +0300
committer | Dave Airlie <airlied@redhat.com> | 2016-11-11 02:25:32 +0300
commit    | db8feb6979e91c2e916631a75dbfe9f10f6b05e5 (patch)
tree      | b4aa5965f207c18d908a794e5f4e647604d77553 /drivers/gpu/drm/i915/i915_gem_internal.c
parent    | afdd548f742ca454fc343696de472f3aaa5dc488 (diff)
parent    | 58e197d631d44f9f4817b8198b43132a40de1164 (diff)
download  | linux-db8feb6979e91c2e916631a75dbfe9f10f6b05e5.tar.xz
Merge tag 'drm-intel-next-2016-11-08' of git://anongit.freedesktop.org/git/drm-intel into drm-next
- gpu idling rework for s/r (Imre)
- vlv mappable scanout fix
- speed up probing in resume (Lyude)
- dp audio workarounds for gen9 (Dhinakaran)
- more conversion to using dev_priv internally (Ville)
- more gen9+ wm fixes and cleanups (Maarten)
- shrinker cleanup & fixes (Chris)
- reorg plane init code (Ville)
- implement support for multiple timelines (prep work for scheduler) from Chris and all
- untangle dev->struct_mutex locking as prep for multiple timelines (Chris)
- refactor bxt phy code and collect it all in intel_dpio_phy.c (Ander)
- another gvt update with bugfixes all over from Zhenyu
- piles of lspcon fixes from Imre
- 90/270 rotation fixes (Ville)
- guc log buffer support (Akash+Sagar)
- fbc fixes from Paulo
- untangle rpm vs. tiling-fences/mmaps (Chris)
- fix atomic commit to wait on the right fences (Daniel Stone)
* tag 'drm-intel-next-2016-11-08' of git://anongit.freedesktop.org/git/drm-intel: (181 commits)
drm/i915: Update DRIVER_DATE to 20161108
drm/i915: Mark CPU cache as dirty when used for rendering
drm/i915: Add assert for no pending GPU requests during suspend/resume in LR mode
drm/i915: Make sure engines are idle during GPU idling in LR mode
drm/i915: Avoid early GPU idling due to race with new request
drm/i915: Avoid early GPU idling due to already pending idle work
drm/i915: Limit Valleyview and earlier to only using mappable scanout
drm/i915: Round tile chunks up for constructing partial VMAs
drm/i915: Remove the vma from the object list upon close
drm/i915: Reinit polling before hpd when resuming
drm/i915: Remove redundant reprobe in i915_drm_resume
drm/i915/dp: Extend BDW DP audio workaround to GEN9 platforms
drm/i915/dp: BDW cdclk fix for DP audio
drm/i915: Fix pages pin counting around swizzle quirk
drm/i915: Fix test on inputs for vma_compare()
drm/i915/guc: Cache the client mapping
drm/i915: Tidy slab cache allocations
drm/i915: Introduce HAS_64BIT_RELOC
drm/i915: Show the execlist queue in debugfs/i915_engine_info
drm/i915: Unify global_list into global_link
...
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_internal.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_internal.c | 170
1 file changed, 170 insertions, 0 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
new file mode 100644
index 000000000000..4b3ff3e5b911
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright © 2014-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
+
+/* convert swiotlb segment size into sensible units (pages)! */
+#define IO_TLB_SEGPAGES (IO_TLB_SEGSIZE << IO_TLB_SHIFT >> PAGE_SHIFT)
+
+static void internal_free_pages(struct sg_table *st)
+{
+	struct scatterlist *sg;
+
+	for (sg = st->sgl; sg; sg = __sg_next(sg))
+		__free_pages(sg_page(sg), get_order(sg->length));
+
+	sg_free_table(st);
+	kfree(st);
+}
+
+static struct sg_table *
+i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	unsigned int npages = obj->base.size / PAGE_SIZE;
+	struct sg_table *st;
+	struct scatterlist *sg;
+	int max_order;
+	gfp_t gfp;
+
+	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	if (!st)
+		return ERR_PTR(-ENOMEM);
+
+	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
+		kfree(st);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	sg = st->sgl;
+	st->nents = 0;
+
+	max_order = MAX_ORDER;
+#ifdef CONFIG_SWIOTLB
+	if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */
+		max_order = min(max_order, ilog2(IO_TLB_SEGPAGES));
+#endif
+
+	gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
+	if (IS_CRESTLINE(i915) || IS_BROADWATER(i915)) {
+		/* 965gm cannot relocate objects above 4GiB. */
+		gfp &= ~__GFP_HIGHMEM;
+		gfp |= __GFP_DMA32;
+	}
+
+	do {
+		int order = min(fls(npages) - 1, max_order);
+		struct page *page;
+
+		do {
+			page = alloc_pages(gfp | (order ? QUIET : 0), order);
+			if (page)
+				break;
+			if (!order--)
+				goto err;
+
+			/* Limit subsequent allocations as well */
+			max_order = order;
+		} while (1);
+
+		sg_set_page(sg, page, PAGE_SIZE << order, 0);
+		st->nents++;
+
+		npages -= 1 << order;
+		if (!npages) {
+			sg_mark_end(sg);
+			break;
+		}
+
+		sg = __sg_next(sg);
+	} while (1);
+
+	if (i915_gem_gtt_prepare_pages(obj, st))
+		goto err;
+
+	/* Mark the pages as dontneed whilst they are still pinned. As soon
+	 * as they are unpinned they are allowed to be reaped by the shrinker,
+	 * and the caller is expected to repopulate - the contents of this
+	 * object are only valid whilst active and pinned.
+	 */
+	obj->mm.madv = I915_MADV_DONTNEED;
+	return st;
+
+err:
+	sg_mark_end(sg);
+	internal_free_pages(st);
+	return ERR_PTR(-ENOMEM);
+}
+
+static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
+					       struct sg_table *pages)
+{
+	i915_gem_gtt_finish_pages(obj, pages);
+	internal_free_pages(pages);
+
+	obj->mm.dirty = false;
+	obj->mm.madv = I915_MADV_WILLNEED;
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
+	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+		 I915_GEM_OBJECT_IS_SHRINKABLE,
+	.get_pages = i915_gem_object_get_pages_internal,
+	.put_pages = i915_gem_object_put_pages_internal,
+};
+
+/**
+ * Creates a new object that wraps some internal memory for private use.
+ * This object is not backed by swappable storage, and as such its contents
+ * are volatile and only valid whilst pinned. If the object is reaped by the
+ * shrinker, its pages and data will be discarded. Equally, it is not a full
+ * GEM object and so not valid for access from userspace. This makes it
+ * useful for hardware interfaces like ringbuffers (which are pinned from
+ * the time the request is written to the time the hardware stops accessing
+ * it), but not for contexts (which need to be preserved when not active
+ * for later reuse). Note that it is not cleared upon allocation.
+ */
+struct drm_i915_gem_object *
+i915_gem_object_create_internal(struct drm_i915_private *i915,
+				unsigned int size)
+{
+	struct drm_i915_gem_object *obj;
+
+	obj = i915_gem_object_alloc(&i915->drm);
+	if (!obj)
+		return ERR_PTR(-ENOMEM);
+
+	drm_gem_private_object_init(&i915->drm, &obj->base, size);
+	i915_gem_object_init(obj, &i915_gem_object_internal_ops);
+
+	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+	obj->cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
+
+	return obj;
+}
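For context, here is a minimal caller sketch (not part of the commit) showing how driver-internal code might use i915_gem_object_create_internal(): allocate a page-sized volatile object, pin and map it while it is in use, then release it. The function name example_scratch_alloc() is hypothetical, and the sketch assumes the i915_gem_object_pin_map()/i915_gem_object_unpin_map() and i915_gem_object_put() helpers that the driver provides elsewhere at this point in its history.

/*
 * Hypothetical usage sketch, not part of the commit: allocate a page of
 * internal memory, map it for CPU access while pinned, then release it.
 */
static int example_scratch_alloc(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	void *vaddr;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/*
	 * Pinning keeps the pages resident; the contents are only valid
	 * while pinned because the backing store is marked
	 * I915_MADV_DONTNEED by the internal get_pages above.
	 */
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		i915_gem_object_put(obj);
		return PTR_ERR(vaddr);
	}

	memset(vaddr, 0, PAGE_SIZE); /* internal objects are not cleared */

	i915_gem_object_unpin_map(obj);
	i915_gem_object_put(obj);
	return 0;
}

Once unpinned, the shrinker may reap the pages at any time and the caller is expected to repopulate the object after the next get_pages, which is why this allocator suits ringbuffers and similar transient hardware buffers but not contexts that must survive while inactive.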