| author | Abdiel Janulgue <abdiel.janulgue@linux.intel.com> | 2020-01-03 23:41:35 +0300 |
|---|---|---|
| committer | Chris Wilson <chris@chris-wilson.co.uk> | 2020-01-04 20:57:46 +0300 |
| commit | 4e598fad226be0d044d318b6c49cec2ec2b72be3 (patch) | |
| tree | af71ca5cb9704c866aad5bfdb439b049ab52a089 /drivers/gpu/drm/i915/i915_mm.c | |
| parent | b2fcaac98bbec6036d17a0a120a19b64e65f6534 (diff) | |
| download | linux-4e598fad226be0d044d318b6c49cec2ec2b72be3.tar.xz | |
drm/i915/gem: Extend mmap support for lmem
Local memory objects are backed by a scatterlist just as our usual objects are, but
instead of using the struct page stored therein, we need to use the
sg->dma_address.
Signed-off-by: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20200103204137.2131004-1-chris@chris-wilson.co.uk
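The change described above comes down to where the page-frame number for each PTE is taken from. As a rough, illustrative sketch (not code from this patch; the helper name, the `is_lmem` flag and the standalone `iobase` parameter are assumptions), the two modes can be expressed with the generic scatterlist accessors:

```c
#include <linux/mm.h>
#include <linux/scatterlist.h>

/*
 * Illustrative only: system memory derives the pfn from the sg entry's
 * struct page, while local memory has no struct page and instead offsets
 * the stored DMA address into the device's I/O region.
 */
static unsigned long pfn_for_sg(struct scatterlist *sg,
				resource_size_t iobase, bool is_lmem)
{
	if (is_lmem)
		return (sg_dma_address(sg) + iobase) >> PAGE_SHIFT;

	return page_to_pfn(sg_page(sg));
}
```

The patch below folds this decision into the i915 sg iterator by carrying `iobase` in `struct remap_pfn` and treating `-1` as "no I/O base, use struct page pfns".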
Diffstat (limited to 'drivers/gpu/drm/i915/i915_mm.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_mm.c | 34 |
1 file changed, 22 insertions, 12 deletions
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 2998689e6d42..b6376b25ef63 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -35,6 +35,7 @@ struct remap_pfn {
 	pgprot_t prot;
 
 	struct sgt_iter sgt;
+	resource_size_t iobase;
 };
 
 static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
@@ -48,12 +49,17 @@ static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
 	return 0;
 }
 
-static inline unsigned long sgt_pfn(const struct sgt_iter *sgt)
+#define use_dma(io) ((io) != -1)
+
+static inline unsigned long sgt_pfn(const struct remap_pfn *r)
 {
-	return sgt->pfn + (sgt->curr >> PAGE_SHIFT);
+	if (use_dma(r->iobase))
+		return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
+	else
+		return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
 }
 
-static int remap_sg_page(pte_t *pte, unsigned long addr, void *data)
+static int remap_sg(pte_t *pte, unsigned long addr, void *data)
 {
 	struct remap_pfn *r = data;
 
@@ -62,12 +68,12 @@ static int remap_sg_page(pte_t *pte, unsigned long addr, void *data)
 
 	/* Special PTE are not associated with any struct page */
 	set_pte_at(r->mm, addr, pte,
-		   pte_mkspecial(pfn_pte(sgt_pfn(&r->sgt), r->prot)));
+		   pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
 	r->pfn++; /* track insertions in case we need to unwind later */
 
 	r->sgt.curr += PAGE_SIZE;
 	if (r->sgt.curr >= r->sgt.max)
-		r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), false);
+		r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));
 
 	return 0;
 }
@@ -108,30 +114,34 @@ int remap_io_mapping(struct vm_area_struct *vma,
 }
 
 /**
- * remap_io_sg_page - remap an IO mapping to userspace
+ * remap_io_sg - remap an IO mapping to userspace
  * @vma: user vma to map to
  * @addr: target user address to start at
  * @size: size of map area
  * @sgl: Start sg entry
+ * @iobase: Use stored dma address offset by this address or pfn if -1
  *
 * Note: this is only safe if the mm semaphore is held when called.
 */
-int remap_io_sg_page(struct vm_area_struct *vma,
-		     unsigned long addr, unsigned long size,
-		     struct scatterlist *sgl)
+int remap_io_sg(struct vm_area_struct *vma,
+		unsigned long addr, unsigned long size,
+		struct scatterlist *sgl, resource_size_t iobase)
 {
 	struct remap_pfn r = {
 		.mm = vma->vm_mm,
 		.prot = vma->vm_page_prot,
-		.sgt = __sgt_iter(sgl, false),
+		.sgt = __sgt_iter(sgl, use_dma(iobase)),
+		.iobase = iobase,
 	};
 	int err;
 
 	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
 	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
 
-	flush_cache_range(vma, addr, size);
-	err = apply_to_page_range(r.mm, addr, size, remap_sg_page, &r);
+	if (!use_dma(iobase))
+		flush_cache_range(vma, addr, size);
+
+	err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
 	if (unlikely(err)) {
 		zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
 		return err;
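Because the diffstat is limited to i915_mm.c, the caller that selects `iobase` is not shown here. A hedged sketch of how a fault handler might use the new interface follows; the object fields, helper names and includes are assumptions for illustration, not taken from this patch:

```c
#include "i915_drv.h"
#include "gem/i915_gem_object.h"

/*
 * Sketch only: the real call site lives outside this file. The fields
 * obj->mm.region and obj->mm.pages, and the has-struct-page check, are
 * assumptions for the example.
 */
static int example_remap(struct vm_area_struct *vma,
			 struct drm_i915_gem_object *obj)
{
	/* -1 tells remap_io_sg() to use struct-page pfns (system memory) */
	resource_size_t iobase = -1;

	if (!i915_gem_object_has_struct_page(obj))
		/* lmem: offset the stored DMA addresses into the mappable BAR */
		iobase = obj->mm.region->iomap.base - obj->mm.region->region.start;

	return remap_io_sg(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			   obj->mm.pages->sgl, iobase);
}
```

Passing `-1` keeps the pre-existing struct-page path (including the `flush_cache_range()` call), while any other value switches the PTE construction over to the DMA addresses stored in the scatterlist.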