author     Abdiel Janulgue <abdiel.janulgue@linux.intel.com>  2019-12-04 15:00:32 +0300
committer  Chris Wilson <chris@chris-wilson.co.uk>            2019-12-04 18:11:44 +0300
commit     cc662126b4134e25fcfb6cad480de0fa95a4d3d8 (patch)
tree       ec58b958d3a2b707a7ecf9bfbc45f4529ff7a30a /drivers/gpu/drm/i915
parent     c415ef2a267cdfda429bb519e651c82132ef89fa (diff)
download   linux-cc662126b4134e25fcfb6cad480de0fa95a4d3d8.tar.xz
drm/i915: Introduce DRM_I915_GEM_MMAP_OFFSET
This is really just an alias of mmap_gtt. The 'mmap offset' nomenclature
comes from the value returned by this ioctl, which is the offset into the
device fd that userspace uses with mmap(2).
mmap_gtt was our initial mmap_offset implementation; this extends
our CPU mmap support to allow additional fault handlers that depend on
the object's backing pages.
Note that we multiplex mmap_gtt and mmap_offset through the same ioctl,
and use the zero-extending behaviour of drm to differentiate between
them when we inspect the flags.
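For illustration, the uapi side this relies on — a sketch based on the matching
include/uapi/drm/i915_drm.h additions, which sit outside this i915-limited
diffstat. The legacy mmap_gtt struct is a strict prefix of the new one, so when
old userspace passes the short struct the drm ioctl layer zero-fills the tail
and flags reads back as I915_MMAP_OFFSET_GTT (0), preserving the old behaviour:

struct drm_i915_gem_mmap_gtt {
	__u32 handle;		/* in: GEM handle */
	__u32 pad;
	__u64 offset;		/* out: fake offset for mmap(2) */
};

struct drm_i915_gem_mmap_offset {
	__u32 handle;		/* in: GEM handle */
	__u32 pad;
	__u64 offset;		/* out: fake offset for mmap(2) */
	__u64 flags;		/* in: mapping type, zero-extended for old userspace */
#define I915_MMAP_OFFSET_GTT 0
#define I915_MMAP_OFFSET_WC  1
#define I915_MMAP_OFFSET_WB  2
#define I915_MMAP_OFFSET_UC  3
	__u64 extensions;	/* in: must be zero, rejected otherwise */
};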
To support multiple mmap types on an object we need to support multiple
mmap_offsets for an object (each offset in the global device address
space corresponding to a unique instance of the object for a file + mmap
type). As we drop the simplified drm core idea of a single mmap_offset,
we need to provide replacement hooks for the dumb mmap interface as
well.
Link: https://gitlab.freedesktop.org/mesa/mesa/merge_requests/1675
Testcase: igt/gem_mmap_offset
Signed-off-by: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20191204120032.3682839-1-chris@chris-wilson.co.uk
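For illustration, a minimal userspace sketch of the new flow (hypothetical
helper; assumes the DRM_IOCTL_I915_GEM_MMAP_OFFSET and I915_MMAP_OFFSET_WC
definitions from the matching uapi header update; error handling trimmed):
request an offset for the desired caching mode, then mmap(2) the device fd at
that offset.

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

/* Hypothetical helper: map a GEM object write-combined via MMAP_OFFSET. */
static void *map_bo_wc(int drm_fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_offset args = {
		.handle = handle,
		.flags = I915_MMAP_OFFSET_WC,	/* or _GTT, _WB, _UC */
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &args))
		return MAP_FAILED;

	/* args.offset is the fake offset into the device fd */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, args.offset);
}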
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r-- | drivers/gpu/drm/i915/gem/i915_gem_domain.c | 1
-rw-r--r-- | drivers/gpu/drm/i915/gem/i915_gem_ioctls.h | 4
-rw-r--r-- | drivers/gpu/drm/i915/gem/i915_gem_mman.c | 461
-rw-r--r-- | drivers/gpu/drm/i915/gem/i915_gem_mman.h | 31
-rw-r--r-- | drivers/gpu/drm/i915/gem/i915_gem_object.c | 27
-rw-r--r-- | drivers/gpu/drm/i915/gem/i915_gem_object.h | 7
-rw-r--r-- | drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 23
-rw-r--r-- | drivers/gpu/drm/i915/gem/i915_gem_pages.c | 3
-rw-r--r-- | drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 1
-rw-r--r-- | drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 88
-rw-r--r-- | drivers/gpu/drm/i915/gt/intel_reset.c | 7
-rw-r--r-- | drivers/gpu/drm/i915/i915_drv.c | 15
-rw-r--r-- | drivers/gpu/drm/i915/i915_drv.h | 4
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 3
-rw-r--r-- | drivers/gpu/drm/i915/i915_getparam.c | 1
-rw-r--r-- | drivers/gpu/drm/i915/i915_vma.c | 5
-rw-r--r-- | drivers/gpu/drm/i915/i915_vma.h | 3
17 files changed, 542 insertions, 142 deletions
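Since the patch bumps i915_gem_mmap_gtt_version() to 4 (see the
i915_gem_mman.c hunk below), userspace can probe for the new per-backing-store
fault handlers with GETPARAM. A hedged sketch, assuming the long-standing
I915_PARAM_MMAP_GTT_VERSION and DRM_IOCTL_I915_GETPARAM definitions:

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Hypothetical probe: does this kernel support MMAP_OFFSET (version >= 4)? */
static int has_mmap_offset(int drm_fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_MMAP_GTT_VERSION,
		.value = &value,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;

	return value >= 4;
}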
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index 3119f7be9bc0..f2064950622b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -13,6 +13,7 @@ #include "i915_gem_object.h" #include "i915_vma.h" #include "i915_gem_lmem.h" +#include "i915_gem_mman.h" static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h index ddc7f2a52b3e..87d8b27f426d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h @@ -28,8 +28,8 @@ int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file); -int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, - struct drm_file *file); +int i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); int i915_gem_pread_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index d60973603cc1..37aabbfa869a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -5,6 +5,7 @@ */ #include <linux/mman.h> +#include <linux/pfn_t.h> #include <linux/sizes.h> #include "gt/intel_gt.h" @@ -14,6 +15,7 @@ #include "i915_gem_gtt.h" #include "i915_gem_ioctls.h" #include "i915_gem_object.h" +#include "i915_gem_mman.h" #include "i915_trace.h" #include "i915_vma.h" @@ -144,6 +146,9 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj) * 3 - Remove implicit set-domain(GTT) and synchronisation on initial * pagefault; swapin remains transparent. * + * 4 - Support multiple fault handlers per object depending on object's + * backing storage (a.k.a. MMAP_OFFSET). + * * Restrictions: * * * snoopable objects cannot be accessed via the GTT. It can cause machine @@ -171,7 +176,7 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj) */ int i915_gem_mmap_gtt_version(void) { - return 3; + return 4; } static inline struct i915_ggtt_view @@ -197,29 +202,83 @@ compute_partial_view(const struct drm_i915_gem_object *obj, return view; } -/** - * i915_gem_fault - fault a page into the GTT - * @vmf: fault info - * - * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped - * from userspace. The fault handler takes care of binding the object to - * the GTT (if needed), allocating and programming a fence register (again, - * only if needed based on whether the old reg is still valid or the object - * is tiled) and inserting a new PTE into the faulting process. - * - * Note that the faulting process may involve evicting existing objects - * from the GTT and/or fence registers to make room. So performance may - * suffer if the GTT working set is large or there are few fence registers - * left. - * - * The current feature set supported by i915_gem_fault() and thus GTT mmaps - * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version). 
- */ -vm_fault_t i915_gem_fault(struct vm_fault *vmf) +static vm_fault_t i915_error_to_vmf_fault(int err) +{ + switch (err) { + default: + WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err); + /* fallthrough */ + case -EIO: /* shmemfs failure from swap device */ + case -EFAULT: /* purged object */ + case -ENODEV: /* bad object, how did you get here! */ + return VM_FAULT_SIGBUS; + + case -ENOSPC: /* shmemfs allocation failure */ + case -ENOMEM: /* our allocation failure */ + return VM_FAULT_OOM; + + case 0: + case -EAGAIN: + case -ERESTARTSYS: + case -EINTR: + case -EBUSY: + /* + * EBUSY is ok: this just means that another thread + * already did the job. + */ + return VM_FAULT_NOPAGE; + } +} + +static vm_fault_t vm_fault_cpu(struct vm_fault *vmf) +{ + struct vm_area_struct *area = vmf->vma; + struct i915_mmap_offset *mmo = area->vm_private_data; + struct drm_i915_gem_object *obj = mmo->obj; + unsigned long i, size = area->vm_end - area->vm_start; + bool write = area->vm_flags & VM_WRITE; + vm_fault_t ret = VM_FAULT_SIGBUS; + int err; + + if (!i915_gem_object_has_struct_page(obj)) + return ret; + + /* Sanity check that we allow writing into this object */ + if (i915_gem_object_is_readonly(obj) && write) + return ret; + + err = i915_gem_object_pin_pages(obj); + if (err) + return i915_error_to_vmf_fault(err); + + /* PTEs are revoked in obj->ops->put_pages() */ + for (i = 0; i < size >> PAGE_SHIFT; i++) { + struct page *page = i915_gem_object_get_page(obj, i); + + ret = vmf_insert_pfn(area, + (unsigned long)area->vm_start + i * PAGE_SIZE, + page_to_pfn(page)); + if (ret != VM_FAULT_NOPAGE) + break; + } + + if (write) { + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); + obj->cache_dirty = true; /* XXX flush after PAT update? */ + obj->mm.dirty = true; + } + + i915_gem_object_unpin_pages(obj); + + return ret; +} + +static vm_fault_t vm_fault_gtt(struct vm_fault *vmf) { #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT) struct vm_area_struct *area = vmf->vma; - struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data); + struct i915_mmap_offset *mmo = area->vm_private_data; + struct drm_i915_gem_object *obj = mmo->obj; struct drm_device *dev = obj->base.dev; struct drm_i915_private *i915 = to_i915(dev); struct intel_runtime_pm *rpm = &i915->runtime_pm; @@ -312,6 +371,9 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) list_add(&obj->userfault_link, &i915->ggtt.userfault_list); mutex_unlock(&i915->ggtt.vm.mutex); + /* Track the mmo associated with the fenced vma */ + vma->mmo = mmo; + if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)) intel_wakeref_auto(&i915->ggtt.userfault_wakeref, msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)); @@ -332,67 +394,36 @@ err_rpm: intel_runtime_pm_put(rpm, wakeref); i915_gem_object_unpin_pages(obj); err: - switch (ret) { - default: - WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret); - /* fallthrough */ - case -EIO: /* shmemfs failure from swap device */ - case -EFAULT: /* purged object */ - case -ENODEV: /* bad object, how did you get here! */ - return VM_FAULT_SIGBUS; - - case -ENOSPC: /* shmemfs allocation failure */ - case -ENOMEM: /* our allocation failure */ - return VM_FAULT_OOM; - - case 0: - case -EAGAIN: - case -ERESTARTSYS: - case -EINTR: - case -EBUSY: - /* - * EBUSY is ok: this just means that another thread - * already did the job. 
- */ - return VM_FAULT_NOPAGE; - } + return i915_error_to_vmf_fault(ret); } -void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj) +void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj) { struct i915_vma *vma; GEM_BUG_ON(!obj->userfault_count); - obj->userfault_count = 0; - list_del(&obj->userfault_link); - drm_vma_node_unmap(&obj->base.vma_node, - obj->base.dev->anon_inode->i_mapping); - for_each_ggtt_vma(vma, obj) - i915_vma_unset_userfault(vma); + i915_vma_revoke_mmap(vma); + + GEM_BUG_ON(obj->userfault_count); } -/** - * i915_gem_object_release_mmap - remove physical page mappings - * @obj: obj in question - * - * Preserve the reservation of the mmapping with the DRM core code, but - * relinquish ownership of the pages back to the system. - * +/* * It is vital that we remove the page mapping if we have mapped a tiled * object through the GTT and then lose the fence register due to * resource pressure. Similarly if the object has been moved out of the * aperture, than pages mapped into userspace must be revoked. Removing the * mapping will then trigger a page fault on the next user access, allowing - * fixup by i915_gem_fault(). + * fixup by vm_fault_gtt(). */ -void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj) +static void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); intel_wakeref_t wakeref; - /* Serialisation between user GTT access and our code depends upon + /* + * Serialisation between user GTT access and our code depends upon * revoking the CPU's PTE whilst the mutex is held. The next user * pagefault then has to wait until we release the mutex. * @@ -406,9 +437,10 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj) if (!obj->userfault_count) goto out; - __i915_gem_object_release_mmap(obj); + __i915_gem_object_release_mmap_gtt(obj); - /* Ensure that the CPU's PTE are revoked and there are not outstanding + /* + * Ensure that the CPU's PTE are revoked and there are not outstanding * memory transactions from userspace before we return. The TLB * flushing implied above by changing the PTE above *should* be * sufficient, an extra barrier here just provides us with a bit @@ -422,57 +454,149 @@ out: intel_runtime_pm_put(&i915->runtime_pm, wakeref); } -static int create_mmap_offset(struct drm_i915_gem_object *obj) +void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj) +{ + struct i915_mmap_offset *mmo; + + spin_lock(&obj->mmo.lock); + list_for_each_entry(mmo, &obj->mmo.offsets, offset) { + /* + * vma_node_unmap for GTT mmaps handled already in + * __i915_gem_object_release_mmap_gtt + */ + if (mmo->mmap_type == I915_MMAP_TYPE_GTT) + continue; + + spin_unlock(&obj->mmo.lock); + drm_vma_node_unmap(&mmo->vma_node, + obj->base.dev->anon_inode->i_mapping); + spin_lock(&obj->mmo.lock); + } + spin_unlock(&obj->mmo.lock); +} + +/** + * i915_gem_object_release_mmap - remove physical page mappings + * @obj: obj in question + * + * Preserve the reservation of the mmapping with the DRM core code, but + * relinquish ownership of the pages back to the system. 
+ */ +void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj) +{ + i915_gem_object_release_mmap_gtt(obj); + i915_gem_object_release_mmap_offset(obj); +} + +static struct i915_mmap_offset * +mmap_offset_attach(struct drm_i915_gem_object *obj, + enum i915_mmap_type mmap_type, + struct drm_file *file) { struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct intel_gt *gt = &i915->gt; + struct i915_mmap_offset *mmo; int err; - err = drm_gem_create_mmap_offset(&obj->base); + mmo = kmalloc(sizeof(*mmo), GFP_KERNEL); + if (!mmo) + return ERR_PTR(-ENOMEM); + + mmo->obj = obj; + mmo->dev = obj->base.dev; + mmo->file = file; + mmo->mmap_type = mmap_type; + drm_vma_node_reset(&mmo->vma_node); + + err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node, + obj->base.size / PAGE_SIZE); if (likely(!err)) - return 0; + goto out; /* Attempt to reap some mmap space from dead objects */ - err = intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT); + err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT); if (err) - return err; + goto err; i915_gem_drain_freed_objects(i915); - return drm_gem_create_mmap_offset(&obj->base); + err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node, + obj->base.size / PAGE_SIZE); + if (err) + goto err; + +out: + if (file) + drm_vma_node_allow(&mmo->vma_node, file); + + spin_lock(&obj->mmo.lock); + list_add(&mmo->offset, &obj->mmo.offsets); + spin_unlock(&obj->mmo.lock); + + return mmo; + +err: + kfree(mmo); + return ERR_PTR(err); } -int -i915_gem_mmap_gtt(struct drm_file *file, - struct drm_device *dev, - u32 handle, - u64 *offset) +static int +__assign_mmap_offset(struct drm_file *file, + u32 handle, + enum i915_mmap_type mmap_type, + u64 *offset) { struct drm_i915_gem_object *obj; - int ret; - - if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt)) - return -ENODEV; + struct i915_mmap_offset *mmo; + int err; obj = i915_gem_object_lookup(file, handle); if (!obj) return -ENOENT; - if (i915_gem_object_never_bind_ggtt(obj)) { - ret = -ENODEV; + if (mmap_type == I915_MMAP_TYPE_GTT && + i915_gem_object_never_bind_ggtt(obj)) { + err = -ENODEV; goto out; } - ret = create_mmap_offset(obj); - if (ret == 0) - *offset = drm_vma_node_offset_addr(&obj->base.vma_node); + if (mmap_type != I915_MMAP_TYPE_GTT && + !i915_gem_object_has_struct_page(obj)) { + err = -ENODEV; + goto out; + } + + mmo = mmap_offset_attach(obj, mmap_type, file); + if (IS_ERR(mmo)) { + err = PTR_ERR(mmo); + goto out; + } + *offset = drm_vma_node_offset_addr(&mmo->vma_node); + err = 0; out: i915_gem_object_put(obj); - return ret; + return err; +} + +int +i915_gem_dumb_mmap_offset(struct drm_file *file, + struct drm_device *dev, + u32 handle, + u64 *offset) +{ + enum i915_mmap_type mmap_type; + + if (boot_cpu_has(X86_FEATURE_PAT)) + mmap_type = I915_MMAP_TYPE_WC; + else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt)) + return -ENODEV; + else + mmap_type = I915_MMAP_TYPE_GTT; + + return __assign_mmap_offset(file, handle, mmap_type, offset); } /** - * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing + * i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing * @dev: DRM device * @data: GTT mapping ioctl data * @file: GEM object info @@ -487,12 +611,167 @@ out: * userspace. 
*/ int -i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) +i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) { - struct drm_i915_gem_mmap_gtt *args = data; + struct drm_i915_private *i915 = to_i915(dev); + struct drm_i915_gem_mmap_offset *args = data; + enum i915_mmap_type type; + + if (args->extensions) + return -EINVAL; + + switch (args->flags) { + case I915_MMAP_OFFSET_GTT: + if (!i915_ggtt_has_aperture(&i915->ggtt)) + return -ENODEV; + type = I915_MMAP_TYPE_GTT; + break; + + case I915_MMAP_OFFSET_WC: + if (!boot_cpu_has(X86_FEATURE_PAT)) + return -ENODEV; + type = I915_MMAP_TYPE_WC; + break; + + case I915_MMAP_OFFSET_WB: + type = I915_MMAP_TYPE_WB; + break; + + case I915_MMAP_OFFSET_UC: + if (!boot_cpu_has(X86_FEATURE_PAT)) + return -ENODEV; + type = I915_MMAP_TYPE_UC; + break; + + default: + return -EINVAL; + } - return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); + return __assign_mmap_offset(file, args->handle, type, &args->offset); +} + +static void vm_open(struct vm_area_struct *vma) +{ + struct i915_mmap_offset *mmo = vma->vm_private_data; + struct drm_i915_gem_object *obj = mmo->obj; + + GEM_BUG_ON(!obj); + i915_gem_object_get(obj); +} + +static void vm_close(struct vm_area_struct *vma) +{ + struct i915_mmap_offset *mmo = vma->vm_private_data; + struct drm_i915_gem_object *obj = mmo->obj; + + GEM_BUG_ON(!obj); + i915_gem_object_put(obj); +} + +static const struct vm_operations_struct vm_ops_gtt = { + .fault = vm_fault_gtt, + .open = vm_open, + .close = vm_close, +}; + +static const struct vm_operations_struct vm_ops_cpu = { + .fault = vm_fault_cpu, + .open = vm_open, + .close = vm_close, +}; + +/* + * This overcomes the limitation in drm_gem_mmap's assignment of a + * drm_gem_object as the vma->vm_private_data. Since we need to + * be able to resolve multiple mmap offsets which could be tied + * to a single gem object. + */ +int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct drm_vma_offset_node *node; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->minor->dev; + struct i915_mmap_offset *mmo = NULL; + struct drm_gem_object *obj = NULL; + + if (drm_dev_is_unplugged(dev)) + return -ENODEV; + + drm_vma_offset_lock_lookup(dev->vma_offset_manager); + node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager, + vma->vm_pgoff, + vma_pages(vma)); + if (likely(node)) { + mmo = container_of(node, struct i915_mmap_offset, + vma_node); + /* + * In our dependency chain, the drm_vma_offset_node + * depends on the validity of the mmo, which depends on + * the gem object. However the only reference we have + * at this point is the mmo (as the parent of the node). + * Try to check if the gem object was at least cleared. + */ + if (!mmo || !mmo->obj) { + drm_vma_offset_unlock_lookup(dev->vma_offset_manager); + return -EINVAL; + } + /* + * Skip 0-refcnted objects as it is in the process of being + * destroyed and will be invalid when the vma manager lock + * is released. 
+ */ + obj = &mmo->obj->base; + if (!kref_get_unless_zero(&obj->refcount)) + obj = NULL; + } + drm_vma_offset_unlock_lookup(dev->vma_offset_manager); + if (!obj) + return -EINVAL; + + if (!drm_vma_node_is_allowed(node, priv)) { + drm_gem_object_put_unlocked(obj); + return -EACCES; + } + + if (i915_gem_object_is_readonly(to_intel_bo(obj))) { + if (vma->vm_flags & VM_WRITE) { + drm_gem_object_put_unlocked(obj); + return -EINVAL; + } + vma->vm_flags &= ~VM_MAYWRITE; + } + + vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_private_data = mmo; + + switch (mmo->mmap_type) { + case I915_MMAP_TYPE_WC: + vma->vm_page_prot = + pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); + vma->vm_ops = &vm_ops_cpu; + break; + + case I915_MMAP_TYPE_WB: + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + vma->vm_ops = &vm_ops_cpu; + break; + + case I915_MMAP_TYPE_UC: + vma->vm_page_prot = + pgprot_noncached(vm_get_page_prot(vma->vm_flags)); + vma->vm_ops = &vm_ops_cpu; + break; + + case I915_MMAP_TYPE_GTT: + vma->vm_page_prot = + pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); + vma->vm_ops = &vm_ops_gtt; + break; + } + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + + return 0; } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h new file mode 100644 index 000000000000..862e01b7cb69 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef __I915_GEM_MMAN_H__ +#define __I915_GEM_MMAN_H__ + +#include <linux/mm_types.h> +#include <linux/types.h> + +struct drm_device; +struct drm_file; +struct drm_i915_gem_object; +struct file; +struct i915_mmap_offset; +struct mutex; + +int i915_gem_mmap_gtt_version(void); +int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma); + +int i915_gem_dumb_mmap_offset(struct drm_file *file_priv, + struct drm_device *dev, + u32 handle, u64 *offset); + +void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj); +void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj); +void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj); + +#endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 458945e1823e..16d611db9ca6 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -29,6 +29,7 @@ #include "i915_drv.h" #include "i915_gem_clflush.h" #include "i915_gem_context.h" +#include "i915_gem_mman.h" #include "i915_gem_object.h" #include "i915_globals.h" #include "i915_trace.h" @@ -61,6 +62,9 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, INIT_LIST_HEAD(&obj->lut_list); + spin_lock_init(&obj->mmo.lock); + INIT_LIST_HEAD(&obj->mmo.offsets); + init_rcu_head(&obj->rcu); obj->ops = ops; @@ -97,6 +101,7 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) struct drm_i915_gem_object *obj = to_intel_bo(gem); struct drm_i915_file_private *fpriv = file->driver_priv; struct i915_lut_handle *lut, *ln; + struct i915_mmap_offset *mmo; LIST_HEAD(close); i915_gem_object_lock(obj); @@ -111,6 +116,17 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) } i915_gem_object_unlock(obj); + spin_lock(&obj->mmo.lock); + list_for_each_entry(mmo, &obj->mmo.offsets, offset) { + if (mmo->file != file) + continue; + + spin_unlock(&obj->mmo.lock); + 
drm_vma_node_revoke(&mmo->vma_node, file); + spin_lock(&obj->mmo.lock); + } + spin_unlock(&obj->mmo.lock); + list_for_each_entry_safe(lut, ln, &close, obj_link) { struct i915_gem_context *ctx = lut->ctx; struct i915_vma *vma; @@ -158,6 +174,8 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, wakeref = intel_runtime_pm_get(&i915->runtime_pm); llist_for_each_entry_safe(obj, on, freed, freed) { + struct i915_mmap_offset *mmo, *mn; + trace_i915_gem_object_destroy(obj); if (!list_empty(&obj->vma.list)) { @@ -183,6 +201,15 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, spin_unlock(&obj->vma.lock); } + i915_gem_object_release_mmap(obj); + + list_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset) { + drm_vma_offset_remove(obj->base.dev->vma_offset_manager, + &mmo->vma_node); + kfree(mmo); + } + INIT_LIST_HEAD(&obj->mmo.offsets); + GEM_BUG_ON(atomic_read(&obj->bind_count)); GEM_BUG_ON(obj->userfault_count); GEM_BUG_ON(!list_empty(&obj->lut_list)); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index e5750d506cc9..a1eb7c0b23ac 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -132,13 +132,13 @@ void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj, static inline void i915_gem_object_set_readonly(struct drm_i915_gem_object *obj) { - obj->base.vma_node.readonly = true; + obj->flags |= I915_BO_READONLY; } static inline bool i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj) { - return obj->base.vma_node.readonly; + return obj->flags & I915_BO_READONLY; } static inline bool @@ -387,9 +387,6 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj) i915_gem_object_unpin_pages(obj); } -void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj); -void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj); - void i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index 15f8297dc34e..2d404e6f63df 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -63,6 +63,23 @@ struct drm_i915_gem_object_ops { void (*release)(struct drm_i915_gem_object *obj); }; +enum i915_mmap_type { + I915_MMAP_TYPE_GTT = 0, + I915_MMAP_TYPE_WC, + I915_MMAP_TYPE_WB, + I915_MMAP_TYPE_UC, +}; + +struct i915_mmap_offset { + struct drm_device *dev; + struct drm_vma_offset_node vma_node; + struct drm_i915_gem_object *obj; + struct drm_file *file; + enum i915_mmap_type mmap_type; + + struct list_head offset; +}; + struct drm_i915_gem_object { struct drm_gem_object base; @@ -118,12 +135,18 @@ struct drm_i915_gem_object { unsigned int userfault_count; struct list_head userfault_link; + struct { + spinlock_t lock; /* Protects access to mmo offsets */ + struct list_head offsets; + } mmo; + I915_SELFTEST_DECLARE(struct list_head st_link); unsigned long flags; #define I915_BO_ALLOC_CONTIGUOUS BIT(0) #define I915_BO_ALLOC_VOLATILE BIT(1) #define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE) +#define I915_BO_READONLY BIT(2) /* * Is the object to be mapped as read-only to the GPU diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index f402c2c415c2..75197ca696a8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ 
b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -8,6 +8,7 @@ #include "i915_gem_object.h" #include "i915_scatterlist.h" #include "i915_gem_lmem.h" +#include "i915_gem_mman.h" void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, struct sg_table *pages, @@ -207,6 +208,8 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj) goto unlock; } + i915_gem_object_release_mmap_offset(obj); + /* * ->put_pages might need to allocate memory for the bit17 swizzle * array, hence protect them from being reaped by removing them from gtt diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c index 1fa592d82af5..6c7825a2dc2a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c @@ -11,6 +11,7 @@ #include "i915_drv.h" #include "i915_gem.h" #include "i915_gem_ioctls.h" +#include "i915_gem_mman.h" #include "i915_gem_object.h" /** diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 6ce9167f8c9f..591435c5f368 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -565,16 +565,16 @@ static bool assert_mmap_offset(struct drm_i915_private *i915, int expected) { struct drm_i915_gem_object *obj; - int err; + struct i915_mmap_offset *mmo; obj = i915_gem_object_create_internal(i915, size); if (IS_ERR(obj)) return PTR_ERR(obj); - err = create_mmap_offset(obj); + mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL); i915_gem_object_put(obj); - return err == expected; + return PTR_ERR_OR_ZERO(mmo) == expected; } static void disable_retire_worker(struct drm_i915_private *i915) @@ -609,7 +609,8 @@ static int igt_mmap_offset_exhaustion(void *arg) struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm; struct drm_i915_gem_object *obj; struct drm_mm_node *hole, *next; - int loop, err; + struct i915_mmap_offset *mmo; + int loop, err = 0; /* Disable background reaper */ disable_retire_worker(i915); @@ -673,9 +674,10 @@ static int igt_mmap_offset_exhaustion(void *arg) goto out; } - err = create_mmap_offset(obj); - if (err) { + mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL); + if (IS_ERR(mmo)) { pr_err("Unable to insert object into reclaimed hole\n"); + err = PTR_ERR(mmo); goto err_obj; } @@ -724,14 +726,15 @@ err_obj: } #define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24)) -static int igt_mmap_gtt(void *arg) +static int igt_mmap(void *arg, enum i915_mmap_type type) { struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; + struct i915_mmap_offset *mmo; struct vm_area_struct *area; unsigned long addr; void *vaddr; - int err, i; + int err = 0, i; if (!i915_ggtt_has_aperture(&i915->ggtt)) return 0; @@ -749,18 +752,19 @@ static int igt_mmap_gtt(void *arg) i915_gem_object_flush_map(obj); i915_gem_object_unpin_map(obj); - err = create_mmap_offset(obj); - if (err) + mmo = mmap_offset_attach(obj, type, NULL); + if (IS_ERR(mmo)) { + err = PTR_ERR(mmo); goto out; + } - addr = igt_mmap_node(i915, &obj->base.vma_node, - 0, PROT_WRITE, MAP_SHARED); + addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED); if (IS_ERR_VALUE(addr)) { err = addr; goto out; } - pr_debug("igt_mmap(obj:gtt) @ %lx\n", addr); + pr_debug("igt_mmap() @ %lx\n", addr); area = find_vma(current->mm, addr); if (!area) { @@ -769,8 +773,8 @@ static int igt_mmap_gtt(void *arg) goto out_unmap; } - if (area->vm_private_data != obj) { - 
pr_err("vm_area_struct did not point back to our object!\n"); + if (area->vm_private_data != mmo) { + pr_err("vm_area_struct did not point back to our mmap_offset object!\n"); err = -EINVAL; goto out_unmap; } @@ -780,14 +784,14 @@ static int igt_mmap_gtt(void *arg) u32 x; if (get_user(x, ux)) { - pr_err("Unable to read from GTT mmap, offset:%zd\n", + pr_err("Unable to read from mmap, offset:%zd\n", i * sizeof(x)); err = -EFAULT; break; } if (x != expand32(POISON_INUSE)) { - pr_err("Read incorrect value from GTT mmap, offset:%zd, found:%x, expected:%x\n", + pr_err("Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n", i * sizeof(x), x, expand32(POISON_INUSE)); err = -EINVAL; break; @@ -795,7 +799,7 @@ static int igt_mmap_gtt(void *arg) x = expand32(POISON_FREE); if (put_user(x, ux)) { - pr_err("Unable to write to GTT mmap, offset:%zd\n", + pr_err("Unable to write to mmap, offset:%zd\n", i * sizeof(x)); err = -EFAULT; break; @@ -811,7 +815,7 @@ out_unmap: goto out; } if (err == 0 && memchr_inv(vaddr, POISON_FREE, PAGE_SIZE)) { - pr_err("Write via GGTT mmap did not land in backing store\n"); + pr_err("Write via mmap did not land in backing store\n"); err = -EINVAL; } i915_gem_object_unpin_map(obj); @@ -821,6 +825,16 @@ out: return err; } +static int igt_mmap_gtt(void *arg) +{ + return igt_mmap(arg, I915_MMAP_TYPE_GTT); +} + +static int igt_mmap_cpu(void *arg) +{ + return igt_mmap(arg, I915_MMAP_TYPE_WC); +} + static int check_present_pte(pte_t *pte, unsigned long addr, void *data) { if (!pte_present(*pte) || pte_none(*pte)) { @@ -873,10 +887,11 @@ static int prefault_range(u64 start, u64 len) return __get_user(c, end - 1); } -static int igt_mmap_gtt_revoke(void *arg) +static int igt_mmap_revoke(void *arg, enum i915_mmap_type type) { struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; + struct i915_mmap_offset *mmo; unsigned long addr; int err; @@ -887,12 +902,13 @@ static int igt_mmap_gtt_revoke(void *arg) if (IS_ERR(obj)) return PTR_ERR(obj); - err = create_mmap_offset(obj); - if (err) + mmo = mmap_offset_attach(obj, type, NULL); + if (IS_ERR(mmo)) { + err = PTR_ERR(mmo); goto out; + } - addr = igt_mmap_node(i915, &obj->base.vma_node, - 0, PROT_WRITE, MAP_SHARED); + addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED); if (IS_ERR_VALUE(addr)) { err = addr; goto out; @@ -902,7 +918,8 @@ static int igt_mmap_gtt_revoke(void *arg) if (err) goto out_unmap; - GEM_BUG_ON(!atomic_read(&obj->bind_count)); + GEM_BUG_ON(mmo->mmap_type == I915_MMAP_TYPE_GTT && + !atomic_read(&obj->bind_count)); err = check_present(addr, obj->base.size); if (err) @@ -920,6 +937,15 @@ static int igt_mmap_gtt_revoke(void *arg) } GEM_BUG_ON(atomic_read(&obj->bind_count)); + if (type != I915_MMAP_TYPE_GTT) { + __i915_gem_object_put_pages(obj); + if (i915_gem_object_has_pages(obj)) { + pr_err("Failed to put-pages object!\n"); + err = -EINVAL; + goto out_unmap; + } + } + err = check_absent(addr, obj->base.size); if (err) goto out_unmap; @@ -931,6 +957,16 @@ out: return err; } +static int igt_mmap_gtt_revoke(void *arg) +{ + return igt_mmap_revoke(arg, I915_MMAP_TYPE_GTT); +} + +static int igt_mmap_cpu_revoke(void *arg) +{ + return igt_mmap_revoke(arg, I915_MMAP_TYPE_WC); +} + int i915_gem_mman_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { @@ -938,7 +974,9 @@ int i915_gem_mman_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_smoke_tiling), SUBTEST(igt_mmap_offset_exhaustion), SUBTEST(igt_mmap_gtt), + SUBTEST(igt_mmap_cpu), 
SUBTEST(igt_mmap_gtt_revoke), + SUBTEST(igt_mmap_cpu_revoke), }; return i915_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 36189238e13c..0e94b720c993 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -678,8 +678,13 @@ static void revoke_mmaps(struct intel_gt *gt) continue; GEM_BUG_ON(vma->fence != >->ggtt->fence_regs[i]); - node = &vma->obj->base.vma_node; + + if (!vma->mmo) + continue; + + node = &vma->mmo->vma_node; vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT; + unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping, drm_vma_node_offset_addr(node) + vma_offset, vma->size, diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 018914ba2005..5d7e11927729 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -61,6 +61,7 @@ #include "gem/i915_gem_context.h" #include "gem/i915_gem_ioctls.h" +#include "gem/i915_gem_mman.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "gt/intel_rc6.h" @@ -2660,18 +2661,12 @@ const struct dev_pm_ops i915_pm_ops = { .runtime_resume = intel_runtime_resume, }; -static const struct vm_operations_struct i915_gem_vm_ops = { - .fault = i915_gem_fault, - .open = drm_gem_vm_open, - .close = drm_gem_vm_close, -}; - static const struct file_operations i915_driver_fops = { .owner = THIS_MODULE, .open = drm_open, .release = drm_release, .unlocked_ioctl = drm_ioctl, - .mmap = drm_gem_mmap, + .mmap = i915_gem_mmap, .poll = drm_poll, .read = drm_read, .compat_ioctl = i915_compat_ioctl, @@ -2718,7 +2713,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW), @@ -2760,7 +2755,6 @@ static struct drm_driver driver = { .gem_close_object = i915_gem_close_object, .gem_free_object_unlocked = i915_gem_free_object, - .gem_vm_ops = &i915_gem_vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, @@ -2771,7 +2765,8 @@ static struct drm_driver driver = { .get_scanout_position = i915_get_crtc_scanoutpos, .dumb_create = i915_gem_dumb_create, - .dumb_map_offset = i915_gem_mmap_gtt, + .dumb_map_offset = i915_gem_dumb_mmap_offset, + .ioctls = i915_ioctls, .num_ioctls = ARRAY_SIZE(i915_ioctls), .fops = &i915_driver_fops, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 14744c114475..0d4ad18e10dd 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1857,9 +1857,6 @@ i915_mutex_lock_interruptible(struct drm_device *dev) int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); -int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, - u32 handle, u64 *offset); -int i915_gem_mmap_gtt_version(void); int __must_check i915_gem_set_global_seqno(struct 
drm_device *dev, u32 seqno); @@ -1882,7 +1879,6 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv); void i915_gem_suspend(struct drm_i915_private *dev_priv); void i915_gem_suspend_late(struct drm_i915_private *dev_priv); void i915_gem_resume(struct drm_i915_private *dev_priv); -vm_fault_t i915_gem_fault(struct vm_fault *vmf); int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file); void i915_gem_release(struct drm_device *dev, struct drm_file *file); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index eb9e2609c569..07b7a8a671d0 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -44,6 +44,7 @@ #include "gem/i915_gem_clflush.h" #include "gem/i915_gem_context.h" #include "gem/i915_gem_ioctls.h" +#include "gem/i915_gem_mman.h" #include "gem/i915_gem_pm.h" #include "gt/intel_context.h" #include "gt/intel_engine_user.h" @@ -885,7 +886,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915) list_for_each_entry_safe(obj, on, &i915->ggtt.userfault_list, userfault_link) - __i915_gem_object_release_mmap(obj); + __i915_gem_object_release_mmap_gtt(obj); /* * The fence will be lost when the device powers down. If any were diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c index cf8a8c3ef047..54fce81d5724 100644 --- a/drivers/gpu/drm/i915/i915_getparam.c +++ b/drivers/gpu/drm/i915/i915_getparam.c @@ -2,6 +2,7 @@ * SPDX-License-Identifier: MIT */ +#include "gem/i915_gem_mman.h" #include "gt/intel_engine_user.h" #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index e5512f26e20a..564fa97558c5 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -1054,17 +1054,16 @@ static void __i915_vma_iounmap(struct i915_vma *vma) void i915_vma_revoke_mmap(struct i915_vma *vma) { - struct drm_vma_offset_node *node = &vma->obj->base.vma_node; + struct drm_vma_offset_node *node; u64 vma_offset; - lockdep_assert_held(&vma->vm->mutex); - if (!i915_vma_has_userfault(vma)) return; GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma)); GEM_BUG_ON(!vma->obj->userfault_count); + node = &vma->mmo->vma_node; vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT; unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping, drm_vma_node_offset_addr(node) + vma_offset, diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 465932813bc5..f09f4f513c41 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -63,6 +63,9 @@ struct i915_vma { u64 display_alignment; struct i915_page_sizes page_sizes; + /* mmap-offset associated with fencing for this vma */ + struct i915_mmap_offset *mmo; + u32 fence_size; u32 fence_alignment; |