Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 525
1 file changed, 276 insertions(+), 249 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 24b5b046754b..88f3628b4e29 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -38,6 +38,7 @@ #include <linux/reservation.h> #include <linux/shmem_fs.h> #include <linux/slab.h> +#include <linux/stop_machine.h> #include <linux/swap.h> #include <linux/pci.h> #include <linux/dma-buf.h> @@ -68,11 +69,10 @@ insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size) { memset(node, 0, sizeof(*node)); - return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node, - size, 0, -1, - 0, ggtt->mappable_end, - DRM_MM_SEARCH_DEFAULT, - DRM_MM_CREATE_DEFAULT); + return drm_mm_insert_node_in_range(&ggtt->base.mm, node, + size, 0, I915_COLOR_UNEVICTABLE, + 0, ggtt->mappable_end, + DRM_MM_INSERT_LOW); } static void @@ -440,7 +440,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, timeout = i915_gem_object_wait_fence(shared[i], flags, timeout, rps); - if (timeout <= 0) + if (timeout < 0) break; dma_fence_put(shared[i]); @@ -453,7 +453,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, excl = reservation_object_get_excl_rcu(resv); } - if (excl && timeout > 0) + if (excl && timeout >= 0) timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps); dma_fence_put(excl); @@ -612,9 +612,8 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, return 0; } -void *i915_gem_object_alloc(struct drm_device *dev) +void *i915_gem_object_alloc(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL); } @@ -626,7 +625,7 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj) static int i915_gem_create(struct drm_file *file, - struct drm_device *dev, + struct drm_i915_private *dev_priv, uint64_t size, uint32_t *handle_p) { @@ -639,7 +638,7 @@ i915_gem_create(struct drm_file *file, return -EINVAL; /* Allocate the new object */ - obj = i915_gem_object_create(dev, size); + obj = i915_gem_object_create(dev_priv, size); if (IS_ERR(obj)) return PTR_ERR(obj); @@ -661,7 +660,7 @@ i915_gem_dumb_create(struct drm_file *file, /* have to work out size/pitch and return them */ args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64); args->size = args->pitch * args->height; - return i915_gem_create(file, dev, + return i915_gem_create(file, to_i915(dev), args->size, &args->handle); } @@ -675,11 +674,12 @@ int i915_gem_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_create *args = data; - i915_gem_flush_free_objects(to_i915(dev)); + i915_gem_flush_free_objects(dev_priv); - return i915_gem_create(file, dev, + return i915_gem_create(file, dev_priv, args->size, &args->handle); } @@ -1114,8 +1114,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, return -ENOENT; /* Bounds check source. */ - if (args->offset > obj->base.size || - args->size > obj->base.size - args->offset) { + if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) { ret = -EINVAL; goto out; } @@ -1428,8 +1427,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, return -ENOENT; /* Bounds check destination. 
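The open-coded bounds checks in the pread/pwrite ioctls are folded into range_overflows_t() in this diff. As a rough sketch (the helper name below is illustrative, not the driver's macro), an overflow-safe form of the test being removed looks like:

    #include <linux/types.h>

    /* Reject a [offset, offset + len) window that overflows or falls outside
     * an object of 'size' bytes, without ever computing offset + len; this is
     * the same test as the removed "offset > size || len > size - offset". */
    static inline bool window_overflows(u64 offset, u64 len, u64 size)
    {
            return offset > size || len > size - offset;
    }
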
*/ - if (args->offset > obj->base.size || - args->size > obj->base.size - args->offset) { + if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) { ret = -EINVAL; goto err; } @@ -1491,7 +1489,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) list_for_each_entry(vma, &obj->vma_list, obj_link) { if (!i915_vma_is_ggtt(vma)) - continue; + break; if (i915_vma_is_active(vma)) continue; @@ -1696,12 +1694,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, static unsigned int tile_row_pages(struct drm_i915_gem_object *obj) { - u64 size; - - size = i915_gem_object_get_stride(obj); - size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8; - - return size >> PAGE_SHIFT; + return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT; } /** @@ -1754,6 +1747,29 @@ int i915_gem_mmap_gtt_version(void) return 1; } +static inline struct i915_ggtt_view +compute_partial_view(struct drm_i915_gem_object *obj, + pgoff_t page_offset, + unsigned int chunk) +{ + struct i915_ggtt_view view; + + if (i915_gem_object_is_tiled(obj)) + chunk = roundup(chunk, tile_row_pages(obj)); + + view.type = I915_GGTT_VIEW_PARTIAL; + view.partial.offset = rounddown(page_offset, chunk); + view.partial.size = + min_t(unsigned int, chunk, + (obj->base.size >> PAGE_SHIFT) - view.partial.offset); + + /* If the partial covers the entire object, just create a normal VMA. */ + if (chunk >= obj->base.size >> PAGE_SHIFT) + view.type = I915_GGTT_VIEW_NORMAL; + + return view; +} + /** * i915_gem_fault - fault a page into the GTT * @area: CPU VMA in question @@ -1830,26 +1846,9 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf) /* Now pin it into the GTT as needed */ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags); if (IS_ERR(vma)) { - struct i915_ggtt_view view; - unsigned int chunk_size; - /* Use a partial view if it is bigger than available space */ - chunk_size = MIN_CHUNK_PAGES; - if (i915_gem_object_is_tiled(obj)) - chunk_size = roundup(chunk_size, tile_row_pages(obj)); - - memset(&view, 0, sizeof(view)); - view.type = I915_GGTT_VIEW_PARTIAL; - view.params.partial.offset = rounddown(page_offset, chunk_size); - view.params.partial.size = - min_t(unsigned int, chunk_size, - vma_pages(area) - view.params.partial.offset); - - /* If the partial covers the entire object, just create a - * normal VMA. - */ - if (chunk_size >= obj->base.size >> PAGE_SHIFT) - view.type = I915_GGTT_VIEW_NORMAL; + struct i915_ggtt_view view = + compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES); /* Userspace is now writing through an untracked VMA, abandon * all hope that the hardware is able to track future writes. @@ -1878,7 +1877,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf) /* Finally, remap it using the new GTT offset */ ret = remap_io_mapping(area, - area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT), + area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT), (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT, min_t(u64, vma->size, area->vm_end - area->vm_start), &ggtt->mappable); @@ -2029,91 +2028,27 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv) } } -/** - * i915_gem_get_ggtt_size - return required global GTT size for an object - * @dev_priv: i915 device - * @size: object size - * @tiling_mode: tiling mode - * - * Return the required global GTT size for an object, taking into account - * potential fence register mapping. 
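compute_partial_view() centralises the chunk arithmetic that i915_gem_fault() used to open-code. Stripped of the GGTT-view plumbing, the window selection reduces to the sketch below (illustrative names, all quantities in pages):

    #include <linux/kernel.h>

    /* Pick a chunk-aligned window of pages around the faulting page and clamp
     * it to the end of the object, as compute_partial_view() does above. */
    static void partial_window(unsigned long faulting_page,
                               unsigned long object_pages,
                               unsigned int chunk,
                               unsigned long *first, unsigned long *count)
    {
            *first = rounddown(faulting_page, chunk);
            *count = min_t(unsigned long, chunk, object_pages - *first);
    }
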
- */ -u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv, - u64 size, int tiling_mode) -{ - u64 ggtt_size; - - GEM_BUG_ON(size == 0); - - if (INTEL_GEN(dev_priv) >= 4 || - tiling_mode == I915_TILING_NONE) - return size; - - /* Previous chips need a power-of-two fence region when tiling */ - if (IS_GEN3(dev_priv)) - ggtt_size = 1024*1024; - else - ggtt_size = 512*1024; - - while (ggtt_size < size) - ggtt_size <<= 1; - - return ggtt_size; -} - -/** - * i915_gem_get_ggtt_alignment - return required global GTT alignment - * @dev_priv: i915 device - * @size: object size - * @tiling_mode: tiling mode - * @fenced: is fenced alignment required or not - * - * Return the required global GTT alignment for an object, taking into account - * potential fence register mapping. - */ -u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size, - int tiling_mode, bool fenced) -{ - GEM_BUG_ON(size == 0); - - /* - * Minimum alignment is 4k (GTT page size), but might be greater - * if a fence register is needed for the object. - */ - if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) || - tiling_mode == I915_TILING_NONE) - return 4096; - - /* - * Previous chips need to be aligned to the size of the smallest - * fence register that can contain the object. - */ - return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode); -} - static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) { struct drm_i915_private *dev_priv = to_i915(obj->base.dev); int err; err = drm_gem_create_mmap_offset(&obj->base); - if (!err) + if (likely(!err)) return 0; - /* We can idle the GPU locklessly to flush stale objects, but in order - * to claim that space for ourselves, we need to take the big - * struct_mutex to free the requests+objects and allocate our slot. - */ - err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE); - if (err) - return err; + /* Attempt to reap some mmap space from dead objects */ + do { + err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE); + if (err) + break; - err = i915_mutex_lock_interruptible(&dev_priv->drm); - if (!err) { - i915_gem_retire_requests(dev_priv); + i915_gem_drain_freed_objects(dev_priv); err = drm_gem_create_mmap_offset(&obj->base); - mutex_unlock(&dev_priv->drm.struct_mutex); - } + if (!err) + break; + + } while (flush_delayed_work(&dev_priv->gt.retire_work)); return err; } @@ -2306,6 +2241,7 @@ static void i915_sg_trim(struct sg_table *orig_st) /* called before being DMA mapped, no need to copy sg->dma_* */ new_sg = sg_next(new_sg); } + GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */ sg_free_table(orig_st); @@ -2627,35 +2563,34 @@ err_unlock: goto out_unlock; } -static bool i915_context_is_banned(const struct i915_gem_context *ctx) +static bool ban_context(const struct i915_gem_context *ctx) { - unsigned long elapsed; + return (i915_gem_context_is_bannable(ctx) && + ctx->ban_score >= CONTEXT_SCORE_BAN_THRESHOLD); +} - if (ctx->hang_stats.banned) - return true; +static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx) +{ + ctx->guilty_count++; + ctx->ban_score += CONTEXT_SCORE_GUILTY; + if (ban_context(ctx)) + i915_gem_context_set_banned(ctx); - elapsed = get_seconds() - ctx->hang_stats.guilty_ts; - if (ctx->hang_stats.ban_period_seconds && - elapsed <= ctx->hang_stats.ban_period_seconds) { - DRM_DEBUG("context hanging too fast, banning!\n"); - return true; - } + DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? 
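i915_gem_get_ggtt_size() and i915_gem_get_ggtt_alignment() are removed above; later hunks in this diff switch their users over to vma->fence_size. For reference, the heart of the removed sizing rule was a power-of-two round-up from the generation's minimum fence granule:

    #include <linux/types.h>

    /* Pre-gen4 fence regions must be a power of two: at least 1 MiB on gen3,
     * 512 KiB on older parts; mirrors the loop in the removed helper. */
    static u64 fence_region_size(u64 obj_size, bool is_gen3)
    {
            u64 ggtt_size = is_gen3 ? 1024 * 1024 : 512 * 1024;

            while (ggtt_size < obj_size)
                    ggtt_size <<= 1;

            return ggtt_size;
    }
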
%s\n", + ctx->name, ctx->ban_score, + yesno(i915_gem_context_is_banned(ctx))); + + if (!i915_gem_context_is_banned(ctx) || IS_ERR_OR_NULL(ctx->file_priv)) + return; - return false; + ctx->file_priv->context_bans++; + DRM_DEBUG_DRIVER("client %s has had %d context banned\n", + ctx->name, ctx->file_priv->context_bans); } -static void i915_set_reset_status(struct i915_gem_context *ctx, - const bool guilty) +static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx) { - struct i915_ctx_hang_stats *hs = &ctx->hang_stats; - - if (guilty) { - hs->banned = i915_context_is_banned(ctx); - hs->batch_active++; - hs->guilty_ts = get_seconds(); - } else { - hs->batch_pending++; - } + ctx->active_count++; } struct drm_i915_gem_request * @@ -2675,13 +2610,52 @@ i915_gem_find_active_request(struct intel_engine_cs *engine) if (__i915_gem_request_completed(request)) continue; + GEM_BUG_ON(request->engine != engine); return request; } return NULL; } -static void reset_request(struct drm_i915_gem_request *request) +static bool engine_stalled(struct intel_engine_cs *engine) +{ + if (!engine->hangcheck.stalled) + return false; + + /* Check for possible seqno movement after hang declaration */ + if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) { + DRM_DEBUG_DRIVER("%s pardoned\n", engine->name); + return false; + } + + return true; +} + +int i915_gem_reset_prepare(struct drm_i915_private *dev_priv) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + /* Ensure irq handler finishes, and not run again. */ + for_each_engine(engine, dev_priv, id) { + struct drm_i915_gem_request *request; + + tasklet_kill(&engine->irq_tasklet); + + if (engine_stalled(engine)) { + request = i915_gem_find_active_request(engine); + if (request && request->fence.error == -EIO) + err = -EIO; /* Previous reset failed! 
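The hang bookkeeping above replaces the old time-based ban (guilty_ts / ban_period_seconds) with an accumulating per-context score. A reduced sketch of the scheme, with the driver's CONTEXT_SCORE_* constants represented as plain parameters:

    #include <linux/types.h>

    struct hang_score {
            int ban_score;
            bool bannable;
            bool banned;
    };

    /* Every hang the context is found guilty of adds to its score; once the
     * score reaches the threshold (and the context is bannable), it is marked
     * banned, as ban_context()/i915_gem_context_mark_guilty() do above. */
    static void mark_guilty_example(struct hang_score *s, int guilty_points,
                                    int ban_threshold)
    {
            s->ban_score += guilty_points;
            if (s->bannable && s->ban_score >= ban_threshold)
                    s->banned = true;
    }
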
*/ + } + } + + i915_gem_revoke_fences(dev_priv); + + return err; +} + +static void skip_request(struct drm_i915_gem_request *request) { void *vaddr = request->ring->vaddr; u32 head; @@ -2696,66 +2670,93 @@ static void reset_request(struct drm_i915_gem_request *request) head = 0; } memset(vaddr + head, 0, request->postfix - head); + + dma_fence_set_error(&request->fence, -EIO); } -static void i915_gem_reset_engine(struct intel_engine_cs *engine) +static void engine_skip_context(struct drm_i915_gem_request *request) { - struct drm_i915_gem_request *request; - struct i915_gem_context *incomplete_ctx; + struct intel_engine_cs *engine = request->engine; + struct i915_gem_context *hung_ctx = request->ctx; struct intel_timeline *timeline; unsigned long flags; - bool ring_hung; - if (engine->irq_seqno_barrier) - engine->irq_seqno_barrier(engine); + timeline = i915_gem_context_lookup_timeline(hung_ctx, engine); - request = i915_gem_find_active_request(engine); - if (!request) - return; + spin_lock_irqsave(&engine->timeline->lock, flags); + spin_lock(&timeline->lock); - ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG; - if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) - ring_hung = false; + list_for_each_entry_continue(request, &engine->timeline->requests, link) + if (request->ctx == hung_ctx) + skip_request(request); - i915_set_reset_status(request->ctx, ring_hung); - if (!ring_hung) - return; + list_for_each_entry(request, &timeline->requests, link) + skip_request(request); - DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n", - engine->name, request->global_seqno); + spin_unlock(&timeline->lock); + spin_unlock_irqrestore(&engine->timeline->lock, flags); +} - /* Setup the CS to resume from the breadcrumb of the hung request */ - engine->reset_hw(engine, request); +/* Returns true if the request was guilty of hang */ +static bool i915_gem_reset_request(struct drm_i915_gem_request *request) +{ + /* Read once and return the resolution */ + const bool guilty = engine_stalled(request->engine); - /* Users of the default context do not rely on logical state - * preserved between batches. They have to emit full state on - * every batch and so it is safe to execute queued requests following - * the hang. + /* The guilty request will get skipped on a hung engine. + * + * Users of client default contexts do not rely on logical + * state preserved between batches so it is safe to execute + * queued requests following the hang. Non default contexts + * rely on preserved state, so skipping a batch loses the + * evolution of the state and it needs to be considered corrupted. + * Executing more queued batches on top of corrupted state is + * risky. But we take the risk by trying to advance through + * the queued requests in order to make the client behaviour + * more predictable around resets, by not throwing away random + * amount of batches it has prepared for execution. Sophisticated + * clients can use gem_reset_stats_ioctl and dma fence status + * (exported via sync_file info ioctl on explicit fences) to observe + * when it loses the context state and should rebuild accordingly. * - * Other contexts preserve state, now corrupt. We want to skip all - * queued requests that reference the corrupt context. + * The context ban, and ultimately the client ban, mechanism are safety + * valves if client submission ends up resulting in nothing more than + * subsequent hangs. 
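skip_request() above (the old reset_request(), which now also records -EIO on the request's fence) works by blanking the hung request's ring payload. The core idea, separated from the request structures:

    #include <linux/string.h>
    #include <linux/types.h>

    /* Zero the dwords between a request's head and its breadcrumb (postfix);
     * zeroed dwords decode as MI_NOOP, so the engine can run through the
     * range after reset without replaying the hung batch. */
    static void nop_out_payload(void *ring_vaddr, u32 head, u32 postfix,
                                u32 ring_size)
    {
            if (postfix < head) {           /* payload wraps around the ring */
                    memset(ring_vaddr + head, 0, ring_size - head);
                    head = 0;
            }
            memset(ring_vaddr + head, 0, postfix - head);
    }
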
*/ - incomplete_ctx = request->ctx; - if (i915_gem_context_is_default(incomplete_ctx)) - return; - timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine); + if (guilty) { + i915_gem_context_mark_guilty(request->ctx); + skip_request(request); + } else { + i915_gem_context_mark_innocent(request->ctx); + dma_fence_set_error(&request->fence, -EAGAIN); + } - spin_lock_irqsave(&engine->timeline->lock, flags); - spin_lock(&timeline->lock); + return guilty; +} - list_for_each_entry_continue(request, &engine->timeline->requests, link) - if (request->ctx == incomplete_ctx) - reset_request(request); +static void i915_gem_reset_engine(struct intel_engine_cs *engine) +{ + struct drm_i915_gem_request *request; - list_for_each_entry(request, &timeline->requests, link) - reset_request(request); + if (engine->irq_seqno_barrier) + engine->irq_seqno_barrier(engine); - spin_unlock(&timeline->lock); - spin_unlock_irqrestore(&engine->timeline->lock, flags); + request = i915_gem_find_active_request(engine); + if (request && i915_gem_reset_request(request)) { + DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n", + engine->name, request->global_seqno); + + /* If this context is now banned, skip all pending requests. */ + if (i915_gem_context_is_banned(request->ctx)) + engine_skip_context(request); + } + + /* Setup the CS to resume from the breadcrumb of the hung request */ + engine->reset_hw(engine, request); } -void i915_gem_reset(struct drm_i915_private *dev_priv) +void i915_gem_reset_finish(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; enum intel_engine_id id; @@ -2779,14 +2780,30 @@ void i915_gem_reset(struct drm_i915_private *dev_priv) static void nop_submit_request(struct drm_i915_gem_request *request) { + dma_fence_set_error(&request->fence, -EIO); i915_gem_request_submit(request); intel_engine_init_global_seqno(request->engine, request->global_seqno); } -static void i915_gem_cleanup_engine(struct intel_engine_cs *engine) +static void engine_set_wedged(struct intel_engine_cs *engine) { + struct drm_i915_gem_request *request; + unsigned long flags; + + /* We need to be sure that no thread is running the old callback as + * we install the nop handler (otherwise we would submit a request + * to hardware that will never complete). In order to prevent this + * race, we wait until the machine is idle before making the swap + * (using stop_machine()). + */ engine->submit_request = nop_submit_request; + /* Mark all executing requests as skipped */ + spin_lock_irqsave(&engine->timeline->lock, flags); + list_for_each_entry(request, &engine->timeline->requests, link) + dma_fence_set_error(&request->fence, -EIO); + spin_unlock_irqrestore(&engine->timeline->lock, flags); + /* Mark all pending requests as complete so that any concurrent * (lockless) lookup doesn't try and wait upon the request as we * reset it. 
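engine_set_wedged() above installs nop_submit_request() and flags every in-flight request with -EIO; its comment explains why the swap must not race a concurrent submitter, and i915_gem_set_wedged() just below performs it inside stop_machine() (hence the new <linux/stop_machine.h> include at the top of the diff). The bare pattern:

    #include <linux/stop_machine.h>

    /* The callback runs while every other online CPU spins with interrupts
     * disabled, so function pointers can be swapped without racing callers. */
    static int swap_handlers(void *data)
    {
            /* e.g. engine->submit_request = nop_submit_request; */
            return 0;
    }

    static void wedge_example(void *driver_data)
    {
            /* NULL cpumask: let the callback run on any one online CPU */
            stop_machine(swap_handlers, driver_data, NULL);
    }
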
@@ -2815,20 +2832,29 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine) } } -void i915_gem_set_wedged(struct drm_i915_private *dev_priv) +static int __i915_gem_set_wedged_BKL(void *data) { + struct drm_i915_private *i915 = data; struct intel_engine_cs *engine; enum intel_engine_id id; + for_each_engine(engine, i915, id) + engine_set_wedged(engine); + + return 0; +} + +void i915_gem_set_wedged(struct drm_i915_private *dev_priv) +{ lockdep_assert_held(&dev_priv->drm.struct_mutex); set_bit(I915_WEDGED, &dev_priv->gpu_error.flags); - i915_gem_context_lost(dev_priv); - for_each_engine(engine, dev_priv, id) - i915_gem_cleanup_engine(engine); - mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0); + stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL); + i915_gem_context_lost(dev_priv); i915_gem_retire_requests(dev_priv); + + mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0); } static void @@ -3373,7 +3399,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_caching *args = data; struct drm_i915_gem_object *obj; enum i915_cache_level level; - int ret; + int ret = 0; switch (args->caching) { case I915_CACHING_NONE: @@ -3398,20 +3424,29 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - ret = i915_mutex_lock_interruptible(dev); + obj = i915_gem_object_lookup(file, args->handle); + if (!obj) + return -ENOENT; + + if (obj->cache_level == level) + goto out; + + ret = i915_gem_object_wait(obj, + I915_WAIT_INTERRUPTIBLE, + MAX_SCHEDULE_TIMEOUT, + to_rps_client(file)); if (ret) - return ret; + goto out; - obj = i915_gem_object_lookup(file, args->handle); - if (!obj) { - ret = -ENOENT; - goto unlock; - } + ret = i915_mutex_lock_interruptible(dev); + if (ret) + goto out; ret = i915_gem_object_set_cache_level(obj, level); - i915_gem_object_put(obj); -unlock: mutex_unlock(&dev->struct_mutex); + +out: + i915_gem_object_put(obj); return ret; } @@ -3461,7 +3496,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, * try to preserve the existing ABI). 
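The set_caching ioctl is restructured above so that the object lookup and the interruptible wait happen before struct_mutex is taken, and the mutex is held only around the cache-level change itself. The same shape in generic form (illustrative helper, not driver code):

    #include <linux/mutex.h>

    static int change_state_locked(struct mutex *big_lock,
                                   int (*wait_for_idle)(void *obj),
                                   int (*apply_change)(void *obj),
                                   void *obj)
    {
            int ret;

            ret = wait_for_idle(obj);       /* may sleep; lock not yet held */
            if (ret)
                    return ret;

            ret = mutex_lock_interruptible(big_lock);
            if (ret)
                    return ret;             /* interrupted by a signal */

            ret = apply_change(obj);        /* short critical section */
            mutex_unlock(big_lock);
            return ret;
    }
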
*/ vma = ERR_PTR(-ENOSPC); - if (view->type == I915_GGTT_VIEW_NORMAL) + if (!view || view->type == I915_GGTT_VIEW_NORMAL) vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, PIN_MAPPABLE | PIN_NONBLOCK); if (IS_ERR(vma)) { @@ -3514,17 +3549,16 @@ err_unpin_display: void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) { - lockdep_assert_held(&vma->vm->dev->struct_mutex); + lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); if (WARN_ON(vma->obj->pin_display == 0)) return; if (--vma->obj->pin_display == 0) - vma->display_alignment = 0; + vma->display_alignment = I915_GTT_MIN_ALIGNMENT; /* Bump the LRU to try and avoid premature eviction whilst flipping */ - if (!i915_vma_is_active(vma)) - list_move_tail(&vma->vm_link, &vma->vm->inactive_list); + i915_gem_object_bump_inactive_ggtt(vma->obj); i915_vma_unpin(vma); } @@ -3655,8 +3689,8 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, lockdep_assert_held(&obj->base.dev->struct_mutex); - vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view); - if (IS_ERR(vma)) + vma = i915_vma_instance(obj, vm, view); + if (unlikely(IS_ERR(vma))) return vma; if (i915_vma_misplaced(vma, size, alignment, flags)) { @@ -3665,10 +3699,6 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, return ERR_PTR(-ENOSPC); if (flags & PIN_MAPPABLE) { - u32 fence_size; - - fence_size = i915_gem_get_ggtt_size(dev_priv, vma->size, - i915_gem_object_get_tiling(obj)); /* If the required space is larger than the available * aperture, we will not able to find a slot for the * object and unbinding the object now will be in @@ -3676,7 +3706,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, * the object in and out of the Global GTT and * waste a lot of cycles under the mutex. */ - if (fence_size > dev_priv->ggtt.mappable_end) + if (vma->fence_size > dev_priv->ggtt.mappable_end) return ERR_PTR(-E2BIG); /* If NONBLOCK is set the caller is optimistically @@ -3695,7 +3725,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, * we could try to minimise harm to others. */ if (flags & PIN_NONBLOCK && - fence_size > dev_priv->ggtt.mappable_end / 2) + vma->fence_size > dev_priv->ggtt.mappable_end / 2) return ERR_PTR(-ENOSPC); } @@ -3948,14 +3978,9 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = { .put_pages = i915_gem_object_put_pages_gtt, }; -/* Note we don't consider signbits :| */ -#define overflows_type(x, T) \ - (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE)) - struct drm_i915_gem_object * -i915_gem_object_create(struct drm_device *dev, u64 size) +i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size) { - struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; struct address_space *mapping; gfp_t mask; @@ -3972,16 +3997,16 @@ i915_gem_object_create(struct drm_device *dev, u64 size) if (overflows_type(size, obj->base.size)) return ERR_PTR(-E2BIG); - obj = i915_gem_object_alloc(dev); + obj = i915_gem_object_alloc(dev_priv); if (obj == NULL) return ERR_PTR(-ENOMEM); - ret = drm_gem_object_init(dev, &obj->base, size); + ret = drm_gem_object_init(&dev_priv->drm, &obj->base, size); if (ret) goto fail; mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; - if (IS_CRESTLINE(dev_priv) || IS_BROADWATER(dev_priv)) { + if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) { /* 965gm cannot relocate objects above 4GiB. 
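The file-local overflows_type() macro is dropped above while i915_gem_object_create() continues to use it, so the definition presumably moves to a shared header outside this file. The removed definition, reproduced for reference with its original caveat about sign bits:

    #include <linux/bitops.h>       /* BITS_PER_BYTE */

    /* Note we don't consider signbits :| */
    #define overflows_type(x, T) \
            (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
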
*/ mask &= ~__GFP_HIGHMEM; mask |= __GFP_DMA32; @@ -4174,12 +4199,13 @@ static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv) enum intel_engine_id id; for_each_engine(engine, dev_priv, id) - GEM_BUG_ON(engine->last_context != dev_priv->kernel_context); + GEM_BUG_ON(engine->last_retired_context && + !i915_gem_context_is_kernel(engine->last_retired_context)); } -int i915_gem_suspend(struct drm_device *dev) +int i915_gem_suspend(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_device *dev = &dev_priv->drm; int ret; intel_suspend_gt_powersave(dev_priv); @@ -4213,8 +4239,14 @@ int i915_gem_suspend(struct drm_device *dev) cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); cancel_delayed_work_sync(&dev_priv->gt.retire_work); - flush_delayed_work(&dev_priv->gt.idle_work); - flush_work(&dev_priv->mm.free_work); + + /* As the idle_work is rearming if it detects a race, play safe and + * repeat the flush until it is definitely idle. + */ + while (flush_delayed_work(&dev_priv->gt.idle_work)) + ; + + i915_gem_drain_freed_objects(dev_priv); /* Assert that we sucessfully flushed all the work and * reset the GPU back to its idle, low power state. @@ -4253,9 +4285,9 @@ err: return ret; } -void i915_gem_resume(struct drm_device *dev) +void i915_gem_resume(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_device *dev = &dev_priv->drm; WARN_ON(dev_priv->gt.awake); @@ -4320,9 +4352,8 @@ static void init_unused_rings(struct drm_i915_private *dev_priv) } int -i915_gem_init_hw(struct drm_device *dev) +i915_gem_init_hw(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; enum intel_engine_id id; int ret; @@ -4376,10 +4407,10 @@ i915_gem_init_hw(struct drm_device *dev) goto out; } - intel_mocs_init_l3cc_table(dev); + intel_mocs_init_l3cc_table(dev_priv); /* We can't enable contexts until all firmware is loaded */ - ret = intel_guc_setup(dev); + ret = intel_guc_setup(dev_priv); if (ret) goto out; @@ -4409,12 +4440,11 @@ bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value) return true; } -int i915_gem_init(struct drm_device *dev) +int i915_gem_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); int ret; - mutex_lock(&dev->struct_mutex); + mutex_lock(&dev_priv->drm.struct_mutex); if (!i915.enable_execlists) { dev_priv->gt.resume = intel_legacy_submission_resume; @@ -4438,15 +4468,15 @@ int i915_gem_init(struct drm_device *dev) if (ret) goto out_unlock; - ret = i915_gem_context_init(dev); + ret = i915_gem_context_init(dev_priv); if (ret) goto out_unlock; - ret = intel_engines_init(dev); + ret = intel_engines_init(dev_priv); if (ret) goto out_unlock; - ret = i915_gem_init_hw(dev); + ret = i915_gem_init_hw(dev_priv); if (ret == -EIO) { /* Allow engine initialisation to fail by marking the GPU as * wedged. 
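i915_gem_suspend() above now loops on flush_delayed_work() because the idle worker can re-arm itself: flush_delayed_work() returns true if it had to wait for the work to finish, so repeating until it returns false guarantees the work really is idle. The pattern in isolation:

    #include <linux/workqueue.h>

    /* Keep flushing until a flush finds the delayed work idle; needed when
     * the handler may queue itself again, as gt.idle_work can here. */
    static void drain_rearming_work(struct delayed_work *dwork)
    {
            while (flush_delayed_work(dwork))
                    ;
    }
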
But we only want to do this where the GPU is angry, @@ -4459,15 +4489,14 @@ int i915_gem_init(struct drm_device *dev) out_unlock: intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev_priv->drm.struct_mutex); return ret; } void -i915_gem_cleanup_engines(struct drm_device *dev) +i915_gem_cleanup_engines(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; enum intel_engine_id id; @@ -4483,8 +4512,9 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv) if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) dev_priv->num_fence_regs = 32; - else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) || - IS_I945GM(dev_priv) || IS_G33(dev_priv)) + else if (INTEL_INFO(dev_priv)->gen >= 4 || + IS_I945G(dev_priv) || IS_I945GM(dev_priv) || + IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) dev_priv->num_fence_regs = 16; else dev_priv->num_fence_regs = 8; @@ -4507,9 +4537,8 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv) } int -i915_gem_load_init(struct drm_device *dev) +i915_gem_load_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); int err = -ENOMEM; dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN); @@ -4578,10 +4607,8 @@ err_out: return err; } -void i915_gem_load_cleanup(struct drm_device *dev) +void i915_gem_load_cleanup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - WARN_ON(!llist_empty(&dev_priv->mm.free_list)); mutex_lock(&dev_priv->drm.struct_mutex); @@ -4732,7 +4759,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old, /* Allocate a new GEM object and fill it with the supplied data */ struct drm_i915_gem_object * -i915_gem_object_create_from_data(struct drm_device *dev, +i915_gem_object_create_from_data(struct drm_i915_private *dev_priv, const void *data, size_t size) { struct drm_i915_gem_object *obj; @@ -4740,7 +4767,7 @@ i915_gem_object_create_from_data(struct drm_device *dev, size_t bytes; int ret; - obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE)); + obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE)); if (IS_ERR(obj)) return obj; |