Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_gtt.c | 228
1 file changed, 141 insertions, 87 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index c6aa761ca085..abd81fb9b0b6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -375,37 +375,70 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
 	return pte;
 }
 
+static void stash_init(struct pagestash *stash)
+{
+	pagevec_init(&stash->pvec);
+	spin_lock_init(&stash->lock);
+}
+
+static struct page *stash_pop_page(struct pagestash *stash)
+{
+	struct page *page = NULL;
+
+	spin_lock(&stash->lock);
+	if (likely(stash->pvec.nr))
+		page = stash->pvec.pages[--stash->pvec.nr];
+	spin_unlock(&stash->lock);
+
+	return page;
+}
+
+static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
+{
+	int nr;
+
+	spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);
+
+	nr = min_t(int, pvec->nr, pagevec_space(&stash->pvec));
+	memcpy(stash->pvec.pages + stash->pvec.nr,
+	       pvec->pages + pvec->nr - nr,
+	       sizeof(pvec->pages[0]) * nr);
+	stash->pvec.nr += nr;
+
+	spin_unlock(&stash->lock);
+
+	pvec->nr -= nr;
+}
+
 static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 {
-	struct pagevec *pvec = &vm->free_pages;
-	struct pagevec stash;
+	struct pagevec stack;
+	struct page *page;
 
 	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
 		i915_gem_shrink_all(vm->i915);
 
-	if (likely(pvec->nr))
-		return pvec->pages[--pvec->nr];
+	page = stash_pop_page(&vm->free_pages);
+	if (page)
+		return page;
 
 	if (!vm->pt_kmap_wc)
 		return alloc_page(gfp);
 
-	/* A placeholder for a specific mutex to guard the WC stash */
-	lockdep_assert_held(&vm->i915->drm.struct_mutex);
-
 	/* Look in our global stash of WC pages... */
-	pvec = &vm->i915->mm.wc_stash;
-	if (likely(pvec->nr))
-		return pvec->pages[--pvec->nr];
+	page = stash_pop_page(&vm->i915->mm.wc_stash);
+	if (page)
+		return page;
 
 	/*
-	 * Otherwise batch allocate pages to amoritize cost of set_pages_wc.
+	 * Otherwise batch allocate pages to amortize cost of set_pages_wc.
 	 *
 	 * We have to be careful as page allocation may trigger the shrinker
 	 * (via direct reclaim) which will fill up the WC stash underneath us.
 	 * So we add our WB pages into a temporary pvec on the stack and merge
 	 * them into the WC stash after all the allocations are complete.
 	 */
-	pagevec_init(&stash);
+	pagevec_init(&stack);
 	do {
 		struct page *page;
 
@@ -413,59 +446,67 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 		if (unlikely(!page))
 			break;
 
-		stash.pages[stash.nr++] = page;
-	} while (stash.nr < pagevec_space(pvec));
+		stack.pages[stack.nr++] = page;
+	} while (pagevec_space(&stack));
 
-	if (stash.nr) {
-		int nr = min_t(int, stash.nr, pagevec_space(pvec));
-		struct page **pages = stash.pages + stash.nr - nr;
+	if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
+		page = stack.pages[--stack.nr];
 
-		if (nr && !set_pages_array_wc(pages, nr)) {
-			memcpy(pvec->pages + pvec->nr,
-			       pages, sizeof(pages[0]) * nr);
-			pvec->nr += nr;
-			stash.nr -= nr;
-		}
+		/* Merge spare WC pages to the global stash */
+		stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
 
-		pagevec_release(&stash);
+		/* Push any surplus WC pages onto the local VM stash */
+		if (stack.nr)
+			stash_push_pagevec(&vm->free_pages, &stack);
 	}
 
-	return likely(pvec->nr) ? pvec->pages[--pvec->nr] : NULL;
+	/* Return unwanted leftovers */
+	if (unlikely(stack.nr)) {
+		WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
+		__pagevec_release(&stack);
+	}
+
+	return page;
 }
 
 static void vm_free_pages_release(struct i915_address_space *vm,
 				  bool immediate)
 {
-	struct pagevec *pvec = &vm->free_pages;
+	struct pagevec *pvec = &vm->free_pages.pvec;
+	struct pagevec stack;
 
+	lockdep_assert_held(&vm->free_pages.lock);
 	GEM_BUG_ON(!pagevec_count(pvec));
 
 	if (vm->pt_kmap_wc) {
-		struct pagevec *stash = &vm->i915->mm.wc_stash;
-
-		/* When we use WC, first fill up the global stash and then
+		/*
+		 * When we use WC, first fill up the global stash and then
 		 * only if full immediately free the overflow.
 		 */
+		stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);
 
-		lockdep_assert_held(&vm->i915->drm.struct_mutex);
-		if (pagevec_space(stash)) {
-			do {
-				stash->pages[stash->nr++] =
-					pvec->pages[--pvec->nr];
-				if (!pvec->nr)
-					return;
-			} while (pagevec_space(stash));
-
-			/* As we have made some room in the VM's free_pages,
-			 * we can wait for it to fill again. Unless we are
-			 * inside i915_address_space_fini() and must
-			 * immediately release the pages!
-			 */
-			if (!immediate)
-				return;
-		}
+		/*
+		 * As we have made some room in the VM's free_pages,
+		 * we can wait for it to fill again. Unless we are
+		 * inside i915_address_space_fini() and must
+		 * immediately release the pages!
+		 */
+		if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
+			return;
+
+		/*
+		 * We have to drop the lock to allow ourselves to sleep,
+		 * so take a copy of the pvec and clear the stash for
+		 * others to use it as we sleep.
+		 */
+		stack = *pvec;
+		pagevec_reinit(pvec);
+		spin_unlock(&vm->free_pages.lock);
 
+		pvec = &stack;
 		set_pages_array_wb(pvec->pages, pvec->nr);
+
+		spin_lock(&vm->free_pages.lock);
 	}
 
 	__pagevec_release(pvec);
@@ -481,8 +522,35 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page)
 	 * unconditional might_sleep() for everybody.
 	 */
 	might_sleep();
-	if (!pagevec_add(&vm->free_pages, page))
+	spin_lock(&vm->free_pages.lock);
+	if (!pagevec_add(&vm->free_pages.pvec, page))
 		vm_free_pages_release(vm, false);
+	spin_unlock(&vm->free_pages.lock);
+}
+
+static void i915_address_space_init(struct i915_address_space *vm,
+				    struct drm_i915_private *dev_priv)
+{
+	GEM_BUG_ON(!vm->total);
+	drm_mm_init(&vm->mm, 0, vm->total);
+	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
+
+	stash_init(&vm->free_pages);
+
+	INIT_LIST_HEAD(&vm->active_list);
+	INIT_LIST_HEAD(&vm->inactive_list);
+	INIT_LIST_HEAD(&vm->unbound_list);
+}
+
+static void i915_address_space_fini(struct i915_address_space *vm)
+{
+	spin_lock(&vm->free_pages.lock);
+	if (pagevec_count(&vm->free_pages.pvec))
+		vm_free_pages_release(vm, true);
+	GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
+	spin_unlock(&vm->free_pages.lock);
+
+	drm_mm_takedown(&vm->mm);
 }
 
 static int __setup_page_dma(struct i915_address_space *vm,
@@ -493,8 +561,11 @@ static int __setup_page_dma(struct i915_address_space *vm,
 	if (unlikely(!p->page))
 		return -ENOMEM;
 
-	p->daddr = dma_map_page(vm->dma, p->page, 0, PAGE_SIZE,
-				PCI_DMA_BIDIRECTIONAL);
+	p->daddr = dma_map_page_attrs(vm->dma,
+				      p->page, 0, PAGE_SIZE,
+				      PCI_DMA_BIDIRECTIONAL,
+				      DMA_ATTR_SKIP_CPU_SYNC |
+				      DMA_ATTR_NO_WARN);
 	if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
 		vm_free_page(vm, p->page);
 		return -ENOMEM;
@@ -575,8 +646,11 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
 		if (unlikely(!page))
 			goto skip;
 
-		addr = dma_map_page(vm->dma, page, 0, size,
-				    PCI_DMA_BIDIRECTIONAL);
+		addr = dma_map_page_attrs(vm->dma,
+					  page, 0, size,
+					  PCI_DMA_BIDIRECTIONAL,
+					  DMA_ATTR_SKIP_CPU_SYNC |
+					  DMA_ATTR_NO_WARN);
 		if (unlikely(dma_mapping_error(vm->dma, addr)))
 			goto free_page;
 
@@ -1562,6 +1636,8 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 	if (!ppgtt)
 		return ERR_PTR(-ENOMEM);
 
+	kref_init(&ppgtt->ref);
+
 	ppgtt->vm.i915 = i915;
 	ppgtt->vm.dma = &i915->drm.pdev->dev;
 
@@ -1569,6 +1645,8 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 		1ULL << 48 :
 		1ULL << 32;
 
+	i915_address_space_init(&ppgtt->vm, i915);
+
 	/* There are only few exceptions for gen >=6. chv and bxt.
 	 * And we are not sure about the latter so play safe for now.
 	 */
@@ -1996,7 +2074,6 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
 	struct drm_i915_private *i915 = ppgtt->base.vm.i915;
 	struct i915_ggtt *ggtt = &i915->ggtt;
 	struct i915_vma *vma;
-	int i;
 
 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
 	GEM_BUG_ON(size > ggtt->vm.total);
@@ -2005,14 +2082,14 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
 	if (!vma)
 		return ERR_PTR(-ENOMEM);
 
-	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
-		init_request_active(&vma->last_read[i], NULL);
 	init_request_active(&vma->last_fence, NULL);
 
 	vma->vm = &ggtt->vm;
 	vma->ops = &pd_vma_ops;
 	vma->private = ppgtt;
 
+	vma->active = RB_ROOT;
+
 	vma->size = size;
 	vma->fence_size = size;
 	vma->flags = I915_VMA_GGTT;
@@ -2068,11 +2145,15 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
 	if (!ppgtt)
 		return ERR_PTR(-ENOMEM);
 
+	kref_init(&ppgtt->base.ref);
+
 	ppgtt->base.vm.i915 = i915;
 	ppgtt->base.vm.dma = &i915->drm.pdev->dev;
 
 	ppgtt->base.vm.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
 
+	i915_address_space_init(&ppgtt->base.vm, i915);
+
 	ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
 	ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
 	ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
@@ -2105,30 +2186,6 @@ err_free:
 	return ERR_PTR(err);
 }
 
-static void i915_address_space_init(struct i915_address_space *vm,
-				    struct drm_i915_private *dev_priv,
-				    const char *name)
-{
-	drm_mm_init(&vm->mm, 0, vm->total);
-	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
-
-	INIT_LIST_HEAD(&vm->active_list);
-	INIT_LIST_HEAD(&vm->inactive_list);
-	INIT_LIST_HEAD(&vm->unbound_list);
-
-	list_add_tail(&vm->global_link, &dev_priv->vm_list);
-	pagevec_init(&vm->free_pages);
-}
-
-static void i915_address_space_fini(struct i915_address_space *vm)
-{
-	if (pagevec_count(&vm->free_pages))
-		vm_free_pages_release(vm, true);
-
-	drm_mm_takedown(&vm->mm);
-	list_del(&vm->global_link);
-}
-
 static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
 {
 	/* This function is for gtt related workarounds. This function is
@@ -2199,8 +2256,7 @@ __hw_ppgtt_create(struct drm_i915_private *i915)
 
 struct i915_hw_ppgtt *
 i915_ppgtt_create(struct drm_i915_private *i915,
-		  struct drm_i915_file_private *fpriv,
-		  const char *name)
+		  struct drm_i915_file_private *fpriv)
 {
 	struct i915_hw_ppgtt *ppgtt;
 
@@ -2208,8 +2264,6 @@ i915_ppgtt_create(struct drm_i915_private *i915,
 	if (IS_ERR(ppgtt))
 		return ppgtt;
 
-	kref_init(&ppgtt->ref);
-	i915_address_space_init(&ppgtt->vm, i915, name);
 	ppgtt->vm.file = fpriv;
 
 	trace_i915_ppgtt_create(&ppgtt->vm);
@@ -2739,7 +2793,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
 	if (unlikely(ggtt->do_idle_maps)) {
-		if (i915_gem_wait_for_idle(dev_priv, 0)) {
+		if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) {
 			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
 			/* Wait a bit, in hopes it avoids the hang */
 			udelay(10);
@@ -2788,7 +2842,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
 	struct i915_hw_ppgtt *ppgtt;
 	int err;
 
-	ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM), "[alias]");
+	ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM));
 	if (IS_ERR(ppgtt))
 		return PTR_ERR(ppgtt);
 
@@ -2918,7 +2972,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
 
 	ggtt->vm.cleanup(&ggtt->vm);
 
-	pvec = &dev_priv->mm.wc_stash;
+	pvec = &dev_priv->mm.wc_stash.pvec;
 	if (pvec->nr) {
 		set_pages_array_wb(pvec->pages, pvec->nr);
 		__pagevec_release(pvec);
@@ -3518,7 +3572,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	int ret;
 
-	INIT_LIST_HEAD(&dev_priv->vm_list);
+	stash_init(&dev_priv->mm.wc_stash);
 
 	/* Note that we use page colouring to enforce a guard page at the
 	 * end of the address space. This is required as the CS may prefetch
@@ -3526,7 +3580,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
 	 * and beyond the end of the GTT if we do not provide a guard.
 	 */
 	mutex_lock(&dev_priv->drm.struct_mutex);
-	i915_address_space_init(&ggtt->vm, dev_priv, "[global]");
+	i915_address_space_init(&ggtt->vm, dev_priv);
 	if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
 		ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
 	mutex_unlock(&dev_priv->drm.struct_mutex);
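
Note on the new pagestash helpers: struct pagestash itself is not defined in
this file; it comes from the accompanying header change (presumably
i915_gem_gtt.h in the same series). A minimal sketch of the layout the helpers
assume, inferred only from the accesses in stash_init(), stash_pop_page() and
stash_push_pagevec() above and not taken from this diff:

	/* Sketch only -- the authoritative definition lives in the header
	 * change that accompanies this diff, not in i915_gem_gtt.c. */
	struct pagestash {
		spinlock_t lock;	/* guards pvec against concurrent push/pop */
		struct pagevec pvec;	/* small page cache; pvec.nr acts as the LIFO top */
	};

stash_push_pagevec() takes the lock with spin_lock_nested(SINGLE_DEPTH_NESTING)
because vm_free_pages_release() pushes the per-VM stash into the global
wc_stash while still holding vm->free_pages.lock (see the lockdep_assert_held
above), so two locks of the same lock class are held at once.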