Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu.h | 120
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 177
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 101
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 24
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 4
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 7
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 3
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 30
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c | 2
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 10
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 94
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 6
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 138
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 8
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 302
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 11
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 9
17 files changed, 669 insertions, 377 deletions
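
Editor's note, not part of the patch: the amdgpu_fence.c hunks below replace the per-ring lockup-check delayed work with a fallback timer and move amdgpu_fence allocation onto a kmem_cache slab shared by all devices. The following minimal sketch shows those two kernel patterns in isolation; the demo_* names and the half-second timeout are illustrative only, not amdgpu symbols.

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/atomic.h>

struct demo_fence {
	u64 seq;
};

/* One slab shared by every device; refcounted so the last user frees it. */
static struct kmem_cache *demo_fence_slab;
static atomic_t demo_fence_slab_ref = ATOMIC_INIT(0);

struct demo_ring {
	struct timer_list fallback_timer;
};

static int demo_fence_slab_get(void)
{
	if (atomic_inc_return(&demo_fence_slab_ref) == 1) {
		demo_fence_slab = kmem_cache_create("demo_fence",
				sizeof(struct demo_fence), 0,
				SLAB_HWCACHE_ALIGN, NULL);
		if (!demo_fence_slab)
			return -ENOMEM;
	}
	return 0;
}

static void demo_fence_slab_put(void)
{
	if (atomic_dec_and_test(&demo_fence_slab_ref))
		kmem_cache_destroy(demo_fence_slab);
}

static struct demo_fence *demo_fence_alloc(void)
{
	/* slab allocation instead of a plain kmalloc() per fence */
	return kmem_cache_alloc(demo_fence_slab, GFP_KERNEL);
}

static void demo_fence_free(struct demo_fence *fence)
{
	kmem_cache_free(demo_fence_slab, fence);
}

static void demo_fence_fallback(unsigned long arg)
{
	struct demo_ring *ring = (void *)arg;

	/* in amdgpu this would call amdgpu_fence_process(ring); stub here */
	(void)ring;
}

/* Arm the fallback only if it is not already pending. */
static void demo_fence_schedule_fallback(struct demo_ring *ring)
{
	if (!timer_pending(&ring->fallback_timer))
		mod_timer(&ring->fallback_timer, jiffies + HZ / 2);
}

static void demo_ring_init(struct demo_ring *ring)
{
	setup_timer(&ring->fallback_timer, demo_fence_fallback,
		    (unsigned long)ring);
}

static void demo_ring_fini(struct demo_ring *ring)
{
	del_timer_sync(&ring->fallback_timer);
}

In the patch itself the timer handler is amdgpu_fence_fallback(), which just calls amdgpu_fence_process(); the slab is created by the first amdgpu_fence_driver_init() and destroyed when the last device runs amdgpu_fence_driver_fini().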
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 615ce6d464fb..306f75700bf8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -389,7 +389,6 @@ struct amdgpu_clock { * Fences. */ struct amdgpu_fence_driver { - struct amdgpu_ring *ring; uint64_t gpu_addr; volatile uint32_t *cpu_addr; /* sync_seq is protected by ring emission lock */ @@ -398,7 +397,7 @@ struct amdgpu_fence_driver { bool initialized; struct amdgpu_irq_src *irq_src; unsigned irq_type; - struct delayed_work lockup_work; + struct timer_list fallback_timer; wait_queue_head_t fence_queue; }; @@ -917,8 +916,8 @@ struct amdgpu_ring { #define AMDGPU_VM_FAULT_STOP_ALWAYS 2 struct amdgpu_vm_pt { - struct amdgpu_bo *bo; - uint64_t addr; + struct amdgpu_bo *bo; + uint64_t addr; }; struct amdgpu_vm_id { @@ -926,8 +925,6 @@ struct amdgpu_vm_id { uint64_t pd_gpu_addr; /* last flushed PD/PT update */ struct fence *flushed_updates; - /* last use of vmid */ - struct fence *last_id_use; }; struct amdgpu_vm { @@ -957,24 +954,70 @@ struct amdgpu_vm { /* for id and flush management per ring */ struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS]; + /* for interval tree */ + spinlock_t it_lock; }; struct amdgpu_vm_manager { - struct fence *active[AMDGPU_NUM_VM]; - uint32_t max_pfn; + struct { + struct fence *active; + atomic_long_t owner; + } ids[AMDGPU_NUM_VM]; + + uint32_t max_pfn; /* number of VMIDs */ - unsigned nvm; + unsigned nvm; /* vram base address for page table entry */ - u64 vram_base_offset; + u64 vram_base_offset; /* is vm enabled? */ - bool enabled; - /* for hw to save the PD addr on suspend/resume */ - uint32_t saved_table_addr[AMDGPU_NUM_VM]; + bool enabled; /* vm pte handling */ const struct amdgpu_vm_pte_funcs *vm_pte_funcs; struct amdgpu_ring *vm_pte_funcs_ring; }; +void amdgpu_vm_manager_fini(struct amdgpu_device *adev); +int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); +void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); +struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + struct list_head *head); +int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, + struct amdgpu_sync *sync); +void amdgpu_vm_flush(struct amdgpu_ring *ring, + struct amdgpu_vm *vm, + struct fence *updates); +void amdgpu_vm_fence(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + struct fence *fence); +uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr); +int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, + struct amdgpu_vm *vm); +int amdgpu_vm_clear_freed(struct amdgpu_device *adev, + struct amdgpu_vm *vm); +int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct amdgpu_sync *sync); +int amdgpu_vm_bo_update(struct amdgpu_device *adev, + struct amdgpu_bo_va *bo_va, + struct ttm_mem_reg *mem); +void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, + struct amdgpu_bo *bo); +struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, + struct amdgpu_bo *bo); +struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + struct amdgpu_bo *bo); +int amdgpu_vm_bo_map(struct amdgpu_device *adev, + struct amdgpu_bo_va *bo_va, + uint64_t addr, uint64_t offset, + uint64_t size, uint32_t flags); +int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, + struct amdgpu_bo_va *bo_va, + uint64_t addr); +void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, + struct amdgpu_bo_va *bo_va); +int 
amdgpu_vm_free_job(struct amdgpu_job *job); + /* * context related structures */ @@ -1211,6 +1254,7 @@ struct amdgpu_cs_parser { /* relocations */ struct amdgpu_bo_list_entry *vm_bos; struct list_head validated; + struct fence *fence; struct amdgpu_ib *ibs; uint32_t num_ibs; @@ -1226,7 +1270,7 @@ struct amdgpu_job { struct amdgpu_device *adev; struct amdgpu_ib *ibs; uint32_t num_ibs; - struct mutex job_lock; + void *owner; struct amdgpu_user_fence uf; int (*free_job)(struct amdgpu_job *job); }; @@ -2257,11 +2301,6 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev); bool amdgpu_card_posted(struct amdgpu_device *adev); void amdgpu_update_display_priority(struct amdgpu_device *adev); bool amdgpu_boot_test_post_card(struct amdgpu_device *adev); -struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev, - struct drm_file *filp, - struct amdgpu_ctx *ctx, - struct amdgpu_ib *ibs, - uint32_t num_ibs); int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data); int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, @@ -2319,49 +2358,6 @@ long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); /* - * vm - */ -int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); -void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); -struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct list_head *head); -int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, - struct amdgpu_sync *sync); -void amdgpu_vm_flush(struct amdgpu_ring *ring, - struct amdgpu_vm *vm, - struct fence *updates); -void amdgpu_vm_fence(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct amdgpu_fence *fence); -uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr); -int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, - struct amdgpu_vm *vm); -int amdgpu_vm_clear_freed(struct amdgpu_device *adev, - struct amdgpu_vm *vm); -int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, - struct amdgpu_vm *vm, struct amdgpu_sync *sync); -int amdgpu_vm_bo_update(struct amdgpu_device *adev, - struct amdgpu_bo_va *bo_va, - struct ttm_mem_reg *mem); -void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, - struct amdgpu_bo *bo); -struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, - struct amdgpu_bo *bo); -struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct amdgpu_bo *bo); -int amdgpu_vm_bo_map(struct amdgpu_device *adev, - struct amdgpu_bo_va *bo_va, - uint64_t addr, uint64_t offset, - uint64_t size, uint32_t flags); -int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, - struct amdgpu_bo_va *bo_va, - uint64_t addr); -void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, - struct amdgpu_bo_va *bo_va); -int amdgpu_vm_free_job(struct amdgpu_job *job); -/* * functions used by amdgpu_encoder.c */ struct amdgpu_afmt_acr { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index dfc4d02c7a38..3afcf0237c25 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -127,30 +127,6 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, return 0; } -struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev, - struct drm_file *filp, - struct amdgpu_ctx *ctx, - struct amdgpu_ib *ibs, - uint32_t num_ibs) -{ - struct amdgpu_cs_parser *parser; - int i; - - parser = kzalloc(sizeof(struct amdgpu_cs_parser), 
GFP_KERNEL); - if (!parser) - return NULL; - - parser->adev = adev; - parser->filp = filp; - parser->ctx = ctx; - parser->ibs = ibs; - parser->num_ibs = num_ibs; - for (i = 0; i < num_ibs; i++) - ibs[i].ctx = ctx; - - return parser; -} - int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) { union drm_amdgpu_cs *cs = data; @@ -463,8 +439,18 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a, return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages; } -static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff) +/** + * cs_parser_fini() - clean parser states + * @parser: parser structure holding parsing context. + * @error: error number + * + * If error is set than unvalidate buffer, otherwise just free memory + * used by parsing context. + **/ +static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff) { + unsigned i; + if (!error) { /* Sort the buffer list from the smallest to largest buffer, * which affects the order of buffers in the LRU list. @@ -479,17 +465,14 @@ static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int err list_sort(NULL, &parser->validated, cmp_size_smaller_first); ttm_eu_fence_buffer_objects(&parser->ticket, - &parser->validated, - &parser->ibs[parser->num_ibs-1].fence->base); + &parser->validated, + parser->fence); } else if (backoff) { ttm_eu_backoff_reservation(&parser->ticket, &parser->validated); } -} + fence_put(parser->fence); -static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser) -{ - unsigned i; if (parser->ctx) amdgpu_ctx_put(parser->ctx); if (parser->bo_list) @@ -499,31 +482,12 @@ static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser) for (i = 0; i < parser->nchunks; i++) drm_free_large(parser->chunks[i].kdata); kfree(parser->chunks); - if (!amdgpu_enable_scheduler) - { - if (parser->ibs) - for (i = 0; i < parser->num_ibs; i++) - amdgpu_ib_free(parser->adev, &parser->ibs[i]); - kfree(parser->ibs); - if (parser->uf.bo) - drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base); - } - - kfree(parser); -} - -/** - * cs_parser_fini() - clean parser states - * @parser: parser structure holding parsing context. - * @error: error number - * - * If error is set than unvalidate buffer, otherwise just free memory - * used by parsing context. 
- **/ -static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff) -{ - amdgpu_cs_parser_fini_early(parser, error, backoff); - amdgpu_cs_parser_fini_late(parser); + if (parser->ibs) + for (i = 0; i < parser->num_ibs; i++) + amdgpu_ib_free(parser->adev, &parser->ibs[i]); + kfree(parser->ibs); + if (parser->uf.bo) + drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base); } static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, @@ -610,15 +574,9 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, } r = amdgpu_bo_vm_update_pte(parser, vm); - if (r) { - goto out; - } - amdgpu_cs_sync_rings(parser); - if (!amdgpu_enable_scheduler) - r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs, - parser->filp); + if (!r) + amdgpu_cs_sync_rings(parser); -out: return r; } @@ -828,36 +786,36 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) union drm_amdgpu_cs *cs = data; struct amdgpu_fpriv *fpriv = filp->driver_priv; struct amdgpu_vm *vm = &fpriv->vm; - struct amdgpu_cs_parser *parser; + struct amdgpu_cs_parser parser = {}; bool reserved_buffers = false; int i, r; if (!adev->accel_working) return -EBUSY; - parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0); - if (!parser) - return -ENOMEM; - r = amdgpu_cs_parser_init(parser, data); + parser.adev = adev; + parser.filp = filp; + + r = amdgpu_cs_parser_init(&parser, data); if (r) { DRM_ERROR("Failed to initialize parser !\n"); - amdgpu_cs_parser_fini(parser, r, false); + amdgpu_cs_parser_fini(&parser, r, false); r = amdgpu_cs_handle_lockup(adev, r); return r; } mutex_lock(&vm->mutex); - r = amdgpu_cs_parser_relocs(parser); + r = amdgpu_cs_parser_relocs(&parser); if (r == -ENOMEM) DRM_ERROR("Not enough memory for command submission!\n"); else if (r && r != -ERESTARTSYS) DRM_ERROR("Failed to process the buffer list %d!\n", r); else if (!r) { reserved_buffers = true; - r = amdgpu_cs_ib_fill(adev, parser); + r = amdgpu_cs_ib_fill(adev, &parser); } if (!r) { - r = amdgpu_cs_dependencies(adev, parser); + r = amdgpu_cs_dependencies(adev, &parser); if (r) DRM_ERROR("Failed in the dependencies handling %d!\n", r); } @@ -865,62 +823,71 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) if (r) goto out; - for (i = 0; i < parser->num_ibs; i++) - trace_amdgpu_cs(parser, i); + for (i = 0; i < parser.num_ibs; i++) + trace_amdgpu_cs(&parser, i); - r = amdgpu_cs_ib_vm_chunk(adev, parser); + r = amdgpu_cs_ib_vm_chunk(adev, &parser); if (r) goto out; - if (amdgpu_enable_scheduler && parser->num_ibs) { + if (amdgpu_enable_scheduler && parser.num_ibs) { + struct amdgpu_ring * ring = parser.ibs->ring; + struct amd_sched_fence *fence; struct amdgpu_job *job; - struct amdgpu_ring * ring = parser->ibs->ring; + job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); if (!job) { r = -ENOMEM; goto out; } + job->base.sched = &ring->sched; - job->base.s_entity = &parser->ctx->rings[ring->idx].entity; - job->adev = parser->adev; - job->ibs = parser->ibs; - job->num_ibs = parser->num_ibs; - job->base.owner = parser->filp; - mutex_init(&job->job_lock); + job->base.s_entity = &parser.ctx->rings[ring->idx].entity; + job->adev = parser.adev; + job->owner = parser.filp; + job->free_job = amdgpu_cs_free_job; + + job->ibs = parser.ibs; + job->num_ibs = parser.num_ibs; + parser.ibs = NULL; + parser.num_ibs = 0; + if (job->ibs[job->num_ibs - 1].user) { - memcpy(&job->uf, &parser->uf, - sizeof(struct amdgpu_user_fence)); + job->uf = parser.uf; job->ibs[job->num_ibs - 
1].user = &job->uf; + parser.uf.bo = NULL; } - job->free_job = amdgpu_cs_free_job; - mutex_lock(&job->job_lock); - r = amd_sched_entity_push_job(&job->base); - if (r) { - mutex_unlock(&job->job_lock); + fence = amd_sched_fence_create(job->base.s_entity, + parser.filp); + if (!fence) { + r = -ENOMEM; amdgpu_cs_free_job(job); kfree(job); goto out; } - cs->out.handle = - amdgpu_ctx_add_fence(parser->ctx, ring, - &job->base.s_fence->base); - parser->ibs[parser->num_ibs - 1].sequence = cs->out.handle; + job->base.s_fence = fence; + parser.fence = fence_get(&fence->base); - list_sort(NULL, &parser->validated, cmp_size_smaller_first); - ttm_eu_fence_buffer_objects(&parser->ticket, - &parser->validated, - &job->base.s_fence->base); + cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring, + &fence->base); + job->ibs[job->num_ibs - 1].sequence = cs->out.handle; - mutex_unlock(&job->job_lock); - amdgpu_cs_parser_fini_late(parser); - mutex_unlock(&vm->mutex); - return 0; + trace_amdgpu_cs_ioctl(job); + amd_sched_entity_push_job(&job->base); + + } else { + struct amdgpu_fence *fence; + + r = amdgpu_ib_schedule(adev, parser.num_ibs, parser.ibs, + parser.filp); + fence = parser.ibs[parser.num_ibs - 1].fence; + parser.fence = fence_get(&fence->base); + cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence; } - cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence; out: - amdgpu_cs_parser_fini(parser, r, reserved_buffers); + amdgpu_cs_parser_fini(&parser, r, reserved_buffers); mutex_unlock(&vm->mutex); r = amdgpu_cs_handle_lockup(adev, r); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 257d72205bb5..3671f9f220bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -47,6 +47,9 @@ * that the the relevant GPU caches have been flushed. */ +static struct kmem_cache *amdgpu_fence_slab; +static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0); + /** * amdgpu_fence_write - write a fence value * @@ -85,24 +88,6 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring) } /** - * amdgpu_fence_schedule_check - schedule lockup check - * - * @ring: pointer to struct amdgpu_ring - * - * Queues a delayed work item to check for lockups. - */ -static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring) -{ - /* - * Do not reset the timer here with mod_delayed_work, - * this can livelock in an interaction with TTM delayed destroy. - */ - queue_delayed_work(system_power_efficient_wq, - &ring->fence_drv.lockup_work, - AMDGPU_FENCE_JIFFIES_TIMEOUT); -} - -/** * amdgpu_fence_emit - emit a fence on the requested ring * * @ring: ring the fence is associated with @@ -118,7 +103,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner, struct amdgpu_device *adev = ring->adev; /* we are protected by the ring emission mutex */ - *fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL); + *fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL); if ((*fence) == NULL) { return -ENOMEM; } @@ -132,11 +117,23 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner, amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, (*fence)->seq, AMDGPU_FENCE_FLAG_INT); - trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq); return 0; } /** + * amdgpu_fence_schedule_fallback - schedule fallback check + * + * @ring: pointer to struct amdgpu_ring + * + * Start a timer as fallback to our interrupts. 
+ */ +static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring) +{ + mod_timer(&ring->fence_drv.fallback_timer, + jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT); +} + +/** * amdgpu_fence_activity - check for fence activity * * @ring: pointer to struct amdgpu_ring @@ -202,45 +199,38 @@ static bool amdgpu_fence_activity(struct amdgpu_ring *ring) } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq); if (seq < last_emitted) - amdgpu_fence_schedule_check(ring); + amdgpu_fence_schedule_fallback(ring); return wake; } /** - * amdgpu_fence_check_lockup - check for hardware lockup + * amdgpu_fence_process - process a fence * - * @work: delayed work item + * @adev: amdgpu_device pointer + * @ring: ring index the fence is associated with * - * Checks for fence activity and if there is none probe - * the hardware if a lockup occured. + * Checks the current fence value and wakes the fence queue + * if the sequence number has increased (all asics). */ -static void amdgpu_fence_check_lockup(struct work_struct *work) +void amdgpu_fence_process(struct amdgpu_ring *ring) { - struct amdgpu_fence_driver *fence_drv; - struct amdgpu_ring *ring; - - fence_drv = container_of(work, struct amdgpu_fence_driver, - lockup_work.work); - ring = fence_drv->ring; - if (amdgpu_fence_activity(ring)) wake_up_all(&ring->fence_drv.fence_queue); } /** - * amdgpu_fence_process - process a fence + * amdgpu_fence_fallback - fallback for hardware interrupts * - * @adev: amdgpu_device pointer - * @ring: ring index the fence is associated with + * @work: delayed work item * - * Checks the current fence value and wakes the fence queue - * if the sequence number has increased (all asics). + * Checks for fence activity. */ -void amdgpu_fence_process(struct amdgpu_ring *ring) +static void amdgpu_fence_fallback(unsigned long arg) { - if (amdgpu_fence_activity(ring)) - wake_up_all(&ring->fence_drv.fence_queue); + struct amdgpu_ring *ring = (void *)arg; + + amdgpu_fence_process(ring); } /** @@ -290,7 +280,7 @@ static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq) if (atomic64_read(&ring->fence_drv.last_seq) >= seq) return 0; - amdgpu_fence_schedule_check(ring); + amdgpu_fence_schedule_fallback(ring); wait_event(ring->fence_drv.fence_queue, ( (signaled = amdgpu_fence_seq_signaled(ring, seq)))); @@ -491,9 +481,8 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) atomic64_set(&ring->fence_drv.last_seq, 0); ring->fence_drv.initialized = false; - INIT_DELAYED_WORK(&ring->fence_drv.lockup_work, - amdgpu_fence_check_lockup); - ring->fence_drv.ring = ring; + setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, + (unsigned long)ring); init_waitqueue_head(&ring->fence_drv.fence_queue); @@ -536,6 +525,13 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) */ int amdgpu_fence_driver_init(struct amdgpu_device *adev) { + if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) { + amdgpu_fence_slab = kmem_cache_create( + "amdgpu_fence", sizeof(struct amdgpu_fence), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!amdgpu_fence_slab) + return -ENOMEM; + } if (amdgpu_debugfs_fence_init(adev)) dev_err(adev->dev, "fence debugfs file creation failed\n"); @@ -554,9 +550,12 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) { int i, r; + if (atomic_dec_and_test(&amdgpu_fence_slab_ref)) + kmem_cache_destroy(amdgpu_fence_slab); mutex_lock(&adev->ring_lock); for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; + if (!ring || !ring->fence_drv.initialized) continue; r 
= amdgpu_fence_wait_empty(ring); @@ -568,6 +567,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) amdgpu_irq_put(adev, ring->fence_drv.irq_src, ring->fence_drv.irq_type); amd_sched_fini(&ring->sched); + del_timer_sync(&ring->fence_drv.fallback_timer); ring->fence_drv.initialized = false; } mutex_unlock(&adev->ring_lock); @@ -751,18 +751,25 @@ static bool amdgpu_fence_enable_signaling(struct fence *f) fence->fence_wake.func = amdgpu_fence_check_signaled; __add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake); fence_get(f); - amdgpu_fence_schedule_check(ring); + if (!timer_pending(&ring->fence_drv.fallback_timer)) + amdgpu_fence_schedule_fallback(ring); FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx); return true; } +static void amdgpu_fence_release(struct fence *f) +{ + struct amdgpu_fence *fence = to_amdgpu_fence(f); + kmem_cache_free(amdgpu_fence_slab, fence); +} + const struct fence_ops amdgpu_fence_ops = { .get_driver_name = amdgpu_fence_get_driver_name, .get_timeline_name = amdgpu_fence_get_timeline_name, .enable_signaling = amdgpu_fence_enable_signaling, .signaled = amdgpu_fence_is_signaled, .wait = fence_default_wait, - .release = NULL, + .release = amdgpu_fence_release, }; /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 087332858853..00c5b580f56c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -483,6 +483,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, if (domain == AMDGPU_GEM_DOMAIN_CPU) goto error_unreserve; } + r = amdgpu_vm_update_page_directory(adev, bo_va->vm); + if (r) + goto error_unreserve; r = amdgpu_vm_clear_freed(adev, bo_va->vm); if (r) @@ -512,6 +515,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, struct amdgpu_fpriv *fpriv = filp->driver_priv; struct amdgpu_bo *rbo; struct amdgpu_bo_va *bo_va; + struct ttm_validate_buffer tv, tv_pd; + struct ww_acquire_ctx ticket; + struct list_head list, duplicates; uint32_t invalid_flags, va_flags = 0; int r = 0; @@ -549,7 +555,18 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, return -ENOENT; mutex_lock(&fpriv->vm.mutex); rbo = gem_to_amdgpu_bo(gobj); - r = amdgpu_bo_reserve(rbo, false); + INIT_LIST_HEAD(&list); + INIT_LIST_HEAD(&duplicates); + tv.bo = &rbo->tbo; + tv.shared = true; + list_add(&tv.head, &list); + + if (args->operation == AMDGPU_VA_OP_MAP) { + tv_pd.bo = &fpriv->vm.page_directory->tbo; + tv_pd.shared = true; + list_add(&tv_pd.head, &list); + } + r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); if (r) { mutex_unlock(&fpriv->vm.mutex); drm_gem_object_unreference_unlocked(gobj); @@ -558,7 +575,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo); if (!bo_va) { - amdgpu_bo_unreserve(rbo); + ttm_eu_backoff_reservation(&ticket, &list); + drm_gem_object_unreference_unlocked(gobj); mutex_unlock(&fpriv->vm.mutex); return -ENOENT; } @@ -581,7 +599,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, default: break; } - + ttm_eu_backoff_reservation(&ticket, &list); if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) amdgpu_gem_va_update_vm(adev, bo_va, args->operation); mutex_unlock(&fpriv->vm.mutex); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index e65987743871..9e25edafa721 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -62,7 +62,7 @@ int 
amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm, int r; if (size) { - r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo, + r = amdgpu_sa_bo_new(&adev->ring_tmp_bo, &ib->sa_bo, size, 256); if (r) { dev_err(adev->dev, "failed to get a new IB (%d)\n", r); @@ -216,7 +216,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, } if (ib->vm) - amdgpu_vm_fence(adev, ib->vm, ib->fence); + amdgpu_vm_fence(adev, ib->vm, &ib->fence->base); amdgpu_ring_unlock_commit(ring); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 3c2ff4567798..ea756e77b023 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -189,10 +189,9 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev, struct amdgpu_sa_manager *sa_manager); int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev, struct amdgpu_sa_manager *sa_manager); -int amdgpu_sa_bo_new(struct amdgpu_device *adev, - struct amdgpu_sa_manager *sa_manager, - struct amdgpu_sa_bo **sa_bo, - unsigned size, unsigned align); +int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, + struct amdgpu_sa_bo **sa_bo, + unsigned size, unsigned align); void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo, struct fence *fence); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 0212b31dc194..8b88edb0434b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -311,8 +311,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager, return false; } -int amdgpu_sa_bo_new(struct amdgpu_device *adev, - struct amdgpu_sa_manager *sa_manager, +int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, struct amdgpu_sa_bo **sa_bo, unsigned size, unsigned align) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index dcf4a8aca680..438c05254695 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c @@ -26,6 +26,7 @@ #include <linux/sched.h> #include <drm/drmP.h> #include "amdgpu.h" +#include "amdgpu_trace.h" static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job) { @@ -44,11 +45,8 @@ static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job) return NULL; } job = to_amdgpu_job(sched_job); - mutex_lock(&job->job_lock); - r = amdgpu_ib_schedule(job->adev, - job->num_ibs, - job->ibs, - job->base.owner); + trace_amdgpu_sched_run_job(job); + r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs, job->owner); if (r) { DRM_ERROR("Error scheduling IBs (%d)\n", r); goto err; @@ -61,8 +59,6 @@ err: if (job->free_job) job->free_job(job); - mutex_unlock(&job->job_lock); - fence_put(&job->base.s_fence->base); kfree(job); return fence ? 
&fence->base : NULL; } @@ -88,21 +84,19 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, return -ENOMEM; job->base.sched = &ring->sched; job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity; + job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner); + if (!job->base.s_fence) { + kfree(job); + return -ENOMEM; + } + *f = fence_get(&job->base.s_fence->base); + job->adev = adev; job->ibs = ibs; job->num_ibs = num_ibs; - job->base.owner = owner; - mutex_init(&job->job_lock); + job->owner = owner; job->free_job = free_job; - mutex_lock(&job->job_lock); - r = amd_sched_entity_push_job(&job->base); - if (r) { - mutex_unlock(&job->job_lock); - kfree(job); - return r; - } - *f = fence_get(&job->base.s_fence->base); - mutex_unlock(&job->job_lock); + amd_sched_entity_push_job(&job->base); } else { r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c index ff3ca52ec6fe..1caaf201b708 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c @@ -40,7 +40,7 @@ int amdgpu_semaphore_create(struct amdgpu_device *adev, if (*semaphore == NULL) { return -ENOMEM; } - r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo, + r = amdgpu_sa_bo_new(&adev->ring_tmp_bo, &(*semaphore)->sa_bo, 8, 8); if (r) { kfree(*semaphore); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index a6697fd05217..dd005c336c97 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -302,8 +302,14 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync, return -EINVAL; } - if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores || - (count >= AMDGPU_NUM_SYNCS)) { + if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) { + r = fence_wait(&fence->base, true); + if (r) + return r; + continue; + } + + if (count >= AMDGPU_NUM_SYNCS) { /* not enough room, wait manually */ r = fence_wait(&fence->base, false); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 76ecbaf72a2e..8f9834ab1bd5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -48,6 +48,57 @@ TRACE_EVENT(amdgpu_cs, __entry->fences) ); +TRACE_EVENT(amdgpu_cs_ioctl, + TP_PROTO(struct amdgpu_job *job), + TP_ARGS(job), + TP_STRUCT__entry( + __field(struct amdgpu_device *, adev) + __field(struct amd_sched_job *, sched_job) + __field(struct amdgpu_ib *, ib) + __field(struct fence *, fence) + __field(char *, ring_name) + __field(u32, num_ibs) + ), + + TP_fast_assign( + __entry->adev = job->adev; + __entry->sched_job = &job->base; + __entry->ib = job->ibs; + __entry->fence = &job->base.s_fence->base; + __entry->ring_name = job->ibs[0].ring->name; + __entry->num_ibs = job->num_ibs; + ), + TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u", + __entry->adev, __entry->sched_job, __entry->ib, + __entry->fence, __entry->ring_name, __entry->num_ibs) +); + +TRACE_EVENT(amdgpu_sched_run_job, + TP_PROTO(struct amdgpu_job *job), + TP_ARGS(job), + TP_STRUCT__entry( + __field(struct amdgpu_device *, adev) + __field(struct amd_sched_job *, sched_job) + __field(struct amdgpu_ib *, ib) + __field(struct fence *, fence) + __field(char *, ring_name) + __field(u32, num_ibs) + ), + + TP_fast_assign( + __entry->adev = job->adev; + __entry->sched_job = &job->base; + 
__entry->ib = job->ibs; + __entry->fence = &job->base.s_fence->base; + __entry->ring_name = job->ibs[0].ring->name; + __entry->num_ibs = job->num_ibs; + ), + TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u", + __entry->adev, __entry->sched_job, __entry->ib, + __entry->fence, __entry->ring_name, __entry->num_ibs) +); + + TRACE_EVENT(amdgpu_vm_grab_id, TP_PROTO(unsigned vmid, int ring), TP_ARGS(vmid, ring), @@ -196,49 +247,6 @@ TRACE_EVENT(amdgpu_bo_list_set, TP_printk("list=%p, bo=%p", __entry->list, __entry->bo) ); -DECLARE_EVENT_CLASS(amdgpu_fence_request, - - TP_PROTO(struct drm_device *dev, int ring, u32 seqno), - - TP_ARGS(dev, ring, seqno), - - TP_STRUCT__entry( - __field(u32, dev) - __field(int, ring) - __field(u32, seqno) - ), - - TP_fast_assign( - __entry->dev = dev->primary->index; - __entry->ring = ring; - __entry->seqno = seqno; - ), - - TP_printk("dev=%u, ring=%d, seqno=%u", - __entry->dev, __entry->ring, __entry->seqno) -); - -DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_emit, - - TP_PROTO(struct drm_device *dev, int ring, u32 seqno), - - TP_ARGS(dev, ring, seqno) -); - -DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_begin, - - TP_PROTO(struct drm_device *dev, int ring, u32 seqno), - - TP_ARGS(dev, ring, seqno) -); - -DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_end, - - TP_PROTO(struct drm_device *dev, int ring, u32 seqno), - - TP_ARGS(dev, ring, seqno) -); - DECLARE_EVENT_CLASS(amdgpu_semaphore_request, TP_PROTO(int ring, struct amdgpu_semaphore *sem), diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 81bb8e9fc26d..d4bac5f49939 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1073,10 +1073,10 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data) ret = drm_mm_dump_table(m, mm); spin_unlock(&glob->lru_lock); if (ttm_pl == TTM_PL_VRAM) - seq_printf(m, "man size:%llu pages, ram usage:%luMB, vis usage:%luMB\n", + seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n", adev->mman.bdev.man[ttm_pl].size, - atomic64_read(&adev->vram_usage) >> 20, - atomic64_read(&adev->vram_vis_usage) >> 20); + (u64)atomic64_read(&adev->vram_usage) >> 20, + (u64)atomic64_read(&adev->vram_vis_usage) >> 20); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 633a32a48560..159ce54bbd8d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -143,10 +143,15 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, unsigned i; /* check if the id is still valid */ - if (vm_id->id && vm_id->last_id_use && - vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) { - trace_amdgpu_vm_grab_id(vm_id->id, ring->idx); - return 0; + if (vm_id->id) { + unsigned id = vm_id->id; + long owner; + + owner = atomic_long_read(&adev->vm_manager.ids[id].owner); + if (owner == (long)vm) { + trace_amdgpu_vm_grab_id(vm_id->id, ring->idx); + return 0; + } } /* we definately need to flush */ @@ -154,7 +159,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, /* skip over VMID 0, since it is the system VM */ for (i = 1; i < adev->vm_manager.nvm; ++i) { - struct fence *fence = adev->vm_manager.active[i]; + struct fence *fence = adev->vm_manager.ids[i].active; struct amdgpu_ring *fring; if (fence == NULL) { @@ -176,7 +181,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, if 
(choices[i]) { struct fence *fence; - fence = adev->vm_manager.active[choices[i]]; + fence = adev->vm_manager.ids[choices[i]].active; vm_id->id = choices[i]; trace_amdgpu_vm_grab_id(choices[i], ring->idx); @@ -207,24 +212,21 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring, uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; struct fence *flushed_updates = vm_id->flushed_updates; - bool is_earlier = false; - - if (flushed_updates && updates) { - BUG_ON(flushed_updates->context != updates->context); - is_earlier = (updates->seqno - flushed_updates->seqno <= - INT_MAX) ? true : false; - } + bool is_later; - if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates || - is_earlier) { + if (!flushed_updates) + is_later = true; + else if (!updates) + is_later = false; + else + is_later = fence_is_later(updates, flushed_updates); + if (pd_addr != vm_id->pd_gpu_addr || is_later) { trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id); - if (is_earlier) { + if (is_later) { vm_id->flushed_updates = fence_get(updates); fence_put(flushed_updates); } - if (!flushed_updates) - vm_id->flushed_updates = fence_get(updates); vm_id->pd_gpu_addr = pd_addr; amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr); } @@ -244,16 +246,14 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring, */ void amdgpu_vm_fence(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_fence *fence) + struct fence *fence) { - unsigned ridx = fence->ring->idx; - unsigned vm_id = vm->ids[ridx].id; - - fence_put(adev->vm_manager.active[vm_id]); - adev->vm_manager.active[vm_id] = fence_get(&fence->base); + struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence); + unsigned vm_id = vm->ids[ring->idx].id; - fence_put(vm->ids[ridx].last_id_use); - vm->ids[ridx].last_id_use = fence_get(&fence->base); + fence_put(adev->vm_manager.ids[vm_id].active); + adev->vm_manager.ids[vm_id].active = fence_get(fence); + atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm); } /** @@ -332,6 +332,8 @@ int amdgpu_vm_free_job(struct amdgpu_job *job) * * @adev: amdgpu_device pointer * @bo: bo to clear + * + * need to reserve bo first before calling it. */ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, struct amdgpu_bo *bo) @@ -343,24 +345,20 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, uint64_t addr; int r; - r = amdgpu_bo_reserve(bo, false); - if (r) - return r; - r = reservation_object_reserve_shared(bo->tbo.resv); if (r) return r; r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); if (r) - goto error_unreserve; + goto error; addr = amdgpu_bo_gpu_offset(bo); entries = amdgpu_bo_size(bo) / 8; ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); if (!ib) - goto error_unreserve; + goto error; r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib); if (r) @@ -378,16 +376,14 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, if (!r) amdgpu_bo_fence(bo, fence, true); fence_put(fence); - if (amdgpu_enable_scheduler) { - amdgpu_bo_unreserve(bo); + if (amdgpu_enable_scheduler) return 0; - } + error_free: amdgpu_ib_free(adev, ib); kfree(ib); -error_unreserve: - amdgpu_bo_unreserve(bo); +error: return r; } @@ -989,7 +985,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, * Add a mapping of the BO at the specefied addr into the VM. * Returns 0 for success, error for failure. * - * Object has to be reserved and gets unreserved by this function! + * Object has to be reserved and unreserved outside! 
*/ int amdgpu_vm_bo_map(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, @@ -1005,30 +1001,27 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, /* validate the parameters */ if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || - size == 0 || size & AMDGPU_GPU_PAGE_MASK) { - amdgpu_bo_unreserve(bo_va->bo); + size == 0 || size & AMDGPU_GPU_PAGE_MASK) return -EINVAL; - } /* make sure object fit at this offset */ eaddr = saddr + size; - if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) { - amdgpu_bo_unreserve(bo_va->bo); + if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) return -EINVAL; - } last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE; if (last_pfn > adev->vm_manager.max_pfn) { dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n", last_pfn, adev->vm_manager.max_pfn); - amdgpu_bo_unreserve(bo_va->bo); return -EINVAL; } saddr /= AMDGPU_GPU_PAGE_SIZE; eaddr /= AMDGPU_GPU_PAGE_SIZE; + spin_lock(&vm->it_lock); it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1); + spin_unlock(&vm->it_lock); if (it) { struct amdgpu_bo_va_mapping *tmp; tmp = container_of(it, struct amdgpu_bo_va_mapping, it); @@ -1036,14 +1029,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr, tmp->it.start, tmp->it.last + 1); - amdgpu_bo_unreserve(bo_va->bo); r = -EINVAL; goto error; } mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); if (!mapping) { - amdgpu_bo_unreserve(bo_va->bo); r = -ENOMEM; goto error; } @@ -1055,7 +1046,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, mapping->flags = flags; list_add(&mapping->list, &bo_va->invalids); + spin_lock(&vm->it_lock); interval_tree_insert(&mapping->it, &vm->va); + spin_unlock(&vm->it_lock); trace_amdgpu_vm_bo_map(bo_va, mapping); /* Make sure the page tables are allocated */ @@ -1067,8 +1060,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, if (eaddr > vm->max_pde_used) vm->max_pde_used = eaddr; - amdgpu_bo_unreserve(bo_va->bo); - /* walk over the address space and allocate the page tables */ for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { struct reservation_object *resv = vm->page_directory->tbo.resv; @@ -1077,13 +1068,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, if (vm->page_tables[pt_idx].bo) continue; - ww_mutex_lock(&resv->lock, NULL); r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, AMDGPU_GPU_PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_NO_CPU_ACCESS, NULL, resv, &pt); - ww_mutex_unlock(&resv->lock); if (r) goto error_free; @@ -1101,7 +1090,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, error_free: list_del(&mapping->list); + spin_lock(&vm->it_lock); interval_tree_remove(&mapping->it, &vm->va); + spin_unlock(&vm->it_lock); trace_amdgpu_vm_bo_unmap(bo_va, mapping); kfree(mapping); @@ -1119,7 +1110,7 @@ error: * Remove a mapping of the BO at the specefied addr from the VM. * Returns 0 for success, error for failure. * - * Object has to be reserved and gets unreserved by this function! + * Object has to be reserved and unreserved outside! 
*/ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, @@ -1144,21 +1135,20 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, break; } - if (&mapping->list == &bo_va->invalids) { - amdgpu_bo_unreserve(bo_va->bo); + if (&mapping->list == &bo_va->invalids) return -ENOENT; - } } list_del(&mapping->list); + spin_lock(&vm->it_lock); interval_tree_remove(&mapping->it, &vm->va); + spin_unlock(&vm->it_lock); trace_amdgpu_vm_bo_unmap(bo_va, mapping); if (valid) list_add(&mapping->list, &vm->freed); else kfree(mapping); - amdgpu_bo_unreserve(bo_va->bo); return 0; } @@ -1187,13 +1177,17 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { list_del(&mapping->list); + spin_lock(&vm->it_lock); interval_tree_remove(&mapping->it, &vm->va); + spin_unlock(&vm->it_lock); trace_amdgpu_vm_bo_unmap(bo_va, mapping); list_add(&mapping->list, &vm->freed); } list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { list_del(&mapping->list); + spin_lock(&vm->it_lock); interval_tree_remove(&mapping->it, &vm->va); + spin_unlock(&vm->it_lock); kfree(mapping); } @@ -1241,7 +1235,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { vm->ids[i].id = 0; vm->ids[i].flushed_updates = NULL; - vm->ids[i].last_id_use = NULL; } mutex_init(&vm->mutex); vm->va = RB_ROOT; @@ -1249,7 +1242,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) INIT_LIST_HEAD(&vm->invalidated); INIT_LIST_HEAD(&vm->cleared); INIT_LIST_HEAD(&vm->freed); - + spin_lock_init(&vm->it_lock); pd_size = amdgpu_vm_directory_size(adev); pd_entries = amdgpu_vm_num_pdes(adev); @@ -1269,8 +1262,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) NULL, NULL, &vm->page_directory); if (r) return r; - + r = amdgpu_bo_reserve(vm->page_directory, false); + if (r) { + amdgpu_bo_unref(&vm->page_directory); + vm->page_directory = NULL; + return r; + } r = amdgpu_vm_clear_bo(adev, vm->page_directory); + amdgpu_bo_unreserve(vm->page_directory); if (r) { amdgpu_bo_unref(&vm->page_directory); vm->page_directory = NULL; @@ -1313,11 +1312,28 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) amdgpu_bo_unref(&vm->page_directory); fence_put(vm->page_directory_fence); - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { + unsigned id = vm->ids[i].id; + + atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner, + (long)vm, 0); fence_put(vm->ids[i].flushed_updates); - fence_put(vm->ids[i].last_id_use); } mutex_destroy(&vm->mutex); } + +/** + * amdgpu_vm_manager_fini - cleanup VM manager + * + * @adev: amdgpu_device pointer + * + * Cleanup the VM manager and free resources. 
+ */ +void amdgpu_vm_manager_fini(struct amdgpu_device *adev) +{ + unsigned i; + + for (i = 0; i < AMDGPU_NUM_VM; ++i) + fence_put(adev->vm_manager.ids[i].active); +} diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index a1a35a5df8e7..57a2e347f04d 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -6569,12 +6569,12 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev, switch (state) { case AMDGPU_IRQ_STATE_DISABLE: cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); - cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; + cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); break; case AMDGPU_IRQ_STATE_ENABLE: cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); - cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; + cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); break; default: @@ -6586,12 +6586,12 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev, switch (state) { case AMDGPU_IRQ_STATE_DISABLE: cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); - cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; + cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); break; case AMDGPU_IRQ_STATE_ENABLE: cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); - cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; + cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 6776cf756d40..e1dcab98e249 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -268,7 +268,6 @@ static const u32 fiji_mgcg_cgcg_init[] = mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100, mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100, mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100, - mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100, mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100, mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100, mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100, @@ -296,10 +295,6 @@ static const u32 fiji_mgcg_cgcg_init[] = mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200, mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100, mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c, - mmPCIE_INDEX, 0xffffffff, 0x0140001c, - mmPCIE_DATA, 0x000f0000, 0x00000000, - mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100, - mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, }; @@ -1000,7 +995,7 @@ static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev) adev->gfx.config.max_cu_per_sh = 16; adev->gfx.config.max_sh_per_se = 1; adev->gfx.config.max_backends_per_se = 4; - adev->gfx.config.max_texture_channel_caches = 8; + adev->gfx.config.max_texture_channel_caches = 16; adev->gfx.config.max_gprs = 256; adev->gfx.config.max_gs_threads = 32; adev->gfx.config.max_hw_contexts = 8; @@ -1613,6 +1608,296 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev) WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); } case CHIP_FIJI: + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { + switch (reg_offset) { + case 0: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + 
case 1: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 2: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 3: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 4: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 5: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 6: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 7: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 8: + gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16)); + break; + case 9: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 10: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 11: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 12: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 13: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 14: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 15: + gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 16: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 17: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 18: + gb_tile_moden = 
(ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 19: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 20: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 21: + gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 22: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 23: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 24: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 25: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 26: + gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 27: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 28: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 29: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 30: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + default: + gb_tile_moden = 0; + break; + } + adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; + WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); + } + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { + switch (reg_offset) { + case 0: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 1: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 2: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + 
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 3: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 4: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 5: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 6: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 8: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 9: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 10: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 11: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 12: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 13: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 14: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + break; + case 7: + /* unused idx */ + continue; + default: + gb_tile_moden = 0; + break; + } + adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; + WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); + } + break; case CHIP_TONGA: for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { switch (reg_offset) { @@ -2971,10 +3256,13 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev) amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); switch (adev->asic_type) { case CHIP_TONGA: - case CHIP_FIJI: amdgpu_ring_write(ring, 0x16000012); amdgpu_ring_write(ring, 0x0000002A); break; + case CHIP_FIJI: + amdgpu_ring_write(ring, 0x3a00161a); + amdgpu_ring_write(ring, 0x0000002e); + break; case CHIP_TOPAZ: case CHIP_CARRIZO: amdgpu_ring_write(ring, 0x00000002); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 85bbcdc73fff..7427d8cd4c43 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -40,7 +40,7 @@ static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev); static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); -MODULE_FIRMWARE("radeon/boniare_mc.bin"); +MODULE_FIRMWARE("radeon/bonaire_mc.bin"); 
MODULE_FIRMWARE("radeon/hawaii_mc.bin"); /** @@ -501,6 +501,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1); WREG32(mmVM_L2_CNTL, tmp); tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); @@ -960,12 +961,10 @@ static int gmc_v7_0_sw_init(void *handle) static int gmc_v7_0_sw_fini(void *handle) { - int i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->vm_manager.enabled) { - for (i = 0; i < AMDGPU_NUM_VM; ++i) - fence_put(adev->vm_manager.active[i]); + amdgpu_vm_manager_fini(adev); gmc_v7_0_vm_fini(adev); adev->vm_manager.enabled = false; } @@ -1010,12 +1009,10 @@ static int gmc_v7_0_hw_fini(void *handle) static int gmc_v7_0_suspend(void *handle) { - int i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->vm_manager.enabled) { - for (i = 0; i < AMDGPU_NUM_VM; ++i) - fence_put(adev->vm_manager.active[i]); + amdgpu_vm_manager_fini(adev); gmc_v7_0_vm_fini(adev); adev->vm_manager.enabled = false; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 1bcc4e74e3b4..cb0e50ebb528 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -629,6 +629,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1); WREG32(mmVM_L2_CNTL, tmp); tmp = RREG32(mmVM_L2_CNTL2); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); @@ -979,12 +980,10 @@ static int gmc_v8_0_sw_init(void *handle) static int gmc_v8_0_sw_fini(void *handle) { - int i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->vm_manager.enabled) { - for (i = 0; i < AMDGPU_NUM_VM; ++i) - fence_put(adev->vm_manager.active[i]); + amdgpu_vm_manager_fini(adev); gmc_v8_0_vm_fini(adev); adev->vm_manager.enabled = false; } @@ -1031,12 +1030,10 @@ static int gmc_v8_0_hw_fini(void *handle) static int gmc_v8_0_suspend(void *handle) { - int i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->vm_manager.enabled) { - for (i = 0; i < AMDGPU_NUM_VM; ++i) - fence_put(adev->vm_manager.active[i]); + amdgpu_vm_manager_fini(adev); gmc_v8_0_vm_fini(adev); adev->vm_manager.enabled = false; } |