Diffstat (limited to 'drivers/gpu/drm')
131 files changed, 1552 insertions, 1035 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 668939a14206..0d13e6368b96 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -82,6 +82,7 @@ extern int amdgpu_vm_block_size;
 extern int amdgpu_enable_scheduler;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
+extern int amdgpu_enable_semaphores;
 
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	        3000
 #define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
@@ -432,7 +433,7 @@ int amdgpu_fence_driver_init(struct amdgpu_device *adev);
 void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
 void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
 
-void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type);
@@ -890,7 +891,7 @@ struct amdgpu_ring {
 	struct amdgpu_device		*adev;
 	const struct amdgpu_ring_funcs	*funcs;
 	struct amdgpu_fence_driver	fence_drv;
-	struct amd_gpu_scheduler 	*scheduler;
+	struct amd_gpu_scheduler 	sched;
 
 	spinlock_t		fence_lock;
 	struct mutex		*ring_lock;
@@ -1201,8 +1202,6 @@ struct amdgpu_gfx {
 	struct amdgpu_irq_src		priv_inst_irq;
 	/* gfx status */
 	uint32_t 			gfx_current_status;
-	/* sync signal for const engine */
-	unsigned 			ce_sync_offs;
 	/* ce ram size*/
 	unsigned 			ce_ram_size;
 };
@@ -1274,8 +1273,10 @@ struct amdgpu_job {
 	uint32_t		num_ibs;
 	struct mutex            job_lock;
 	struct amdgpu_user_fence uf;
-	int (*free_job)(struct amdgpu_job *sched_job);
+	int (*free_job)(struct amdgpu_job *job);
 };
+#define to_amdgpu_job(sched_job)		\
+		container_of((sched_job), struct amdgpu_job, base)
 
 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
 {
@@ -1653,6 +1654,7 @@ struct amdgpu_pm {
 	u8                      fan_max_rpm;
 	/* dpm */
 	bool                    dpm_enabled;
+	bool                    sysfs_initialized;
 	struct amdgpu_dpm       dpm;
 	const struct firmware	*fw;	/* SMC firmware */
 	uint32_t                fw_version;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 496ed2192eba..84d68d658f8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -183,7 +183,7 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 		return -ENOMEM;
 
 	r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
-			     AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, &(*mem)->bo);
+			     AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo);
 	if (r) {
 		dev_err(rdev->dev,
 			"failed to allocate BO for amdkfd (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 77f1d7c6ea3a..9416e0f5c1db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -672,8 +672,12 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
 		/* disp clock */
 		adev->clock.default_dispclk =
 			le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
-		if (adev->clock.default_dispclk == 0)
-			adev->clock.default_dispclk = 54000; /* 540 Mhz */
+		/* set a reasonable default for DP */
+		if (adev->clock.default_dispclk < 53900) {
+			DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
+				 adev->clock.default_dispclk / 100);
+			adev->clock.default_dispclk = 60000;
+		}
 		adev->clock.dp_extclk =
 			le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
 		adev->clock.current_dispclk = adev->clock.default_dispclk;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 98d59ee640ce..cd639c362df3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -79,7 +79,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
 	int time;
 
 	n = AMDGPU_BENCHMARK_ITERATIONS;
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj);
+	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL,
+			     NULL, &sobj);
 	if (r) {
 		goto out_cleanup;
 	}
@@ -91,7 +92,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
 	if (r) {
 		goto out_cleanup;
 	}
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj);
+	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL,
+			     NULL, &dobj);
 	if (r) {
 		goto out_cleanup;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 6b1243f9f86d..8e995148f56e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -86,7 +86,7 @@ static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
 	struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
 	ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
-			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo);
+			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
 	if (ret)
 		return ret;
 	ret = amdgpu_bo_reserve(bo, false);
@@ -197,7 +197,8 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
 
 	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
 					  true, domain, flags,
-					  NULL, &placement, &obj);
+					  NULL, &placement, NULL,
+					  &obj);
 	if (ret) {
 		DRM_ERROR("(%d) bo create failed\n", ret);
 		return ret;
@@ -207,44 +208,6 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
 	return ret;
 }
 
-static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd,
-				     cgs_handle_t *handle)
-{
-	CGS_FUNC_ADEV;
-	int r;
-	uint32_t dma_handle;
-	struct drm_gem_object *obj;
-	struct amdgpu_bo *bo;
-	struct drm_device *dev = adev->ddev;
-	struct drm_file *file_priv = NULL, *priv;
-
-	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry(priv, &dev->filelist, lhead) {
-		rcu_read_lock();
-		if (priv->pid == get_pid(task_pid(current)))
-			file_priv = priv;
-		rcu_read_unlock();
-		if (file_priv)
-			break;
-	}
-	mutex_unlock(&dev->struct_mutex);
-	r = dev->driver->prime_fd_to_handle(dev,
-					    file_priv, dmabuf_fd,
-					    &dma_handle);
-	spin_lock(&file_priv->table_lock);
-
-	/* Check if we currently have a reference on the object */
-	obj = idr_find(&file_priv->object_idr, dma_handle);
-	if (obj == NULL) {
-		spin_unlock(&file_priv->table_lock);
-		return -EINVAL;
-	}
-	spin_unlock(&file_priv->table_lock);
-	bo = gem_to_amdgpu_bo(obj);
-	*handle = (cgs_handle_t)bo;
-	return 0;
-}
-
 static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
 {
 	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
@@ -809,7 +772,6 @@ static const struct cgs_ops amdgpu_cgs_ops = {
 };
 
 static const struct cgs_os_ops amdgpu_cgs_os_ops = {
-	amdgpu_cgs_import_gpu_mem,
 	amdgpu_cgs_add_irq_source,
 	amdgpu_cgs_irq_get,
 	amdgpu_cgs_irq_put
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 3b355aeb62fd..fd16652aa277 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -154,42 +154,42 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 {
 	union drm_amdgpu_cs *cs = data;
 	uint64_t *chunk_array_user;
-	uint64_t *chunk_array = NULL;
+	uint64_t *chunk_array;
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
-	unsigned size, i;
-	int r = 0;
+	unsigned size;
+	int i;
+	int ret;
 
-	if (!cs->in.num_chunks)
-		goto out;
+	if (cs->in.num_chunks == 0)
+		return 0;
+
+	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
+	if (!chunk_array)
+		return -ENOMEM;
 
 	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
 	if (!p->ctx) {
-		r = -EINVAL;
-		goto out;
+		ret = -EINVAL;
+		goto free_chunk;
 	}
+
 	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
 
 	/* get chunks */
 	INIT_LIST_HEAD(&p->validated);
-	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
-	if (chunk_array == NULL) {
-		r = -ENOMEM;
-		goto out;
-	}
-
-	chunk_array_user = (uint64_t __user *)(cs->in.chunks);
+	chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks);
 	if (copy_from_user(chunk_array, chunk_array_user,
 			   sizeof(uint64_t)*cs->in.num_chunks)) {
-		r = -EFAULT;
-		goto out;
+		ret = -EFAULT;
+		goto put_bo_list;
 	}
 
 	p->nchunks = cs->in.num_chunks;
 	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
			    GFP_KERNEL);
-	if (p->chunks == NULL) {
-		r = -ENOMEM;
-		goto out;
+	if (!p->chunks) {
+		ret = -ENOMEM;
+		goto put_bo_list;
 	}
 
 	for (i = 0; i < p->nchunks; i++) {
@@ -197,28 +197,30 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		struct drm_amdgpu_cs_chunk user_chunk;
 		uint32_t __user *cdata;
 
-		chunk_ptr = (void __user *)chunk_array[i];
+		chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
 		if (copy_from_user(&user_chunk, chunk_ptr,
				       sizeof(struct drm_amdgpu_cs_chunk))) {
-			r = -EFAULT;
-			goto out;
+			ret = -EFAULT;
+			i--;
+			goto free_partial_kdata;
 		}
 		p->chunks[i].chunk_id = user_chunk.chunk_id;
 		p->chunks[i].length_dw = user_chunk.length_dw;
 
 		size = p->chunks[i].length_dw;
-		cdata = (void __user *)user_chunk.chunk_data;
+		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
 		p->chunks[i].user_ptr = cdata;
 
 		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
 		if (p->chunks[i].kdata == NULL) {
-			r = -ENOMEM;
-			goto out;
+			ret = -ENOMEM;
+			i--;
+			goto free_partial_kdata;
 		}
 		size *= sizeof(uint32_t);
 		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
-			r = -EFAULT;
-			goto out;
+			ret = -EFAULT;
+			goto free_partial_kdata;
 		}
 
 		switch (p->chunks[i].chunk_id) {
@@ -238,15 +240,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 			gobj = drm_gem_object_lookup(p->adev->ddev,
						     p->filp, handle);
 			if (gobj == NULL) {
-				r = -EINVAL;
-				goto out;
+				ret = -EINVAL;
+				goto free_partial_kdata;
 			}
 
 			p->uf.bo = gem_to_amdgpu_bo(gobj);
 			p->uf.offset = fence_data->offset;
 		} else {
-			r = -EINVAL;
-			goto out;
+			ret = -EINVAL;
+			goto free_partial_kdata;
 		}
 		break;
 
@@ -254,19 +256,35 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 			break;
 
 		default:
-			r = -EINVAL;
-			goto out;
+			ret = -EINVAL;
+			goto free_partial_kdata;
 		}
 	}
 
 	p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
-	if (!p->ibs)
-		r = -ENOMEM;
+	if (!p->ibs) {
+		ret = -ENOMEM;
+		goto free_all_kdata;
+	}
 
-out:
 	kfree(chunk_array);
-	return r;
+	return 0;
+
+free_all_kdata:
+	i = p->nchunks - 1;
+free_partial_kdata:
+	for (; i >= 0; i--)
+		drm_free_large(p->chunks[i].kdata);
+	kfree(p->chunks);
+put_bo_list:
+	if (p->bo_list)
+		amdgpu_bo_list_put(p->bo_list);
+	amdgpu_ctx_put(p->ctx);
+free_chunk:
+	kfree(chunk_array);
+
+	return ret;
 }
 
 /* Returns how many bytes TTM can move per IB.
@@ -321,25 +339,17 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
 	return max(bytes_moved_threshold, 1024*1024ull);
 }
 
-int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
+int amdgpu_cs_list_validate(struct amdgpu_device *adev,
+			    struct amdgpu_vm *vm,
+			    struct list_head *validated)
 {
-	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
-	struct amdgpu_vm *vm = &fpriv->vm;
-	struct amdgpu_device *adev = p->adev;
 	struct amdgpu_bo_list_entry *lobj;
-	struct list_head duplicates;
 	struct amdgpu_bo *bo;
 	u64 bytes_moved = 0, initial_bytes_moved;
 	u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev);
 	int r;
 
-	INIT_LIST_HEAD(&duplicates);
-	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
-	if (unlikely(r != 0)) {
-		return r;
-	}
-
-	list_for_each_entry(lobj, &p->validated, tv.head) {
+	list_for_each_entry(lobj, validated, tv.head) {
 		bo = lobj->robj;
 		if (!bo->pin_count) {
 			u32 domain = lobj->prefered_domains;
@@ -373,7 +383,6 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
 				domain = lobj->allowed_domains;
 				goto retry;
 			}
-			ttm_eu_backoff_reservation(&p->ticket, &p->validated);
 			return r;
 		}
 	}
@@ -386,6 +395,7 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
 {
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	struct amdgpu_cs_buckets buckets;
+	struct list_head duplicates;
 	bool need_mmap_lock = false;
 	int i, r;
 
@@ -405,8 +415,22 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
 	if (need_mmap_lock)
 		down_read(&current->mm->mmap_sem);
 
-	r = amdgpu_cs_list_validate(p);
+	INIT_LIST_HEAD(&duplicates);
+	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
+	if (unlikely(r != 0))
+		goto error_reserve;
+
+	r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated);
+	if (r)
+		goto error_validate;
+
+	r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates);
+
+error_validate:
+	if (r)
+		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
+
+error_reserve:
 	if (need_mmap_lock)
 		up_read(&current->mm->mmap_sem);
 
@@ -772,15 +796,15 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 	return 0;
 }
 
-static int amdgpu_cs_free_job(struct amdgpu_job *sched_job)
+static int amdgpu_cs_free_job(struct amdgpu_job *job)
 {
 	int i;
-	if (sched_job->ibs)
-		for (i = 0; i < sched_job->num_ibs; i++)
-			amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
-	kfree(sched_job->ibs);
-	if (sched_job->uf.bo)
-		drm_gem_object_unreference_unlocked(&sched_job->uf.bo->gem_base);
+	if (job->ibs)
+		for (i = 0; i < job->num_ibs; i++)
+			amdgpu_ib_free(job->adev, &job->ibs[i]);
+	kfree(job->ibs);
+	if (job->uf.bo)
+		drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base);
 	return 0;
 }
 
@@ -804,7 +828,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	r = amdgpu_cs_parser_init(parser, data);
 	if (r) {
 		DRM_ERROR("Failed to initialize parser !\n");
-		amdgpu_cs_parser_fini(parser, r, false);
+		kfree(parser);
 		up_read(&adev->exclusive_lock);
 		r = amdgpu_cs_handle_lockup(adev, r);
 		return r;
@@ -842,7 +866,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
 		if (!job)
 			return -ENOMEM;
-		job->base.sched = ring->scheduler;
+		job->base.sched = &ring->sched;
 		job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
 		job->adev = parser->adev;
 		job->ibs = parser->ibs;
@@ -857,7 +881,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		job->free_job = amdgpu_cs_free_job;
 		mutex_lock(&job->job_lock);
-		r = amd_sched_entity_push_job((struct amd_sched_job *)job);
+		r = amd_sched_entity_push_job(&job->base);
 		if (r) {
 			mutex_unlock(&job->job_lock);
 			amdgpu_cs_free_job(job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 20cbc4eb5a6f..e0b80ccdfe8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,10 +43,10 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
 		for (i = 0; i < adev->num_rings; i++) {
 			struct amd_sched_rq *rq;
 			if (kernel)
-				rq = &adev->rings[i]->scheduler->kernel_rq;
+				rq = &adev->rings[i]->sched.kernel_rq;
 			else
-				rq = &adev->rings[i]->scheduler->sched_rq;
-			r = amd_sched_entity_init(adev->rings[i]->scheduler,
+				rq = &adev->rings[i]->sched.sched_rq;
+			r = amd_sched_entity_init(&adev->rings[i]->sched,
						  &ctx->rings[i].entity,
						  rq, amdgpu_sched_jobs);
 			if (r)
@@ -55,7 +55,7 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
 
 		if (i < adev->num_rings) {
 			for (j = 0; j < i; j++)
-				amd_sched_entity_fini(adev->rings[j]->scheduler,
+				amd_sched_entity_fini(&adev->rings[j]->sched,
						      &ctx->rings[j].entity);
 			kfree(ctx);
 			return r;
@@ -75,7 +75,7 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 
 	if (amdgpu_enable_scheduler) {
 		for (i = 0; i < adev->num_rings; i++)
-			amd_sched_entity_fini(adev->rings[i]->scheduler,
+			amd_sched_entity_fini(&adev->rings[i]->sched,
					      &ctx->rings[i].entity);
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6ff6ae945794..6068d8207d10 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -246,7 +246,7 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
 		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-				     NULL, &adev->vram_scratch.robj);
+				     NULL, NULL, &adev->vram_scratch.robj);
 		if (r) {
 			return r;
 		}
@@ -449,7 +449,8 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
 
 	if (adev->wb.wb_obj == NULL) {
 		r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
-				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, &adev->wb.wb_obj);
+				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
+				     &adev->wb.wb_obj);
 		if (r) {
 			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
 			return r;
@@ -1650,9 +1651,11 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 	drm_kms_helper_poll_disable(dev);
 
 	/* turn off display hw */
+	drm_modeset_lock_all(dev);
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
 	}
+	drm_modeset_unlock_all(dev);
 
 	/* unpin the front buffers */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -1747,9 +1750,11 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 	if (fbcon) {
 		drm_helper_resume_force_mode(dev);
 		/* turn on display hw */
+		drm_modeset_lock_all(dev);
 		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
 		}
+		drm_modeset_unlock_all(dev);
 	}
 
 	drm_kms_helper_poll_enable(dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index e3d70772b531..6c9e0902a414 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -85,8 +85,6 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 	/* We borrow the event spin lock for protecting flip_status */
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
-	/* set the proper interrupt */
-	amdgpu_irq_get(adev, &adev->pageflip_irq, work->crtc_id);
 	/* do the flip (mmio) */
 	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
 	/* set the flip status */
@@ -186,10 +184,6 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
 		goto cleanup;
 	}
 
-	fence_get(work->excl);
-	for (i = 0; i < work->shared_count; ++i)
-		fence_get(work->shared[i]);
-
 	amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
 	amdgpu_bo_unreserve(new_rbo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 0fcc0bd1622c..b190c2a83680 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -79,6 +79,7 @@ int amdgpu_exp_hw_support = 0;
 int amdgpu_enable_scheduler = 0;
 int amdgpu_sched_jobs = 16;
 int amdgpu_sched_hw_submission = 2;
+int amdgpu_enable_semaphores = 1;
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -152,6 +153,9 @@ module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
 MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
 module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
 
+MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable (default), 0 = disable)");
+module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644);
+
 static struct pci_device_id pciidlist[] = {
 #ifdef CONFIG_DRM_AMDGPU_CIK
 	/* Kaveri */
@@ -238,11 +242,11 @@ static struct pci_device_id pciidlist[] = {
 	{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
 #endif
 	/* topaz */
-	{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-	{0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-	{0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-	{0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-	{0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+	{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
 	/* tonga */
 	{0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
 	{0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 8a122b1b7786..96290d9cddca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -402,3 +402,19 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
 		return true;
 	return false;
 }
+
+void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev)
+{
+	struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev;
+	struct drm_fb_helper *fb_helper;
+	int ret;
+
+	if (!afbdev)
+		return;
+
+	fb_helper = &afbdev->helper;
+
+	ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+	if (ret)
+		DRM_DEBUG("failed to restore crtc mode\n");
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 1be2bd6d07ea..b3fc26c59787 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -609,9 +609,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
  * Init the fence driver for the requested ring (all asics).
  * Helper function for amdgpu_fence_driver_init().
  */
-void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
 {
-	int i;
+	int i, r;
 
 	ring->fence_drv.cpu_addr = NULL;
 	ring->fence_drv.gpu_addr = 0;
@@ -625,15 +625,19 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
			    amdgpu_fence_check_lockup);
 	ring->fence_drv.ring = ring;
 
+	init_waitqueue_head(&ring->fence_drv.fence_queue);
+
 	if (amdgpu_enable_scheduler) {
-		ring->scheduler = amd_sched_create(&amdgpu_sched_ops,
-						   ring->idx,
-						   amdgpu_sched_hw_submission,
-						   (void *)ring->adev);
-		if (!ring->scheduler)
-			DRM_ERROR("Failed to create scheduler on ring %d.\n",
-				  ring->idx);
+		r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
+				   amdgpu_sched_hw_submission, ring->name);
+		if (r) {
+			DRM_ERROR("Failed to create scheduler on ring %s.\n",
+				  ring->name);
+			return r;
+		}
 	}
+
+	return 0;
 }
 
 /**
@@ -681,8 +685,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 		wake_up_all(&ring->fence_drv.fence_queue);
 		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
-		if (ring->scheduler)
-			amd_sched_destroy(ring->scheduler);
+		amd_sched_fini(&ring->sched);
 		ring->fence_drv.initialized = false;
 	}
 	mutex_unlock(&adev->ring_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index cbd3a486c5c2..7312d729d300 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -127,7 +127,7 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
 		r = amdgpu_bo_create(adev, adev->gart.table_size,
				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-				     NULL, &adev->gart.robj);
+				     NULL, NULL, &adev->gart.robj);
 		if (r) {
 			return r;
 		}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 5839fab374bf..7297ca3a0ba7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -69,7 +69,8 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 		}
 	}
 retry:
-	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, flags, NULL, &robj);
+	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
 	if (r) {
 		if (r != -ERESTARTSYS) {
 			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
@@ -426,6 +427,10 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
					   &args->data.data_size_bytes,
					   &args->data.flags);
 	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
+		if (args->data.data_size_bytes > sizeof(args->data.data)) {
+			r = -EINVAL;
+			goto unreserve;
+		}
 		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
 		if (!r)
 			r = amdgpu_bo_set_metadata(robj, args->data.data,
@@ -433,6 +438,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
						   args->data.flags);
 	}
 
+unreserve:
 	amdgpu_bo_unreserve(robj);
 out:
 	drm_gem_object_unreference_unlocked(gobj);
@@ -454,11 +460,12 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 	struct ttm_validate_buffer tv, *entry;
 	struct amdgpu_bo_list_entry *vm_bos;
 	struct ww_acquire_ctx ticket;
-	struct list_head list;
+	struct list_head list, duplicates;
 	unsigned domain;
 	int r;
 
 	INIT_LIST_HEAD(&list);
+	INIT_LIST_HEAD(&duplicates);
 
 	tv.bo = &bo_va->bo->tbo;
 	tv.shared = true;
@@ -468,7 +475,8 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 	if (!vm_bos)
 		return;
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+	/* Provide duplicates to avoid -EALREADY */
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
 	if (r)
 		goto error_free;
 
@@ -651,7 +659,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 	int r;
 
 	args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
-	args->size = args->pitch * args->height;
+	args->size = (u64)args->pitch * args->height;
 	args->size = ALIGN(args->size, PAGE_SIZE);
 
 	r = amdgpu_gem_object_create(adev, args->size, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 5c8a803acedc..534fc04e80fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -43,7 +43,7 @@ static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev)
 		r = amdgpu_bo_create(adev, adev->irq.ih.ring_size,
				     PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0,
-				     NULL, &adev->irq.ih.ring_obj);
+				     NULL, NULL, &adev->irq.ih.ring_obj);
 		if (r) {
 			DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r);
 			return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 0aba8e9bc8a0..7c42ff670080 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -140,7 +140,7 @@ void amdgpu_irq_preinstall(struct drm_device *dev)
  */
 int amdgpu_irq_postinstall(struct drm_device *dev)
 {
-	dev->max_vblank_count = 0x001fffff;
+	dev->max_vblank_count = 0x00ffffff;
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 22367939ebf1..5d11e798230c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -390,7 +390,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
 	}
 	case AMDGPU_INFO_READ_MMR_REG: {
-		unsigned n, alloc_size = info->read_mmr_reg.count * 4;
+		unsigned n, alloc_size;
 		uint32_t *regs;
 		unsigned se_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
@@ -406,9 +406,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
 			sh_num = 0xffffffff;
 
-		regs = kmalloc(alloc_size, GFP_KERNEL);
+		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
 		if (!regs)
 			return -ENOMEM;
+		alloc_size = info->read_mmr_reg.count * sizeof(*regs);
 
 		for (i = 0; i < info->read_mmr_reg.count; i++)
 			if (amdgpu_asic_read_register(adev, se_num, sh_num,
@@ -484,7 +485,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
  * Outdated mess for old drm with Xorg being in charge (void function now).
 */
 /**
- * amdgpu_driver_firstopen_kms - drm callback for last close
+ * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
@@ -492,6 +493,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 */
 void amdgpu_driver_lastclose_kms(struct drm_device *dev)
 {
+	struct amdgpu_device *adev = dev->dev_private;
+
+	amdgpu_fbdev_restore_mode(adev);
 	vga_switcheroo_process_delayed_switch();
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 64efe5b52e65..7bd470d9ac30 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -567,6 +567,7 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev);
 void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state);
 int amdgpu_fbdev_total_size(struct amdgpu_device *adev);
 bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj);
+void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev);
 
 void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 08b09d55b96f..1a7708f365f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -215,6 +215,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
				bool kernel, u32 domain, u64 flags,
				struct sg_table *sg,
				struct ttm_placement *placement,
+				struct reservation_object *resv,
				struct amdgpu_bo **bo_ptr)
 {
 	struct amdgpu_bo *bo;
@@ -261,7 +262,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 	/* Kernel allocation are uninterruptible */
 	r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
-			acc_size, sg, NULL, &amdgpu_ttm_bo_destroy);
+			acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
 	if (unlikely(r != 0)) {
 		return r;
 	}
@@ -275,7 +276,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 int amdgpu_bo_create(struct amdgpu_device *adev,
		     unsigned long size, int byte_align,
		     bool kernel, u32 domain, u64 flags,
-		     struct sg_table *sg, struct amdgpu_bo **bo_ptr)
+		     struct sg_table *sg,
+		     struct reservation_object *resv,
+		     struct amdgpu_bo **bo_ptr)
 {
 	struct ttm_placement placement = {0};
 	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
@@ -286,11 +289,9 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 	amdgpu_ttm_placement_init(adev, &placement,
				  placements, domain, flags);
 
-	return amdgpu_bo_create_restricted(adev, size, byte_align,
-					   kernel, domain, flags,
-					   sg,
-					   &placement,
-					   bo_ptr);
+	return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
+					   domain, flags, sg, &placement,
+					   resv, bo_ptr);
 }
 
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
@@ -535,12 +536,10 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
 	if (metadata == NULL)
 		return -EINVAL;
 
-	buffer = kzalloc(metadata_size, GFP_KERNEL);
+	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
 	if (buffer == NULL)
 		return -ENOMEM;
 
-	memcpy(buffer, metadata, metadata_size);
-
 	kfree(bo->metadata);
 	bo->metadata_flags = flags;
 	bo->metadata = buffer;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 6ea18dcec561..3c2ff4567798 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -129,12 +129,14 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
			    unsigned long size, int byte_align,
			    bool kernel, u32 domain, u64 flags,
			    struct sg_table *sg,
+			    struct reservation_object *resv,
			    struct amdgpu_bo **bo_ptr);
 int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
				unsigned long size, int byte_align,
				bool kernel, u32 domain, u64 flags,
				struct sg_table *sg,
				struct ttm_placement *placement,
+				struct reservation_object *resv,
				struct amdgpu_bo **bo_ptr);
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
 void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index efed11509f4a..22a8c7d3a3ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -294,10 +294,14 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
 	umode_t effective_mode = attr->mode;
 
-	/* Skip limit attributes if DPM is not enabled */
+	/* Skip attributes if DPM is not enabled */
 	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
-	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
+	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
+	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
+	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
+	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
 		return 0;
 
 	/* Skip fan attributes if fan is not present */
@@ -691,6 +695,9 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 {
 	int ret;
 
+	if (adev->pm.sysfs_initialized)
+		return 0;
+
 	if (adev->pm.funcs->get_temperature == NULL)
 		return 0;
 	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
@@ -719,6 +726,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 		return ret;
 	}
 
+	adev->pm.sysfs_initialized = true;
+
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index d9652fe32d6a..59f735a933a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -61,12 +61,15 @@ struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
							struct dma_buf_attachment *attach,
							struct sg_table *sg)
 {
+	struct reservation_object *resv = attach->dmabuf->resv;
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_bo *bo;
 	int ret;
 
+	ww_mutex_lock(&resv->lock, NULL);
 	ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
-			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo);
+			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
+	ww_mutex_unlock(&resv->lock);
 	if (ret)
 		return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 9bec91484c24..30dce235ddeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -357,11 +357,11 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 		ring->adev = adev;
 		ring->idx = adev->num_rings++;
 		adev->rings[ring->idx] = ring;
-		amdgpu_fence_driver_init_ring(ring);
+		r = amdgpu_fence_driver_init_ring(ring);
+		if (r)
+			return r;
 	}
 
-	init_waitqueue_head(&ring->fence_drv.fence_queue);
-
 	r = amdgpu_wb_get(adev, &ring->rptr_offs);
 	if (r) {
 		dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
@@ -407,7 +407,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 	if (ring->ring_obj == NULL) {
 		r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0,
-				     NULL, &ring->ring_obj);
+				     NULL, NULL, &ring->ring_obj);
 		if (r) {
dev_err(adev->dev, "(%d) ring create failed\n", r); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 74dad270362c..e90712443fe9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -64,8 +64,8 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev, INIT_LIST_HEAD(&sa_manager->flist[i]); } - r = amdgpu_bo_create(adev, size, align, true, - domain, 0, NULL, &sa_manager->bo); + r = amdgpu_bo_create(adev, size, align, true, domain, + 0, NULL, NULL, &sa_manager->bo); if (r) { dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r); return r; @@ -145,8 +145,13 @@ static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f) struct amd_sched_fence *s_fence; s_fence = to_amd_sched_fence(f); - if (s_fence) - return s_fence->scheduler->ring_id; + if (s_fence) { + struct amdgpu_ring *ring; + + ring = container_of(s_fence->sched, struct amdgpu_ring, sched); + return ring->idx; + } + a_fence = to_amdgpu_fence(f); if (a_fence) return a_fence->ring->idx; @@ -412,6 +417,26 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo, } #if defined(CONFIG_DEBUG_FS) + +static void amdgpu_sa_bo_dump_fence(struct fence *fence, struct seq_file *m) +{ + struct amdgpu_fence *a_fence = to_amdgpu_fence(fence); + struct amd_sched_fence *s_fence = to_amd_sched_fence(fence); + + if (a_fence) + seq_printf(m, " protected by 0x%016llx on ring %d", + a_fence->seq, a_fence->ring->idx); + + if (s_fence) { + struct amdgpu_ring *ring; + + + ring = container_of(s_fence->sched, struct amdgpu_ring, sched); + seq_printf(m, " protected by 0x%016x on ring %d", + s_fence->base.seqno, ring->idx); + } +} + void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, struct seq_file *m) { @@ -428,18 +453,8 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, } seq_printf(m, "[0x%010llx 0x%010llx] size %8lld", soffset, eoffset, eoffset - soffset); - if (i->fence) { - struct amdgpu_fence *a_fence = to_amdgpu_fence(i->fence); - struct amd_sched_fence *s_fence = to_amd_sched_fence(i->fence); - if (a_fence) - seq_printf(m, " protected by 0x%016llx on ring %d", - a_fence->seq, a_fence->ring->idx); - if (s_fence) - seq_printf(m, " protected by 0x%016x on ring %d", - s_fence->base.seqno, - s_fence->scheduler->ring_id); - - } + if (i->fence) + amdgpu_sa_bo_dump_fence(i->fence, m); seq_printf(m, "\n"); } spin_unlock(&sa_manager->wq.lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index de98fbd2971e..2e946b2cad88 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c @@ -27,63 +27,48 @@ #include <drm/drmP.h> #include "amdgpu.h" -static struct fence *amdgpu_sched_dependency(struct amd_sched_job *job) +static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job) { - struct amdgpu_job *sched_job = (struct amdgpu_job *)job; - return amdgpu_sync_get_fence(&sched_job->ibs->sync); + struct amdgpu_job *job = to_amdgpu_job(sched_job); + return amdgpu_sync_get_fence(&job->ibs->sync); } -static struct fence *amdgpu_sched_run_job(struct amd_sched_job *job) +static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job) { - struct amdgpu_job *sched_job; - struct amdgpu_fence *fence; + struct amdgpu_fence *fence = NULL; + struct amdgpu_job *job; int r; - if (!job) { + if (!sched_job) { DRM_ERROR("job is null\n"); return NULL; } - sched_job = (struct 
amdgpu_job *)job; - mutex_lock(&sched_job->job_lock); - r = amdgpu_ib_schedule(sched_job->adev, - sched_job->num_ibs, - sched_job->ibs, - sched_job->base.owner); - if (r) + job = to_amdgpu_job(sched_job); + mutex_lock(&job->job_lock); + r = amdgpu_ib_schedule(job->adev, + job->num_ibs, + job->ibs, + job->base.owner); + if (r) { + DRM_ERROR("Error scheduling IBs (%d)\n", r); goto err; - fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence); - - if (sched_job->free_job) - sched_job->free_job(sched_job); + } - mutex_unlock(&sched_job->job_lock); - return &fence->base; + fence = amdgpu_fence_ref(job->ibs[job->num_ibs - 1].fence); err: - DRM_ERROR("Run job error\n"); - mutex_unlock(&sched_job->job_lock); - job->sched->ops->process_job(job); - return NULL; -} + if (job->free_job) + job->free_job(job); -static void amdgpu_sched_process_job(struct amd_sched_job *job) -{ - struct amdgpu_job *sched_job; - - if (!job) { - DRM_ERROR("job is null\n"); - return; - } - sched_job = (struct amdgpu_job *)job; - /* after processing job, free memory */ - fence_put(&sched_job->base.s_fence->base); - kfree(sched_job); + mutex_unlock(&job->job_lock); + fence_put(&job->base.s_fence->base); + kfree(job); + return fence ? &fence->base : NULL; } struct amd_sched_backend_ops amdgpu_sched_ops = { .dependency = amdgpu_sched_dependency, .run_job = amdgpu_sched_run_job, - .process_job = amdgpu_sched_process_job }; int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, @@ -100,7 +85,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); if (!job) return -ENOMEM; - job->base.sched = ring->scheduler; + job->base.sched = &ring->sched; job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity; job->adev = adev; job->ibs = ibs; @@ -109,7 +94,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, mutex_init(&job->job_lock); job->free_job = free_job; mutex_lock(&job->job_lock); - r = amd_sched_entity_push_job((struct amd_sched_job *)job); + r = amd_sched_entity_push_job(&job->base); if (r) { mutex_unlock(&job->job_lock); kfree(job); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 068aeaff7183..4921de15b451 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -65,8 +65,14 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f) if (a_fence) return a_fence->ring->adev == adev; - if (s_fence) - return (struct amdgpu_device *)s_fence->scheduler->priv == adev; + + if (s_fence) { + struct amdgpu_ring *ring; + + ring = container_of(s_fence->sched, struct amdgpu_ring, sched); + return ring->adev == adev; + } + return false; } @@ -251,6 +257,20 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync) fence_put(e->fence); kfree(e); } + + if (amdgpu_enable_semaphores) + return 0; + + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { + struct amdgpu_fence *fence = sync->sync_to[i]; + if (!fence) + continue; + + r = fence_wait(&fence->base, false); + if (r) + return r; + } + return 0; } @@ -285,7 +305,8 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync, return -EINVAL; } - if (amdgpu_enable_scheduler || (count >= AMDGPU_NUM_SYNCS)) { + if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores || + (count >= AMDGPU_NUM_SYNCS)) { /* not enough room, wait manually */ r = fence_wait(&fence->base, false); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index 
f80b1a43be8a..4865615e9c06 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c @@ -59,8 +59,9 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) goto out_cleanup; } - r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0, - NULL, &vram_obj); + r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, + AMDGPU_GEM_DOMAIN_VRAM, 0, + NULL, NULL, &vram_obj); if (r) { DRM_ERROR("Failed to create VRAM object\n"); goto out_cleanup; @@ -80,7 +81,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) struct fence *fence = NULL; r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i); + AMDGPU_GEM_DOMAIN_GTT, 0, NULL, + NULL, gtt_obj + i); if (r) { DRM_ERROR("Failed to create GTT object %d\n", i); goto out_lclean; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index b5abd5cde413..364cbe975332 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -861,7 +861,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, &adev->stollen_vga_memory); + NULL, NULL, &adev->stollen_vga_memory); if (r) { return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 482e66797ae6..5cc95f1a7dab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -247,7 +247,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) const struct common_firmware_header *header = NULL; err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, bo); + AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo); if (err) { dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err); err = -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 2cf6c6b06e3b..d0312364d950 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -156,7 +156,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, &adev->uvd.vcpu_bo); + NULL, NULL, &adev->uvd.vcpu_bo); if (r) { dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); return r; @@ -543,46 +543,60 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, return -EINVAL; } - if (msg_type == 1) { + switch (msg_type) { + case 0: + /* it's a create msg, calc image size (width * height) */ + amdgpu_bo_kunmap(bo); + + /* try to alloc a new handle */ + for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { + if (atomic_read(&adev->uvd.handles[i]) == handle) { + DRM_ERROR("Handle 0x%x already in use!\n", handle); + return -EINVAL; + } + + if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) { + adev->uvd.filp[i] = ctx->parser->filp; + return 0; + } + } + + DRM_ERROR("No more free UVD handles!\n"); + return -EINVAL; + + case 1: /* it's a decode msg, calc buffer sizes */ r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes); amdgpu_bo_kunmap(bo); if (r) return r; - } else if (msg_type == 2) { + /* validate the handle */ + for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { + if (atomic_read(&adev->uvd.handles[i]) == handle) { + if (adev->uvd.filp[i] != ctx->parser->filp) { + DRM_ERROR("UVD handle collision 
detected!\n"); + return -EINVAL; + } + return 0; + } + } + + DRM_ERROR("Invalid UVD handle 0x%x!\n", handle); + return -ENOENT; + + case 2: /* it's a destroy msg, free the handle */ for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) atomic_cmpxchg(&adev->uvd.handles[i], handle, 0); amdgpu_bo_kunmap(bo); return 0; - } else { - /* it's a create msg */ - amdgpu_bo_kunmap(bo); - - if (msg_type != 0) { - DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); - return -EINVAL; - } - - /* it's a create msg, no special handling needed */ - } - - /* create or decode, validate the handle */ - for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { - if (atomic_read(&adev->uvd.handles[i]) == handle) - return 0; - } - /* handle not found try to alloc a new one */ - for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { - if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) { - adev->uvd.filp[i] = ctx->parser->filp; - return 0; - } + default: + DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); + return -EINVAL; } - - DRM_ERROR("No more free UVD handles!\n"); + BUG(); return -EINVAL; } @@ -805,10 +819,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) } static int amdgpu_uvd_free_job( - struct amdgpu_job *sched_job) + struct amdgpu_job *job) { - amdgpu_ib_free(sched_job->adev, sched_job->ibs); - kfree(sched_job->ibs); + amdgpu_ib_free(job->adev, job->ibs); + kfree(job->ibs); return 0; } @@ -905,7 +919,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, &bo); + NULL, NULL, &bo); if (r) return r; @@ -954,7 +968,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, &bo); + NULL, NULL, &bo); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 3cab96c42aa8..74f2038ac747 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -143,7 +143,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, &adev->vce.vcpu_bo); + NULL, NULL, &adev->vce.vcpu_bo); if (r) { dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r); return r; @@ -342,10 +342,10 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) } static int amdgpu_vce_free_job( - struct amdgpu_job *sched_job) + struct amdgpu_job *job) { - amdgpu_ib_free(sched_job->adev, sched_job->ibs); - kfree(sched_job->ibs); + amdgpu_ib_free(job->adev, job->ibs); + kfree(job->ibs); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index f68b7cdc370a..53d551f2d839 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -316,12 +316,12 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev, } } -int amdgpu_vm_free_job(struct amdgpu_job *sched_job) +int amdgpu_vm_free_job(struct amdgpu_job *job) { int i; - for (i = 0; i < sched_job->num_ibs; i++) - amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]); - kfree(sched_job->ibs); + for (i = 0; i < job->num_ibs; i++) + amdgpu_ib_free(job->adev, &job->ibs[i]); + kfree(job->ibs); return 0; } @@ -455,8 +455,10 @@ int 
amdgpu_vm_update_page_directory(struct amdgpu_device *adev, return -ENOMEM; r = amdgpu_ib_get(ring, NULL, ndw * 4, ib); - if (r) + if (r) { + kfree(ib); return r; + } ib->length_dw = 0; /* walk over the address space and update the page directory */ @@ -686,31 +688,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev, } /** - * amdgpu_vm_fence_pts - fence page tables after an update - * - * @vm: requested vm - * @start: start of GPU address range - * @end: end of GPU address range - * @fence: fence to use - * - * Fence the page tables in the range @start - @end (cayman+). - * - * Global and local mutex must be locked! - */ -static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm, - uint64_t start, uint64_t end, - struct fence *fence) -{ - unsigned i; - - start >>= amdgpu_vm_block_size; - end >>= amdgpu_vm_block_size; - - for (i = start; i <= end; ++i) - amdgpu_bo_fence(vm->page_tables[i].bo, fence, true); -} - -/** * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table * * @adev: amdgpu_device pointer @@ -813,8 +790,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (r) goto error_free; - amdgpu_vm_fence_pts(vm, mapping->it.start, - mapping->it.last + 1, f); + amdgpu_bo_fence(vm->page_directory, f, true); if (fence) { fence_put(*fence); *fence = fence_get(f); @@ -855,7 +831,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, int r; if (mem) { - addr = mem->start << PAGE_SHIFT; + addr = (u64)mem->start << PAGE_SHIFT; if (mem->mem_type != TTM_PL_TT) addr += adev->vm_manager.vram_base_offset; } else { @@ -1089,6 +1065,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, /* walk over the address space and allocate the page tables */ for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { + struct reservation_object *resv = vm->page_directory->tbo.resv; struct amdgpu_bo *pt; if (vm->page_tables[pt_idx].bo) @@ -1097,11 +1074,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, /* drop mutex to allocate and clear page table */ mutex_unlock(&vm->mutex); + ww_mutex_lock(&resv->lock, NULL); r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, AMDGPU_GPU_PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_NO_CPU_ACCESS, - NULL, &pt); + NULL, resv, &pt); + ww_mutex_unlock(&resv->lock); if (r) goto error_free; @@ -1303,7 +1282,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) r = amdgpu_bo_create(adev, pd_size, align, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_NO_CPU_ACCESS, - NULL, &vm->page_directory); + NULL, NULL, &vm->page_directory); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index cd6edc40c9cd..1e0bba29e167 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -1279,8 +1279,7 @@ amdgpu_atombios_encoder_setup_dig(struct drm_encoder *encoder, int action) amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); } if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - amdgpu_atombios_encoder_setup_dig_transmitter(encoder, - ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); + amdgpu_atombios_encoder_set_backlight_level(amdgpu_encoder, dig->backlight_level); if (ext_encoder) amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 82e8d0730517..a1a35a5df8e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ 
b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -6185,6 +6185,11 @@ static int ci_dpm_late_init(void *handle) if (!amdgpu_dpm) return 0; + /* init the sysfs and debugfs files late */ + ret = amdgpu_pm_sysfs_init(adev); + if (ret) + return ret; + ret = ci_set_temperature_range(adev); if (ret) return ret; @@ -6232,9 +6237,6 @@ static int ci_dpm_sw_init(void *handle) adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; if (amdgpu_dpm == 1) amdgpu_pm_print_power_states(adev); - ret = amdgpu_pm_sysfs_init(adev); - if (ret) - goto dpm_failed; mutex_unlock(&adev->pm.mutex); DRM_INFO("amdgpu: dpm initialized\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 4b6ce74753cd..484710cfdf82 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1567,6 +1567,9 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) int ret, i; u16 tmp16; + if (pci_is_root_bus(adev->pdev->bus)) + return; + if (amdgpu_pcie_gen2 == 0) return; diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index 44fa96ad4709..2e3373ed4c94 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c @@ -596,6 +596,12 @@ static int cz_dpm_late_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (amdgpu_dpm) { + int ret; + /* init the sysfs and debugfs files late */ + ret = amdgpu_pm_sysfs_init(adev); + if (ret) + return ret; + /* powerdown unused blocks for now */ cz_dpm_powergate_uvd(adev, true); cz_dpm_powergate_vce(adev, true); @@ -632,10 +638,6 @@ static int cz_dpm_sw_init(void *handle) if (amdgpu_dpm == 1) amdgpu_pm_print_power_states(adev); - ret = amdgpu_pm_sysfs_init(adev); - if (ret) - goto dpm_init_failed; - mutex_unlock(&adev->pm.mutex); DRM_INFO("amdgpu: dpm initialized\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c index a72ffc7d6c26..e33180d3314a 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_smc.c @@ -814,7 +814,8 @@ int cz_smu_init(struct amdgpu_device *adev) * 3. 
map kernel virtual address */ ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE, - true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, toc_buf); + true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, + toc_buf); if (ret) { dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret); @@ -822,7 +823,8 @@ int cz_smu_init(struct amdgpu_device *adev) } ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE, - true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, smu_buf); + true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, + smu_buf); if (ret) { dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index e4d101b1252a..d4c82b625727 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -255,6 +255,24 @@ static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); } +static void dce_v10_0_pageflip_interrupt_init(struct amdgpu_device *adev) +{ + unsigned i; + + /* Enable pflip interrupts */ + for (i = 0; i < adev->mode_info.num_crtc; i++) + amdgpu_irq_get(adev, &adev->pageflip_irq, i); +} + +static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev) +{ + unsigned i; + + /* Disable pflip interrupts */ + for (i = 0; i < adev->mode_info.num_crtc; i++) + amdgpu_irq_put(adev, &adev->pageflip_irq, i); +} + /** * dce_v10_0_page_flip - pageflip callback. * @@ -2663,9 +2681,10 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) dce_v10_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); dce_v10_0_vga_enable(crtc, false); - /* Make sure VBLANK interrupt is still enabled */ + /* Make sure VBLANK and PFLIP interrupts are still enabled */ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); amdgpu_irq_update(adev, &adev->crtc_irq, type); + amdgpu_irq_update(adev, &adev->pageflip_irq, type); drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); dce_v10_0_crtc_load_lut(crtc); break; @@ -3025,6 +3044,8 @@ static int dce_v10_0_hw_init(void *handle) dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); } + dce_v10_0_pageflip_interrupt_init(adev); + return 0; } @@ -3039,6 +3060,8 @@ static int dce_v10_0_hw_fini(void *handle) dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); } + dce_v10_0_pageflip_interrupt_fini(adev); + return 0; } @@ -3050,6 +3073,8 @@ static int dce_v10_0_suspend(void *handle) dce_v10_0_hpd_fini(adev); + dce_v10_0_pageflip_interrupt_fini(adev); + return 0; } @@ -3075,6 +3100,8 @@ static int dce_v10_0_resume(void *handle) /* initialize hpd */ dce_v10_0_hpd_init(adev); + dce_v10_0_pageflip_interrupt_init(adev); + return 0; } @@ -3369,7 +3396,6 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); - amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id); queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 6411e8244671..7e1cf5e4eebf 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -233,6 +233,24 @@ static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); } +static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev) 
+{ + unsigned i; + + /* Enable pflip interrupts */ + for (i = 0; i < adev->mode_info.num_crtc; i++) + amdgpu_irq_get(adev, &adev->pageflip_irq, i); +} + +static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev) +{ + unsigned i; + + /* Disable pflip interrupts */ + for (i = 0; i < adev->mode_info.num_crtc; i++) + amdgpu_irq_put(adev, &adev->pageflip_irq, i); +} + /** * dce_v11_0_page_flip - pageflip callback. * @@ -2640,9 +2658,10 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) dce_v11_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); dce_v11_0_vga_enable(crtc, false); - /* Make sure VBLANK interrupt is still enabled */ + /* Make sure VBLANK and PFLIP interrupts are still enabled */ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); amdgpu_irq_update(adev, &adev->crtc_irq, type); + amdgpu_irq_update(adev, &adev->pageflip_irq, type); drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); dce_v11_0_crtc_load_lut(crtc); break; @@ -2888,7 +2907,7 @@ static int dce_v11_0_early_init(void *handle) switch (adev->asic_type) { case CHIP_CARRIZO: - adev->mode_info.num_crtc = 4; + adev->mode_info.num_crtc = 3; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 9; break; @@ -3000,6 +3019,8 @@ static int dce_v11_0_hw_init(void *handle) dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); } + dce_v11_0_pageflip_interrupt_init(adev); + return 0; } @@ -3014,6 +3035,8 @@ static int dce_v11_0_hw_fini(void *handle) dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); } + dce_v11_0_pageflip_interrupt_fini(adev); + return 0; } @@ -3025,6 +3048,8 @@ static int dce_v11_0_suspend(void *handle) dce_v11_0_hpd_fini(adev); + dce_v11_0_pageflip_interrupt_fini(adev); + return 0; } @@ -3051,6 +3076,8 @@ static int dce_v11_0_resume(void *handle) /* initialize hpd */ dce_v11_0_hpd_init(adev); + dce_v11_0_pageflip_interrupt_init(adev); + return 0; } @@ -3345,7 +3372,6 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); - amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id); queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index c86911c2ea2a..34b9c2a9d8d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -204,6 +204,24 @@ static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); } +static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev) +{ + unsigned i; + + /* Enable pflip interrupts */ + for (i = 0; i < adev->mode_info.num_crtc; i++) + amdgpu_irq_get(adev, &adev->pageflip_irq, i); +} + +static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev) +{ + unsigned i; + + /* Disable pflip interrupts */ + for (i = 0; i < adev->mode_info.num_crtc; i++) + amdgpu_irq_put(adev, &adev->pageflip_irq, i); +} + /** * dce_v8_0_page_flip - pageflip callback. 
* @@ -2575,9 +2593,10 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode) dce_v8_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); dce_v8_0_vga_enable(crtc, false); - /* Make sure VBLANK interrupt is still enabled */ + /* Make sure VBLANK and PFLIP interrupts are still enabled */ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); amdgpu_irq_update(adev, &adev->crtc_irq, type); + amdgpu_irq_update(adev, &adev->pageflip_irq, type); drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); dce_v8_0_crtc_load_lut(crtc); break; @@ -2933,6 +2952,8 @@ static int dce_v8_0_hw_init(void *handle) dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); } + dce_v8_0_pageflip_interrupt_init(adev); + return 0; } @@ -2947,6 +2968,8 @@ static int dce_v8_0_hw_fini(void *handle) dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); } + dce_v8_0_pageflip_interrupt_fini(adev); + return 0; } @@ -2958,6 +2981,8 @@ static int dce_v8_0_suspend(void *handle) dce_v8_0_hpd_fini(adev); + dce_v8_0_pageflip_interrupt_fini(adev); + return 0; } @@ -2981,6 +3006,8 @@ static int dce_v8_0_resume(void *handle) /* initialize hpd */ dce_v8_0_hpd_init(adev); + dce_v8_0_pageflip_interrupt_init(adev); + return 0; } @@ -3376,7 +3403,6 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); - amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id); queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c index 322edea65857..bda1249eb871 100644 --- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c @@ -764,7 +764,7 @@ int fiji_smu_init(struct amdgpu_device *adev) ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, toc_buf); + NULL, NULL, toc_buf); if (ret) { DRM_ERROR("Failed to allocate memory for TOC buffer\n"); return -ENOMEM; @@ -774,7 +774,7 @@ int fiji_smu_init(struct amdgpu_device *adev) ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, smu_buf); + NULL, NULL, smu_buf); if (ret) { DRM_ERROR("Failed to allocate memory for SMU internal buffer\n"); return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 4bd1e5cf65ca..e992bf2ff66c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -3206,7 +3206,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, + AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, &adev->gfx.mec.hpd_eop_obj); if (r) { dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); @@ -3373,7 +3373,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, sizeof(struct bonaire_mqd), PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, + AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, &ring->mqd_obj); if (r) { dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); @@ -3610,41 +3610,6 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev) return 0; } -static void gfx_v7_0_ce_sync_me(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - u64 gpu_addr = 
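/*
 * The dce_v{8,10,11}_0_pageflip_interrupt_init/fini pairs above lean on the
 * reference counting behind amdgpu_irq_get()/amdgpu_irq_put(): one reference
 * per CRTC taken at hw_init keeps the pageflip interrupt enabled for the
 * lifetime of the device, which is why the per-flip put in the IRQ handlers
 * could be dropped. A minimal userspace model of that refcount pattern
 * (names and the hw_irq_set() stub are illustrative, not driver API):
 */
#include <stdio.h>

#define NUM_CRTC 3

struct irq_src {
	int enable_count[NUM_CRTC];	/* one count per interrupt type (CRTC) */
};

static void hw_irq_set(int type, int enabled)
{
	printf("crtc %d: pflip irq %s\n", type, enabled ? "on" : "off");
}

static void irq_get(struct irq_src *src, int type)
{
	if (src->enable_count[type]++ == 0)	/* 0 -> 1: touch the hardware */
		hw_irq_set(type, 1);
}

static void irq_put(struct irq_src *src, int type)
{
	if (--src->enable_count[type] == 0)	/* 1 -> 0: disable again */
		hw_irq_set(type, 0);
}

int main(void)
{
	struct irq_src pflip = { {0} };
	int i;

	for (i = 0; i < NUM_CRTC; i++)	/* mirrors *_pageflip_interrupt_init() */
		irq_get(&pflip, i);
	for (i = 0; i < NUM_CRTC; i++)	/* mirrors *_pageflip_interrupt_fini() */
		irq_put(&pflip, i);
	return 0;
}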
adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4; - - /* instruct DE to set a magic number */ - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | - WRITE_DATA_DST_SEL(5))); - amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, 1); - - /* let CE wait till condition satisfied */ - amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); - amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */ - WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ - WAIT_REG_MEM_FUNCTION(3) | /* == */ - WAIT_REG_MEM_ENGINE(2))); /* ce */ - amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, 1); - amdgpu_ring_write(ring, 0xffffffff); - amdgpu_ring_write(ring, 4); /* poll interval */ - - /* instruct CE to reset wb of ce_sync to zero */ - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) | - WRITE_DATA_DST_SEL(5) | - WR_CONFIRM)); - amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, 0); -} - /* * vm * VMID 0 is the physical GPU addresses as used by the kernel. @@ -3663,6 +3628,13 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vm_id, uint64_t pd_addr) { int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); + if (usepfp) { + /* sync CE with ME to prevent CE fetching CEIB before context switch is done */ + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); + } amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | @@ -3703,7 +3675,10 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, amdgpu_ring_write(ring, 0x0); /* synce CE with ME to prevent CE fetch CEIB before context switch done */ - gfx_v7_0_ce_sync_me(ring); + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); } } @@ -3788,7 +3763,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, &adev->gfx.rlc.save_restore_obj); + NULL, NULL, + &adev->gfx.rlc.save_restore_obj); if (r) { dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r); return r; } @@ -3831,7 +3807,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, &adev->gfx.rlc.clear_state_obj); + NULL, NULL, + &adev->gfx.rlc.clear_state_obj); if (r) { dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); gfx_v7_0_rlc_fini(adev); @@ -3870,7 +3847,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, &adev->gfx.rlc.cp_table_obj); + NULL, NULL, + &adev->gfx.rlc.cp_table_obj); if (r) { dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); gfx_v7_0_rlc_fini(adev); @@ -4802,12 +4780,6 @@ static int gfx_v7_0_sw_init(void *handle) return r; } - r = amdgpu_wb_get(adev,
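/*
 * What replaced the removed gfx_v7_0_ce_sync_me(): instead of a three-packet
 * handshake through a writeback slot (DE writes a magic value, CE busy-waits
 * on it with WAIT_REG_MEM, CE clears it), the VM flush now emits
 * SWITCH_BUFFER twice; per the in-tree comment this keeps the CE from
 * fetching the CE IB before the context switch completes. A model of the
 * emitted sequence with the ring reduced to an array (PACKET3() packs a
 * type-3 header the way the CIK-era headers define it; everything else here
 * is a stand-in, not driver code):
 */
#include <stdint.h>
#include <stdio.h>

#define PACKET3(op, n)	((3u << 30) | (((op) & 0xFFu) << 8) | ((n) & 0x3FFFu))
#define PACKET3_SWITCH_BUFFER	0x8B

struct ring { uint32_t buf[16]; unsigned wptr; };

static void ring_write(struct ring *r, uint32_t v)
{
	r->buf[r->wptr++] = v;
}

static void emit_ce_me_sync(struct ring *r)
{
	/* two switches, each followed by its single payload dword */
	ring_write(r, PACKET3(PACKET3_SWITCH_BUFFER, 0));
	ring_write(r, 0);
	ring_write(r, PACKET3(PACKET3_SWITCH_BUFFER, 0));
	ring_write(r, 0);
}

int main(void)
{
	struct ring r = { {0}, 0 };
	unsigned i;

	emit_ce_me_sync(&r);
	for (i = 0; i < r.wptr; i++)
		printf("dw%u: 0x%08x\n", i, (unsigned)r.buf[i]);
	return 0;
}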
&adev->gfx.ce_sync_offs); - if (r) { - DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r); - return r; - } - for (i = 0; i < adev->gfx.num_gfx_rings; i++) { ring = &adev->gfx.gfx_ring[i]; ring->ring_obj = NULL; @@ -4851,21 +4823,21 @@ static int gfx_v7_0_sw_init(void *handle) r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GDS, 0, - NULL, &adev->gds.gds_gfx_bo); + NULL, NULL, &adev->gds.gds_gfx_bo); if (r) return r; r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GWS, 0, - NULL, &adev->gds.gws_gfx_bo); + NULL, NULL, &adev->gds.gws_gfx_bo); if (r) return r; r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_OA, 0, - NULL, &adev->gds.oa_gfx_bo); + NULL, NULL, &adev->gds.oa_gfx_bo); if (r) return r; @@ -4886,8 +4858,6 @@ static int gfx_v7_0_sw_fini(void *handle) for (i = 0; i < adev->gfx.num_compute_rings; i++) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); - amdgpu_wb_free(adev, adev->gfx.ce_sync_offs); - gfx_v7_0_cp_compute_fini(adev); gfx_v7_0_rlc_fini(adev); gfx_v7_0_mec_fini(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 53f07439a512..cb4f68f53f24 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -868,7 +868,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, + AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, &adev->gfx.mec.hpd_eop_obj); if (r) { dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); @@ -940,12 +940,6 @@ static int gfx_v8_0_sw_init(void *handle) return r; } - r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs); - if (r) { - DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r); - return r; - } - /* set up the gfx ring */ for (i = 0; i < adev->gfx.num_gfx_rings; i++) { ring = &adev->gfx.gfx_ring[i]; @@ -995,21 +989,21 @@ static int gfx_v8_0_sw_init(void *handle) /* reserve GDS, GWS and OA resource for gfx */ r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GDS, 0, + AMDGPU_GEM_DOMAIN_GDS, 0, NULL, NULL, &adev->gds.gds_gfx_bo); if (r) return r; r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GWS, 0, + AMDGPU_GEM_DOMAIN_GWS, 0, NULL, NULL, &adev->gds.gws_gfx_bo); if (r) return r; r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_OA, 0, + AMDGPU_GEM_DOMAIN_OA, 0, NULL, NULL, &adev->gds.oa_gfx_bo); if (r) return r; @@ -1033,8 +1027,6 @@ static int gfx_v8_0_sw_fini(void *handle) for (i = 0; i < adev->gfx.num_compute_rings; i++) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); - amdgpu_wb_free(adev, adev->gfx.ce_sync_offs); - gfx_v8_0_mec_fini(adev); return 0; @@ -3106,7 +3098,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev) sizeof(struct vi_mqd), PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, - &ring->mqd_obj); + NULL, &ring->mqd_obj); if (r) { dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); return r; @@ -3965,6 +3957,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 
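/*
 * The recurring "NULL, NULL" churn in the amdgpu_bo_create() calls throughout
 * this series is a signature change: judging by the call sites, the allocator
 * gained one more pointer parameter, inserted just before the returned BO,
 * and callers with nothing to pass supply NULL. A sketch of that
 * compatible-extension style with a hypothetical allocator (the opt_resv
 * name is invented for illustration):
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct bo { size_t size; };

static int bo_create(size_t size, const void *opt_resv, struct bo **out)
{
	struct bo *b = malloc(sizeof(*b));

	if (!b)
		return -1;
	(void)opt_resv;		/* NULL keeps the pre-change behavior */
	b->size = size;
	*out = b;
	return 0;
}

int main(void)
{
	struct bo *b;

	if (bo_create(4096, NULL, &b))	/* the updated call shape */
		return 1;
	printf("bo of %zu bytes\n", b->size);
	free(b);
	return 0;
}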
2 : 0)); amdgpu_ring_write(ring, lower_32_bits(seq)); amdgpu_ring_write(ring, upper_32_bits(seq)); + } /** @@ -4005,49 +3998,34 @@ static bool gfx_v8_0_ring_emit_semaphore(struct amdgpu_ring *ring, return true; } -static void gfx_v8_0_ce_sync_me(struct amdgpu_ring *ring) +static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vm_id, uint64_t pd_addr) { - struct amdgpu_device *adev = ring->adev; - u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4; - - /* instruct DE to set a magic number */ - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | - WRITE_DATA_DST_SEL(5))); - amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, 1); + int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); + uint32_t seq = ring->fence_drv.sync_seq[ring->idx]; + uint64_t addr = ring->fence_drv.gpu_addr; - /* let CE wait till condition satisfied */ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); - amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */ - WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ - WAIT_REG_MEM_FUNCTION(3) | /* == */ - WAIT_REG_MEM_ENGINE(2))); /* ce */ - amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, 1); + amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ + WAIT_REG_MEM_FUNCTION(3))); /* equal */ + amdgpu_ring_write(ring, addr & 0xfffffffc); + amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); + amdgpu_ring_write(ring, seq); amdgpu_ring_write(ring, 0xffffffff); amdgpu_ring_write(ring, 4); /* poll interval */ - /* instruct CE to reset wb of ce_sync to zero */ - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) | - WRITE_DATA_DST_SEL(5) | - WR_CONFIRM)); - amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, 0); -} - -static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr) -{ - int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); + if (usepfp) { + /* sync CE with ME to prevent CE fetching CEIB before context switch is done */ + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); + } amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | - WRITE_DATA_DST_SEL(0))); + WRITE_DATA_DST_SEL(0)) | + WR_CONFIRM); if (vm_id < 8) { amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); @@ -4083,9 +4061,10 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, /* sync PFP to ME, otherwise we might get invalid PFP reads */ amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); amdgpu_ring_write(ring, 0x0); - - /* synce CE with ME to prevent CE fetch CEIB before context switch done */ - gfx_v8_0_ce_sync_me(ring); + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); } } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 774528ab8704..fab5471d25d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1262,6 +1262,12 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); + /* reset addr and status */ + WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); + + if (!addr && !status) + return 0; + dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", entry->src_id, entry->src_data); dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", @@ -1269,8 +1275,6 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", status); gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client); - /* reset addr and status */ - WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 9a07742620d0..7bc9e9fcf3d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1262,6 +1262,12 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); + /* reset addr and status */ + WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); + + if (!addr && !status) + return 0; + dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", entry->src_id, entry->src_data); dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", @@ -1269,8 +1275,6 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", status); gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client); - /* reset addr and status */ - WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c index c900aa942ade..966d4b2ed9da 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c @@ -625,7 +625,7 @@ int iceland_smu_init(struct amdgpu_device *adev) ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, toc_buf); + NULL, NULL, toc_buf); if (ret) { DRM_ERROR("Failed to allocate memory for TOC buffer\n"); return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 94ec04a9c4d5..7e9154c7f1db 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -2995,6 +2995,15 @@ static int kv_dpm_late_init(void *handle) { /* powerdown unused blocks for now */ struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret; + + if (!amdgpu_dpm) + return 0; + + /* init the sysfs and debugfs files late */ + ret = amdgpu_pm_sysfs_init(adev); + if (ret) + return ret; kv_dpm_powergate_acp(adev, true); kv_dpm_powergate_samu(adev, true); @@ -3038,9 +3047,6 @@ static int kv_dpm_sw_init(void *handle) adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; if (amdgpu_dpm == 1) amdgpu_pm_print_power_states(adev); - ret = amdgpu_pm_sysfs_init(adev); - if (ret) - goto dpm_failed; mutex_unlock(&adev->pm.mutex); DRM_INFO("amdgpu: dpm initialized\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c index 1f5ac941a610..5421309c1862 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c +++ 
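/*
 * Both gmc_v7_0 and gmc_v8_0 hunks above apply the same reorder: the fault
 * latch is acked (the WREG32_P on VM_CONTEXT1_CNTL2) before anything is
 * printed, and a fault whose address and status both read zero returns early
 * instead of spamming the log. Model of that ack-then-filter interrupt shape,
 * with the registers reduced to plain variables:
 */
#include <stdio.h>

static unsigned fault_addr, fault_status;	/* stand-ins for RREG32() */

static int process_vm_fault(void)
{
	unsigned addr = fault_addr;
	unsigned status = fault_status;

	fault_addr = 0;		/* ack first: the hardware can latch the */
	fault_status = 0;	/* next fault while we decide what to log */

	if (!addr && !status)	/* nothing latched: stay silent */
		return 0;

	fprintf(stderr, "GPU fault: addr 0x%08x status 0x%08x\n", addr, status);
	return 0;
}

int main(void)
{
	fault_addr = 0x1000;
	fault_status = 0x3;
	process_vm_fault();	/* logs the real fault */
	process_vm_fault();	/* replayed interrupt: filtered out */
	return 0;
}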
b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c @@ -763,7 +763,7 @@ int tonga_smu_init(struct amdgpu_device *adev) ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, toc_buf); + NULL, NULL, toc_buf); if (ret) { DRM_ERROR("Failed to allocate memory for TOC buffer\n"); return -ENOMEM; @@ -773,7 +773,7 @@ int tonga_smu_init(struct amdgpu_device *adev) ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, smu_buf); + NULL, NULL, smu_buf); if (ret) { DRM_ERROR("Failed to allocate memory for SMU internal buffer\n"); return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 5fac5da694f0..ed50dd725788 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -224,11 +224,11 @@ static int uvd_v4_2_suspend(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = uvd_v4_2_hw_fini(adev); + r = amdgpu_uvd_suspend(adev); if (r) return r; - r = amdgpu_uvd_suspend(adev); + r = uvd_v4_2_hw_fini(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 2d5c59c318af..9ad8b9906c0b 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -220,11 +220,11 @@ static int uvd_v5_0_suspend(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = uvd_v5_0_hw_fini(adev); + r = amdgpu_uvd_suspend(adev); if (r) return r; - r = amdgpu_uvd_suspend(adev); + r = uvd_v5_0_hw_fini(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index d9f553fce531..7e9934fa4193 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -214,14 +214,16 @@ static int uvd_v6_0_suspend(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* Skip this for APU for now */ + if (!(adev->flags & AMD_IS_APU)) { + r = amdgpu_uvd_suspend(adev); + if (r) + return r; + } r = uvd_v6_0_hw_fini(adev); if (r) return r; - r = amdgpu_uvd_suspend(adev); - if (r) - return r; - return r; } @@ -230,10 +232,12 @@ static int uvd_v6_0_resume(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_uvd_resume(adev); - if (r) - return r; - + /* Skip this for APU for now */ + if (!(adev->flags & AMD_IS_APU)) { + r = amdgpu_uvd_resume(adev); + if (r) + return r; + } r = uvd_v6_0_hw_init(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 552d9e75ad1b..0bac8702e934 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1005,6 +1005,9 @@ static void vi_pcie_gen3_enable(struct amdgpu_device *adev) u32 mask; int ret; + if (pci_is_root_bus(adev->pdev->bus)) + return; + if (amdgpu_pcie_gen2 == 0) return; @@ -1400,7 +1403,8 @@ static int vi_common_early_init(void *handle) case CHIP_CARRIZO: adev->has_uvd = true; adev->cg_flags = 0; - adev->pg_flags = AMDGPU_PG_SUPPORT_UVD | AMDGPU_PG_SUPPORT_VCE; + /* Disable UVD pg */ + adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE; adev->external_rev_id = adev->rev_id + 0x1; if (amdgpu_smc_load_fw && smc_enabled) adev->firmware.smu_load = true; diff --git a/drivers/gpu/drm/amd/include/cgs_linux.h b/drivers/gpu/drm/amd/include/cgs_linux.h index 
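/*
 * The uvd_v4_2/uvd_v5_0/uvd_v6_0 suspend hunks all encode the same ordering
 * rule: save the UVD state (amdgpu_uvd_suspend) while the block is still
 * running, and only then tear the hardware down; uvd_v6_0 additionally skips
 * the save on APUs for now. A schematic of that contract with stub functions:
 */
#include <stdio.h>

static int uvd_save_state(void) { puts("save UVD context (hw still up)"); return 0; }
static int uvd_hw_fini(void)    { puts("stop UVD hardware"); return 0; }

static int uvd_suspend(int is_apu)
{
	int r;

	if (!is_apu) {			/* APU path skipped, as in uvd_v6_0 */
		r = uvd_save_state();	/* must run before hw_fini */
		if (r)
			return r;
	}
	return uvd_hw_fini();
}

int main(void)
{
	return uvd_suspend(0);
}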
488642f08267..3b47ae313e36 100644 --- a/drivers/gpu/drm/amd/include/cgs_linux.h +++ b/drivers/gpu/drm/amd/include/cgs_linux.h @@ -27,19 +27,6 @@ #include "cgs_common.h" /** - * cgs_import_gpu_mem() - Import dmabuf handle - * @cgs_device: opaque device handle - * @dmabuf_fd: DMABuf file descriptor - * @handle: memory handle (output) - * - * Must be called in the process context that dmabuf_fd belongs to. - * - * Return: 0 on success, -errno otherwise - */ -typedef int (*cgs_import_gpu_mem_t)(void *cgs_device, int dmabuf_fd, - cgs_handle_t *handle); - -/** * cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources * @private_data: private data provided to cgs_add_irq_source * @src_id: interrupt source ID @@ -114,16 +101,12 @@ typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type); typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type); struct cgs_os_ops { - cgs_import_gpu_mem_t import_gpu_mem; - /* IRQ handling */ cgs_add_irq_source_t add_irq_source; cgs_irq_get_t irq_get; cgs_irq_put_t irq_put; }; -#define cgs_import_gpu_mem(dev,dmabuf_fd,handle) \ - CGS_OS_CALL(import_gpu_mem,dev,dmabuf_fd,handle) #define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \ CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler, \ private_data) diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h new file mode 100644 index 000000000000..144f50acc971 --- /dev/null +++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h @@ -0,0 +1,41 @@ +#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _GPU_SCHED_TRACE_H_ + +#include <linux/stringify.h> +#include <linux/types.h> +#include <linux/tracepoint.h> + +#include <drm/drmP.h> + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM gpu_sched +#define TRACE_INCLUDE_FILE gpu_sched_trace + +TRACE_EVENT(amd_sched_job, + TP_PROTO(struct amd_sched_job *sched_job), + TP_ARGS(sched_job), + TP_STRUCT__entry( + __field(struct amd_sched_entity *, entity) + __field(const char *, name) + __field(u32, job_count) + __field(int, hw_job_count) + ), + + TP_fast_assign( + __entry->entity = sched_job->s_entity; + __entry->name = sched_job->sched->name; + __entry->job_count = kfifo_len( + &sched_job->s_entity->job_queue) / sizeof(sched_job); + __entry->hw_job_count = atomic_read( + &sched_job->sched->hw_rq_count); + ), + TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d", + __entry->entity, __entry->name, __entry->job_count, + __entry->hw_job_count) +); +#endif + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
+#include <trace/define_trace.h> diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 9259f1b6664c..3697eeeecf82 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -27,6 +27,9 @@ #include <drm/drmP.h> #include "gpu_scheduler.h" +#define CREATE_TRACE_POINTS +#include "gpu_sched_trace.h" + static struct amd_sched_job * amd_sched_entity_pop_job(struct amd_sched_entity *entity); static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); @@ -65,29 +68,29 @@ static struct amd_sched_job * amd_sched_rq_select_job(struct amd_sched_rq *rq) { struct amd_sched_entity *entity; - struct amd_sched_job *job; + struct amd_sched_job *sched_job; spin_lock(&rq->lock); entity = rq->current_entity; if (entity) { list_for_each_entry_continue(entity, &rq->entities, list) { - job = amd_sched_entity_pop_job(entity); - if (job) { + sched_job = amd_sched_entity_pop_job(entity); + if (sched_job) { rq->current_entity = entity; spin_unlock(&rq->lock); - return job; + return sched_job; } } } list_for_each_entry(entity, &rq->entities, list) { - job = amd_sched_entity_pop_job(entity); - if (job) { + sched_job = amd_sched_entity_pop_job(entity); + if (sched_job) { rq->current_entity = entity; spin_unlock(&rq->lock); - return job; + return sched_job; } if (entity == rq->current_entity) @@ -115,23 +118,27 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched, struct amd_sched_rq *rq, uint32_t jobs) { + int r; + if (!(sched && entity && rq)) return -EINVAL; memset(entity, 0, sizeof(struct amd_sched_entity)); - entity->belongto_rq = rq; - entity->scheduler = sched; - entity->fence_context = fence_context_alloc(1); - if(kfifo_alloc(&entity->job_queue, - jobs * sizeof(void *), - GFP_KERNEL)) - return -EINVAL; + INIT_LIST_HEAD(&entity->list); + entity->rq = rq; + entity->sched = sched; spin_lock_init(&entity->queue_lock); + r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL); + if (r) + return r; + atomic_set(&entity->fence_seq, 0); + entity->fence_context = fence_context_alloc(1); /* Add the entity to the run queue */ amd_sched_rq_add_entity(rq, entity); + return 0; } @@ -146,8 +153,8 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched, static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched, struct amd_sched_entity *entity) { - return entity->scheduler == sched && - entity->belongto_rq != NULL; + return entity->sched == sched && + entity->rq != NULL; } /** @@ -177,7 +184,7 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity) void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, struct amd_sched_entity *entity) { - struct amd_sched_rq *rq = entity->belongto_rq; + struct amd_sched_rq *rq = entity->rq; if (!amd_sched_entity_is_initialized(sched, entity)) return; @@ -198,22 +205,22 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb) container_of(cb, struct amd_sched_entity, cb); entity->dependency = NULL; fence_put(f); - amd_sched_wakeup(entity->scheduler); + amd_sched_wakeup(entity->sched); } static struct amd_sched_job * amd_sched_entity_pop_job(struct amd_sched_entity *entity) { - struct amd_gpu_scheduler *sched = entity->scheduler; - struct amd_sched_job *job; + struct amd_gpu_scheduler *sched = entity->sched; + struct amd_sched_job *sched_job; if (ACCESS_ONCE(entity->dependency)) return NULL; - if (!kfifo_out_peek(&entity->job_queue, &job, sizeof(job))) + if (!kfifo_out_peek(&entity->job_queue, 
&sched_job, sizeof(sched_job))) return NULL; - while ((entity->dependency = sched->ops->dependency(job))) { + while ((entity->dependency = sched->ops->dependency(sched_job))) { if (fence_add_callback(entity->dependency, &entity->cb, amd_sched_entity_wakeup)) @@ -222,32 +229,33 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity) return NULL; } - return job; + return sched_job; } /** * Helper to submit a job to the job queue * - * @job The pointer to job required to submit + * @sched_job The pointer to job required to submit * * Returns true if we could submit the job. */ -static bool amd_sched_entity_in(struct amd_sched_job *job) +static bool amd_sched_entity_in(struct amd_sched_job *sched_job) { - struct amd_sched_entity *entity = job->s_entity; + struct amd_sched_entity *entity = sched_job->s_entity; bool added, first = false; spin_lock(&entity->queue_lock); - added = kfifo_in(&entity->job_queue, &job, sizeof(job)) == sizeof(job); + added = kfifo_in(&entity->job_queue, &sched_job, + sizeof(sched_job)) == sizeof(sched_job); - if (added && kfifo_len(&entity->job_queue) == sizeof(job)) + if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job)) first = true; spin_unlock(&entity->queue_lock); /* first job wakes up scheduler */ if (first) - amd_sched_wakeup(job->sched); + amd_sched_wakeup(sched_job->sched); return added; } @@ -255,7 +263,7 @@ static bool amd_sched_entity_in(struct amd_sched_job *job) /** * Submit a job to the job queue * - * @job The pointer to job required to submit + * @sched_job The pointer to job required to submit * * Returns 0 for success, negative error code otherwise. */ @@ -271,9 +279,9 @@ int amd_sched_entity_push_job(struct amd_sched_job *sched_job) fence_get(&fence->base); sched_job->s_fence = fence; - wait_event(entity->scheduler->job_scheduled, + wait_event(entity->sched->job_scheduled, amd_sched_entity_in(sched_job)); - + trace_amd_sched_job(sched_job); return 0; } @@ -301,30 +309,28 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched) static struct amd_sched_job * amd_sched_select_job(struct amd_gpu_scheduler *sched) { - struct amd_sched_job *job; + struct amd_sched_job *sched_job; if (!amd_sched_ready(sched)) return NULL; /* Kernel run queue has higher priority than normal run queue*/ - job = amd_sched_rq_select_job(&sched->kernel_rq); - if (job == NULL) - job = amd_sched_rq_select_job(&sched->sched_rq); + sched_job = amd_sched_rq_select_job(&sched->kernel_rq); + if (sched_job == NULL) + sched_job = amd_sched_rq_select_job(&sched->sched_rq); - return job; + return sched_job; } static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) { - struct amd_sched_job *sched_job = - container_of(cb, struct amd_sched_job, cb); - struct amd_gpu_scheduler *sched; + struct amd_sched_fence *s_fence = + container_of(cb, struct amd_sched_fence, cb); + struct amd_gpu_scheduler *sched = s_fence->sched; - sched = sched_job->sched; - amd_sched_fence_signal(sched_job->s_fence); atomic_dec(&sched->hw_rq_count); - fence_put(&sched_job->s_fence->base); - sched->ops->process_job(sched_job); + amd_sched_fence_signal(s_fence); + fence_put(&s_fence->base); wake_up_interruptible(&sched->wake_up_worker); } @@ -338,87 +344,82 @@ static int amd_sched_main(void *param) while (!kthread_should_stop()) { struct amd_sched_entity *entity; - struct amd_sched_job *job; + struct amd_sched_fence *s_fence; + struct amd_sched_job *sched_job; struct fence *fence; wait_event_interruptible(sched->wake_up_worker, kthread_should_stop() || - (job = 
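/*
 * amd_sched_entity_in() above pushes one job pointer into the entity's kfifo
 * and wakes the scheduler thread only when the queue was previously empty;
 * amd_sched_entity_pop_job() peeks without consuming until dependencies are
 * resolved. A compact model of that first-job-wakes-scheduler queue, using a
 * plain power-of-two ring in place of the kfifo:
 */
#include <stdbool.h>
#include <stdio.h>

#define QUEUE_DEPTH 4	/* must stay a power of two for the index math */

struct entity {
	void *jobs[QUEUE_DEPTH];
	unsigned head, tail;	/* head == tail means empty */
};

static bool entity_in(struct entity *e, void *job)
{
	bool first;

	if (e->tail - e->head == QUEUE_DEPTH)
		return false;		/* full: caller waits and retries */
	first = (e->tail == e->head);
	e->jobs[e->tail++ % QUEUE_DEPTH] = job;
	if (first)
		printf("first job queued: wake up scheduler\n");
	return true;
}

static void *entity_peek(struct entity *e)	/* like kfifo_out_peek() */
{
	return (e->head == e->tail) ? NULL : e->jobs[e->head % QUEUE_DEPTH];
}

static void *entity_pop(struct entity *e)	/* like the main loop's kfifo_out() */
{
	return (e->head == e->tail) ? NULL : e->jobs[e->head++ % QUEUE_DEPTH];
}

int main(void)
{
	struct entity e = { {0}, 0, 0 };
	int a = 1, b = 2;

	entity_in(&e, &a);	/* wakes the scheduler */
	entity_in(&e, &b);	/* already awake: no wakeup */
	while (entity_peek(&e))
		entity_pop(&e);
	return 0;
}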
amd_sched_select_job(sched))); + (sched_job = amd_sched_select_job(sched))); - if (!job) + if (!sched_job) continue; - entity = job->s_entity; + entity = sched_job->s_entity; + s_fence = sched_job->s_fence; atomic_inc(&sched->hw_rq_count); - fence = sched->ops->run_job(job); + fence = sched->ops->run_job(sched_job); if (fence) { - r = fence_add_callback(fence, &job->cb, + r = fence_add_callback(fence, &s_fence->cb, amd_sched_process_job); if (r == -ENOENT) - amd_sched_process_job(fence, &job->cb); + amd_sched_process_job(fence, &s_fence->cb); else if (r) DRM_ERROR("fence add callback failed (%d)\n", r); fence_put(fence); + } else { + DRM_ERROR("Failed to run job!\n"); + amd_sched_process_job(NULL, &s_fence->cb); } - count = kfifo_out(&entity->job_queue, &job, sizeof(job)); - WARN_ON(count != sizeof(job)); + count = kfifo_out(&entity->job_queue, &sched_job, + sizeof(sched_job)); + WARN_ON(count != sizeof(sched_job)); wake_up(&sched->job_scheduled); } return 0; } /** - * Create a gpu scheduler + * Init a gpu scheduler instance * + * @sched The pointer to the scheduler * @ops The backend operations for this scheduler. - * @ring The the ring id for the scheduler. * @hw_submissions Number of hw submissions to do. + * @name Name used for debugging * - * Return the pointer to scheduler for success, otherwise return NULL + * Return 0 on success, otherwise error code. */ -struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops, - unsigned ring, unsigned hw_submission, - void *priv) +int amd_sched_init(struct amd_gpu_scheduler *sched, + struct amd_sched_backend_ops *ops, + unsigned hw_submission, const char *name) { - struct amd_gpu_scheduler *sched; - - sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL); - if (!sched) - return NULL; - sched->ops = ops; - sched->ring_id = ring; sched->hw_submission_limit = hw_submission; - sched->priv = priv; - snprintf(sched->name, sizeof(sched->name), "amdgpu[%d]", ring); + sched->name = name; amd_sched_rq_init(&sched->sched_rq); amd_sched_rq_init(&sched->kernel_rq); init_waitqueue_head(&sched->wake_up_worker); init_waitqueue_head(&sched->job_scheduled); atomic_set(&sched->hw_rq_count, 0); + /* Each scheduler will run on a separate kernel thread */ sched->thread = kthread_run(amd_sched_main, sched, sched->name); if (IS_ERR(sched->thread)) { - DRM_ERROR("Failed to create scheduler for id %d.\n", ring); - kfree(sched); - return NULL; + DRM_ERROR("Failed to create scheduler for %s.\n", name); + return PTR_ERR(sched->thread); } - return sched; + return 0; } /** * Destroy a gpu scheduler * * @sched The pointer to the scheduler - * - * return 0 if succeed. -1 if failed.
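/*
 * amd_sched_create()/amd_sched_destroy() become amd_sched_init()/
 * amd_sched_fini() above: the caller now owns the scheduler storage (so it
 * can live embedded in a larger per-ring object), init reports failure by
 * error code instead of NULL, and fini only stops the worker thread.
 * A minimal model of that init-in-place API style, with stand-in fields:
 */
#include <stdio.h>
#include <string.h>

struct scheduler {
	const char *name;		/* caller-provided, used for debugging */
	unsigned hw_submission_limit;
	int thread_running;		/* stand-in for the kthread */
};

static int sched_init(struct scheduler *s, unsigned hw_submission,
		      const char *name)
{
	memset(s, 0, sizeof(*s));
	s->name = name;
	s->hw_submission_limit = hw_submission;
	s->thread_running = 1;		/* real code: kthread_run() */
	return 0;			/* errors propagate as codes, not NULL */
}

static void sched_fini(struct scheduler *s)
{
	s->thread_running = 0;		/* real code: kthread_stop() */
}

struct ring {
	int id;
	struct scheduler sched;		/* embedded: no separate allocation */
};

int main(void)
{
	struct ring r = { 0 };

	if (sched_init(&r.sched, 2, "ring0"))
		return 1;
	printf("%s, limit %u\n", r.sched.name, r.sched.hw_submission_limit);
	sched_fini(&r.sched);
	return 0;
}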
*/ -int amd_sched_destroy(struct amd_gpu_scheduler *sched) +void amd_sched_fini(struct amd_gpu_scheduler *sched) { kthread_stop(sched->thread); - kfree(sched); - return 0; } diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index 2af0e4d4d817..80b64dc22214 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h @@ -38,13 +38,15 @@ struct amd_sched_rq; */ struct amd_sched_entity { struct list_head list; - struct amd_sched_rq *belongto_rq; - atomic_t fence_seq; - /* the job_queue maintains the jobs submitted by clients */ - struct kfifo job_queue; + struct amd_sched_rq *rq; + struct amd_gpu_scheduler *sched; + spinlock_t queue_lock; - struct amd_gpu_scheduler *scheduler; + struct kfifo job_queue; + + atomic_t fence_seq; uint64_t fence_context; + struct fence *dependency; struct fence_cb cb; }; @@ -62,13 +64,13 @@ struct amd_sched_rq { struct amd_sched_fence { struct fence base; - struct amd_gpu_scheduler *scheduler; + struct fence_cb cb; + struct amd_gpu_scheduler *sched; spinlock_t lock; void *owner; }; struct amd_sched_job { - struct fence_cb cb; struct amd_gpu_scheduler *sched; struct amd_sched_entity *s_entity; struct amd_sched_fence *s_fence; @@ -91,32 +93,29 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f) * these functions should be implemented in driver side */ struct amd_sched_backend_ops { - struct fence *(*dependency)(struct amd_sched_job *job); - struct fence *(*run_job)(struct amd_sched_job *job); - void (*process_job)(struct amd_sched_job *job); + struct fence *(*dependency)(struct amd_sched_job *sched_job); + struct fence *(*run_job)(struct amd_sched_job *sched_job); }; /** * One scheduler is implemented for each hardware ring */ struct amd_gpu_scheduler { - struct task_struct *thread; + struct amd_sched_backend_ops *ops; + uint32_t hw_submission_limit; + const char *name; struct amd_sched_rq sched_rq; struct amd_sched_rq kernel_rq; - atomic_t hw_rq_count; - struct amd_sched_backend_ops *ops; - uint32_t ring_id; wait_queue_head_t wake_up_worker; wait_queue_head_t job_scheduled; - uint32_t hw_submission_limit; - char name[20]; - void *priv; + atomic_t hw_rq_count; + struct task_struct *thread; }; -struct amd_gpu_scheduler * -amd_sched_create(struct amd_sched_backend_ops *ops, - uint32_t ring, uint32_t hw_submission, void *priv); -int amd_sched_destroy(struct amd_gpu_scheduler *sched); +int amd_sched_init(struct amd_gpu_scheduler *sched, + struct amd_sched_backend_ops *ops, + uint32_t hw_submission, const char *name); +void amd_sched_fini(struct amd_gpu_scheduler *sched); int amd_sched_entity_init(struct amd_gpu_scheduler *sched, struct amd_sched_entity *entity, diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c index e62c37920e11..d802638094f4 100644 --- a/drivers/gpu/drm/amd/scheduler/sched_fence.c +++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c @@ -36,7 +36,7 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity if (fence == NULL) return NULL; fence->owner = owner; - fence->scheduler = s_entity->scheduler; + fence->sched = s_entity->sched; spin_lock_init(&fence->lock); seq = atomic_inc_return(&s_entity->fence_seq); @@ -63,7 +63,7 @@ static const char *amd_sched_fence_get_driver_name(struct fence *fence) static const char *amd_sched_fence_get_timeline_name(struct fence *f) { struct amd_sched_fence *fence = to_amd_sched_fence(f); - return (const char 
*)fence->scheduler->name; + return (const char *)fence->sched->name; } static bool amd_sched_fence_enable_signaling(struct fence *f) diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 33d877c65ced..8328e7059205 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -4105,7 +4105,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length, struct drm_property_blob *blob; int ret; - if (!length) + if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob)) return ERR_PTR(-EINVAL); blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); @@ -4454,7 +4454,7 @@ int drm_mode_createblob_ioctl(struct drm_device *dev, * not associated with any file_priv. */ mutex_lock(&dev->mode_config.blob_lock); out_resp->blob_id = blob->base.id; - list_add_tail(&file_priv->blobs, &blob->head_file); + list_add_tail(&blob->head_file, &file_priv->blobs); mutex_unlock(&dev->mode_config.blob_lock); return 0; diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index e23df5fd3836..809959d56d78 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -53,8 +53,8 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int offset, int size, u8 *bytes); -static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_branch *mstb); +static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb); static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *port); @@ -804,8 +804,6 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref) struct drm_dp_mst_port *port, *tmp; bool wake_tx = false; - cancel_work_sync(&mstb->mgr->work); - /* * destroy all ports - don't need lock * as there are no more references to the mst branch @@ -863,29 +861,33 @@ static void drm_dp_destroy_port(struct kref *kref) { struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); struct drm_dp_mst_topology_mgr *mgr = port->mgr; + if (!port->input) { port->vcpi.num_slots = 0; kfree(port->cached_edid); - /* we can't destroy the connector here, as - we might be holding the mode_config.mutex - from an EDID retrieval */ + /* + * The only time we don't have a connector + * on an output port is if the connector init + * fails. 
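/*
 * The drm_property_create_blob() hunk above guards a classic header+payload
 * allocation: kzalloc(sizeof(struct drm_property_blob) + length) would wrap
 * to a tiny buffer for an attacker-chosen length near ULONG_MAX, so the size
 * is range-checked first. (The neighbouring createblob_ioctl hunk is a
 * separate fix: list_add_tail() takes the new entry first and the list head
 * second, and the arguments were swapped.) Generic form of the overflow
 * check, with a toy header type:
 */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct blob_hdr { unsigned long length; /* payload follows the header */ };

static void *blob_alloc(unsigned long length, const void *data)
{
	struct blob_hdr *b;

	/* reject 0 and anything that would overflow sizeof(*b) + length */
	if (!length || length > ULONG_MAX - sizeof(struct blob_hdr))
		return NULL;

	b = calloc(1, sizeof(*b) + length);
	if (!b)
		return NULL;
	b->length = length;
	if (data)
		memcpy(b + 1, data, length);	/* payload sits after header */
	return b;
}

int main(void)
{
	void *ok  = blob_alloc(16, "0123456789abcdef");
	void *bad = blob_alloc(ULONG_MAX, NULL);	/* would wrap: rejected */

	printf("ok=%p bad=%p\n", ok, bad);
	free(ok);
	return 0;
}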
+ */ if (port->connector) { + /* we can't destroy the connector here, as + * we might be holding the mode_config.mutex + * from an EDID retrieval */ + mutex_lock(&mgr->destroy_connector_lock); list_add(&port->next, &mgr->destroy_connector_list); mutex_unlock(&mgr->destroy_connector_lock); schedule_work(&mgr->destroy_connector_work); return; } + /* no need to clean up vcpi + * as if we have no connector we never setup a vcpi */ drm_dp_port_teardown_pdt(port, port->pdt); - - if (!port->input && port->vcpi.vcpi > 0) - drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); } kfree(port); - - (*mgr->cbs->hotplug)(mgr); } static void drm_dp_put_port(struct drm_dp_mst_port *port) @@ -1027,8 +1029,8 @@ static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb, } } -static void build_mst_prop_path(struct drm_dp_mst_port *port, - struct drm_dp_mst_branch *mstb, +static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb, + int pnum, char *proppath, size_t proppath_size) { @@ -1041,7 +1043,7 @@ static void build_mst_prop_path(struct drm_dp_mst_port *port, snprintf(temp, sizeof(temp), "-%d", port_num); strlcat(proppath, temp, proppath_size); } - snprintf(temp, sizeof(temp), "-%d", port->port_num); + snprintf(temp, sizeof(temp), "-%d", pnum); strlcat(proppath, temp, proppath_size); } @@ -1105,22 +1107,32 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, drm_dp_port_teardown_pdt(port, old_pdt); ret = drm_dp_port_setup_pdt(port); - if (ret == true) { + if (ret == true) drm_dp_send_link_address(mstb->mgr, port->mstb); - port->mstb->link_address_sent = true; - } } if (created && !port->input) { char proppath[255]; - build_mst_prop_path(port, mstb, proppath, sizeof(proppath)); - port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); - if (port->port_num >= 8) { + build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath)); + port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); + if (!port->connector) { + /* remove it from the port list */ + mutex_lock(&mstb->mgr->lock); + list_del(&port->next); + mutex_unlock(&mstb->mgr->lock); + /* drop port list reference */ + drm_dp_put_port(port); + goto out; + } + if (port->port_num >= DP_MST_LOGICAL_PORT_0) { port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); + drm_mode_connector_set_tile_property(port->connector); } + (*mstb->mgr->cbs->register_connector)(port->connector); } +out: /* put reference to this port */ drm_dp_put_port(port); } @@ -1182,17 +1194,18 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_ list_for_each_entry(port, &mstb->ports, next) { if (port->port_num == port_num) { - if (!port->mstb) { + mstb = port->mstb; + if (!mstb) { DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]); - return NULL; + goto out; } - mstb = port->mstb; break; } } } kref_get(&mstb->kref); +out: mutex_unlock(&mgr->lock); return mstb; } @@ -1202,10 +1215,9 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m { struct drm_dp_mst_port *port; struct drm_dp_mst_branch *mstb_child; - if (!mstb->link_address_sent) { + if (!mstb->link_address_sent) drm_dp_send_link_address(mgr, mstb); - mstb->link_address_sent = true; - } + list_for_each_entry(port, &mstb->ports, next) { if (port->input) continue; @@ -1458,8 +1470,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, mutex_unlock(&mgr->qlock); } -static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, - struct 
drm_dp_mst_branch *mstb) +static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb) { int len; struct drm_dp_sideband_msg_tx *txmsg; @@ -1467,11 +1479,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); if (!txmsg) - return -ENOMEM; + return; txmsg->dst = mstb; len = build_link_address(txmsg); + mstb->link_address_sent = true; drm_dp_queue_down_tx(mgr, txmsg); ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); @@ -1499,11 +1512,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, } (*mgr->cbs->hotplug)(mgr); } - } else + } else { + mstb->link_address_sent = false; DRM_DEBUG_KMS("link address failed %d\n", ret); + } kfree(txmsg); - return 0; } static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, @@ -1978,6 +1992,8 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr) drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, DP_MST_EN | DP_UPSTREAM_IS_SRC); mutex_unlock(&mgr->lock); + flush_work(&mgr->work); + flush_work(&mgr->destroy_connector_work); } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend); @@ -2263,10 +2279,10 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_ if (port->cached_edid) edid = drm_edid_duplicate(port->cached_edid); - else + else { edid = drm_get_edid(connector, &port->aux.ddc); - - drm_mode_connector_set_tile_property(connector); + drm_mode_connector_set_tile_property(connector); + } drm_dp_put_port(port); return edid; } @@ -2671,7 +2687,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) { struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); struct drm_dp_mst_port *port; - + bool send_hotplug = false; /* * Not a regular list traverse as we have to drop the destroy * connector lock before destroying the connector, to avoid AB->BA @@ -2694,7 +2710,10 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) if (!port->input && port->vcpi.vcpi > 0) drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); kfree(port); + send_hotplug = true; } + if (send_hotplug) + (*mgr->cbs->hotplug)(mgr); } /** @@ -2747,6 +2766,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init); */ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) { + flush_work(&mgr->work); flush_work(&mgr->destroy_connector_work); mutex_lock(&mgr->payload_lock); kfree(mgr->payloads); @@ -2782,12 +2802,13 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs if (msgs[num - 1].flags & I2C_M_RD) reading = true; - if (!reading) { + if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) { DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n"); ret = -EIO; goto out; } + memset(&msg, 0, sizeof(msg)); msg.req_type = DP_REMOTE_I2C_READ; msg.u.i2c_read.num_transactions = num - 1; msg.u.i2c_read.port_number = port->port_num; diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 418d299f3b12..ca08c472311b 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -345,7 +345,11 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper) struct drm_crtc *crtc = mode_set->crtc; int ret; - if (crtc->funcs->cursor_set) { + if (crtc->funcs->cursor_set2) { + ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0); + if (ret) + error = true; + } else if (crtc->funcs->cursor_set) { ret = 
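/*
 * The drm_dp_mst_i2c_xfer() hunk above tightens what is forwarded as a
 * remote-i2c sideband request: the transfer must be "N writes then one
 * read", N must fit DP_REMOTE_I2C_READ_MAX_TRANSACTIONS, and the request
 * struct is zeroed before use so no stale stack bytes leak into the message.
 * Shape of that validation with stand-in types (-5 stands for -EIO):
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
#define I2C_M_RD 0x0001

struct i2c_msg { unsigned short flags; };
struct i2c_read_req { int num_transactions; int port_number; };

static int mst_i2c_xfer(const struct i2c_msg *msgs, int num, int port)
{
	struct i2c_read_req msg;
	bool reading = num > 0 && (msgs[num - 1].flags & I2C_M_RD);

	if (!reading || num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS) {
		fprintf(stderr, "Unsupported I2C transaction for MST device\n");
		return -5;
	}

	memset(&msg, 0, sizeof(msg));	/* no uninitialized request fields */
	msg.num_transactions = num - 1;
	msg.port_number = port;
	return 0;
}

int main(void)
{
	struct i2c_msg ok[2]  = { {0}, {I2C_M_RD} };
	struct i2c_msg bad[1] = { {0} };	/* no trailing read: rejected */

	printf("%d %d\n", mst_i2c_xfer(ok, 2, 1), mst_i2c_xfer(bad, 1, 1));
	return 0;
}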
crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0); if (ret) error = true; diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 9a860ca1e9d7..d93e7378c077 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -520,7 +520,8 @@ EXPORT_SYMBOL(drm_ioctl_permit); /** Ioctl table */ static const struct drm_ioctl_desc drm_ioctls[] = { - DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, + DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index d734780b31c0..a18164f2f6d2 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -94,7 +94,18 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector) } #define DRM_OUTPUT_POLL_PERIOD (10*HZ) -static void __drm_kms_helper_poll_enable(struct drm_device *dev) +/** + * drm_kms_helper_poll_enable_locked - re-enable output polling. + * @dev: drm_device + * + * This function re-enables the output polling work without + * locking the mode_config mutex. + * + * This is like drm_kms_helper_poll_enable() however it is to be + * called from a context where the mode_config mutex is locked + * already. + */ +void drm_kms_helper_poll_enable_locked(struct drm_device *dev) { bool poll = false; struct drm_connector *connector; @@ -113,6 +124,8 @@ static void __drm_kms_helper_poll_enable(struct drm_device *dev) if (poll) schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD); } +EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked); + static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector, uint32_t maxX, uint32_t maxY, bool merge_type_bits) @@ -174,7 +187,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect /* Re-enable polling in case the global poll config changed. 
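/*
 * drm_kms_helper_poll_enable_locked() above is the old static helper made
 * public under the usual _locked convention: the suffixed function assumes
 * the caller already holds mode_config.mutex, and the unsuffixed wrapper is
 * just lock + locked variant + unlock. The same split rendered with a
 * pthread mutex:
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mode_config_mutex = PTHREAD_MUTEX_INITIALIZER;

/* caller must hold mode_config_mutex */
static void poll_enable_locked(void)
{
	printf("schedule output poll work\n");
}

static void poll_enable(void)
{
	pthread_mutex_lock(&mode_config_mutex);
	poll_enable_locked();
	pthread_mutex_unlock(&mode_config_mutex);
}

int main(void)
{
	poll_enable();			/* from an unlocked context */

	pthread_mutex_lock(&mode_config_mutex);
	poll_enable_locked();		/* from a probe path that already locks */
	pthread_mutex_unlock(&mode_config_mutex);
	return 0;
}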
*/ if (drm_kms_helper_poll != dev->mode_config.poll_running) - __drm_kms_helper_poll_enable(dev); + drm_kms_helper_poll_enable_locked(dev); dev->mode_config.poll_running = drm_kms_helper_poll; @@ -428,7 +441,7 @@ EXPORT_SYMBOL(drm_kms_helper_poll_disable); void drm_kms_helper_poll_enable(struct drm_device *dev) { mutex_lock(&dev->mode_config.mutex); - __drm_kms_helper_poll_enable(dev); + drm_kms_helper_poll_enable_locked(dev); mutex_unlock(&dev->mode_config.mutex); } EXPORT_SYMBOL(drm_kms_helper_poll_enable); diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 0f6cd33b531f..684bd4a13843 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -235,18 +235,12 @@ static ssize_t dpms_show(struct device *device, char *buf) { struct drm_connector *connector = to_drm_connector(device); - struct drm_device *dev = connector->dev; - uint64_t dpms_status; - int ret; + int dpms; - ret = drm_object_property_get_value(&connector->base, - dev->mode_config.dpms_property, - &dpms_status); - if (ret) - return 0; + dpms = READ_ONCE(connector->dpms); return snprintf(buf, PAGE_SIZE, "%s\n", - drm_get_dpms_name((int)dpms_status)); + drm_get_dpms_name(dpms)); } static ssize_t enabled_show(struct device *device, diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index cbdb78ef3bac..e6cbaca821a4 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -37,7 +37,6 @@ * DECON stands for Display and Enhancement controller. */ -#define DECON_DEFAULT_FRAMERATE 60 #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 #define WINDOWS_NR 2 @@ -165,16 +164,6 @@ static u32 decon_calc_clkdiv(struct decon_context *ctx, return (clkdiv < 0x100) ? clkdiv : 0xff; } -static bool decon_mode_fixup(struct exynos_drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - if (adjusted_mode->vrefresh == 0) - adjusted_mode->vrefresh = DECON_DEFAULT_FRAMERATE; - - return true; -} - static void decon_commit(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; @@ -637,7 +626,6 @@ static void decon_disable(struct exynos_drm_crtc *crtc) static const struct exynos_drm_crtc_ops decon_crtc_ops = { .enable = decon_enable, .disable = decon_disable, - .mode_fixup = decon_mode_fixup, .commit = decon_commit, .enable_vblank = decon_enable_vblank, .disable_vblank = decon_disable_vblank, diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c index d66ade0efac8..124fb9a56f02 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_core.c +++ b/drivers/gpu/drm/exynos/exynos_dp_core.c @@ -1383,28 +1383,6 @@ static int exynos_dp_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int exynos_dp_suspend(struct device *dev) -{ - struct exynos_dp_device *dp = dev_get_drvdata(dev); - - exynos_dp_disable(&dp->encoder); - return 0; -} - -static int exynos_dp_resume(struct device *dev) -{ - struct exynos_dp_device *dp = dev_get_drvdata(dev); - - exynos_dp_enable(&dp->encoder); - return 0; -} -#endif - -static const struct dev_pm_ops exynos_dp_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(exynos_dp_suspend, exynos_dp_resume) -}; - static const struct of_device_id exynos_dp_match[] = { { .compatible = "samsung,exynos5-dp" }, {}, @@ -1417,7 +1395,6 @@ struct platform_driver dp_driver = { .driver = { .name = "exynos-dp", .owner = THIS_MODULE, - .pm = &exynos_dp_pm_ops, .of_match_table = exynos_dp_match, }, }; diff --git 
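/*
 * dpms_show() above swaps a locked property lookup for READ_ONCE() on the
 * cached connector->dpms: sysfs only needs an untorn snapshot of one int,
 * not the whole property machinery. For scalars, READ_ONCE() is essentially
 * a volatile load; a GCC-style userspace equivalent of the pattern:
 */
#include <stdio.h>

#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

static int connector_dpms;	/* updated concurrently elsewhere */

static int dpms_show(char *buf, unsigned long size)
{
	int dpms = READ_ONCE(connector_dpms);	/* single, untorn load */

	return snprintf(buf, size, "dpms: %d\n", dpms);
}

int main(void)
{
	char buf[32];

	connector_dpms = 3;
	dpms_show(buf, sizeof(buf));
	fputs(buf, stdout);
	return 0;
}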
a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c index c68a6a2a9b57..7f55ba6771c6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_core.c +++ b/drivers/gpu/drm/exynos/exynos_drm_core.c @@ -28,7 +28,6 @@ int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv) return 0; } -EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register); int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv) { @@ -39,7 +38,6 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv) return 0; } -EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister); int exynos_drm_device_subdrv_probe(struct drm_device *dev) { @@ -69,7 +67,6 @@ int exynos_drm_device_subdrv_probe(struct drm_device *dev) return 0; } -EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_probe); int exynos_drm_device_subdrv_remove(struct drm_device *dev) { @@ -87,7 +84,6 @@ int exynos_drm_device_subdrv_remove(struct drm_device *dev) return 0; } -EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_remove); int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file) { @@ -111,7 +107,6 @@ err: } return ret; } -EXPORT_SYMBOL_GPL(exynos_drm_subdrv_open); void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file) { @@ -122,4 +117,3 @@ void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file) subdrv->close(dev, subdrv->dev, file); } } -EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close); diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 0872aa2f450f..ed28823d3b35 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -41,20 +41,6 @@ static void exynos_drm_crtc_disable(struct drm_crtc *crtc) exynos_crtc->ops->disable(exynos_crtc); } -static bool -exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); - - if (exynos_crtc->ops->mode_fixup) - return exynos_crtc->ops->mode_fixup(exynos_crtc, mode, - adjusted_mode); - - return true; -} - static void exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) { @@ -99,7 +85,6 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc, static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { .enable = exynos_drm_crtc_enable, .disable = exynos_drm_crtc_disable, - .mode_fixup = exynos_drm_crtc_mode_fixup, .mode_set_nofb = exynos_drm_crtc_mode_set_nofb, .atomic_begin = exynos_crtc_atomic_begin, .atomic_flush = exynos_crtc_atomic_flush, diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 831d2e4cacf9..ae9e6b2d3758 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -304,6 +304,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, return 0; } +#ifdef CONFIG_PM_SLEEP static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state) { struct drm_connector *connector; @@ -340,6 +341,7 @@ static int exynos_drm_resume(struct drm_device *dev) return 0; } +#endif static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index b7ba21dfb696..6c717ba672db 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -82,7 +82,6 @@ struct exynos_drm_plane { * * @enable: enable the device * @disable: disable the device - * @mode_fixup: 
fix mode data before applying it * @commit: set current hw specific display mode to hw. * @enable_vblank: specific driver callback for enabling vblank interrupt. * @disable_vblank: specific driver callback for disabling vblank interrupt. @@ -103,9 +102,6 @@ struct exynos_drm_crtc; struct exynos_drm_crtc_ops { void (*enable)(struct exynos_drm_crtc *crtc); void (*disable)(struct exynos_drm_crtc *crtc); - bool (*mode_fixup)(struct exynos_drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); void (*commit)(struct exynos_drm_crtc *crtc); int (*enable_vblank)(struct exynos_drm_crtc *crtc); void (*disable_vblank)(struct exynos_drm_crtc *crtc); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 2a652359af64..dd3a5e6d58c8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -1206,23 +1206,6 @@ static struct exynos_drm_ipp_ops fimc_dst_ops = { .set_addr = fimc_dst_set_addr, }; -static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable) -{ - DRM_DEBUG_KMS("enable[%d]\n", enable); - - if (enable) { - clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]); - clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]); - ctx->suspended = false; - } else { - clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]); - clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]); - ctx->suspended = true; - } - - return 0; -} - static irqreturn_t fimc_irq_handler(int irq, void *dev_id) { struct fimc_context *ctx = dev_id; @@ -1780,6 +1763,24 @@ static int fimc_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM +static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable) +{ + DRM_DEBUG_KMS("enable[%d]\n", enable); + + if (enable) { + clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]); + clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]); + ctx->suspended = false; + } else { + clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]); + clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]); + ctx->suspended = true; + } + + return 0; +} + #ifdef CONFIG_PM_SLEEP static int fimc_suspend(struct device *dev) { @@ -1806,7 +1807,6 @@ static int fimc_resume(struct device *dev) } #endif -#ifdef CONFIG_PM static int fimc_runtime_suspend(struct device *dev) { struct fimc_context *ctx = get_fimc_context(dev); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 750a9e6b9e8d..3d1aba67758b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -41,7 +41,6 @@ * CPU Interface. */ -#define FIMD_DEFAULT_FRAMERATE 60 #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 /* position control register for hardware window 0, 2 ~ 4.*/ @@ -377,16 +376,6 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx, return (clkdiv < 0x100) ? clkdiv : 0xff; } -static bool fimd_mode_fixup(struct exynos_drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - if (adjusted_mode->vrefresh == 0) - adjusted_mode->vrefresh = FIMD_DEFAULT_FRAMERATE; - - return true; -} - static void fimd_commit(struct exynos_drm_crtc *crtc) { struct fimd_context *ctx = crtc->ctx; @@ -882,13 +871,12 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable) return; val = enable ? 
DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE; - writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON); + writel(val, ctx->regs + DP_MIE_CLKCON); } static const struct exynos_drm_crtc_ops fimd_crtc_ops = { .enable = fimd_enable, .disable = fimd_disable, - .mode_fixup = fimd_mode_fixup, .commit = fimd_commit, .enable_vblank = fimd_enable_vblank, .disable_vblank = fimd_disable_vblank, diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 3734c34aed16..c17efdb238a6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -1059,7 +1059,6 @@ int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data, return 0; } -EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl); int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, struct drm_file *file) @@ -1230,7 +1229,6 @@ err: g2d_put_cmdlist(g2d, node); return ret; } -EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl); int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, struct drm_file *file) @@ -1293,7 +1291,6 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, out: return 0; } -EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl); static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index f12fbc36b120..407afedb6003 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -56,39 +56,35 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj) nr_pages = obj->size >> PAGE_SHIFT; if (!is_drm_iommu_supported(dev)) { - dma_addr_t start_addr; - unsigned int i = 0; - obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *)); if (!obj->pages) { DRM_ERROR("failed to allocate pages.\n"); return -ENOMEM; } + } - obj->cookie = dma_alloc_attrs(dev->dev, - obj->size, - &obj->dma_addr, GFP_KERNEL, - &obj->dma_attrs); - if (!obj->cookie) { - DRM_ERROR("failed to allocate buffer.\n"); + obj->cookie = dma_alloc_attrs(dev->dev, obj->size, &obj->dma_addr, + GFP_KERNEL, &obj->dma_attrs); + if (!obj->cookie) { + DRM_ERROR("failed to allocate buffer.\n"); + if (obj->pages) drm_free_large(obj->pages); - return -ENOMEM; - } + return -ENOMEM; + } + + if (obj->pages) { + dma_addr_t start_addr; + unsigned int i = 0; start_addr = obj->dma_addr; while (i < nr_pages) { - obj->pages[i] = phys_to_page(start_addr); + obj->pages[i] = pfn_to_page(dma_to_pfn(dev->dev, + start_addr)); start_addr += PAGE_SIZE; i++; } } else { - obj->pages = dma_alloc_attrs(dev->dev, obj->size, - &obj->dma_addr, GFP_KERNEL, - &obj->dma_attrs); - if (!obj->pages) { - DRM_ERROR("failed to allocate buffer.\n"); - return -ENOMEM; - } + obj->pages = obj->cookie; } DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", @@ -110,15 +106,11 @@ static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj) DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", (unsigned long)obj->dma_addr, obj->size); - if (!is_drm_iommu_supported(dev)) { - dma_free_attrs(dev->dev, obj->size, obj->cookie, - (dma_addr_t)obj->dma_addr, &obj->dma_attrs); - drm_free_large(obj->pages); - } else - dma_free_attrs(dev->dev, obj->size, obj->pages, - (dma_addr_t)obj->dma_addr, &obj->dma_attrs); + dma_free_attrs(dev->dev, obj->size, obj->cookie, + (dma_addr_t)obj->dma_addr, &obj->dma_attrs); - obj->dma_addr = (dma_addr_t)NULL; + if (!is_drm_iommu_supported(dev)) + drm_free_large(obj->pages); } static int exynos_drm_gem_handle_create(struct drm_gem_object 
*obj, @@ -156,18 +148,14 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj) * once dmabuf's refcount becomes 0. */ if (obj->import_attach) - goto out; - - exynos_drm_free_buf(exynos_gem_obj); - -out: - drm_gem_free_mmap_offset(obj); + drm_prime_gem_destroy(obj, exynos_gem_obj->sgt); + else + exynos_drm_free_buf(exynos_gem_obj); /* release file pointer to gem object. */ drm_gem_object_release(obj); kfree(exynos_gem_obj); - exynos_gem_obj = NULL; } unsigned long exynos_drm_gem_get_size(struct drm_device *dev, @@ -190,8 +178,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev, return exynos_gem_obj->size; } - -struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, +static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, unsigned long size) { struct exynos_drm_gem_obj *exynos_gem_obj; @@ -212,6 +199,13 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, return ERR_PTR(ret); } + ret = drm_gem_create_mmap_offset(obj); + if (ret < 0) { + drm_gem_object_release(obj); + kfree(exynos_gem_obj); + return ERR_PTR(ret); + } + DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); return exynos_gem_obj; @@ -313,7 +307,7 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev, drm_gem_object_unreference_unlocked(obj); } -int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, +static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, struct vm_area_struct *vma) { struct drm_device *drm_dev = exynos_gem_obj->base.dev; @@ -342,7 +336,8 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) -{ struct exynos_drm_gem_obj *exynos_gem_obj; +{ + struct exynos_drm_gem_obj *exynos_gem_obj; struct drm_exynos_gem_info *args = data; struct drm_gem_object *obj; @@ -402,6 +397,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_mode_create_dumb *args) { struct exynos_drm_gem_obj *exynos_gem_obj; + unsigned int flags; int ret; /* @@ -413,16 +409,12 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, args->pitch = args->width * ((args->bpp + 7) / 8); args->size = args->pitch * args->height; - if (is_drm_iommu_supported(dev)) { - exynos_gem_obj = exynos_drm_gem_create(dev, - EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC, - args->size); - } else { - exynos_gem_obj = exynos_drm_gem_create(dev, - EXYNOS_BO_CONTIG | EXYNOS_BO_WC, - args->size); - } + if (is_drm_iommu_supported(dev)) + flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC; + else + flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC; + exynos_gem_obj = exynos_drm_gem_create(dev, flags, args->size); if (IS_ERR(exynos_gem_obj)) { dev_warn(dev->dev, "FB allocation failed.\n"); return PTR_ERR(exynos_gem_obj); @@ -460,14 +452,9 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, goto unlock; } - ret = drm_gem_create_mmap_offset(obj); - if (ret) - goto out; - *offset = drm_vma_node_offset_addr(&obj->vma_node); DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); -out: drm_gem_object_unreference(obj); unlock: mutex_unlock(&dev->struct_mutex); @@ -543,7 +530,6 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) err_close_vm: drm_gem_vm_close(vma); - drm_gem_free_mmap_offset(obj); return ret; } @@ -588,6 +574,8 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, if (ret < 0) goto err_free_large; + exynos_gem_obj->sgt = sgt; + if (sgt->nents == 1) { 
/* always physically continuous memory if sgt->nents is 1. */ exynos_gem_obj->flags |= EXYNOS_BO_CONTIG; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index cd62f8410d1e..b62d1007c0e0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -39,6 +39,7 @@ * - this address could be physical address without IOMMU and * device address with IOMMU. * @pages: Array of backing pages. + * @sgt: Imported sg_table. * * P.S. this object would be transferred to user as kms_bo.handle so * user can access the buffer through kms_bo.handle. @@ -52,6 +53,7 @@ struct exynos_drm_gem_obj { dma_addr_t dma_addr; struct dma_attrs dma_attrs; struct page **pages; + struct sg_table *sgt; }; struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); @@ -59,10 +61,6 @@ struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); /* destroy a buffer with gem object */ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj); -/* create a private gem object and initialize it. */ -struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, - unsigned long size); - /* create a new buffer with gem object */ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, unsigned int flags, diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index 425e70625388..2f5c118f4c8e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -786,6 +786,7 @@ static int rotator_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM static int rotator_clk_crtl(struct rot_context *rot, bool enable) { if (enable) { @@ -822,7 +823,6 @@ static int rotator_resume(struct device *dev) } #endif -#ifdef CONFIG_PM static int rotator_runtime_suspend(struct device *dev) { struct rot_context *rot = dev_get_drvdata(dev); diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c index 82be6b86a168..d1e300dcd544 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c @@ -58,7 +58,8 @@ static void fsl_dcu_drm_plane_atomic_disable(struct drm_plane *plane, struct drm_plane_state *old_state) { struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private; - unsigned int index, value, ret; + unsigned int value; + int index, ret; index = fsl_dcu_drm_plane_index(plane); if (index < 0) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index ab37d1121be8..990f656e6ab0 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -832,6 +832,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) mutex_init(&dev_priv->sb_lock); mutex_init(&dev_priv->modeset_restore_lock); mutex_init(&dev_priv->csr_lock); + mutex_init(&dev_priv->av_mutex); intel_pm_setup(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e1db8de52851..22dd7043c9ef 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1885,6 +1885,11 @@ struct drm_i915_private { /* hda/i915 audio component */ struct i915_audio_component *audio_component; bool audio_component_registered; + /** + * av_mutex - mutex for audio/video sync + * + */ + struct mutex av_mutex; uint32_t hw_context_size; struct list_head context_list; diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c 
b/drivers/gpu/drm/i915/i915_gem_shrinker.c index f6ecbda2c604..674341708033 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -143,7 +143,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, } /** - * i915_gem_shrink - Shrink buffer object caches completely + * i915_gem_shrink_all - Shrink buffer object caches completely * @dev_priv: i915 device * * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 8fd431bcdfd3..a96b9006a51e 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -804,7 +804,10 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = { * Also note, that the object created here is not currently a "first class" * object, in that several ioctls are banned. These are the CPU access * ioctls: mmap(), pwrite and pread. In practice, you are expected to use - * direct access via your pointer rather than use those ioctls. + * direct access via your pointer rather than use those ioctls. Another + * restriction is that we do not allow userptr surfaces to be pinned to the + * hardware and so we reject any attempt to create a framebuffer out of a + * userptr. * * If you think this is a good interface to use to pass GPU memory between * drivers, please use dma-buf instead. In fact, wherever possible use diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 5a244ab9395b..39d73dbc1c47 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -640,6 +640,32 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; /* + * On HSW, the DSL reg (0x70000) appears to return 0 if we + * read it just before the start of vblank. So try it again + * so we don't accidentally end up spanning a vblank frame + * increment, causing the pipe_update_end() code to squawk at us. + * + * The nature of this problem means we can't simply check the ISR + * bit and return the vblank start value; nor can we use the scanline + * debug register in the transcoder as it appears to have the same + * problem. We may need to extend this to include other platforms, + * but so far testing only shows the problem on HSW. + */ + if (IS_HASWELL(dev) && !position) { + int i, temp; + + for (i = 0; i < 100; i++) { + udelay(1); + temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & + DSL_LINEMASK_GEN3; + if (temp != position) { + position = temp; + break; + } + } + } + + /* + * See update_scanline_offset() for the details on the + * scanline_offset adjustment.
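The HSW PIPEDSL workaround above is a bounded re-read: when a register is known to transiently read back 0 around an event boundary, poll it a limited number of times and accept the first value that differs. A minimal userspace sketch of the same pattern follows; the read_scanline_reg() stub and its values are invented stand-ins for the i915 MMIO access.

#include <stdio.h>

static unsigned int reads;

/* Stub: pretend the register returns 0 for the first few reads. */
static unsigned int read_scanline_reg(void)
{
	return (reads++ < 3) ? 0 : 42;
}

static unsigned int read_scanline_stable(void)
{
	unsigned int position = read_scanline_reg();
	int i;

	if (position == 0) {
		/* Bounded retry: take the first value that differs. */
		for (i = 0; i < 100; i++) {
			unsigned int temp = read_scanline_reg();

			if (temp != position) {
				position = temp;
				break;
			}
		}
	}
	return position;
}

int main(void)
{
	printf("scanline = %u\n", read_scanline_stable()); /* prints 42 */
	return 0;
}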
*/ diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 89c1a8ce1f98..ae8df0a43de6 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -68,6 +68,31 @@ static const struct { { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 }, }; +/* HDMI N/CTS table */ +#define TMDS_297M 297000 +#define TMDS_296M DIV_ROUND_UP(297000 * 1000, 1001) +static const struct { + int sample_rate; + int clock; + int n; + int cts; +} aud_ncts[] = { + { 44100, TMDS_296M, 4459, 234375 }, + { 44100, TMDS_297M, 4704, 247500 }, + { 48000, TMDS_296M, 5824, 281250 }, + { 48000, TMDS_297M, 5120, 247500 }, + { 32000, TMDS_296M, 5824, 421875 }, + { 32000, TMDS_297M, 3072, 222750 }, + { 88200, TMDS_296M, 8918, 234375 }, + { 88200, TMDS_297M, 9408, 247500 }, + { 96000, TMDS_296M, 11648, 281250 }, + { 96000, TMDS_297M, 10240, 247500 }, + { 176400, TMDS_296M, 17836, 234375 }, + { 176400, TMDS_297M, 18816, 247500 }, + { 192000, TMDS_296M, 23296, 281250 }, + { 192000, TMDS_297M, 20480, 247500 }, +}; + /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */ static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode) { @@ -90,6 +115,45 @@ static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode) return hdmi_audio_clock[i].config; } +static int audio_config_get_n(const struct drm_display_mode *mode, int rate) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(aud_ncts); i++) { + if ((rate == aud_ncts[i].sample_rate) && + (mode->clock == aud_ncts[i].clock)) { + return aud_ncts[i].n; + } + } + return 0; +} + +static uint32_t audio_config_setup_n_reg(int n, uint32_t val) +{ + int n_low, n_up; + uint32_t tmp = val; + + n_low = n & 0xfff; + n_up = (n >> 12) & 0xff; + tmp &= ~(AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK); + tmp |= ((n_up << AUD_CONFIG_UPPER_N_SHIFT) | + (n_low << AUD_CONFIG_LOWER_N_SHIFT) | + AUD_CONFIG_N_PROG_ENABLE); + return tmp; +} + +/* check whether N/CTS/M need be set manually */ +static bool audio_rate_need_prog(struct intel_crtc *crtc, + const struct drm_display_mode *mode) +{ + if (((mode->clock == TMDS_297M) || + (mode->clock == TMDS_296M)) && + intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) + return true; + else + return false; +} + static bool intel_eld_uptodate(struct drm_connector *connector, int reg_eldv, uint32_t bits_eldv, int reg_elda, uint32_t bits_elda, @@ -184,6 +248,8 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder) DRM_DEBUG_KMS("Disable audio codec on pipe %c\n", pipe_name(pipe)); + mutex_lock(&dev_priv->av_mutex); + /* Disable timestamps */ tmp = I915_READ(HSW_AUD_CFG(pipe)); tmp &= ~AUD_CONFIG_N_VALUE_INDEX; @@ -199,6 +265,8 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder) tmp &= ~AUDIO_ELD_VALID(pipe); tmp &= ~AUDIO_OUTPUT_ENABLE(pipe); I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp); + + mutex_unlock(&dev_priv->av_mutex); } static void hsw_audio_codec_enable(struct drm_connector *connector, @@ -208,13 +276,20 @@ static void hsw_audio_codec_enable(struct drm_connector *connector, struct drm_i915_private *dev_priv = connector->dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); enum pipe pipe = intel_crtc->pipe; + struct i915_audio_component *acomp = dev_priv->audio_component; const uint8_t *eld = connector->eld; + struct intel_digital_port *intel_dig_port = + enc_to_dig_port(&encoder->base); + enum port port = intel_dig_port->port; uint32_t tmp; int len, i; + int n, rate; DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n", 
pipe_name(pipe), drm_eld_size(eld)); + mutex_lock(&dev_priv->av_mutex); + /* Enable audio presence detect, invalidate ELD */ tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); tmp |= AUDIO_OUTPUT_ENABLE(pipe); @@ -246,13 +321,32 @@ static void hsw_audio_codec_enable(struct drm_connector *connector, /* Enable timestamps */ tmp = I915_READ(HSW_AUD_CFG(pipe)); tmp &= ~AUD_CONFIG_N_VALUE_INDEX; - tmp &= ~AUD_CONFIG_N_PROG_ENABLE; tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK; if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT)) tmp |= AUD_CONFIG_N_VALUE_INDEX; else tmp |= audio_config_hdmi_pixel_clock(mode); + + tmp &= ~AUD_CONFIG_N_PROG_ENABLE; + if (audio_rate_need_prog(intel_crtc, mode)) { + if (!acomp) + rate = 0; + else if (port >= PORT_A && port <= PORT_E) + rate = acomp->aud_sample_rate[port]; + else { + DRM_ERROR("invalid port: %d\n", port); + rate = 0; + } + n = audio_config_get_n(mode, rate); + if (n != 0) + tmp = audio_config_setup_n_reg(n, tmp); + else + DRM_DEBUG_KMS("no suitable N value is found\n"); + } + I915_WRITE(HSW_AUD_CFG(pipe), tmp); + + mutex_unlock(&dev_priv->av_mutex); } static void ilk_audio_codec_disable(struct intel_encoder *encoder) @@ -430,7 +524,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder) /** * intel_audio_codec_disable - Disable the audio codec for HD audio - * @encoder: encoder on which to disable audio + * @intel_encoder: encoder on which to disable audio * * The disable sequences must be performed before disabling the transcoder or * port. @@ -527,12 +621,91 @@ static int i915_audio_component_get_cdclk_freq(struct device *dev) return ret; } +static int i915_audio_component_sync_audio_rate(struct device *dev, + int port, int rate) +{ + struct drm_i915_private *dev_priv = dev_to_i915(dev); + struct drm_device *drm_dev = dev_priv->dev; + struct intel_encoder *intel_encoder; + struct intel_digital_port *intel_dig_port; + struct intel_crtc *crtc; + struct drm_display_mode *mode; + struct i915_audio_component *acomp = dev_priv->audio_component; + enum pipe pipe = -1; + u32 tmp; + int n; + + /* HSW, BDW SKL need this fix */ + if (!IS_SKYLAKE(dev_priv) && + !IS_BROADWELL(dev_priv) && + !IS_HASWELL(dev_priv)) + return 0; + + mutex_lock(&dev_priv->av_mutex); + /* 1. get the pipe */ + for_each_intel_encoder(drm_dev, intel_encoder) { + if (intel_encoder->type != INTEL_OUTPUT_HDMI) + continue; + intel_dig_port = enc_to_dig_port(&intel_encoder->base); + if (port == intel_dig_port->port) { + crtc = to_intel_crtc(intel_encoder->base.crtc); + if (!crtc) { + DRM_DEBUG_KMS("%s: crtc is NULL\n", __func__); + continue; + } + pipe = crtc->pipe; + break; + } + } + + if (pipe == INVALID_PIPE) { + DRM_DEBUG_KMS("no pipe for the port %c\n", port_name(port)); + mutex_unlock(&dev_priv->av_mutex); + return -ENODEV; + } + DRM_DEBUG_KMS("pipe %c connects port %c\n", + pipe_name(pipe), port_name(port)); + mode = &crtc->config->base.adjusted_mode; + + /* port must be valid now, otherwise the pipe will be invalid */ + acomp->aud_sample_rate[port] = rate; + + /* 2. 
check whether to set the N/CTS/M manually or not */ + if (!audio_rate_need_prog(crtc, mode)) { + tmp = I915_READ(HSW_AUD_CFG(pipe)); + tmp &= ~AUD_CONFIG_N_PROG_ENABLE; + I915_WRITE(HSW_AUD_CFG(pipe), tmp); + mutex_unlock(&dev_priv->av_mutex); + return 0; + } + + n = audio_config_get_n(mode, rate); + if (n == 0) { + DRM_DEBUG_KMS("Using automatic mode for N value on port %c\n", + port_name(port)); + tmp = I915_READ(HSW_AUD_CFG(pipe)); + tmp &= ~AUD_CONFIG_N_PROG_ENABLE; + I915_WRITE(HSW_AUD_CFG(pipe), tmp); + mutex_unlock(&dev_priv->av_mutex); + return 0; + } + + /* 3. set the N/CTS/M */ + tmp = I915_READ(HSW_AUD_CFG(pipe)); + tmp = audio_config_setup_n_reg(n, tmp); + I915_WRITE(HSW_AUD_CFG(pipe), tmp); + + mutex_unlock(&dev_priv->av_mutex); + return 0; +} + static const struct i915_audio_component_ops i915_audio_component_ops = { .owner = THIS_MODULE, .get_power = i915_audio_component_get_power, .put_power = i915_audio_component_put_power, .codec_wake_override = i915_audio_component_codec_wake_override, .get_cdclk_freq = i915_audio_component_get_cdclk_freq, + .sync_audio_rate = i915_audio_component_sync_audio_rate, }; static int i915_audio_component_bind(struct device *i915_dev, @@ -540,6 +713,7 @@ static int i915_audio_component_bind(struct device *i915_dev, { struct i915_audio_component *acomp = data; struct drm_i915_private *dev_priv = dev_to_i915(i915_dev); + int i; if (WARN_ON(acomp->ops || acomp->dev)) return -EEXIST; @@ -547,6 +721,9 @@ static int i915_audio_component_bind(struct device *i915_dev, drm_modeset_lock_all(dev_priv->dev); acomp->ops = &i915_audio_component_ops; acomp->dev = i915_dev; + BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS); + for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++) + acomp->aud_sample_rate[i] = 0; dev_priv->audio_component = acomp; drm_modeset_unlock_all(dev_priv->dev); diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index b3e437b3bb54..c19e669ffe50 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -42,7 +42,7 @@ find_section(const void *_bdb, int section_id) const struct bdb_header *bdb = _bdb; const u8 *base = _bdb; int index = 0; - u16 total, current_size; + u32 total, current_size; u8 current_id; /* skip to first section */ @@ -57,6 +57,10 @@ find_section(const void *_bdb, int section_id) current_size = *((const u16 *)(base + index)); index += 2; + /* The MIPI Sequence Block v3+ has a separate size field. */ + if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3) + current_size = *((const u32 *)(base + index + 1)); + if (index + current_size > total) return NULL; @@ -799,6 +803,12 @@ parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) return; } + /* Fail gracefully for forward incompatible sequence block. */ + if (sequence->version >= 3) { + DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n"); + return; + } + DRM_DEBUG_DRIVER("Found MIPI sequence block\n"); block_size = get_blocksize(sequence); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 8cc9264f7809..b2270d576979 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1724,6 +1724,15 @@ static void i9xx_enable_pll(struct intel_crtc *crtc) I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE); } + /* + * Apparently we need to have VGA mode enabled prior to changing + * the P1/P2 dividers. Otherwise the DPLL will keep using the old + * dividers, even though the register value does change. 
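The audio_config_setup_n_reg() helper above packs a 20-bit N value into two register fields, 12 low bits and 8 upper bits, and sets an enable bit that switches the hardware from automatic to driver-programmed N. A self-contained sketch of that packing follows; the shift, mask, and enable-bit values are placeholders, not the real AUD_CONFIG layout.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder field layout; only the split into 12 low + 8 upper bits
 * mirrors the code above. */
#define LOWER_N_SHIFT	4
#define LOWER_N_MASK	(0xfffu << LOWER_N_SHIFT)
#define UPPER_N_SHIFT	20
#define UPPER_N_MASK	(0xffu << UPPER_N_SHIFT)
#define N_PROG_ENABLE	(1u << 28)	/* hypothetical bit position */

static uint32_t pack_n(uint32_t reg, int n)
{
	uint32_t n_low = n & 0xfff;
	uint32_t n_up = (n >> 12) & 0xff;

	reg &= ~(UPPER_N_MASK | LOWER_N_MASK);
	reg |= (n_up << UPPER_N_SHIFT) | (n_low << LOWER_N_SHIFT) |
	       N_PROG_ENABLE;
	return reg;
}

static int unpack_n(uint32_t reg)
{
	return (int)(((reg & UPPER_N_MASK) >> UPPER_N_SHIFT) << 12 |
		     ((reg & LOWER_N_MASK) >> LOWER_N_SHIFT));
}

int main(void)
{
	int n = 11648;	/* 96 kHz at the ~296.703 MHz TMDS clock, per the aud_ncts table above */
	uint32_t reg = pack_n(0, n);

	assert(unpack_n(reg) == n);	/* round-trips through the split fields */
	printf("reg = 0x%08x, n = %d\n", reg, unpack_n(reg));
	return 0;
}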
+ */ + I915_WRITE(reg, 0); + + I915_WRITE(reg, dpll); + /* Wait for the clocks to stabilize. */ POSTING_READ(reg); udelay(150); @@ -14107,6 +14116,11 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); struct drm_i915_gem_object *obj = intel_fb->obj; + if (obj->userptr.mm) { + DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n"); + return -EINVAL; + } + return drm_gem_handle_create(file, &obj->base, handle); } @@ -14897,9 +14911,19 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) /* restore vblank interrupts to correct state */ drm_crtc_vblank_reset(&crtc->base); if (crtc->active) { + struct intel_plane *plane; + drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode); update_scanline_offset(crtc); drm_crtc_vblank_on(&crtc->base); + + /* Disable everything but the primary plane */ + for_each_intel_plane_on_crtc(dev, crtc, plane) { + if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) + continue; + + plane->disable_plane(&plane->base, &crtc->base); + } } /* We need to sanitize the plane -> pipe mapping first because this will @@ -15067,35 +15091,25 @@ void i915_redisable_vga(struct drm_device *dev) i915_redisable_vga_power_on(dev); } -static bool primary_get_hw_state(struct intel_crtc *crtc) +static bool primary_get_hw_state(struct intel_plane *plane) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - return !!(I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE); + return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE; } -static void readout_plane_state(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state) +/* FIXME read out full plane state for all planes */ +static void readout_plane_state(struct intel_crtc *crtc) { - struct intel_plane *p; - struct intel_plane_state *plane_state; - bool active = crtc_state->base.active; - - for_each_intel_plane(crtc->base.dev, p) { - if (crtc->pipe != p->pipe) - continue; - - plane_state = to_intel_plane_state(p->base.state); + struct drm_plane *primary = crtc->base.primary; + struct intel_plane_state *plane_state = + to_intel_plane_state(primary->state); - if (p->base.type == DRM_PLANE_TYPE_PRIMARY) - plane_state->visible = primary_get_hw_state(crtc); - else { - if (active) - p->disable_plane(&p->base, &crtc->base); + plane_state->visible = + primary_get_hw_state(to_intel_plane(primary)); - plane_state->visible = false; - } - } + if (plane_state->visible) + crtc->base.state->plane_mask |= 1 << drm_plane_index(primary); } static void intel_modeset_readout_hw_state(struct drm_device *dev) @@ -15118,34 +15132,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) crtc->base.state->active = crtc->active; crtc->base.enabled = crtc->active; - memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); - if (crtc->base.state->active) { - intel_mode_from_pipe_config(&crtc->base.mode, crtc->config); - intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config); - WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode)); - - /* - * The initial mode needs to be set in order to keep - * the atomic core happy. It wants a valid mode if the - * crtc's enabled, so we do the above call. - * - * At this point some state updated by the connectors - * in their ->detect() callback has not run yet, so - * no recalculation can be done yet. 
- * - * Even if we could do a recalculation and modeset - * right now it would cause a double modeset if - * fbdev or userspace chooses a different initial mode. - * - * If that happens, someone indicated they wanted a - * mode change, which means it's safe to do a full - * recalculation. - */ - crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED; - } - - crtc->base.hwmode = crtc->config->base.adjusted_mode; - readout_plane_state(crtc, to_intel_crtc_state(crtc->base.state)); + readout_plane_state(crtc); DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", crtc->base.base.id, @@ -15204,6 +15191,36 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) connector->base.name, connector->base.encoder ? "enabled" : "disabled"); } + + for_each_intel_crtc(dev, crtc) { + crtc->base.hwmode = crtc->config->base.adjusted_mode; + + memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); + if (crtc->base.state->active) { + intel_mode_from_pipe_config(&crtc->base.mode, crtc->config); + intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config); + WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode)); + + /* + * The initial mode needs to be set in order to keep + * the atomic core happy. It wants a valid mode if the + * crtc's enabled, so we do the above call. + * + * At this point some state updated by the connectors + * in their ->detect() callback has not run yet, so + * no recalculation can be done yet. + * + * Even if we could do a recalculation and modeset + * right now it would cause a double modeset if + * fbdev or userspace chooses a different initial mode. + * + * If that happens, someone indicated they wanted a + * mode change, which means it's safe to do a full + * recalculation. + */ + crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED; + } + } } /* Scan out the current hw modeset state, diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 3e4be5a3becd..6ade06888432 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -462,11 +462,17 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0); drm_mode_connector_set_path_property(connector, pathprop); + return connector; +} + +static void intel_dp_register_mst_connector(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct drm_device *dev = connector->dev; drm_modeset_lock_all(dev); intel_connector_add_to_fbdev(intel_connector); drm_modeset_unlock_all(dev); drm_connector_register(&intel_connector->base); - return connector; } static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, @@ -512,6 +518,7 @@ static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) static struct drm_dp_mst_topology_cbs mst_cbs = { .add_connector = intel_dp_add_mst_connector, + .register_connector = intel_dp_register_mst_connector, .destroy_connector = intel_dp_destroy_mst_connector, .hotplug = intel_dp_mst_hotplug, }; diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index 53c0173a39fe..b17785719598 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -180,7 +180,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv) /* Enable polling and queue hotplug re-enabling. 
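The intel_dp_mst change above splits connector creation from registration: .add_connector only constructs and initializes the object, and the new .register_connector callback publishes it (fbdev attach plus drm_connector_register()) once setup is complete, so userspace never observes a half-initialized connector. A toy model of the two-phase pattern follows, with invented types standing in for the DRM structures.

#include <stdio.h>
#include <stdlib.h>

struct connector {
	int id;
	int registered;
};

struct topology_cbs {
	struct connector *(*add_connector)(int id);
	void (*register_connector)(struct connector *c);
};

static struct connector *add_connector(int id)
{
	struct connector *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	c->id = id;	/* fully initialize before anyone can see it */
	return c;
}

static void register_connector(struct connector *c)
{
	c->registered = 1;	/* only now published to the rest of the system */
	printf("connector %d registered\n", c->id);
}

static const struct topology_cbs cbs = {
	.add_connector = add_connector,
	.register_connector = register_connector,
};

int main(void)
{
	struct connector *c = cbs.add_connector(7);

	if (!c)
		return 1;
	/* ... attach to fbdev, set properties ... */
	cbs.register_connector(c);
	free(c);
	return 0;
}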
*/ if (hpd_disabled) { - drm_kms_helper_poll_enable(dev); + drm_kms_helper_poll_enable_locked(dev); mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work, msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); } diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 72e0edd7bbde..29dd4488dc49 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -484,18 +484,18 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring) status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring)); read_pointer = ring->next_context_status_buffer; - write_pointer = status_pointer & 0x07; + write_pointer = status_pointer & GEN8_CSB_PTR_MASK; if (read_pointer > write_pointer) - write_pointer += 6; + write_pointer += GEN8_CSB_ENTRIES; spin_lock(&ring->execlist_lock); while (read_pointer < write_pointer) { read_pointer++; status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + - (read_pointer % 6) * 8); + (read_pointer % GEN8_CSB_ENTRIES) * 8); status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + - (read_pointer % 6) * 8 + 4); + (read_pointer % GEN8_CSB_ENTRIES) * 8 + 4); if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) continue; @@ -521,10 +521,12 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring) spin_unlock(&ring->execlist_lock); WARN(submit_contexts > 2, "More than two context complete events?\n"); - ring->next_context_status_buffer = write_pointer % 6; + ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES; I915_WRITE(RING_CONTEXT_STATUS_PTR(ring), - _MASKED_FIELD(0x07 << 8, ((u32)ring->next_context_status_buffer & 0x07) << 8)); + _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8, + ((u32)ring->next_context_status_buffer & + GEN8_CSB_PTR_MASK) << 8)); } static int execlists_context_queue(struct drm_i915_gem_request *request) @@ -1422,6 +1424,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; + u8 next_context_status_buffer_hw; I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); @@ -1436,7 +1439,29 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring) _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); POSTING_READ(RING_MODE_GEN7(ring)); - ring->next_context_status_buffer = 0; + + /* + * Instead of resetting the Context Status Buffer (CSB) read pointer to + * zero, we need to read the write pointer from hardware and use its + * value because "this register is power context save restored". + * Effectively, these states have been observed: + * + * | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) | + * BDW | CSB regs not reset | CSB regs reset | + * CHT | CSB regs not reset | CSB regs not reset | + */ + next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring)) + & GEN8_CSB_PTR_MASK); + + /* + * When the CSB registers are reset (also after power-up / gpu reset), + * CSB write pointer is set to all 1's, which is not valid, use '5' in + * this special case, so the first element read is CSB[0]. 
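The execlists rework above names the magic constants (GEN8_CSB_ENTRIES, GEN8_CSB_PTR_MASK) and seeds the read pointer from the hardware write pointer. The loop itself is ordinary circular-buffer arithmetic: unwrap the write pointer once when it trails the read pointer, consume entries modulo the buffer size, then store the wrapped pointer back. A standalone sketch with fabricated values:

#include <stdio.h>

#define CSB_ENTRIES	6
#define CSB_PTR_MASK	0x07

static void process_csb(unsigned int *read_pointer, unsigned int status_pointer)
{
	unsigned int write_pointer = status_pointer & CSB_PTR_MASK;

	if (*read_pointer > write_pointer)
		write_pointer += CSB_ENTRIES;	/* unwrap once */

	while (*read_pointer < write_pointer) {
		(*read_pointer)++;
		/* In the driver, two dwords are read per entry here. */
		printf("consume CSB[%u]\n", *read_pointer % CSB_ENTRIES);
	}

	*read_pointer = write_pointer % CSB_ENTRIES;	/* store wrapped */
}

int main(void)
{
	unsigned int read_pointer = 4;

	/* Write pointer at slot 1, i.e. wrapped past slot 0:
	 * consumes CSB[5], CSB[0], CSB[1]. */
	process_csb(&read_pointer, 1);
	return 0;
}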
+ */ + if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK) + next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1); + + ring->next_context_status_buffer = next_context_status_buffer_hw; DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name); memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); @@ -1634,6 +1659,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, if (flush_domains) { flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + flags |= PIPE_CONTROL_FLUSH_ENABLE; } if (invalidate_domains) { diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 64f89f9982a2..3c63bb32ad81 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -25,6 +25,8 @@ #define _INTEL_LRC_H_ #define GEN8_LR_CONTEXT_ALIGN 4096 +#define GEN8_CSB_ENTRIES 6 +#define GEN8_CSB_PTR_MASK 0x07 /* Execlists regs */ #define RING_ELSP(ring) ((ring)->mmio_base+0x230) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 6e6b8db996ef..61b451fbd09e 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -347,6 +347,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, if (flush_domains) { flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + flags |= PIPE_CONTROL_FLUSH_ENABLE; } if (invalidate_domains) { flags |= PIPE_CONTROL_TLB_INVALIDATE; @@ -418,6 +419,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req, if (flush_domains) { flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + flags |= PIPE_CONTROL_FLUSH_ENABLE; } if (invalidate_domains) { flags |= PIPE_CONTROL_TLB_INVALIDATE; diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index af7fdb3bd663..7401cf90b0db 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -246,7 +246,8 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv, } if (power_well->data == SKL_DISP_PW_1) { - intel_prepare_ddi(dev); + if (!dev_priv->power_domains.initializing) + intel_prepare_ddi(dev); gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A); } } diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c index 87de15ea1f93..b35b5b2db4ec 100644 --- a/drivers/gpu/drm/mgag200/mgag200_fb.c +++ b/drivers/gpu/drm/mgag200/mgag200_fb.c @@ -186,17 +186,19 @@ static int mgag200fb_create(struct drm_fb_helper *helper, sysram = vmalloc(size); if (!sysram) - return -ENOMEM; + goto err_sysram; info = drm_fb_helper_alloc_fbi(helper); - if (IS_ERR(info)) - return PTR_ERR(info); + if (IS_ERR(info)) { + ret = PTR_ERR(info); + goto err_alloc_fbi; + } info->par = mfbdev; ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj); if (ret) - return ret; + goto err_framebuffer_init; mfbdev->sysram = sysram; mfbdev->size = size; @@ -225,7 +227,17 @@ static int mgag200fb_create(struct drm_fb_helper *helper, DRM_DEBUG_KMS("allocated %dx%d\n", fb->width, fb->height); + return 0; + +err_framebuffer_init: + drm_fb_helper_release_fbi(helper); +err_alloc_fbi: + vfree(sysram); +err_sysram: + drm_gem_object_unreference_unlocked(gobj); + + return ret; } static int mga_fbdev_destroy(struct drm_device *dev, @@ -276,23 +288,26 @@ int mgag200_fbdev_init(struct mga_device *mdev) ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper, mdev->num_crtc, 
MGAG200FB_CONN_LIMIT); if (ret) - return ret; + goto err_fb_helper; ret = drm_fb_helper_single_add_all_connectors(&mfbdev->helper); if (ret) - goto fini; + goto err_fb_setup; /* disable all the possible outputs/crtcs before entering KMS mode */ drm_helper_disable_unused_functions(mdev->dev); ret = drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel); if (ret) - goto fini; + goto err_fb_setup; return 0; -fini: +err_fb_setup: drm_fb_helper_fini(&mfbdev->helper); +err_fb_helper: + mdev->mfbdev = NULL; + return ret; } diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c index de06388069e7..b1a0f5656175 100644 --- a/drivers/gpu/drm/mgag200/mgag200_main.c +++ b/drivers/gpu/drm/mgag200/mgag200_main.c @@ -220,7 +220,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags) } r = mgag200_mm_init(mdev); if (r) - goto out; + goto err_mm; drm_mode_config_init(dev); dev->mode_config.funcs = (void *)&mga_mode_funcs; @@ -233,7 +233,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags) r = mgag200_modeset_init(mdev); if (r) { dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r); - goto out; + goto err_modeset; } /* Make small buffers to store a hardware cursor (double buffered icon updates) */ @@ -241,20 +241,24 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags) &mdev->cursor.pixels_1); mgag200_bo_create(dev, roundup(48*64, PAGE_SIZE), 0, 0, &mdev->cursor.pixels_2); - if (!mdev->cursor.pixels_2 || !mdev->cursor.pixels_1) - goto cursor_nospace; - mdev->cursor.pixels_current = mdev->cursor.pixels_1; - mdev->cursor.pixels_prev = mdev->cursor.pixels_2; - goto cursor_done; - cursor_nospace: - mdev->cursor.pixels_1 = NULL; - mdev->cursor.pixels_2 = NULL; - dev_warn(&dev->pdev->dev, "Could not allocate space for cursors. Not doing hardware cursors.\n"); - cursor_done: - -out: - if (r) - mgag200_driver_unload(dev); + if (!mdev->cursor.pixels_2 || !mdev->cursor.pixels_1) { + mdev->cursor.pixels_1 = NULL; + mdev->cursor.pixels_2 = NULL; + dev_warn(&dev->pdev->dev, + "Could not allocate space for cursors. 
Not doing hardware cursors.\n"); + } else { + mdev->cursor.pixels_current = mdev->cursor.pixels_1; + mdev->cursor.pixels_prev = mdev->cursor.pixels_2; + } + + return 0; + +err_modeset: + drm_mode_config_cleanup(dev); + mgag200_mm_fini(mdev); +err_mm: + dev->dev_private = NULL; + return r; } diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index cc6c228e11c8..e905c00acf1a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -469,9 +469,13 @@ nouveau_display_create(struct drm_device *dev) if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { dev->mode_config.max_width = 4096; dev->mode_config.max_height = 4096; - } else { + } else + if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI) { dev->mode_config.max_width = 8192; dev->mode_config.max_height = 8192; + } else { + dev->mode_config.max_width = 16384; + dev->mode_config.max_height = 16384; } dev->mode_config.preferred_depth = 24; diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 2791701685dc..59f27e774acb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -178,8 +178,30 @@ nouveau_fbcon_sync(struct fb_info *info) return 0; } +static int +nouveau_fbcon_open(struct fb_info *info, int user) +{ + struct nouveau_fbdev *fbcon = info->par; + struct nouveau_drm *drm = nouveau_drm(fbcon->dev); + int ret = pm_runtime_get_sync(drm->dev->dev); + if (ret < 0 && ret != -EACCES) + return ret; + return 0; +} + +static int +nouveau_fbcon_release(struct fb_info *info, int user) +{ + struct nouveau_fbdev *fbcon = info->par; + struct nouveau_drm *drm = nouveau_drm(fbcon->dev); + pm_runtime_put(drm->dev->dev); + return 0; +} + static struct fb_ops nouveau_fbcon_ops = { .owner = THIS_MODULE, + .fb_open = nouveau_fbcon_open, + .fb_release = nouveau_fbcon_release, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_fillrect = nouveau_fbcon_fillrect, @@ -195,6 +217,8 @@ static struct fb_ops nouveau_fbcon_ops = { static struct fb_ops nouveau_fbcon_sw_ops = { .owner = THIS_MODULE, + .fb_open = nouveau_fbcon_open, + .fb_release = nouveau_fbcon_release, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_fillrect = drm_fb_helper_cfb_fillrect, diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 2c9981512d27..41be584147b9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -227,11 +227,12 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem, struct nouveau_bo *nvbo = nouveau_gem_object(gem); struct nvkm_vma *vma; - if (nvbo->bo.mem.mem_type == TTM_PL_TT) + if (is_power_of_2(nvbo->valid_domains)) + rep->domain = nvbo->valid_domains; + else if (nvbo->bo.mem.mem_type == TTM_PL_TT) rep->domain = NOUVEAU_GEM_DOMAIN_GART; else rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; - rep->offset = nvbo->bo.offset; if (cli->vm) { vma = nouveau_bo_vma_find(nvbo, cli->vm); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c index 65af31441e9c..a7d69ce7abc1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c @@ -267,6 +267,12 @@ init_i2c(struct nvbios_init *init, int index) index = NVKM_I2C_BUS_PRI; if (init->outp && init->outp->i2c_upper_default) index = NVKM_I2C_BUS_SEC; + } else + if (index == 
0x80) { + index = NVKM_I2C_BUS_PRI; + } else + if (index == 0x81) { + index = NVKM_I2C_BUS_SEC; } bus = nvkm_i2c_bus_find(i2c, index); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h index e0ec2a6b7b79..212800ecdce9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h @@ -8,7 +8,10 @@ struct nvbios_source { void *(*init)(struct nvkm_bios *, const char *); void (*fini)(void *); u32 (*read)(void *, u32 offset, u32 length, struct nvkm_bios *); + u32 (*size)(void *); bool rw; + bool ignore_checksum; + bool no_pcir; }; int nvbios_extend(struct nvkm_bios *, u32 length); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c index 792f017525f6..b2557e87afdd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c @@ -45,7 +45,7 @@ shadow_fetch(struct nvkm_bios *bios, struct shadow *mthd, u32 upto) u32 read = mthd->func->read(data, start, limit - start, bios); bios->size = start + read; } - return bios->size >= limit; + return bios->size >= upto; } static int @@ -55,14 +55,22 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd) struct nvbios_image image; int score = 1; - if (!shadow_fetch(bios, mthd, offset + 0x1000)) { - nvkm_debug(subdev, "%08x: header fetch failed\n", offset); - return 0; - } + if (mthd->func->no_pcir) { + image.base = 0; + image.type = 0; + image.size = mthd->func->size(mthd->data); + image.last = 1; + } else { + if (!shadow_fetch(bios, mthd, offset + 0x1000)) { + nvkm_debug(subdev, "%08x: header fetch failed\n", + offset); + return 0; + } - if (!nvbios_image(bios, idx, &image)) { - nvkm_debug(subdev, "image %d invalid\n", idx); - return 0; + if (!nvbios_image(bios, idx, &image)) { + nvkm_debug(subdev, "image %d invalid\n", idx); + return 0; + } } nvkm_debug(subdev, "%08x: type %02x, %d bytes\n", image.base, image.type, image.size); @@ -74,7 +82,8 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd) switch (image.type) { case 0x00: - if (nvbios_checksum(&bios->data[image.base], image.size)) { + if (!mthd->func->ignore_checksum && + nvbios_checksum(&bios->data[image.base], image.size)) { nvkm_debug(subdev, "%08x: checksum failed\n", image.base); if (mthd->func->rw) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c index bd60d7dd09f5..4bf486b57101 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c @@ -21,6 +21,7 @@ * */ #include "priv.h" + #include <core/pci.h> #if defined(__powerpc__) @@ -33,17 +34,26 @@ static u32 of_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios) { struct priv *priv = data; - if (offset + length <= priv->size) { + if (offset < priv->size) { + length = min_t(u32, length, priv->size - offset); memcpy_fromio(bios->data + offset, priv->data + offset, length); return length; } return 0; } +static u32 +of_size(void *data) +{ + struct priv *priv = data; + return priv->size; +} + static void * of_init(struct nvkm_bios *bios, const char *name) { - struct pci_dev *pdev = bios->subdev.device->func->pci(bios->subdev.device)->pdev; + struct nvkm_device *device = bios->subdev.device; + struct pci_dev *pdev = device->func->pci(device)->pdev; struct device_node *dn; struct priv *priv; if (!(dn = 
pci_device_to_OF_node(pdev))) @@ -62,7 +72,10 @@ nvbios_of = { .init = of_init, .fini = (void(*)(void *))kfree, .read = of_read, + .size = of_size, .rw = false, + .ignore_checksum = true, + .no_pcir = true, }; #else const struct nvbios_source diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c index 814cb51cc873..385a90f91ed6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c @@ -35,6 +35,8 @@ static const struct nvkm_device_agp_quirk nvkm_device_agp_quirks[] = { /* VIA Apollo PRO133x / GeForce FX 5600 Ultra - fdo#20341 */ { PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_NVIDIA, 0x0311, 2 }, + /* SiS 761 does not support AGP cards, use PCI mode */ + { PCI_VENDOR_ID_SI, 0x0761, PCI_ANY_ID, PCI_ANY_ID, 0 }, {}, }; @@ -137,8 +139,10 @@ nvkm_agp_ctor(struct nvkm_pci *pci) while (quirk->hostbridge_vendor) { if (info.device->vendor == quirk->hostbridge_vendor && info.device->device == quirk->hostbridge_device && - pci->pdev->vendor == quirk->chip_vendor && - pci->pdev->device == quirk->chip_device) { + (quirk->chip_vendor == (u16)PCI_ANY_ID || + pci->pdev->vendor == quirk->chip_vendor) && + (quirk->chip_device == (u16)PCI_ANY_ID || + pci->pdev->device == quirk->chip_device)) { nvkm_info(subdev, "forcing default agp mode to %dX, " "use NvAGP=<mode> to override\n", quirk->mode); diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c index a7b4939cee6d..6989238b276a 100644 --- a/drivers/gpu/drm/panel/panel-lg-lg4573.c +++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c @@ -287,7 +287,6 @@ static struct spi_driver lg4573_driver = { .remove = lg4573_remove, .driver = { .name = "lg4573", - .owner = THIS_MODULE, .of_match_table = lg4573_of_match, }, }; diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c index b202377135e7..3cf4cf6a6942 100644 --- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c +++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c @@ -378,7 +378,6 @@ static struct spi_driver ld9040_driver = { .remove = ld9040_remove, .driver = { .name = "panel-samsung-ld9040", - .owner = THIS_MODULE, .of_match_table = ld9040_of_match, }, }; diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 7c6225c84ba6..183aea1abebc 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -244,6 +244,10 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc, ret = qxl_bo_reserve(bo, false); if (ret) return ret; + ret = qxl_bo_pin(bo, bo->type, NULL); + qxl_bo_unreserve(bo); + if (ret) + return ret; qxl_draw_dirty_fb(qdev, qfb_src, bo, 0, 0, &norect, one_clip_rect, inc); @@ -257,7 +261,11 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc, } drm_vblank_put(dev, qcrtc->index); - qxl_bo_unreserve(bo); + ret = qxl_bo_reserve(bo, false); + if (!ret) { + qxl_bo_unpin(bo); + qxl_bo_unreserve(bo); + } return 0; } @@ -618,7 +626,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc, adjusted_mode->hdisplay, adjusted_mode->vdisplay); - if (qcrtc->index == 0) + if (bo->is_primary == false) recreate_primary = true; if (bo->surf.stride * bo->surf.height > qdev->vram_size) { @@ -886,13 +894,15 @@ static enum drm_connector_status qxl_conn_detect( drm_connector_to_qxl_output(connector); struct drm_device *ddev = connector->dev; struct qxl_device *qdev = ddev->dev_private; - int connected; + bool connected = false; /* The first monitor is always connected */ - connected = 
(output->index == 0) || - (qdev->client_monitors_config && - qdev->client_monitors_config->count > output->index && - qxl_head_enabled(&qdev->client_monitors_config->heads[output->index])); + if (!qdev->client_monitors_config) { + if (output->index == 0) + connected = true; + } else + connected = qdev->client_monitors_config->count > output->index && + qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]); DRM_DEBUG("#%d connected: %d\n", output->index, connected); if (!connected) diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c index 41c422fee31a..c4a552637c93 100644 --- a/drivers/gpu/drm/qxl/qxl_fb.c +++ b/drivers/gpu/drm/qxl/qxl_fb.c @@ -144,14 +144,17 @@ static void qxl_dirty_update(struct qxl_fbdev *qfbdev, spin_lock_irqsave(&qfbdev->dirty.lock, flags); - if (qfbdev->dirty.y1 < y) - y = qfbdev->dirty.y1; - if (qfbdev->dirty.y2 > y2) - y2 = qfbdev->dirty.y2; - if (qfbdev->dirty.x1 < x) - x = qfbdev->dirty.x1; - if (qfbdev->dirty.x2 > x2) - x2 = qfbdev->dirty.x2; + if ((qfbdev->dirty.y2 - qfbdev->dirty.y1) && + (qfbdev->dirty.x2 - qfbdev->dirty.x1)) { + if (qfbdev->dirty.y1 < y) + y = qfbdev->dirty.y1; + if (qfbdev->dirty.y2 > y2) + y2 = qfbdev->dirty.y2; + if (qfbdev->dirty.x1 < x) + x = qfbdev->dirty.x1; + if (qfbdev->dirty.x2 > x2) + x2 = qfbdev->dirty.x2; + } qfbdev->dirty.x1 = x; qfbdev->dirty.x2 = x2; diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index b66ec331c17c..4efa8e261baf 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -307,7 +307,7 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); if (idr_ret < 0) return idr_ret; - bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo)); + bo = to_qxl_bo(entry->tv.bo); (*release)->release_offset = create_rel->release_offset + 64; @@ -316,8 +316,6 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, info = qxl_release_map(qdev, *release); info->id = idr_ret; qxl_release_unmap(qdev, *release, info); - - qxl_bo_unref(&bo); return 0; } diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index c3872598b85a..bb292143997e 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -237,6 +237,7 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, backlight_update_status(bd); DRM_INFO("radeon atom DIG backlight initialized\n"); + rdev->mode_info.bl_encoder = radeon_encoder; return; @@ -1624,8 +1625,14 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode) } else atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - args.ucAction = ATOM_LCD_BLON; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + if (rdev->mode_info.bl_encoder) { + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + + atombios_set_backlight_level(radeon_encoder, dig->backlight_level); + } else { + args.ucAction = ATOM_LCD_BLON; + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + } } break; case DRM_MODE_DPMS_STANDBY: @@ -1705,9 +1712,13 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) if (ASIC_IS_DCE4(rdev)) atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); } - if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - 
atombios_dig_transmitter_setup(encoder, - ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { + if (rdev->mode_info.bl_encoder) + atombios_set_backlight_level(radeon_encoder, dig->backlight_level); + else + atombios_dig_transmitter_setup(encoder, + ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); + } if (ext_encoder) atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); break; diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index f03b7eb15233..b6cbd816537e 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1658,6 +1658,7 @@ struct radeon_pm { u8 fan_max_rpm; /* dpm */ bool dpm_enabled; + bool sysfs_initialized; struct radeon_dpm dpm; }; diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index d8319dae8358..f3f562f6d848 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1573,10 +1573,12 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) drm_kms_helper_poll_disable(dev); + drm_modeset_lock_all(dev); /* turn off display hw */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); } + drm_modeset_unlock_all(dev); /* unpin the front buffers and cursors */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { @@ -1734,9 +1736,11 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) if (fbcon) { drm_helper_resume_force_mode(dev); /* turn on display hw */ + drm_modeset_lock_all(dev); list_for_each_entry(connector, &dev->mode_config.connector_list, head) { drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); } + drm_modeset_unlock_all(dev); } drm_kms_helper_poll_enable(dev); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index d2e9e9efc159..6743174acdbc 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1633,18 +1633,8 @@ int radeon_modeset_init(struct radeon_device *rdev) radeon_fbdev_init(rdev); drm_kms_helper_poll_init(rdev->ddev); - if (rdev->pm.dpm_enabled) { - /* do dpm late init */ - ret = radeon_pm_late_init(rdev); - if (ret) { - rdev->pm.dpm_enabled = false; - DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); - } - /* set the dpm state for PX since there won't be - * a modeset to call this. 
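The radeon suspend/resume fix above wraps the connector DPMS walk in drm_modeset_lock_all()/drm_modeset_unlock_all(), because the DPMS helper mutates shared modeset state. A compact model of the rule follows, with a single pthread mutex standing in for the modeset lock set.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t modeset_lock = PTHREAD_MUTEX_INITIALIZER;

struct connector { const char *name; int dpms; };

#define DPMS_ON  0
#define DPMS_OFF 3

static void connector_dpms(struct connector *c, int mode)
{
	c->dpms = mode;	/* shared modeset state: must be serialized */
}

static void suspend_displays(struct connector *list, int n)
{
	int i;

	pthread_mutex_lock(&modeset_lock);	/* drm_modeset_lock_all() */
	for (i = 0; i < n; i++)
		connector_dpms(&list[i], DPMS_OFF);
	pthread_mutex_unlock(&modeset_lock);	/* drm_modeset_unlock_all() */
}

int main(void)
{
	struct connector conns[] = { { "DP-1", DPMS_ON }, { "HDMI-1", DPMS_ON } };

	suspend_displays(conns, 2);
	printf("%s dpms=%d, %s dpms=%d\n", conns[0].name, conns[0].dpms,
	       conns[1].name, conns[1].dpms);
	return 0;
}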
- */ - radeon_pm_compute_clocks(rdev); - } + /* do pm late init */ + ret = radeon_pm_late_init(rdev); return 0; }
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index 5e09c061847f..744f5c49c664 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c @@ -265,7 +265,6 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol { struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr); struct drm_device *dev = master->base.dev; - struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector; struct drm_connector *connector; @@ -284,14 +283,22 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol radeon_connector->mst_encoder = radeon_dp_create_fake_mst_encoder(master); drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0); + drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0); drm_mode_connector_set_path_property(connector, pathprop); + return connector; +} + +static void radeon_dp_register_mst_connector(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct radeon_device *rdev = dev->dev_private; + drm_modeset_lock_all(dev); radeon_fb_add_connector(rdev, connector); drm_modeset_unlock_all(dev); drm_connector_register(connector); - return connector; } static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, @@ -324,6 +331,7 @@ static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) struct drm_dp_mst_topology_cbs mst_cbs = { .add_connector = radeon_dp_add_mst_connector, + .register_connector = radeon_dp_register_mst_connector, .destroy_connector = radeon_dp_destroy_mst_connector, .hotplug = radeon_dp_mst_hotplug, };
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index ef99917f000d..c6ee80216cf4 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -194,7 +194,6 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder, radeon_atom_backlight_init(radeon_encoder, connector); else radeon_legacy_backlight_init(radeon_encoder, connector); - rdev->mode_info.bl_encoder = radeon_encoder; } }
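Note on the radeon_dp_mst.c hunk above: connector creation and registration are split into two callbacks, so the new register_connector hook can publish the connector to userspace only after radeon_fb_add_connector() has run under the modeset locks. A minimal sketch of this two-phase pattern (plain C with hypothetical types and helpers, not the kernel code):

#include <stdio.h>

struct connector {
    int ready;  /* set once internal bookkeeping is complete */
};

/* Phase 1: allocate and initialize, but do not expose to userspace yet. */
static struct connector *connector_create(void)
{
    static struct connector c;  /* stand-in for kzalloc() */
    c.ready = 0;
    return &c;
}

/* Phase 2: invoked by the core after fbdev has been told about the
 * connector; only now does it become visible to userspace. */
static void connector_register(struct connector *c)
{
    c->ready = 1;  /* stand-in for drm_connector_register() */
    printf("connector registered\n");
}

int main(void)
{
    struct connector *c = connector_create();
    /* ... fbdev bookkeeping happens between the two phases ... */
    connector_register(c);
    return 0;
}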
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 7214858ffcea..26da2f4d7b4f 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -48,40 +48,10 @@ struct radeon_fbdev { struct radeon_device *rdev; }; -/** - * radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev. - * - * @info: fbdev info - * - * This function hides the cursor on all CRTCs used by fbdev. - */ -static int radeon_fb_helper_set_par(struct fb_info *info) -{ - int ret; - - ret = drm_fb_helper_set_par(info); - - /* XXX: with universal plane support fbdev will automatically disable - * all non-primary planes (including the cursor) - */ - if (ret == 0) { - struct drm_fb_helper *fb_helper = info->par; - int i; - - for (i = 0; i < fb_helper->crtc_count; i++) { - struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc; - - radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0); - } - } - - return ret; -} - static struct fb_ops radeonfb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, - .fb_set_par = radeon_fb_helper_set_par, + .fb_set_par = drm_fb_helper_set_par, .fb_fillrect = drm_fb_helper_cfb_fillrect, .fb_copyarea = drm_fb_helper_cfb_copyarea, .fb_imageblit = drm_fb_helper_cfb_imageblit, @@ -427,3 +397,19 @@ void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector { drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector); } + +void radeon_fbdev_restore_mode(struct radeon_device *rdev) +{ + struct radeon_fbdev *rfbdev = rdev->mode_info.rfbdev; + struct drm_fb_helper *fb_helper; + int ret; + + if (!rfbdev) + return; + + fb_helper = &rfbdev->helper; + + ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); + if (ret) + DRM_DEBUG("failed to restore crtc mode\n"); +}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 4a119c255ba9..0e932bf932c1 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -598,7 +598,7 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file * Outdated mess for old drm with Xorg being in charge (void function now). */ /** - * radeon_driver_firstopen_kms - drm callback for last close + * radeon_driver_lastclose_kms - drm callback for last close * * @dev: drm dev pointer * @@ -606,6 +606,9 @@ */ void radeon_driver_lastclose_kms(struct drm_device *dev) { + struct radeon_device *rdev = dev->dev_private; + + radeon_fbdev_restore_mode(rdev); vga_switcheroo_process_delayed_switch(); }
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 45715307db71..30de43366eae 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -441,6 +441,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, backlight_update_status(bd); DRM_INFO("radeon legacy LVDS backlight initialized\n"); + rdev->mode_info.bl_encoder = radeon_encoder; return;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index aecc3e3dec0c..457b026a0972 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -980,6 +980,7 @@ int radeon_fbdev_init(struct radeon_device *rdev); void radeon_fbdev_fini(struct radeon_device *rdev); void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state); bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj); +void radeon_fbdev_restore_mode(struct radeon_device *rdev); void radeon_fb_output_poll_changed(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 05751f3f8444..5feee3b4c557 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -717,10 +717,14 @@ static
umode_t hwmon_attributes_visible(struct kobject *kobj, struct radeon_device *rdev = dev_get_drvdata(dev); umode_t effective_mode = attr->mode; - /* Skip limit attributes if DPM is not enabled */ + /* Skip attributes if DPM is not enabled */ if (rdev->pm.pm_method != PM_METHOD_DPM && (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || - attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr)) + attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr || + attr == &sensor_dev_attr_pwm1.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) return 0; /* Skip fan attributes if fan is not present */ @@ -1326,14 +1330,6 @@ static int radeon_pm_init_old(struct radeon_device *rdev) INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler); if (rdev->pm.num_power_states > 1) { - /* where's the best place to put these? */ - ret = device_create_file(rdev->dev, &dev_attr_power_profile); - if (ret) - DRM_ERROR("failed to create device file for power profile\n"); - ret = device_create_file(rdev->dev, &dev_attr_power_method); - if (ret) - DRM_ERROR("failed to create device file for power method\n"); - if (radeon_debugfs_pm_init(rdev)) { DRM_ERROR("Failed to register debugfs file for PM!\n"); } @@ -1391,20 +1387,6 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev) goto dpm_failed; rdev->pm.dpm_enabled = true; - ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state); - if (ret) - DRM_ERROR("failed to create device file for dpm state\n"); - ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level); - if (ret) - DRM_ERROR("failed to create device file for dpm state\n"); - /* XXX: these are noops for dpm but are here for backwards compat */ - ret = device_create_file(rdev->dev, &dev_attr_power_profile); - if (ret) - DRM_ERROR("failed to create device file for power profile\n"); - ret = device_create_file(rdev->dev, &dev_attr_power_method); - if (ret) - DRM_ERROR("failed to create device file for power method\n"); - if (radeon_debugfs_pm_init(rdev)) { DRM_ERROR("Failed to register debugfs file for dpm!\n"); } @@ -1545,9 +1527,51 @@ int radeon_pm_late_init(struct radeon_device *rdev) int ret = 0; if (rdev->pm.pm_method == PM_METHOD_DPM) { - mutex_lock(&rdev->pm.mutex); - ret = radeon_dpm_late_enable(rdev); - mutex_unlock(&rdev->pm.mutex); + if (rdev->pm.dpm_enabled) { + if (!rdev->pm.sysfs_initialized) { + ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state); + if (ret) + DRM_ERROR("failed to create device file for dpm state\n"); + ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level); + if (ret) + DRM_ERROR("failed to create device file for dpm state\n"); + /* XXX: these are noops for dpm but are here for backwards compat */ + ret = device_create_file(rdev->dev, &dev_attr_power_profile); + if (ret) + DRM_ERROR("failed to create device file for power profile\n"); + ret = device_create_file(rdev->dev, &dev_attr_power_method); + if (ret) + DRM_ERROR("failed to create device file for power method\n"); + if (!ret) + rdev->pm.sysfs_initialized = true; + } + + mutex_lock(&rdev->pm.mutex); + ret = radeon_dpm_late_enable(rdev); + mutex_unlock(&rdev->pm.mutex); + if (ret) { + rdev->pm.dpm_enabled = false; + DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); + } else { + /* set the dpm state for PX since there won't be + * a modeset to call this. 
+ */ + radeon_pm_compute_clocks(rdev); + } + } + } else { + if ((rdev->pm.num_power_states > 1) && + (!rdev->pm.sysfs_initialized)) { + /* where's the best place to put these? */ + ret = device_create_file(rdev->dev, &dev_attr_power_profile); + if (ret) + DRM_ERROR("failed to create device file for power profile\n"); + ret = device_create_file(rdev->dev, &dev_attr_power_method); + if (ret) + DRM_ERROR("failed to create device file for power method\n"); + if (!ret) + rdev->pm.sysfs_initialized = true; + } } return ret; }
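Note on the radeon_pm.c rework above: sysfs file creation moves out of the two init paths and into radeon_pm_late_init(), guarded by the new sysfs_initialized flag, so a repeated late init (for example when a failed dpm enable is retried) cannot create the same files twice. A minimal sketch of the create-once guard (plain C, hypothetical file names, not the kernel source):

#include <stdbool.h>
#include <stdio.h>

static bool sysfs_initialized;

/* Stand-in for device_create_file(); returns 0 on success. */
static int create_file(const char *name)
{
    printf("created %s\n", name);
    return 0;
}

/* Safe to call on every late-init pass; only the first call does work. */
static int pm_sysfs_init_once(void)
{
    int ret;

    if (sysfs_initialized)
        return 0;

    ret = create_file("power_profile");
    if (!ret)
        ret = create_file("power_method");
    if (!ret)
        sysfs_initialized = true;  /* record success, not mere attempts */
    return ret;
}

int main(void)
{
    pm_sysfs_init_once();
    pm_sysfs_init_once();  /* second call is a no-op */
    return 0;
}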
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 787cd8fd897f..e72bf46042e0 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2927,6 +2927,8 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = { { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 }, + { PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 }, + { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 }, { 0, 0, 0, 0 }, };
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 8d9b7de25613..745e996d2dbc 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -882,6 +882,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, if (ret) return ret; man = &bdev->man[mem_type]; + if (!man->has_type || !man->use_type) + continue; type_ok = ttm_bo_mt_compatible(man, mem_type, place, &cur_flags); @@ -889,6 +891,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, if (!type_ok) continue; + type_found = true; cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags); /* @@ -901,12 +904,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, if (mem_type == TTM_PL_SYSTEM) break; - if (man->has_type && man->use_type) { - type_found = true; - ret = (*man->func->get_node)(man, bo, place, mem); - if (unlikely(ret)) - return ret; - } + ret = (*man->func->get_node)(man, bo, place, mem); + if (unlikely(ret)) + return ret; + if (mem->mm_node) break; } @@ -917,9 +918,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, return 0; } - if (!type_found) - return -EINVAL; - for (i = 0; i < placement->num_busy_placement; ++i) { const struct ttm_place *place = &placement->busy_placement[i]; @@ -927,11 +925,12 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, if (ret) return ret; man = &bdev->man[mem_type]; - if (!man->has_type) + if (!man->has_type || !man->use_type) continue; if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags)) continue; + type_found = true; cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags); /* @@ -957,8 +956,13 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, if (ret == -ERESTARTSYS) has_erestartsys = true; } - ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM; - return ret; + + if (!type_found) { + printk(KERN_ERR TTM_PFX "No compatible memory type found.\n"); + return -EINVAL; + } + + return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM; } EXPORT_SYMBOL(ttm_bo_mem_space);
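Note on the ttm_bo.c hunk above: type_found is now set as soon as either pass sees an enabled, compatible memory type, so -EINVAL ("No compatible memory type found") is reserved for the case where nothing matched at all, while an ordinary out-of-space failure still returns -ENOMEM or -ERESTARTSYS. A condensed sketch of that control flow (plain C with hypothetical callbacks, not the TTM source):

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

/* usable(): type is enabled and compatible; try_alloc(): 0 on success. */
static int mem_space_sketch(const int *types, size_t n,
                            bool (*usable)(int), int (*try_alloc)(int))
{
    bool type_found = false;
    size_t i;

    for (i = 0; i < n; i++) {
        if (!usable(types[i]))
            continue;              /* skip; don't fail the whole search */
        type_found = true;         /* at least one candidate was seen */
        if (try_alloc(types[i]) == 0)
            return 0;              /* placement succeeded */
    }
    /* (the real function runs a second pass over the busy placements,
     * which may also set type_found) */
    if (!type_found)
        return -EINVAL;            /* nothing compatible at all */
    return -ENOMEM;                /* candidates existed, but no space */
}

static bool always_usable(int t) { (void)t; return true; }
static int never_fits(int t)     { (void)t; return -1; }

int main(void)
{
    int types[] = { 0, 1 };
    /* compatible types exist but are full: expect -ENOMEM, not -EINVAL */
    return mem_space_sketch(types, 2, always_usable, never_fits) == -ENOMEM ? 0 : 1;
}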
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c index db8b49101a8b..512263919282 100644 --- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c +++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c @@ -34,8 +34,8 @@ virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct virtio_gpu_device *vgdev = node->minor->dev->dev_private; - seq_printf(m, "fence %ld %lld\n", - atomic64_read(&vgdev->fence_drv.last_seq), + seq_printf(m, "fence %llu %lld\n", + (u64)atomic64_read(&vgdev->fence_drv.last_seq), vgdev->fence_drv.sync_seq); return 0; }
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c index 1da632631dac..67097c9ce9c1 100644 --- a/drivers/gpu/drm/virtio/virtgpu_fence.c +++ b/drivers/gpu/drm/virtio/virtgpu_fence.c @@ -61,7 +61,7 @@ static void virtio_timeline_value_str(struct fence *f, char *str, int size) { struct virtio_gpu_fence *fence = to_virtio_fence(f); - snprintf(str, size, "%lu", atomic64_read(&fence->drv->last_seq)); + snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq)); } static const struct fence_ops virtio_fence_ops = {
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig index 67720f70fe29..b49445df8a7e 100644 --- a/drivers/gpu/drm/vmwgfx/Kconfig +++ b/drivers/gpu/drm/vmwgfx/Kconfig @@ -1,6 +1,6 @@ config DRM_VMWGFX tristate "DRM driver for VMware Virtual GPU" - depends on DRM && PCI + depends on DRM && PCI && X86 select FB_DEFERRED_IO select FB_CFB_FILLRECT select FB_CFB_COPYAREA
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c index 5ae8f921da2a..6377e8151000 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c @@ -415,16 +415,16 @@ static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man, * * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has * command buffers left that are not submitted to hardware, Make sure - * IRQ handling is turned on. Otherwise, make sure it's turned off. This - * function may return -EAGAIN to indicate it should be rerun due to - * possibly missed IRQs if IRQs has just been turned on. + * IRQ handling is turned on. Otherwise, make sure it's turned off. */ -static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man) +static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man) { - int notempty = 0; + int notempty; struct vmw_cmdbuf_context *ctx; int i; +retry: + notempty = 0; for_each_cmdbuf_ctx(man, i, ctx) vmw_cmdbuf_ctx_process(man, ctx, &notempty); @@ -440,10 +440,8 @@ static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man) man->irq_on = true; /* Rerun in case we just missed an irq. */ - return -EAGAIN; + goto retry; } - - return 0; } /** @@ -468,8 +466,7 @@ static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man, header->cb_context = cb_context; list_add_tail(&header->list, &man->ctx[cb_context].submitted); - if (vmw_cmdbuf_man_process(man) == -EAGAIN) - vmw_cmdbuf_man_process(man); + vmw_cmdbuf_man_process(man); } /** @@ -488,8 +485,7 @@ static void vmw_cmdbuf_man_tasklet(unsigned long data) struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data; spin_lock(&man->lock); - if (vmw_cmdbuf_man_process(man) == -EAGAIN) - (void) vmw_cmdbuf_man_process(man); + vmw_cmdbuf_man_process(man); spin_unlock(&man->lock); } @@ -507,6 +503,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work) struct vmw_cmdbuf_man *man = container_of(work, struct vmw_cmdbuf_man, work); struct vmw_cmdbuf_header *entry, *next; + uint32_t dummy; bool restart = false; spin_lock_bh(&man->lock); @@ -523,6 +520,8 @@ static void vmw_cmdbuf_work_func(struct work_struct *work) if (restart && vmw_cmdbuf_startstop(man, true)) DRM_ERROR("Failed restarting command buffer context 0.\n"); + /* Send a new fence in case one was removed */ + vmw_fifo_send_fence(man->dev_priv, &dummy); } /** @@ -681,6 +680,14 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man, 0, 0, DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT); + if (ret) { + vmw_cmdbuf_man_process(man); + ret = drm_mm_insert_node_generic(&man->mm, info->node, + info->page_size, 0, 0, + DRM_MM_SEARCH_DEFAULT, + DRM_MM_CREATE_DEFAULT); + } + spin_unlock_bh(&man->lock); info->done = !ret; @@ -1160,7 +1167,14 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT); man->has_pool = true; - man->default_size = default_size; + + /* + * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to + * prevent deadlocks from happening when vmw_cmdbuf_space_pool() + * needs to wait for space and we block on further command + * submissions to be able to free up space. + */ + man->default_size = VMW_CMDBUF_INLINE_SIZE; DRM_INFO("Using command buffers with %s pool.\n", (man->using_mob) ? "MOB" : "DMA");
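Note on the vmwgfx_cmdbuf.c changes above: vmw_cmdbuf_man_process() now reruns itself via the retry: label instead of returning -EAGAIN and trusting every caller to invoke it a second time, and vmw_cmdbuf_try_alloc() applies the same idea by retrying the allocation once after processing completed buffers. A sketch of internalizing the retry (plain C, hypothetical manager type, not the driver source):

#include <stdbool.h>

struct mgr {
    bool irq_on;
    bool work_pending;
};

/* Returns true if unsubmitted work is still queued on any context. */
static bool drain_contexts(struct mgr *m)
{
    return m->work_pending;
}

static void process(struct mgr *m)
{
retry:
    if (drain_contexts(m) && !m->irq_on) {
        m->irq_on = true;  /* turn IRQ delivery on */
        /* an irq may have fired just before it was enabled, so rescan
         * here once instead of returning -EAGAIN to every caller */
        goto retry;
    }
}

int main(void)
{
    struct mgr m = { false, true };
    process(&m);  /* enables the irq, then rescans once and exits */
    return 0;
}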
"MOB" : "DMA"); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c index ce659a125f2b..092ea81eeff7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c @@ -311,7 +311,6 @@ static int vmw_cotable_unbind(struct vmw_resource *res, struct vmw_private *dev_priv = res->dev_priv; struct ttm_buffer_object *bo = val_buf->bo; struct vmw_fence_obj *fence; - int ret; if (list_empty(&res->mob_head)) return 0; @@ -328,7 +327,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res, if (likely(fence != NULL)) vmw_fence_obj_unreference(&fence); - return ret; + return 0; } /** diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index e13b20bd9908..2c7a25c71af2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -752,12 +752,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); dev_priv->active_master = &dev_priv->fbdev_master; - - dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, - dev_priv->mmio_size); - - dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start, - dev_priv->mmio_size); + dev_priv->mmio_virt = ioremap_cache(dev_priv->mmio_start, + dev_priv->mmio_size); if (unlikely(dev_priv->mmio_virt == NULL)) { ret = -ENOMEM; @@ -913,7 +909,6 @@ out_no_device: out_err4: iounmap(dev_priv->mmio_virt); out_err3: - arch_phys_wc_del(dev_priv->mmio_mtrr); vmw_ttm_global_release(dev_priv); out_err0: for (i = vmw_res_context; i < vmw_res_max; ++i) @@ -964,7 +959,6 @@ static int vmw_driver_unload(struct drm_device *dev) ttm_object_device_release(&dev_priv->tdev); iounmap(dev_priv->mmio_virt); - arch_phys_wc_del(dev_priv->mmio_mtrr); if (dev_priv->ctx.staged_bindings) vmw_binding_state_free(dev_priv->ctx.staged_bindings); vmw_ttm_global_release(dev_priv); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 6d02de6dc36c..f19fd39b43e1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -376,7 +376,6 @@ struct vmw_private { uint32_t initial_width; uint32_t initial_height; u32 __iomem *mmio_virt; - int mmio_mtrr; uint32_t capabilities; uint32_t max_gmr_ids; uint32_t max_gmr_pages; @@ -631,7 +630,8 @@ extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, uint32_t size, bool shareable, uint32_t *handle, - struct vmw_dma_buffer **p_dma_buf); + struct vmw_dma_buffer **p_dma_buf, + struct ttm_base_object **p_base); extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, struct vmw_dma_buffer *dma_buf, uint32_t *handle); @@ -645,7 +645,8 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, uint32_t cur_validate_node); extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, - uint32_t id, struct vmw_dma_buffer **out); + uint32_t id, struct vmw_dma_buffer **out, + struct ttm_base_object **base); extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index b56565457c96..5da5de0cb522 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -1236,7 +1236,8 @@ static int vmw_translate_mob_ptr(struct vmw_private 
*dev_priv, struct vmw_relocation *reloc; int ret; - ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); + ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo, + NULL); if (unlikely(ret != 0)) { DRM_ERROR("Could not find or use MOB buffer.\n"); ret = -EINVAL; @@ -1296,7 +1297,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, struct vmw_relocation *reloc; int ret; - ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); + ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo, + NULL); if (unlikely(ret != 0)) { DRM_ERROR("Could not find or use GMR region.\n"); ret = -EINVAL; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 61fb7f3de311..15a6c01cd016 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1685,7 +1685,6 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv, struct drm_crtc *crtc; u32 num_units = 0; u32 i, k; - int ret; dirty->dev_priv = dev_priv; @@ -1711,7 +1710,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv, if (!dirty->cmd) { DRM_ERROR("Couldn't reserve fifo space " "for dirty blits.\n"); - return ret; + return -ENOMEM; } memset(dirty->cmd, 0, dirty->fifo_reserve_size); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index 76069f093ccf..222c9c2123a1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c @@ -484,7 +484,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data, goto out_unlock; } - ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf); + ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL); if (ret) goto out_unlock; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index c1912f852b42..e57667ca7557 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -354,7 +354,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv, } *out_surf = NULL; - ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf); + ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL); return ret; } @@ -481,7 +481,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, uint32_t size, bool shareable, uint32_t *handle, - struct vmw_dma_buffer **p_dma_buf) + struct vmw_dma_buffer **p_dma_buf, + struct ttm_base_object **p_base) { struct vmw_user_dma_buffer *user_bo; struct ttm_buffer_object *tmp; @@ -515,6 +516,10 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, } *p_dma_buf = &user_bo->dma; + if (p_base) { + *p_base = &user_bo->prime.base; + kref_get(&(*p_base)->refcount); + } *handle = user_bo->prime.base.hash.key; out_no_base_object: @@ -631,6 +636,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, struct vmw_dma_buffer *dma_buf; struct vmw_user_dma_buffer *user_bo; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + struct ttm_base_object *buffer_base; int ret; if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0 @@ -643,7 +649,8 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, switch (arg->op) { case drm_vmw_synccpu_grab: - ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf); + ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf, + &buffer_base); if (unlikely(ret != 0)) return ret; @@ -651,6 +658,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, dma); ret = 
vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags); vmw_dmabuf_unreference(&dma_buf); + ttm_base_object_unref(&buffer_base); if (unlikely(ret != 0 && ret != -ERESTARTSYS && ret != -EBUSY)) { DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n", @@ -692,7 +700,8 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, return ret; ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, - req->size, false, &handle, &dma_buf); + req->size, false, &handle, &dma_buf, + NULL); if (unlikely(ret != 0)) goto out_no_dmabuf; @@ -721,7 +730,8 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, } int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, - uint32_t handle, struct vmw_dma_buffer **out) + uint32_t handle, struct vmw_dma_buffer **out, + struct ttm_base_object **p_base) { struct vmw_user_dma_buffer *vmw_user_bo; struct ttm_base_object *base; @@ -743,7 +753,10 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base); (void)ttm_bo_reference(&vmw_user_bo->dma.base); - ttm_base_object_unref(&base); + if (p_base) + *p_base = base; + else + ttm_base_object_unref(&base); *out = &vmw_user_bo->dma; return 0; @@ -1004,7 +1017,7 @@ int vmw_dumb_create(struct drm_file *file_priv, ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, args->size, false, &args->handle, - &dma_buf); + &dma_buf, NULL); if (unlikely(ret != 0)) goto out_no_dmabuf; @@ -1032,7 +1045,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv, struct vmw_dma_buffer *out_buf; int ret; - ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf); + ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL); if (ret != 0) return -EINVAL; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index bba1ee395478..fd47547b0234 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c @@ -855,7 +855,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv, if (buffer_handle != SVGA3D_INVALID_ID) { ret = vmw_user_dmabuf_lookup(tfile, buffer_handle, - &buffer); + &buffer, NULL); if (unlikely(ret != 0)) { DRM_ERROR("Could not find buffer for shader " "creation.\n"); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 3361769842f4..03f63c749c02 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -46,6 +46,7 @@ struct vmw_user_surface { struct vmw_surface srf; uint32_t size; struct drm_master *master; + struct ttm_base_object *backup_base; }; /** @@ -656,6 +657,8 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base) struct vmw_resource *res = &user_srf->srf.res; *p_base = NULL; + if (user_srf->backup_base) + ttm_base_object_unref(&user_srf->backup_base); vmw_resource_unreference(&res); } @@ -851,7 +854,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, res->backup_size, true, &backup_handle, - &res->backup); + &res->backup, + &user_srf->backup_base); if (unlikely(ret != 0)) { vmw_resource_unreference(&res); goto out_unlock; @@ -1321,7 +1325,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, if (req->buffer_handle != SVGA3D_INVALID_ID) { ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, - &res->backup); + &res->backup, + &user_srf->backup_base); if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE < res->backup_size) { DRM_ERROR("Surface backup buffer 
is too small.\n"); @@ -1335,7 +1340,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, req->drm_surface_flags & drm_vmw_surface_flag_shareable, &backup_handle, - &res->backup); + &res->backup, + &user_srf->backup_base); if (unlikely(ret != 0)) { vmw_resource_unreference(&res);
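Note on the vmw_user_dmabuf_lookup()/vmw_user_dmabuf_alloc() changes running through the vmwgfx hunks above: both can now return the underlying ttm_base_object through the new p_base out-parameter with an extra reference taken, and callers such as the synccpu ioctl and the surface-define paths hold that reference until they are finished, so the base object cannot be destroyed underneath them. A minimal sketch of the optional extra-reference out-parameter (plain C refcounting, hypothetical types, not the driver source):

#include <assert.h>
#include <stddef.h>

struct base_object {
    int refcount;
};

static void base_get(struct base_object *b) { b->refcount++; }
static void base_put(struct base_object *b) { b->refcount--; }

/* Lookup that optionally hands the caller its own reference. */
static int lookup(struct base_object *found, struct base_object **p_base)
{
    base_get(found);          /* reference held for the duration of the lookup */
    if (p_base)
        *p_base = found;      /* transfer it; the caller must base_put() later */
    else
        base_put(found);      /* nobody wants it: drop it again */
    return 0;
}

int main(void)
{
    struct base_object bo = { 1 };
    struct base_object *base = NULL;

    lookup(&bo, &base);       /* caller now pins the object... */
    assert(bo.refcount == 2);
    base_put(base);           /* ...until it is done with it */
    return 0;
}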