author | Dave Airlie <airlied@redhat.com> | 2016-10-28 04:33:52 +0300
committer | Dave Airlie <airlied@redhat.com> | 2016-10-28 04:33:52 +0300
commit | 220196b38483be6d84a295d318d48595f65da443 (patch)
tree | f91c2e6e64ef59afdc075d843d51f23369e9164a /drivers/gpu
parent | a1873c62710b23e9afbd2faeed5f28649cbe4739 (diff)
parent | 56df51d003203f1c3a8eab05605973515aa15feb (diff)
download | linux-220196b38483be6d84a295d318d48595f65da443.tar.xz
Merge tag 'topic/drm-misc-2016-10-27' of git://anongit.freedesktop.org/git/drm-intel into drm-next
Pulling again already, to get the s/fence/dma_fence/ rename in and allow
everyone to resync. Otherwise it's really just misc stuff all over, plus a
new bridge driver.
* tag 'topic/drm-misc-2016-10-27' of git://anongit.freedesktop.org/git/drm-intel:
drm/bridge: fix platform_no_drv_owner.cocci warnings
drm/bridge: fix semicolon.cocci warnings
drm: Print some debug/error info during DP dual mode detect
drm: mark drm_of_component_match_add dummy inline
drm/bridge: add Silicon Image SiI8620 driver
dt-bindings: add Silicon Image SiI8620 bridge bindings
video: add header file for Mobile High-Definition Link (MHL) interface
drm: convert DT component matching to component_match_add_release()
dma-buf: Rename struct fence to dma_fence
dma-buf/fence: add an lockdep_assert_held()
drm/dp: Factor out helper to distinguish between branch and sink devices
drm/edid: Only print the bad edid when aborting
drm/msm: add missing header dependencies
drm/msm/adreno: move function declarations to header file
drm/i2c/tda998x: mark symbol static where possible
doc: add missing docbook parameter for fence-array
drm: RIP mode_config->rotation_property
drm/msm/mdp5: Advertize 180 degree rotation
drm/msm/mdp5: Use per-plane rotation property
Diffstat (limited to 'drivers/gpu')
112 files changed, 3947 insertions, 777 deletions
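The bulk of the merge is the mechanical s/fence/dma_fence/ rename pulled in from the dma-buf tree. As orientation before the diff, here is a minimal sketch of our own (not taken from the commit) of what the rename means for driver code; semantics are unchanged, only the names and the header move:

/*
 * Illustrative sketch only -- not part of this diff. The explicit-fencing
 * types and helpers all gain a dma_ prefix:
 *
 *   struct fence            ->  struct dma_fence
 *   struct fence_ops        ->  struct dma_fence_ops
 *   struct fence_cb         ->  struct dma_fence_cb
 *   fence_get()/fence_put() ->  dma_fence_get()/dma_fence_put()
 *   fence_wait()            ->  dma_fence_wait()
 *   <linux/fence.h>         ->  <linux/dma-fence.h>
 */
#include <linux/dma-fence.h>

static int wait_and_release(struct dma_fence *f)
{
	int r = dma_fence_wait(f, false);	/* uninterruptible wait */

	dma_fence_put(f);	/* drop the reference we were handed */
	return r;
}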
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 217df2459a98..2ec7b3baeec2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -34,7 +34,7 @@
 #include <linux/kref.h>
 #include <linux/interval_tree.h>
 #include <linux/hashtable.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 #include <ttm/ttm_bo_api.h>
 #include <ttm/ttm_bo_driver.h>
@@ -359,7 +359,7 @@ struct amdgpu_bo_va_mapping {
 struct amdgpu_bo_va {
 /* protected by bo being reserved */
 struct list_head bo_list;
-struct fence *last_pt_update;
+struct dma_fence *last_pt_update;
 unsigned ref_count;
 /* protected by vm mutex and spinlock */
@@ -474,7 +474,7 @@ struct amdgpu_sa_bo {
 struct amdgpu_sa_manager *manager;
 unsigned soffset;
 unsigned eoffset;
-struct fence *fence;
+struct dma_fence *fence;
 };
 /*
@@ -613,10 +613,10 @@ struct amdgpu_flip_work {
 uint64_t base;
 struct drm_pending_vblank_event *event;
 struct amdgpu_bo *old_abo;
-struct fence *excl;
+struct dma_fence *excl;
 unsigned shared_count;
-struct fence **shared;
-struct fence_cb cb;
+struct dma_fence **shared;
+struct dma_fence_cb cb;
 bool async;
 };
@@ -644,7 +644,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job);
 void amdgpu_job_free(struct amdgpu_job *job);
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 struct amd_sched_entity *entity, void *owner,
-struct fence **f);
+struct dma_fence **f);
 /*
 * context related structures
@@ -652,7 +652,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 struct amdgpu_ctx_ring {
 uint64_t sequence;
-struct fence **fences;
+struct dma_fence **fences;
 struct amd_sched_entity entity;
 };
@@ -661,7 +661,7 @@ struct amdgpu_ctx {
 struct amdgpu_device *adev;
 unsigned reset_counter;
 spinlock_t ring_lock;
-struct fence **fences;
+struct dma_fence **fences;
 struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
 bool preamble_presented;
 };
@@ -677,8 +677,8 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
 int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
 uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
-struct fence *fence);
-struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+struct dma_fence *fence);
+struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 struct amdgpu_ring *ring, uint64_t seq);
 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
@@ -889,10 +889,10 @@ struct amdgpu_gfx {
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 unsigned size, struct amdgpu_ib *ib);
 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
-struct fence *f);
+struct dma_fence *f);
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
-struct amdgpu_ib *ib, struct fence *last_vm_update,
-struct amdgpu_job *job, struct fence **f);
+struct amdgpu_ib *ib, struct dma_fence *last_vm_update,
+struct amdgpu_job *job, struct dma_fence **f);
 int amdgpu_ib_pool_init(struct amdgpu_device *adev);
 void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
 int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
@@ -923,7 +923,7 @@ struct amdgpu_cs_parser {
 struct amdgpu_bo_list *bo_list;
 struct amdgpu_bo_list_entry vm_pd;
 struct list_head validated;
-struct fence *fence;
+struct dma_fence *fence;
 uint64_t bytes_moved_threshold;
 uint64_t bytes_moved;
 struct amdgpu_bo_list_entry *evictable;
@@ -943,7 +943,7 @@ struct amdgpu_job {
 struct amdgpu_ring *ring;
 struct amdgpu_sync sync;
 struct amdgpu_ib *ibs;
-struct fence *fence; /* the hw fence */
+struct dma_fence *fence; /* the hw fence */
 uint32_t preamble_status;
 uint32_t num_ibs;
 void *owner;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 345305235349..cc97eee93226 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -33,7 +33,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
 {
 unsigned long start_jiffies;
 unsigned long end_jiffies;
-struct fence *fence = NULL;
+struct dma_fence *fence = NULL;
 int i, r;
 start_jiffies = jiffies;
@@ -43,17 +43,17 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
 false);
 if (r)
 goto exit_do_move;
-r = fence_wait(fence, false);
+r = dma_fence_wait(fence, false);
 if (r)
 goto exit_do_move;
-fence_put(fence);
+dma_fence_put(fence);
 }
 end_jiffies = jiffies;
 r = jiffies_to_msecs(end_jiffies - start_jiffies);
 exit_do_move:
 if (fence)
-fence_put(fence);
+dma_fence_put(fence);
 return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index cf03f9f01f40..a024217896fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -735,7 +735,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
 ttm_eu_backoff_reservation(&parser->ticket,
 &parser->validated);
 }
-fence_put(parser->fence);
+dma_fence_put(parser->fence);
 if (parser->ctx)
 amdgpu_ctx_put(parser->ctx);
@@ -772,7 +772,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
 if (p->bo_list) {
 for (i = 0; i < p->bo_list->num_entries; i++) {
-struct fence *f;
+struct dma_fence *f;
 /* ignore duplicates */
 bo = p->bo_list->array[i].robj;
@@ -973,7 +973,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 for (j = 0; j < num_deps; ++j) {
 struct amdgpu_ring *ring;
 struct amdgpu_ctx *ctx;
-struct fence *fence;
+struct dma_fence *fence;
 r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
 deps[j].ip_instance,
@@ -995,7 +995,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 } else if (fence) {
 r = amdgpu_sync_fence(adev, &p->job->sync,
 fence);
-fence_put(fence);
+dma_fence_put(fence);
 amdgpu_ctx_put(ctx);
 if (r)
 return r;
@@ -1025,7 +1025,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 job->owner = p->filp;
 job->fence_ctx = entity->fence_context;
-p->fence = fence_get(&job->base.s_fence->finished);
+p->fence = dma_fence_get(&job->base.s_fence->finished);
 cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
 job->uf_sequence = cs->out.handle;
 amdgpu_job_free_resources(job);
@@ -1108,7 +1108,7 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
 struct amdgpu_ring *ring = NULL;
 struct amdgpu_ctx *ctx;
-struct fence *fence;
+struct dma_fence *fence;
 long r;
 r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
@@ -1124,8 +1124,8 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 if (IS_ERR(fence))
 r = PTR_ERR(fence);
 else if (fence) {
-r = fence_wait_timeout(fence, true, timeout);
-fence_put(fence);
+r = dma_fence_wait_timeout(fence, true, timeout);
+dma_fence_put(fence);
 } else
 r = 1;
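The amdgpu_benchmark_do_move() hunks above show the ownership rule the rename leaves untouched: every fence a function receives must eventually be balanced by dma_fence_put(), on success and error paths alike. A minimal sketch of that discipline, with a hypothetical copy() callback standing in for amdgpu_copy_buffer():

#include <linux/dma-fence.h>
#include <linux/err.h>

static int do_n_copies(struct dma_fence *(*copy)(void *), void *ctx, int n)
{
	struct dma_fence *fence;
	int i, r = 0;

	for (i = 0; i < n; i++) {
		fence = copy(ctx);	/* assumed to return a referenced fence */
		if (IS_ERR(fence))
			return PTR_ERR(fence);
		r = dma_fence_wait(fence, false);
		dma_fence_put(fence);	/* release before the next iteration */
		if (r)
			break;
	}
	return r;
}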
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 6d86eaef934c..400c66ba4c6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -35,7 +35,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
 kref_init(&ctx->refcount);
 spin_lock_init(&ctx->ring_lock);
 ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
-sizeof(struct fence*), GFP_KERNEL);
+sizeof(struct dma_fence*), GFP_KERNEL);
 if (!ctx->fences)
 return -ENOMEM;
@@ -79,7 +79,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 for (j = 0; j < amdgpu_sched_jobs; ++j)
-fence_put(ctx->rings[i].fences[j]);
+dma_fence_put(ctx->rings[i].fences[j]);
 kfree(ctx->fences);
 ctx->fences = NULL;
@@ -241,39 +241,39 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
 }
 uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
-struct fence *fence)
+struct dma_fence *fence)
 {
 struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
 uint64_t seq = cring->sequence;
 unsigned idx = 0;
-struct fence *other = NULL;
+struct dma_fence *other = NULL;
 idx = seq & (amdgpu_sched_jobs - 1);
 other = cring->fences[idx];
 if (other) {
 signed long r;
-r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
 if (r < 0)
 DRM_ERROR("Error (%ld) waiting for fence!\n", r);
 }
-fence_get(fence);
+dma_fence_get(fence);
 spin_lock(&ctx->ring_lock);
 cring->fences[idx] = fence;
 cring->sequence++;
 spin_unlock(&ctx->ring_lock);
-fence_put(other);
+dma_fence_put(other);
 return seq;
 }
-struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
-struct amdgpu_ring *ring, uint64_t seq)
+struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+struct amdgpu_ring *ring, uint64_t seq)
 {
 struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
-struct fence *fence;
+struct dma_fence *fence;
 spin_lock(&ctx->ring_lock);
@@ -288,7 +288,7 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 return NULL;
 }
-fence = fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
+fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
 spin_unlock(&ctx->ring_lock);
 return fence;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 3b9b58debabd..6958d4af017f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1620,7 +1620,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 adev->vm_manager.vm_pte_funcs = NULL;
 adev->vm_manager.vm_pte_num_rings = 0;
 adev->gart.gart_funcs = NULL;
-adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
+adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
 adev->smc_rreg = &amdgpu_invalid_rreg;
 adev->smc_wreg = &amdgpu_invalid_wreg;
@@ -2215,7 +2215,7 @@ bool amdgpu_need_backup(struct amdgpu_device *adev)
 static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
 struct amdgpu_ring *ring,
 struct amdgpu_bo *bo,
-struct fence **fence)
+struct dma_fence **fence)
 {
 uint32_t domain;
 int r;
@@ -2334,30 +2334,30 @@ retry:
 if (need_full_reset && amdgpu_need_backup(adev)) {
 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 struct amdgpu_bo *bo, *tmp;
-struct fence *fence = NULL, *next = NULL;
+struct dma_fence *fence = NULL, *next = NULL;
 DRM_INFO("recover vram bo from shadow\n");
 mutex_lock(&adev->shadow_list_lock);
 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
 if (fence) {
-r = fence_wait(fence, false);
+r = dma_fence_wait(fence, false);
 if (r) {
 WARN(r, "recovery from shadow isn't comleted\n");
 break;
 }
 }
-fence_put(fence);
+dma_fence_put(fence);
 fence = next;
 }
 mutex_unlock(&adev->shadow_list_lock);
 if (fence) {
-r = fence_wait(fence, false);
+r = dma_fence_wait(fence, false);
 if (r)
 WARN(r, "recovery from shadow isn't comleted\n");
 }
-fence_put(fence);
+dma_fence_put(fence);
 }
 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 struct amdgpu_ring *ring = adev->rings[i];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index c7bc2b3c1b97..741144fcc7bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -35,29 +35,29 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
-static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb)
+static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb)
 {
 struct amdgpu_flip_work *work =
 container_of(cb, struct amdgpu_flip_work, cb);
-fence_put(f);
+dma_fence_put(f);
 schedule_work(&work->flip_work.work);
 }
 static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
-struct fence **f)
+struct dma_fence **f)
 {
-struct fence *fence= *f;
+struct dma_fence *fence = *f;
 if (fence == NULL)
 return false;
 *f = NULL;
-if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
+if (!dma_fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
 return true;
-fence_put(fence);
+dma_fence_put(fence);
 return false;
 }
@@ -244,9 +244,9 @@ unreserve:
 cleanup:
 amdgpu_bo_unref(&work->old_abo);
-fence_put(work->excl);
+dma_fence_put(work->excl);
 for (i = 0; i < work->shared_count; ++i)
-fence_put(work->shared[i]);
+dma_fence_put(work->shared[i]);
 kfree(work->shared);
 kfree(work);
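amdgpu_flip_handle_fence() above is the standard completion-callback idiom. dma_fence_add_callback() returns 0 when the callback was armed, and a negative value when the fence had already signaled, in which case no callback will fire and the caller must complete synchronously. A hedged restatement with illustrative names ("my_work" is ours, not amdgpu's):

#include <linux/dma-fence.h>
#include <linux/workqueue.h>

struct my_work {
	struct work_struct work;
	struct dma_fence_cb cb;
};

static void my_fence_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct my_work *w = container_of(cb, struct my_work, cb);

	dma_fence_put(f);	/* the callback path owns the reference */
	schedule_work(&w->work);
}

static bool defer_until_signaled(struct my_work *w, struct dma_fence *f)
{
	if (!dma_fence_add_callback(f, &w->cb, my_fence_cb))
		return true;	/* armed; my_fence_cb() runs on signal */

	dma_fence_put(f);	/* already signaled: run the work now */
	return false;
}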
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 3a2e42f4b897..57552c79ec58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -48,7 +48,7 @@
 */
 struct amdgpu_fence {
-struct fence base;
+struct dma_fence base;
 /* RB, DMA, etc. */
 struct amdgpu_ring *ring;
@@ -73,8 +73,8 @@ void amdgpu_fence_slab_fini(void)
 /*
 * Cast helper
 */
-static const struct fence_ops amdgpu_fence_ops;
-static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
+static const struct dma_fence_ops amdgpu_fence_ops;
+static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
 {
 struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
@@ -130,11 +130,11 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
 {
 struct amdgpu_device *adev = ring->adev;
 struct amdgpu_fence *fence;
-struct fence *old, **ptr;
+struct dma_fence *old, **ptr;
 uint32_t seq;
 fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
@@ -143,10 +143,10 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
 seq = ++ring->fence_drv.sync_seq;
 fence->ring = ring;
-fence_init(&fence->base, &amdgpu_fence_ops,
-&ring->fence_drv.lock,
-adev->fence_context + ring->idx,
-seq);
+dma_fence_init(&fence->base, &amdgpu_fence_ops,
+&ring->fence_drv.lock,
+adev->fence_context + ring->idx,
+seq);
 amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
 seq, AMDGPU_FENCE_FLAG_INT);
@@ -155,12 +155,12 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
 * emitting the fence would mess up the hardware ring buffer.
 */
 old = rcu_dereference_protected(*ptr, 1);
-if (old && !fence_is_signaled(old)) {
+if (old && !dma_fence_is_signaled(old)) {
 DRM_INFO("rcu slot is busy\n");
-fence_wait(old, false);
+dma_fence_wait(old, false);
 }
-rcu_assign_pointer(*ptr, fence_get(&fence->base));
+rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
 *f = &fence->base;
@@ -211,7 +211,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
 seq &= drv->num_fences_mask;
 do {
-struct fence *fence, **ptr;
+struct dma_fence *fence, **ptr;
 ++last_seq;
 last_seq &= drv->num_fences_mask;
@@ -224,13 +224,13 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
 if (!fence)
 continue;
-r = fence_signal(fence);
+r = dma_fence_signal(fence);
 if (!r)
-FENCE_TRACE(fence, "signaled from irq context\n");
+DMA_FENCE_TRACE(fence, "signaled from irq context\n");
 else
 BUG();
-fence_put(fence);
+dma_fence_put(fence);
 } while (last_seq != seq);
 }
@@ -260,7 +260,7 @@ static void amdgpu_fence_fallback(unsigned long arg)
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
 {
 uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
-struct fence *fence, **ptr;
+struct dma_fence *fence, **ptr;
 int r;
 if (!seq)
@@ -269,14 +269,14 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
 rcu_read_lock();
 fence = rcu_dereference(*ptr);
-if (!fence || !fence_get_rcu(fence)) {
+if (!fence || !dma_fence_get_rcu(fence)) {
 rcu_read_unlock();
 return 0;
 }
 rcu_read_unlock();
-r = fence_wait(fence, false);
-fence_put(fence);
+r = dma_fence_wait(fence, false);
+dma_fence_put(fence);
 return r;
 }
@@ -452,7 +452,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 amd_sched_fini(&ring->sched);
 del_timer_sync(&ring->fence_drv.fallback_timer);
 for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
-fence_put(ring->fence_drv.fences[j]);
+dma_fence_put(ring->fence_drv.fences[j]);
 kfree(ring->fence_drv.fences);
 ring->fence_drv.fences = NULL;
 ring->fence_drv.initialized = false;
@@ -541,12 +541,12 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
 * Common fence implementation
 */
-static const char *amdgpu_fence_get_driver_name(struct fence *fence)
+static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
 {
 return "amdgpu";
 }
-static const char *amdgpu_fence_get_timeline_name(struct fence *f)
+static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
 {
 struct amdgpu_fence *fence = to_amdgpu_fence(f);
 return (const char *)fence->ring->name;
@@ -560,7 +560,7 @@ static const char *amdgpu_fence_get_timeline_name(struct fence *f)
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
-static bool amdgpu_fence_enable_signaling(struct fence *f)
+static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
 {
 struct amdgpu_fence *fence = to_amdgpu_fence(f);
 struct amdgpu_ring *ring = fence->ring;
@@ -568,7 +568,7 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
 if (!timer_pending(&ring->fence_drv.fallback_timer))
 amdgpu_fence_schedule_fallback(ring);
-FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
+DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
 return true;
 }
@@ -582,7 +582,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
 */
 static void amdgpu_fence_free(struct rcu_head *rcu)
 {
-struct fence *f = container_of(rcu, struct fence, rcu);
+struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
 struct amdgpu_fence *fence = to_amdgpu_fence(f);
 kmem_cache_free(amdgpu_fence_slab, fence);
 }
@@ -595,16 +595,16 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
-static void amdgpu_fence_release(struct fence *f)
+static void amdgpu_fence_release(struct dma_fence *f)
 {
 call_rcu(&f->rcu, amdgpu_fence_free);
 }
-static const struct fence_ops amdgpu_fence_ops = {
+static const struct dma_fence_ops amdgpu_fence_ops = {
 .get_driver_name = amdgpu_fence_get_driver_name,
 .get_timeline_name = amdgpu_fence_get_timeline_name,
 .enable_signaling = amdgpu_fence_enable_signaling,
-.wait = fence_default_wait,
+.wait = dma_fence_default_wait,
 .release = amdgpu_fence_release,
 };
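The amdgpu_fence_ops conversion above is essentially the whole driver-facing surface of the renamed API. A minimal sketch of a fence provider under the same 4.9-era ops layout; the "toy" names are ours, and a real driver would call dma_fence_signal() from its interrupt handler:

#include <linux/dma-fence.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(toy_fence_lock);

static const char *toy_get_driver_name(struct dma_fence *f)
{
	return "toy";
}

static const char *toy_get_timeline_name(struct dma_fence *f)
{
	return "toy-ring0";
}

static bool toy_enable_signaling(struct dma_fence *f)
{
	/* called with toy_fence_lock held; arm whatever interrupt will
	 * eventually call dma_fence_signal(f) */
	return true;
}

static const struct dma_fence_ops toy_fence_ops = {
	.get_driver_name = toy_get_driver_name,
	.get_timeline_name = toy_get_timeline_name,
	.enable_signaling = toy_enable_signaling,
	.wait = dma_fence_default_wait,
};

/* context comes once per timeline from dma_fence_context_alloc() */
static void toy_fence_emit(struct dma_fence *f, u64 context, unsigned int seqno)
{
	dma_fence_init(f, &toy_fence_ops, &toy_fence_lock, context, seqno);
}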
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 16308eb22e7f..216a9572d946 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -89,7 +89,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 * Free an IB (all asics).
 */
 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
-struct fence *f)
+struct dma_fence *f)
 {
 amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
 }
@@ -116,8 +116,8 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
 * to SI there was just a DE IB.
 */
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
-struct amdgpu_ib *ibs, struct fence *last_vm_update,
-struct amdgpu_job *job, struct fence **f)
+struct amdgpu_ib *ibs, struct dma_fence *last_vm_update,
+struct amdgpu_job *job, struct dma_fence **f)
 {
 struct amdgpu_device *adev = ring->adev;
 struct amdgpu_ib *ib = &ibs[0];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 8c5807994073..a0de6286c453 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -81,7 +81,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 void amdgpu_job_free_resources(struct amdgpu_job *job)
 {
-struct fence *f;
+struct dma_fence *f;
 unsigned i;
 /* use sched fence if available */
@@ -95,7 +95,7 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
 {
 struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
-fence_put(job->fence);
+dma_fence_put(job->fence);
 amdgpu_sync_free(&job->sync);
 kfree(job);
 }
@@ -104,14 +104,14 @@ void amdgpu_job_free(struct amdgpu_job *job)
 {
 amdgpu_job_free_resources(job);
-fence_put(job->fence);
+dma_fence_put(job->fence);
 amdgpu_sync_free(&job->sync);
 kfree(job);
 }
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 struct amd_sched_entity *entity, void *owner,
-struct fence **f)
+struct dma_fence **f)
 {
 int r;
 job->ring = ring;
@@ -125,19 +125,19 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 job->owner = owner;
 job->fence_ctx = entity->fence_context;
-*f = fence_get(&job->base.s_fence->finished);
+*f = dma_fence_get(&job->base.s_fence->finished);
 amdgpu_job_free_resources(job);
 amd_sched_entity_push_job(&job->base);
 return 0;
 }
-static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
 {
 struct amdgpu_job *job = to_amdgpu_job(sched_job);
 struct amdgpu_vm *vm = job->vm;
-struct fence *fence = amdgpu_sync_get_fence(&job->sync);
+struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync);
 if (fence == NULL && vm && !job->vm_id) {
 struct amdgpu_ring *ring = job->ring;
@@ -155,9 +155,9 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
 return fence;
 }
-static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 {
-struct fence *fence = NULL;
+struct dma_fence *fence = NULL;
 struct amdgpu_job *job;
 int r;
@@ -176,8 +176,8 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 DRM_ERROR("Error scheduling IBs (%d)\n", r);
 /* if gpu reset, hw fence will be replaced here */
-fence_put(job->fence);
-job->fence = fence_get(fence);
+dma_fence_put(job->fence);
+job->fence = dma_fence_get(fence);
 amdgpu_job_free_resources(job);
 return fence;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 6efa8d73b394..f0a0513ef4c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -391,7 +391,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
 bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
-struct fence *fence;
+struct dma_fence *fence;
 if (adev->mman.buffer_funcs_ring == NULL ||
 !adev->mman.buffer_funcs_ring->ready) {
@@ -411,9 +411,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
 amdgpu_bo_fence(bo, fence, false);
 amdgpu_bo_unreserve(bo);
-fence_put(bo->tbo.moving);
-bo->tbo.moving = fence_get(fence);
-fence_put(fence);
+dma_fence_put(bo->tbo.moving);
+bo->tbo.moving = dma_fence_get(fence);
+dma_fence_put(fence);
 }
 *bo_ptr = bo;
@@ -499,7 +499,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
 struct amdgpu_ring *ring,
 struct amdgpu_bo *bo,
 struct reservation_object *resv,
-struct fence **fence,
+struct dma_fence **fence,
 bool direct)
 {
@@ -531,7 +531,7 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
 struct amdgpu_ring *ring,
 struct amdgpu_bo *bo,
 struct reservation_object *resv,
-struct fence **fence,
+struct dma_fence **fence,
 bool direct)
 {
@@ -941,7 +941,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 * @shared: true if fence should be added shared
 *
 */
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 bool shared)
 {
 struct reservation_object *resv = bo->tbo.resv;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index d3baf834ac24..5cbf59ec0f68 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -157,19 +157,19 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 struct ttm_mem_reg *new_mem);
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 bool shared);
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
 int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
 struct amdgpu_ring *ring,
 struct amdgpu_bo *bo,
 struct reservation_object *resv,
-struct fence **fence, bool direct);
+struct dma_fence **fence, bool direct);
 int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
 struct amdgpu_ring *ring,
 struct amdgpu_bo *bo,
 struct reservation_object *resv,
-struct fence **fence,
+struct dma_fence **fence,
 bool direct);
@@ -201,7 +201,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 unsigned size, unsigned align);
 void amdgpu_sa_bo_free(struct amdgpu_device *adev,
 struct amdgpu_sa_bo **sa_bo,
-struct fence *fence);
+struct dma_fence *fence);
 #if defined(CONFIG_DEBUG_FS)
 void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 struct seq_file *m);
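amdgpu_bo_fence() above attaches a fence to a buffer's reservation object. Assuming the contemporaneous reservation_object API (later renamed dma_resv), the distinction it forwards is roughly the following sketch; the caller must already hold the reservation ww_mutex, and "toy_bo_fence" is our name:

#include <linux/dma-fence.h>
#include <linux/reservation.h>

/* exclusive fences supersede all shared ones; a shared fence first
 * needs a reserved slot */
static int toy_bo_fence(struct reservation_object *resv,
			struct dma_fence *fence, bool shared)
{
	if (shared) {
		int r = reservation_object_reserve_shared(resv);

		if (r)
			return r;
		reservation_object_add_shared_fence(resv, fence);
	} else {
		reservation_object_add_excl_fence(resv, fence);
	}
	return 0;
}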
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 1ee1b65d7eff..f2ad49c8e85b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -67,7 +67,7 @@ struct amdgpu_fence_driver {
 struct timer_list fallback_timer;
 unsigned num_fences_mask;
 spinlock_t lock;
-struct fence **fences;
+struct dma_fence **fences;
 };
 int amdgpu_fence_driver_init(struct amdgpu_device *adev);
@@ -81,7 +81,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 unsigned irq_type);
 void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
 void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence);
 void amdgpu_fence_process(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index d8af37a845f4..fd26c4b8d793 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -147,7 +147,7 @@ static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
 }
 list_del_init(&sa_bo->olist);
 list_del_init(&sa_bo->flist);
-fence_put(sa_bo->fence);
+dma_fence_put(sa_bo->fence);
 kfree(sa_bo);
 }
@@ -161,7 +161,7 @@ static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
 sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
 list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
 if (sa_bo->fence == NULL ||
-!fence_is_signaled(sa_bo->fence)) {
+!dma_fence_is_signaled(sa_bo->fence)) {
 return;
 }
 amdgpu_sa_bo_remove_locked(sa_bo);
@@ -244,7 +244,7 @@ static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
 }
 static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
-struct fence **fences,
+struct dma_fence **fences,
 unsigned *tries)
 {
 struct amdgpu_sa_bo *best_bo = NULL;
@@ -272,7 +272,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
 sa_bo = list_first_entry(&sa_manager->flist[i],
 struct amdgpu_sa_bo, flist);
-if (!fence_is_signaled(sa_bo->fence)) {
+if (!dma_fence_is_signaled(sa_bo->fence)) {
 fences[i] = sa_bo->fence;
 continue;
 }
@@ -314,7 +314,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 struct amdgpu_sa_bo **sa_bo,
 unsigned size, unsigned align)
 {
-struct fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
+struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
 unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
 unsigned count;
 int i, r;
@@ -356,14 +356,14 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
 if (fences[i])
-fences[count++] = fence_get(fences[i]);
+fences[count++] = dma_fence_get(fences[i]);
 if (count) {
 spin_unlock(&sa_manager->wq.lock);
-t = fence_wait_any_timeout(fences, count, false,
-MAX_SCHEDULE_TIMEOUT);
+t = dma_fence_wait_any_timeout(fences, count, false,
+MAX_SCHEDULE_TIMEOUT);
 for (i = 0; i < count; ++i)
-fence_put(fences[i]);
+dma_fence_put(fences[i]);
 r = (t > 0) ? 0 : t;
 spin_lock(&sa_manager->wq.lock);
@@ -384,7 +384,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 }
 void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
-struct fence *fence)
+struct dma_fence *fence)
 {
 struct amdgpu_sa_manager *sa_manager;
@@ -394,10 +394,10 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
 sa_manager = (*sa_bo)->manager;
 spin_lock(&sa_manager->wq.lock);
-if (fence && !fence_is_signaled(fence)) {
+if (fence && !dma_fence_is_signaled(fence)) {
 uint32_t idx;
-(*sa_bo)->fence = fence_get(fence);
+(*sa_bo)->fence = dma_fence_get(fence);
 idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
 list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
 } else {
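The suballocator hunks above lean on dma_fence_is_signaled() being a cheap, non-blocking query: reclaim every block whose fence has already signaled before considering a blocking wait. A reduced sketch with illustrative bookkeeping types ("block", "reclaim_idle" are ours):

#include <linux/dma-fence.h>

struct block {
	struct dma_fence *fence;
	struct block *next;
};

static unsigned int reclaim_idle(struct block **head,
				 void (*release)(struct block *))
{
	unsigned int freed = 0;

	while (*head) {
		struct block *b = *head;

		if (b->fence && !dma_fence_is_signaled(b->fence))
			break;	/* the GPU is still using this block */
		*head = b->next;
		dma_fence_put(b->fence);	/* NULL-safe */
		release(b);
		freed++;
	}
	return freed;
}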
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 5c8d3022fb87..ed814e6d0207 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -34,7 +34,7 @@
 struct amdgpu_sync_entry {
 struct hlist_node node;
-struct fence *fence;
+struct dma_fence *fence;
 };
 static struct kmem_cache *amdgpu_sync_slab;
@@ -60,7 +60,8 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
 *
 * Test if the fence was issued by us.
 */
-static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
+static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
+struct dma_fence *f)
 {
 struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
@@ -81,7 +82,7 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
 *
 * Extract who originally created the fence.
 */
-static void *amdgpu_sync_get_owner(struct fence *f)
+static void *amdgpu_sync_get_owner(struct dma_fence *f)
 {
 struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
@@ -99,13 +100,14 @@ static void *amdgpu_sync_get_owner(struct fence *f)
 *
 * Either keep the existing fence or the new one, depending which one is later.
 */
-static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
+static void amdgpu_sync_keep_later(struct dma_fence **keep,
+struct dma_fence *fence)
 {
-if (*keep && fence_is_later(*keep, fence))
+if (*keep && dma_fence_is_later(*keep, fence))
 return;
-fence_put(*keep);
-*keep = fence_get(fence);
+dma_fence_put(*keep);
+*keep = dma_fence_get(fence);
 }
 /**
@@ -117,7 +119,7 @@ static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
 * Tries to add the fence to an existing hash entry. Returns true when an entry
 * was found, false otherwise.
 */
-static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
+static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
 {
 struct amdgpu_sync_entry *e;
@@ -139,7 +141,7 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
 *
 */
 int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-struct fence *f)
+struct dma_fence *f)
 {
 struct amdgpu_sync_entry *e;
@@ -158,7 +160,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 return -ENOMEM;
 hash_add(sync->fences, &e->node, f->context);
-e->fence = fence_get(f);
+e->fence = dma_fence_get(f);
 return 0;
 }
@@ -177,7 +179,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 void *owner)
 {
 struct reservation_object_list *flist;
-struct fence *f;
+struct dma_fence *f;
 void *fence_owner;
 unsigned i;
 int r = 0;
@@ -231,15 +233,15 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 * Returns the next fence not signaled yet without removing it from the sync
 * object.
 */
-struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
-struct amdgpu_ring *ring)
+struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+struct amdgpu_ring *ring)
 {
 struct amdgpu_sync_entry *e;
 struct hlist_node *tmp;
 int i;
 hash_for_each_safe(sync->fences, i, tmp, e, node) {
-struct fence *f = e->fence;
+struct dma_fence *f = e->fence;
 struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
 if (ring && s_fence) {
@@ -247,16 +249,16 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
 * when they are scheduled.
 */
 if (s_fence->sched == &ring->sched) {
-if (fence_is_signaled(&s_fence->scheduled))
+if (dma_fence_is_signaled(&s_fence->scheduled))
 continue;
 return &s_fence->scheduled;
 }
 }
-if (fence_is_signaled(f)) {
+if (dma_fence_is_signaled(f)) {
 hash_del(&e->node);
-fence_put(f);
+dma_fence_put(f);
 kmem_cache_free(amdgpu_sync_slab, e);
 continue;
 }
@@ -274,11 +276,11 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
 *
 * Get and removes the next fence from the sync object not signaled yet.
 */
-struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
 {
 struct amdgpu_sync_entry *e;
 struct hlist_node *tmp;
-struct fence *f;
+struct dma_fence *f;
 int i;
 hash_for_each_safe(sync->fences, i, tmp, e, node) {
@@ -288,10 +290,10 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
 hash_del(&e->node);
 kmem_cache_free(amdgpu_sync_slab, e);
-if (!fence_is_signaled(f))
+if (!dma_fence_is_signaled(f))
 return f;
-fence_put(f);
+dma_fence_put(f);
 }
 return NULL;
 }
@@ -311,11 +313,11 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
 hash_for_each_safe(sync->fences, i, tmp, e, node) {
 hash_del(&e->node);
-fence_put(e->fence);
+dma_fence_put(e->fence);
 kmem_cache_free(amdgpu_sync_slab, e);
 }
-fence_put(sync->last_vm_update);
+dma_fence_put(sync->last_vm_update);
 }
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index 405f379ac186..605be266e07f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -26,7 +26,7 @@
 #include <linux/hashtable.h>
-struct fence;
+struct dma_fence;
 struct reservation_object;
 struct amdgpu_device;
 struct amdgpu_ring;
@@ -36,19 +36,19 @@ struct amdgpu_ring;
 */
 struct amdgpu_sync {
 DECLARE_HASHTABLE(fences, 4);
-struct fence *last_vm_update;
+struct dma_fence *last_vm_update;
 };
 void amdgpu_sync_create(struct amdgpu_sync *sync);
 int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-struct fence *f);
+struct dma_fence *f);
 int amdgpu_sync_resv(struct amdgpu_device *adev,
 struct amdgpu_sync *sync,
 struct reservation_object *resv,
 void *owner);
-struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
 struct amdgpu_ring *ring);
-struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
 void amdgpu_sync_free(struct amdgpu_sync *sync);
 int amdgpu_sync_init(void);
 void amdgpu_sync_fini(void);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index b827c75e95de..e05a24325eeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -78,7 +78,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 void *gtt_map, *vram_map;
 void **gtt_start, **gtt_end;
 void **vram_start, **vram_end;
-struct fence *fence = NULL;
+struct dma_fence *fence = NULL;
 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
 AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
@@ -118,13 +118,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 goto out_lclean_unpin;
 }
-r = fence_wait(fence, false);
+r = dma_fence_wait(fence, false);
 if (r) {
 DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
 goto out_lclean_unpin;
 }
-fence_put(fence);
+dma_fence_put(fence);
 r = amdgpu_bo_kmap(vram_obj, &vram_map);
 if (r) {
@@ -163,13 +163,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 goto out_lclean_unpin;
 }
-r = fence_wait(fence, false);
+r = dma_fence_wait(fence, false);
 if (r) {
 DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
 goto out_lclean_unpin;
 }
-fence_put(fence);
+dma_fence_put(fence);
 r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
 if (r) {
@@ -216,7 +216,7 @@ out_lclean:
 amdgpu_bo_unref(&gtt_obj[i]);
 }
 if (fence)
-fence_put(fence);
+dma_fence_put(fence);
 break;
 }
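amdgpu_sync_keep_later() above is worth restating on its own: within one timeline (same ->context) only the newest fence matters, so the older reference is dropped. A sketch, with the caveat that dma_fence_is_later() compares seqnos and is only meaningful for fences from the same context:

#include <linux/dma-fence.h>

static void keep_later(struct dma_fence **keep, struct dma_fence *fence)
{
	if (*keep && dma_fence_is_later(*keep, fence))
		return;		/* what we hold already supersedes it */

	dma_fence_put(*keep);		/* drop the older fence (NULL-safe) */
	*keep = dma_fence_get(fence);	/* take a reference on the newer one */
}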
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 067e5e683bb3..bb964a8ff938 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -104,7 +104,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
 __field(struct amdgpu_device *, adev)
 __field(struct amd_sched_job *, sched_job)
 __field(struct amdgpu_ib *, ib)
-__field(struct fence *, fence)
+__field(struct dma_fence *, fence)
 __field(char *, ring_name)
 __field(u32, num_ibs)
 ),
@@ -129,7 +129,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
 __field(struct amdgpu_device *, adev)
 __field(struct amd_sched_job *, sched_job)
 __field(struct amdgpu_ib *, ib)
-__field(struct fence *, fence)
+__field(struct dma_fence *, fence)
 __field(char *, ring_name)
 __field(u32, num_ibs)
 ),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index f1a206df9823..1821c05484d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -287,7 +287,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 struct drm_mm_node *old_mm, *new_mm;
 uint64_t old_start, old_size, new_start, new_size;
 unsigned long num_pages;
-struct fence *fence = NULL;
+struct dma_fence *fence = NULL;
 int r;
 BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
@@ -313,7 +313,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 num_pages = new_mem->num_pages;
 while (num_pages) {
 unsigned long cur_pages = min(old_size, new_size);
-struct fence *next;
+struct dma_fence *next;
 r = amdgpu_copy_buffer(ring, old_start, new_start,
 cur_pages * PAGE_SIZE,
@@ -321,7 +321,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 if (r)
 goto error;
-fence_put(fence);
+dma_fence_put(fence);
 fence = next;
 num_pages -= cur_pages;
@@ -353,13 +353,13 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 }
 r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
-fence_put(fence);
+dma_fence_put(fence);
 return r;
 error:
 if (fence)
-fence_wait(fence, false);
-fence_put(fence);
+dma_fence_wait(fence, false);
+dma_fence_put(fence);
 return r;
 }
@@ -1316,7 +1316,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 uint64_t dst_offset,
 uint32_t byte_count,
 struct reservation_object *resv,
-struct fence **fence, bool direct_submit)
+struct dma_fence **fence, bool direct_submit)
 {
 struct amdgpu_device *adev = ring->adev;
 struct amdgpu_job *job;
@@ -1363,7 +1363,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 if (direct_submit) {
 r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
 NULL, NULL, fence);
-job->fence = fence_get(*fence);
+job->fence = dma_fence_get(*fence);
 if (r)
 DRM_ERROR("Error scheduling IBs (%d)\n", r);
 amdgpu_job_free(job);
@@ -1384,7 +1384,7 @@ error_free:
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 uint32_t src_data,
 struct reservation_object *resv,
-struct fence **fence)
+struct dma_fence **fence)
 {
 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 struct amdgpu_job *job;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index d1c00c04782f..98ee384f0fca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -78,11 +78,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 uint64_t dst_offset,
 uint32_t byte_count,
 struct reservation_object *resv,
-struct fence **fence, bool direct_submit);
+struct dma_fence **fence, bool direct_submit);
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 uint32_t src_data,
 struct reservation_object *resv,
-struct fence **fence);
+struct dma_fence **fence);
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
 bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 1b54cc218b47..fb270c7e7171 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -333,7 +333,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 for (i = 0; i < adev->uvd.max_handles; ++i) {
 uint32_t handle = atomic_read(&adev->uvd.handles[i]);
 if (handle != 0 && adev->uvd.filp[i] == filp) {
-struct fence *fence;
+struct dma_fence *fence;
 r = amdgpu_uvd_get_destroy_msg(ring, handle,
 false, &fence);
@@ -342,8 +342,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 continue;
 }
-fence_wait(fence, false);
-fence_put(fence);
+dma_fence_wait(fence, false);
+dma_fence_put(fence);
 adev->uvd.filp[i] = NULL;
 atomic_set(&adev->uvd.handles[i], 0);
@@ -912,14 +912,14 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 }
 static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
-bool direct, struct fence **fence)
+bool direct, struct dma_fence **fence)
 {
 struct ttm_validate_buffer tv;
 struct ww_acquire_ctx ticket;
 struct list_head head;
 struct amdgpu_job *job;
 struct amdgpu_ib *ib;
-struct fence *f = NULL;
+struct dma_fence *f = NULL;
 struct amdgpu_device *adev = ring->adev;
 uint64_t addr;
 int i, r;
@@ -963,7 +963,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 if (direct) {
 r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
-job->fence = fence_get(f);
+job->fence = dma_fence_get(f);
 if (r)
 goto err_free;
@@ -978,9 +978,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 ttm_eu_fence_buffer_objects(&ticket, &head, f);
 if (fence)
-*fence = fence_get(f);
+*fence = dma_fence_get(f);
 amdgpu_bo_unref(&bo);
-fence_put(f);
+dma_fence_put(f);
 return 0;
@@ -996,7 +996,7 @@ err:
 crash the vcpu so just try to emmit a dummy create/destroy msg to
 avoid this */
 int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-struct fence **fence)
+struct dma_fence **fence)
 {
 struct amdgpu_device *adev = ring->adev;
 struct amdgpu_bo *bo;
@@ -1046,7 +1046,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 }
 int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-bool direct, struct fence **fence)
+bool direct, struct dma_fence **fence)
 {
 struct amdgpu_device *adev = ring->adev;
 struct amdgpu_bo *bo;
@@ -1133,7 +1133,7 @@ void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
 */
 int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
-struct fence *fence;
+struct dma_fence *fence;
 long r;
 r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
@@ -1148,7 +1148,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 goto error;
 }
-r = fence_wait_timeout(fence, false, timeout);
+r = dma_fence_wait_timeout(fence, false, timeout);
 if (r == 0) {
 DRM_ERROR("amdgpu: IB test timed out.\n");
 r = -ETIMEDOUT;
@@ -1159,7 +1159,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 r = 0;
 }
-fence_put(fence);
+dma_fence_put(fence);
 error:
 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index c850009602d1..6249ba1bde2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -29,9 +29,9 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
 int amdgpu_uvd_suspend(struct amdgpu_device *adev);
 int amdgpu_uvd_resume(struct amdgpu_device *adev);
 int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-struct fence **fence);
+struct dma_fence **fence);
 int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-bool direct, struct fence **fence);
+bool direct, struct dma_fence **fence);
 void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
 struct drm_file *filp);
 int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 3d6f86cd028f..69b66b9e7f57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -396,12 +396,12 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 * Open up a stream for HW test
 */
 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-struct fence **fence)
+struct dma_fence **fence)
 {
 const unsigned ib_size_dw = 1024;
 struct amdgpu_job *job;
 struct amdgpu_ib *ib;
-struct fence *f = NULL;
+struct dma_fence *f = NULL;
 uint64_t dummy;
 int i, r;
@@ -451,14 +451,14 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 ib->ptr[i] = 0x0;
 r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
-job->fence = fence_get(f);
+job->fence = dma_fence_get(f);
 if (r)
 goto err;
 amdgpu_job_free(job);
 if (fence)
-*fence = fence_get(f);
-fence_put(f);
+*fence = dma_fence_get(f);
+dma_fence_put(f);
 return 0;
 err:
@@ -477,12 +477,12 @@ err:
 * Close up a stream for HW test or if userspace failed to do so
 */
 int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-bool direct, struct fence **fence)
+bool direct, struct dma_fence **fence)
 {
 const unsigned ib_size_dw = 1024;
 struct amdgpu_job *job;
 struct amdgpu_ib *ib;
-struct fence *f = NULL;
+struct dma_fence *f = NULL;
 int i, r;
 r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -514,7 +514,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 if (direct) {
 r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
-job->fence = fence_get(f);
+job->fence = dma_fence_get(f);
 if (r)
 goto err;
@@ -527,8 +527,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 }
 if (fence)
-*fence = fence_get(f);
-fence_put(f);
+*fence = dma_fence_get(f);
+dma_fence_put(f);
 return 0;
 err:
@@ -965,7 +965,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
 */
 int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
-struct fence *fence = NULL;
+struct dma_fence *fence = NULL;
 long r;
 /* skip vce ring1/2 ib test for now, since it's not reliable */
@@ -984,7 +984,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 goto error;
 }
-r = fence_wait_timeout(fence, false, timeout);
+r = dma_fence_wait_timeout(fence, false, timeout);
 if (r == 0) {
 DRM_ERROR("amdgpu: IB test timed out.\n");
 r = -ETIMEDOUT;
@@ -995,6 +995,6 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 r = 0;
 }
 error:
-fence_put(fence);
+dma_fence_put(fence);
 return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 44d49b576513..d98041f7508d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -29,9 +29,9 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
 int amdgpu_vce_suspend(struct amdgpu_device *adev);
 int amdgpu_vce_resume(struct amdgpu_device *adev);
 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-struct fence **fence);
+struct dma_fence **fence);
 int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-bool direct, struct fence **fence);
+bool direct, struct dma_fence **fence);
 void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
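The UVD/VCE helpers above all return their fence through an optional **fence out-parameter while dropping the submit path's local reference. Restated as a sketch (our names), this is the refcount-balanced swap pattern:

#include <linux/dma-fence.h>

static void return_fence(struct dma_fence **slot, struct dma_fence *f)
{
	if (slot) {
		dma_fence_put(*slot);		/* old occupant, may be NULL */
		*slot = dma_fence_get(f);	/* the slot owns a reference */
	}
	dma_fence_put(f);	/* drop the submit path's local reference */
}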
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index ded57dd538e2..e480263387e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -25,7 +25,7 @@
 * Alex Deucher
 * Jerome Glisse
 */
-#include <linux/fence-array.h>
+#include <linux/dma-fence-array.h>
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
@@ -199,14 +199,14 @@ static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-struct amdgpu_sync *sync, struct fence *fence,
+struct amdgpu_sync *sync, struct dma_fence *fence,
 struct amdgpu_job *job)
 {
 struct amdgpu_device *adev = ring->adev;
 uint64_t fence_context = adev->fence_context + ring->idx;
-struct fence *updates = sync->last_vm_update;
+struct dma_fence *updates = sync->last_vm_update;
 struct amdgpu_vm_id *id, *idle;
-struct fence **fences;
+struct dma_fence **fences;
 unsigned i;
 int r = 0;
@@ -230,17 +230,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 if (&idle->list == &adev->vm_manager.ids_lru) {
 u64 fence_context = adev->vm_manager.fence_context + ring->idx;
 unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
-struct fence_array *array;
+struct dma_fence_array *array;
 unsigned j;
 for (j = 0; j < i; ++j)
-fence_get(fences[j]);
+dma_fence_get(fences[j]);
-array = fence_array_create(i, fences, fence_context,
+array = dma_fence_array_create(i, fences, fence_context,
 seqno, true);
 if (!array) {
 for (j = 0; j < i; ++j)
-fence_put(fences[j]);
+dma_fence_put(fences[j]);
 kfree(fences);
 r = -ENOMEM;
 goto error;
@@ -248,7 +248,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 r = amdgpu_sync_fence(ring->adev, sync, &array->base);
-fence_put(&array->base);
+dma_fence_put(&array->base);
 if (r)
 goto error;
@@ -262,7 +262,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 /* Check if we can use a VMID already assigned to this VM */
 i = ring->idx;
 do {
-struct fence *flushed;
+struct dma_fence *flushed;
 id = vm->ids[i++];
 if (i == AMDGPU_MAX_RINGS)
@@ -284,12 +284,12 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 continue;
 if (id->last_flush->context != fence_context &&
-!fence_is_signaled(id->last_flush))
+!dma_fence_is_signaled(id->last_flush))
 continue;
 flushed = id->flushed_updates;
 if (updates &&
-(!flushed || fence_is_later(updates, flushed)))
+(!flushed || dma_fence_is_later(updates, flushed)))
 continue;
 /* Good we can use this VMID. Remember this submission as
@@ -320,14 +320,14 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 if (r)
 goto error;
-fence_put(id->first);
-id->first = fence_get(fence);
+dma_fence_put(id->first);
+id->first = dma_fence_get(fence);
-fence_put(id->last_flush);
+dma_fence_put(id->last_flush);
 id->last_flush = NULL;
-fence_put(id->flushed_updates);
-id->flushed_updates = fence_get(updates);
+dma_fence_put(id->flushed_updates);
+id->flushed_updates = dma_fence_get(updates);
 id->pd_gpu_addr = job->vm_pd_addr;
 id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
@@ -398,7 +398,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
 amdgpu_vm_is_gpu_reset(adev, id))) {
-struct fence *fence;
+struct dma_fence *fence;
 trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
 amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
@@ -408,7 +408,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 return r;
 mutex_lock(&adev->vm_manager.lock);
-fence_put(id->last_flush);
+dma_fence_put(id->last_flush);
 id->last_flush = fence;
 mutex_unlock(&adev->vm_manager.lock);
 }
@@ -542,7 +542,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 struct amdgpu_bo *bo)
 {
 struct amdgpu_ring *ring;
-struct fence *fence = NULL;
+struct dma_fence *fence = NULL;
 struct amdgpu_job *job;
 struct amdgpu_pte_update_params params;
 unsigned entries;
@@ -583,7 +583,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 goto error_free;
 amdgpu_bo_fence(bo, fence, true);
-fence_put(fence);
+dma_fence_put(fence);
 return 0;
 error_free:
@@ -640,7 +640,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 unsigned count = 0, pt_idx, ndw;
 struct amdgpu_job *job;
 struct amdgpu_pte_update_params params;
-struct fence *fence = NULL;
+struct dma_fence *fence = NULL;
 int r;
@@ -750,9 +750,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 goto error_free;
 amdgpu_bo_fence(vm->page_directory, fence, true);
-fence_put(vm->page_directory_fence);
-vm->page_directory_fence = fence_get(fence);
-fence_put(fence);
+dma_fence_put(vm->page_directory_fence);
+vm->page_directory_fence = dma_fence_get(fence);
+dma_fence_put(fence);
 return 0;
@@ -938,20 +938,20 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
 * Returns 0 for success, -EINVAL for failure.
 */
 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
-struct fence *exclusive,
+struct dma_fence *exclusive,
 uint64_t src,
 dma_addr_t *pages_addr,
 struct amdgpu_vm *vm,
 uint64_t start, uint64_t last,
 uint32_t flags, uint64_t addr,
-struct fence **fence)
+struct dma_fence **fence)
 {
 struct amdgpu_ring *ring;
 void *owner = AMDGPU_FENCE_OWNER_VM;
 unsigned nptes, ncmds, ndw;
 struct amdgpu_job *job;
 struct amdgpu_pte_update_params params;
-struct fence *f = NULL;
+struct dma_fence *f = NULL;
 int r;
 memset(&params, 0, sizeof(params));
@@ -1054,10 +1054,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 amdgpu_bo_fence(vm->page_directory, f, true);
 if (fence) {
-fence_put(*fence);
-*fence = fence_get(f);
+dma_fence_put(*fence);
+*fence = dma_fence_get(f);
 }
-fence_put(f);
+dma_fence_put(f);
 return 0;
 error_free:
@@ -1083,14 +1083,14 @@ error_free:
 * Returns 0 for success, -EINVAL for failure.
 */
 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
-struct fence *exclusive,
+struct dma_fence *exclusive,
 uint32_t gtt_flags,
 dma_addr_t *pages_addr,
 struct amdgpu_vm *vm,
 struct amdgpu_bo_va_mapping *mapping,
 uint32_t flags,
 struct drm_mm_node *nodes,
-struct fence **fence)
+struct dma_fence **fence)
 {
 uint64_t pfn, src = 0, start = mapping->it.start;
 int r;
@@ -1178,7 +1178,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 uint32_t gtt_flags, flags;
 struct ttm_mem_reg *mem;
 struct drm_mm_node *nodes;
-struct fence *exclusive;
+struct dma_fence *exclusive;
 int r;
 if (clear) {
@@ -1562,7 +1562,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 kfree(mapping);
 }
-fence_put(bo_va->last_pt_update);
+dma_fence_put(bo_va->last_pt_update);
 kfree(bo_va);
 }
@@ -1725,7 +1725,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 amdgpu_bo_unref(&vm->page_directory->shadow);
 amdgpu_bo_unref(&vm->page_directory);
-fence_put(vm->page_directory_fence);
+dma_fence_put(vm->page_directory_fence);
 }
 /**
@@ -1749,7 +1749,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 &adev->vm_manager.ids_lru);
 }
-adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
+adev->vm_manager.fence_context =
+dma_fence_context_alloc(AMDGPU_MAX_RINGS);
 for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 adev->vm_manager.seqno[i] = 0;
@@ -1771,8 +1772,8 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
 for (i = 0; i < AMDGPU_NUM_VM; ++i) {
 struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
-fence_put(adev->vm_manager.ids[i].first);
+dma_fence_put(adev->vm_manager.ids[i].first);
 amdgpu_sync_free(&adev->vm_manager.ids[i].active);
-fence_put(id->flushed_updates);
+dma_fence_put(id->flushed_updates);
 }
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 42a629b56095..adbc2f5e5c7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -94,7 +94,7 @@ struct amdgpu_vm {
 /* contains the page directory */
 struct amdgpu_bo *page_directory;
 unsigned max_pde_used;
-struct fence *page_directory_fence;
+struct dma_fence *page_directory_fence;
 uint64_t last_eviction_counter;
 /* array of page tables, one for each page directory entry */
@@ -115,14 +115,14 @@ struct amdgpu_vm {
 struct amdgpu_vm_id {
 struct list_head list;
-struct fence *first;
+struct dma_fence *first;
 struct amdgpu_sync active;
-struct fence *last_flush;
+struct dma_fence *last_flush;
 atomic64_t owner;
 uint64_t pd_gpu_addr;
 /* last flushed PD/PT update */
-struct fence *flushed_updates;
+struct dma_fence *flushed_updates;
 uint32_t current_gpu_reset_count;
@@ -172,7 +172,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 struct amdgpu_vm *vm);
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-struct amdgpu_sync *sync, struct fence *fence,
+struct amdgpu_sync *sync, struct dma_fence *fence,
 struct amdgpu_job *job);
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
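amdgpu_vm_grab_id() above also converts the fence-array container, which bundles several fences into one composite dma_fence on a fresh context. A sketch of the same construction (our "bundle" helper); note the array consumes both the fence references and the kmalloc'ed pointer array, and signal_on_any=true makes it signal when the first member does:

#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <linux/slab.h>

static struct dma_fence *bundle(struct dma_fence **src, unsigned int count,
				u64 context, unsigned int seqno)
{
	struct dma_fence **fences;
	struct dma_fence_array *array;
	unsigned int i;

	fences = kmalloc_array(count, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return NULL;

	for (i = 0; i < count; i++)
		fences[i] = dma_fence_get(src[i]);

	/* context would come from dma_fence_context_alloc(1) */
	array = dma_fence_array_create(count, fences, context, seqno, true);
	if (!array) {
		for (i = 0; i < count; i++)
			dma_fence_put(fences[i]);
		kfree(fences);
		return NULL;
	}
	return &array->base;	/* one dma_fence_put() releases the bundle */
}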
amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; - struct fence *f = NULL; + struct dma_fence *f = NULL; unsigned index; u32 tmp = 0; u64 gpu_addr; @@ -655,7 +655,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout) if (r) goto err1; - r = fence_wait_timeout(f, false, timeout); + r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { DRM_ERROR("amdgpu: IB test timed out\n"); r = -ETIMEDOUT; @@ -675,7 +675,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout) err1: amdgpu_ib_free(adev, &ib, NULL); - fence_put(f); + dma_fence_put(f); err0: amdgpu_wb_free(adev, index); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 96dd05dca694..21c086e02e7b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -1522,7 +1522,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; - struct fence *f = NULL; + struct dma_fence *f = NULL; uint32_t scratch; uint32_t tmp = 0; long r; @@ -1548,7 +1548,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) if (r) goto err2; - r = fence_wait_timeout(f, false, timeout); + r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { DRM_ERROR("amdgpu: IB test timed out\n"); r = -ETIMEDOUT; @@ -1569,7 +1569,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) err2: amdgpu_ib_free(adev, &ib, NULL); - fence_put(f); + dma_fence_put(f); err1: amdgpu_gfx_scratch_free(adev, scratch); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 903aa240e946..5b631fd1a879 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2286,7 +2286,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; - struct fence *f = NULL; + struct dma_fence *f = NULL; uint32_t scratch; uint32_t tmp = 0; long r; @@ -2312,7 +2312,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) if (r) goto err2; - r = fence_wait_timeout(f, false, timeout); + r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { DRM_ERROR("amdgpu: IB test timed out\n"); r = -ETIMEDOUT; @@ -2333,7 +2333,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) err2: amdgpu_ib_free(adev, &ib, NULL); - fence_put(f); + dma_fence_put(f); err1: amdgpu_gfx_scratch_free(adev, scratch); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 1c2544f314c0..86a7ca5d8511 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -798,7 +798,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; - struct fence *f = NULL; + struct dma_fence *f = NULL; uint32_t scratch; uint32_t tmp = 0; long r; @@ -824,7 +824,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) if (r) goto err2; - r = fence_wait_timeout(f, false, timeout); + r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { DRM_ERROR("amdgpu: IB test timed out.\n"); r = -ETIMEDOUT; @@ -844,7 +844,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) } err2: amdgpu_ib_free(adev, &ib, NULL); - fence_put(f); + dma_fence_put(f); err1: 
amdgpu_gfx_scratch_free(adev, scratch); return r; @@ -1564,7 +1564,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) { struct amdgpu_ring *ring = &adev->gfx.compute_ring[0]; struct amdgpu_ib ib; - struct fence *f = NULL; + struct dma_fence *f = NULL; int r, i; u32 tmp; unsigned total_size, vgpr_offset, sgpr_offset; @@ -1697,7 +1697,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) } /* wait for the GPU to finish processing the IB */ - r = fence_wait(f, false); + r = dma_fence_wait(f, false); if (r) { DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); goto fail; @@ -1718,7 +1718,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) fail: amdgpu_ib_free(adev, &ib, NULL); - fence_put(f); + dma_fence_put(f); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 03e8856b08ce..e81aa4682760 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -668,7 +668,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; - struct fence *f = NULL; + struct dma_fence *f = NULL; unsigned index; u32 tmp = 0; u64 gpu_addr; @@ -705,7 +705,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) if (r) goto err1; - r = fence_wait_timeout(f, false, timeout); + r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { DRM_ERROR("amdgpu: IB test timed out\n"); r = -ETIMEDOUT; @@ -725,7 +725,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) err1: amdgpu_ib_free(adev, &ib, NULL); - fence_put(f); + dma_fence_put(f); err0: amdgpu_wb_free(adev, index); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 6172d01e985a..77f146587c60 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -871,7 +871,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; - struct fence *f = NULL; + struct dma_fence *f = NULL; unsigned index; u32 tmp = 0; u64 gpu_addr; @@ -908,7 +908,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) if (r) goto err1; - r = fence_wait_timeout(f, false, timeout); + r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { DRM_ERROR("amdgpu: IB test timed out\n"); r = -ETIMEDOUT; @@ -927,7 +927,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) } err1: amdgpu_ib_free(adev, &ib, NULL); - fence_put(f); + dma_fence_put(f); err0: amdgpu_wb_free(adev, index); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index 14265c5c349e..3dd552ae0b59 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -274,7 +274,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; - struct fence *f = NULL; + struct dma_fence *f = NULL; unsigned index; u32 tmp = 0; u64 gpu_addr; @@ -305,7 +305,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout) if (r) goto err1; - r = fence_wait_timeout(f, false, timeout); + r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { DRM_ERROR("amdgpu: IB test timed out\n"); r = -ETIMEDOUT; @@ -325,7 +325,7 @@ static int si_dma_ring_test_ib(struct 
amdgpu_ring *ring, long timeout) err1: amdgpu_ib_free(adev, &ib, NULL); - fence_put(f); + dma_fence_put(f); err0: amdgpu_wb_free(adev, index); return r; diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h index b961a1c6caf3..dbd4fd3a810b 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h @@ -17,7 +17,7 @@ TRACE_EVENT(amd_sched_job, TP_STRUCT__entry( __field(struct amd_sched_entity *, entity) __field(struct amd_sched_job *, sched_job) - __field(struct fence *, fence) + __field(struct dma_fence *, fence) __field(const char *, name) __field(u32, job_count) __field(int, hw_job_count) @@ -42,7 +42,7 @@ TRACE_EVENT(amd_sched_process_job, TP_PROTO(struct amd_sched_fence *fence), TP_ARGS(fence), TP_STRUCT__entry( - __field(struct fence *, fence) + __field(struct dma_fence *, fence) ), TP_fast_assign( diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 963a24d46a93..5364e6a7ec8f 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -32,7 +32,7 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity); static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); -static void amd_sched_process_job(struct fence *f, struct fence_cb *cb); +static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb); struct kmem_cache *sched_fence_slab; atomic_t sched_fence_slab_ref = ATOMIC_INIT(0); @@ -141,7 +141,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched, return r; atomic_set(&entity->fence_seq, 0); - entity->fence_context = fence_context_alloc(2); + entity->fence_context = dma_fence_context_alloc(2); return 0; } @@ -221,32 +221,32 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, kfifo_free(&entity->job_queue); } -static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb) +static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb) { struct amd_sched_entity *entity = container_of(cb, struct amd_sched_entity, cb); entity->dependency = NULL; - fence_put(f); + dma_fence_put(f); amd_sched_wakeup(entity->sched); } -static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb) +static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb) { struct amd_sched_entity *entity = container_of(cb, struct amd_sched_entity, cb); entity->dependency = NULL; - fence_put(f); + dma_fence_put(f); } static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity) { struct amd_gpu_scheduler *sched = entity->sched; - struct fence * fence = entity->dependency; + struct dma_fence * fence = entity->dependency; struct amd_sched_fence *s_fence; if (fence->context == entity->fence_context) { /* We can ignore fences from ourself */ - fence_put(entity->dependency); + dma_fence_put(entity->dependency); return false; } @@ -257,23 +257,23 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity) * Fence is from the same scheduler, only need to wait for * it to be scheduled */ - fence = fence_get(&s_fence->scheduled); - fence_put(entity->dependency); + fence = dma_fence_get(&s_fence->scheduled); + dma_fence_put(entity->dependency); entity->dependency = fence; - if (!fence_add_callback(fence, &entity->cb, - amd_sched_entity_clear_dep)) + if (!dma_fence_add_callback(fence, &entity->cb, + amd_sched_entity_clear_dep)) return true; /* 
Ignore it when it is already scheduled */ - fence_put(fence); + dma_fence_put(fence); return false; } - if (!fence_add_callback(entity->dependency, &entity->cb, - amd_sched_entity_wakeup)) + if (!dma_fence_add_callback(entity->dependency, &entity->cb, + amd_sched_entity_wakeup)) return true; - fence_put(entity->dependency); + dma_fence_put(entity->dependency); return false; } @@ -354,7 +354,8 @@ static void amd_sched_job_finish(struct work_struct *work) sched->ops->free_job(s_job); } -static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb) +static void amd_sched_job_finish_cb(struct dma_fence *f, + struct dma_fence_cb *cb) { struct amd_sched_job *job = container_of(cb, struct amd_sched_job, finish_cb); @@ -388,8 +389,8 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched) spin_lock(&sched->job_list_lock); list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) { - if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) { - fence_put(s_job->s_fence->parent); + if (dma_fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) { + dma_fence_put(s_job->s_fence->parent); s_job->s_fence->parent = NULL; } } @@ -410,21 +411,21 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched) list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { struct amd_sched_fence *s_fence = s_job->s_fence; - struct fence *fence; + struct dma_fence *fence; spin_unlock(&sched->job_list_lock); fence = sched->ops->run_job(s_job); atomic_inc(&sched->hw_rq_count); if (fence) { - s_fence->parent = fence_get(fence); - r = fence_add_callback(fence, &s_fence->cb, - amd_sched_process_job); + s_fence->parent = dma_fence_get(fence); + r = dma_fence_add_callback(fence, &s_fence->cb, + amd_sched_process_job); if (r == -ENOENT) amd_sched_process_job(fence, &s_fence->cb); else if (r) DRM_ERROR("fence add callback failed (%d)\n", r); - fence_put(fence); + dma_fence_put(fence); } else { DRM_ERROR("Failed to run job!\n"); amd_sched_process_job(NULL, &s_fence->cb); @@ -446,8 +447,8 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job) struct amd_sched_entity *entity = sched_job->s_entity; trace_amd_sched_job(sched_job); - fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb, - amd_sched_job_finish_cb); + dma_fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb, + amd_sched_job_finish_cb); wait_event(entity->sched->job_scheduled, amd_sched_entity_in(sched_job)); } @@ -511,7 +512,7 @@ amd_sched_select_entity(struct amd_gpu_scheduler *sched) return entity; } -static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) +static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb) { struct amd_sched_fence *s_fence = container_of(cb, struct amd_sched_fence, cb); @@ -521,7 +522,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) amd_sched_fence_finished(s_fence); trace_amd_sched_process_job(s_fence); - fence_put(&s_fence->finished); + dma_fence_put(&s_fence->finished); wake_up_interruptible(&sched->wake_up_worker); } @@ -547,7 +548,7 @@ static int amd_sched_main(void *param) struct amd_sched_entity *entity = NULL; struct amd_sched_fence *s_fence; struct amd_sched_job *sched_job; - struct fence *fence; + struct dma_fence *fence; wait_event_interruptible(sched->wake_up_worker, (!amd_sched_blocked(sched) && @@ -569,15 +570,15 @@ static int amd_sched_main(void *param) fence = sched->ops->run_job(sched_job); amd_sched_fence_scheduled(s_fence); if (fence) { - 
s_fence->parent = fence_get(fence); - r = fence_add_callback(fence, &s_fence->cb, - amd_sched_process_job); + s_fence->parent = dma_fence_get(fence); + r = dma_fence_add_callback(fence, &s_fence->cb, + amd_sched_process_job); if (r == -ENOENT) amd_sched_process_job(fence, &s_fence->cb); else if (r) DRM_ERROR("fence add callback failed (%d)\n", r); - fence_put(fence); + dma_fence_put(fence); } else { DRM_ERROR("Failed to run job!\n"); amd_sched_process_job(NULL, &s_fence->cb); diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index 7cbbbfb502ef..876aa43b57df 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h @@ -25,7 +25,7 @@ #define _GPU_SCHEDULER_H_ #include <linux/kfifo.h> -#include <linux/fence.h> +#include <linux/dma-fence.h> struct amd_gpu_scheduler; struct amd_sched_rq; @@ -50,8 +50,8 @@ struct amd_sched_entity { atomic_t fence_seq; uint64_t fence_context; - struct fence *dependency; - struct fence_cb cb; + struct dma_fence *dependency; + struct dma_fence_cb cb; }; /** @@ -66,10 +66,10 @@ struct amd_sched_rq { }; struct amd_sched_fence { - struct fence scheduled; - struct fence finished; - struct fence_cb cb; - struct fence *parent; + struct dma_fence scheduled; + struct dma_fence finished; + struct dma_fence_cb cb; + struct dma_fence *parent; struct amd_gpu_scheduler *sched; spinlock_t lock; void *owner; @@ -79,15 +79,15 @@ struct amd_sched_job { struct amd_gpu_scheduler *sched; struct amd_sched_entity *s_entity; struct amd_sched_fence *s_fence; - struct fence_cb finish_cb; + struct dma_fence_cb finish_cb; struct work_struct finish_work; struct list_head node; struct delayed_work work_tdr; }; -extern const struct fence_ops amd_sched_fence_ops_scheduled; -extern const struct fence_ops amd_sched_fence_ops_finished; -static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f) +extern const struct dma_fence_ops amd_sched_fence_ops_scheduled; +extern const struct dma_fence_ops amd_sched_fence_ops_finished; +static inline struct amd_sched_fence *to_amd_sched_fence(struct dma_fence *f) { if (f->ops == &amd_sched_fence_ops_scheduled) return container_of(f, struct amd_sched_fence, scheduled); @@ -103,8 +103,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f) * these functions should be implemented in driver side */ struct amd_sched_backend_ops { - struct fence *(*dependency)(struct amd_sched_job *sched_job); - struct fence *(*run_job)(struct amd_sched_job *sched_job); + struct dma_fence *(*dependency)(struct amd_sched_job *sched_job); + struct dma_fence *(*run_job)(struct amd_sched_job *sched_job); void (*timedout_job)(struct amd_sched_job *sched_job); void (*free_job)(struct amd_sched_job *sched_job); }; diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c index 6b63beaf7574..c26fa298fe9e 100644 --- a/drivers/gpu/drm/amd/scheduler/sched_fence.c +++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c @@ -42,46 +42,50 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity, spin_lock_init(&fence->lock); seq = atomic_inc_return(&entity->fence_seq); - fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled, - &fence->lock, entity->fence_context, seq); - fence_init(&fence->finished, &amd_sched_fence_ops_finished, - &fence->lock, entity->fence_context + 1, seq); + dma_fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled, + &fence->lock, entity->fence_context, 
seq); + dma_fence_init(&fence->finished, &amd_sched_fence_ops_finished, + &fence->lock, entity->fence_context + 1, seq); return fence; } void amd_sched_fence_scheduled(struct amd_sched_fence *fence) { - int ret = fence_signal(&fence->scheduled); + int ret = dma_fence_signal(&fence->scheduled); if (!ret) - FENCE_TRACE(&fence->scheduled, "signaled from irq context\n"); + DMA_FENCE_TRACE(&fence->scheduled, + "signaled from irq context\n"); else - FENCE_TRACE(&fence->scheduled, "was already signaled\n"); + DMA_FENCE_TRACE(&fence->scheduled, + "was already signaled\n"); } void amd_sched_fence_finished(struct amd_sched_fence *fence) { - int ret = fence_signal(&fence->finished); + int ret = dma_fence_signal(&fence->finished); if (!ret) - FENCE_TRACE(&fence->finished, "signaled from irq context\n"); + DMA_FENCE_TRACE(&fence->finished, + "signaled from irq context\n"); else - FENCE_TRACE(&fence->finished, "was already signaled\n"); + DMA_FENCE_TRACE(&fence->finished, + "was already signaled\n"); } -static const char *amd_sched_fence_get_driver_name(struct fence *fence) +static const char *amd_sched_fence_get_driver_name(struct dma_fence *fence) { return "amd_sched"; } -static const char *amd_sched_fence_get_timeline_name(struct fence *f) +static const char *amd_sched_fence_get_timeline_name(struct dma_fence *f) { struct amd_sched_fence *fence = to_amd_sched_fence(f); return (const char *)fence->sched->name; } -static bool amd_sched_fence_enable_signaling(struct fence *f) +static bool amd_sched_fence_enable_signaling(struct dma_fence *f) { return true; } @@ -95,10 +99,10 @@ static bool amd_sched_fence_enable_signaling(struct fence *f) */ static void amd_sched_fence_free(struct rcu_head *rcu) { - struct fence *f = container_of(rcu, struct fence, rcu); + struct dma_fence *f = container_of(rcu, struct dma_fence, rcu); struct amd_sched_fence *fence = to_amd_sched_fence(f); - fence_put(fence->parent); + dma_fence_put(fence->parent); kmem_cache_free(sched_fence_slab, fence); } @@ -110,7 +114,7 @@ static void amd_sched_fence_free(struct rcu_head *rcu) * This function is called when the reference count becomes zero. * It just RCU schedules freeing up the fence. */ -static void amd_sched_fence_release_scheduled(struct fence *f) +static void amd_sched_fence_release_scheduled(struct dma_fence *f) { struct amd_sched_fence *fence = to_amd_sched_fence(f); @@ -124,27 +128,27 @@ static void amd_sched_fence_release_scheduled(struct fence *f) * * Drop the extra reference from the scheduled fence to the base fence. 
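For context on the sched_fence.c conversion above: at this point in the API a dma_fence provider still had to supply .enable_signaling and .wait explicitly (defaults for them came later), so the rename touches every callback signature. A hedged sketch of the minimal ops contract mirrored by the scheduler fences, using hypothetical ex_-prefixed names rather than the amd_sched ones:

    #include <linux/dma-fence.h>

    static const char *ex_get_driver_name(struct dma_fence *f)
    {
            return "example";
    }

    static const char *ex_get_timeline_name(struct dma_fence *f)
    {
            return "example-timeline";
    }

    /* Returning true means the fence will signal without further help;
     * the scheduler fences above do the same, since signaling is driven
     * by the scheduler itself.
     */
    static bool ex_enable_signaling(struct dma_fence *f)
    {
            return true;
    }

    static const struct dma_fence_ops ex_fence_ops = {
            .get_driver_name   = ex_get_driver_name,
            .get_timeline_name = ex_get_timeline_name,
            .enable_signaling  = ex_enable_signaling,
            .wait              = dma_fence_default_wait,
    };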
*/ -static void amd_sched_fence_release_finished(struct fence *f) +static void amd_sched_fence_release_finished(struct dma_fence *f) { struct amd_sched_fence *fence = to_amd_sched_fence(f); - fence_put(&fence->scheduled); + dma_fence_put(&fence->scheduled); } -const struct fence_ops amd_sched_fence_ops_scheduled = { +const struct dma_fence_ops amd_sched_fence_ops_scheduled = { .get_driver_name = amd_sched_fence_get_driver_name, .get_timeline_name = amd_sched_fence_get_timeline_name, .enable_signaling = amd_sched_fence_enable_signaling, .signaled = NULL, - .wait = fence_default_wait, + .wait = dma_fence_default_wait, .release = amd_sched_fence_release_scheduled, }; -const struct fence_ops amd_sched_fence_ops_finished = { +const struct dma_fence_ops amd_sched_fence_ops_finished = { .get_driver_name = amd_sched_fence_get_driver_name, .get_timeline_name = amd_sched_fence_get_timeline_name, .enable_signaling = amd_sched_fence_enable_signaling, .signaled = NULL, - .wait = fence_default_wait, + .wait = dma_fence_default_wait, .release = amd_sched_fence_release_finished, }; diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index fb6a418ce6be..6477d1a65266 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -453,7 +453,8 @@ static int hdlcd_probe(struct platform_device *pdev) return -EAGAIN; } - component_match_add(&pdev->dev, &match, compare_dev, port); + drm_of_component_match_add(&pdev->dev, &match, compare_dev, port); + of_node_put(port); return component_master_add_with_match(&pdev->dev, &hdlcd_master_ops, match); diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index 9280358b8f15..9f4739452a25 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -493,7 +493,9 @@ static int malidp_platform_probe(struct platform_device *pdev) return -EAGAIN; } - component_match_add(&pdev->dev, &match, malidp_compare_dev, port); + drm_of_component_match_add(&pdev->dev, &match, malidp_compare_dev, + port); + of_node_put(port); return component_master_add_with_match(&pdev->dev, &malidp_master_ops, match); } diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 1e0e68f608e4..94e46da9a758 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -254,7 +254,7 @@ static void armada_add_endpoints(struct device *dev, continue; } - component_match_add(dev, match, compare_of, remote); + drm_of_component_match_add(dev, match, compare_of, remote); of_node_put(remote); } } diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 10e12e74fc9f..bd6acc829f97 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -57,6 +57,13 @@ config DRM_PARADE_PS8622 ---help--- Parade eDP-LVDS bridge chip driver. +config DRM_SIL_SII8620 + tristate "Silicon Image SII8620 HDMI/MHL bridge" + depends on OF + select DRM_KMS_HELPER + help + Silicon Image SII8620 HDMI/MHL bridge chip driver. 
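The hdlcd, malidp and armada hunks above all follow the same conversion: drm_of_component_match_add(), new in this merge, registers the OF node through component_match_add_release() and takes its own reference on it, so the caller can immediately drop the reference it obtained from the graph walk. A sketch of the resulting probe-side pattern, with ex_compare and ex_master_ops as hypothetical stand-ins:

    #include <drm/drm_of.h>
    #include <linux/component.h>
    #include <linux/of.h>

    /* Hypothetical compare callback; real drivers match against the
     * OF node the component framework hands back.
     */
    static int ex_compare(struct device *dev, void *data)
    {
            return dev->of_node == data;
    }

    static const struct component_master_ops ex_master_ops; /* stub for the sketch */

    static int ex_add_port(struct device *dev, struct device_node *port)
    {
            struct component_match *match = NULL;

            drm_of_component_match_add(dev, &match, ex_compare, port);
            of_node_put(port); /* match now holds its own reference */

            return component_master_add_with_match(dev, &ex_master_ops, match);
    }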
+ config DRM_SII902X tristate "Silicon Image sii902x RGB/HDMI bridge" depends on OF diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index cdf3a3cf765d..97ed1a5fea9a 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o +obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o obj-$(CONFIG_DRM_SII902X) += sii902x.o obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/ diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c new file mode 100644 index 000000000000..b2c267df7ee7 --- /dev/null +++ b/drivers/gpu/drm/bridge/sil-sii8620.c @@ -0,0 +1,1564 @@ +/* + * Silicon Image SiI8620 HDMI/MHL bridge driver + * + * Copyright (C) 2015, Samsung Electronics Co., Ltd. + * Andrzej Hajda <a.hajda@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <drm/bridge/mhl.h> +#include <drm/drm_crtc.h> +#include <drm/drm_edid.h> + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/regulator/consumer.h> +#include <linux/slab.h> + +#include "sil-sii8620.h" + +#define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3) + +enum sii8620_mode { + CM_DISCONNECTED, + CM_DISCOVERY, + CM_MHL1, + CM_MHL3, + CM_ECBUS_S +}; + +enum sii8620_sink_type { + SINK_NONE, + SINK_HDMI, + SINK_DVI +}; + +enum sii8620_mt_state { + MT_STATE_READY, + MT_STATE_BUSY, + MT_STATE_DONE +}; + +struct sii8620 { + struct drm_bridge bridge; + struct device *dev; + struct clk *clk_xtal; + struct gpio_desc *gpio_reset; + struct gpio_desc *gpio_int; + struct regulator_bulk_data supplies[2]; + struct mutex lock; /* context lock, protects fields below */ + int error; + enum sii8620_mode mode; + enum sii8620_sink_type sink_type; + u8 cbus_status; + u8 stat[MHL_DST_SIZE]; + u8 xstat[MHL_XDS_SIZE]; + u8 devcap[MHL_DCAP_SIZE]; + u8 xdevcap[MHL_XDC_SIZE]; + u8 avif[19]; + struct edid *edid; + unsigned int gen2_write_burst:1; + enum sii8620_mt_state mt_state; + struct list_head mt_queue; +}; + +struct sii8620_mt_msg; + +typedef void (*sii8620_mt_msg_cb)(struct sii8620 *ctx, + struct sii8620_mt_msg *msg); + +struct sii8620_mt_msg { + struct list_head node; + u8 reg[4]; + u8 ret; + sii8620_mt_msg_cb send; + sii8620_mt_msg_cb recv; +}; + +static const u8 sii8620_i2c_page[] = { + 0x39, /* Main System */ + 0x3d, /* TDM and HSIC */ + 0x49, /* TMDS Receiver, MHL EDID */ + 0x4d, /* eMSC, HDCP, HSIC */ + 0x5d, /* MHL Spec */ + 0x64, /* MHL CBUS */ + 0x59, /* Hardware TPI (Transmitter Programming Interface) */ + 0x61, /* eCBUS-S, eCBUS-D */ +}; + +static void sii8620_fetch_edid(struct sii8620 *ctx); +static void sii8620_set_upstream_edid(struct sii8620 *ctx); +static void sii8620_enable_hpd(struct sii8620 *ctx); +static void sii8620_mhl_disconnected(struct sii8620 *ctx); + +static int sii8620_clear_error(struct sii8620 *ctx) +{ + int ret = ctx->error; + + ctx->error = 0; + return ret; +} + +static void sii8620_read_buf(struct sii8620 *ctx, 
u16 addr, u8 *buf, int len) +{ + struct device *dev = ctx->dev; + struct i2c_client *client = to_i2c_client(dev); + u8 data = addr; + struct i2c_msg msg[] = { + { + .addr = sii8620_i2c_page[addr >> 8], + .flags = client->flags, + .len = 1, + .buf = &data + }, + { + .addr = sii8620_i2c_page[addr >> 8], + .flags = client->flags | I2C_M_RD, + .len = len, + .buf = buf + }, + }; + int ret; + + if (ctx->error) + return; + + ret = i2c_transfer(client->adapter, msg, 2); + dev_dbg(dev, "read at %04x: %*ph, %d\n", addr, len, buf, ret); + + if (ret != 2) { + dev_err(dev, "Read at %#06x of %d bytes failed with code %d.\n", + addr, len, ret); + ctx->error = ret < 0 ? ret : -EIO; + } +} + +static u8 sii8620_readb(struct sii8620 *ctx, u16 addr) +{ + u8 ret; + + sii8620_read_buf(ctx, addr, &ret, 1); + return ret; +} + +static void sii8620_write_buf(struct sii8620 *ctx, u16 addr, const u8 *buf, + int len) +{ + struct device *dev = ctx->dev; + struct i2c_client *client = to_i2c_client(dev); + u8 data[2]; + struct i2c_msg msg = { + .addr = sii8620_i2c_page[addr >> 8], + .flags = client->flags, + .len = len + 1, + }; + int ret; + + if (ctx->error) + return; + + if (len > 1) { + msg.buf = kmalloc(len + 1, GFP_KERNEL); + if (!msg.buf) { + ctx->error = -ENOMEM; + return; + } + memcpy(msg.buf + 1, buf, len); + } else { + msg.buf = data; + msg.buf[1] = *buf; + } + + msg.buf[0] = addr; + + ret = i2c_transfer(client->adapter, &msg, 1); + dev_dbg(dev, "write at %04x: %*ph, %d\n", addr, len, buf, ret); + + if (ret != 1) { + dev_err(dev, "Write at %#06x of %*ph failed with code %d.\n", + addr, len, buf, ret); + ctx->error = ret ?: -EIO; + } + + if (len > 1) + kfree(msg.buf); +} + +#define sii8620_write(ctx, addr, arr...) \ +({\ + u8 d[] = { arr }; \ + sii8620_write_buf(ctx, addr, d, ARRAY_SIZE(d)); \ +}) + +static void __sii8620_write_seq(struct sii8620 *ctx, const u16 *seq, int len) +{ + int i; + + for (i = 0; i < len; i += 2) + sii8620_write(ctx, seq[i], seq[i + 1]); +} + +#define sii8620_write_seq(ctx, seq...) \ +({\ + const u16 d[] = { seq }; \ + __sii8620_write_seq(ctx, d, ARRAY_SIZE(d)); \ +}) + +#define sii8620_write_seq_static(ctx, seq...) 
\ +({\ + static const u16 d[] = { seq }; \ + __sii8620_write_seq(ctx, d, ARRAY_SIZE(d)); \ +}) + +static void sii8620_setbits(struct sii8620 *ctx, u16 addr, u8 mask, u8 val) +{ + val = (val & mask) | (sii8620_readb(ctx, addr) & ~mask); + sii8620_write(ctx, addr, val); +} + +static void sii8620_mt_cleanup(struct sii8620 *ctx) +{ + struct sii8620_mt_msg *msg, *n; + + list_for_each_entry_safe(msg, n, &ctx->mt_queue, node) { + list_del(&msg->node); + kfree(msg); + } + ctx->mt_state = MT_STATE_READY; +} + +static void sii8620_mt_work(struct sii8620 *ctx) +{ + struct sii8620_mt_msg *msg; + + if (ctx->error) + return; + if (ctx->mt_state == MT_STATE_BUSY || list_empty(&ctx->mt_queue)) + return; + + if (ctx->mt_state == MT_STATE_DONE) { + ctx->mt_state = MT_STATE_READY; + msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, + node); + if (msg->recv) + msg->recv(ctx, msg); + list_del(&msg->node); + kfree(msg); + } + + if (ctx->mt_state != MT_STATE_READY || list_empty(&ctx->mt_queue)) + return; + + ctx->mt_state = MT_STATE_BUSY; + msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, node); + if (msg->send) + msg->send(ctx, msg); +} + +static void sii8620_mt_msc_cmd_send(struct sii8620 *ctx, + struct sii8620_mt_msg *msg) +{ + switch (msg->reg[0]) { + case MHL_WRITE_STAT: + case MHL_SET_INT: + sii8620_write_buf(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg + 1, 2); + sii8620_write(ctx, REG_MSC_COMMAND_START, + BIT_MSC_COMMAND_START_WRITE_STAT); + break; + case MHL_MSC_MSG: + sii8620_write_buf(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg, 3); + sii8620_write(ctx, REG_MSC_COMMAND_START, + BIT_MSC_COMMAND_START_MSC_MSG); + break; + default: + dev_err(ctx->dev, "%s: command %#x not supported\n", __func__, + msg->reg[0]); + } +} + +static struct sii8620_mt_msg *sii8620_mt_msg_new(struct sii8620 *ctx) +{ + struct sii8620_mt_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL); + + if (!msg) + ctx->error = -ENOMEM; + else + list_add_tail(&msg->node, &ctx->mt_queue); + + return msg; +} + +static void sii8620_mt_msc_cmd(struct sii8620 *ctx, u8 cmd, u8 arg1, u8 arg2) +{ + struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx); + + if (!msg) + return; + + msg->reg[0] = cmd; + msg->reg[1] = arg1; + msg->reg[2] = arg2; + msg->send = sii8620_mt_msc_cmd_send; +} + +static void sii8620_mt_write_stat(struct sii8620 *ctx, u8 reg, u8 val) +{ + sii8620_mt_msc_cmd(ctx, MHL_WRITE_STAT, reg, val); +} + +static inline void sii8620_mt_set_int(struct sii8620 *ctx, u8 irq, u8 mask) +{ + sii8620_mt_msc_cmd(ctx, MHL_SET_INT, irq, mask); +} + +static void sii8620_mt_msc_msg(struct sii8620 *ctx, u8 cmd, u8 data) +{ + sii8620_mt_msc_cmd(ctx, MHL_MSC_MSG, cmd, data); +} + +static void sii8620_mt_rap(struct sii8620 *ctx, u8 code) +{ + sii8620_mt_msc_msg(ctx, MHL_MSC_MSG_RAP, code); +} + +static void sii8620_mt_read_devcap_send(struct sii8620 *ctx, + struct sii8620_mt_msg *msg) +{ + u8 ctrl = BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP + | BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO + | BIT_EDID_CTRL_EDID_MODE_EN; + + if (msg->reg[0] == MHL_READ_XDEVCAP) + ctrl |= BIT_EDID_CTRL_XDEVCAP_EN; + + sii8620_write_seq(ctx, + REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE, + REG_EDID_CTRL, ctrl, + REG_TPI_CBUS_START, BIT_TPI_CBUS_START_GET_DEVCAP_START + ); +} + +/* copy src to dst and set changed bits in src */ +static void sii8620_update_array(u8 *dst, u8 *src, int count) +{ + while (--count >= 0) { + *src ^= *dst; + *dst++ ^= *src++; + } +} + +static void sii8620_mr_devcap(struct sii8620 *ctx) +{ + static const char * const sink_str[] = { + [SINK_NONE] = "NONE", + [SINK_HDMI] 
= "HDMI", + [SINK_DVI] = "DVI" + }; + + u8 dcap[MHL_DCAP_SIZE]; + char sink_name[20]; + struct device *dev = ctx->dev; + + sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, dcap, MHL_DCAP_SIZE); + if (ctx->error < 0) + return; + + dev_info(dev, "dcap: %*ph\n", MHL_DCAP_SIZE, dcap); + dev_info(dev, "detected dongle MHL %d.%d, ChipID %02x%02x:%02x%02x\n", + dcap[MHL_DCAP_MHL_VERSION] / 16, + dcap[MHL_DCAP_MHL_VERSION] % 16, dcap[MHL_DCAP_ADOPTER_ID_H], + dcap[MHL_DCAP_ADOPTER_ID_L], dcap[MHL_DCAP_DEVICE_ID_H], + dcap[MHL_DCAP_DEVICE_ID_L]); + sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE); + + if (!(dcap[MHL_DCAP_CAT] & MHL_DCAP_CAT_SINK)) + return; + + sii8620_fetch_edid(ctx); + if (!ctx->edid) { + dev_err(ctx->dev, "Cannot fetch EDID\n"); + sii8620_mhl_disconnected(ctx); + return; + } + + if (drm_detect_hdmi_monitor(ctx->edid)) + ctx->sink_type = SINK_HDMI; + else + ctx->sink_type = SINK_DVI; + + drm_edid_get_monitor_name(ctx->edid, sink_name, ARRAY_SIZE(sink_name)); + + dev_info(dev, "detected sink(type: %s): %s\n", + sink_str[ctx->sink_type], sink_name); + sii8620_set_upstream_edid(ctx); + sii8620_enable_hpd(ctx); +} + +static void sii8620_mr_xdevcap(struct sii8620 *ctx) +{ + sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, ctx->xdevcap, + MHL_XDC_SIZE); + + sii8620_mt_write_stat(ctx, MHL_XDS_REG(CURR_ECBUS_MODE), + MHL_XDS_ECBUS_S | MHL_XDS_SLOT_MODE_8BIT); + sii8620_mt_rap(ctx, MHL_RAP_CBUS_MODE_UP); +} + +static void sii8620_mt_read_devcap_recv(struct sii8620 *ctx, + struct sii8620_mt_msg *msg) +{ + u8 ctrl = BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP + | BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO + | BIT_EDID_CTRL_EDID_MODE_EN; + + if (msg->reg[0] == MHL_READ_XDEVCAP) + ctrl |= BIT_EDID_CTRL_XDEVCAP_EN; + + sii8620_write_seq(ctx, + REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE | BIT_INTR9_EDID_DONE + | BIT_INTR9_EDID_ERROR, + REG_EDID_CTRL, ctrl, + REG_EDID_FIFO_ADDR, 0 + ); + + if (msg->reg[0] == MHL_READ_XDEVCAP) + sii8620_mr_xdevcap(ctx); + else + sii8620_mr_devcap(ctx); +} + +static void sii8620_mt_read_devcap(struct sii8620 *ctx, bool xdevcap) +{ + struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx); + + if (!msg) + return; + + msg->reg[0] = xdevcap ? 
MHL_READ_XDEVCAP : MHL_READ_DEVCAP; + msg->send = sii8620_mt_read_devcap_send; + msg->recv = sii8620_mt_read_devcap_recv; +} + +static void sii8620_fetch_edid(struct sii8620 *ctx) +{ + u8 lm_ddc, ddc_cmd, int3, cbus; + int fetched, i; + int edid_len = EDID_LENGTH; + u8 *edid; + + sii8620_readb(ctx, REG_CBUS_STATUS); + lm_ddc = sii8620_readb(ctx, REG_LM_DDC); + ddc_cmd = sii8620_readb(ctx, REG_DDC_CMD); + + sii8620_write_seq(ctx, + REG_INTR9_MASK, 0, + REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO, + REG_HDCP2X_POLL_CS, 0x71, + REG_HDCP2X_CTRL_0, BIT_HDCP2X_CTRL_0_HDCP2X_HDCPTX, + REG_LM_DDC, lm_ddc | BIT_LM_DDC_SW_TPI_EN_DISABLED, + ); + + for (i = 0; i < 256; ++i) { + u8 ddc_stat = sii8620_readb(ctx, REG_DDC_STATUS); + + if (!(ddc_stat & BIT_DDC_STATUS_DDC_I2C_IN_PROG)) + break; + sii8620_write(ctx, REG_DDC_STATUS, + BIT_DDC_STATUS_DDC_FIFO_EMPTY); + } + + sii8620_write(ctx, REG_DDC_ADDR, 0x50 << 1); + + edid = kmalloc(EDID_LENGTH, GFP_KERNEL); + if (!edid) { + ctx->error = -ENOMEM; + return; + } + +#define FETCH_SIZE 16 + for (fetched = 0; fetched < edid_len; fetched += FETCH_SIZE) { + sii8620_readb(ctx, REG_DDC_STATUS); + sii8620_write_seq(ctx, + REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_DDC_CMD_ABORT, + REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_DDC_CMD_CLEAR_FIFO, + REG_DDC_STATUS, BIT_DDC_STATUS_DDC_FIFO_EMPTY + ); + sii8620_write_seq(ctx, + REG_DDC_SEGM, fetched >> 8, + REG_DDC_OFFSET, fetched & 0xff, + REG_DDC_DIN_CNT1, FETCH_SIZE, + REG_DDC_DIN_CNT2, 0, + REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK + ); + + do { + int3 = sii8620_readb(ctx, REG_INTR3); + cbus = sii8620_readb(ctx, REG_CBUS_STATUS); + + if (int3 & BIT_DDC_CMD_DONE) + break; + + if (!(cbus & BIT_CBUS_STATUS_CBUS_CONNECTED)) { + kfree(edid); + edid = NULL; + goto end; + } + } while (1); + + sii8620_readb(ctx, REG_DDC_STATUS); + while (sii8620_readb(ctx, REG_DDC_DOUT_CNT) < FETCH_SIZE) + usleep_range(10, 20); + + sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE); + if (fetched + FETCH_SIZE == EDID_LENGTH) { + u8 ext = ((struct edid *)edid)->extensions; + + if (ext) { + u8 *new_edid; + + edid_len += ext * EDID_LENGTH; + new_edid = krealloc(edid, edid_len, GFP_KERNEL); + if (!new_edid) { + kfree(edid); + ctx->error = -ENOMEM; + return; + } + edid = new_edid; + } + } + + if (fetched + FETCH_SIZE == edid_len) + sii8620_write(ctx, REG_INTR3, int3); + } + + sii8620_write(ctx, REG_LM_DDC, lm_ddc); + +end: + kfree(ctx->edid); + ctx->edid = (struct edid *)edid; +} + +static void sii8620_set_upstream_edid(struct sii8620 *ctx) +{ + sii8620_setbits(ctx, REG_DPD, BIT_DPD_PDNRX12 | BIT_DPD_PDIDCK_N + | BIT_DPD_PD_MHL_CLK_N, 0xff); + + sii8620_write_seq_static(ctx, + REG_RX_HDMI_CTRL3, 0x00, + REG_PKT_FILTER_0, 0xFF, + REG_PKT_FILTER_1, 0xFF, + REG_ALICE0_BW_I2C, 0x06 + ); + + sii8620_setbits(ctx, REG_RX_HDMI_CLR_BUFFER, + BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_EN, 0xff); + + sii8620_write_seq_static(ctx, + REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO + | BIT_EDID_CTRL_EDID_MODE_EN, + REG_EDID_FIFO_ADDR, 0, + ); + + sii8620_write_buf(ctx, REG_EDID_FIFO_WR_DATA, (u8 *)ctx->edid, + (ctx->edid->extensions + 1) * EDID_LENGTH); + + sii8620_write_seq_static(ctx, + REG_EDID_CTRL, BIT_EDID_CTRL_EDID_PRIME_VALID + | BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO + | BIT_EDID_CTRL_EDID_MODE_EN, + REG_INTR5_MASK, BIT_INTR_SCDT_CHANGE, + REG_INTR9_MASK, 0 + ); +} + +static void sii8620_xtal_set_rate(struct sii8620 *ctx) +{ + static const struct { + unsigned int rate; + u8 div; + u8 tp1; + } rates[] = { + { 19200, 0x04, 0x53 }, + { 20000, 0x04, 0x62 
}, + { 24000, 0x05, 0x75 }, + { 30000, 0x06, 0x92 }, + { 38400, 0x0c, 0xbc }, + }; + unsigned long rate = clk_get_rate(ctx->clk_xtal) / 1000; + int i; + + for (i = 0; i < ARRAY_SIZE(rates) - 1; ++i) + if (rate <= rates[i].rate) + break; + + if (rate != rates[i].rate) + dev_err(ctx->dev, "xtal clock rate(%lukHz) not supported, setting MHL for %ukHz.\n", + rate, rates[i].rate); + + sii8620_write(ctx, REG_DIV_CTL_MAIN, rates[i].div); + sii8620_write(ctx, REG_HDCP2X_TP1, rates[i].tp1); +} + +static int sii8620_hw_on(struct sii8620 *ctx) +{ + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies); + if (ret) + return ret; + usleep_range(10000, 20000); + return clk_prepare_enable(ctx->clk_xtal); +} + +static int sii8620_hw_off(struct sii8620 *ctx) +{ + clk_disable_unprepare(ctx->clk_xtal); + gpiod_set_value(ctx->gpio_reset, 1); + return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); +} + +static void sii8620_hw_reset(struct sii8620 *ctx) +{ + usleep_range(10000, 20000); + gpiod_set_value(ctx->gpio_reset, 0); + usleep_range(5000, 20000); + gpiod_set_value(ctx->gpio_reset, 1); + usleep_range(10000, 20000); + gpiod_set_value(ctx->gpio_reset, 0); + msleep(300); +} + +static void sii8620_cbus_reset(struct sii8620 *ctx) +{ + sii8620_write_seq_static(ctx, + REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST + | BIT_PWD_SRST_CBUS_RST_SW_EN, + REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST_SW_EN + ); +} + +static void sii8620_set_auto_zone(struct sii8620 *ctx) +{ + if (ctx->mode != CM_MHL1) { + sii8620_write_seq_static(ctx, + REG_TX_ZONE_CTL1, 0x0, + REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X + | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL + | BIT_MHL_PLL_CTL0_ZONE_MASK_OE + ); + } else { + sii8620_write_seq_static(ctx, + REG_TX_ZONE_CTL1, VAL_TX_ZONE_CTL1_TX_ZONE_CTRL_MODE, + REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X + | BIT_MHL_PLL_CTL0_ZONE_MASK_OE + ); + } +} + +static void sii8620_stop_video(struct sii8620 *ctx) +{ + u8 uninitialized_var(val); + + sii8620_write_seq_static(ctx, + REG_TPI_INTR_EN, 0, + REG_HDCP2X_INTR0_MASK, 0, + REG_TPI_COPP_DATA2, 0, + REG_TPI_INTR_ST0, ~0, + ); + + switch (ctx->sink_type) { + case SINK_DVI: + val = BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN + | BIT_TPI_SC_TPI_AV_MUTE; + break; + case SINK_HDMI: + val = BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN + | BIT_TPI_SC_TPI_AV_MUTE + | BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI; + break; + default: + return; + } + + sii8620_write(ctx, REG_TPI_SC, val); +} + +static void sii8620_start_hdmi(struct sii8620 *ctx) +{ + sii8620_write_seq_static(ctx, + REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL + | BIT_RX_HDMI_CTRL2_USE_AV_MUTE, + REG_VID_OVRRD, BIT_VID_OVRRD_PP_AUTO_DISABLE + | BIT_VID_OVRRD_M1080P_OVRRD, + REG_VID_MODE, 0, + REG_MHL_TOP_CTL, 0x1, + REG_MHLTX_CTL6, 0xa0, + REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL), + REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL), + ); + + sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), + MHL_DST_LM_CLK_MODE_NORMAL | + MHL_DST_LM_PATH_ENABLED); + + sii8620_set_auto_zone(ctx); + + sii8620_write(ctx, REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI); + + sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif, + ARRAY_SIZE(ctx->avif)); + + sii8620_write(ctx, REG_PKT_FILTER_0, 0xa1, 0x2); +} + +static void sii8620_start_video(struct sii8620 *ctx) +{ + if (ctx->mode < CM_MHL3) + sii8620_stop_video(ctx); + + switch (ctx->sink_type) { + case SINK_HDMI: + sii8620_start_hdmi(ctx); + break; + case SINK_DVI: + default: + break; + } +} + +static void sii8620_disable_hpd(struct sii8620 *ctx) +{ + 
sii8620_setbits(ctx, REG_EDID_CTRL, BIT_EDID_CTRL_EDID_PRIME_VALID, 0); + sii8620_write_seq_static(ctx, + REG_HPD_CTRL, BIT_HPD_CTRL_HPD_OUT_OVR_EN, + REG_INTR8_MASK, 0 + ); +} + +static void sii8620_enable_hpd(struct sii8620 *ctx) +{ + sii8620_setbits(ctx, REG_TMDS_CSTAT_P3, + BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS + | BIT_TMDS_CSTAT_P3_CLR_AVI, ~0); + sii8620_write_seq_static(ctx, + REG_HPD_CTRL, BIT_HPD_CTRL_HPD_OUT_OVR_EN + | BIT_HPD_CTRL_HPD_HIGH, + ); +} + +static void sii8620_enable_gen2_write_burst(struct sii8620 *ctx) +{ + if (ctx->gen2_write_burst) + return; + + sii8620_write_seq_static(ctx, + REG_MDT_RCV_TIMEOUT, 100, + REG_MDT_RCV_CTRL, BIT_MDT_RCV_CTRL_MDT_RCV_EN + ); + ctx->gen2_write_burst = 1; +} + +static void sii8620_disable_gen2_write_burst(struct sii8620 *ctx) +{ + if (!ctx->gen2_write_burst) + return; + + sii8620_write_seq_static(ctx, + REG_MDT_XMIT_CTRL, 0, + REG_MDT_RCV_CTRL, 0 + ); + ctx->gen2_write_burst = 0; +} + +static void sii8620_start_gen2_write_burst(struct sii8620 *ctx) +{ + sii8620_write_seq_static(ctx, + REG_MDT_INT_1_MASK, BIT_MDT_RCV_TIMEOUT + | BIT_MDT_RCV_SM_ABORT_PKT_RCVD | BIT_MDT_RCV_SM_ERROR + | BIT_MDT_XMIT_TIMEOUT | BIT_MDT_XMIT_SM_ABORT_PKT_RCVD + | BIT_MDT_XMIT_SM_ERROR, + REG_MDT_INT_0_MASK, BIT_MDT_XFIFO_EMPTY + | BIT_MDT_IDLE_AFTER_HAWB_DISABLE + | BIT_MDT_RFIFO_DATA_RDY + ); + sii8620_enable_gen2_write_burst(ctx); +} + +static void sii8620_mhl_discover(struct sii8620 *ctx) +{ + sii8620_write_seq_static(ctx, + REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT + | BIT_DISC_CTRL9_DISC_PULSE_PROCEED, + REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_5K, VAL_PUP_20K), + REG_CBUS_DISC_INTR0_MASK, BIT_MHL3_EST_INT + | BIT_MHL_EST_INT + | BIT_NOT_MHL_EST_INT + | BIT_CBUS_MHL3_DISCON_INT + | BIT_CBUS_MHL12_DISCON_INT + | BIT_RGND_READY_INT, + REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X + | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL + | BIT_MHL_PLL_CTL0_ZONE_MASK_OE, + REG_MHL_DP_CTL0, BIT_MHL_DP_CTL0_DP_OE + | BIT_MHL_DP_CTL0_TX_OE_OVR, + REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE, + REG_MHL_DP_CTL1, 0xA2, + REG_MHL_DP_CTL2, 0x03, + REG_MHL_DP_CTL3, 0x35, + REG_MHL_DP_CTL5, 0x02, + REG_MHL_DP_CTL6, 0x02, + REG_MHL_DP_CTL7, 0x03, + REG_COC_CTLC, 0xFF, + REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 + | BIT_DPD_OSC_EN | BIT_DPD_PWRON_HSIC, + REG_COC_INTR_MASK, BIT_COC_PLL_LOCK_STATUS_CHANGE + | BIT_COC_CALIBRATION_DONE, + REG_CBUS_INT_1_MASK, BIT_CBUS_MSC_ABORT_RCVD + | BIT_CBUS_CMD_ABORT, + REG_CBUS_INT_0_MASK, BIT_CBUS_MSC_MT_DONE + | BIT_CBUS_HPD_CHG + | BIT_CBUS_MSC_MR_WRITE_STAT + | BIT_CBUS_MSC_MR_MSC_MSG + | BIT_CBUS_MSC_MR_WRITE_BURST + | BIT_CBUS_MSC_MR_SET_INT + | BIT_CBUS_MSC_MT_DONE_NACK + ); +} + +static void sii8620_peer_specific_init(struct sii8620 *ctx) +{ + if (ctx->mode == CM_MHL3) + sii8620_write_seq_static(ctx, + REG_SYS_CTRL1, BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD, + REG_EMSCINTRMASK1, + BIT_EMSCINTR1_EMSC_TRAINING_COMMA_ERR + ); + else + sii8620_write_seq_static(ctx, + REG_HDCP2X_INTR0_MASK, 0x00, + REG_EMSCINTRMASK1, 0x00, + REG_HDCP2X_INTR0, 0xFF, + REG_INTR1, 0xFF, + REG_SYS_CTRL1, BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD + | BIT_SYS_CTRL1_TX_CTRL_HDMI + ); +} + +#define SII8620_MHL_VERSION 0x32 +#define SII8620_SCRATCHPAD_SIZE 16 +#define SII8620_INT_STAT_SIZE 0x33 + +static void sii8620_set_dev_cap(struct sii8620 *ctx) +{ + static const u8 devcap[MHL_DCAP_SIZE] = { + [MHL_DCAP_MHL_VERSION] = SII8620_MHL_VERSION, + [MHL_DCAP_CAT] = MHL_DCAP_CAT_SOURCE | MHL_DCAP_CAT_POWER, + [MHL_DCAP_ADOPTER_ID_H] = 0x01, + [MHL_DCAP_ADOPTER_ID_L] = 0x41, + [MHL_DCAP_VID_LINK_MODE] = 
MHL_DCAP_VID_LINK_RGB444 + | MHL_DCAP_VID_LINK_PPIXEL + | MHL_DCAP_VID_LINK_16BPP, + [MHL_DCAP_AUD_LINK_MODE] = MHL_DCAP_AUD_LINK_2CH, + [MHL_DCAP_VIDEO_TYPE] = MHL_DCAP_VT_GRAPHICS, + [MHL_DCAP_LOG_DEV_MAP] = MHL_DCAP_LD_GUI, + [MHL_DCAP_BANDWIDTH] = 0x0f, + [MHL_DCAP_FEATURE_FLAG] = MHL_DCAP_FEATURE_RCP_SUPPORT + | MHL_DCAP_FEATURE_RAP_SUPPORT + | MHL_DCAP_FEATURE_SP_SUPPORT, + [MHL_DCAP_SCRATCHPAD_SIZE] = SII8620_SCRATCHPAD_SIZE, + [MHL_DCAP_INT_STAT_SIZE] = SII8620_INT_STAT_SIZE, + }; + static const u8 xdcap[MHL_XDC_SIZE] = { + [MHL_XDC_ECBUS_SPEEDS] = MHL_XDC_ECBUS_S_075 + | MHL_XDC_ECBUS_S_8BIT, + [MHL_XDC_TMDS_SPEEDS] = MHL_XDC_TMDS_150 + | MHL_XDC_TMDS_300 | MHL_XDC_TMDS_600, + [MHL_XDC_ECBUS_ROLES] = MHL_XDC_DEV_HOST, + [MHL_XDC_LOG_DEV_MAPX] = MHL_XDC_LD_PHONE, + }; + + sii8620_write_buf(ctx, REG_MHL_DEVCAP_0, devcap, ARRAY_SIZE(devcap)); + sii8620_write_buf(ctx, REG_MHL_EXTDEVCAP_0, xdcap, ARRAY_SIZE(xdcap)); +} + +static void sii8620_mhl_init(struct sii8620 *ctx) +{ + sii8620_write_seq_static(ctx, + REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K), + REG_CBUS_MSC_COMPAT_CTRL, + BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN, + ); + + sii8620_peer_specific_init(ctx); + + sii8620_disable_hpd(ctx); + + sii8620_write_seq_static(ctx, + REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO, + REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT + | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS, + REG_TMDS0_CCTRL1, 0x90, + REG_TMDS_CLK_EN, 0x01, + REG_TMDS_CH_EN, 0x11, + REG_BGR_BIAS, 0x87, + REG_ALICE0_ZONE_CTRL, 0xE8, + REG_ALICE0_MODE_CTRL, 0x04, + ); + sii8620_setbits(ctx, REG_LM_DDC, BIT_LM_DDC_SW_TPI_EN_DISABLED, 0); + sii8620_write_seq_static(ctx, + REG_TPI_HW_OPT3, 0x76, + REG_TMDS_CCTRL, BIT_TMDS_CCTRL_TMDS_OE, + REG_TPI_DTD_B2, 79, + ); + sii8620_set_dev_cap(ctx); + sii8620_write_seq_static(ctx, + REG_MDT_XMIT_TIMEOUT, 100, + REG_MDT_XMIT_CTRL, 0x03, + REG_MDT_XFIFO_STAT, 0x00, + REG_MDT_RCV_TIMEOUT, 100, + REG_CBUS_LINK_CTRL_8, 0x1D, + ); + + sii8620_start_gen2_write_burst(ctx); + sii8620_write_seq_static(ctx, + REG_BIST_CTRL, 0x00, + REG_COC_CTL1, 0x10, + REG_COC_CTL2, 0x18, + REG_COC_CTLF, 0x07, + REG_COC_CTL11, 0xF8, + REG_COC_CTL17, 0x61, + REG_COC_CTL18, 0x46, + REG_COC_CTL19, 0x15, + REG_COC_CTL1A, 0x01, + REG_MHL_COC_CTL3, BIT_MHL_COC_CTL3_COC_AECHO_EN, + REG_MHL_COC_CTL4, 0x2D, + REG_MHL_COC_CTL5, 0xF9, + REG_MSC_HEARTBEAT_CTRL, 0x27, + ); + sii8620_disable_gen2_write_burst(ctx); + + /* currently MHL3 is not supported, so we force version to 0 */ + sii8620_mt_write_stat(ctx, MHL_DST_REG(VERSION), 0); + sii8620_mt_write_stat(ctx, MHL_DST_REG(CONNECTED_RDY), + MHL_DST_CONN_DCAP_RDY | MHL_DST_CONN_XDEVCAPP_SUPP + | MHL_DST_CONN_POW_STAT); + sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE), MHL_INT_RC_DCAP_CHG); +} + +static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode) +{ + if (ctx->mode == mode) + return; + + ctx->mode = mode; + + switch (mode) { + case CM_MHL1: + sii8620_write_seq_static(ctx, + REG_CBUS_MSC_COMPAT_CTRL, 0x02, + REG_M3_CTRL, VAL_M3_CTRL_MHL1_2_VALUE, + REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 + | BIT_DPD_OSC_EN, + REG_COC_INTR_MASK, 0 + ); + break; + case CM_MHL3: + sii8620_write_seq_static(ctx, + REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE, + REG_COC_CTL0, 0x40, + REG_MHL_COC_CTL1, 0x07 + ); + break; + case CM_DISCONNECTED: + break; + default: + dev_err(ctx->dev, "%s mode %d not supported\n", __func__, mode); + break; + } + + sii8620_set_auto_zone(ctx); + + if (mode != CM_MHL1) + return; + + sii8620_write_seq_static(ctx, + REG_MHL_DP_CTL0, 0xBC, + REG_MHL_DP_CTL1, 
0xBB, + REG_MHL_DP_CTL3, 0x48, + REG_MHL_DP_CTL5, 0x39, + REG_MHL_DP_CTL2, 0x2A, + REG_MHL_DP_CTL6, 0x2A, + REG_MHL_DP_CTL7, 0x08 + ); +} + +static void sii8620_disconnect(struct sii8620 *ctx) +{ + sii8620_disable_gen2_write_burst(ctx); + sii8620_stop_video(ctx); + msleep(50); + sii8620_cbus_reset(ctx); + sii8620_set_mode(ctx, CM_DISCONNECTED); + sii8620_write_seq_static(ctx, + REG_COC_CTL0, 0x40, + REG_CBUS3_CNVT, 0x84, + REG_COC_CTL14, 0x00, + REG_COC_CTL0, 0x40, + REG_HRXCTRL3, 0x07, + REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X + | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL + | BIT_MHL_PLL_CTL0_ZONE_MASK_OE, + REG_MHL_DP_CTL0, BIT_MHL_DP_CTL0_DP_OE + | BIT_MHL_DP_CTL0_TX_OE_OVR, + REG_MHL_DP_CTL1, 0xBB, + REG_MHL_DP_CTL3, 0x48, + REG_MHL_DP_CTL5, 0x3F, + REG_MHL_DP_CTL2, 0x2F, + REG_MHL_DP_CTL6, 0x2A, + REG_MHL_DP_CTL7, 0x03 + ); + sii8620_disable_hpd(ctx); + sii8620_write_seq_static(ctx, + REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE, + REG_MHL_COC_CTL1, 0x07, + REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K), + REG_DISC_CTRL8, 0x00, + REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT + | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS, + REG_INT_CTRL, 0x00, + REG_MSC_HEARTBEAT_CTRL, 0x27, + REG_DISC_CTRL1, 0x25, + REG_CBUS_DISC_INTR0, (u8)~BIT_RGND_READY_INT, + REG_CBUS_DISC_INTR0_MASK, BIT_RGND_READY_INT, + REG_MDT_INT_1, 0xff, + REG_MDT_INT_1_MASK, 0x00, + REG_MDT_INT_0, 0xff, + REG_MDT_INT_0_MASK, 0x00, + REG_COC_INTR, 0xff, + REG_COC_INTR_MASK, 0x00, + REG_TRXINTH, 0xff, + REG_TRXINTMH, 0x00, + REG_CBUS_INT_0, 0xff, + REG_CBUS_INT_0_MASK, 0x00, + REG_CBUS_INT_1, 0xff, + REG_CBUS_INT_1_MASK, 0x00, + REG_EMSCINTR, 0xff, + REG_EMSCINTRMASK, 0x00, + REG_EMSCINTR1, 0xff, + REG_EMSCINTRMASK1, 0x00, + REG_INTR8, 0xff, + REG_INTR8_MASK, 0x00, + REG_TPI_INTR_ST0, 0xff, + REG_TPI_INTR_EN, 0x00, + REG_HDCP2X_INTR0, 0xff, + REG_HDCP2X_INTR0_MASK, 0x00, + REG_INTR9, 0xff, + REG_INTR9_MASK, 0x00, + REG_INTR3, 0xff, + REG_INTR3_MASK, 0x00, + REG_INTR5, 0xff, + REG_INTR5_MASK, 0x00, + REG_INTR2, 0xff, + REG_INTR2_MASK, 0x00, + ); + memset(ctx->stat, 0, sizeof(ctx->stat)); + memset(ctx->xstat, 0, sizeof(ctx->xstat)); + memset(ctx->devcap, 0, sizeof(ctx->devcap)); + memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap)); + ctx->cbus_status = 0; + ctx->sink_type = SINK_NONE; + kfree(ctx->edid); + ctx->edid = NULL; + sii8620_mt_cleanup(ctx); +} + +static void sii8620_mhl_disconnected(struct sii8620 *ctx) +{ + sii8620_write_seq_static(ctx, + REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K), + REG_CBUS_MSC_COMPAT_CTRL, + BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN + ); + sii8620_disconnect(ctx); +} + +static void sii8620_irq_disc(struct sii8620 *ctx) +{ + u8 stat = sii8620_readb(ctx, REG_CBUS_DISC_INTR0); + + if (stat & VAL_CBUS_MHL_DISCON) + sii8620_mhl_disconnected(ctx); + + if (stat & BIT_RGND_READY_INT) { + u8 stat2 = sii8620_readb(ctx, REG_DISC_STAT2); + + if ((stat2 & MSK_DISC_STAT2_RGND) == VAL_RGND_1K) { + sii8620_mhl_discover(ctx); + } else { + sii8620_write_seq_static(ctx, + REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT + | BIT_DISC_CTRL9_NOMHL_EST + | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS, + REG_CBUS_DISC_INTR0_MASK, BIT_RGND_READY_INT + | BIT_CBUS_MHL3_DISCON_INT + | BIT_CBUS_MHL12_DISCON_INT + | BIT_NOT_MHL_EST_INT + ); + } + } + if (stat & BIT_MHL_EST_INT) + sii8620_mhl_init(ctx); + + sii8620_write(ctx, REG_CBUS_DISC_INTR0, stat); +} + +static void sii8620_irq_g2wb(struct sii8620 *ctx) +{ + u8 stat = sii8620_readb(ctx, REG_MDT_INT_0); + + if (stat & BIT_MDT_IDLE_AFTER_HAWB_DISABLE) + dev_dbg(ctx->dev, "HAWB idle\n"); + + 
sii8620_write(ctx, REG_MDT_INT_0, stat); +} + +static void sii8620_status_changed_dcap(struct sii8620 *ctx) +{ + if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY) { + sii8620_set_mode(ctx, CM_MHL1); + sii8620_peer_specific_init(ctx); + sii8620_write(ctx, REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE + | BIT_INTR9_EDID_DONE | BIT_INTR9_EDID_ERROR); + } +} + +static void sii8620_status_changed_path(struct sii8620 *ctx) +{ + if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) { + sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), + MHL_DST_LM_CLK_MODE_NORMAL + | MHL_DST_LM_PATH_ENABLED); + sii8620_mt_read_devcap(ctx, false); + } else { + sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), + MHL_DST_LM_CLK_MODE_NORMAL); + } +} + +static void sii8620_msc_mr_write_stat(struct sii8620 *ctx) +{ + u8 st[MHL_DST_SIZE], xst[MHL_XDS_SIZE]; + + sii8620_read_buf(ctx, REG_MHL_STAT_0, st, MHL_DST_SIZE); + sii8620_read_buf(ctx, REG_MHL_EXTSTAT_0, xst, MHL_XDS_SIZE); + + sii8620_update_array(ctx->stat, st, MHL_DST_SIZE); + sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE); + + if (st[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY) + sii8620_status_changed_dcap(ctx); + + if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) + sii8620_status_changed_path(ctx); +} + +static void sii8620_msc_mr_set_int(struct sii8620 *ctx) +{ + u8 ints[MHL_INT_SIZE]; + + sii8620_read_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE); + sii8620_write_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE); +} + +static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx) +{ + struct device *dev = ctx->dev; + + if (list_empty(&ctx->mt_queue)) { + dev_err(dev, "unexpected MSC MT response\n"); + return NULL; + } + + return list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, node); +} + +static void sii8620_msc_mt_done(struct sii8620 *ctx) +{ + struct sii8620_mt_msg *msg = sii8620_msc_msg_first(ctx); + + if (!msg) + return; + + msg->ret = sii8620_readb(ctx, REG_MSC_MT_RCVD_DATA0); + ctx->mt_state = MT_STATE_DONE; +} + +static void sii8620_msc_mr_msc_msg(struct sii8620 *ctx) +{ + struct sii8620_mt_msg *msg = sii8620_msc_msg_first(ctx); + u8 buf[2]; + + if (!msg) + return; + + sii8620_read_buf(ctx, REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA, buf, 2); + + switch (buf[0]) { + case MHL_MSC_MSG_RAPK: + msg->ret = buf[1]; + ctx->mt_state = MT_STATE_DONE; + break; + default: + dev_err(ctx->dev, "%s message type %d,%d not supported", + __func__, buf[0], buf[1]); + } +} + +static void sii8620_irq_msc(struct sii8620 *ctx) +{ + u8 stat = sii8620_readb(ctx, REG_CBUS_INT_0); + + if (stat & ~BIT_CBUS_HPD_CHG) + sii8620_write(ctx, REG_CBUS_INT_0, stat & ~BIT_CBUS_HPD_CHG); + + if (stat & BIT_CBUS_HPD_CHG) { + u8 cbus_stat = sii8620_readb(ctx, REG_CBUS_STATUS); + + if ((cbus_stat ^ ctx->cbus_status) & BIT_CBUS_STATUS_CBUS_HPD) { + sii8620_write(ctx, REG_CBUS_INT_0, BIT_CBUS_HPD_CHG); + } else { + stat ^= BIT_CBUS_STATUS_CBUS_HPD; + cbus_stat ^= BIT_CBUS_STATUS_CBUS_HPD; + } + ctx->cbus_status = cbus_stat; + } + + if (stat & BIT_CBUS_MSC_MR_WRITE_STAT) + sii8620_msc_mr_write_stat(ctx); + + if (stat & BIT_CBUS_MSC_MR_SET_INT) + sii8620_msc_mr_set_int(ctx); + + if (stat & BIT_CBUS_MSC_MT_DONE) + sii8620_msc_mt_done(ctx); + + if (stat & BIT_CBUS_MSC_MR_MSC_MSG) + sii8620_msc_mr_msc_msg(ctx); +} + +static void sii8620_irq_coc(struct sii8620 *ctx) +{ + u8 stat = sii8620_readb(ctx, REG_COC_INTR); + + sii8620_write(ctx, REG_COC_INTR, stat); +} + +static void sii8620_irq_merr(struct sii8620 *ctx) +{ + u8 stat = sii8620_readb(ctx, REG_CBUS_INT_1); + + 
sii8620_write(ctx, REG_CBUS_INT_1, stat); +} + +static void sii8620_irq_edid(struct sii8620 *ctx) +{ + u8 stat = sii8620_readb(ctx, REG_INTR9); + + sii8620_write(ctx, REG_INTR9, stat); + + if (stat & BIT_INTR9_DEVCAP_DONE) + ctx->mt_state = MT_STATE_DONE; +} + +static void sii8620_scdt_high(struct sii8620 *ctx) +{ + sii8620_write_seq_static(ctx, + REG_INTR8_MASK, BIT_CEA_NEW_AVI | BIT_CEA_NEW_VSI, + REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI, + ); +} + +static void sii8620_scdt_low(struct sii8620 *ctx) +{ + sii8620_write(ctx, REG_TMDS_CSTAT_P3, + BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS | + BIT_TMDS_CSTAT_P3_CLR_AVI); + + sii8620_stop_video(ctx); + + sii8620_write(ctx, REG_INTR8_MASK, 0); +} + +static void sii8620_irq_scdt(struct sii8620 *ctx) +{ + u8 stat = sii8620_readb(ctx, REG_INTR5); + + if (stat & BIT_INTR_SCDT_CHANGE) { + u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3); + + if (cstat & BIT_TMDS_CSTAT_P3_SCDT) + sii8620_scdt_high(ctx); + else + sii8620_scdt_low(ctx); + } + + sii8620_write(ctx, REG_INTR5, stat); +} + +static void sii8620_new_vsi(struct sii8620 *ctx) +{ + u8 vsif[11]; + + sii8620_write(ctx, REG_RX_HDMI_CTRL2, + VAL_RX_HDMI_CTRL2_DEFVAL | + BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI); + sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, vsif, + ARRAY_SIZE(vsif)); +} + +static void sii8620_new_avi(struct sii8620 *ctx) +{ + sii8620_write(ctx, REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL); + sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, ctx->avif, + ARRAY_SIZE(ctx->avif)); +} + +static void sii8620_irq_infr(struct sii8620 *ctx) +{ + u8 stat = sii8620_readb(ctx, REG_INTR8) + & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI); + + sii8620_write(ctx, REG_INTR8, stat); + + if (stat & BIT_CEA_NEW_VSI) + sii8620_new_vsi(ctx); + + if (stat & BIT_CEA_NEW_AVI) + sii8620_new_avi(ctx); + + if (stat & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI)) + sii8620_start_video(ctx); +} + +/* endian agnostic, non-volatile version of test_bit */ +static bool sii8620_test_bit(unsigned int nr, const u8 *addr) +{ + return 1 & (addr[nr / BITS_PER_BYTE] >> (nr % BITS_PER_BYTE)); +} + +static irqreturn_t sii8620_irq_thread(int irq, void *data) +{ + static const struct { + int bit; + void (*handler)(struct sii8620 *ctx); + } irq_vec[] = { + { BIT_FAST_INTR_STAT_DISC, sii8620_irq_disc }, + { BIT_FAST_INTR_STAT_G2WB, sii8620_irq_g2wb }, + { BIT_FAST_INTR_STAT_COC, sii8620_irq_coc }, + { BIT_FAST_INTR_STAT_MSC, sii8620_irq_msc }, + { BIT_FAST_INTR_STAT_MERR, sii8620_irq_merr }, + { BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid }, + { BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt }, + { BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr }, + }; + struct sii8620 *ctx = data; + u8 stats[LEN_FAST_INTR_STAT]; + int i, ret; + + mutex_lock(&ctx->lock); + + sii8620_read_buf(ctx, REG_FAST_INTR_STAT, stats, ARRAY_SIZE(stats)); + for (i = 0; i < ARRAY_SIZE(irq_vec); ++i) + if (sii8620_test_bit(irq_vec[i].bit, stats)) + irq_vec[i].handler(ctx); + + sii8620_mt_work(ctx); + + ret = sii8620_clear_error(ctx); + if (ret) { + dev_err(ctx->dev, "Error during IRQ handling, %d.\n", ret); + sii8620_mhl_disconnected(ctx); + } + mutex_unlock(&ctx->lock); + + return IRQ_HANDLED; +} + +static void sii8620_cable_in(struct sii8620 *ctx) +{ + struct device *dev = ctx->dev; + u8 ver[5]; + int ret; + + ret = sii8620_hw_on(ctx); + if (ret) { + dev_err(dev, "Error powering on, %d.\n", ret); + return; + } + sii8620_hw_reset(ctx); + + sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver)); + ret = sii8620_clear_error(ctx); + if (ret) { + dev_err(dev, "Error accessing I2C bus, 
+static irqreturn_t sii8620_irq_thread(int irq, void *data)
+{
+	static const struct {
+		int bit;
+		void (*handler)(struct sii8620 *ctx);
+	} irq_vec[] = {
+		{ BIT_FAST_INTR_STAT_DISC, sii8620_irq_disc },
+		{ BIT_FAST_INTR_STAT_G2WB, sii8620_irq_g2wb },
+		{ BIT_FAST_INTR_STAT_COC, sii8620_irq_coc },
+		{ BIT_FAST_INTR_STAT_MSC, sii8620_irq_msc },
+		{ BIT_FAST_INTR_STAT_MERR, sii8620_irq_merr },
+		{ BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid },
+		{ BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt },
+		{ BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr },
+	};
+	struct sii8620 *ctx = data;
+	u8 stats[LEN_FAST_INTR_STAT];
+	int i, ret;
+
+	mutex_lock(&ctx->lock);
+
+	sii8620_read_buf(ctx, REG_FAST_INTR_STAT, stats, ARRAY_SIZE(stats));
+	for (i = 0; i < ARRAY_SIZE(irq_vec); ++i)
+		if (sii8620_test_bit(irq_vec[i].bit, stats))
+			irq_vec[i].handler(ctx);
+
+	sii8620_mt_work(ctx);
+
+	ret = sii8620_clear_error(ctx);
+	if (ret) {
+		dev_err(ctx->dev, "Error during IRQ handling, %d.\n", ret);
+		sii8620_mhl_disconnected(ctx);
+	}
+	mutex_unlock(&ctx->lock);
+
+	return IRQ_HANDLED;
+}
+
+static void sii8620_cable_in(struct sii8620 *ctx)
+{
+	struct device *dev = ctx->dev;
+	u8 ver[5];
+	int ret;
+
+	ret = sii8620_hw_on(ctx);
+	if (ret) {
+		dev_err(dev, "Error powering on, %d.\n", ret);
+		return;
+	}
+	sii8620_hw_reset(ctx);
+
+	sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver));
+	ret = sii8620_clear_error(ctx);
+	if (ret) {
+		dev_err(dev, "Error accessing I2C bus, %d.\n", ret);
+		return;
+	}
+
+	dev_info(dev, "ChipID %02x%02x:%02x%02x rev %02x.\n", ver[1], ver[0],
+		 ver[3], ver[2], ver[4]);
+
+	sii8620_write(ctx, REG_DPD,
+		BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | BIT_DPD_OSC_EN);
+
+	sii8620_xtal_set_rate(ctx);
+	sii8620_disconnect(ctx);
+
+	sii8620_write_seq_static(ctx,
+		REG_MHL_CBUS_CTL0, VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONG
+			| VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_734,
+		REG_MHL_CBUS_CTL1, VAL_MHL_CBUS_CTL1_1115_OHM,
+		REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | BIT_DPD_OSC_EN,
+	);
+
+	ret = sii8620_clear_error(ctx);
+	if (ret) {
+		dev_err(dev, "Error accessing I2C bus, %d.\n", ret);
+		return;
+	}
+
+	enable_irq(to_i2c_client(ctx->dev)->irq);
+}
+
+static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge)
+{
+	return container_of(bridge, struct sii8620, bridge);
+}
+
+static bool sii8620_mode_fixup(struct drm_bridge *bridge,
+			       const struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
+{
+	struct sii8620 *ctx = bridge_to_sii8620(bridge);
+	bool ret = false;
+	int max_clock = 74250;
+
+	mutex_lock(&ctx->lock);
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		goto out;
+
+	if (ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL)
+		max_clock = 300000;
+
+	ret = mode->clock <= max_clock;
+
+out:
+	mutex_unlock(&ctx->lock);
+
+	return ret;
+}
+
+static const struct drm_bridge_funcs sii8620_bridge_funcs = {
+	.mode_fixup = sii8620_mode_fixup,
+};
+
+static int sii8620_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	struct device *dev = &client->dev;
+	struct sii8620 *ctx;
+	int ret;
+
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->dev = dev;
+	mutex_init(&ctx->lock);
+	INIT_LIST_HEAD(&ctx->mt_queue);
+
+	ctx->clk_xtal = devm_clk_get(dev, "xtal");
+	if (IS_ERR(ctx->clk_xtal)) {
+		dev_err(dev, "failed to get xtal clock from DT\n");
+		return PTR_ERR(ctx->clk_xtal);
+	}
+
+	if (!client->irq) {
+		dev_err(dev, "no irq provided\n");
+		return -EINVAL;
+	}
+	irq_set_status_flags(client->irq, IRQ_NOAUTOEN);
+	ret = devm_request_threaded_irq(dev, client->irq, NULL,
+					sii8620_irq_thread,
+					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+					"sii8620", ctx);
+	if (ret)
+		return ret;
+
+	ctx->gpio_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+	if (IS_ERR(ctx->gpio_reset)) {
+		dev_err(dev, "failed to get reset gpio from DT\n");
+		return PTR_ERR(ctx->gpio_reset);
+	}
+
+	ctx->supplies[0].supply = "cvcc10";
+	ctx->supplies[1].supply = "iovcc18";
+	ret = devm_regulator_bulk_get(dev, 2, ctx->supplies);
+	if (ret)
+		return ret;
+
+	i2c_set_clientdata(client, ctx);
+
+	ctx->bridge.funcs = &sii8620_bridge_funcs;
+	ctx->bridge.of_node = dev->of_node;
+	drm_bridge_add(&ctx->bridge);
+
+	sii8620_cable_in(ctx);
+
+	return 0;
+}
+
+static int sii8620_remove(struct i2c_client *client)
+{
+	struct sii8620 *ctx = i2c_get_clientdata(client);
+
+	disable_irq(to_i2c_client(ctx->dev)->irq);
+	drm_bridge_remove(&ctx->bridge);
+	sii8620_hw_off(ctx);
+
+	return 0;
+}
+
+static const struct of_device_id sii8620_dt_match[] = {
+	{ .compatible = "sil,sii8620" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, sii8620_dt_match);
+
+static const struct i2c_device_id sii8620_id[] = {
+	{ "sii8620", 0 },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(i2c, sii8620_id);
+static struct i2c_driver sii8620_driver = {
+	.driver = {
+		.name = "sii8620",
+		.of_match_table = of_match_ptr(sii8620_dt_match),
+	},
+	.probe = sii8620_probe,
+	.remove = sii8620_remove,
+	.id_table = sii8620_id,
+};
+
+module_i2c_driver(sii8620_driver);
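+/*
+ * For reference, a device tree node matching the resources requested
+ * in probe above would look roughly like this. This is an illustrative
+ * sketch only: the I2C address, interrupt specifier, GPIO and supply
+ * phandles are board specific; see the sil,sii8620 devicetree binding
+ * for the authoritative format.
+ *
+ *	sii8620@39 {
+ *		compatible = "sil,sii8620";
+ *		reg = <0x39>;
+ *		interrupts = <...>;
+ *		reset-gpios = <&gpX N GPIO_ACTIVE_LOW>;
+ *		clocks = <&xtal_clk>;
+ *		clock-names = "xtal";
+ *		cvcc10-supply = <&...>;
+ *		iovcc18-supply = <&...>;
+ *	};
+ */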
+MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/bridge/sil-sii8620.h b/drivers/gpu/drm/bridge/sil-sii8620.h new file mode 100644 index 000000000000..6ff616a4f6ce --- /dev/null +++ b/drivers/gpu/drm/bridge/sil-sii8620.h @@ -0,0 +1,1517 @@ +/* + * Registers of Silicon Image SiI8620 Mobile HD Transmitter + * + * Copyright (C) 2015, Samsung Electronics Co., Ltd. + * Andrzej Hajda <a.hajda@samsung.com> + * + * Based on MHL driver for Android devices. + * Copyright (C) 2013-2014 Silicon Image, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __SIL_SII8620_H__ +#define __SIL_SII8620_H__ + +/* Vendor ID Low byte, default value: 0x01 */ +#define REG_VND_IDL 0x0000 + +/* Vendor ID High byte, default value: 0x00 */ +#define REG_VND_IDH 0x0001 + +/* Device ID Low byte, default value: 0x60 */ +#define REG_DEV_IDL 0x0002 + +/* Device ID High byte, default value: 0x86 */ +#define REG_DEV_IDH 0x0003 + +/* Device Revision, default value: 0x10 */ +#define REG_DEV_REV 0x0004 + +/* OTP DBYTE510, default value: 0x00 */ +#define REG_OTP_DBYTE510 0x0006 + +/* System Control #1, default value: 0x00 */ +#define REG_SYS_CTRL1 0x0008 +#define BIT_SYS_CTRL1_OTPVMUTEOVR_SET BIT(7) +#define BIT_SYS_CTRL1_VSYNCPIN BIT(6) +#define BIT_SYS_CTRL1_OTPADROPOVR_SET BIT(5) +#define BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD BIT(4) +#define BIT_SYS_CTRL1_OTP2XVOVR_EN BIT(3) +#define BIT_SYS_CTRL1_OTP2XAOVR_EN BIT(2) +#define BIT_SYS_CTRL1_TX_CTRL_HDMI BIT(1) +#define BIT_SYS_CTRL1_OTPAMUTEOVR_SET BIT(0) + +/* System Control DPD, default value: 0x90 */ +#define REG_DPD 0x000b +#define BIT_DPD_PWRON_PLL BIT(7) +#define BIT_DPD_PDNTX12 BIT(6) +#define BIT_DPD_PDNRX12 BIT(5) +#define BIT_DPD_OSC_EN BIT(4) +#define BIT_DPD_PWRON_HSIC BIT(3) +#define BIT_DPD_PDIDCK_N BIT(2) +#define BIT_DPD_PD_MHL_CLK_N BIT(1) + +/* Dual link Control, default value: 0x00 */ +#define REG_DCTL 0x000d +#define BIT_DCTL_TDM_LCLK_PHASE BIT(7) +#define BIT_DCTL_HSIC_CLK_PHASE BIT(6) +#define BIT_DCTL_CTS_TCK_PHASE BIT(5) +#define BIT_DCTL_EXT_DDC_SEL BIT(4) +#define BIT_DCTL_TRANSCODE BIT(3) +#define BIT_DCTL_HSIC_RX_STROBE_PHASE BIT(2) +#define BIT_DCTL_HSIC_TX_BIST_START_SEL BIT(1) +#define BIT_DCTL_TCLKNX_PHASE BIT(0) + +/* PWD Software Reset, default value: 0x20 */ +#define REG_PWD_SRST 0x000e +#define BIT_PWD_SRST_COC_DOC_RST BIT(7) +#define BIT_PWD_SRST_CBUS_RST_SW BIT(6) +#define BIT_PWD_SRST_CBUS_RST_SW_EN BIT(5) +#define BIT_PWD_SRST_MHLFIFO_RST BIT(4) +#define BIT_PWD_SRST_CBUS_RST BIT(3) +#define BIT_PWD_SRST_SW_RST_AUTO BIT(2) +#define BIT_PWD_SRST_HDCP2X_SW_RST BIT(1) +#define BIT_PWD_SRST_SW_RST BIT(0) + +/* AKSV_1, default value: 0x00 */ +#define REG_AKSV_1 0x001d + +/* Video H Resolution #1, default value: 0x00 */ +#define REG_H_RESL 0x003a + +/* Video Mode, default value: 0x00 */ +#define REG_VID_MODE 0x004a +#define BIT_VID_MODE_M1080P BIT(6) + +/* Video Input Mode, default value: 0xc0 */ +#define REG_VID_OVRRD 0x0051 +#define BIT_VID_OVRRD_PP_AUTO_DISABLE BIT(7) +#define BIT_VID_OVRRD_M1080P_OVRRD BIT(6) +#define BIT_VID_OVRRD_MINIVSYNC_ON BIT(5) +#define BIT_VID_OVRRD_3DCONV_EN_FRAME_PACK BIT(4) +#define BIT_VID_OVRRD_ENABLE_AUTO_PATH_EN BIT(3) +#define BIT_VID_OVRRD_ENRGB2YCBCR_OVRRD BIT(2) +#define BIT_VID_OVRRD_ENDOWNSAMPLE_OVRRD BIT(0) + +/* I2C Address reassignment, default value: 0x00 */ +#define REG_PAGE_MHLSPEC_ADDR 0x0057 +#define REG_PAGE7_ADDR 0x0058 +#define 
REG_PAGE8_ADDR 0x005c + +/* Fast Interrupt Status, default value: 0x00 */ +#define REG_FAST_INTR_STAT 0x005f +#define LEN_FAST_INTR_STAT 7 +#define BIT_FAST_INTR_STAT_TIMR 8 +#define BIT_FAST_INTR_STAT_INT2 9 +#define BIT_FAST_INTR_STAT_DDC 10 +#define BIT_FAST_INTR_STAT_SCDT 11 +#define BIT_FAST_INTR_STAT_INFR 13 +#define BIT_FAST_INTR_STAT_EDID 14 +#define BIT_FAST_INTR_STAT_HDCP 15 +#define BIT_FAST_INTR_STAT_MSC 16 +#define BIT_FAST_INTR_STAT_MERR 17 +#define BIT_FAST_INTR_STAT_G2WB 18 +#define BIT_FAST_INTR_STAT_G2WB_ERR 19 +#define BIT_FAST_INTR_STAT_DISC 28 +#define BIT_FAST_INTR_STAT_BLOCK 30 +#define BIT_FAST_INTR_STAT_LTRN 31 +#define BIT_FAST_INTR_STAT_HDCP2 32 +#define BIT_FAST_INTR_STAT_TDM 42 +#define BIT_FAST_INTR_STAT_COC 51 + +/* GPIO Control, default value: 0x15 */ +#define REG_GPIO_CTRL1 0x006e +#define BIT_CTRL1_GPIO_I_8 BIT(5) +#define BIT_CTRL1_GPIO_OEN_8 BIT(4) +#define BIT_CTRL1_GPIO_I_7 BIT(3) +#define BIT_CTRL1_GPIO_OEN_7 BIT(2) +#define BIT_CTRL1_GPIO_I_6 BIT(1) +#define BIT_CTRL1_GPIO_OEN_6 BIT(0) + +/* Interrupt Control, default value: 0x06 */ +#define REG_INT_CTRL 0x006f +#define BIT_INT_CTRL_SOFTWARE_WP BIT(7) +#define BIT_INT_CTRL_INTR_OD BIT(2) +#define BIT_INT_CTRL_INTR_POLARITY BIT(1) + +/* Interrupt State, default value: 0x00 */ +#define REG_INTR_STATE 0x0070 +#define BIT_INTR_STATE_INTR_STATE BIT(0) + +/* Interrupt Source #1, default value: 0x00 */ +#define REG_INTR1 0x0071 + +/* Interrupt Source #2, default value: 0x00 */ +#define REG_INTR2 0x0072 + +/* Interrupt Source #3, default value: 0x01 */ +#define REG_INTR3 0x0073 +#define BIT_DDC_CMD_DONE BIT(3) + +/* Interrupt Source #5, default value: 0x00 */ +#define REG_INTR5 0x0074 + +/* Interrupt #1 Mask, default value: 0x00 */ +#define REG_INTR1_MASK 0x0075 + +/* Interrupt #2 Mask, default value: 0x00 */ +#define REG_INTR2_MASK 0x0076 + +/* Interrupt #3 Mask, default value: 0x00 */ +#define REG_INTR3_MASK 0x0077 + +/* Interrupt #5 Mask, default value: 0x00 */ +#define REG_INTR5_MASK 0x0078 +#define BIT_INTR_SCDT_CHANGE BIT(0) + +/* Hot Plug Connection Control, default value: 0x45 */ +#define REG_HPD_CTRL 0x0079 +#define BIT_HPD_CTRL_HPD_DS_SIGNAL BIT(7) +#define BIT_HPD_CTRL_HPD_OUT_OD_EN BIT(6) +#define BIT_HPD_CTRL_HPD_HIGH BIT(5) +#define BIT_HPD_CTRL_HPD_OUT_OVR_EN BIT(4) +#define BIT_HPD_CTRL_GPIO_I_1 BIT(3) +#define BIT_HPD_CTRL_GPIO_OEN_1 BIT(2) +#define BIT_HPD_CTRL_GPIO_I_0 BIT(1) +#define BIT_HPD_CTRL_GPIO_OEN_0 BIT(0) + +/* GPIO Control, default value: 0x55 */ +#define REG_GPIO_CTRL 0x007a +#define BIT_CTRL_GPIO_I_5 BIT(7) +#define BIT_CTRL_GPIO_OEN_5 BIT(6) +#define BIT_CTRL_GPIO_I_4 BIT(5) +#define BIT_CTRL_GPIO_OEN_4 BIT(4) +#define BIT_CTRL_GPIO_I_3 BIT(3) +#define BIT_CTRL_GPIO_OEN_3 BIT(2) +#define BIT_CTRL_GPIO_I_2 BIT(1) +#define BIT_CTRL_GPIO_OEN_2 BIT(0) + +/* Interrupt Source 7, default value: 0x00 */ +#define REG_INTR7 0x007b + +/* Interrupt Source 8, default value: 0x00 */ +#define REG_INTR8 0x007c + +/* Interrupt #7 Mask, default value: 0x00 */ +#define REG_INTR7_MASK 0x007d + +/* Interrupt #8 Mask, default value: 0x00 */ +#define REG_INTR8_MASK 0x007e +#define BIT_CEA_NEW_VSI BIT(2) +#define BIT_CEA_NEW_AVI BIT(1) + +/* IEEE, default value: 0x10 */ +#define REG_TMDS_CCTRL 0x0080 +#define BIT_TMDS_CCTRL_TMDS_OE BIT(4) + +/* TMDS Control #4, default value: 0x02 */ +#define REG_TMDS_CTRL4 0x0085 +#define BIT_TMDS_CTRL4_SCDT_CKDT_SEL BIT(1) +#define BIT_TMDS_CTRL4_TX_EN_BY_SCDT BIT(0) + +/* BIST CNTL, default value: 0x00 */ +#define REG_BIST_CTRL 0x00bb +#define BIT_RXBIST_VGB_EN 
BIT(7) +#define BIT_TXBIST_VGB_EN BIT(6) +#define BIT_BIST_START_SEL BIT(5) +#define BIT_BIST_START_BIT BIT(4) +#define BIT_BIST_ALWAYS_ON BIT(3) +#define BIT_BIST_TRANS BIT(2) +#define BIT_BIST_RESET BIT(1) +#define BIT_BIST_EN BIT(0) + +/* BIST DURATION0, default value: 0x00 */ +#define REG_BIST_TEST_SEL 0x00bd +#define MSK_BIST_TEST_SEL_BIST_PATT_SEL 0x0f + +/* BIST VIDEO_MODE, default value: 0x00 */ +#define REG_BIST_VIDEO_MODE 0x00be +#define MSK_BIST_VIDEO_MODE_BIST_VIDEO_MODE_3_0 0x0f + +/* BIST DURATION0, default value: 0x00 */ +#define REG_BIST_DURATION_0 0x00bf + +/* BIST DURATION1, default value: 0x00 */ +#define REG_BIST_DURATION_1 0x00c0 + +/* BIST DURATION2, default value: 0x00 */ +#define REG_BIST_DURATION_2 0x00c1 + +/* BIST 8BIT_PATTERN, default value: 0x00 */ +#define REG_BIST_8BIT_PATTERN 0x00c2 + +/* LM DDC, default value: 0x80 */ +#define REG_LM_DDC 0x00c7 +#define BIT_LM_DDC_SW_TPI_EN_DISABLED BIT(7) + +#define BIT_LM_DDC_VIDEO_MUTE_EN BIT(5) +#define BIT_LM_DDC_DDC_TPI_SW BIT(2) +#define BIT_LM_DDC_DDC_GRANT BIT(1) +#define BIT_LM_DDC_DDC_GPU_REQUEST BIT(0) + +/* DDC I2C Manual, default value: 0x03 */ +#define REG_DDC_MANUAL 0x00ec +#define BIT_DDC_MANUAL_MAN_DDC BIT(7) +#define BIT_DDC_MANUAL_VP_SEL BIT(6) +#define BIT_DDC_MANUAL_DSDA BIT(5) +#define BIT_DDC_MANUAL_DSCL BIT(4) +#define BIT_DDC_MANUAL_GCP_HW_CTL_EN BIT(3) +#define BIT_DDC_MANUAL_DDCM_ABORT_WP BIT(2) +#define BIT_DDC_MANUAL_IO_DSDA BIT(1) +#define BIT_DDC_MANUAL_IO_DSCL BIT(0) + +/* DDC I2C Target Slave Address, default value: 0x00 */ +#define REG_DDC_ADDR 0x00ed +#define MSK_DDC_ADDR_DDC_ADDR 0xfe + +/* DDC I2C Target Segment Address, default value: 0x00 */ +#define REG_DDC_SEGM 0x00ee + +/* DDC I2C Target Offset Address, default value: 0x00 */ +#define REG_DDC_OFFSET 0x00ef + +/* DDC I2C Data In count #1, default value: 0x00 */ +#define REG_DDC_DIN_CNT1 0x00f0 + +/* DDC I2C Data In count #2, default value: 0x00 */ +#define REG_DDC_DIN_CNT2 0x00f1 +#define MSK_DDC_DIN_CNT2_DDC_DIN_CNT_9_8 0x03 + +/* DDC I2C Status, default value: 0x04 */ +#define REG_DDC_STATUS 0x00f2 +#define BIT_DDC_STATUS_DDC_BUS_LOW BIT(6) +#define BIT_DDC_STATUS_DDC_NO_ACK BIT(5) +#define BIT_DDC_STATUS_DDC_I2C_IN_PROG BIT(4) +#define BIT_DDC_STATUS_DDC_FIFO_FULL BIT(3) +#define BIT_DDC_STATUS_DDC_FIFO_EMPTY BIT(2) +#define BIT_DDC_STATUS_DDC_FIFO_READ_IN_SUE BIT(1) +#define BIT_DDC_STATUS_DDC_FIFO_WRITE_IN_USE BIT(0) + +/* DDC I2C Command, default value: 0x70 */ +#define REG_DDC_CMD 0x00f3 +#define BIT_DDC_CMD_HDCP_DDC_EN BIT(6) +#define BIT_DDC_CMD_SDA_DEL_EN BIT(5) +#define BIT_DDC_CMD_DDC_FLT_EN BIT(4) + +#define MSK_DDC_CMD_DDC_CMD 0x0f +#define VAL_DDC_CMD_ENH_DDC_READ_NO_ACK 0x04 +#define VAL_DDC_CMD_DDC_CMD_CLEAR_FIFO 0x09 +#define VAL_DDC_CMD_DDC_CMD_ABORT 0x0f + +/* DDC I2C FIFO Data In/Out, default value: 0x00 */ +#define REG_DDC_DATA 0x00f4 + +/* DDC I2C Data Out Counter, default value: 0x00 */ +#define REG_DDC_DOUT_CNT 0x00f5 +#define BIT_DDC_DOUT_CNT_DDC_DELAY_CNT_8 BIT(7) +#define MSK_DDC_DOUT_CNT_DDC_DATA_OUT_CNT 0x1f + +/* DDC I2C Delay Count, default value: 0x14 */ +#define REG_DDC_DELAY_CNT 0x00f6 + +/* Test Control, default value: 0x80 */ +#define REG_TEST_TXCTRL 0x00f7 +#define BIT_TEST_TXCTRL_RCLK_REF_SEL BIT(7) +#define BIT_TEST_TXCTRL_PCLK_REF_SEL BIT(6) +#define MSK_TEST_TXCTRL_BYPASS_PLL_CLK 0x3c +#define BIT_TEST_TXCTRL_HDMI_MODE BIT(1) +#define BIT_TEST_TXCTRL_TST_PLLCK BIT(0) + +/* CBUS Address, default value: 0x00 */ +#define REG_PAGE_CBUS_ADDR 0x00f8 + +/* I2C Device Address re-assignment */ 
+#define REG_PAGE1_ADDR 0x00fc +#define REG_PAGE2_ADDR 0x00fd +#define REG_PAGE3_ADDR 0x00fe +#define REG_HW_TPI_ADDR 0x00ff + +/* USBT CTRL0, default value: 0x00 */ +#define REG_UTSRST 0x0100 +#define BIT_UTSRST_FC_SRST BIT(5) +#define BIT_UTSRST_KEEPER_SRST BIT(4) +#define BIT_UTSRST_HTX_SRST BIT(3) +#define BIT_UTSRST_TRX_SRST BIT(2) +#define BIT_UTSRST_TTX_SRST BIT(1) +#define BIT_UTSRST_HRX_SRST BIT(0) + +/* HSIC RX Control3, default value: 0x07 */ +#define REG_HRXCTRL3 0x0104 +#define MSK_HRXCTRL3_HRX_AFFCTRL 0xf0 +#define BIT_HRXCTRL3_HRX_OUT_EN BIT(2) +#define BIT_HRXCTRL3_STATUS_EN BIT(1) +#define BIT_HRXCTRL3_HRX_STAY_RESET BIT(0) + +/* HSIC RX INT Registers */ +#define REG_HRXINTL 0x0111 +#define REG_HRXINTH 0x0112 + +/* TDM TX NUMBITS, default value: 0x0c */ +#define REG_TTXNUMB 0x0116 +#define MSK_TTXNUMB_TTX_AFFCTRL_3_0 0xf0 +#define BIT_TTXNUMB_TTX_COM1_AT_SYNC_WAIT BIT(3) +#define MSK_TTXNUMB_TTX_NUMBPS_2_0 0x07 + +/* TDM TX NUMSPISYM, default value: 0x04 */ +#define REG_TTXSPINUMS 0x0117 + +/* TDM TX NUMHSICSYM, default value: 0x14 */ +#define REG_TTXHSICNUMS 0x0118 + +/* TDM TX NUMTOTSYM, default value: 0x18 */ +#define REG_TTXTOTNUMS 0x0119 + +/* TDM TX INT Low, default value: 0x00 */ +#define REG_TTXINTL 0x0136 +#define BIT_TTXINTL_TTX_INTR7 BIT(7) +#define BIT_TTXINTL_TTX_INTR6 BIT(6) +#define BIT_TTXINTL_TTX_INTR5 BIT(5) +#define BIT_TTXINTL_TTX_INTR4 BIT(4) +#define BIT_TTXINTL_TTX_INTR3 BIT(3) +#define BIT_TTXINTL_TTX_INTR2 BIT(2) +#define BIT_TTXINTL_TTX_INTR1 BIT(1) +#define BIT_TTXINTL_TTX_INTR0 BIT(0) + +/* TDM TX INT High, default value: 0x00 */ +#define REG_TTXINTH 0x0137 +#define BIT_TTXINTH_TTX_INTR15 BIT(7) +#define BIT_TTXINTH_TTX_INTR14 BIT(6) +#define BIT_TTXINTH_TTX_INTR13 BIT(5) +#define BIT_TTXINTH_TTX_INTR12 BIT(4) +#define BIT_TTXINTH_TTX_INTR11 BIT(3) +#define BIT_TTXINTH_TTX_INTR10 BIT(2) +#define BIT_TTXINTH_TTX_INTR9 BIT(1) +#define BIT_TTXINTH_TTX_INTR8 BIT(0) + +/* TDM RX Control, default value: 0x1c */ +#define REG_TRXCTRL 0x013b +#define BIT_TRXCTRL_TRX_CLR_WVALLOW BIT(4) +#define BIT_TRXCTRL_TRX_FROM_SE_COC BIT(3) +#define MSK_TRXCTRL_TRX_NUMBPS_2_0 0x07 + +/* TDM RX NUMSPISYM, default value: 0x04 */ +#define REG_TRXSPINUMS 0x013c + +/* TDM RX NUMHSICSYM, default value: 0x14 */ +#define REG_TRXHSICNUMS 0x013d + +/* TDM RX NUMTOTSYM, default value: 0x18 */ +#define REG_TRXTOTNUMS 0x013e + +/* TDM RX Status 2nd, default value: 0x00 */ +#define REG_TRXSTA2 0x015c + +/* TDM RX INT Low, default value: 0x00 */ +#define REG_TRXINTL 0x0163 + +/* TDM RX INT High, default value: 0x00 */ +#define REG_TRXINTH 0x0164 + +/* TDM RX INTMASK High, default value: 0x00 */ +#define REG_TRXINTMH 0x0166 + +/* HSIC TX CRTL, default value: 0x00 */ +#define REG_HTXCTRL 0x0169 +#define BIT_HTXCTRL_HTX_ALLSBE_SOP BIT(4) +#define BIT_HTXCTRL_HTX_RGDINV_USB BIT(3) +#define BIT_HTXCTRL_HTX_RSPTDM_BUSY BIT(2) +#define BIT_HTXCTRL_HTX_DRVCONN1 BIT(1) +#define BIT_HTXCTRL_HTX_DRVRST1 BIT(0) + +/* HSIC TX INT Low, default value: 0x00 */ +#define REG_HTXINTL 0x017d + +/* HSIC TX INT High, default value: 0x00 */ +#define REG_HTXINTH 0x017e + +/* HSIC Keeper, default value: 0x00 */ +#define REG_KEEPER 0x0181 +#define MSK_KEEPER_KEEPER_MODE_1_0 0x03 + +/* HSIC Flow Control General, default value: 0x02 */ +#define REG_FCGC 0x0183 +#define BIT_FCGC_HSIC_FC_HOSTMODE BIT(1) +#define BIT_FCGC_HSIC_FC_ENABLE BIT(0) + +/* HSIC Flow Control CTR13, default value: 0xfc */ +#define REG_FCCTR13 0x0191 + +/* HSIC Flow Control CTR14, default value: 0xff */ +#define REG_FCCTR14 0x0192 + +/* 
HSIC Flow Control CTR15, default value: 0xff */ +#define REG_FCCTR15 0x0193 + +/* HSIC Flow Control CTR50, default value: 0x03 */ +#define REG_FCCTR50 0x01b6 + +/* HSIC Flow Control INTR0, default value: 0x00 */ +#define REG_FCINTR0 0x01ec +#define REG_FCINTR1 0x01ed +#define REG_FCINTR2 0x01ee +#define REG_FCINTR3 0x01ef +#define REG_FCINTR4 0x01f0 +#define REG_FCINTR5 0x01f1 +#define REG_FCINTR6 0x01f2 +#define REG_FCINTR7 0x01f3 + +/* TDM Low Latency, default value: 0x20 */ +#define REG_TDMLLCTL 0x01fc +#define MSK_TDMLLCTL_TRX_LL_SEL_MANUAL 0xc0 +#define MSK_TDMLLCTL_TRX_LL_SEL_MODE 0x30 +#define MSK_TDMLLCTL_TTX_LL_SEL_MANUAL 0x0c +#define BIT_TDMLLCTL_TTX_LL_TIE_LOW BIT(1) +#define BIT_TDMLLCTL_TTX_LL_SEL_MODE BIT(0) + +/* TMDS 0 Clock Control, default value: 0x10 */ +#define REG_TMDS0_CCTRL1 0x0210 +#define MSK_TMDS0_CCTRL1_TEST_SEL 0xc0 +#define MSK_TMDS0_CCTRL1_CLK1X_CTL 0x30 + +/* TMDS Clock Enable, default value: 0x00 */ +#define REG_TMDS_CLK_EN 0x0211 +#define BIT_TMDS_CLK_EN_CLK_EN BIT(0) + +/* TMDS Channel Enable, default value: 0x00 */ +#define REG_TMDS_CH_EN 0x0212 +#define BIT_TMDS_CH_EN_CH0_EN BIT(4) +#define BIT_TMDS_CH_EN_CH12_EN BIT(0) + +/* BGR_BIAS, default value: 0x07 */ +#define REG_BGR_BIAS 0x0215 +#define BIT_BGR_BIAS_BGR_EN BIT(7) +#define MSK_BGR_BIAS_BIAS_BGR_D 0x0f + +/* TMDS 0 Digital I2C BW, default value: 0x0a */ +#define REG_ALICE0_BW_I2C 0x0231 + +/* TMDS 0 Digital Zone Control, default value: 0xe0 */ +#define REG_ALICE0_ZONE_CTRL 0x024c +#define BIT_ALICE0_ZONE_CTRL_ICRST_N BIT(7) +#define BIT_ALICE0_ZONE_CTRL_USE_INT_DIV20 BIT(6) +#define MSK_ALICE0_ZONE_CTRL_SZONE_I2C 0x30 +#define MSK_ALICE0_ZONE_CTRL_ZONE_CTRL 0x0f + +/* TMDS 0 Digital PLL Mode Control, default value: 0x00 */ +#define REG_ALICE0_MODE_CTRL 0x024d +#define MSK_ALICE0_MODE_CTRL_PLL_MODE_I2C 0x0c +#define MSK_ALICE0_MODE_CTRL_DIV20_CTRL 0x03 + +/* MHL Tx Control 6th, default value: 0xa0 */ +#define REG_MHLTX_CTL6 0x0285 +#define MSK_MHLTX_CTL6_EMI_SEL 0xe0 +#define MSK_MHLTX_CTL6_TX_CLK_SHAPE_9_8 0x03 + +/* Packet Filter0, default value: 0x00 */ +#define REG_PKT_FILTER_0 0x0290 +#define BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT BIT(7) +#define BIT_PKT_FILTER_0_DROP_CEA_CP_PKT BIT(6) +#define BIT_PKT_FILTER_0_DROP_MPEG_PKT BIT(5) +#define BIT_PKT_FILTER_0_DROP_SPIF_PKT BIT(4) +#define BIT_PKT_FILTER_0_DROP_AIF_PKT BIT(3) +#define BIT_PKT_FILTER_0_DROP_AVI_PKT BIT(2) +#define BIT_PKT_FILTER_0_DROP_CTS_PKT BIT(1) +#define BIT_PKT_FILTER_0_DROP_GCP_PKT BIT(0) + +/* Packet Filter1, default value: 0x00 */ +#define REG_PKT_FILTER_1 0x0291 +#define BIT_PKT_FILTER_1_VSI_OVERRIDE_DIS BIT(7) +#define BIT_PKT_FILTER_1_AVI_OVERRIDE_DIS BIT(6) +#define BIT_PKT_FILTER_1_DROP_AUDIO_PKT BIT(3) +#define BIT_PKT_FILTER_1_DROP_GEN2_PKT BIT(2) +#define BIT_PKT_FILTER_1_DROP_GEN_PKT BIT(1) +#define BIT_PKT_FILTER_1_DROP_VSIF_PKT BIT(0) + +/* TMDS Clock Status, default value: 0x10 */ +#define REG_TMDS_CSTAT_P3 0x02a0 +#define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_CLR_MUTE BIT(7) +#define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_SET_MUTE BIT(6) +#define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_NEW_CP BIT(5) +#define BIT_TMDS_CSTAT_P3_CLR_AVI BIT(3) +#define BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS BIT(2) +#define BIT_TMDS_CSTAT_P3_SCDT BIT(1) +#define BIT_TMDS_CSTAT_P3_CKDT BIT(0) + +/* RX_HDMI Control, default value: 0x10 */ +#define REG_RX_HDMI_CTRL0 0x02a1 +#define BIT_RX_HDMI_CTRL0_BYP_DVIFILT_SYNC BIT(5) +#define BIT_RX_HDMI_CTRL0_HDMI_MODE_EN_ITSELF_CLR BIT(4) +#define BIT_RX_HDMI_CTRL0_HDMI_MODE_SW_VALUE BIT(3) +#define 
BIT_RX_HDMI_CTRL0_HDMI_MODE_OVERWRITE BIT(2) +#define BIT_RX_HDMI_CTRL0_RX_HDMI_HDMI_MODE_EN BIT(1) +#define BIT_RX_HDMI_CTRL0_RX_HDMI_HDMI_MODE BIT(0) + +/* RX_HDMI Control, default value: 0x38 */ +#define REG_RX_HDMI_CTRL2 0x02a3 +#define MSK_RX_HDMI_CTRL2_IDLE_CNT 0xf0 +#define VAL_RX_HDMI_CTRL2_IDLE_CNT(n) ((n) << 4) +#define BIT_RX_HDMI_CTRL2_USE_AV_MUTE BIT(3) +#define BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI BIT(0) + +/* RX_HDMI Control, default value: 0x0f */ +#define REG_RX_HDMI_CTRL3 0x02a4 +#define MSK_RX_HDMI_CTRL3_PP_MODE_CLK_EN 0x0f + +/* rx_hdmi Clear Buffer, default value: 0x00 */ +#define REG_RX_HDMI_CLR_BUFFER 0x02ac +#define MSK_RX_HDMI_CLR_BUFFER_AIF4VSI_CMP 0xc0 +#define BIT_RX_HDMI_CLR_BUFFER_USE_AIF4VSI BIT(5) +#define BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_W_AVI BIT(4) +#define BIT_RX_HDMI_CLR_BUFFER_VSI_IEEE_ID_CHK_EN BIT(3) +#define BIT_RX_HDMI_CLR_BUFFER_SWAP_VSI_IEEE_ID BIT(2) +#define BIT_RX_HDMI_CLR_BUFFER_AIF_CLR_EN BIT(1) +#define BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_EN BIT(0) + +/* RX_HDMI VSI Header1, default value: 0x00 */ +#define REG_RX_HDMI_MON_PKT_HEADER1 0x02b8 + +/* RX_HDMI VSI MHL Monitor, default value: 0x3c */ +#define REG_RX_HDMI_VSIF_MHL_MON 0x02d7 + +#define MSK_RX_HDMI_VSIF_MHL_MON_RX_HDMI_MHL_3D_FORMAT 0x3c +#define MSK_RX_HDMI_VSIF_MHL_MON_RX_HDMI_MHL_VID_FORMAT 0x03 + +/* Interrupt Source 9, default value: 0x00 */ +#define REG_INTR9 0x02e0 +#define BIT_INTR9_EDID_ERROR BIT(6) +#define BIT_INTR9_EDID_DONE BIT(5) +#define BIT_INTR9_DEVCAP_DONE BIT(4) + +/* Interrupt 9 Mask, default value: 0x00 */ +#define REG_INTR9_MASK 0x02e1 + +/* TPI CBUS Start, default value: 0x00 */ +#define REG_TPI_CBUS_START 0x02e2 +#define BIT_TPI_CBUS_START_RCP_REQ_START BIT(7) +#define BIT_TPI_CBUS_START_RCPK_REPLY_START BIT(6) +#define BIT_TPI_CBUS_START_RCPE_REPLY_START BIT(5) +#define BIT_TPI_CBUS_START_PUT_LINK_MODE_START BIT(4) +#define BIT_TPI_CBUS_START_PUT_DCAPCHG_START BIT(3) +#define BIT_TPI_CBUS_START_PUT_DCAPRDY_START BIT(2) +#define BIT_TPI_CBUS_START_GET_EDID_START_0 BIT(1) +#define BIT_TPI_CBUS_START_GET_DEVCAP_START BIT(0) + +/* EDID Control, default value: 0x10 */ +#define REG_EDID_CTRL 0x02e3 +#define BIT_EDID_CTRL_EDID_PRIME_VALID BIT(7) +#define BIT_EDID_CTRL_XDEVCAP_EN BIT(6) +#define BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP BIT(5) +#define BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO BIT(4) +#define BIT_EDID_CTRL_EDID_FIFO_ACCESS_ALWAYS_EN BIT(3) +#define BIT_EDID_CTRL_EDID_FIFO_BLOCK_SEL BIT(2) +#define BIT_EDID_CTRL_INVALID_BKSV BIT(1) +#define BIT_EDID_CTRL_EDID_MODE_EN BIT(0) + +/* EDID FIFO Addr, default value: 0x00 */ +#define REG_EDID_FIFO_ADDR 0x02e9 + +/* EDID FIFO Write Data, default value: 0x00 */ +#define REG_EDID_FIFO_WR_DATA 0x02ea + +/* EDID/DEVCAP FIFO Internal Addr, default value: 0x00 */ +#define REG_EDID_FIFO_ADDR_MON 0x02eb + +/* EDID FIFO Read Data, default value: 0x00 */ +#define REG_EDID_FIFO_RD_DATA 0x02ec + +/* EDID DDC Segment Pointer, default value: 0x00 */ +#define REG_EDID_START_EXT 0x02ed + +/* TX IP BIST CNTL and Status, default value: 0x00 */ +#define REG_TX_IP_BIST_CNTLSTA 0x02f2 +#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_QUARTER_CLK_SEL BIT(6) +#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_DONE BIT(5) +#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_ON BIT(4) +#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_RUN BIT(3) +#define BIT_TX_IP_BIST_CNTLSTA_TXCLK_HALF_SEL BIT(2) +#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_EN BIT(1) +#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_SEL BIT(0) + +/* TX IP BIST INST LOW, default value: 0x00 */ +#define REG_TX_IP_BIST_INST_LOW 0x02f3 +#define 
REG_TX_IP_BIST_INST_HIGH 0x02f4 + +/* TX IP BIST PATTERN LOW, default value: 0x00 */ +#define REG_TX_IP_BIST_PAT_LOW 0x02f5 +#define REG_TX_IP_BIST_PAT_HIGH 0x02f6 + +/* TX IP BIST CONFIGURE LOW, default value: 0x00 */ +#define REG_TX_IP_BIST_CONF_LOW 0x02f7 +#define REG_TX_IP_BIST_CONF_HIGH 0x02f8 + +/* E-MSC General Control, default value: 0x80 */ +#define REG_GENCTL 0x0300 +#define BIT_GENCTL_SPEC_TRANS_DIS BIT(7) +#define BIT_GENCTL_DIS_XMIT_ERR_STATE BIT(6) +#define BIT_GENCTL_SPI_MISO_EDGE BIT(5) +#define BIT_GENCTL_SPI_MOSI_EDGE BIT(4) +#define BIT_GENCTL_CLR_EMSC_RFIFO BIT(3) +#define BIT_GENCTL_CLR_EMSC_XFIFO BIT(2) +#define BIT_GENCTL_START_TRAIN_SEQ BIT(1) +#define BIT_GENCTL_EMSC_EN BIT(0) + +/* E-MSC Comma ErrorCNT, default value: 0x03 */ +#define REG_COMMECNT 0x0305 +#define BIT_COMMECNT_I2C_TO_EMSC_EN BIT(7) +#define MSK_COMMECNT_COMMA_CHAR_ERR_CNT 0x0f + +/* E-MSC RFIFO ByteCnt, default value: 0x00 */ +#define REG_EMSCRFIFOBCNTL 0x031a +#define REG_EMSCRFIFOBCNTH 0x031b + +/* SPI Burst Cnt Status, default value: 0x00 */ +#define REG_SPIBURSTCNT 0x031e + +/* SPI Burst Status and SWRST, default value: 0x00 */ +#define REG_SPIBURSTSTAT 0x0322 +#define BIT_SPIBURSTSTAT_SPI_HDCPRST BIT(7) +#define BIT_SPIBURSTSTAT_SPI_CBUSRST BIT(6) +#define BIT_SPIBURSTSTAT_SPI_SRST BIT(5) +#define BIT_SPIBURSTSTAT_EMSC_NORMAL_MODE BIT(0) + +/* E-MSC 1st Interrupt, default value: 0x00 */ +#define REG_EMSCINTR 0x0323 +#define BIT_EMSCINTR_EMSC_XFIFO_EMPTY BIT(7) +#define BIT_EMSCINTR_EMSC_XMIT_ACK_TOUT BIT(6) +#define BIT_EMSCINTR_EMSC_RFIFO_READ_ERR BIT(5) +#define BIT_EMSCINTR_EMSC_XFIFO_WRITE_ERR BIT(4) +#define BIT_EMSCINTR_EMSC_COMMA_CHAR_ERR BIT(3) +#define BIT_EMSCINTR_EMSC_XMIT_DONE BIT(2) +#define BIT_EMSCINTR_EMSC_XMIT_GNT_TOUT BIT(1) +#define BIT_EMSCINTR_SPI_DVLD BIT(0) + +/* E-MSC Interrupt Mask, default value: 0x00 */ +#define REG_EMSCINTRMASK 0x0324 + +/* I2C E-MSC XMIT FIFO Write Port, default value: 0x00 */ +#define REG_EMSC_XMIT_WRITE_PORT 0x032a + +/* I2C E-MSC RCV FIFO Write Port, default value: 0x00 */ +#define REG_EMSC_RCV_READ_PORT 0x032b + +/* E-MSC 2nd Interrupt, default value: 0x00 */ +#define REG_EMSCINTR1 0x032c +#define BIT_EMSCINTR1_EMSC_TRAINING_COMMA_ERR BIT(0) + +/* E-MSC Interrupt Mask, default value: 0x00 */ +#define REG_EMSCINTRMASK1 0x032d +#define BIT_EMSCINTRMASK1_EMSC_INTRMASK1_0 BIT(0) + +/* MHL Top Ctl, default value: 0x00 */ +#define REG_MHL_TOP_CTL 0x0330 +#define BIT_MHL_TOP_CTL_MHL3_DOC_SEL BIT(7) +#define BIT_MHL_TOP_CTL_MHL_PP_SEL BIT(6) +#define MSK_MHL_TOP_CTL_IF_TIMING_CTL 0x03 + +/* MHL DataPath 1st Ctl, default value: 0xbc */ +#define REG_MHL_DP_CTL0 0x0331 +#define BIT_MHL_DP_CTL0_DP_OE BIT(7) +#define BIT_MHL_DP_CTL0_TX_OE_OVR BIT(6) +#define MSK_MHL_DP_CTL0_TX_OE 0x3f + +/* MHL DataPath 2nd Ctl, default value: 0xbb */ +#define REG_MHL_DP_CTL1 0x0332 +#define MSK_MHL_DP_CTL1_CK_SWING_CTL 0xf0 +#define MSK_MHL_DP_CTL1_DT_SWING_CTL 0x0f + +/* MHL DataPath 3rd Ctl, default value: 0x2f */ +#define REG_MHL_DP_CTL2 0x0333 +#define BIT_MHL_DP_CTL2_CLK_BYPASS_EN BIT(7) +#define MSK_MHL_DP_CTL2_DAMP_TERM_SEL 0x30 +#define MSK_MHL_DP_CTL2_CK_TERM_SEL 0x0c +#define MSK_MHL_DP_CTL2_DT_TERM_SEL 0x03 + +/* MHL DataPath 4th Ctl, default value: 0x48 */ +#define REG_MHL_DP_CTL3 0x0334 +#define MSK_MHL_DP_CTL3_DT_DRV_VNBC_CTL 0xf0 +#define MSK_MHL_DP_CTL3_DT_DRV_VNB_CTL 0x0f + +/* MHL DataPath 5th Ctl, default value: 0x48 */ +#define REG_MHL_DP_CTL4 0x0335 +#define MSK_MHL_DP_CTL4_CK_DRV_VNBC_CTL 0xf0 +#define MSK_MHL_DP_CTL4_CK_DRV_VNB_CTL 0x0f + +/* MHL 
DataPath 6th Ctl, default value: 0x3f */ +#define REG_MHL_DP_CTL5 0x0336 +#define BIT_MHL_DP_CTL5_RSEN_EN_OVR BIT(7) +#define BIT_MHL_DP_CTL5_RSEN_EN BIT(6) +#define MSK_MHL_DP_CTL5_DAMP_TERM_VGS_CTL 0x30 +#define MSK_MHL_DP_CTL5_CK_TERM_VGS_CTL 0x0c +#define MSK_MHL_DP_CTL5_DT_TERM_VGS_CTL 0x03 + +/* MHL PLL 1st Ctl, default value: 0x05 */ +#define REG_MHL_PLL_CTL0 0x0337 +#define BIT_MHL_PLL_CTL0_AUD_CLK_EN BIT(7) + +#define MSK_MHL_PLL_CTL0_AUD_CLK_RATIO 0x70 +#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_10 0x70 +#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_6 0x60 +#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_4 0x50 +#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_2 0x40 +#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_5 0x30 +#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_3 0x20 +#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_2_PRIME 0x10 +#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_1 0x00 + +#define MSK_MHL_PLL_CTL0_HDMI_CLK_RATIO 0x0c +#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_4X 0x0c +#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_2X 0x08 +#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X 0x04 +#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_HALF_X 0x00 + +#define BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL BIT(1) +#define BIT_MHL_PLL_CTL0_ZONE_MASK_OE BIT(0) + +/* MHL PLL 3rd Ctl, default value: 0x80 */ +#define REG_MHL_PLL_CTL2 0x0339 +#define BIT_MHL_PLL_CTL2_CLKDETECT_EN BIT(7) +#define BIT_MHL_PLL_CTL2_MEAS_FVCO BIT(3) +#define BIT_MHL_PLL_CTL2_PLL_FAST_LOCK BIT(2) +#define MSK_MHL_PLL_CTL2_PLL_LF_SEL 0x03 + +/* MHL CBUS 1st Ctl, default value: 0x12 */ +#define REG_MHL_CBUS_CTL0 0x0340 +#define BIT_MHL_CBUS_CTL0_CBUS_RGND_TEST_MODE BIT(7) + +#define MSK_MHL_CBUS_CTL0_CBUS_RGND_VTH_CTL 0x30 +#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_734 0x00 +#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_747 0x10 +#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_740 0x20 +#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_754 0x30 + +#define MSK_MHL_CBUS_CTL0_CBUS_RES_TEST_SEL 0x0c + +#define MSK_MHL_CBUS_CTL0_CBUS_DRV_SEL 0x03 +#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_WEAKEST 0x00 +#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_WEAK 0x01 +#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONG 0x02 +#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONGEST 0x03 + +/* MHL CBUS 2nd Ctl, default value: 0x03 */ +#define REG_MHL_CBUS_CTL1 0x0341 +#define MSK_MHL_CBUS_CTL1_CBUS_RGND_RES_CTL 0x07 +#define VAL_MHL_CBUS_CTL1_0888_OHM 0x00 +#define VAL_MHL_CBUS_CTL1_1115_OHM 0x04 +#define VAL_MHL_CBUS_CTL1_1378_OHM 0x07 + +/* MHL CoC 1st Ctl, default value: 0xc3 */ +#define REG_MHL_COC_CTL0 0x0342 +#define BIT_MHL_COC_CTL0_COC_BIAS_EN BIT(7) +#define MSK_MHL_COC_CTL0_COC_BIAS_CTL 0x70 +#define MSK_MHL_COC_CTL0_COC_TERM_CTL 0x07 + +/* MHL CoC 2nd Ctl, default value: 0x87 */ +#define REG_MHL_COC_CTL1 0x0343 +#define BIT_MHL_COC_CTL1_COC_EN BIT(7) +#define MSK_MHL_COC_CTL1_COC_DRV_CTL 0x3f + +/* MHL CoC 4th Ctl, default value: 0x00 */ +#define REG_MHL_COC_CTL3 0x0345 +#define BIT_MHL_COC_CTL3_COC_AECHO_EN BIT(0) + +/* MHL CoC 5th Ctl, default value: 0x28 */ +#define REG_MHL_COC_CTL4 0x0346 +#define MSK_MHL_COC_CTL4_COC_IF_CTL 0xf0 +#define MSK_MHL_COC_CTL4_COC_SLEW_CTL 0x0f + +/* MHL CoC 6th Ctl, default value: 0x0d */ +#define REG_MHL_COC_CTL5 0x0347 + +/* MHL DoC 1st Ctl, default value: 0x18 */ +#define REG_MHL_DOC_CTL0 0x0349 +#define BIT_MHL_DOC_CTL0_DOC_RXDATA_EN BIT(7) +#define MSK_MHL_DOC_CTL0_DOC_DM_TERM 0x38 +#define MSK_MHL_DOC_CTL0_DOC_OPMODE 0x06 +#define BIT_MHL_DOC_CTL0_DOC_RXBIAS_EN BIT(0) + +/* MHL DataPath 7th Ctl, default value: 0x2a */ +#define REG_MHL_DP_CTL6 0x0350 +#define BIT_MHL_DP_CTL6_DP_TAP2_SGN BIT(5) 
+#define BIT_MHL_DP_CTL6_DP_TAP2_EN BIT(4) +#define BIT_MHL_DP_CTL6_DP_TAP1_SGN BIT(3) +#define BIT_MHL_DP_CTL6_DP_TAP1_EN BIT(2) +#define BIT_MHL_DP_CTL6_DT_PREDRV_FEEDCAP_EN BIT(1) +#define BIT_MHL_DP_CTL6_DP_PRE_POST_SEL BIT(0) + +/* MHL DataPath 8th Ctl, default value: 0x06 */ +#define REG_MHL_DP_CTL7 0x0351 +#define MSK_MHL_DP_CTL7_DT_DRV_VBIAS_CASCTL 0xf0 +#define MSK_MHL_DP_CTL7_DT_DRV_IREF_CTL 0x0f + +/* Tx Zone Ctl1, default value: 0x00 */ +#define REG_TX_ZONE_CTL1 0x0361 +#define VAL_TX_ZONE_CTL1_TX_ZONE_CTRL_MODE 0x08 + +/* MHL3 Tx Zone Ctl, default value: 0x00 */ +#define REG_MHL3_TX_ZONE_CTL 0x0364 +#define BIT_MHL3_TX_ZONE_CTL_MHL2_INTPLT_ZONE_MANU_EN BIT(7) +#define MSK_MHL3_TX_ZONE_CTL_MHL3_TX_ZONE 0x03 + +#define MSK_TX_ZONE_CTL3_TX_ZONE 0x03 +#define VAL_TX_ZONE_CTL3_TX_ZONE_6GBPS 0x00 +#define VAL_TX_ZONE_CTL3_TX_ZONE_3GBPS 0x01 +#define VAL_TX_ZONE_CTL3_TX_ZONE_1_5GBPS 0x02 + +/* HDCP Polling Control and Status, default value: 0x70 */ +#define REG_HDCP2X_POLL_CS 0x0391 + +#define BIT_HDCP2X_POLL_CS_HDCP2X_MSG_SZ_CLR_OPTION BIT(6) +#define BIT_HDCP2X_POLL_CS_HDCP2X_RPT_READY_CLR_OPTION BIT(5) +#define BIT_HDCP2X_POLL_CS_HDCP2X_REAUTH_REQ_CLR_OPTION BIT(4) +#define MSK_HDCP2X_POLL_CS_ 0x0c +#define BIT_HDCP2X_POLL_CS_HDCP2X_DIS_POLL_GNT BIT(1) +#define BIT_HDCP2X_POLL_CS_HDCP2X_DIS_POLL_EN BIT(0) + +/* HDCP Interrupt 0, default value: 0x00 */ +#define REG_HDCP2X_INTR0 0x0398 + +/* HDCP Interrupt 0 Mask, default value: 0x00 */ +#define REG_HDCP2X_INTR0_MASK 0x0399 + +/* HDCP General Control 0, default value: 0x02 */ +#define REG_HDCP2X_CTRL_0 0x03a0 +#define BIT_HDCP2X_CTRL_0_HDCP2X_ENCRYPT_EN BIT(7) +#define BIT_HDCP2X_CTRL_0_HDCP2X_POLINT_SEL BIT(6) +#define BIT_HDCP2X_CTRL_0_HDCP2X_POLINT_OVR BIT(5) +#define BIT_HDCP2X_CTRL_0_HDCP2X_PRECOMPUTE BIT(4) +#define BIT_HDCP2X_CTRL_0_HDCP2X_HDMIMODE BIT(3) +#define BIT_HDCP2X_CTRL_0_HDCP2X_REPEATER BIT(2) +#define BIT_HDCP2X_CTRL_0_HDCP2X_HDCPTX BIT(1) +#define BIT_HDCP2X_CTRL_0_HDCP2X_EN BIT(0) + +/* HDCP General Control 1, default value: 0x08 */ +#define REG_HDCP2X_CTRL_1 0x03a1 +#define MSK_HDCP2X_CTRL_1_HDCP2X_REAUTH_MSK_3_0 0xf0 +#define BIT_HDCP2X_CTRL_1_HDCP2X_HPD_SW BIT(3) +#define BIT_HDCP2X_CTRL_1_HDCP2X_HPD_OVR BIT(2) +#define BIT_HDCP2X_CTRL_1_HDCP2X_CTL3MSK BIT(1) +#define BIT_HDCP2X_CTRL_1_HDCP2X_REAUTH_SW BIT(0) + +/* HDCP Misc Control, default value: 0x00 */ +#define REG_HDCP2X_MISC_CTRL 0x03a5 +#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_XFER_START BIT(4) +#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_WR_START BIT(3) +#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_WR BIT(2) +#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_RCVID_RD_START BIT(1) +#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_RCVID_RD BIT(0) + +/* HDCP RPT SMNG K, default value: 0x00 */ +#define REG_HDCP2X_RPT_SMNG_K 0x03a6 + +/* HDCP RPT SMNG In, default value: 0x00 */ +#define REG_HDCP2X_RPT_SMNG_IN 0x03a7 + +/* HDCP Auth Status, default value: 0x00 */ +#define REG_HDCP2X_AUTH_STAT 0x03aa + +/* HDCP RPT RCVID Out, default value: 0x00 */ +#define REG_HDCP2X_RPT_RCVID_OUT 0x03ac + +/* HDCP TP1, default value: 0x62 */ +#define REG_HDCP2X_TP1 0x03b4 + +/* HDCP GP Out 0, default value: 0x00 */ +#define REG_HDCP2X_GP_OUT0 0x03c7 + +/* HDCP Repeater RCVR ID 0, default value: 0x00 */ +#define REG_HDCP2X_RPT_RCVR_ID0 0x03d1 + +/* HDCP DDCM Status, default value: 0x00 */ +#define REG_HDCP2X_DDCM_STS 0x03d8 +#define MSK_HDCP2X_DDCM_STS_HDCP2X_DDCM_ERR_STS_3_0 0xf0 +#define MSK_HDCP2X_DDCM_STS_HDCP2X_DDCM_CTL_CS_3_0 0x0f + +/* HDMI2MHL3 Control, default value: 0x0a */ 
+#define REG_M3_CTRL 0x03e0 +#define BIT_M3_CTRL_H2M_SWRST BIT(4) +#define BIT_M3_CTRL_SW_MHL3_SEL BIT(3) +#define BIT_M3_CTRL_M3AV_EN BIT(2) +#define BIT_M3_CTRL_ENC_TMDS BIT(1) +#define BIT_M3_CTRL_MHL3_MASTER_EN BIT(0) + +#define VAL_M3_CTRL_MHL1_2_VALUE (BIT_M3_CTRL_SW_MHL3_SEL \ + | BIT_M3_CTRL_ENC_TMDS) +#define VAL_M3_CTRL_MHL3_VALUE (BIT_M3_CTRL_SW_MHL3_SEL \ + | BIT_M3_CTRL_M3AV_EN \ + | BIT_M3_CTRL_ENC_TMDS \ + | BIT_M3_CTRL_MHL3_MASTER_EN) + +/* HDMI2MHL3 Port0 Control, default value: 0x04 */ +#define REG_M3_P0CTRL 0x03e1 +#define BIT_M3_P0CTRL_MHL3_P0_HDCP_ENC_EN BIT(4) +#define BIT_M3_P0CTRL_MHL3_P0_UNLIMIT_EN BIT(3) +#define BIT_M3_P0CTRL_MHL3_P0_HDCP_EN BIT(2) +#define BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED BIT(1) +#define BIT_M3_P0CTRL_MHL3_P0_PORT_EN BIT(0) + +#define REG_M3_POSTM 0x03e2 +#define MSK_M3_POSTM_RRP_DECODE 0xf8 +#define MSK_M3_POSTM_MHL3_P0_STM_ID 0x07 + +/* HDMI2MHL3 Scramble Control, default value: 0x41 */ +#define REG_M3_SCTRL 0x03e6 +#define MSK_M3_SCTRL_MHL3_SR_LENGTH 0xf0 +#define BIT_M3_SCTRL_MHL3_SCRAMBLER_EN BIT(0) + +/* HSIC Div Ctl, default value: 0x05 */ +#define REG_DIV_CTL_MAIN 0x03f2 +#define MSK_DIV_CTL_MAIN_PRE_DIV_CTL_MAIN 0x1c +#define MSK_DIV_CTL_MAIN_FB_DIV_CTL_MAIN 0x03 + +/* MHL Capability 1st Byte, default value: 0x00 */ +#define REG_MHL_DEVCAP_0 0x0400 + +/* MHL Interrupt 1st Byte, default value: 0x00 */ +#define REG_MHL_INT_0 0x0420 + +/* Device Status 1st byte, default value: 0x00 */ +#define REG_MHL_STAT_0 0x0430 + +/* CBUS Scratch Pad 1st Byte, default value: 0x00 */ +#define REG_MHL_SCRPAD_0 0x0440 + +/* MHL Extended Capability 1st Byte, default value: 0x00 */ +#define REG_MHL_EXTDEVCAP_0 0x0480 + +/* Device Extended Status 1st byte, default value: 0x00 */ +#define REG_MHL_EXTSTAT_0 0x0490 + +/* TPI DTD Byte2, default value: 0x00 */ +#define REG_TPI_DTD_B2 0x0602 + +#define VAL_TPI_QUAN_RANGE_LIMITED 0x01 +#define VAL_TPI_QUAN_RANGE_FULL 0x02 +#define VAL_TPI_FORMAT_RGB 0x00 +#define VAL_TPI_FORMAT_YCBCR444 0x01 +#define VAL_TPI_FORMAT_YCBCR422 0x02 +#define VAL_TPI_FORMAT_INTERNAL_RGB 0x03 +#define VAL_TPI_FORMAT(_fmt, _qr) \ + (VAL_TPI_FORMAT_##_fmt | (VAL_TPI_QUAN_RANGE_##_qr << 2)) + +/* Input Format, default value: 0x00 */ +#define REG_TPI_INPUT 0x0609 +#define BIT_TPI_INPUT_EXTENDEDBITMODE BIT(7) +#define BIT_TPI_INPUT_ENDITHER BIT(6) +#define MSK_TPI_INPUT_INPUT_QUAN_RANGE 0x0c +#define MSK_TPI_INPUT_INPUT_FORMAT 0x03 + +/* Output Format, default value: 0x00 */ +#define REG_TPI_OUTPUT 0x060a +#define BIT_TPI_OUTPUT_CSCMODE709 BIT(4) +#define MSK_TPI_OUTPUT_OUTPUT_QUAN_RANGE 0x0c +#define MSK_TPI_OUTPUT_OUTPUT_FORMAT 0x03 + +/* TPI AVI Check Sum, default value: 0x00 */ +#define REG_TPI_AVI_CHSUM 0x060c + +/* TPI System Control, default value: 0x00 */ +#define REG_TPI_SC 0x061a +#define BIT_TPI_SC_TPI_UPDATE_FLG BIT(7) +#define BIT_TPI_SC_TPI_REAUTH_CTL BIT(6) +#define BIT_TPI_SC_TPI_OUTPUT_MODE_1 BIT(5) +#define BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN BIT(4) +#define BIT_TPI_SC_TPI_AV_MUTE BIT(3) +#define BIT_TPI_SC_DDC_GPU_REQUEST BIT(2) +#define BIT_TPI_SC_DDC_TPI_SW BIT(1) +#define BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI BIT(0) + +/* TPI COPP Query Data, default value: 0x00 */ +#define REG_TPI_COPP_DATA1 0x0629 +#define BIT_TPI_COPP_DATA1_COPP_GPROT BIT(7) +#define BIT_TPI_COPP_DATA1_COPP_LPROT BIT(6) +#define MSK_TPI_COPP_DATA1_COPP_LINK_STATUS 0x30 +#define VAL_TPI_COPP_LINK_STATUS_NORMAL 0x00 +#define VAL_TPI_COPP_LINK_STATUS_LINK_LOST 0x10 +#define VAL_TPI_COPP_LINK_STATUS_RENEGOTIATION_REQ 0x20 +#define 
VAL_TPI_COPP_LINK_STATUS_LINK_SUSPENDED 0x30 +#define BIT_TPI_COPP_DATA1_COPP_HDCP_REP BIT(3) +#define BIT_TPI_COPP_DATA1_COPP_CONNTYPE_0 BIT(2) +#define BIT_TPI_COPP_DATA1_COPP_PROTYPE BIT(1) +#define BIT_TPI_COPP_DATA1_COPP_CONNTYPE_1 BIT(0) + +/* TPI COPP Control Data, default value: 0x00 */ +#define REG_TPI_COPP_DATA2 0x062a +#define BIT_TPI_COPP_DATA2_INTR_ENCRYPTION BIT(5) +#define BIT_TPI_COPP_DATA2_KSV_FORWARD BIT(4) +#define BIT_TPI_COPP_DATA2_INTERM_RI_CHECK_EN BIT(3) +#define BIT_TPI_COPP_DATA2_DOUBLE_RI_CHECK BIT(2) +#define BIT_TPI_COPP_DATA2_DDC_SHORT_RI_RD BIT(1) +#define BIT_TPI_COPP_DATA2_COPP_PROTLEVEL BIT(0) + +/* TPI Interrupt Enable, default value: 0x00 */ +#define REG_TPI_INTR_EN 0x063c + +/* TPI Interrupt Status Low Byte, default value: 0x00 */ +#define REG_TPI_INTR_ST0 0x063d +#define BIT_TPI_INTR_ST0_TPI_AUTH_CHNGE_STAT BIT(7) +#define BIT_TPI_INTR_ST0_TPI_V_RDY_STAT BIT(6) +#define BIT_TPI_INTR_ST0_TPI_COPP_CHNGE_STAT BIT(5) +#define BIT_TPI_INTR_ST0_KSV_FIFO_FIRST_STAT BIT(3) +#define BIT_TPI_INTR_ST0_READ_BKSV_BCAPS_DONE_STAT BIT(2) +#define BIT_TPI_INTR_ST0_READ_BKSV_BCAPS_ERR_STAT BIT(1) +#define BIT_TPI_INTR_ST0_READ_BKSV_ERR_STAT BIT(0) + +/* TPI DS BCAPS Status, default value: 0x00 */ +#define REG_TPI_DS_BCAPS 0x0644 + +/* TPI BStatus1, default value: 0x00 */ +#define REG_TPI_BSTATUS1 0x0645 +#define BIT_TPI_BSTATUS1_DS_DEV_EXCEED BIT(7) +#define MSK_TPI_BSTATUS1_DS_DEV_CNT 0x7f + +/* TPI BStatus2, default value: 0x10 */ +#define REG_TPI_BSTATUS2 0x0646 +#define MSK_TPI_BSTATUS2_DS_BSTATUS 0xe0 +#define BIT_TPI_BSTATUS2_DS_HDMI_MODE BIT(4) +#define BIT_TPI_BSTATUS2_DS_CASC_EXCEED BIT(3) +#define MSK_TPI_BSTATUS2_DS_DEPTH 0x07 + +/* TPI HW Optimization Control #3, default value: 0x00 */ +#define REG_TPI_HW_OPT3 0x06bb +#define BIT_TPI_HW_OPT3_DDC_DEBUG BIT(7) +#define BIT_TPI_HW_OPT3_RI_CHECK_SKIP BIT(3) +#define BIT_TPI_HW_OPT3_TPI_DDC_BURST_MODE BIT(2) +#define MSK_TPI_HW_OPT3_TPI_DDC_REQ_LEVEL 0x03 + +/* TPI Info Frame Select, default value: 0x00 */ +#define REG_TPI_INFO_FSEL 0x06bf +#define BIT_TPI_INFO_FSEL_TPI_INFO_EN BIT(7) +#define BIT_TPI_INFO_FSEL_TPI_INFO_RPT BIT(6) +#define BIT_TPI_INFO_FSEL_TPI_INFO_READ_FLAG BIT(5) +#define MSK_TPI_INFO_FSEL_TPI_INFO_SEL 0x07 + +/* TPI Info Byte #0, default value: 0x00 */ +#define REG_TPI_INFO_B0 0x06c0 + +/* CoC Status, default value: 0x00 */ +#define REG_COC_STAT_0 0x0700 +#define REG_COC_STAT_1 0x0701 +#define REG_COC_STAT_2 0x0702 +#define REG_COC_STAT_3 0x0703 +#define REG_COC_STAT_4 0x0704 +#define REG_COC_STAT_5 0x0705 + +/* CoC 1st Ctl, default value: 0x40 */ +#define REG_COC_CTL0 0x0710 + +/* CoC 2nd Ctl, default value: 0x0a */ +#define REG_COC_CTL1 0x0711 +#define MSK_COC_CTL1_COC_CTRL1_7_6 0xc0 +#define MSK_COC_CTL1_COC_CTRL1_5_0 0x3f + +/* CoC 3rd Ctl, default value: 0x14 */ +#define REG_COC_CTL2 0x0712 +#define MSK_COC_CTL2_COC_CTRL2_7_6 0xc0 +#define MSK_COC_CTL2_COC_CTRL2_5_0 0x3f + +/* CoC 4th Ctl, default value: 0x40 */ +#define REG_COC_CTL3 0x0713 +#define BIT_COC_CTL3_COC_CTRL3_7 BIT(7) +#define MSK_COC_CTL3_COC_CTRL3_6_0 0x7f + +/* CoC 7th Ctl, default value: 0x00 */ +#define REG_COC_CTL6 0x0716 +#define BIT_COC_CTL6_COC_CTRL6_7 BIT(7) +#define BIT_COC_CTL6_COC_CTRL6_6 BIT(6) +#define MSK_COC_CTL6_COC_CTRL6_5_0 0x3f + +/* CoC 8th Ctl, default value: 0x06 */ +#define REG_COC_CTL7 0x0717 +#define BIT_COC_CTL7_COC_CTRL7_7 BIT(7) +#define BIT_COC_CTL7_COC_CTRL7_6 BIT(6) +#define BIT_COC_CTL7_COC_CTRL7_5 BIT(5) +#define MSK_COC_CTL7_COC_CTRL7_4_3 0x18 +#define MSK_COC_CTL7_COC_CTRL7_2_0 0x07 + 
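+/*
+ * Naming convention used throughout this header: REG_* are register
+ * addresses, BIT_* are single-bit masks built with BIT() (except the
+ * BIT_FAST_INTR_STAT_* bit indices noted earlier), MSK_* are multi-bit
+ * field masks whose suffix names the covered bit range, and VAL_* are
+ * values for those fields. For example, the CTRL7[4:3] field above
+ * would be extracted as:
+ *
+ *	(sii8620_readb(ctx, REG_COC_CTL7) & MSK_COC_CTL7_COC_CTRL7_4_3) >> 3
+ */
+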
+/* CoC 10th Ctl, default value: 0x00 */ +#define REG_COC_CTL9 0x0719 + +/* CoC 11th Ctl, default value: 0x00 */ +#define REG_COC_CTLA 0x071a + +/* CoC 12th Ctl, default value: 0x00 */ +#define REG_COC_CTLB 0x071b + +/* CoC 13th Ctl, default value: 0x0f */ +#define REG_COC_CTLC 0x071c + +/* CoC 14th Ctl, default value: 0x0a */ +#define REG_COC_CTLD 0x071d +#define BIT_COC_CTLD_COC_CTRLD_7 BIT(7) +#define MSK_COC_CTLD_COC_CTRLD_6_0 0x7f + +/* CoC 15th Ctl, default value: 0x0a */ +#define REG_COC_CTLE 0x071e +#define BIT_COC_CTLE_COC_CTRLE_7 BIT(7) +#define MSK_COC_CTLE_COC_CTRLE_6_0 0x7f + +/* CoC 16th Ctl, default value: 0x00 */ +#define REG_COC_CTLF 0x071f +#define MSK_COC_CTLF_COC_CTRLF_7_3 0xf8 +#define MSK_COC_CTLF_COC_CTRLF_2_0 0x07 + +/* CoC 18th Ctl, default value: 0x32 */ +#define REG_COC_CTL11 0x0721 +#define MSK_COC_CTL11_COC_CTRL11_7_4 0xf0 +#define MSK_COC_CTL11_COC_CTRL11_3_0 0x0f + +/* CoC 21st Ctl, default value: 0x00 */ +#define REG_COC_CTL14 0x0724 +#define MSK_COC_CTL14_COC_CTRL14_7_4 0xf0 +#define MSK_COC_CTL14_COC_CTRL14_3_0 0x0f + +/* CoC 22nd Ctl, default value: 0x00 */ +#define REG_COC_CTL15 0x0725 +#define BIT_COC_CTL15_COC_CTRL15_7 BIT(7) +#define MSK_COC_CTL15_COC_CTRL15_6_4 0x70 +#define MSK_COC_CTL15_COC_CTRL15_3_0 0x0f + +/* CoC Interrupt, default value: 0x00 */ +#define REG_COC_INTR 0x0726 + +/* CoC Interrupt Mask, default value: 0x00 */ +#define REG_COC_INTR_MASK 0x0727 +#define BIT_COC_PLL_LOCK_STATUS_CHANGE BIT(0) +#define BIT_COC_CALIBRATION_DONE BIT(1) + +/* CoC Misc Ctl, default value: 0x00 */ +#define REG_COC_MISC_CTL0 0x0728 +#define BIT_COC_MISC_CTL0_FSM_MON BIT(7) + +/* CoC 24th Ctl, default value: 0x00 */ +#define REG_COC_CTL17 0x072a +#define MSK_COC_CTL17_COC_CTRL17_7_4 0xf0 +#define MSK_COC_CTL17_COC_CTRL17_3_0 0x0f + +/* CoC 25th Ctl, default value: 0x00 */ +#define REG_COC_CTL18 0x072b +#define MSK_COC_CTL18_COC_CTRL18_7_4 0xf0 +#define MSK_COC_CTL18_COC_CTRL18_3_0 0x0f + +/* CoC 26th Ctl, default value: 0x00 */ +#define REG_COC_CTL19 0x072c +#define MSK_COC_CTL19_COC_CTRL19_7_4 0xf0 +#define MSK_COC_CTL19_COC_CTRL19_3_0 0x0f + +/* CoC 27th Ctl, default value: 0x00 */ +#define REG_COC_CTL1A 0x072d +#define MSK_COC_CTL1A_COC_CTRL1A_7_2 0xfc +#define MSK_COC_CTL1A_COC_CTRL1A_1_0 0x03 + +/* DoC 9th Status, default value: 0x00 */ +#define REG_DOC_STAT_8 0x0740 + +/* DoC 10th Status, default value: 0x00 */ +#define REG_DOC_STAT_9 0x0741 + +/* DoC 5th CFG, default value: 0x00 */ +#define REG_DOC_CFG4 0x074e +#define MSK_DOC_CFG4_DBG_STATE_DOC_FSM 0x0f + +/* DoC 1st Ctl, default value: 0x40 */ +#define REG_DOC_CTL0 0x0751 + +/* DoC 7th Ctl, default value: 0x00 */ +#define REG_DOC_CTL6 0x0757 +#define BIT_DOC_CTL6_DOC_CTRL6_7 BIT(7) +#define BIT_DOC_CTL6_DOC_CTRL6_6 BIT(6) +#define MSK_DOC_CTL6_DOC_CTRL6_5_4 0x30 +#define MSK_DOC_CTL6_DOC_CTRL6_3_0 0x0f + +/* DoC 8th Ctl, default value: 0x00 */ +#define REG_DOC_CTL7 0x0758 +#define BIT_DOC_CTL7_DOC_CTRL7_7 BIT(7) +#define BIT_DOC_CTL7_DOC_CTRL7_6 BIT(6) +#define BIT_DOC_CTL7_DOC_CTRL7_5 BIT(5) +#define MSK_DOC_CTL7_DOC_CTRL7_4_3 0x18 +#define MSK_DOC_CTL7_DOC_CTRL7_2_0 0x07 + +/* DoC 9th Ctl, default value: 0x00 */ +#define REG_DOC_CTL8 0x076c +#define BIT_DOC_CTL8_DOC_CTRL8_7 BIT(7) +#define MSK_DOC_CTL8_DOC_CTRL8_6_4 0x70 +#define MSK_DOC_CTL8_DOC_CTRL8_3_2 0x0c +#define MSK_DOC_CTL8_DOC_CTRL8_1_0 0x03 + +/* DoC 10th Ctl, default value: 0x00 */ +#define REG_DOC_CTL9 0x076d + +/* DoC 11th Ctl, default value: 0x00 */ +#define REG_DOC_CTLA 0x076e + +/* DoC 15th Ctl, default value: 0x00 */ +#define 
REG_DOC_CTLE 0x0772 +#define BIT_DOC_CTLE_DOC_CTRLE_7 BIT(7) +#define BIT_DOC_CTLE_DOC_CTRLE_6 BIT(6) +#define MSK_DOC_CTLE_DOC_CTRLE_5_4 0x30 +#define MSK_DOC_CTLE_DOC_CTRLE_3_0 0x0f + +/* Interrupt Mask 1st, default value: 0x00 */ +#define REG_MHL_INT_0_MASK 0x0580 + +/* Interrupt Mask 2nd, default value: 0x00 */ +#define REG_MHL_INT_1_MASK 0x0581 + +/* Interrupt Mask 3rd, default value: 0x00 */ +#define REG_MHL_INT_2_MASK 0x0582 + +/* Interrupt Mask 4th, default value: 0x00 */ +#define REG_MHL_INT_3_MASK 0x0583 + +/* MDT Receive Time Out, default value: 0x00 */ +#define REG_MDT_RCV_TIMEOUT 0x0584 + +/* MDT Transmit Time Out, default value: 0x00 */ +#define REG_MDT_XMIT_TIMEOUT 0x0585 + +/* MDT Receive Control, default value: 0x00 */ +#define REG_MDT_RCV_CTRL 0x0586 +#define BIT_MDT_RCV_CTRL_MDT_RCV_EN BIT(7) +#define BIT_MDT_RCV_CTRL_MDT_DELAY_RCV_EN BIT(6) +#define BIT_MDT_RCV_CTRL_MDT_RFIFO_OVER_WR_EN BIT(4) +#define BIT_MDT_RCV_CTRL_MDT_XFIFO_OVER_WR_EN BIT(3) +#define BIT_MDT_RCV_CTRL_MDT_DISABLE BIT(2) +#define BIT_MDT_RCV_CTRL_MDT_RFIFO_CLR_ALL BIT(1) +#define BIT_MDT_RCV_CTRL_MDT_RFIFO_CLR_CUR BIT(0) + +/* MDT Receive Read Port, default value: 0x00 */ +#define REG_MDT_RCV_READ_PORT 0x0587 + +/* MDT Transmit Control, default value: 0x70 */ +#define REG_MDT_XMIT_CTRL 0x0588 +#define BIT_MDT_XMIT_CTRL_MDT_XMIT_EN BIT(7) +#define BIT_MDT_XMIT_CTRL_MDT_XMIT_CMD_MERGE_EN BIT(6) +#define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_BURST_LEN BIT(5) +#define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_AID BIT(4) +#define BIT_MDT_XMIT_CTRL_MDT_XMIT_SINGLE_RUN_EN BIT(3) +#define BIT_MDT_XMIT_CTRL_MDT_CLR_ABORT_WAIT BIT(2) +#define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_ALL BIT(1) +#define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_CUR BIT(0) + +/* MDT Receive WRITE Port, default value: 0x00 */ +#define REG_MDT_XMIT_WRITE_PORT 0x0589 + +/* MDT RFIFO Status, default value: 0x00 */ +#define REG_MDT_RFIFO_STAT 0x058a +#define MSK_MDT_RFIFO_STAT_MDT_RFIFO_CNT 0xe0 +#define MSK_MDT_RFIFO_STAT_MDT_RFIFO_CUR_BYTE_CNT 0x1f + +/* MDT XFIFO Status, default value: 0x80 */ +#define REG_MDT_XFIFO_STAT 0x058b +#define MSK_MDT_XFIFO_STAT_MDT_XFIFO_LEVEL_AVAIL 0xe0 +#define BIT_MDT_XFIFO_STAT_MDT_XMIT_PRE_HS_EN BIT(4) +#define MSK_MDT_XFIFO_STAT_MDT_WRITE_BURST_LEN 0x0f + +/* MDT Interrupt 0, default value: 0x0c */ +#define REG_MDT_INT_0 0x058c +#define BIT_MDT_RFIFO_DATA_RDY BIT(0) +#define BIT_MDT_IDLE_AFTER_HAWB_DISABLE BIT(2) +#define BIT_MDT_XFIFO_EMPTY BIT(3) + +/* MDT Interrupt 0 Mask, default value: 0x00 */ +#define REG_MDT_INT_0_MASK 0x058d + +/* MDT Interrupt 1, default value: 0x00 */ +#define REG_MDT_INT_1 0x058e +#define BIT_MDT_RCV_TIMEOUT BIT(0) +#define BIT_MDT_RCV_SM_ABORT_PKT_RCVD BIT(1) +#define BIT_MDT_RCV_SM_ERROR BIT(2) +#define BIT_MDT_XMIT_TIMEOUT BIT(5) +#define BIT_MDT_XMIT_SM_ABORT_PKT_RCVD BIT(6) +#define BIT_MDT_XMIT_SM_ERROR BIT(7) + +/* MDT Interrupt 1 Mask, default value: 0x00 */ +#define REG_MDT_INT_1_MASK 0x058f + +/* CBUS Vendor ID, default value: 0x01 */ +#define REG_CBUS_VENDOR_ID 0x0590 + +/* CBUS Connection Status, default value: 0x00 */ +#define REG_CBUS_STATUS 0x0591 +#define BIT_CBUS_STATUS_MHL_CABLE_PRESENT BIT(4) +#define BIT_CBUS_STATUS_MSC_HB_SUCCESS BIT(3) +#define BIT_CBUS_STATUS_CBUS_HPD BIT(2) +#define BIT_CBUS_STATUS_MHL_MODE BIT(1) +#define BIT_CBUS_STATUS_CBUS_CONNECTED BIT(0) + +/* CBUS Interrupt 1st, default value: 0x00 */ +#define REG_CBUS_INT_0 0x0592 +#define BIT_CBUS_MSC_MT_DONE_NACK BIT(7) +#define BIT_CBUS_MSC_MR_SET_INT BIT(6) +#define BIT_CBUS_MSC_MR_WRITE_BURST BIT(5) +#define 
BIT_CBUS_MSC_MR_MSC_MSG BIT(4) +#define BIT_CBUS_MSC_MR_WRITE_STAT BIT(3) +#define BIT_CBUS_HPD_CHG BIT(2) +#define BIT_CBUS_MSC_MT_DONE BIT(1) +#define BIT_CBUS_CNX_CHG BIT(0) + +/* CBUS Interrupt Mask 1st, default value: 0x00 */ +#define REG_CBUS_INT_0_MASK 0x0593 + +/* CBUS Interrupt 2nd, default value: 0x00 */ +#define REG_CBUS_INT_1 0x0594 +#define BIT_CBUS_CMD_ABORT BIT(6) +#define BIT_CBUS_MSC_ABORT_RCVD BIT(3) +#define BIT_CBUS_DDC_ABORT BIT(2) +#define BIT_CBUS_CEC_ABORT BIT(1) + +/* CBUS Interrupt Mask 2nd, default value: 0x00 */ +#define REG_CBUS_INT_1_MASK 0x0595 + +/* CBUS DDC Abort Interrupt, default value: 0x00 */ +#define REG_DDC_ABORT_INT 0x0598 + +/* CBUS DDC Abort Interrupt Mask, default value: 0x00 */ +#define REG_DDC_ABORT_INT_MASK 0x0599 + +/* CBUS MSC Requester Abort Interrupt, default value: 0x00 */ +#define REG_MSC_MT_ABORT_INT 0x059a + +/* CBUS MSC Requester Abort Interrupt Mask, default value: 0x00 */ +#define REG_MSC_MT_ABORT_INT_MASK 0x059b + +/* CBUS MSC Responder Abort Interrupt, default value: 0x00 */ +#define REG_MSC_MR_ABORT_INT 0x059c + +/* CBUS MSC Responder Abort Interrupt Mask, default value: 0x00 */ +#define REG_MSC_MR_ABORT_INT_MASK 0x059d + +/* CBUS RX DISCOVERY interrupt, default value: 0x00 */ +#define REG_CBUS_RX_DISC_INT0 0x059e + +/* CBUS RX DISCOVERY Interrupt Mask, default value: 0x00 */ +#define REG_CBUS_RX_DISC_INT0_MASK 0x059f + +/* CBUS_Link_Layer Control #8, default value: 0x00 */ +#define REG_CBUS_LINK_CTRL_8 0x05a7 + +/* MDT State Machine Status, default value: 0x00 */ +#define REG_MDT_SM_STAT 0x05b5 +#define MSK_MDT_SM_STAT_MDT_RCV_STATE 0xf0 +#define MSK_MDT_SM_STAT_MDT_XMIT_STATE 0x0f + +/* CBUS MSC command trigger, default value: 0x00 */ +#define REG_MSC_COMMAND_START 0x05b8 +#define BIT_MSC_COMMAND_START_DEBUG BIT(5) +#define BIT_MSC_COMMAND_START_WRITE_BURST BIT(4) +#define BIT_MSC_COMMAND_START_WRITE_STAT BIT(3) +#define BIT_MSC_COMMAND_START_READ_DEVCAP BIT(2) +#define BIT_MSC_COMMAND_START_MSC_MSG BIT(1) +#define BIT_MSC_COMMAND_START_PEER BIT(0) + +/* CBUS MSC Command/Offset, default value: 0x00 */ +#define REG_MSC_CMD_OR_OFFSET 0x05b9 + +/* CBUS MSC Transmit Data */ +#define REG_MSC_1ST_TRANSMIT_DATA 0x05ba +#define REG_MSC_2ND_TRANSMIT_DATA 0x05bb + +/* CBUS MSC Requester Received Data */ +#define REG_MSC_MT_RCVD_DATA0 0x05bc +#define REG_MSC_MT_RCVD_DATA1 0x05bd + +/* CBUS MSC Responder MSC_MSG Received Data */ +#define REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA 0x05bf +#define REG_MSC_MR_MSC_MSG_RCVD_2ND_DATA 0x05c0 + +/* CBUS MSC Heartbeat Control, default value: 0x27 */ +#define REG_MSC_HEARTBEAT_CTRL 0x05c4 +#define BIT_MSC_HEARTBEAT_CTRL_MSC_HB_EN BIT(7) +#define MSK_MSC_HEARTBEAT_CTRL_MSC_HB_FAIL_LIMIT 0x70 +#define MSK_MSC_HEARTBEAT_CTRL_MSC_HB_PERIOD_MSB 0x0f + +/* CBUS MSC Compatibility Control, default value: 0x02 */ +#define REG_CBUS_MSC_COMPAT_CTRL 0x05c7 +#define BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN BIT(7) +#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_MSC_ON_CBUS BIT(6) +#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_DDC_ON_CBUS BIT(5) +#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_GET_DDC_ERRORCODE BIT(3) +#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_GET_VS1_ERRORCODE BIT(2) + +/* CBUS3 Converter Control, default value: 0x24 */ +#define REG_CBUS3_CNVT 0x05dc +#define MSK_CBUS3_CNVT_CBUS3_RETRYLMT 0xf0 +#define MSK_CBUS3_CNVT_CBUS3_PEERTOUT_SEL 0x0c +#define BIT_CBUS3_CNVT_TEARCBUS_EN BIT(1) +#define BIT_CBUS3_CNVT_CBUS3CNVT_EN BIT(0) + +/* Discovery Control1, default value: 0x24 */ +#define REG_DISC_CTRL1 0x05e0 +#define 
BIT_DISC_CTRL1_CBUS_INTR_EN BIT(7) +#define BIT_DISC_CTRL1_HB_ONLY BIT(6) +#define MSK_DISC_CTRL1_DISC_ATT 0x30 +#define MSK_DISC_CTRL1_DISC_CYC 0x0c +#define BIT_DISC_CTRL1_DISC_EN BIT(0) + +#define VAL_PUP_OFF 0 +#define VAL_PUP_20K 1 +#define VAL_PUP_5K 2 + +/* Discovery Control4, default value: 0x80 */ +#define REG_DISC_CTRL4 0x05e3 +#define MSK_DISC_CTRL4_CBUSDISC_PUP_SEL 0xc0 +#define MSK_DISC_CTRL4_CBUSIDLE_PUP_SEL 0x30 +#define VAL_DISC_CTRL4(pup_disc, pup_idle) (((pup_disc) << 6) | (pup_idle << 4)) + +/* Discovery Control5, default value: 0x03 */ +#define REG_DISC_CTRL5 0x05e4 +#define BIT_DISC_CTRL5_DSM_OVRIDE BIT(3) +#define MSK_DISC_CTRL5_CBUSMHL_PUP_SEL 0x03 + +/* Discovery Control8, default value: 0x81 */ +#define REG_DISC_CTRL8 0x05e7 +#define BIT_DISC_CTRL8_NOMHLINT_CLR_BYPASS BIT(7) +#define BIT_DISC_CTRL8_DELAY_CBUS_INTR_EN BIT(0) + +/* Discovery Control9, default value: 0x54 */ +#define REG_DISC_CTRL9 0x05e8 +#define BIT_DISC_CTRL9_MHL3_RSEN_BYP BIT(7) +#define BIT_DISC_CTRL9_MHL3DISC_EN BIT(6) +#define BIT_DISC_CTRL9_WAKE_DRVFLT BIT(4) +#define BIT_DISC_CTRL9_NOMHL_EST BIT(3) +#define BIT_DISC_CTRL9_DISC_PULSE_PROCEED BIT(2) +#define BIT_DISC_CTRL9_WAKE_PULSE_BYPASS BIT(1) +#define BIT_DISC_CTRL9_VBUS_OUTPUT_CAPABILITY_SRC BIT(0) + +/* Discovery Status1, default value: 0x00 */ +#define REG_DISC_STAT1 0x05eb +#define BIT_DISC_STAT1_PSM_OVRIDE BIT(5) +#define MSK_DISC_STAT1_DISC_SM 0x0f + +/* Discovery Status2, default value: 0x00 */ +#define REG_DISC_STAT2 0x05ec +#define BIT_DISC_STAT2_CBUS_OE_POL BIT(6) +#define BIT_DISC_STAT2_CBUS_SATUS BIT(5) +#define BIT_DISC_STAT2_RSEN BIT(4) + +#define MSK_DISC_STAT2_MHL_VRSN 0x0c +#define VAL_DISC_STAT2_DEFAULT 0x00 +#define VAL_DISC_STAT2_MHL1_2 0x04 +#define VAL_DISC_STAT2_MHL3 0x08 +#define VAL_DISC_STAT2_RESERVED 0x0c + +#define MSK_DISC_STAT2_RGND 0x03 +#define VAL_RGND_OPEN 0x00 +#define VAL_RGND_2K 0x01 +#define VAL_RGND_1K 0x02 +#define VAL_RGND_SHORT 0x03 + +/* Interrupt CBUS_reg1 INTR0, default value: 0x00 */ +#define REG_CBUS_DISC_INTR0 0x05ed +#define BIT_RGND_READY_INT BIT(6) +#define BIT_CBUS_MHL12_DISCON_INT BIT(5) +#define BIT_CBUS_MHL3_DISCON_INT BIT(4) +#define BIT_NOT_MHL_EST_INT BIT(3) +#define BIT_MHL_EST_INT BIT(2) +#define BIT_MHL3_EST_INT BIT(1) +#define VAL_CBUS_MHL_DISCON (BIT_CBUS_MHL12_DISCON_INT \ + | BIT_CBUS_MHL3_DISCON_INT \ + | BIT_NOT_MHL_EST_INT) + +/* Interrupt CBUS_reg1 INTR0 Mask, default value: 0x00 */ +#define REG_CBUS_DISC_INTR0_MASK 0x05ee + +#endif /* __SIL_SII8620_H__ */ diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index f81706387889..c32fb3c1d6f0 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -705,8 +705,7 @@ int drm_atomic_plane_set_property(struct drm_plane *plane, state->src_w = val; } else if (property == config->prop_src_h) { state->src_h = val; - } else if (property == config->rotation_property || - property == plane->rotation_property) { + } else if (property == plane->rotation_property) { if (!is_power_of_2(val & DRM_ROTATE_MASK)) return -EINVAL; state->rotation = val; @@ -766,8 +765,7 @@ drm_atomic_plane_get_property(struct drm_plane *plane, *val = state->src_w; } else if (property == config->prop_src_h) { *val = state->src_h; - } else if (property == config->rotation_property || - property == plane->rotation_property) { + } else if (property == plane->rotation_property) { *val = state->rotation; } else if (property == plane->zpos_property) { *val = state->zpos; @@ -1465,7 +1463,7 @@ 
EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
 static struct drm_pending_vblank_event *create_vblank_event(
 		struct drm_device *dev, struct drm_file *file_priv,
-		struct fence *fence, uint64_t user_data)
+		struct dma_fence *fence, uint64_t user_data)
 {
 	struct drm_pending_vblank_event *e = NULL;
 	int ret;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index f9362760bfb2..75ad01d595fd 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -30,7 +30,7 @@
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_atomic_helper.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>

 #include "drm_crtc_internal.h"

@@ -1017,7 +1017,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
  * drm_atomic_helper_swap_state() so it uses the current plane state (and
  * just uses the atomic state to find the changed planes)
  *
- * Returns zero if success or < 0 if fence_wait() fails.
+ * Returns zero if success or < 0 if dma_fence_wait() fails.
  */
 int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
 				      struct drm_atomic_state *state,
@@ -1041,11 +1041,11 @@ int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
 		 * still interrupt the operation. Instead of blocking until the
 		 * timer expires, make the wait interruptible.
 		 */
-		ret = fence_wait(plane_state->fence, pre_swap);
+		ret = dma_fence_wait(plane_state->fence, pre_swap);
 		if (ret)
 			return ret;

-		fence_put(plane_state->fence);
+		dma_fence_put(plane_state->fence);
 		plane_state->fence = NULL;
 	}
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index e52aece30900..1f2412c7ccfd 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -89,7 +89,7 @@
  * On top of this basic transformation additional properties can be exposed by
  * the driver:
  *
- * - Rotation is set up with drm_mode_create_rotation_property(). It adds a
+ * - Rotation is set up with drm_plane_create_rotation_property(). It adds a
  * rotation and reflection step between the source and destination rectangles.
  * Without this property the rectangle is only scaled, but not rotated or
  * reflected.
@@ -105,18 +105,12 @@
  */

 /**
- * drm_mode_create_rotation_property - create a new rotation property
- * @dev: DRM device
+ * drm_plane_create_rotation_property - create a new rotation property
+ * @plane: drm plane
+ * @rotation: initial value of the rotation property
  * @supported_rotations: bitmask of supported rotations and reflections
  *
  * This creates a new property with the selected support for transformations.
- * The resulting property should be stored in @rotation_property in
- * &drm_mode_config. It then must be attached to each plane which supports
- * rotations using drm_object_attach_property().
- *
- * FIXME: Probably better if the rotation property is created on each plane,
- * like the zpos property. Otherwise it's not possible to allow different
- * rotation modes on different planes.
  *
  * Since a rotation by 180° is the same as reflecting both along the x
  * and the y axis the rotation property is somewhat redundant. Drivers can use
@@ -144,24 +138,6 @@
  * rotation. After reflection, the rotation is applied to the image sampled from
  * the source rectangle, before scaling it to fit the destination rectangle.
*/ -struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev, - unsigned int supported_rotations) -{ - static const struct drm_prop_enum_list props[] = { - { __builtin_ffs(DRM_ROTATE_0) - 1, "rotate-0" }, - { __builtin_ffs(DRM_ROTATE_90) - 1, "rotate-90" }, - { __builtin_ffs(DRM_ROTATE_180) - 1, "rotate-180" }, - { __builtin_ffs(DRM_ROTATE_270) - 1, "rotate-270" }, - { __builtin_ffs(DRM_REFLECT_X) - 1, "reflect-x" }, - { __builtin_ffs(DRM_REFLECT_Y) - 1, "reflect-y" }, - }; - - return drm_property_create_bitmask(dev, 0, "rotation", - props, ARRAY_SIZE(props), - supported_rotations); -} -EXPORT_SYMBOL(drm_mode_create_rotation_property); - int drm_plane_create_rotation_property(struct drm_plane *plane, unsigned int rotation, unsigned int supported_rotations) diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c index 488355bdafb9..e02563966271 100644 --- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c +++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c @@ -142,6 +142,11 @@ static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN]) sizeof(dp_dual_mode_hdmi_id)) == 0; } +static bool is_type1_adaptor(uint8_t adaptor_id) +{ + return adaptor_id == 0 || adaptor_id == 0xff; +} + static bool is_type2_adaptor(uint8_t adaptor_id) { return adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 | @@ -193,6 +198,8 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter) */ ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID, hdmi_id, sizeof(hdmi_id)); + DRM_DEBUG_KMS("DP dual mode HDMI ID: %*pE (err %zd)\n", + ret ? 0 : (int)sizeof(hdmi_id), hdmi_id, ret); if (ret) return DRM_DP_DUAL_MODE_UNKNOWN; @@ -210,6 +217,8 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter) */ ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID, &adaptor_id, sizeof(adaptor_id)); + DRM_DEBUG_KMS("DP dual mode adaptor ID: %02x (err %zd)\n", + adaptor_id, ret); if (ret == 0) { if (is_lspcon_adaptor(hdmi_id, adaptor_id)) return DRM_DP_DUAL_MODE_LSPCON; @@ -219,6 +228,15 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter) else return DRM_DP_DUAL_MODE_TYPE2_DVI; } + /* + * If neither a proper type 1 ID nor a broken type 1 adaptor + * as described above, assume type 1, but let the user know + * that we may have misdetected the type. + */ + if (!is_type1_adaptor(adaptor_id) && adaptor_id != hdmi_id[0]) + DRM_ERROR("Unexpected DP dual mode adaptor ID %02x\n", + adaptor_id); + } if (is_hdmi_adaptor(hdmi_id)) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 95de47ba1e77..9506933b41cd 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1260,6 +1260,34 @@ drm_do_probe_ddc_edid(void *data, u8 *buf, unsigned int block, size_t len) return ret == xfers ? 
0 : -1; } +static void connector_bad_edid(struct drm_connector *connector, + u8 *edid, int num_blocks) +{ + int i; + + if (connector->bad_edid_counter++ && !(drm_debug & DRM_UT_KMS)) + return; + + dev_warn(connector->dev->dev, + "%s: EDID is invalid:\n", + connector->name); + for (i = 0; i < num_blocks; i++) { + u8 *block = edid + i * EDID_LENGTH; + char prefix[20]; + + if (drm_edid_is_zero(block, EDID_LENGTH)) + sprintf(prefix, "\t[%02x] ZERO ", i); + else if (!drm_edid_block_valid(block, i, false, NULL)) + sprintf(prefix, "\t[%02x] BAD ", i); + else + sprintf(prefix, "\t[%02x] GOOD ", i); + + print_hex_dump(KERN_WARNING, + prefix, DUMP_PREFIX_NONE, 16, 1, + block, EDID_LENGTH, false); + } +} + /** * drm_do_get_edid - get EDID data using a custom EDID block read function * @connector: connector we're probing @@ -1283,7 +1311,6 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, { int i, j = 0, valid_extensions = 0; u8 *edid, *new; - bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS); if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) return NULL; @@ -1292,7 +1319,7 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, for (i = 0; i < 4; i++) { if (get_edid_block(data, edid, 0, EDID_LENGTH)) goto out; - if (drm_edid_block_valid(edid, 0, print_bad_edid, + if (drm_edid_block_valid(edid, 0, false, &connector->edid_corrupt)) break; if (i == 0 && drm_edid_is_zero(edid, EDID_LENGTH)) { @@ -1304,54 +1331,60 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, goto carp; /* if there's no extensions, we're done */ - if (edid[0x7e] == 0) + valid_extensions = edid[0x7e]; + if (valid_extensions == 0) return (struct edid *)edid; - new = krealloc(edid, (edid[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL); + new = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL); if (!new) goto out; edid = new; for (j = 1; j <= edid[0x7e]; j++) { - u8 *block = edid + (valid_extensions + 1) * EDID_LENGTH; + u8 *block = edid + j * EDID_LENGTH; for (i = 0; i < 4; i++) { if (get_edid_block(data, block, j, EDID_LENGTH)) goto out; - if (drm_edid_block_valid(block, j, - print_bad_edid, NULL)) { - valid_extensions++; + if (drm_edid_block_valid(block, j, false, NULL)) break; - } } - if (i == 4 && print_bad_edid) { - dev_warn(connector->dev->dev, - "%s: Ignoring invalid EDID block %d.\n", - connector->name, j); - - connector->bad_edid_counter++; - } + if (i == 4) + valid_extensions--; } if (valid_extensions != edid[0x7e]) { + u8 *base; + + connector_bad_edid(connector, edid, edid[0x7e] + 1); + edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions; edid[0x7e] = valid_extensions; - new = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL); + + new = kmalloc((valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL); if (!new) goto out; + + base = new; + for (i = 0; i <= edid[0x7e]; i++) { + u8 *block = edid + i * EDID_LENGTH; + + if (!drm_edid_block_valid(block, i, false, NULL)) + continue; + + memcpy(base, block, EDID_LENGTH); + base += EDID_LENGTH; + } + + kfree(edid); edid = new; } return (struct edid *)edid; carp: - if (print_bad_edid) { - dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n", - connector->name, j); - } - connector->bad_edid_counter++; - + connector_bad_edid(connector, edid, 1); out: kfree(edid); return NULL; diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index e0d428f9d1cb..83dbae0fabcf 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -392,15 +392,10 @@ static 
int restore_fbdev_mode(struct drm_fb_helper *fb_helper) if (plane->type != DRM_PLANE_TYPE_PRIMARY) drm_plane_force_disable(plane); - if (plane->rotation_property) { + if (plane->rotation_property) drm_mode_plane_set_obj_prop(plane, plane->rotation_property, DRM_ROTATE_0); - } else if (dev->mode_config.rotation_property) { - drm_mode_plane_set_obj_prop(plane, - dev->mode_config.rotation_property, - DRM_ROTATE_0); - } } for (i = 0; i < fb_helper->crtc_count; i++) { diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 8bed5f459182..cf993dbf602e 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -665,7 +665,7 @@ void drm_event_cancel_free(struct drm_device *dev, spin_unlock_irqrestore(&dev->event_lock, flags); if (p->fence) - fence_put(p->fence); + dma_fence_put(p->fence); kfree(p); } @@ -696,8 +696,8 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e) } if (e->fence) { - fence_signal(e->fence); - fence_put(e->fence); + dma_fence_signal(e->fence); + dma_fence_put(e->fence); } if (!e->file_priv) { diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c index bc98bb94264d..47848ed8ca48 100644 --- a/drivers/gpu/drm/drm_of.c +++ b/drivers/gpu/drm/drm_of.c @@ -6,6 +6,11 @@ #include <drm/drm_crtc.h> #include <drm/drm_of.h> +static void drm_release_of(struct device *dev, void *data) +{ + of_node_put(data); +} + /** * drm_crtc_port_mask - find the mask of a registered CRTC by port OF node * @dev: DRM device @@ -64,6 +69,24 @@ uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, EXPORT_SYMBOL(drm_of_find_possible_crtcs); /** + * drm_of_component_match_add - Add a component helper OF node match rule + * @master: master device + * @matchptr: component match pointer + * @compare: compare function used for matching component + * @node: of_node + */ +void drm_of_component_match_add(struct device *master, + struct component_match **matchptr, + int (*compare)(struct device *, void *), + struct device_node *node) +{ + of_node_get(node); + component_match_add_release(master, matchptr, drm_release_of, + compare, node); +} +EXPORT_SYMBOL_GPL(drm_of_component_match_add); + +/** * drm_of_component_probe - Generic probe function for a component based master * @dev: master device containing the OF node * @compare_of: compare function used for matching components @@ -101,7 +124,7 @@ int drm_of_component_probe(struct device *dev, continue; } - component_match_add(dev, &match, compare_of, port); + drm_of_component_match_add(dev, &match, compare_of, port); of_node_put(port); } @@ -140,7 +163,8 @@ int drm_of_component_probe(struct device *dev, continue; } - component_match_add(dev, &match, compare_of, remote); + drm_of_component_match_add(dev, &match, compare_of, + remote); of_node_put(remote); } of_node_put(port); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index aa687669e22b..0dee6acbd880 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -16,6 +16,7 @@ #include <linux/component.h> #include <linux/of_platform.h> +#include <drm/drm_of.h> #include "etnaviv_drv.h" #include "etnaviv_gpu.h" @@ -629,8 +630,8 @@ static int etnaviv_pdev_probe(struct platform_device *pdev) if (!core_node) break; - component_match_add(&pdev->dev, &match, compare_of, - core_node); + drm_of_component_match_add(&pdev->dev, &match, + compare_of, core_node); of_node_put(core_node); } } else if (dev->platform_data) { diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c 
b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 3755ef935af4..7d066a91d778 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -466,10 +466,10 @@ int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj, } #ifdef CONFIG_DEBUG_FS -static void etnaviv_gem_describe_fence(struct fence *fence, +static void etnaviv_gem_describe_fence(struct dma_fence *fence, const char *type, struct seq_file *m) { - if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) seq_printf(m, "\t%9s: %s %s seq %u\n", type, fence->ops->get_driver_name(fence), @@ -482,7 +482,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m) struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); struct reservation_object *robj = etnaviv_obj->resv; struct reservation_object_list *fobj; - struct fence *fence; + struct dma_fence *fence; unsigned long off = drm_vma_node_start(&obj->vma_node); seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n", diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index b1254f885fed..d2211825e5c8 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -15,7 +15,7 @@ */ #include <linux/component.h> -#include <linux/fence.h> +#include <linux/dma-fence.h> #include <linux/moduleparam.h> #include <linux/of_device.h> #include "etnaviv_dump.h" @@ -882,7 +882,7 @@ static void recover_worker(struct work_struct *work) for (i = 0; i < ARRAY_SIZE(gpu->event); i++) { if (!gpu->event[i].used) continue; - fence_signal(gpu->event[i].fence); + dma_fence_signal(gpu->event[i].fence); gpu->event[i].fence = NULL; gpu->event[i].used = false; complete(&gpu->event_free); @@ -952,55 +952,55 @@ static void hangcheck_disable(struct etnaviv_gpu *gpu) /* fence object management */ struct etnaviv_fence { struct etnaviv_gpu *gpu; - struct fence base; + struct dma_fence base; }; -static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence) +static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence) { return container_of(fence, struct etnaviv_fence, base); } -static const char *etnaviv_fence_get_driver_name(struct fence *fence) +static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence) { return "etnaviv"; } -static const char *etnaviv_fence_get_timeline_name(struct fence *fence) +static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence) { struct etnaviv_fence *f = to_etnaviv_fence(fence); return dev_name(f->gpu->dev); } -static bool etnaviv_fence_enable_signaling(struct fence *fence) +static bool etnaviv_fence_enable_signaling(struct dma_fence *fence) { return true; } -static bool etnaviv_fence_signaled(struct fence *fence) +static bool etnaviv_fence_signaled(struct dma_fence *fence) { struct etnaviv_fence *f = to_etnaviv_fence(fence); return fence_completed(f->gpu, f->base.seqno); } -static void etnaviv_fence_release(struct fence *fence) +static void etnaviv_fence_release(struct dma_fence *fence) { struct etnaviv_fence *f = to_etnaviv_fence(fence); kfree_rcu(f, base.rcu); } -static const struct fence_ops etnaviv_fence_ops = { +static const struct dma_fence_ops etnaviv_fence_ops = { .get_driver_name = etnaviv_fence_get_driver_name, .get_timeline_name = etnaviv_fence_get_timeline_name, .enable_signaling = etnaviv_fence_enable_signaling, .signaled = etnaviv_fence_signaled, - .wait = fence_default_wait, + .wait = dma_fence_default_wait, 
.release = etnaviv_fence_release, }; -static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu) +static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu) { struct etnaviv_fence *f; @@ -1010,8 +1010,8 @@ static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu) f->gpu = gpu; - fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock, - gpu->fence_context, ++gpu->next_fence); + dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock, + gpu->fence_context, ++gpu->next_fence); return &f->base; } @@ -1021,7 +1021,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj, { struct reservation_object *robj = etnaviv_obj->resv; struct reservation_object_list *fobj; - struct fence *fence; + struct dma_fence *fence; int i, ret; if (!exclusive) { @@ -1039,7 +1039,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj, /* Wait on any existing exclusive fence which isn't our own */ fence = reservation_object_get_excl(robj); if (fence && fence->context != context) { - ret = fence_wait(fence, true); + ret = dma_fence_wait(fence, true); if (ret) return ret; } @@ -1052,7 +1052,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj, fence = rcu_dereference_protected(fobj->shared[i], reservation_object_held(robj)); if (fence->context != context) { - ret = fence_wait(fence, true); + ret = dma_fence_wait(fence, true); if (ret) return ret; } @@ -1158,11 +1158,11 @@ static void retire_worker(struct work_struct *work) mutex_lock(&gpu->lock); list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) { - if (!fence_is_signaled(cmdbuf->fence)) + if (!dma_fence_is_signaled(cmdbuf->fence)) break; list_del(&cmdbuf->node); - fence_put(cmdbuf->fence); + dma_fence_put(cmdbuf->fence); for (i = 0; i < cmdbuf->nr_bos; i++) { struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i]; @@ -1275,7 +1275,7 @@ void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu) int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf) { - struct fence *fence; + struct dma_fence *fence; unsigned int event, i; int ret; @@ -1391,7 +1391,7 @@ static irqreturn_t irq_handler(int irq, void *data) } while ((event = ffs(intr)) != 0) { - struct fence *fence; + struct dma_fence *fence; event -= 1; @@ -1401,7 +1401,7 @@ static irqreturn_t irq_handler(int irq, void *data) fence = gpu->event[event].fence; gpu->event[event].fence = NULL; - fence_signal(fence); + dma_fence_signal(fence); /* * Events can be processed out of order. 
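The etnaviv hunks above show the full shape of a fence provider after the rename. Condensed into a minimal, self-contained sketch (the foo_* driver is hypothetical; error handling and the IRQ path that would call dma_fence_signal() are omitted):

	#include <linux/dma-fence.h>
	#include <linux/slab.h>

	struct foo_fence {
		struct dma_fence base;	/* embed, as etnaviv and msm do */
	};

	static const char *foo_driver_name(struct dma_fence *f)
	{
		return "foo";
	}

	static const char *foo_timeline_name(struct dma_fence *f)
	{
		return "foo-ring0";
	}

	static bool foo_enable_signaling(struct dma_fence *f)
	{
		/* Signaling arrives from an interrupt handler calling
		 * dma_fence_signal(), as in etnaviv's irq_handler(). */
		return true;
	}

	static const struct dma_fence_ops foo_fence_ops = {
		.get_driver_name = foo_driver_name,
		.get_timeline_name = foo_timeline_name,
		.enable_signaling = foo_enable_signaling,
		.wait = dma_fence_default_wait,
	};

	static struct dma_fence *foo_fence_create(spinlock_t *lock, u64 context,
						  unsigned int seqno)
	{
		struct foo_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return NULL;
		dma_fence_init(&f->base, &foo_fence_ops, lock, context, seqno);
		return &f->base;
	}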
Eg, @@ -1553,7 +1553,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master, return ret; gpu->drm = drm; - gpu->fence_context = fence_context_alloc(1); + gpu->fence_context = dma_fence_context_alloc(1); spin_lock_init(&gpu->fence_spinlock); INIT_LIST_HEAD(&gpu->active_cmd_list); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index 73c278dc3706..8c6b824e9d0a 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -89,7 +89,7 @@ struct etnaviv_chip_identity { struct etnaviv_event { bool used; - struct fence *fence; + struct dma_fence *fence; }; struct etnaviv_cmdbuf; @@ -163,7 +163,7 @@ struct etnaviv_cmdbuf { /* vram node used if the cmdbuf is mapped through the MMUv2 */ struct drm_mm_node vram_node; /* fence after which this buffer is to be disposed */ - struct fence *fence; + struct dma_fence *fence; /* target exec state */ u32 exec_state; /* per GPU in-flight list */ diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c index 90377a609c98..e88fde18c946 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c @@ -24,6 +24,7 @@ #include <drm/drm_fb_cma_helper.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_of.h> #include "kirin_drm_drv.h" @@ -260,14 +261,13 @@ static struct device_node *kirin_get_remote_node(struct device_node *np) DRM_ERROR("no valid endpoint node\n"); return ERR_PTR(-ENODEV); } - of_node_put(endpoint); remote = of_graph_get_remote_port_parent(endpoint); + of_node_put(endpoint); if (!remote) { DRM_ERROR("no valid remote node\n"); return ERR_PTR(-ENODEV); } - of_node_put(remote); if (!of_device_is_available(remote)) { DRM_ERROR("not available for remote node\n"); @@ -294,7 +294,8 @@ static int kirin_drm_platform_probe(struct platform_device *pdev) if (IS_ERR(remote)) return PTR_ERR(remote); - component_match_add(dev, &match, compare_of, remote); + drm_of_component_match_add(dev, &match, compare_of, remote); + of_node_put(remote); return component_master_add_with_match(dev, &kirin_drm_ops, match); diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index 9798d400d817..af8683e0dd54 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -1289,7 +1289,8 @@ static void tda998x_audio_shutdown(struct device *dev, void *data) mutex_unlock(&priv->audio_mutex); } -int tda998x_audio_digital_mute(struct device *dev, void *data, bool enable) +static int +tda998x_audio_digital_mute(struct device *dev, void *data, bool enable) { struct tda998x_priv *priv = dev_get_drvdata(dev); diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 74ede1f53372..f9af2a00625e 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -26,12 +26,12 @@ #include "i915_drv.h" -static const char *i915_fence_get_driver_name(struct fence *fence) +static const char *i915_fence_get_driver_name(struct dma_fence *fence) { return "i915"; } -static const char *i915_fence_get_timeline_name(struct fence *fence) +static const char *i915_fence_get_timeline_name(struct dma_fence *fence) { /* Timelines are bound by eviction to a VM. 
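The i915_fence_wait() conversion above implements the ->wait hook. For reference, a sketch of the contract that hook must satisfy, with a hypothetical driver that simply delegates to the stock helper:

	/* ->wait must return the remaining timeout in jiffies if the fence
	 * signaled, 0 if the wait timed out, or a negative error such as
	 * -ERESTARTSYS if it was interrupted. dma_fence_default_wait()
	 * already honours this, which is why most ops in this series
	 * plug it in directly. */
	static signed long foo_fence_wait(struct dma_fence *fence, bool intr,
					  signed long timeout)
	{
		return dma_fence_default_wait(fence, intr, timeout);
	}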
However, since * we only have a global seqno at the moment, we only have @@ -42,12 +42,12 @@ static const char *i915_fence_get_timeline_name(struct fence *fence) return "global"; } -static bool i915_fence_signaled(struct fence *fence) +static bool i915_fence_signaled(struct dma_fence *fence) { return i915_gem_request_completed(to_request(fence)); } -static bool i915_fence_enable_signaling(struct fence *fence) +static bool i915_fence_enable_signaling(struct dma_fence *fence) { if (i915_fence_signaled(fence)) return false; @@ -56,7 +56,7 @@ static bool i915_fence_enable_signaling(struct fence *fence) return true; } -static signed long i915_fence_wait(struct fence *fence, +static signed long i915_fence_wait(struct dma_fence *fence, bool interruptible, signed long timeout_jiffies) { @@ -85,26 +85,26 @@ static signed long i915_fence_wait(struct fence *fence, return timeout_jiffies; } -static void i915_fence_value_str(struct fence *fence, char *str, int size) +static void i915_fence_value_str(struct dma_fence *fence, char *str, int size) { snprintf(str, size, "%u", fence->seqno); } -static void i915_fence_timeline_value_str(struct fence *fence, char *str, +static void i915_fence_timeline_value_str(struct dma_fence *fence, char *str, int size) { snprintf(str, size, "%u", intel_engine_get_seqno(to_request(fence)->engine)); } -static void i915_fence_release(struct fence *fence) +static void i915_fence_release(struct dma_fence *fence) { struct drm_i915_gem_request *req = to_request(fence); kmem_cache_free(req->i915->requests, req); } -const struct fence_ops i915_fence_ops = { +const struct dma_fence_ops i915_fence_ops = { .get_driver_name = i915_fence_get_driver_name, .get_timeline_name = i915_fence_get_timeline_name, .enable_signaling = i915_fence_enable_signaling, @@ -388,8 +388,8 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, * The reference count is incremented atomically. If it is zero, * the lookup knows the request is unallocated and complete. Otherwise, * it is either still in use, or has been reallocated and reset - * with fence_init(). This increment is safe for release as we check - * that the request we have a reference to and matches the active + * with dma_fence_init(). This increment is safe for release as we + * check that the request we have a reference to and matches the active * request. * * Before we increment the refcount, we chase the request->engine @@ -412,11 +412,11 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, goto err; spin_lock_init(&req->lock); - fence_init(&req->fence, - &i915_fence_ops, - &req->lock, - engine->fence_context, - seqno); + dma_fence_init(&req->fence, + &i915_fence_ops, + &req->lock, + engine->fence_context, + seqno); i915_sw_fence_init(&req->submit, submit_notify); diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h index 974bd7bcc801..bceeaa3a5193 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.h +++ b/drivers/gpu/drm/i915/i915_gem_request.h @@ -25,7 +25,7 @@ #ifndef I915_GEM_REQUEST_H #define I915_GEM_REQUEST_H -#include <linux/fence.h> +#include <linux/dma-fence.h> #include "i915_gem.h" #include "i915_sw_fence.h" @@ -62,7 +62,7 @@ struct intel_signal_node { * The requests are reference counted. 
*/ struct drm_i915_gem_request { - struct fence fence; + struct dma_fence fence; spinlock_t lock; /** On Which ring this request was generated */ @@ -145,9 +145,9 @@ struct drm_i915_gem_request { struct list_head execlist_link; }; -extern const struct fence_ops i915_fence_ops; +extern const struct dma_fence_ops i915_fence_ops; -static inline bool fence_is_i915(struct fence *fence) +static inline bool fence_is_i915(struct dma_fence *fence) { return fence->ops == &i915_fence_ops; } @@ -172,7 +172,7 @@ i915_gem_request_get_engine(struct drm_i915_gem_request *req) } static inline struct drm_i915_gem_request * -to_request(struct fence *fence) +to_request(struct dma_fence *fence) { /* We assume that NULL fence/request are interoperable */ BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0); @@ -183,19 +183,19 @@ to_request(struct fence *fence) static inline struct drm_i915_gem_request * i915_gem_request_get(struct drm_i915_gem_request *req) { - return to_request(fence_get(&req->fence)); + return to_request(dma_fence_get(&req->fence)); } static inline struct drm_i915_gem_request * i915_gem_request_get_rcu(struct drm_i915_gem_request *req) { - return to_request(fence_get_rcu(&req->fence)); + return to_request(dma_fence_get_rcu(&req->fence)); } static inline void i915_gem_request_put(struct drm_i915_gem_request *req) { - fence_put(&req->fence); + dma_fence_put(&req->fence); } static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst, @@ -497,7 +497,7 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active) * compiler. * * The atomic operation at the heart of - * i915_gem_request_get_rcu(), see fence_get_rcu(), is + * i915_gem_request_get_rcu(), see dma_fence_get_rcu(), is * atomic_inc_not_zero() which is only a full memory barrier * when successful. 
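Sketching the lockless lookup the comment above describes, in isolation: dma_fence_get_rcu() only succeeds while the refcount is still non-zero, so a reader under RCU either obtains a full reference or sees NULL. The slot handling here is hypothetical:

	static struct dma_fence *foo_active_fence_get(struct dma_fence __rcu **slot)
	{
		struct dma_fence *fence;

		rcu_read_lock();
		fence = rcu_dereference(*slot);
		if (fence && !dma_fence_get_rcu(fence))
			fence = NULL;	/* refcount already hit zero: being freed */
		rcu_read_unlock();

		return fence;		/* caller must dma_fence_put() */
	}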
That is, if i915_gem_request_get_rcu() * returns the request (and so with the reference counted diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 1e5cbc585ca2..8185002d7ec8 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -8,7 +8,7 @@ */ #include <linux/slab.h> -#include <linux/fence.h> +#include <linux/dma-fence.h> #include <linux/reservation.h> #include "i915_sw_fence.h" @@ -226,49 +226,50 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, return pending; } -struct dma_fence_cb { - struct fence_cb base; +struct i915_sw_dma_fence_cb { + struct dma_fence_cb base; struct i915_sw_fence *fence; - struct fence *dma; + struct dma_fence *dma; struct timer_list timer; }; static void timer_i915_sw_fence_wake(unsigned long data) { - struct dma_fence_cb *cb = (struct dma_fence_cb *)data; + struct i915_sw_dma_fence_cb *cb = (struct i915_sw_dma_fence_cb *)data; printk(KERN_WARNING "asynchronous wait on fence %s:%s:%x timed out\n", cb->dma->ops->get_driver_name(cb->dma), cb->dma->ops->get_timeline_name(cb->dma), cb->dma->seqno); - fence_put(cb->dma); + dma_fence_put(cb->dma); cb->dma = NULL; i915_sw_fence_commit(cb->fence); cb->timer.function = NULL; } -static void dma_i915_sw_fence_wake(struct fence *dma, struct fence_cb *data) +static void dma_i915_sw_fence_wake(struct dma_fence *dma, + struct dma_fence_cb *data) { - struct dma_fence_cb *cb = container_of(data, typeof(*cb), base); + struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base); del_timer_sync(&cb->timer); if (cb->timer.function) i915_sw_fence_commit(cb->fence); - fence_put(cb->dma); + dma_fence_put(cb->dma); kfree(cb); } int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, - struct fence *dma, + struct dma_fence *dma, unsigned long timeout, gfp_t gfp) { - struct dma_fence_cb *cb; + struct i915_sw_dma_fence_cb *cb; int ret; - if (fence_is_signaled(dma)) + if (dma_fence_is_signaled(dma)) return 0; cb = kmalloc(sizeof(*cb), gfp); @@ -276,7 +277,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, if (!gfpflags_allow_blocking(gfp)) return -ENOMEM; - return fence_wait(dma, false); + return dma_fence_wait(dma, false); } cb->fence = i915_sw_fence_get(fence); @@ -287,11 +288,11 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, timer_i915_sw_fence_wake, (unsigned long)cb, TIMER_IRQSAFE); if (timeout) { - cb->dma = fence_get(dma); + cb->dma = dma_fence_get(dma); mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout)); } - ret = fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake); + ret = dma_fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake); if (ret == 0) { ret = 1; } else { @@ -305,16 +306,16 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, struct reservation_object *resv, - const struct fence_ops *exclude, + const struct dma_fence_ops *exclude, bool write, unsigned long timeout, gfp_t gfp) { - struct fence *excl; + struct dma_fence *excl; int ret = 0, pending; if (write) { - struct fence **shared; + struct dma_fence **shared; unsigned int count, i; ret = reservation_object_get_fences_rcu(resv, @@ -339,7 +340,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, } for (i = 0; i < count; i++) - fence_put(shared[i]); + dma_fence_put(shared[i]); kfree(shared); } else { excl = reservation_object_get_excl_rcu(resv); @@ -356,7 +357,7 @@ int i915_sw_fence_await_reservation(struct 
i915_sw_fence *fence, ret |= pending; } - fence_put(excl); + dma_fence_put(excl); return ret; } diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h index 373141602ca4..cd239e92f67f 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.h +++ b/drivers/gpu/drm/i915/i915_sw_fence.h @@ -16,8 +16,8 @@ #include <linux/wait.h> struct completion; -struct fence; -struct fence_ops; +struct dma_fence; +struct dma_fence_ops; struct reservation_object; struct i915_sw_fence { @@ -47,12 +47,12 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, struct i915_sw_fence *after, wait_queue_t *wq); int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, - struct fence *dma, + struct dma_fence *dma, unsigned long timeout, gfp_t gfp); int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, struct reservation_object *resv, - const struct fence_ops *exclude, + const struct dma_fence_ops *exclude, bool write, unsigned long timeout, gfp_t gfp); diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 178798002a73..5c912c25f7d3 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -491,7 +491,7 @@ TRACE_EVENT(i915_gem_ring_dispatch, __entry->ring = req->engine->id; __entry->seqno = req->fence.seqno; __entry->flags = flags; - fence_enable_sw_signaling(&req->fence); + dma_fence_enable_sw_signaling(&req->fence); ), TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index 23fc1042fed4..56efcc507ea2 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -464,7 +464,7 @@ static int intel_breadcrumbs_signaler(void *arg) &request->signaling.wait); local_bh_disable(); - fence_signal(&request->fence); + dma_fence_signal(&request->fence); local_bh_enable(); /* kick start the tasklets */ /* Find the next oldest signal. 
Note that as we have @@ -502,7 +502,7 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request) struct rb_node *parent, **p; bool first, wakeup; - /* locked by fence_enable_sw_signaling() */ + /* locked by dma_fence_enable_sw_signaling() */ assert_spin_locked(&request->lock); request->signaling.wait.tsk = b->signaler; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index f30db8f2425e..3c2293bd24bf 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1459,8 +1459,7 @@ static void intel_dp_print_hw_revision(struct intel_dp *intel_dp) if ((drm_debug & DRM_UT_KMS) == 0) return; - if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & - DP_DWN_STRM_PORT_PRESENT)) + if (!drm_dp_is_branch(intel_dp->dpcd)) return; len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_HW_REV, &rev, 1); @@ -1478,8 +1477,7 @@ static void intel_dp_print_sw_revision(struct intel_dp *intel_dp) if ((drm_debug & DRM_UT_KMS) == 0) return; - if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & - DP_DWN_STRM_PORT_PRESENT)) + if (!drm_dp_is_branch(intel_dp->dpcd)) return; len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_SW_REV, &rev, 2); @@ -3615,8 +3613,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) if (!is_edp(intel_dp) && !intel_dp->sink_count) return false; - if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & - DP_DWN_STRM_PORT_PRESENT)) + if (!drm_dp_is_branch(intel_dp->dpcd)) return true; /* native DP sink */ if (intel_dp->dpcd[DP_DPCD_REV] == 0x10) @@ -4134,7 +4131,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp) return connector_status_connected; /* if there's no downstream port, we're done */ - if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)) + if (!drm_dp_is_branch(dpcd)) return connector_status_connected; /* If we're HPD-aware, SINK_COUNT changes dynamically */ diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 2dc94812bea5..8cceb345aa0f 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -245,7 +245,7 @@ void intel_engine_setup_common(struct intel_engine_cs *engine) INIT_LIST_HEAD(&engine->execlist_queue); spin_lock_init(&engine->execlist_lock); - engine->fence_context = fence_context_alloc(1); + engine->fence_context = dma_fence_context_alloc(1); intel_engine_init_requests(engine); intel_engine_init_hangcheck(engine); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index db61aa5f32ef..296f541fbe2f 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -18,6 +18,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_gem.h> #include <drm/drm_gem_cma_helper.h> +#include <drm/drm_of.h> #include <linux/component.h> #include <linux/iommu.h> #include <linux/of_address.h> @@ -416,7 +417,8 @@ static int mtk_drm_probe(struct platform_device *pdev) comp_type == MTK_DPI) { dev_info(dev, "Adding component match for %s\n", node->full_name); - component_match_add(dev, &match, compare_of, node); + drm_of_component_match_add(dev, &match, compare_of, + node); } else { struct mtk_ddp_comp *comp; diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 5127b75dbf40..7250ffc6322f 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -25,9 +25,6 @@ bool hang_debug = false; MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)"); 
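The intel_dp.c hunks above replace four copies of the same DPCD test with drm_dp_is_branch(). Judging purely from the removed lines, the helper presumably reduces to something like this (foo_ prefix used to mark it as an inferred sketch):

	static inline bool foo_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
	{
		/* A branch device (DP-to-HDMI/DVI converter, MST hub, ...)
		 * advertises a downstream port in its receiver caps. */
		return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT;
	}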
module_param_named(hang_debug, hang_debug, bool, 0600); -struct msm_gpu *a3xx_gpu_init(struct drm_device *dev); -struct msm_gpu *a4xx_gpu_init(struct drm_device *dev); - static const struct adreno_info gpulist[] = { { .rev = ADRENO_REV(3, 0, 5, ANY_ID), diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index a54f6e036b4a..07d99bdf7c99 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -311,4 +311,7 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu, gpu_write(&gpu->base, reg - 1, data); } +struct msm_gpu *a3xx_gpu_init(struct drm_device *dev); +struct msm_gpu *a4xx_gpu_init(struct drm_device *dev); + #endif /* __ADRENO_GPU_H__ */ diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index 951c002b05df..cf50d3ec8d1b 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -75,15 +75,12 @@ static void mdp5_plane_install_rotation_property(struct drm_device *dev, !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) return; - if (!dev->mode_config.rotation_property) - dev->mode_config.rotation_property = - drm_mode_create_rotation_property(dev, - DRM_ROTATE_0 | DRM_REFLECT_X | DRM_REFLECT_Y); - - if (dev->mode_config.rotation_property) - drm_object_attach_property(&plane->base, - dev->mode_config.rotation_property, - DRM_ROTATE_0); + drm_plane_create_rotation_property(plane, + DRM_ROTATE_0, + DRM_ROTATE_0 | + DRM_ROTATE_180 | + DRM_REFLECT_X | + DRM_REFLECT_Y); } /* helper to install properties which are common to planes and crtcs */ @@ -289,6 +286,8 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, plane_enabled(old_state), plane_enabled(state)); if (plane_enabled(state)) { + unsigned int rotation; + format = to_mdp_format(msm_framebuffer_format(state->fb)); if (MDP_FORMAT_IS_YUV(format) && !pipe_supports_yuv(mdp5_plane->caps)) { @@ -309,8 +308,13 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, return -EINVAL; } - hflip = !!(state->rotation & DRM_REFLECT_X); - vflip = !!(state->rotation & DRM_REFLECT_Y); + rotation = drm_rotation_simplify(state->rotation, + DRM_ROTATE_0 | + DRM_REFLECT_X | + DRM_REFLECT_Y); + hflip = !!(rotation & DRM_REFLECT_X); + vflip = !!(rotation & DRM_REFLECT_Y); + if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) || (hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) { dev_err(plane->dev->dev, @@ -681,6 +685,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, int pe_top[COMP_MAX], pe_bottom[COMP_MAX]; uint32_t hdecm = 0, vdecm = 0; uint32_t pix_format; + unsigned int rotation; bool vflip, hflip; unsigned long flags; int ret; @@ -743,8 +748,12 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, config |= get_scale_config(format, src_h, crtc_h, false); DBG("scale config = %x", config); - hflip = !!(pstate->rotation & DRM_REFLECT_X); - vflip = !!(pstate->rotation & DRM_REFLECT_Y); + rotation = drm_rotation_simplify(pstate->rotation, + DRM_ROTATE_0 | + DRM_REFLECT_X | + DRM_REFLECT_Y); + hflip = !!(rotation & DRM_REFLECT_X); + vflip = !!(rotation & DRM_REFLECT_Y); spin_lock_irqsave(&mdp5_plane->pipe_lock, flags); diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index 663f2b6ef091..3c853733c99a 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -18,6 +18,7 @@ #ifdef CONFIG_DEBUG_FS #include "msm_drv.h" #include "msm_gpu.h" +#include "msm_debugfs.h" static int 
msm_gpu_show(struct drm_device *dev, struct seq_file *m) { diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index fb5c0b0a7594..84d38eaea585 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -15,6 +15,8 @@ * this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <drm/drm_of.h> + #include "msm_drv.h" #include "msm_debugfs.h" #include "msm_fence.h" @@ -919,8 +921,8 @@ static int add_components_mdp(struct device *mdp_dev, continue; } - component_match_add(master_dev, matchptr, compare_of, intf); - + drm_of_component_match_add(master_dev, matchptr, compare_of, + intf); of_node_put(intf); of_node_put(ep_node); } @@ -962,8 +964,8 @@ static int add_display_components(struct device *dev, put_device(mdp_dev); /* add the MDP component itself */ - component_match_add(dev, matchptr, compare_of, - mdp_dev->of_node); + drm_of_component_match_add(dev, matchptr, compare_of, + mdp_dev->of_node); } else { /* MDP4 */ mdp_dev = dev; @@ -996,7 +998,7 @@ static int add_gpu_components(struct device *dev, if (!np) return 0; - component_match_add(dev, matchptr, compare_of, np); + drm_of_component_match_add(dev, matchptr, compare_of, np); of_node_put(np); diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index d0da52f2a806..940bf4992fe2 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -217,7 +217,7 @@ void msm_gem_vunmap(struct drm_gem_object *obj); int msm_gem_sync_object(struct drm_gem_object *obj, struct msm_fence_context *fctx, bool exclusive); void msm_gem_move_to_active(struct drm_gem_object *obj, - struct msm_gpu *gpu, bool exclusive, struct fence *fence); + struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence); void msm_gem_move_to_inactive(struct drm_gem_object *obj); int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout); int msm_gem_cpu_fini(struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c index a9b9b1c95a2e..3f299c537b77 100644 --- a/drivers/gpu/drm/msm/msm_fence.c +++ b/drivers/gpu/drm/msm/msm_fence.c @@ -15,7 +15,7 @@ * this program. If not, see <http://www.gnu.org/licenses/>. 
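The repeated switch to drm_of_component_match_add() in the msm (and earlier etnaviv, mediatek, kirin) probe paths exists because plain component_match_add() stores the node pointer without taking a reference of its own. A sketch of the resulting calling pattern; the "gpu" phandle name is hypothetical, and compare_of is the driver's own comparator as in the hunks above:

	static void foo_add_gpu_match(struct device *dev,
				      struct component_match **match)
	{
		struct device_node *np;

		np = of_parse_phandle(dev->of_node, "gpu", 0);
		if (!np)
			return;

		/* The wrapper takes its own of_node_get(); drm_release_of()
		 * drops it when the match is freed, so putting our reference
		 * immediately is safe - with plain component_match_add() it
		 * would leave the match holding a dangling pointer. */
		drm_of_component_match_add(dev, match, compare_of, np);
		of_node_put(np);
	}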
*/ -#include <linux/fence.h> +#include <linux/dma-fence.h> #include "msm_drv.h" #include "msm_fence.h" @@ -32,7 +32,7 @@ msm_fence_context_alloc(struct drm_device *dev, const char *name) fctx->dev = dev; fctx->name = name; - fctx->context = fence_context_alloc(1); + fctx->context = dma_fence_context_alloc(1); init_waitqueue_head(&fctx->event); spin_lock_init(&fctx->spinlock); @@ -100,52 +100,52 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence) struct msm_fence { struct msm_fence_context *fctx; - struct fence base; + struct dma_fence base; }; -static inline struct msm_fence *to_msm_fence(struct fence *fence) +static inline struct msm_fence *to_msm_fence(struct dma_fence *fence) { return container_of(fence, struct msm_fence, base); } -static const char *msm_fence_get_driver_name(struct fence *fence) +static const char *msm_fence_get_driver_name(struct dma_fence *fence) { return "msm"; } -static const char *msm_fence_get_timeline_name(struct fence *fence) +static const char *msm_fence_get_timeline_name(struct dma_fence *fence) { struct msm_fence *f = to_msm_fence(fence); return f->fctx->name; } -static bool msm_fence_enable_signaling(struct fence *fence) +static bool msm_fence_enable_signaling(struct dma_fence *fence) { return true; } -static bool msm_fence_signaled(struct fence *fence) +static bool msm_fence_signaled(struct dma_fence *fence) { struct msm_fence *f = to_msm_fence(fence); return fence_completed(f->fctx, f->base.seqno); } -static void msm_fence_release(struct fence *fence) +static void msm_fence_release(struct dma_fence *fence) { struct msm_fence *f = to_msm_fence(fence); kfree_rcu(f, base.rcu); } -static const struct fence_ops msm_fence_ops = { +static const struct dma_fence_ops msm_fence_ops = { .get_driver_name = msm_fence_get_driver_name, .get_timeline_name = msm_fence_get_timeline_name, .enable_signaling = msm_fence_enable_signaling, .signaled = msm_fence_signaled, - .wait = fence_default_wait, + .wait = dma_fence_default_wait, .release = msm_fence_release, }; -struct fence * +struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx) { struct msm_fence *f; @@ -156,8 +156,8 @@ msm_fence_alloc(struct msm_fence_context *fctx) f->fctx = fctx; - fence_init(&f->base, &msm_fence_ops, &fctx->spinlock, - fctx->context, ++fctx->last_fence); + dma_fence_init(&f->base, &msm_fence_ops, &fctx->spinlock, + fctx->context, ++fctx->last_fence); return &f->base; } diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h index ceb5b3d314b4..56061aa1959d 100644 --- a/drivers/gpu/drm/msm/msm_fence.h +++ b/drivers/gpu/drm/msm/msm_fence.h @@ -41,6 +41,6 @@ int msm_queue_fence_cb(struct msm_fence_context *fctx, struct msm_fence_cb *cb, uint32_t fence); void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence); -struct fence * msm_fence_alloc(struct msm_fence_context *fctx); +struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx); #endif diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index b6ac27e31929..57db7dbbb618 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -521,7 +521,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj, { struct msm_gem_object *msm_obj = to_msm_bo(obj); struct reservation_object_list *fobj; - struct fence *fence; + struct dma_fence *fence; int i, ret; if (!exclusive) { @@ -540,7 +540,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj, fence = reservation_object_get_excl(msm_obj->resv); /* don't need to wait on our own fences, 
since ring is fifo */ if (fence && (fence->context != fctx->context)) { - ret = fence_wait(fence, true); + ret = dma_fence_wait(fence, true); if (ret) return ret; } @@ -553,7 +553,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj, fence = rcu_dereference_protected(fobj->shared[i], reservation_object_held(msm_obj->resv)); if (fence->context != fctx->context) { - ret = fence_wait(fence, true); + ret = dma_fence_wait(fence, true); if (ret) return ret; } @@ -563,7 +563,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj, } void msm_gem_move_to_active(struct drm_gem_object *obj, - struct msm_gpu *gpu, bool exclusive, struct fence *fence) + struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence) { struct msm_gem_object *msm_obj = to_msm_bo(obj); WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED); @@ -616,10 +616,10 @@ int msm_gem_cpu_fini(struct drm_gem_object *obj) } #ifdef CONFIG_DEBUG_FS -static void describe_fence(struct fence *fence, const char *type, +static void describe_fence(struct dma_fence *fence, const char *type, struct seq_file *m) { - if (!fence_is_signaled(fence)) + if (!dma_fence_is_signaled(fence)) seq_printf(m, "\t%9s: %s %s seq %u\n", type, fence->ops->get_driver_name(fence), fence->ops->get_timeline_name(fence), @@ -631,7 +631,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) struct msm_gem_object *msm_obj = to_msm_bo(obj); struct reservation_object *robj = msm_obj->resv; struct reservation_object_list *fobj; - struct fence *fence; + struct dma_fence *fence; uint64_t off = drm_vma_node_start(&obj->vma_node); const char *madv; diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index b2f13cfe945e..2cb8551fda70 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -104,7 +104,7 @@ struct msm_gem_submit { struct list_head node; /* node in gpu submit_list */ struct list_head bo_list; struct ww_acquire_ctx ticket; - struct fence *fence; + struct dma_fence *fence; struct pid *pid; /* submitting process */ bool valid; /* true if no cmdstream patching needed */ unsigned int nr_cmds; diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index b6a0f37a65f3..25e8786fa4ca 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -60,7 +60,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, void msm_gem_submit_free(struct msm_gem_submit *submit) { - fence_put(submit->fence); + dma_fence_put(submit->fence); list_del(&submit->node); put_pid(submit->pid); kfree(submit); @@ -380,7 +380,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct msm_file_private *ctx = file->driver_priv; struct msm_gem_submit *submit; struct msm_gpu *gpu = priv->gpu; - struct fence *in_fence = NULL; + struct dma_fence *in_fence = NULL; struct sync_file *sync_file = NULL; int out_fence_fd = -1; unsigned i; @@ -439,7 +439,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, */ if (in_fence->context != gpu->fctx->context) { - ret = fence_wait(in_fence, true); + ret = dma_fence_wait(in_fence, true); if (ret) goto out; } @@ -542,7 +542,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, out: if (in_fence) - fence_put(in_fence); + dma_fence_put(in_fence); submit_cleanup(submit); if (ret) msm_gem_submit_free(submit); diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 5bb09838b5ae..3249707e6834 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c 
@@ -476,7 +476,7 @@ static void retire_submits(struct msm_gpu *gpu) submit = list_first_entry(&gpu->submit_list, struct msm_gem_submit, node); - if (fence_is_signaled(submit->fence)) { + if (dma_fence_is_signaled(submit->fence)) { retire_submit(gpu, submit); } else { break; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index e395cb6f511f..e0c0007689e5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -83,13 +83,13 @@ nv10_bo_get_tile_region(struct drm_device *dev, int i) static void nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile, - struct fence *fence) + struct dma_fence *fence) { struct nouveau_drm *drm = nouveau_drm(dev); if (tile) { spin_lock(&drm->tile.lock); - tile->fence = (struct nouveau_fence *)fence_get(fence); + tile->fence = (struct nouveau_fence *)dma_fence_get(fence); tile->used = false; spin_unlock(&drm->tile.lock); } @@ -1243,7 +1243,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct drm_device *dev = drm->dev; - struct fence *fence = reservation_object_get_excl(bo->resv); + struct dma_fence *fence = reservation_object_get_excl(bo->resv); nv10_bo_put_tile_region(dev, *old_tile, fence); *old_tile = new_tile; diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 4bb9ab892ae1..e9529ee6bc23 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -28,7 +28,7 @@ #include <linux/ktime.h> #include <linux/hrtimer.h> -#include <trace/events/fence.h> +#include <trace/events/dma_fence.h> #include <nvif/cl826e.h> #include <nvif/notify.h> @@ -38,11 +38,11 @@ #include "nouveau_dma.h" #include "nouveau_fence.h" -static const struct fence_ops nouveau_fence_ops_uevent; -static const struct fence_ops nouveau_fence_ops_legacy; +static const struct dma_fence_ops nouveau_fence_ops_uevent; +static const struct dma_fence_ops nouveau_fence_ops_legacy; static inline struct nouveau_fence * -from_fence(struct fence *fence) +from_fence(struct dma_fence *fence) { return container_of(fence, struct nouveau_fence, base); } @@ -58,23 +58,23 @@ nouveau_fence_signal(struct nouveau_fence *fence) { int drop = 0; - fence_signal_locked(&fence->base); + dma_fence_signal_locked(&fence->base); list_del(&fence->head); rcu_assign_pointer(fence->channel, NULL); - if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) { + if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) { struct nouveau_fence_chan *fctx = nouveau_fctx(fence); if (!--fctx->notify_ref) drop = 1; } - fence_put(&fence->base); + dma_fence_put(&fence->base); return drop; } static struct nouveau_fence * -nouveau_local_fence(struct fence *fence, struct nouveau_drm *drm) { +nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm) { struct nouveau_fence_priv *priv = (void*)drm->fence; if (fence->ops != &nouveau_fence_ops_legacy && @@ -201,7 +201,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha struct nouveau_fence_work { struct work_struct work; - struct fence_cb cb; + struct dma_fence_cb cb; void (*func)(void *); void *data; }; @@ -214,7 +214,7 @@ nouveau_fence_work_handler(struct work_struct *kwork) kfree(work); } -static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb) +static void nouveau_fence_work_cb(struct dma_fence *fence, struct dma_fence_cb *cb) { struct nouveau_fence_work *work = container_of(cb, 
typeof(*work), cb); @@ -222,12 +222,12 @@ static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb) } void -nouveau_fence_work(struct fence *fence, +nouveau_fence_work(struct dma_fence *fence, void (*func)(void *), void *data) { struct nouveau_fence_work *work; - if (fence_is_signaled(fence)) + if (dma_fence_is_signaled(fence)) goto err; work = kmalloc(sizeof(*work), GFP_KERNEL); @@ -245,7 +245,7 @@ nouveau_fence_work(struct fence *fence, work->func = func; work->data = data; - if (fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0) + if (dma_fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0) goto err_free; return; @@ -266,17 +266,17 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan) fence->timeout = jiffies + (15 * HZ); if (priv->uevent) - fence_init(&fence->base, &nouveau_fence_ops_uevent, - &fctx->lock, fctx->context, ++fctx->sequence); + dma_fence_init(&fence->base, &nouveau_fence_ops_uevent, + &fctx->lock, fctx->context, ++fctx->sequence); else - fence_init(&fence->base, &nouveau_fence_ops_legacy, - &fctx->lock, fctx->context, ++fctx->sequence); + dma_fence_init(&fence->base, &nouveau_fence_ops_legacy, + &fctx->lock, fctx->context, ++fctx->sequence); kref_get(&fctx->fence_ref); - trace_fence_emit(&fence->base); + trace_dma_fence_emit(&fence->base); ret = fctx->emit(fence); if (!ret) { - fence_get(&fence->base); + dma_fence_get(&fence->base); spin_lock_irq(&fctx->lock); if (nouveau_fence_update(chan, fctx)) @@ -298,7 +298,7 @@ nouveau_fence_done(struct nouveau_fence *fence) struct nouveau_channel *chan; unsigned long flags; - if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) return true; spin_lock_irqsave(&fctx->lock, flags); @@ -307,11 +307,11 @@ nouveau_fence_done(struct nouveau_fence *fence) nvif_notify_put(&fctx->notify); spin_unlock_irqrestore(&fctx->lock, flags); } - return fence_is_signaled(&fence->base); + return dma_fence_is_signaled(&fence->base); } static long -nouveau_fence_wait_legacy(struct fence *f, bool intr, long wait) +nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait) { struct nouveau_fence *fence = from_fence(f); unsigned long sleep_time = NSEC_PER_MSEC / 1000; @@ -378,7 +378,7 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr) if (!lazy) return nouveau_fence_wait_busy(fence, intr); - ret = fence_wait_timeout(&fence->base, intr, 15 * HZ); + ret = dma_fence_wait_timeout(&fence->base, intr, 15 * HZ); if (ret < 0) return ret; else if (!ret) @@ -391,7 +391,7 @@ int nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr) { struct nouveau_fence_chan *fctx = chan->fence; - struct fence *fence; + struct dma_fence *fence; struct reservation_object *resv = nvbo->bo.resv; struct reservation_object_list *fobj; struct nouveau_fence *f; @@ -421,7 +421,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e } if (must_wait) - ret = fence_wait(fence, intr); + ret = dma_fence_wait(fence, intr); return ret; } @@ -446,7 +446,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e } if (must_wait) - ret = fence_wait(fence, intr); + ret = dma_fence_wait(fence, intr); } return ret; @@ -456,7 +456,7 @@ void nouveau_fence_unref(struct nouveau_fence **pfence) { if (*pfence) - fence_put(&(*pfence)->base); + dma_fence_put(&(*pfence)->base); *pfence = NULL; } @@ -484,12 +484,12 @@ nouveau_fence_new(struct 
nouveau_channel *chan, bool sysmem, return ret; } -static const char *nouveau_fence_get_get_driver_name(struct fence *fence) +static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence) { return "nouveau"; } -static const char *nouveau_fence_get_timeline_name(struct fence *f) +static const char *nouveau_fence_get_timeline_name(struct dma_fence *f) { struct nouveau_fence *fence = from_fence(f); struct nouveau_fence_chan *fctx = nouveau_fctx(fence); @@ -503,7 +503,7 @@ static const char *nouveau_fence_get_timeline_name(struct fence *f) * result. The drm node should still be there, so we can derive the index from * the fence context. */ -static bool nouveau_fence_is_signaled(struct fence *f) +static bool nouveau_fence_is_signaled(struct dma_fence *f) { struct nouveau_fence *fence = from_fence(f); struct nouveau_fence_chan *fctx = nouveau_fctx(fence); @@ -519,7 +519,7 @@ static bool nouveau_fence_is_signaled(struct fence *f) return ret; } -static bool nouveau_fence_no_signaling(struct fence *f) +static bool nouveau_fence_no_signaling(struct dma_fence *f) { struct nouveau_fence *fence = from_fence(f); @@ -530,30 +530,30 @@ static bool nouveau_fence_no_signaling(struct fence *f) WARN_ON(atomic_read(&fence->base.refcount.refcount) <= 1); /* - * This needs uevents to work correctly, but fence_add_callback relies on + * This needs uevents to work correctly, but dma_fence_add_callback relies on * being able to enable signaling. It will still get signaled eventually, * just not right away. */ if (nouveau_fence_is_signaled(f)) { list_del(&fence->head); - fence_put(&fence->base); + dma_fence_put(&fence->base); return false; } return true; } -static void nouveau_fence_release(struct fence *f) +static void nouveau_fence_release(struct dma_fence *f) { struct nouveau_fence *fence = from_fence(f); struct nouveau_fence_chan *fctx = nouveau_fctx(fence); kref_put(&fctx->fence_ref, nouveau_fence_context_put); - fence_free(&fence->base); + dma_fence_free(&fence->base); } -static const struct fence_ops nouveau_fence_ops_legacy = { +static const struct dma_fence_ops nouveau_fence_ops_legacy = { .get_driver_name = nouveau_fence_get_get_driver_name, .get_timeline_name = nouveau_fence_get_timeline_name, .enable_signaling = nouveau_fence_no_signaling, @@ -562,7 +562,7 @@ static const struct fence_ops nouveau_fence_ops_legacy = { .release = nouveau_fence_release }; -static bool nouveau_fence_enable_signaling(struct fence *f) +static bool nouveau_fence_enable_signaling(struct dma_fence *f) { struct nouveau_fence *fence = from_fence(f); struct nouveau_fence_chan *fctx = nouveau_fctx(fence); @@ -573,18 +573,18 @@ static bool nouveau_fence_enable_signaling(struct fence *f) ret = nouveau_fence_no_signaling(f); if (ret) - set_bit(FENCE_FLAG_USER_BITS, &fence->base.flags); + set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags); else if (!--fctx->notify_ref) nvif_notify_put(&fctx->notify); return ret; } -static const struct fence_ops nouveau_fence_ops_uevent = { +static const struct dma_fence_ops nouveau_fence_ops_uevent = { .get_driver_name = nouveau_fence_get_get_driver_name, .get_timeline_name = nouveau_fence_get_timeline_name, .enable_signaling = nouveau_fence_enable_signaling, .signaled = nouveau_fence_is_signaled, - .wait = fence_default_wait, + .wait = dma_fence_default_wait, .release = NULL }; diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h index 64c4ce7115ad..41f3c019e534 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.h +++ 
b/drivers/gpu/drm/nouveau/nouveau_fence.h @@ -1,14 +1,14 @@ #ifndef __NOUVEAU_FENCE_H__ #define __NOUVEAU_FENCE_H__ -#include <linux/fence.h> +#include <linux/dma-fence.h> #include <nvif/notify.h> struct nouveau_drm; struct nouveau_bo; struct nouveau_fence { - struct fence base; + struct dma_fence base; struct list_head head; @@ -24,7 +24,7 @@ void nouveau_fence_unref(struct nouveau_fence **); int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *); bool nouveau_fence_done(struct nouveau_fence *); -void nouveau_fence_work(struct fence *, void (*)(void *), void *); +void nouveau_fence_work(struct dma_fence *, void (*)(void *), void *); int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr); int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr); diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 0bd7164bc817..7f083c95f422 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -119,7 +119,7 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma) const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM; struct reservation_object *resv = nvbo->bo.resv; struct reservation_object_list *fobj; - struct fence *fence = NULL; + struct dma_fence *fence = NULL; fobj = reservation_object_get_list(resv); diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c index 1915b7b82a59..fa8f2375c398 100644 --- a/drivers/gpu/drm/nouveau/nv04_fence.c +++ b/drivers/gpu/drm/nouveau/nv04_fence.c @@ -110,6 +110,6 @@ nv04_fence_create(struct nouveau_drm *drm) priv->base.context_new = nv04_fence_context_new; priv->base.context_del = nv04_fence_context_del; priv->base.contexts = 15; - priv->base.context_base = fence_context_alloc(priv->base.contexts); + priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); return 0; } diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c index 4e3de34ff6f4..f99fcf56928a 100644 --- a/drivers/gpu/drm/nouveau/nv10_fence.c +++ b/drivers/gpu/drm/nouveau/nv10_fence.c @@ -107,7 +107,7 @@ nv10_fence_create(struct nouveau_drm *drm) priv->base.context_new = nv10_fence_context_new; priv->base.context_del = nv10_fence_context_del; priv->base.contexts = 31; - priv->base.context_base = fence_context_alloc(priv->base.contexts); + priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); spin_lock_init(&priv->lock); return 0; } diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c index 7d5e562a55c5..79bc01111351 100644 --- a/drivers/gpu/drm/nouveau/nv17_fence.c +++ b/drivers/gpu/drm/nouveau/nv17_fence.c @@ -126,7 +126,7 @@ nv17_fence_create(struct nouveau_drm *drm) priv->base.context_new = nv17_fence_context_new; priv->base.context_del = nv10_fence_context_del; priv->base.contexts = 31; - priv->base.context_base = fence_context_alloc(priv->base.contexts); + priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); spin_lock_init(&priv->lock); ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c index 4d6f202b7770..8c5295414578 100644 --- a/drivers/gpu/drm/nouveau/nv50_fence.c +++ b/drivers/gpu/drm/nouveau/nv50_fence.c @@ -97,7 +97,7 @@ nv50_fence_create(struct nouveau_drm *drm) priv->base.context_new = nv50_fence_context_new; priv->base.context_del = nv10_fence_context_del; 
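/*
 * [Editor's note] The hunks above and below repeat one mechanical pattern:
 * struct fence becomes struct dma_fence and every helper gains a dma_
 * prefix. A minimal, hedged sketch of the post-rename API for a
 * hypothetical "demo" driver (not part of this series; all demo_ names
 * are illustrative assumptions):
 */
#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_fence_lock);

static const char *demo_get_driver_name(struct dma_fence *f)
{
	return "demo";
}

static const char *demo_get_timeline_name(struct dma_fence *f)
{
	return "demo-timeline";
}

static bool demo_enable_signaling(struct dma_fence *f)
{
	return true; /* pretend an interrupt will signal us later */
}

static const struct dma_fence_ops demo_fence_ops = {
	.get_driver_name	= demo_get_driver_name,
	.get_timeline_name	= demo_get_timeline_name,
	.enable_signaling	= demo_enable_signaling,
	.wait			= dma_fence_default_wait, /* was fence_default_wait */
};

static struct dma_fence *demo_fence_create(void)
{
	struct dma_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	/* was fence_init() on a context from fence_context_alloc() */
	dma_fence_init(f, &demo_fence_ops, &demo_fence_lock,
		       dma_fence_context_alloc(1), 1);
	return f;
}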
priv->base.contexts = 127; - priv->base.context_base = fence_context_alloc(priv->base.contexts); + priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); spin_lock_init(&priv->lock); ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c index 18bde9d8e6d6..23ef04b4e0b2 100644 --- a/drivers/gpu/drm/nouveau/nv84_fence.c +++ b/drivers/gpu/drm/nouveau/nv84_fence.c @@ -229,7 +229,7 @@ nv84_fence_create(struct nouveau_drm *drm) priv->base.context_del = nv84_fence_context_del; priv->base.contexts = fifo->nr; - priv->base.context_base = fence_context_alloc(priv->base.contexts); + priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); priv->base.uevent = true; /* Use VRAM if there is any ; otherwise fallback to system memory */ diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 5f3e5ad99de7..84995ebc6ffc 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -31,7 +31,7 @@ * Definitions taken from spice-protocol, plus kernel driver specific bits. */ -#include <linux/fence.h> +#include <linux/dma-fence.h> #include <linux/workqueue.h> #include <linux/firmware.h> #include <linux/platform_device.h> @@ -190,7 +190,7 @@ enum { * spice-protocol/qxl_dev.h */ #define QXL_MAX_RES 96 struct qxl_release { - struct fence base; + struct dma_fence base; int id; int type; diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index cd83f050cf3e..50b4e522f05f 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -21,7 +21,7 @@ */ #include "qxl_drv.h" #include "qxl_object.h" -#include <trace/events/fence.h> +#include <trace/events/dma_fence.h> /* * drawable cmd cache - allocate a bunch of VRAM pages, suballocate @@ -40,23 +40,24 @@ static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE }; static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO }; -static const char *qxl_get_driver_name(struct fence *fence) +static const char *qxl_get_driver_name(struct dma_fence *fence) { return "qxl"; } -static const char *qxl_get_timeline_name(struct fence *fence) +static const char *qxl_get_timeline_name(struct dma_fence *fence) { return "release"; } -static bool qxl_nop_signaling(struct fence *fence) +static bool qxl_nop_signaling(struct dma_fence *fence) { /* fences are always automatically signaled, so just pretend we did this.. 
*/ return true; } -static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout) +static long qxl_fence_wait(struct dma_fence *fence, bool intr, + signed long timeout) { struct qxl_device *qdev; struct qxl_release *release; @@ -71,7 +72,7 @@ static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout) retry: sc++; - if (fence_is_signaled(fence)) + if (dma_fence_is_signaled(fence)) goto signaled; qxl_io_notify_oom(qdev); @@ -80,11 +81,11 @@ retry: if (!qxl_queue_garbage_collect(qdev, true)) break; - if (fence_is_signaled(fence)) + if (dma_fence_is_signaled(fence)) goto signaled; } - if (fence_is_signaled(fence)) + if (dma_fence_is_signaled(fence)) goto signaled; if (have_drawable_releases || sc < 4) { @@ -96,9 +97,9 @@ retry: return 0; if (have_drawable_releases && sc > 300) { - FENCE_WARN(fence, "failed to wait on release %llu " - "after spincount %d\n", - fence->context & ~0xf0000000, sc); + DMA_FENCE_WARN(fence, "failed to wait on release %llu " + "after spincount %d\n", + fence->context & ~0xf0000000, sc); goto signaled; } goto retry; @@ -115,7 +116,7 @@ signaled: return end - cur; } -static const struct fence_ops qxl_fence_ops = { +static const struct dma_fence_ops qxl_fence_ops = { .get_driver_name = qxl_get_driver_name, .get_timeline_name = qxl_get_timeline_name, .enable_signaling = qxl_nop_signaling, @@ -192,8 +193,8 @@ qxl_release_free(struct qxl_device *qdev, WARN_ON(list_empty(&release->bos)); qxl_release_free_list(release); - fence_signal(&release->base); - fence_put(&release->base); + dma_fence_signal(&release->base); + dma_fence_put(&release->base); } else { qxl_release_free_list(release); kfree(release); @@ -453,9 +454,9 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release) * Since we never really allocated a context and we don't want to conflict, * set the highest bits. This will break if we really allow exporting of dma-bufs. 
*/ - fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock, - release->id | 0xf0000000, release->base.seqno); - trace_fence_emit(&release->base); + dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock, + release->id | 0xf0000000, release->base.seqno); + trace_dma_fence_emit(&release->base); driver = bdev->driver; glob = bo->glob; diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 1b0dcad916b0..44e0c5ed6418 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -66,7 +66,7 @@ #include <linux/kref.h> #include <linux/interval_tree.h> #include <linux/hashtable.h> -#include <linux/fence.h> +#include <linux/dma-fence.h> #include <ttm/ttm_bo_api.h> #include <ttm/ttm_bo_driver.h> @@ -367,7 +367,7 @@ struct radeon_fence_driver { }; struct radeon_fence { - struct fence base; + struct dma_fence base; struct radeon_device *rdev; uint64_t seq; @@ -746,7 +746,7 @@ struct radeon_flip_work { uint64_t base; struct drm_pending_vblank_event *event; struct radeon_bo *old_rbo; - struct fence *fence; + struct dma_fence *fence; bool async; }; @@ -2514,9 +2514,9 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v); /* * Cast helper */ -extern const struct fence_ops radeon_fence_ops; +extern const struct dma_fence_ops radeon_fence_ops; -static inline struct radeon_fence *to_radeon_fence(struct fence *f) +static inline struct radeon_fence *to_radeon_fence(struct dma_fence *f) { struct radeon_fence *__f = container_of(f, struct radeon_fence, base); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 79c9b6f3f013..0be8d5cd7826 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1320,7 +1320,7 @@ int radeon_device_init(struct radeon_device *rdev, for (i = 0; i < RADEON_NUM_RINGS; i++) { rdev->ring[i].idx = i; } - rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS); + rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS); DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", radeon_family_name[rdev->family], pdev->vendor, pdev->device, diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index cdb8cb568c15..e7409e8a9f87 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -437,7 +437,7 @@ static void radeon_flip_work_func(struct work_struct *__work) down_read(&rdev->exclusive_lock); } } else - r = fence_wait(work->fence, false); + r = dma_fence_wait(work->fence, false); if (r) DRM_ERROR("failed to wait on page flip fence (%d)!\n", r); @@ -447,7 +447,7 @@ static void radeon_flip_work_func(struct work_struct *__work) * confused about which BO the CRTC is scanning out */ - fence_put(work->fence); + dma_fence_put(work->fence); work->fence = NULL; } @@ -542,7 +542,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc, DRM_ERROR("failed to pin new rbo buffer before flip\n"); goto cleanup; } - work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv)); + work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.resv)); radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL); radeon_bo_unreserve(new_rbo); @@ -617,7 +617,7 @@ pflip_cleanup: cleanup: drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); - fence_put(work->fence); + dma_fence_put(work->fence); kfree(work); return r; } diff --git a/drivers/gpu/drm/radeon/radeon_fence.c 
b/drivers/gpu/drm/radeon/radeon_fence.c index 7ef075acde9c..ef09f0a63754 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -141,8 +141,10 @@ int radeon_fence_emit(struct radeon_device *rdev, (*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring]; (*fence)->ring = ring; (*fence)->is_vm_update = false; - fence_init(&(*fence)->base, &radeon_fence_ops, - &rdev->fence_queue.lock, rdev->fence_context + ring, seq); + dma_fence_init(&(*fence)->base, &radeon_fence_ops, + &rdev->fence_queue.lock, + rdev->fence_context + ring, + seq); radeon_fence_ring_emit(rdev, ring, *fence); trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq); radeon_fence_schedule_check(rdev, ring); @@ -169,18 +171,18 @@ static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int fl */ seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq); if (seq >= fence->seq) { - int ret = fence_signal_locked(&fence->base); + int ret = dma_fence_signal_locked(&fence->base); if (!ret) - FENCE_TRACE(&fence->base, "signaled from irq context\n"); + DMA_FENCE_TRACE(&fence->base, "signaled from irq context\n"); else - FENCE_TRACE(&fence->base, "was already signaled\n"); + DMA_FENCE_TRACE(&fence->base, "was already signaled\n"); radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring); __remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake); - fence_put(&fence->base); + dma_fence_put(&fence->base); } else - FENCE_TRACE(&fence->base, "pending\n"); + DMA_FENCE_TRACE(&fence->base, "pending\n"); return 0; } @@ -351,7 +353,7 @@ static bool radeon_fence_seq_signaled(struct radeon_device *rdev, return false; } -static bool radeon_fence_is_signaled(struct fence *f) +static bool radeon_fence_is_signaled(struct dma_fence *f) { struct radeon_fence *fence = to_radeon_fence(f); struct radeon_device *rdev = fence->rdev; @@ -381,7 +383,7 @@ static bool radeon_fence_is_signaled(struct fence *f) * to fence_queue that checks if this fence is signaled, and if so it * signals the fence and removes itself. */ -static bool radeon_fence_enable_signaling(struct fence *f) +static bool radeon_fence_enable_signaling(struct dma_fence *f) { struct radeon_fence *fence = to_radeon_fence(f); struct radeon_device *rdev = fence->rdev; @@ -414,9 +416,9 @@ static bool radeon_fence_enable_signaling(struct fence *f) fence->fence_wake.private = NULL; fence->fence_wake.func = radeon_fence_check_signaled; __add_wait_queue(&rdev->fence_queue, &fence->fence_wake); - fence_get(f); + dma_fence_get(f); - FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring); + DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring); return true; } @@ -436,9 +438,9 @@ bool radeon_fence_signaled(struct radeon_fence *fence) if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) { int ret; - ret = fence_signal(&fence->base); + ret = dma_fence_signal(&fence->base); if (!ret) - FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n"); + DMA_FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n"); return true; } return false; @@ -552,7 +554,7 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo * exclusive_lock is not held in that case. 
*/ if (WARN_ON_ONCE(!to_radeon_fence(&fence->base))) - return fence_wait(&fence->base, intr); + return dma_fence_wait(&fence->base, intr); seq[fence->ring] = fence->seq; r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout); @@ -560,9 +562,9 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo return r; } - r_sig = fence_signal(&fence->base); + r_sig = dma_fence_signal(&fence->base); if (!r_sig) - FENCE_TRACE(&fence->base, "signaled from fence_wait\n"); + DMA_FENCE_TRACE(&fence->base, "signaled from fence_wait\n"); return r; } @@ -697,7 +699,7 @@ int radeon_fence_wait_empty(struct radeon_device *rdev, int ring) */ struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence) { - fence_get(&fence->base); + dma_fence_get(&fence->base); return fence; } @@ -714,7 +716,7 @@ void radeon_fence_unref(struct radeon_fence **fence) *fence = NULL; if (tmp) { - fence_put(&tmp->base); + dma_fence_put(&tmp->base); } } @@ -1028,12 +1030,12 @@ int radeon_debugfs_fence_init(struct radeon_device *rdev) #endif } -static const char *radeon_fence_get_driver_name(struct fence *fence) +static const char *radeon_fence_get_driver_name(struct dma_fence *fence) { return "radeon"; } -static const char *radeon_fence_get_timeline_name(struct fence *f) +static const char *radeon_fence_get_timeline_name(struct dma_fence *f) { struct radeon_fence *fence = to_radeon_fence(f); switch (fence->ring) { @@ -1051,16 +1053,16 @@ static const char *radeon_fence_get_timeline_name(struct fence *f) static inline bool radeon_test_signaled(struct radeon_fence *fence) { - return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); + return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); } struct radeon_wait_cb { - struct fence_cb base; + struct dma_fence_cb base; struct task_struct *task; }; static void -radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb) +radeon_fence_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb) { struct radeon_wait_cb *wait = container_of(cb, struct radeon_wait_cb, base); @@ -1068,7 +1070,7 @@ radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb) wake_up_process(wait->task); } -static signed long radeon_fence_default_wait(struct fence *f, bool intr, +static signed long radeon_fence_default_wait(struct dma_fence *f, bool intr, signed long t) { struct radeon_fence *fence = to_radeon_fence(f); @@ -1077,7 +1079,7 @@ static signed long radeon_fence_default_wait(struct fence *f, bool intr, cb.task = current; - if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb)) + if (dma_fence_add_callback(f, &cb.base, radeon_fence_wait_cb)) return t; while (t > 0) { @@ -1105,12 +1107,12 @@ static signed long radeon_fence_default_wait(struct fence *f, bool intr, } __set_current_state(TASK_RUNNING); - fence_remove_callback(f, &cb.base); + dma_fence_remove_callback(f, &cb.base); return t; } -const struct fence_ops radeon_fence_ops = { +const struct dma_fence_ops radeon_fence_ops = { .get_driver_name = radeon_fence_get_driver_name, .get_timeline_name = radeon_fence_get_timeline_name, .enable_signaling = radeon_fence_enable_signaling, diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c index 02ac8a1de4ff..be5d7a38d3aa 100644 --- a/drivers/gpu/drm/radeon/radeon_sync.c +++ b/drivers/gpu/drm/radeon/radeon_sync.c @@ -92,7 +92,7 @@ int radeon_sync_resv(struct radeon_device *rdev, bool shared) { struct reservation_object_list *flist; - struct fence *f; + struct dma_fence *f; struct radeon_fence *fence; unsigned i; 
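/*
 * [Editor's note] radeon_fence_default_wait() above keeps the classic
 * wait shape under the new names: arm a callback that wakes the waiting
 * task, then sleep until the fence signals or the timeout expires. A
 * hedged sketch of that shape (demo_ names are illustrative, error
 * handling trimmed):
 */
#include <linux/dma-fence.h>
#include <linux/sched.h>

struct demo_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void demo_wait_wake(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct demo_wait_cb *wait = container_of(cb, struct demo_wait_cb, base);

	wake_up_process(wait->task);
}

static long demo_wait(struct dma_fence *f, signed long timeout)
{
	struct demo_wait_cb cb = { .task = current };

	/* was fence_add_callback(); -ENOENT means already signaled */
	if (dma_fence_add_callback(f, &cb.base, demo_wait_wake))
		return timeout;

	while (timeout > 0) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		/* was FENCE_FLAG_SIGNALED_BIT before this series */
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
			break;
		timeout = schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);
	dma_fence_remove_callback(f, &cb.base);
	return timeout;
}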
int r = 0; @@ -103,7 +103,7 @@ int radeon_sync_resv(struct radeon_device *rdev, if (fence && fence->rdev == rdev) radeon_sync_fence(sync, fence); else if (f) - r = fence_wait(f, true); + r = dma_fence_wait(f, true); flist = reservation_object_get_list(resv); if (shared || !flist || r) @@ -116,7 +116,7 @@ int radeon_sync_resv(struct radeon_device *rdev, if (fence && fence->rdev == rdev) radeon_sync_fence(sync, fence); else - r = fence_wait(f, true); + r = dma_fence_wait(f, true); if (r) break; diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 0cd0e7bdee55..d34d1cf33895 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -467,7 +467,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, { int32_t *msg, msg_type, handle; unsigned img_size = 0; - struct fence *f; + struct dma_fence *f; void *ptr; int i, r; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 8c8cbe837e61..6fe161192bb4 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -20,6 +20,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> +#include <drm/drm_of.h> #include <linux/dma-mapping.h> #include <linux/pm_runtime.h> #include <linux/module.h> @@ -388,7 +389,7 @@ static void rockchip_add_endpoints(struct device *dev, continue; } - component_match_add(dev, match, compare_of, remote); + drm_of_component_match_add(dev, match, compare_of, remote); of_node_put(remote); } } @@ -437,7 +438,8 @@ static int rockchip_drm_platform_probe(struct platform_device *pdev) } of_node_put(iommu); - component_match_add(dev, &match, compare_of, port->parent); + drm_of_component_match_add(dev, &match, compare_of, + port->parent); of_node_put(port); } diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c index 7087499969bc..6aead2013b62 100644 --- a/drivers/gpu/drm/sti/sti_drv.c +++ b/drivers/gpu/drm/sti/sti_drv.c @@ -17,6 +17,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_fb_cma_helper.h> +#include <drm/drm_of.h> #include "sti_crtc.h" #include "sti_drv.h" @@ -424,8 +425,8 @@ static int sti_platform_probe(struct platform_device *pdev) child_np = of_get_next_available_child(node, NULL); while (child_np) { - component_match_add(dev, &match, compare_of, child_np); - of_node_put(child_np); + drm_of_component_match_add(dev, &match, compare_of, + child_np); child_np = of_get_next_available_child(node, child_np); } diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 0da9862ad8ed..b3c4ad605e81 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -18,6 +18,7 @@ #include <drm/drm_fb_cma_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_of.h> #include "sun4i_crtc.h" #include "sun4i_drv.h" @@ -239,7 +240,7 @@ static int sun4i_drv_add_endpoints(struct device *dev, /* Add current component */ DRM_DEBUG_DRIVER("Adding component %s\n", of_node_full_name(node)); - component_match_add(dev, match, compare_of, node); + drm_of_component_match_add(dev, match, compare_of, node); count++; } diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c index 68e895021005..06a4c584f3cb 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_external.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c @@ 
-10,6 +10,7 @@ #include <linux/component.h> #include <linux/of_graph.h> +#include <drm/drm_of.h> #include "tilcdc_drv.h" #include "tilcdc_external.h" @@ -160,7 +161,8 @@ int tilcdc_get_external_components(struct device *dev, dev_dbg(dev, "Subdevice node '%s' found\n", node->name); if (match) - component_match_add(dev, match, dev_match_of, node); + drm_of_component_match_add(dev, match, dev_match_of, + node); of_node_put(node); count++; } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 31fcf11a2831..f6ff579e8918 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -148,7 +148,7 @@ static void ttm_bo_release_list(struct kref *list_kref) BUG_ON(!list_empty(&bo->ddestroy)); ttm_tt_destroy(bo->ttm); atomic_dec(&bo->glob->bo_count); - fence_put(bo->moving); + dma_fence_put(bo->moving); if (bo->resv == &bo->ttm_resv) reservation_object_fini(&bo->ttm_resv); mutex_destroy(&bo->wu_mutex); @@ -426,20 +426,20 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) { struct reservation_object_list *fobj; - struct fence *fence; + struct dma_fence *fence; int i; fobj = reservation_object_get_list(bo->resv); fence = reservation_object_get_excl(bo->resv); if (fence && !fence->ops->signaled) - fence_enable_sw_signaling(fence); + dma_fence_enable_sw_signaling(fence); for (i = 0; fobj && i < fobj->shared_count; ++i) { fence = rcu_dereference_protected(fobj->shared[i], reservation_object_held(bo->resv)); if (!fence->ops->signaled) - fence_enable_sw_signaling(fence); + dma_fence_enable_sw_signaling(fence); } } @@ -801,11 +801,11 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem) { - struct fence *fence; + struct dma_fence *fence; int ret; spin_lock(&man->move_lock); - fence = fence_get(man->move); + fence = dma_fence_get(man->move); spin_unlock(&man->move_lock); if (fence) { @@ -815,7 +815,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, if (unlikely(ret)) return ret; - fence_put(bo->moving); + dma_fence_put(bo->moving); bo->moving = fence; } @@ -1295,7 +1295,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, { struct ttm_mem_type_manager *man = &bdev->man[mem_type]; struct ttm_bo_global *glob = bdev->glob; - struct fence *fence; + struct dma_fence *fence; int ret; /* @@ -1318,12 +1318,12 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, spin_unlock(&glob->lru_lock); spin_lock(&man->move_lock); - fence = fence_get(man->move); + fence = dma_fence_get(man->move); spin_unlock(&man->move_lock); if (fence) { - ret = fence_wait(fence, false); - fence_put(fence); + ret = dma_fence_wait(fence, false); + dma_fence_put(fence); if (ret) { if (allow_errors) { return ret; @@ -1352,7 +1352,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) mem_type); return ret; } - fence_put(man->move); + dma_fence_put(man->move); man->use_type = false; man->has_type = false; diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index bf6e21655c57..d0459b392e5e 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -644,7 +644,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) EXPORT_SYMBOL(ttm_bo_kunmap); int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, - struct fence *fence, + struct dma_fence *fence, bool evict, struct ttm_mem_reg *new_mem) { @@ -674,8 +674,8 @@ int 
ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, * operation has completed. */ - fence_put(bo->moving); - bo->moving = fence_get(fence); + dma_fence_put(bo->moving); + bo->moving = dma_fence_get(fence); ret = ttm_buffer_object_transfer(bo, &ghost_obj); if (ret) @@ -706,7 +706,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, - struct fence *fence, bool evict, + struct dma_fence *fence, bool evict, struct ttm_mem_reg *new_mem) { struct ttm_bo_device *bdev = bo->bdev; @@ -730,8 +730,8 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, * operation has completed. */ - fence_put(bo->moving); - bo->moving = fence_get(fence); + dma_fence_put(bo->moving); + bo->moving = dma_fence_get(fence); ret = ttm_buffer_object_transfer(bo, &ghost_obj); if (ret) @@ -761,16 +761,16 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, */ spin_lock(&from->move_lock); - if (!from->move || fence_is_later(fence, from->move)) { - fence_put(from->move); - from->move = fence_get(fence); + if (!from->move || dma_fence_is_later(fence, from->move)) { + dma_fence_put(from->move); + from->move = dma_fence_get(fence); } spin_unlock(&from->move_lock); ttm_bo_free_old_node(bo); - fence_put(bo->moving); - bo->moving = fence_get(fence); + dma_fence_put(bo->moving); + bo->moving = dma_fence_get(fence); } else { /** diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index a6ed9d5e5167..4748aedc933a 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -54,7 +54,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, /* * Quick non-stalling check for idle. */ - if (fence_is_signaled(bo->moving)) + if (dma_fence_is_signaled(bo->moving)) goto out_clear; /* @@ -67,14 +67,14 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, goto out_unlock; up_read(&vma->vm_mm->mmap_sem); - (void) fence_wait(bo->moving, true); + (void) dma_fence_wait(bo->moving, true); goto out_unlock; } /* * Ordinary wait. */ - ret = fence_wait(bo->moving, true); + ret = dma_fence_wait(bo->moving, true); if (unlikely(ret != 0)) { ret = (ret != -ERESTARTSYS) ? 
VM_FAULT_SIGBUS : VM_FAULT_NOPAGE; @@ -82,7 +82,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, } out_clear: - fence_put(bo->moving); + dma_fence_put(bo->moving); bo->moving = NULL; out_unlock: diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index a80717b35dc6..d35bc491e8de 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -179,7 +179,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, EXPORT_SYMBOL(ttm_eu_reserve_buffers); void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, - struct list_head *list, struct fence *fence) + struct list_head *list, + struct dma_fence *fence) { struct ttm_validate_buffer *entry; struct ttm_buffer_object *bo; diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c index 5c57c1ffa1f9..488909a21ed8 100644 --- a/drivers/gpu/drm/vgem/vgem_fence.c +++ b/drivers/gpu/drm/vgem/vgem_fence.c @@ -28,56 +28,57 @@ #define VGEM_FENCE_TIMEOUT (10*HZ) struct vgem_fence { - struct fence base; + struct dma_fence base; struct spinlock lock; struct timer_list timer; }; -static const char *vgem_fence_get_driver_name(struct fence *fence) +static const char *vgem_fence_get_driver_name(struct dma_fence *fence) { return "vgem"; } -static const char *vgem_fence_get_timeline_name(struct fence *fence) +static const char *vgem_fence_get_timeline_name(struct dma_fence *fence) { return "unbound"; } -static bool vgem_fence_signaled(struct fence *fence) +static bool vgem_fence_signaled(struct dma_fence *fence) { return false; } -static bool vgem_fence_enable_signaling(struct fence *fence) +static bool vgem_fence_enable_signaling(struct dma_fence *fence) { return true; } -static void vgem_fence_release(struct fence *base) +static void vgem_fence_release(struct dma_fence *base) { struct vgem_fence *fence = container_of(base, typeof(*fence), base); del_timer_sync(&fence->timer); - fence_free(&fence->base); + dma_fence_free(&fence->base); } -static void vgem_fence_value_str(struct fence *fence, char *str, int size) +static void vgem_fence_value_str(struct dma_fence *fence, char *str, int size) { snprintf(str, size, "%u", fence->seqno); } -static void vgem_fence_timeline_value_str(struct fence *fence, char *str, +static void vgem_fence_timeline_value_str(struct dma_fence *fence, char *str, int size) { - snprintf(str, size, "%u", fence_is_signaled(fence) ? fence->seqno : 0); + snprintf(str, size, "%u", + dma_fence_is_signaled(fence) ? 
fence->seqno : 0); } -static const struct fence_ops vgem_fence_ops = { +static const struct dma_fence_ops vgem_fence_ops = { .get_driver_name = vgem_fence_get_driver_name, .get_timeline_name = vgem_fence_get_timeline_name, .enable_signaling = vgem_fence_enable_signaling, .signaled = vgem_fence_signaled, - .wait = fence_default_wait, + .wait = dma_fence_default_wait, .release = vgem_fence_release, .fence_value_str = vgem_fence_value_str, @@ -88,11 +89,11 @@ static void vgem_fence_timeout(unsigned long data) { struct vgem_fence *fence = (struct vgem_fence *)data; - fence_signal(&fence->base); + dma_fence_signal(&fence->base); } -static struct fence *vgem_fence_create(struct vgem_file *vfile, - unsigned int flags) +static struct dma_fence *vgem_fence_create(struct vgem_file *vfile, + unsigned int flags) { struct vgem_fence *fence; @@ -101,8 +102,8 @@ static struct fence *vgem_fence_create(struct vgem_file *vfile, return NULL; spin_lock_init(&fence->lock); - fence_init(&fence->base, &vgem_fence_ops, &fence->lock, - fence_context_alloc(1), 1); + dma_fence_init(&fence->base, &vgem_fence_ops, &fence->lock, + dma_fence_context_alloc(1), 1); setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence); @@ -157,7 +158,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev, struct vgem_file *vfile = file->driver_priv; struct reservation_object *resv; struct drm_gem_object *obj; - struct fence *fence; + struct dma_fence *fence; int ret; if (arg->flags & ~VGEM_FENCE_WRITE) @@ -209,8 +210,8 @@ int vgem_fence_attach_ioctl(struct drm_device *dev, } err_fence: if (ret) { - fence_signal(fence); - fence_put(fence); + dma_fence_signal(fence); + dma_fence_put(fence); } err: drm_gem_object_unreference_unlocked(obj); @@ -239,7 +240,7 @@ int vgem_fence_signal_ioctl(struct drm_device *dev, { struct vgem_file *vfile = file->driver_priv; struct drm_vgem_fence_signal *arg = data; - struct fence *fence; + struct dma_fence *fence; int ret = 0; if (arg->flags) @@ -253,11 +254,11 @@ int vgem_fence_signal_ioctl(struct drm_device *dev, if (IS_ERR(fence)) return PTR_ERR(fence); - if (fence_is_signaled(fence)) + if (dma_fence_is_signaled(fence)) ret = -ETIMEDOUT; - fence_signal(fence); - fence_put(fence); + dma_fence_signal(fence); + dma_fence_put(fence); return ret; } @@ -271,8 +272,8 @@ int vgem_fence_open(struct vgem_file *vfile) static int __vgem_fence_idr_fini(int id, void *p, void *data) { - fence_signal(p); - fence_put(p); + dma_fence_signal(p); + dma_fence_put(p); return 0; } diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index ae59080d63d1..ec1ebdcfe80b 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -82,7 +82,7 @@ struct virtio_gpu_fence_driver { }; struct virtio_gpu_fence { - struct fence f; + struct dma_fence f; struct virtio_gpu_fence_driver *drv; struct list_head node; uint64_t seq; diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c index f3f70fa8a4c7..23353521f903 100644 --- a/drivers/gpu/drm/virtio/virtgpu_fence.c +++ b/drivers/gpu/drm/virtio/virtgpu_fence.c @@ -26,22 +26,22 @@ #include <drm/drmP.h> #include "virtgpu_drv.h" -static const char *virtio_get_driver_name(struct fence *f) +static const char *virtio_get_driver_name(struct dma_fence *f) { return "virtio_gpu"; } -static const char *virtio_get_timeline_name(struct fence *f) +static const char *virtio_get_timeline_name(struct dma_fence *f) { return "controlq"; } -static bool virtio_enable_signaling(struct fence *f) 
+static bool virtio_enable_signaling(struct dma_fence *f) { return true; } -static bool virtio_signaled(struct fence *f) +static bool virtio_signaled(struct dma_fence *f) { struct virtio_gpu_fence *fence = to_virtio_fence(f); @@ -50,26 +50,26 @@ static bool virtio_signaled(struct fence *f) return false; } -static void virtio_fence_value_str(struct fence *f, char *str, int size) +static void virtio_fence_value_str(struct dma_fence *f, char *str, int size) { struct virtio_gpu_fence *fence = to_virtio_fence(f); snprintf(str, size, "%llu", fence->seq); } -static void virtio_timeline_value_str(struct fence *f, char *str, int size) +static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size) { struct virtio_gpu_fence *fence = to_virtio_fence(f); snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq)); } -static const struct fence_ops virtio_fence_ops = { +static const struct dma_fence_ops virtio_fence_ops = { .get_driver_name = virtio_get_driver_name, .get_timeline_name = virtio_get_timeline_name, .enable_signaling = virtio_enable_signaling, .signaled = virtio_signaled, - .wait = fence_default_wait, + .wait = dma_fence_default_wait, .fence_value_str = virtio_fence_value_str, .timeline_value_str = virtio_timeline_value_str, }; @@ -88,9 +88,9 @@ int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, spin_lock_irqsave(&drv->lock, irq_flags); (*fence)->drv = drv; (*fence)->seq = ++drv->sync_seq; - fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock, - drv->context, (*fence)->seq); - fence_get(&(*fence)->f); + dma_fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock, + drv->context, (*fence)->seq); + dma_fence_get(&(*fence)->f); list_add_tail(&(*fence)->node, &drv->fences); spin_unlock_irqrestore(&drv->lock, irq_flags); @@ -111,9 +111,9 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev, list_for_each_entry_safe(fence, tmp, &drv->fences, node) { if (last_seq < fence->seq) continue; - fence_signal_locked(&fence->f); + dma_fence_signal_locked(&fence->f); list_del(&fence->node); - fence_put(&fence->f); + dma_fence_put(&fence->f); } spin_unlock_irqrestore(&drv->lock, irq_flags); } diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 818478b4c4f0..61f3a963af95 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -172,7 +172,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, /* fence the command bo */ virtio_gpu_unref_list(&validate_list); drm_free_large(buflist); - fence_put(&fence->f); + dma_fence_put(&fence->f); return 0; out_unresv: @@ -298,7 +298,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data, drm_gem_object_release(obj); if (vgdev->has_virgl_3d) { virtio_gpu_unref_list(&validate_list); - fence_put(&fence->f); + dma_fence_put(&fence->f); } return ret; } @@ -309,13 +309,13 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data, if (vgdev->has_virgl_3d) { virtio_gpu_unref_list(&validate_list); - fence_put(&fence->f); + dma_fence_put(&fence->f); } return 0; fail_unref: if (vgdev->has_virgl_3d) { virtio_gpu_unref_list(&validate_list); - fence_put(&fence->f); + dma_fence_put(&fence->f); } //fail_obj: // drm_gem_object_handle_unreference_unlocked(obj); @@ -383,7 +383,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev, reservation_object_add_excl_fence(qobj->tbo.resv, &fence->f); - fence_put(&fence->f); + dma_fence_put(&fence->f); 
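/*
 * [Editor's note] The vgem and virtio hunks exercise both ends of a
 * fence's life under the renamed API. A hedged sketch of that lifecycle,
 * reusing the hypothetical demo_fence_create() from the earlier note:
 */
static int demo_producer_consumer(void)
{
	struct dma_fence *f = demo_fence_create();
	long ret;

	if (!f)
		return -ENOMEM;

	dma_fence_signal(f);	/* was fence_signal(); e.g. from IRQ or timer */

	/* was fence_wait_timeout(); 0 means timeout, <0 an error */
	ret = dma_fence_wait_timeout(f, true, 10 * HZ);

	dma_fence_put(f);	/* was fence_put(); drops the initial reference */
	if (ret == 0)
		return -ETIMEDOUT;
	return ret < 0 ? ret : 0;
}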
out_unres: virtio_gpu_object_unreserve(qobj); out: @@ -431,7 +431,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data, args->level, &box, &fence); reservation_object_add_excl_fence(qobj->tbo.resv, &fence->f); - fence_put(&fence->f); + dma_fence_put(&fence->f); } out_unres: diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index 036b0fbae0fb..1235519853f4 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -159,7 +159,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags) virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func); virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func); - vgdev->fence_drv.context = fence_context_alloc(1); + vgdev->fence_drv.context = dma_fence_context_alloc(1); spin_lock_init(&vgdev->fence_drv.lock); INIT_LIST_HEAD(&vgdev->fence_drv.fences); INIT_LIST_HEAD(&vgdev->cap_cache); diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index ba28c0f6f28a..cb75f0663ba0 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -152,7 +152,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, if (!ret) { reservation_object_add_excl_fence(bo->tbo.resv, &fence->f); - fence_put(&fence->f); + dma_fence_put(&fence->f); fence = NULL; virtio_gpu_object_unreserve(bo); virtio_gpu_object_wait(bo, false); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 26ac8e80a478..6541dd8b82dc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -108,7 +108,7 @@ fman_from_fence(struct vmw_fence_obj *fence) * objects with actions attached to them. 
*/ -static void vmw_fence_obj_destroy(struct fence *f) +static void vmw_fence_obj_destroy(struct dma_fence *f) { struct vmw_fence_obj *fence = container_of(f, struct vmw_fence_obj, base); @@ -123,17 +123,17 @@ static void vmw_fence_obj_destroy(struct fence *f) fence->destroy(fence); } -static const char *vmw_fence_get_driver_name(struct fence *f) +static const char *vmw_fence_get_driver_name(struct dma_fence *f) { return "vmwgfx"; } -static const char *vmw_fence_get_timeline_name(struct fence *f) +static const char *vmw_fence_get_timeline_name(struct dma_fence *f) { return "svga"; } -static bool vmw_fence_enable_signaling(struct fence *f) +static bool vmw_fence_enable_signaling(struct dma_fence *f) { struct vmw_fence_obj *fence = container_of(f, struct vmw_fence_obj, base); @@ -152,12 +152,12 @@ static bool vmw_fence_enable_signaling(struct fence *f) } struct vmwgfx_wait_cb { - struct fence_cb base; + struct dma_fence_cb base; struct task_struct *task; }; static void -vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb) +vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb) { struct vmwgfx_wait_cb *wait = container_of(cb, struct vmwgfx_wait_cb, base); @@ -167,7 +167,7 @@ vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb) static void __vmw_fences_update(struct vmw_fence_manager *fman); -static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout) +static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout) { struct vmw_fence_obj *fence = container_of(f, struct vmw_fence_obj, base); @@ -197,7 +197,7 @@ static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout) while (ret > 0) { __vmw_fences_update(fman); - if (test_bit(FENCE_FLAG_SIGNALED_BIT, &f->flags)) + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) break; if (intr) @@ -225,7 +225,7 @@ out: return ret; } -static struct fence_ops vmw_fence_ops = { +static struct dma_fence_ops vmw_fence_ops = { .get_driver_name = vmw_fence_get_driver_name, .get_timeline_name = vmw_fence_get_timeline_name, .enable_signaling = vmw_fence_enable_signaling, @@ -298,7 +298,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv) fman->event_fence_action_size = ttm_round_pot(sizeof(struct vmw_event_fence_action)); mutex_init(&fman->goal_irq_mutex); - fman->ctx = fence_context_alloc(1); + fman->ctx = dma_fence_context_alloc(1); return fman; } @@ -326,8 +326,8 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman, unsigned long irq_flags; int ret = 0; - fence_init(&fence->base, &vmw_fence_ops, &fman->lock, - fman->ctx, seqno); + dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock, + fman->ctx, seqno); INIT_LIST_HEAD(&fence->seq_passed_actions); fence->destroy = destroy; @@ -431,7 +431,7 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence) u32 goal_seqno; u32 *fifo_mem; - if (fence_is_signaled_locked(&fence->base)) + if (dma_fence_is_signaled_locked(&fence->base)) return false; fifo_mem = fman->dev_priv->mmio_virt; @@ -459,7 +459,7 @@ rerun: list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) { if (seqno - fence->base.seqno < VMW_FENCE_WRAP) { list_del_init(&fence->head); - fence_signal_locked(&fence->base); + dma_fence_signal_locked(&fence->base); INIT_LIST_HEAD(&action_list); list_splice_init(&fence->seq_passed_actions, &action_list); @@ -500,18 +500,18 @@ bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence) { struct vmw_fence_manager *fman = fman_from_fence(fence); - if (test_bit(FENCE_FLAG_SIGNALED_BIT, 
&fence->base.flags)) + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) return 1; vmw_fences_update(fman); - return fence_is_signaled(&fence->base); + return dma_fence_is_signaled(&fence->base); } int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy, bool interruptible, unsigned long timeout) { - long ret = fence_wait_timeout(&fence->base, interruptible, timeout); + long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout); if (likely(ret > 0)) return 0; @@ -530,7 +530,7 @@ void vmw_fence_obj_flush(struct vmw_fence_obj *fence) static void vmw_fence_destroy(struct vmw_fence_obj *fence) { - fence_free(&fence->base); + dma_fence_free(&fence->base); } int vmw_fence_create(struct vmw_fence_manager *fman, @@ -669,7 +669,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman) struct vmw_fence_obj *fence = list_entry(fman->fence_list.prev, struct vmw_fence_obj, head); - fence_get(&fence->base); + dma_fence_get(&fence->base); spin_unlock_irq(&fman->lock); ret = vmw_fence_obj_wait(fence, false, false, @@ -677,7 +677,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman) if (unlikely(ret != 0)) { list_del_init(&fence->head); - fence_signal(&fence->base); + dma_fence_signal(&fence->base); INIT_LIST_HEAD(&action_list); list_splice_init(&fence->seq_passed_actions, &action_list); @@ -685,7 +685,7 @@ } BUG_ON(!list_empty(&fence->head)); - fence_put(&fence->base); + dma_fence_put(&fence->base); spin_lock_irq(&fman->lock); } spin_unlock_irq(&fman->lock); @@ -884,7 +884,7 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence, spin_lock_irqsave(&fman->lock, irq_flags); fman->pending_actions[action->type]++; - if (fence_is_signaled_locked(&fence->base)) { + if (dma_fence_is_signaled_locked(&fence->base)) { struct list_head action_list; INIT_LIST_HEAD(&action_list); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h index 83ae301ee141..d9d85aa6ed20 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h @@ -27,7 +27,7 @@ #ifndef _VMWGFX_FENCE_H_ -#include <linux/fence.h> +#include <linux/dma-fence.h> #define VMW_FENCE_WAIT_TIMEOUT (5*HZ) @@ -52,7 +52,7 @@ struct vmw_fence_action { }; struct vmw_fence_obj { - struct fence base; + struct dma_fence base; struct list_head head; struct list_head seq_passed_actions; @@ -71,14 +71,14 @@ vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p) *fence_p = NULL; if (fence) - fence_put(&fence->base); + dma_fence_put(&fence->base); } static inline struct vmw_fence_obj * vmw_fence_obj_reference(struct vmw_fence_obj *fence) { if (fence) - fence_get(&fence->base); + dma_fence_get(&fence->base); return fence; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 1a85fb2d4dc6..8e86d6d4141b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -1454,7 +1454,7 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo, if (fence == NULL) { vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); reservation_object_add_excl_fence(bo->resv, &fence->base); - fence_put(&fence->base); + dma_fence_put(&fence->base); } else reservation_object_add_excl_fence(bo->resv, &fence->base); }
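The ttm_bo hunks above show the other common consumer pattern: fences parked on a reservation object. A minimal sketch of flushing them under the new names, closely mirroring ttm_bo_flush_all_fences(); it assumes the caller holds the reservation lock, and the demo_ name is illustrative:

#include <linux/dma-fence.h>
#include <linux/reservation.h>

static void demo_flush_fences(struct reservation_object *resv)
{
	struct reservation_object_list *fobj = reservation_object_get_list(resv);
	struct dma_fence *fence = reservation_object_get_excl(resv);
	int i;

	/* was fence_enable_sw_signaling() before this series */
	if (fence && !fence->ops->signaled)
		dma_fence_enable_sw_signaling(fence);

	for (i = 0; fobj && i < fobj->shared_count; ++i) {
		fence = rcu_dereference_protected(fobj->shared[i],
						  reservation_object_held(resv));
		if (!fence->ops->signaled)
			dma_fence_enable_sw_signaling(fence);
	}
}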
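Beyond the fence rename, the rockchip, sti, sun4i and tilcdc hunks switch component_match_add() to the new drm_of_component_match_add() helper introduced by this series, which takes its own reference on the device node and drops it when the match is released. A hedged sketch of the calling pattern, with demo_ names as illustrative assumptions:

#include <drm/drm_of.h>
#include <linux/component.h>
#include <linux/of_graph.h>

static int demo_compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

static void demo_add_endpoints(struct device *dev,
			       struct component_match **match)
{
	struct device_node *ep, *remote;

	for_each_endpoint_of_node(dev->of_node, ep) {
		remote = of_graph_get_remote_port_parent(ep);
		if (!remote)
			continue;
		/* the helper grabs its own of_node reference, so ours can go */
		drm_of_component_match_add(dev, match, demo_compare_of, remote);
		of_node_put(remote);
	}
}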