path: root/drivers/dma-buf/reservation.c
author     Linus Torvalds <torvalds@linux-foundation.org>  2018-12-25 22:48:26 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-12-25 22:48:26 +0300
commit     4971f090aa7f6ce5daa094ce4334f6618f93a7eb (patch)
tree       45d75782b7dedbec76a3ab82d2769f7707668071 /drivers/dma-buf/reservation.c
parent     c76cd634eb5bfd497617ea224a54a03b545c8c4d (diff)
parent     2a3c83f5fe0770d13bbb71b23674886ff4111f44 (diff)
download   linux-4971f090aa7f6ce5daa094ce4334f6618f93a7eb.tar.xz
Merge tag 'drm-next-2018-12-14' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:

 "Core:
   - shared fencing staging removal
   - drop transactional atomic helpers and move helpers to new location
   - DP/MST atomic cleanup
   - Leasing cleanups and drop EXPORT_SYMBOL
   - Convert drivers to atomic helpers and generic fbdev.
   - removed deprecated obj_ref/unref in favour of get/put
   - Improve dumb callback documentation
   - MODESET_LOCK_BEGIN/END helpers

  panels:
   - CDTech panels, Banana Pi Panel, DLC1010GIG,
   - Olimex LCD-O-LinuXino, Samsung S6D16D0, Truly NT35597 WQXGA,
   - Himax HX8357D, simulated RTSM AEMv8.
   - GPD Win2 panel
   - AUO G101EVN010

  vgem:
   - render node support

  ttm:
   - move global init out of drivers
   - fix LRU handling for ghost objects
   - Support for simultaneous submissions to multiple engines

  scheduler:
   - timeout/fault handling changes to help GPU recovery
   - helpers for hw with preemption support

  i915:
   - Scaler/Watermark fixes
   - DP MST + powerwell fixes
   - PSR fixes
   - Break long get/put shmemfs pages
   - Icelake fixes
   - Icelake DSI video mode enablement
   - Engine workaround improvements

  amdgpu:
   - freesync support
   - GPU reset enabled on CI, VI, SOC15 dGPUs
   - ABM support in DC
   - KFD support for vega12/polaris12
   - SDMA paging queue on vega
   - More amdkfd code sharing
   - DCC scanout on GFX9
   - DC kerneldoc
   - Updated SMU firmware for GFX8 chips
   - XGMI PSP + hive reset support
   - GPU reset
   - DC trace support
   - Powerplay updates for newer Polaris
   - Cursor plane update fast path
   - kfd dma-buf support

  virtio-gpu:
   - add EDID support

  vmwgfx:
   - pageflip with damage support

  nouveau:
   - Initial Turing TU104/TU106 modesetting support

  msm:
   - a2xx gpu support for apq8060 and imx5
   - a2xx gpummu support
   - mdp4 display support for apq8060
   - DPU fixes and cleanups
   - enhanced profiling support
   - debug object naming interface
   - get_iova/page pinning decoupling

  tegra:
   - Tegra194 host1x, VIC and display support enabled
   - Audio over HDMI for Tegra186 and Tegra194

  exynos:
   - DMA/IOMMU refactoring
   - plane alpha + blend mode support
   - Color format fixes for mixer driver

  rcar-du:
   - R8A7744 and R8A77470 support
   - R8A77965 LVDS support

  imx:
   - fbdev emulation fix
   - multi-tiled scalling fixes
   - SPDX identifiers

  rockchip:
   - dw_hdmi support
   - dw-mipi-dsi + dual dsi support
   - mailbox read size fix

  qxl:
   - fix cursor pinning

  vc4:
   - YUV support (scaling + cursor)

  v3d:
   - enable TFU (Texture Formatting Unit)

  mali-dp:
   - add support for linear tiled formats

  sun4i:
   - Display Engine 3 support
   - H6 DE3 mixer 0 support
   - H6 display engine support
   - dw-hdmi support
   - H6 HDMI phy support
   - implicit fence waiting
   - BGRX8888 support

  meson:
   - Overlay plane support
   - implicit fence waiting
   - HDMI 1.4 4k modes

  bridge:
   - i2c fixes for sii902x"

* tag 'drm-next-2018-12-14' of git://anongit.freedesktop.org/drm/drm: (1403 commits)
  drm/amd/display: Add fast path for cursor plane updates
  drm/amdgpu: Enable GPU recovery by default for CI
  drm/amd/display: Fix duplicating scaling/underscan connector state
  drm/amd/display: Fix unintialized max_bpc state values
  Revert "drm/amd/display: Set RMX_ASPECT as default"
  drm/amdgpu: Fix stub function name
  drm/msm/dpu: Fix clock issue after bind failure
  drm/msm/dpu: Clean up dpu_media_info.h static inline functions
  drm/msm/dpu: Further cleanups for static inline functions
  drm/msm/dpu: Cleanup the debugfs functions
  drm/msm/dpu: Remove dpu_irq and unused functions
  drm/msm: Make irq_postinstall optional
  drm/msm/dpu: Cleanup callers of dpu_hw_blk_init
  drm/msm/dpu: Remove unused functions
  drm/msm/dpu: Remove dpu_crtc_is_enabled()
  drm/msm/dpu: Remove dpu_crtc_get_mixer_height
  drm/msm/dpu: Remove dpu_dbg
  drm/msm: dpu: Remove crtc_lock
  drm/msm: dpu: Remove vblank_requested flag from dpu_crtc
  drm/msm: dpu: Separate crtc assignment from vblank enable
  ...
Diffstat (limited to 'drivers/dma-buf/reservation.c')
-rw-r--r--  drivers/dma-buf/reservation.c | 189
1 file changed, 65 insertions(+), 124 deletions(-)
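
The diff below removes the obj->staged machinery and changes reservation_object_reserve_shared() to take a fence count, so a driver reserves room for all its shared fences in one call before publishing them. A minimal, hypothetical driver-side sketch of the resulting calling pattern follows; example_attach_fences() and its error handling are illustrative assumptions, not part of this patch, while the reservation_object_* calls are the in-tree API of this release.

#include <linux/dma-fence.h>
#include <linux/reservation.h>

/*
 * Hypothetical usage sketch (not from this patch): reserve slots for all
 * fences up front with the new num_fences argument, then add them while
 * holding the reservation lock.
 */
static int example_attach_fences(struct reservation_object *resv,
				 struct dma_fence **fences,
				 unsigned int num_fences)
{
	unsigned int i;
	int ret;

	/* obj->lock is a ww_mutex; NULL acquire context keeps this simple. */
	ret = reservation_object_lock(resv, NULL);
	if (ret)
		return ret;

	/* One call now reserves room for all num_fences additions. */
	ret = reservation_object_reserve_shared(resv, num_fences);
	if (!ret) {
		for (i = 0; i < num_fences; i++)
			reservation_object_add_shared_fence(resv, fences[i]);
	}

	reservation_object_unlock(resv);
	return ret;
}
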
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 6c95f61a32e7..c1618335ca99 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -56,9 +56,10 @@ const char reservation_seqcount_string[] = "reservation_seqcount";
EXPORT_SYMBOL(reservation_seqcount_string);
/**
- * reservation_object_reserve_shared - Reserve space to add a shared
- * fence to a reservation_object.
+ * reservation_object_reserve_shared - Reserve space to add shared fences to
+ * a reservation_object.
* @obj: reservation object
+ * @num_fences: number of fences we want to add
*
* Should be called before reservation_object_add_shared_fence(). Must
* be called with obj->lock held.
@@ -66,107 +67,27 @@ EXPORT_SYMBOL(reservation_seqcount_string);
* RETURNS
* Zero for success, or -errno
*/
-int reservation_object_reserve_shared(struct reservation_object *obj)
+int reservation_object_reserve_shared(struct reservation_object *obj,
+ unsigned int num_fences)
{
- struct reservation_object_list *fobj, *old;
- u32 max;
+ struct reservation_object_list *old, *new;
+ unsigned int i, j, k, max;
old = reservation_object_get_list(obj);
if (old && old->shared_max) {
- if (old->shared_count < old->shared_max) {
- /* perform an in-place update */
- kfree(obj->staged);
- obj->staged = NULL;
+ if ((old->shared_count + num_fences) <= old->shared_max)
return 0;
- } else
- max = old->shared_max * 2;
- } else
- max = 4;
-
- /*
- * resize obj->staged or allocate if it doesn't exist,
- * noop if already correct size
- */
- fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]),
- GFP_KERNEL);
- if (!fobj)
- return -ENOMEM;
-
- obj->staged = fobj;
- fobj->shared_max = max;
- return 0;
-}
-EXPORT_SYMBOL(reservation_object_reserve_shared);
-
-static void
-reservation_object_add_shared_inplace(struct reservation_object *obj,
- struct reservation_object_list *fobj,
- struct dma_fence *fence)
-{
- struct dma_fence *signaled = NULL;
- u32 i, signaled_idx;
-
- dma_fence_get(fence);
-
- preempt_disable();
- write_seqcount_begin(&obj->seq);
-
- for (i = 0; i < fobj->shared_count; ++i) {
- struct dma_fence *old_fence;
-
- old_fence = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(obj));
-
- if (old_fence->context == fence->context) {
- /* memory barrier is added by write_seqcount_begin */
- RCU_INIT_POINTER(fobj->shared[i], fence);
- write_seqcount_end(&obj->seq);
- preempt_enable();
-
- dma_fence_put(old_fence);
- return;
- }
-
- if (!signaled && dma_fence_is_signaled(old_fence)) {
- signaled = old_fence;
- signaled_idx = i;
- }
- }
-
- /*
- * memory barrier is added by write_seqcount_begin,
- * fobj->shared_count is protected by this lock too
- */
- if (signaled) {
- RCU_INIT_POINTER(fobj->shared[signaled_idx], fence);
+ else
+ max = max(old->shared_count + num_fences,
+ old->shared_max * 2);
} else {
- BUG_ON(fobj->shared_count >= fobj->shared_max);
- RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
- fobj->shared_count++;
+ max = 4;
}
- write_seqcount_end(&obj->seq);
- preempt_enable();
-
- dma_fence_put(signaled);
-}
-
-static void
-reservation_object_add_shared_replace(struct reservation_object *obj,
- struct reservation_object_list *old,
- struct reservation_object_list *fobj,
- struct dma_fence *fence)
-{
- unsigned i, j, k;
-
- dma_fence_get(fence);
-
- if (!old) {
- RCU_INIT_POINTER(fobj->shared[0], fence);
- fobj->shared_count = 1;
- goto done;
- }
+ new = kmalloc(offsetof(typeof(*new), shared[max]), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
/*
* no need to bump fence refcounts, rcu_read access
@@ -174,46 +95,45 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
* references from the old struct are carried over to
* the new.
*/
- for (i = 0, j = 0, k = fobj->shared_max; i < old->shared_count; ++i) {
- struct dma_fence *check;
-
- check = rcu_dereference_protected(old->shared[i],
- reservation_object_held(obj));
+ for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
+ struct dma_fence *fence;
- if (check->context == fence->context ||
- dma_fence_is_signaled(check))
- RCU_INIT_POINTER(fobj->shared[--k], check);
+ fence = rcu_dereference_protected(old->shared[i],
+ reservation_object_held(obj));
+ if (dma_fence_is_signaled(fence))
+ RCU_INIT_POINTER(new->shared[--k], fence);
else
- RCU_INIT_POINTER(fobj->shared[j++], check);
+ RCU_INIT_POINTER(new->shared[j++], fence);
}
- fobj->shared_count = j;
- RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
- fobj->shared_count++;
+ new->shared_count = j;
+ new->shared_max = max;
-done:
preempt_disable();
write_seqcount_begin(&obj->seq);
/*
* RCU_INIT_POINTER can be used here,
* seqcount provides the necessary barriers
*/
- RCU_INIT_POINTER(obj->fence, fobj);
+ RCU_INIT_POINTER(obj->fence, new);
write_seqcount_end(&obj->seq);
preempt_enable();
if (!old)
- return;
+ return 0;
/* Drop the references to the signaled fences */
- for (i = k; i < fobj->shared_max; ++i) {
- struct dma_fence *f;
+ for (i = k; i < new->shared_max; ++i) {
+ struct dma_fence *fence;
- f = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(obj));
- dma_fence_put(f);
+ fence = rcu_dereference_protected(new->shared[i],
+ reservation_object_held(obj));
+ dma_fence_put(fence);
}
kfree_rcu(old, rcu);
+
+ return 0;
}
+EXPORT_SYMBOL(reservation_object_reserve_shared);
/**
* reservation_object_add_shared_fence - Add a fence to a shared slot
@@ -226,15 +146,39 @@ done:
void reservation_object_add_shared_fence(struct reservation_object *obj,
struct dma_fence *fence)
{
- struct reservation_object_list *old, *fobj = obj->staged;
+ struct reservation_object_list *fobj;
+ unsigned int i, count;
- old = reservation_object_get_list(obj);
- obj->staged = NULL;
+ dma_fence_get(fence);
- if (!fobj)
- reservation_object_add_shared_inplace(obj, old, fence);
- else
- reservation_object_add_shared_replace(obj, old, fobj, fence);
+ fobj = reservation_object_get_list(obj);
+ count = fobj->shared_count;
+
+ preempt_disable();
+ write_seqcount_begin(&obj->seq);
+
+ for (i = 0; i < count; ++i) {
+ struct dma_fence *old_fence;
+
+ old_fence = rcu_dereference_protected(fobj->shared[i],
+ reservation_object_held(obj));
+ if (old_fence->context == fence->context ||
+ dma_fence_is_signaled(old_fence)) {
+ dma_fence_put(old_fence);
+ goto replace;
+ }
+ }
+
+ BUG_ON(fobj->shared_count >= fobj->shared_max);
+ count++;
+
+replace:
+ RCU_INIT_POINTER(fobj->shared[i], fence);
+ /* pointer update must be visible before we extend the shared_count */
+ smp_store_mb(fobj->shared_count, count);
+
+ write_seqcount_end(&obj->seq);
+ preempt_enable();
}
EXPORT_SYMBOL(reservation_object_add_shared_fence);
@@ -343,9 +287,6 @@ retry:
new = dma_fence_get_rcu_safe(&src->fence_excl);
rcu_read_unlock();
- kfree(dst->staged);
- dst->staged = NULL;
-
src_list = reservation_object_get_list(dst);
old = reservation_object_get_excl(dst);
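
For context, the seqcount plus smp_store_mb() ordering in the new reservation_object_add_shared_fence() exists for the benefit of lockless readers. Below is a simplified, hypothetical reader sketch modelled on the existing reservation_object_*_rcu helpers; example_all_shared_signaled() is an assumption for illustration only, not code from this patch.

#include <linux/dma-fence.h>
#include <linux/rcupdate.h>
#include <linux/reservation.h>
#include <linux/seqlock.h>

/*
 * Hypothetical lockless reader: sample the seqcount, walk the shared list
 * under RCU, and retry if a writer published a new list or replaced a
 * fence meanwhile.  This is the pattern the writer-side barriers pair with.
 */
static bool example_all_shared_signaled(struct reservation_object *obj)
{
	struct reservation_object_list *list;
	unsigned int seq, i;
	bool ret;

retry:
	ret = true;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();

	list = rcu_dereference(obj->fence);
	for (i = 0; list && i < list->shared_count; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference(list->shared[i]);
		if (!dma_fence_get_rcu(fence)) {
			/* Fence is being released; a writer is active, retry. */
			rcu_read_unlock();
			goto retry;
		}

		ret = dma_fence_is_signaled(fence);
		dma_fence_put(fence);
		if (!ret)
			break;
	}

	rcu_read_unlock();
	if (read_seqcount_retry(&obj->seq, seq))
		goto retry;

	return ret;
}
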