Diffstat (limited to 'drivers/gpu/drm/vc4/vc4_kms.c')
 drivers/gpu/drm/vc4/vc4_kms.c | 36 +++++++++++++++++++++++++++---------
 1 file changed, 27 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index c1f65c6c8e60..be8dd8262f27 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -61,7 +61,7 @@ vc4_atomic_complete_commit(struct vc4_commit *c)
 
         drm_atomic_helper_cleanup_planes(dev, state);
 
-        drm_atomic_state_free(state);
+        drm_atomic_state_put(state);
 
         up(&vc4->async_modeset);
 
@@ -119,17 +119,34 @@ static int vc4_atomic_commit(struct drm_device *dev,
 
         /* Make sure that any outstanding modesets have finished. */
         if (nonblock) {
-                ret = down_trylock(&vc4->async_modeset);
-                if (ret) {
+                struct drm_crtc *crtc;
+                struct drm_crtc_state *crtc_state;
+                unsigned long flags;
+                bool busy = false;
+
+                /*
+                 * If there's an undispatched event to send then we're
+                 * obviously still busy. If there isn't, then we can
+                 * unconditionally wait for the semaphore because it
+                 * shouldn't be contended (for long).
+                 *
+                 * This is to prevent a race where queuing a new flip
+                 * from userspace immediately on receipt of an event
+                 * beats our clean-up and returns EBUSY.
+                 */
+                spin_lock_irqsave(&dev->event_lock, flags);
+                for_each_crtc_in_state(state, crtc, crtc_state, i)
+                        busy |= vc4_event_pending(crtc);
+                spin_unlock_irqrestore(&dev->event_lock, flags);
+                if (busy) {
                         kfree(c);
                         return -EBUSY;
                 }
-        } else {
-                ret = down_interruptible(&vc4->async_modeset);
-                if (ret) {
-                        kfree(c);
-                        return ret;
-                }
+        }
+        ret = down_interruptible(&vc4->async_modeset);
+        if (ret) {
+                kfree(c);
+                return ret;
         }
 
         ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -173,6 +190,7 @@ static int vc4_atomic_commit(struct drm_device *dev,
          * current layout.
          */
 
+        drm_atomic_state_get(state);
         if (nonblock) {
                 vc4_queue_seqno_cb(dev, &c->cb, wait_seqno,
                                    vc4_atomic_complete_commit_seqno_cb);
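
Note on the drm_atomic_state_free() -> drm_atomic_state_put()/get() change: the atomic state object is reference-counted, so the commit path takes an extra reference before queueing the (possibly asynchronous) completion callback, and the completion drops it when clean-up is done. Below is a minimal, self-contained userspace sketch of that get/put lifetime pattern only; the names (fake_state, complete_commit) are hypothetical stand-ins for illustration, not the kernel API.

/* Hypothetical sketch of the get/put reference pattern used by the patch. */
#include <stdio.h>
#include <stdlib.h>

struct fake_state {
        int refcount;
        /* ...committed state would live here... */
};

static struct fake_state *fake_state_alloc(void)
{
        struct fake_state *s = calloc(1, sizeof(*s));

        if (s)
                s->refcount = 1;        /* caller holds the initial reference */
        return s;
}

static void fake_state_get(struct fake_state *s)
{
        s->refcount++;                  /* extra reference for the async path */
}

static void fake_state_put(struct fake_state *s)
{
        if (--s->refcount == 0) {
                printf("state freed\n");
                free(s);
        }
}

/* Stands in for the commit clean-up path: drops the async reference. */
static void complete_commit(struct fake_state *s)
{
        /* ...clean-up work... */
        fake_state_put(s);
}

int main(void)
{
        struct fake_state *s = fake_state_alloc();

        /*
         * As in the patch: take a reference before handing the state to the
         * (possibly asynchronous) completion, so it stays alive until the
         * callback runs, independently of when the caller drops its own
         * reference.
         */
        fake_state_get(s);
        complete_commit(s);             /* async reference dropped here */
        fake_state_put(s);              /* original reference dropped last */
        return 0;
}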