From eaa434defaca1781fb2932c685289b610aeb8b4b Mon Sep 17 00:00:00 2001 From: Noralf Trønnes Date: Thu, 28 Apr 2016 17:18:33 +0200 Subject: drm/fb-helper: Add fb_deferred_io support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds deferred io support to drm_fb_helper. The fbdev framebuffer changes are flushed using the callback (struct drm_framebuffer *)->funcs->dirty() by a dedicated worker ensuring that it always runs in process context. For those wondering why we need to be able to handle atomic calling contexts: Both panic paths and cursor code and fbcon blanking can run from atomic. See commit bcb39af4486be07e896fc374a2336bad3104ae0a Author: Dave Airlie Date: Thu Feb 7 11:19:15 2013 +1000 drm/udl: make usage as a console safer for where this was originally discovered. Signed-off-by: Noralf Trønnes Reviewed-by: Daniel Vetter [danvet: Augment commit message with why we need to handle atomic contexts.] Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1461856717-6476-4-git-send-email-noralf@tronnes.org --- include/drm/drm_fb_helper.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'include/drm') diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 062723bdcabe..5b4aa35026a3 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -172,6 +172,10 @@ struct drm_fb_helper_connector { * @funcs: driver callbacks for fb helper * @fbdev: emulated fbdev device info struct * @pseudo_palette: fake palette of 16 colors + * @dirty_clip: clip rectangle used with deferred_io to accumulate damage to + * the screen buffer + * @dirty_lock: spinlock protecting @dirty_clip + * @dirty_work: worker used to flush the framebuffer * * This is the main structure used by the fbdev helpers. Drivers supporting * fbdev emulation should embedded this into their overall driver structure. @@ -189,6 +193,9 @@ struct drm_fb_helper { const struct drm_fb_helper_funcs *funcs; struct fb_info *fbdev; u32 pseudo_palette[17]; + struct drm_clip_rect dirty_clip; + spinlock_t dirty_lock; + struct work_struct dirty_work; /** * @kernel_fb_list: @@ -245,6 +252,9 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper); +void drm_fb_helper_deferred_io(struct fb_info *info, + struct list_head *pagelist); + ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, size_t count, loff_t *ppos); ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf, @@ -368,6 +378,11 @@ static inline void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper) { } +static inline void drm_fb_helper_deferred_io(struct fb_info *info, + struct list_head *pagelist) +{ +} + static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, size_t count, loff_t *ppos) -- cgit v1.2.3 From 199c77179c879e6cbca178bf67e9153dc89fb3d1 Mon Sep 17 00:00:00 2001 From: Noralf Trønnes Date: Thu, 28 Apr 2016 17:18:35 +0200 Subject: drm/fb-cma-helper: Add fb_deferred_io support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds fbdev deferred io support if CONFIG_FB_DEFERRED_IO is enabled. The driver has to provide a (struct drm_framebuffer_funcs *)->dirty() callback to get notification of fbdev framebuffer changes. If the dirty() hook is set, then fb_deferred_io is set up automatically by the helper. 
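Roughly, that automatic setup amounts to wiring fb_deferred_io to the drm_fb_helper_deferred_io() callback introduced in the previous patch. The sketch below is only an illustration (the example_ names are hypothetical and CONFIG_FB_DEFERRED_IO is assumed); the real code is drm_fbdev_cma_defio_init() further down in this patch:

#include <linux/fb.h>
#include <drm/drm_fb_helper.h>

/* Hypothetical wiring; the CMA helper does the equivalent internally. */
static struct fb_deferred_io example_fbdefio = {
	.deferred_io	= drm_fb_helper_deferred_io,
};

static int example_fbdev_defio_setup(struct fb_info *info)
{
	/*
	 * info->screen_buffer and info->fix.smem_start must already point
	 * at the backing storage, as the patch does for the CMA buffer.
	 */
	example_fbdefio.delay = msecs_to_jiffies(50);
	info->fbdefio = &example_fbdefio;
	/*
	 * Installs fb_deferred_io_mmap() as info->fbops->fb_mmap and sets
	 * up the delayed work that calls ->deferred_io(), i.e.
	 * drm_fb_helper_deferred_io(), after userspace writes to the mapping.
	 */
	fb_deferred_io_init(info);

	return 0;
}
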
Two functions have been added so that the driver can provide a dirty() function: - drm_fbdev_cma_init_with_funcs() This makes it possible for the driver to provided a custom (struct drm_fb_helper_funcs *)->fb_probe() function. - drm_fbdev_cma_create_with_funcs() This is used by the .fb_probe hook to set a driver provided (struct drm_framebuffer_funcs *)->dirty() function. Cc: laurent.pinchart@ideasonboard.com Signed-off-by: Noralf Trønnes Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1461856717-6476-6-git-send-email-noralf@tronnes.org --- drivers/gpu/drm/drm_fb_cma_helper.c | 178 +++++++++++++++++++++++++++++++++--- include/drm/drm_fb_cma_helper.h | 14 +++ 2 files changed, 180 insertions(+), 12 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c index bb88e3df9257..086f600a975a 100644 --- a/drivers/gpu/drm/drm_fb_cma_helper.c +++ b/drivers/gpu/drm/drm_fb_cma_helper.c @@ -25,6 +25,8 @@ #include #include +#define DEFAULT_FBDEFIO_DELAY_MS 50 + struct drm_fb_cma { struct drm_framebuffer fb; struct drm_gem_cma_object *obj[4]; @@ -35,6 +37,61 @@ struct drm_fbdev_cma { struct drm_fb_cma *fb; }; +/** + * DOC: framebuffer cma helper functions + * + * Provides helper functions for creating a cma (contiguous memory allocator) + * backed framebuffer. + * + * drm_fb_cma_create() is used in the + * (struct drm_mode_config_funcs *)->fb_create callback function to create the + * cma backed framebuffer. + * + * An fbdev framebuffer backed by cma is also available by calling + * drm_fbdev_cma_init(). drm_fbdev_cma_fini() tears it down. + * If CONFIG_FB_DEFERRED_IO is enabled and the callback + * (struct drm_framebuffer_funcs)->dirty is set, fb_deferred_io + * will be set up automatically. dirty() is called by + * drm_fb_helper_deferred_io() in process context (struct delayed_work). + * + * Example fbdev deferred io code: + * + * static int driver_fbdev_fb_dirty(struct drm_framebuffer *fb, + * struct drm_file *file_priv, + * unsigned flags, unsigned color, + * struct drm_clip_rect *clips, + * unsigned num_clips) + * { + * struct drm_gem_cma_object *cma = drm_fb_cma_get_gem_obj(fb, 0); + * ... push changes ... 
+ * return 0; + * } + * + * static struct drm_framebuffer_funcs driver_fbdev_fb_funcs = { + * .destroy = drm_fb_cma_destroy, + * .create_handle = drm_fb_cma_create_handle, + * .dirty = driver_fbdev_fb_dirty, + * }; + * + * static int driver_fbdev_create(struct drm_fb_helper *helper, + * struct drm_fb_helper_surface_size *sizes) + * { + * return drm_fbdev_cma_create_with_funcs(helper, sizes, + * &driver_fbdev_fb_funcs); + * } + * + * static const struct drm_fb_helper_funcs driver_fb_helper_funcs = { + * .fb_probe = driver_fbdev_create, + * }; + * + * Initialize: + * fbdev = drm_fbdev_cma_init_with_funcs(dev, 16, + * dev->mode_config.num_crtc, + * dev->mode_config.num_connector, + * &driver_fb_helper_funcs); + * + */ + static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper) { return container_of(helper, struct drm_fbdev_cma, fb_helper); @@ -45,7 +102,7 @@ static inline struct drm_fb_cma *to_fb_cma(struct drm_framebuffer *fb) return container_of(fb, struct drm_fb_cma, fb); } -static void drm_fb_cma_destroy(struct drm_framebuffer *fb) +void drm_fb_cma_destroy(struct drm_framebuffer *fb) { struct drm_fb_cma *fb_cma = to_fb_cma(fb); int i; @@ -58,8 +115,9 @@ static void drm_fb_cma_destroy(struct drm_framebuffer *fb) drm_framebuffer_cleanup(fb); kfree(fb_cma); } +EXPORT_SYMBOL(drm_fb_cma_destroy); -static int drm_fb_cma_create_handle(struct drm_framebuffer *fb, +int drm_fb_cma_create_handle(struct drm_framebuffer *fb, struct drm_file *file_priv, unsigned int *handle) { struct drm_fb_cma *fb_cma = to_fb_cma(fb); @@ -67,6 +125,7 @@ static int drm_fb_cma_create_handle(struct drm_framebuffer *fb, return drm_gem_handle_create(file_priv, &fb_cma->obj[0]->base, handle); } +EXPORT_SYMBOL(drm_fb_cma_create_handle); static struct drm_framebuffer_funcs drm_fb_cma_funcs = { .destroy = drm_fb_cma_destroy, @@ -76,7 +135,7 @@ static struct drm_framebuffer_funcs drm_fb_cma_funcs = { static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_cma_object **obj, - unsigned int num_planes) + unsigned int num_planes, struct drm_framebuffer_funcs *funcs) { struct drm_fb_cma *fb_cma; int ret; @@ -91,7 +150,7 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev, for (i = 0; i < num_planes; i++) fb_cma->obj[i] = obj[i]; - ret = drm_framebuffer_init(dev, &fb_cma->fb, &drm_fb_cma_funcs); + ret = drm_framebuffer_init(dev, &fb_cma->fb, funcs); if (ret) { dev_err(dev->dev, "Failed to initialize framebuffer: %d\n", ret); kfree(fb_cma); @@ -145,7 +204,7 @@ struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev, objs[i] = to_drm_gem_cma_obj(obj); } - fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i); + fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i, &drm_fb_cma_funcs); if (IS_ERR(fb_cma)) { ret = PTR_ERR(fb_cma); goto err_gem_object_unreference; @@ -233,8 +292,67 @@ static struct fb_ops drm_fbdev_cma_ops = { .fb_setcmap = drm_fb_helper_setcmap, }; -static int drm_fbdev_cma_create(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes) +static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info, + struct vm_area_struct *vma) +{ + fb_deferred_io_mmap(info, vma); + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + + return 0; +} + +static int drm_fbdev_cma_defio_init(struct fb_info *fbi, + struct drm_gem_cma_object *cma_obj) +{ + struct fb_deferred_io *fbdefio; + struct fb_ops *fbops; + + /* + * Per device structures are needed because: + * fbops: fb_deferred_io_cleanup() clears 
fbops.fb_mmap + * fbdefio: individual delays + */ + fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL); + fbops = kzalloc(sizeof(*fbops), GFP_KERNEL); + if (!fbdefio || !fbops) { + kfree(fbdefio); + return -ENOMEM; + } + + /* can't be offset from vaddr since dirty() uses cma_obj */ + fbi->screen_buffer = cma_obj->vaddr; + /* fb_deferred_io_fault() needs a physical address */ + fbi->fix.smem_start = page_to_phys(virt_to_page(fbi->screen_buffer)); + + *fbops = *fbi->fbops; + fbi->fbops = fbops; + + fbdefio->delay = msecs_to_jiffies(DEFAULT_FBDEFIO_DELAY_MS); + fbdefio->deferred_io = drm_fb_helper_deferred_io; + fbi->fbdefio = fbdefio; + fb_deferred_io_init(fbi); + fbi->fbops->fb_mmap = drm_fbdev_cma_deferred_io_mmap; + + return 0; +} + +static void drm_fbdev_cma_defio_fini(struct fb_info *fbi) +{ + if (!fbi->fbdefio) + return; + + fb_deferred_io_cleanup(fbi); + kfree(fbi->fbdefio); + kfree(fbi->fbops); +} + +/* + * For use in a (struct drm_fb_helper_funcs *)->fb_probe callback function that + * needs custom struct drm_framebuffer_funcs, like dirty() for deferred_io use. + */ +int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes, + struct drm_framebuffer_funcs *funcs) { struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper); struct drm_mode_fb_cmd2 mode_cmd = { 0 }; @@ -270,7 +388,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper, goto err_gem_free_object; } - fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1); + fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1, funcs); if (IS_ERR(fbdev_cma->fb)) { dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n"); ret = PTR_ERR(fbdev_cma->fb); @@ -296,31 +414,48 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper, fbi->screen_size = size; fbi->fix.smem_len = size; + if (funcs->dirty) { + ret = drm_fbdev_cma_defio_init(fbi, obj); + if (ret) + goto err_cma_destroy; + } + return 0; +err_cma_destroy: + drm_framebuffer_unregister_private(&fbdev_cma->fb->fb); + drm_fb_cma_destroy(&fbdev_cma->fb->fb); err_fb_info_destroy: drm_fb_helper_release_fbi(helper); err_gem_free_object: dev->driver->gem_free_object(&obj->base); return ret; } +EXPORT_SYMBOL(drm_fbdev_cma_create_with_funcs); + +static int drm_fbdev_cma_create(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) +{ + return drm_fbdev_cma_create_with_funcs(helper, sizes, &drm_fb_cma_funcs); +} static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = { .fb_probe = drm_fbdev_cma_create, }; /** - * drm_fbdev_cma_init() - Allocate and initializes a drm_fbdev_cma struct + * drm_fbdev_cma_init_with_funcs() - Allocate and initializes a drm_fbdev_cma struct * @dev: DRM device * @preferred_bpp: Preferred bits per pixel for the device * @num_crtc: Number of CRTCs * @max_conn_count: Maximum number of connectors + * @funcs: fb helper functions, in particular fb_probe() * * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR. 
*/ -struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev, +struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev, unsigned int preferred_bpp, unsigned int num_crtc, - unsigned int max_conn_count) + unsigned int max_conn_count, const struct drm_fb_helper_funcs *funcs) { struct drm_fbdev_cma *fbdev_cma; struct drm_fb_helper *helper; @@ -334,7 +469,7 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev, helper = &fbdev_cma->fb_helper; - drm_fb_helper_prepare(dev, helper, &drm_fb_cma_helper_funcs); + drm_fb_helper_prepare(dev, helper, funcs); ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count); if (ret < 0) { @@ -364,6 +499,24 @@ err_free: return ERR_PTR(ret); } +EXPORT_SYMBOL_GPL(drm_fbdev_cma_init_with_funcs); + +/** + * drm_fbdev_cma_init() - Allocate and initializes a drm_fbdev_cma struct + * @dev: DRM device + * @preferred_bpp: Preferred bits per pixel for the device + * @num_crtc: Number of CRTCs + * @max_conn_count: Maximum number of connectors + * + * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR. + */ +struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev, + unsigned int preferred_bpp, unsigned int num_crtc, + unsigned int max_conn_count) +{ + return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp, num_crtc, + max_conn_count, &drm_fb_cma_helper_funcs); +} EXPORT_SYMBOL_GPL(drm_fbdev_cma_init); /** @@ -373,6 +526,7 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_init); void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma) { drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper); + drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev); drm_fb_helper_release_fbi(&fbdev_cma->fb_helper); if (fbdev_cma->fb) { diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h index be62bd321e75..6554b6f38bb4 100644 --- a/include/drm/drm_fb_cma_helper.h +++ b/include/drm/drm_fb_cma_helper.h @@ -4,11 +4,18 @@ struct drm_fbdev_cma; struct drm_gem_cma_object; +struct drm_fb_helper_surface_size; +struct drm_framebuffer_funcs; +struct drm_fb_helper_funcs; struct drm_framebuffer; +struct drm_fb_helper; struct drm_device; struct drm_file; struct drm_mode_fb_cmd2; +struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev, + unsigned int preferred_bpp, unsigned int num_crtc, + unsigned int max_conn_count, const struct drm_fb_helper_funcs *funcs); struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev, unsigned int preferred_bpp, unsigned int num_crtc, unsigned int max_conn_count); @@ -16,6 +23,13 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma); void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma); void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma); +int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes, + struct drm_framebuffer_funcs *funcs); + +void drm_fb_cma_destroy(struct drm_framebuffer *fb); +int drm_fb_cma_create_handle(struct drm_framebuffer *fb, + struct drm_file *file_priv, unsigned int *handle); struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev, struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd); -- cgit v1.2.3 From 286dbb8d5d800dcca23d43bb62d9f3cd96fe479c Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Tue, 26 Apr 2016 16:11:34 +0200 Subject: drm/atomic: Rename async parameter to nonblocking. This is the first step of renaming async commit to nonblocking commit. 
The flag passed by userspace is NONBLOCKING, and async has a different meaning for page flips, where it means as soon as possible. Fixing up comments in drm core is done manually, to make sure I didn't miss anything. For drivers, the following cocci script is used to rename bool async to bool nonblock: @@ identifier I =~ "^async"; identifier func; @@ func(..., bool - I + nonblock , ...) { <... - I + nonblock ...> } @@ identifier func; type T; identifier I =~ "^async"; @@ T func(..., bool - I + nonblock , ...); Thanks to Tvrtko Ursulin for the cocci script. Cc: Tvrtko Ursulin Signed-off-by: Maarten Lankhorst Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1461679905-30177-2-git-send-email-maarten.lankhorst@linux.intel.com --- drivers/gpu/drm/drm_atomic_helper.c | 30 +++++++++++++++--------------- include/drm/drm_atomic_helper.h | 2 +- include/drm/drm_crtc.h | 8 ++++---- 3 files changed, 20 insertions(+), 20 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 40c7b268a9bc..297713bab549 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1114,13 +1114,13 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks); * drm_atomic_helper_commit - commit validated state object * @dev: DRM device * @state: the driver state object - * @async: asynchronous commit + * @nonblocking: whether nonblocking behavior is requested. * * This function commits a with drm_atomic_helper_check() pre-validated state * object. This can still fail when e.g. the framebuffer reservation fails. For - * now this doesn't implement asynchronous commits. + * now this doesn't implement nonblocking commits. * - * Note that right now this function does not support async commits, and hence + * Note that right now this function does not support nonblocking commits, hence * driver writers must implement their own version for now. Also note that the * default ordering of how the various stages are called is to match the legacy * modeset helper library closest. One peculiarity of that is that it doesn't @@ -1141,11 +1141,11 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks); */ int drm_atomic_helper_commit(struct drm_device *dev, struct drm_atomic_state *state, - bool async) + bool nonblock) { int ret; - if (async) + if (nonblock) return -EBUSY; ret = drm_atomic_helper_prepare_planes(dev, state); @@ -1195,20 +1195,20 @@ int drm_atomic_helper_commit(struct drm_device *dev, EXPORT_SYMBOL(drm_atomic_helper_commit); /** - * DOC: implementing async commit + * DOC: implementing nonblocking commit * - * For now the atomic helpers don't support async commit directly. If there is - * real need it could be added though, using the dma-buf fence infrastructure - * for generic synchronization with outstanding rendering. + * For now the atomic helpers don't support nonblocking commit directly. If + * there is real need it could be added though, using the dma-buf fence + * infrastructure for generic synchronization with outstanding rendering. * - * For now drivers have to implement async commit themselves, with the following - * sequence being the recommended one: + * For now drivers have to implement nonblocking commit themselves, with the + * following sequence being the recommended one: * * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function * which commit needs to call which can fail, so we want to run it first and * synchronously. * - * 2. 
Synchronize with any outstanding asynchronous commit worker threads which + * 2. Synchronize with any outstanding nonblocking commit worker threads which * might be affected the new state update. This can be done by either cancelling * or flushing the work items, depending upon whether the driver can deal with * cancelled updates. Note that it is important to ensure that the framebuffer @@ -1222,9 +1222,9 @@ EXPORT_SYMBOL(drm_atomic_helper_commit); * 3. The software state is updated synchronously with * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset * locks means concurrent callers never see inconsistent state. And doing this - * while it's guaranteed that no relevant async worker runs means that async - * workers do not need grab any locks. Actually they must not grab locks, for - * otherwise the work flushing will deadlock. + * while it's guaranteed that no relevant nonblocking worker runs means that + * nonblocking workers do not need grab any locks. Actually they must not grab + * locks, for otherwise the work flushing will deadlock. * * 4. Schedule a work item to do all subsequent steps, using the split-out * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index fe9d89c7d1ed..03642878bc51 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -40,7 +40,7 @@ int drm_atomic_helper_check(struct drm_device *dev, struct drm_atomic_state *state); int drm_atomic_helper_commit(struct drm_device *dev, struct drm_atomic_state *state, - bool async); + bool nonblock); void drm_atomic_helper_wait_for_fences(struct drm_device *dev, struct drm_atomic_state *state); diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 43c31496a5a7..800c48e80811 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -1887,7 +1887,7 @@ struct drm_mode_config_funcs { * drm_atomic_helper_commit(), or one of the exported sub-functions of * it. * - * Asynchronous commits (as indicated with the async parameter) must + * Nonblocking commits (as indicated with the nonblock parameter) must * do any preparatory work which might result in an unsuccessful commit * in the context of this callback. The only exceptions are hardware * errors resulting in -EIO. But even in that case the driver must @@ -1900,7 +1900,7 @@ struct drm_mode_config_funcs { * The driver must wait for any pending rendering to the new * framebuffers to complete before executing the flip. It should also * wait for any pending rendering from other drivers if the underlying - * buffer is a shared dma-buf. Asynchronous commits must not wait for + * buffer is a shared dma-buf. Nonblocking commits must not wait for * rendering in the context of this callback. * * An application can request to be notified when the atomic commit has @@ -1931,7 +1931,7 @@ struct drm_mode_config_funcs { * * 0 on success or one of the below negative error codes: * - * - -EBUSY, if an asynchronous updated is requested and there is + * - -EBUSY, if a nonblocking updated is requested and there is * an earlier updated pending. Drivers are allowed to support a queue * of outstanding updates, but currently no driver supports that. 
* Note that drivers must wait for preceding updates to complete if a @@ -1961,7 +1961,7 @@ struct drm_mode_config_funcs { */ int (*atomic_commit)(struct drm_device *dev, struct drm_atomic_state *state, - bool async); + bool nonblock); /** * @atomic_state_alloc: -- cgit v1.2.3 From b837ba0ad95bb5c08626a49321f07f271bdaf512 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Tue, 26 Apr 2016 16:11:35 +0200 Subject: drm/atomic: Rename drm_atomic_async_commit to nonblocking. Another step in renaming async to nonblocking for atomic commit. Signed-off-by: Maarten Lankhorst Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1461679905-30177-3-git-send-email-maarten.lankhorst@linux.intel.com --- drivers/gpu/drm/drm_atomic.c | 12 ++++++------ drivers/gpu/drm/drm_atomic_helper.c | 4 ++-- include/drm/drm_atomic.h | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 2fabd8c4fd88..0b526423f19f 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -145,7 +145,7 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) continue; /* - * FIXME: Async commits can race with connector unplugging and + * FIXME: Nonblocking commits can race with connector unplugging and * there's currently nothing that prevents cleanup up state for * deleted connectors. As long as the callback doesn't look at * the connector we'll be fine though, so make sure that's the @@ -1390,7 +1390,7 @@ int drm_atomic_commit(struct drm_atomic_state *state) EXPORT_SYMBOL(drm_atomic_commit); /** - * drm_atomic_async_commit - atomic&async configuration commit + * drm_atomic_nonblocking_commit - atomic&nonblocking configuration commit * @state: atomic configuration to check * * Note that this function can return -EDEADLK if the driver needed to acquire @@ -1405,7 +1405,7 @@ EXPORT_SYMBOL(drm_atomic_commit); * Returns: * 0 on success, negative error code on failure. */ -int drm_atomic_async_commit(struct drm_atomic_state *state) +int drm_atomic_nonblocking_commit(struct drm_atomic_state *state) { struct drm_mode_config *config = &state->dev->mode_config; int ret; @@ -1414,11 +1414,11 @@ int drm_atomic_async_commit(struct drm_atomic_state *state) if (ret) return ret; - DRM_DEBUG_ATOMIC("commiting %p asynchronously\n", state); + DRM_DEBUG_ATOMIC("commiting %p nonblocking\n", state); return config->funcs->atomic_commit(state->dev, state, true); } -EXPORT_SYMBOL(drm_atomic_async_commit); +EXPORT_SYMBOL(drm_atomic_nonblocking_commit); /* * The big monstor ioctl @@ -1687,7 +1687,7 @@ retry: */ ret = drm_atomic_check_only(state); } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) { - ret = drm_atomic_async_commit(state); + ret = drm_atomic_nonblocking_commit(state); } else { ret = drm_atomic_commit(state); } diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 297713bab549..b04662e0e608 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -2371,11 +2371,11 @@ retry: goto fail; } - ret = drm_atomic_async_commit(state); + ret = drm_atomic_nonblocking_commit(state); if (ret != 0) goto fail; - /* Driver takes ownership of state on successful async commit. */ + /* Driver takes ownership of state on successful commit. 
*/ return 0; fail: if (ret == -EDEADLK) diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index d3eaa5df187a..92c84e9ab09a 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -137,7 +137,7 @@ drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, int ret); int __must_check drm_atomic_check_only(struct drm_atomic_state *state); int __must_check drm_atomic_commit(struct drm_atomic_state *state); -int __must_check drm_atomic_async_commit(struct drm_atomic_state *state); +int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state); #define for_each_connector_in_state(state, connector, connector_state, __i) \ for ((__i) = 0; \ -- cgit v1.2.3 From 9f0ba539d13aebacb05dda542df7ef80684b2c70 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 2 May 2016 10:40:51 +0200 Subject: drm/gem: support BO freeing without dev->struct_mutex Finally all the core gem and a lot of drivers are entirely free of dev->struct_mutex depencies, and we can start to have an entirely lockless unref path. To make sure that no one who touches the core code accidentally breaks existing drivers which still require dev->struct_mutex I've made the might_lock check unconditional. While at it de-inline the ref/unref functions, they've become a bit too big. v2: Make it not leak like a sieve. v3: Review from Lucas: - drop != NULL in pointer checks. - fixup copypasted kerneldoc to actually match the functions. v4: Add __drm_gem_object_unreference as a fastpath helper for drivers who abolished dev->struct_mutex, requested by Chris. v5: Fix silly mistake in drm_gem_object_unreference_unlocked caught by intel-gfx CI - I checked for gem_free_object instead of gem_free_object_unlocked ... Cc: Chris Wilson Cc: Alex Deucher Cc: Lucas Stach Reviewed-by: Lucas Stach (v3) Reviewed-by: Chris Wilson (v4) Reviewed-by: Alex Deucher Signed-off-by: Daniel Vetter Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1462178451-1765-1-git-send-email-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_gem.c | 54 ++++++++++++++++++++++++++++++++++++++++++++++- include/drm/drmP.h | 15 ++++++++++--- include/drm/drm_gem.h | 48 +++++++++++++---------------------------- 3 files changed, 80 insertions(+), 37 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 25dac31eef37..973eb8805ce0 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -806,11 +806,63 @@ drm_gem_object_free(struct kref *kref) WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - if (dev->driver->gem_free_object != NULL) + if (dev->driver->gem_free_object_unlocked) + dev->driver->gem_free_object_unlocked(obj); + else if (dev->driver->gem_free_object) dev->driver->gem_free_object(obj); } EXPORT_SYMBOL(drm_gem_object_free); +/** + * drm_gem_object_unreference_unlocked - release a GEM BO reference + * @obj: GEM buffer object + * + * This releases a reference to @obj. Callers must not hold the + * dev->struct_mutex lock when calling this function. + * + * See also __drm_gem_object_unreference(). 
+ */ +void +drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) +{ + struct drm_device *dev; + + if (!obj) + return; + + dev = obj->dev; + might_lock(&dev->struct_mutex); + + if (dev->driver->gem_free_object_unlocked) + kref_put(&obj->refcount, drm_gem_object_free); + else if (kref_put_mutex(&obj->refcount, drm_gem_object_free, + &dev->struct_mutex)) + mutex_unlock(&dev->struct_mutex); +} +EXPORT_SYMBOL(drm_gem_object_unreference_unlocked); + +/** + * drm_gem_object_unreference - release a GEM BO reference + * @obj: GEM buffer object + * + * This releases a reference to @obj. Callers must hold the dev->struct_mutex + * lock when calling this function, even when the driver doesn't use + * dev->struct_mutex for anything. + * + * For drivers not encumbered with legacy locking use + * drm_gem_object_unreference_unlocked() instead. + */ +void +drm_gem_object_unreference(struct drm_gem_object *obj) +{ + if (obj) { + WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); + + kref_put(&obj->refcount, drm_gem_object_free); + } +} +EXPORT_SYMBOL(drm_gem_object_unreference); + /** * drm_gem_vm_open - vma->ops->open implementation for GEM * @vma: VM area structure diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 7901768bb47c..360b2a74e1ef 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -580,12 +580,21 @@ struct drm_driver { void (*debugfs_cleanup)(struct drm_minor *minor); /** - * Driver-specific constructor for drm_gem_objects, to set up - * obj->driver_private. + * @gem_free_object: deconstructor for drm_gem_objects * - * Returns 0 on success. + * This is deprecated and should not be used by new drivers. Use + * @gem_free_object_unlocked instead. */ void (*gem_free_object) (struct drm_gem_object *obj); + + /** + * @gem_free_object_unlocked: deconstructor for drm_gem_objects + * + * This is for drivers which are not encumbered with dev->struct_mutex + * legacy locking schemes. Use this hook instead of @gem_free_object. + */ + void (*gem_free_object_unlocked) (struct drm_gem_object *obj); + int (*gem_open_object) (struct drm_gem_object *, struct drm_file *); void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 0b3e11ab8757..408d6c47d98b 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -200,47 +200,29 @@ drm_gem_object_reference(struct drm_gem_object *obj) } /** - * drm_gem_object_unreference - release a GEM BO reference + * __drm_gem_object_unreference - raw function to release a GEM BO reference * @obj: GEM buffer object * - * This releases a reference to @obj. Callers must hold the dev->struct_mutex - * lock when calling this function, even when the driver doesn't use - * dev->struct_mutex for anything. + * This function is meant to be used by drivers which are not encumbered with + * dev->struct_mutex legacy locking and which are using the + * gem_free_object_unlocked callback. It avoids all the locking checks and + * locking overhead of drm_gem_object_unreference() and + * drm_gem_object_unreference_unlocked(). * - * For drivers not encumbered with legacy locking use - * drm_gem_object_unreference_unlocked() instead. + * Drivers should never call this directly in their code. Instead they should + * wrap it up into a driver_gem_object_unreference(struct driver_gem_object + * *obj) wrapper function, and use that. Shared code should never call this, to + * avoid breaking drivers by accident which still depend upon dev->struct_mutex + * locking. 
*/ static inline void -drm_gem_object_unreference(struct drm_gem_object *obj) +__drm_gem_object_unreference(struct drm_gem_object *obj) { - if (obj != NULL) { - WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); - - kref_put(&obj->refcount, drm_gem_object_free); - } + kref_put(&obj->refcount, drm_gem_object_free); } -/** - * drm_gem_object_unreference_unlocked - release a GEM BO reference - * @obj: GEM buffer object - * - * This releases a reference to @obj. Callers must not hold the - * dev->struct_mutex lock when calling this function. - */ -static inline void -drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) -{ - struct drm_device *dev; - - if (!obj) - return; - - dev = obj->dev; - if (kref_put_mutex(&obj->refcount, drm_gem_object_free, &dev->struct_mutex)) - mutex_unlock(&dev->struct_mutex); - else - might_lock(&dev->struct_mutex); -} +void drm_gem_object_unreference_unlocked(struct drm_gem_object *obj); +void drm_gem_object_unreference(struct drm_gem_object *obj); int drm_gem_handle_create(struct drm_file *file_priv, struct drm_gem_object *obj, -- cgit v1.2.3
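
As a closing illustration of the wrapper that the __drm_gem_object_unreference() kerneldoc above asks for, here is a minimal sketch. It assumes a hypothetical driver "foo" whose objects embed struct drm_gem_object, which never takes dev->struct_mutex and which uses the new gem_free_object_unlocked hook:

#include <drm/drm_gem.h>

/* Hypothetical driver object embedding the GEM base object. */
struct foo_gem_object {
	struct drm_gem_object base;
	/* driver-private state would follow */
};

/*
 * Driver-local fast path: foo's own code calls this instead of the
 * locking-checked drm_gem_object_unreference_unlocked(); shared core
 * code must keep using the exported helpers.
 */
static inline void foo_gem_object_unreference(struct foo_gem_object *obj)
{
	__drm_gem_object_unreference(&obj->base);
}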