author     Tiago Vignatti <tiago.vignatti@intel.com>    2015-12-23 00:36:45 +0300
committer  Daniel Vetter <daniel.vetter@ffwll.ch>       2016-02-09 11:25:22 +0300
commit     831e9da7dc5c22fd2a5fb64e999f6e077a4338c3 (patch)
tree       fd781ca5f233f8559189d35f18842277844e6551 /include/linux/dma-buf.h
parent     bfe981a0952880df43d08a050bf3ae44aaebd795 (diff)
download   linux-831e9da7dc5c22fd2a5fb64e999f6e077a4338c3.tar.xz
dma-buf: Remove range-based flush
This patch removes the range-based information that begin_cpu_access and
end_cpu_access used for flush optimizations.

We don't have any users or implementations of range-based flushing. There
seems to be consensus that if we ever want something like that again (or
something even more expressive, such as 2D/3D sub-range regions), it can be
built on the upcoming dma-buf sync ioctl (see the sketch after the sign-off
tags below).
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Daniel Vetter <daniel.vetter@intel.com>
Signed-off-by: Tiago Vignatti <tiago.vignatti@intel.com>
Reviewed-by: Stéphane Marchesin <marcheu@chromium.org>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1450820214-12509-3-git-send-email-tiago.vignatti@intel.com
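
The sync ioctl referred to in the message later landed in the uapi header as
DMA_BUF_IOCTL_SYNC. Below is a minimal, hedged sketch of how userspace could
bracket whole-buffer CPU access with it; the ioctl was still under review when
this patch was written, and the helper name cpu_fill_buffer, the fd, and the
size are illustrative assumptions, not part of this change.

/*
 * Illustrative only, not part of this patch: userspace bracketing CPU
 * writes to an mmap'ed dma-buf with the sync ioctl. Error handling is
 * minimal; dmabuf_fd and size are assumed to come from the exporter.
 */
#include <linux/dma-buf.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static int cpu_fill_buffer(int dmabuf_fd, size_t size)
{
	struct dma_buf_sync sync = { .flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE };
	void *map;
	int ret;

	map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, dmabuf_fd, 0);
	if (map == MAP_FAILED)
		return -1;

	/* Tell the exporter CPU writes are about to start (whole buffer). */
	ret = ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
	if (ret == 0) {
		memset(map, 0, size);		/* CPU access happens here */

		/* Flush caches back before the device touches the buffer again. */
		sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
		ret = ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
	}

	munmap(map, size);
	return ret;
}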
Diffstat (limited to 'include/linux/dma-buf.h')
-rw-r--r-- | include/linux/dma-buf.h | 12
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index f98bd7068d55..532108ea0c1c 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -54,7 +54,7 @@ struct dma_buf_attachment;
  * @release: release this buffer; to be called after the last dma_buf_put.
  * @begin_cpu_access: [optional] called before cpu access to invalidate cpu
  *                    caches and allocate backing storage (if not yet done)
- *                    respectively pin the objet into memory.
+ *                    respectively pin the object into memory.
  * @end_cpu_access: [optional] called after cpu access to flush caches.
  * @kmap_atomic: maps a page from the buffer into kernel address
  *               space, users may not block until the subsequent unmap call.
@@ -93,10 +93,8 @@ struct dma_buf_ops {
 	/* after final dma_buf_put() */
 	void (*release)(struct dma_buf *);
 
-	int (*begin_cpu_access)(struct dma_buf *, size_t, size_t,
-				enum dma_data_direction);
-	void (*end_cpu_access)(struct dma_buf *, size_t, size_t,
-			       enum dma_data_direction);
+	int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
+	void (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
 	void *(*kmap_atomic)(struct dma_buf *, unsigned long);
 	void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
 	void *(*kmap)(struct dma_buf *, unsigned long);
@@ -224,9 +222,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
 					enum dma_data_direction);
 void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
 				enum dma_data_direction);
-int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
 			     enum dma_data_direction dir);
-void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+void dma_buf_end_cpu_access(struct dma_buf *dma_buf,
 			    enum dma_data_direction dir);
 void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
 void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
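
For kernel-side importers the conversion is mechanical: the start/len
arguments disappear and the bracketing now always covers the whole buffer.
A minimal sketch with the new signatures follows; the function name
example_cpu_read and the single-page kmap are illustrative assumptions, not
code taken from this patch.

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>

/* Illustrative only: bracket whole-buffer CPU reads with the new API. */
static int example_cpu_read(struct dma_buf *dmabuf)
{
	void *vaddr;
	int ret;

	/* Invalidate CPU caches for the whole buffer; no start/len anymore. */
	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	vaddr = dma_buf_kmap(dmabuf, 0);	/* map the first page */
	if (vaddr) {
		/* ... read the data through vaddr ... */
		dma_buf_kunmap(dmabuf, 0, vaddr);
	}

	/* end_cpu_access also lost its range and still returns void here. */
	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
	return 0;
}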