author    Linus Torvalds <torvalds@linux-foundation.org>    2022-01-12 21:08:11 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>    2022-01-12 21:08:11 +0300
commit    7e7b69654724c72bd3219b71f58937845dca0b2b (patch)
tree      f0ae96ae621cf765359e40958d916da85a92af29 /include
parent    daadb3bd0e8d3e317e36bc2c1542e86c528665e5 (diff)
parent    f857acfc457ea63fa5b862d77f055665d863acfe (diff)
download  linux-7e7b69654724c72bd3219b71f58937845dca0b2b.tar.xz
Merge tag 'dma-mapping-5.17' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - refactor the dma-direct coherent allocator

 - turn a macro into an inline in scatterlist.h (Logan Gunthorpe)

* tag 'dma-mapping-5.17' of git://git.infradead.org/users/hch/dma-mapping:
  lib/scatterlist: cleanup macros into static inline functions
  dma-direct: add a dma_direct_use_pool helper
  dma-direct: factor the swiotlb code out of __dma_direct_alloc_pages
  dma-direct: drop two CONFIG_DMA_RESTRICTED_POOL conditionals
  dma-direct: warn if there is no pool for force unencrypted allocations
  dma-direct: fail allocations that can't be made coherent
  dma-direct: refactor the !coherent checks in dma_direct_alloc
  dma-direct: factor out a helper for DMA_ATTR_NO_KERNEL_MAPPING allocations
  dma-direct: clean up the remapping checks in dma_direct_alloc
  dma-direct: always leak memory that can't be re-encrypted
  dma-direct: don't call dma_set_decrypted for remapped allocations
  dma-direct: factor out dma_set_{de,en}crypted helpers
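The scatterlist change is the only part of this pull that lands in include/. The
point of converting the predicates from #define macros to static inline functions
is that the compiler then type-checks the sg argument and the result gets a
definite type. A minimal stand-alone sketch of that pattern, with hypothetical
names and a made-up flag layout rather than the kernel's types:

#include <stdbool.h>

#define FLAG_END 0x02UL        /* illustrative flag, mirrors SG_END */

struct entry {
	unsigned long page_link;   /* pointer with flags in the low bits */
};

/* Macro version: 'e' is substituted textually, so any type with a
 * page_link member slips through, and the result is an unsigned long. */
#define entry_is_last_macro(e) ((e)->page_link & FLAG_END)

/* Inline version: the compiler rejects anything that is not an
 * entry pointer, and the result is a proper bool. */
static inline bool entry_is_last(const struct entry *e)
{
	return e->page_link & FLAG_END;
}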
Diffstat (limited to 'include')
-rw-r--r--  include/linux/scatterlist.h | 29 +++++++++++++++++++++++------
1 file changed, 23 insertions(+), 6 deletions(-)
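All of the helpers in the diff below lean on one trick, spelled out in the
comment preserved at the top of the first hunk: a struct page pointer is at
least 4-byte aligned, so its two low bits are always zero and can carry the
SG_CHAIN and SG_END flags. A user-space sketch of that bit-stealing (the flag
values mirror the kernel's; everything else is illustrative):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define SG_CHAIN 0x01UL
#define SG_END   0x02UL
#define SG_PAGE_LINK_MASK (SG_CHAIN | SG_END)

int main(void)
{
	/* malloc() returns suitably aligned memory, so the low two bits
	 * of the address are zero and free to hold flags. */
	uint32_t *obj = malloc(sizeof(*obj));
	uintptr_t link = (uintptr_t)obj;

	assert((link & SG_PAGE_LINK_MASK) == 0);

	link |= SG_END;          /* steal a low bit for a flag */
	assert(link & SG_END);   /* the flag reads back ...    */

	/* ... and masking the flags off recovers the original pointer. */
	assert((uint32_t *)(link & ~(uintptr_t)SG_PAGE_LINK_MASK) == obj);

	free(obj);
	return 0;
}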
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 266754a55327..7ff9d6386c12 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -69,10 +69,27 @@ struct sg_append_table {
* a valid sg entry, or whether it points to the start of a new scatterlist.
* Those low bits are there for everyone! (thanks mason :-)
*/
-#define sg_is_chain(sg)		((sg)->page_link & SG_CHAIN)
-#define sg_is_last(sg)		((sg)->page_link & SG_END)
-#define sg_chain_ptr(sg)	\
-	((struct scatterlist *) ((sg)->page_link & ~(SG_CHAIN | SG_END)))
+#define SG_PAGE_LINK_MASK (SG_CHAIN | SG_END)
+
+static inline unsigned int __sg_flags(struct scatterlist *sg)
+{
+	return sg->page_link & SG_PAGE_LINK_MASK;
+}
+
+static inline struct scatterlist *sg_chain_ptr(struct scatterlist *sg)
+{
+	return (struct scatterlist *)(sg->page_link & ~SG_PAGE_LINK_MASK);
+}
+
+static inline bool sg_is_chain(struct scatterlist *sg)
+{
+	return __sg_flags(sg) & SG_CHAIN;
+}
+
+static inline bool sg_is_last(struct scatterlist *sg)
+{
+	return __sg_flags(sg) & SG_END;
+}

/**
* sg_assign_page - Assign a given page to an SG entry
@@ -92,7 +109,7 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
* In order for the low bit stealing approach to work, pages
* must be aligned at a 32-bit boundary as a minimum.
*/
-	BUG_ON((unsigned long) page & (SG_CHAIN | SG_END));
+	BUG_ON((unsigned long)page & SG_PAGE_LINK_MASK);
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg_is_chain(sg));
#endif
@@ -126,7 +143,7 @@ static inline struct page *sg_page(struct scatterlist *sg)
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg_is_chain(sg));
#endif
-	return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END));
+	return (struct page *)((sg)->page_link & ~SG_PAGE_LINK_MASK);
}
/**
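For a sense of how the new predicates are consumed, the sg_next() iterator
defined further down in this same header walks a scatterlist with exactly these
helpers; the sketch below reproduces its shape under a hypothetical name
(the unlikely() annotation omitted for brevity):

/* Step to the next scatterlist entry, following chain links; a simplified
 * sketch modeled on sg_next() from <linux/scatterlist.h>. */
static inline struct scatterlist *sg_next_sketch(struct scatterlist *sg)
{
	if (sg_is_last(sg))
		return NULL;              /* SG_END: list is exhausted */

	sg++;
	if (sg_is_chain(sg))
		sg = sg_chain_ptr(sg);    /* link entry: jump to next table */

	return sg;
}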