author    | Dan Williams <dan.j.williams@intel.com> | 2010-05-18 03:24:16 +0400
committer | Dan Williams <dan.j.williams@intel.com> | 2010-05-18 03:24:16 +0400
commit    | caa20d974c86af496b419eef70010e63b7fab7ac (patch)
tree      | a38165bd839a398528a4ef4c7fa8481fb0fefed3 /include/linux/dmaengine.h
parent    | c86e1401c9f2ba8d989fa1c4b33d0f0ec3ba8aaf (diff)
download  | linux-caa20d974c86af496b419eef70010e63b7fab7ac.tar.xz
async_tx: trim dma_async_tx_descriptor in 'no channel switch' case
Saves 24 bytes per descriptor on 64-bit builds when the channel-switching
capabilities of async_tx are not required (CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y).
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
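
The 24-byte figure is straightforward arithmetic on the three fields the patch compiles out: two 8-byte pointers (`next`, `parent`) plus a 4-byte `spinlock_t` (with lock debugging off), rounded up to the struct's 8-byte alignment on 64-bit builds. A minimal userspace sketch of that arithmetic follows; the struct name and the `unsigned int` stand-in for `spinlock_t` are illustrative assumptions, not kernel types:

```c
#include <stdio.h>

/* Stand-ins for the three fields removed when
 * CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y; names and types are
 * illustrative, assuming an LP64 build with lock debugging off. */
struct chan_switch_tail {
	void *next;		/* 8 bytes on 64-bit */
	void *parent;		/* 8 bytes on 64-bit */
	unsigned int lock;	/* spinlock_t: 4 bytes without debug options */
};

int main(void)
{
	/* 8 + 8 + 4 = 20, padded to 24 by the 8-byte pointer alignment */
	printf("bytes saved per descriptor: %zu\n",
	       sizeof(struct chan_switch_tail));
	return 0;
}
```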
Diffstat (limited to 'include/linux/dmaengine.h')
-rw-r--r-- | include/linux/dmaengine.h | 60
1 file changed, 60 insertions(+), 0 deletions(-)
```diff
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 20ea12c86fd0..cb234979fc6b 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -230,11 +230,71 @@ struct dma_async_tx_descriptor {
 	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
 	dma_async_tx_callback callback;
 	void *callback_param;
+#ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
 	struct dma_async_tx_descriptor *next;
 	struct dma_async_tx_descriptor *parent;
 	spinlock_t lock;
+#endif
 };
 
+#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+static inline void txd_lock(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
+{
+	BUG();
+}
+static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
+{
+	return NULL;
+}
+static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
+{
+	return NULL;
+}
+
+#else
+static inline void txd_lock(struct dma_async_tx_descriptor *txd)
+{
+	spin_lock_bh(&txd->lock);
+}
+static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
+{
+	spin_unlock_bh(&txd->lock);
+}
+static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
+{
+	txd->next = next;
+	next->parent = txd;
+}
+static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
+{
+	txd->parent = NULL;
+}
+static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
+{
+	txd->next = NULL;
+}
+static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
+{
+	return txd->parent;
+}
+static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
+{
+	return txd->next;
+}
+#endif
+
 /**
  * struct dma_device - info on the entity supplying DMA services
  * @chancnt: how many DMA channels are supported
```
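
The accessor pairs are what let common code compile in both configurations: with CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y, txd_next() always returns NULL and txd_lock()/txd_unlock() are no-ops, so a dependency walk falls through without any #ifdef at the call site. The sketch below is modeled loosely on dma_run_dependencies() in drivers/dma/dmaengine.c (which this patch converts to the new helpers); it is not the kernel's exact code and omits the trailing dma_async_issue_pending() step:

```c
#include <linux/dmaengine.h>

/* Hedged sketch modeled on dma_run_dependencies(); it shows how the
 * txd_*() helpers replace direct touches of ->next/->parent/->lock. */
static void run_dependencies_sketch(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;	/* always taken when channel switching is compiled out */

	/* the loop below submits tx->next itself, so drop the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* submit dependents until a channel switch is detected */
	for (; dep; dep = dep_next) {
		txd_lock(dep);		/* no-op in the trimmed configuration */
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep);	/* same channel: keep walking */
		else
			dep_next = NULL;	/* channel switch: stop after this one */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}
}
```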