| | | |
|---|---|---|
| author | Dan Williams <dan.j.williams@intel.com> | 2008-04-18 07:17:25 +0400 |
| committer | Dan Williams <dan.j.williams@intel.com> | 2008-04-18 00:25:05 +0400 |
| commit | 19242d7233df7d658405d4b7ee1758d21414cfaa (patch) | |
| tree | 4bffa2700c30fdb454dfa150115a0607c6cf3d2a /include | |
| parent | 1c62979ed29a8e2bf9fbe1db101c81a0089676f8 (diff) | |
| download | linux-19242d7233df7d658405d4b7ee1758d21414cfaa.tar.xz | |
async_tx: fix multiple dependency submission
Shrink struct dma_async_tx_descriptor and introduce
async_tx_channel_switch to properly inject a channel switch interrupt in
the descriptor stream. This simplifies the locking model as drivers no
longer need to handle dma_async_tx_descriptor.lock.
Acked-by: Shannon Nelson <shannon.nelson@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
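The shape this message describes, a single dependent reached through a ->next pointer, a ->parent back pointer, and a per-descriptor lock covering both, can be modelled in a few lines. Below is a minimal userspace sketch of that model; the names (tx_desc, tx_chain) are hypothetical and a pthread mutex stands in for the kernel spinlock, so this illustrates the idea rather than reproducing the kernel's async_tx code.

```c
/*
 * Hypothetical userspace model of the post-patch descriptor layout:
 * one ->next dependent, one ->parent back pointer, and a lock that
 * protects both.  Not the kernel implementation.
 */
#include <pthread.h>
#include <stddef.h>

struct tx_desc {
	struct tx_desc *next;    /* at completion, submit this descriptor */
	struct tx_desc *parent;  /* descriptor this one depends on        */
	pthread_mutex_t lock;    /* protects the parent and next pointers */
};

/* Make 'child' depend on 'parent': it is submitted once 'parent' completes. */
static void tx_chain(struct tx_desc *parent, struct tx_desc *child)
{
	pthread_mutex_lock(&parent->lock);
	parent->next = child;
	pthread_mutex_unlock(&parent->lock);

	pthread_mutex_lock(&child->lock);
	child->parent = parent;
	pthread_mutex_unlock(&child->lock);
}

int main(void)
{
	struct tx_desc parent = {0}, child = {0};

	pthread_mutex_init(&parent.lock, NULL);
	pthread_mutex_init(&child.lock, NULL);

	tx_chain(&parent, &child);  /* child runs after parent completes */
	return 0;
}
```

Replacing the old depend_list/depend_node pair of list heads with a single pointer both shrinks the descriptor and, per the message above, lets the core manage the lock so drivers no longer touch dma_async_tx_descriptor.lock themselves.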
Diffstat (limited to 'include')
-rw-r--r--  include/linux/dmaengine.h | 9 +++------
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 34d440698293..91252a7e4d03 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -221,11 +221,9 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param);
  * @callback: routine to call after this operation is complete
  * @callback_param: general parameter to pass to the callback routine
  * ---async_tx api specific fields---
- * @depend_list: at completion this list of transactions are submitted
- * @depend_node: allow this transaction to be executed after another
- *	transaction has completed, possibly on another channel
+ * @next: at completion submit this descriptor
  * @parent: pointer to the next level up in the dependency chain
- * @lock: protect the dependency list
+ * @lock: protect the parent and next pointers
  */
 struct dma_async_tx_descriptor {
 	dma_cookie_t cookie;
@@ -236,8 +234,7 @@ struct dma_async_tx_descriptor {
 	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
 	dma_async_tx_callback callback;
 	void *callback_param;
-	struct list_head depend_list;
-	struct list_head depend_node;
+	struct dma_async_tx_descriptor *next;
 	struct dma_async_tx_descriptor *parent;
 	spinlock_t lock;
 };
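To show how the new next/parent fields could be walked at completion time, here is a self-contained sketch of a dependency walk over the singly linked chain. All names (tx_desc, tx_run_dependencies, tx_submit) are hypothetical and the code is a userspace model, not the kernel's implementation; the point where a dependent sits on a different channel is where, per the commit message, async_tx_channel_switch would inject a channel switch interrupt so the rest of the chain is submitted from that channel's completion path.

```c
/*
 * Userspace sketch of a completion-time walk over the ->next chain.
 * Dependents on the same channel are submitted in one pass; a dependent
 * on another channel is left chained, modelling the spot where the
 * kernel would insert a channel-switch interrupt descriptor.
 */
#include <stdio.h>
#include <stddef.h>

struct tx_desc {
	const char *name;
	int chan;                /* channel this descriptor runs on       */
	struct tx_desc *next;    /* at completion, submit this descriptor */
	struct tx_desc *parent;  /* descriptor this one depends on        */
};

static void tx_submit(struct tx_desc *tx)
{
	printf("submit %-8s on channel %d\n", tx->name, tx->chan);
}

/* Called when 'tx' completes: issue its dependents. */
static void tx_run_dependencies(struct tx_desc *tx)
{
	struct tx_desc *dep = tx->next;
	struct tx_desc *dep_next;

	tx->next = NULL;
	for (; dep; dep = dep_next) {
		dep->parent = NULL;
		dep_next = dep->next;
		if (dep_next && dep_next->chan == dep->chan)
			dep->next = NULL;  /* dep_next is submitted in this walk */
		else
			dep_next = NULL;   /* end of chain, or a channel switch   */
		tx_submit(dep);
	}
}

int main(void)
{
	struct tx_desc a = { .name = "memcpy A", .chan = 0 };
	struct tx_desc b = { .name = "xor B",    .chan = 0 };
	struct tx_desc c = { .name = "memcpy C", .chan = 1 };

	/* C depends on B, which depends on A. */
	a.next = &b; b.parent = &a;
	b.next = &c; c.parent = &b;

	tx_run_dependencies(&a);  /* submits B; C stays chained (other channel) */
	tx_run_dependencies(&b);  /* models B's completion: now C is submitted  */
	return 0;
}
```

In this model, stopping the walk at a channel boundary is what makes the single ->next pointer sufficient: the deferred descriptor is picked up when its predecessor (or the injected interrupt descriptor) completes on the other channel.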