/*
* Copyright 2013-2015 Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
*
* Licensed under the GPL-2.
*/

#ifndef __INDUSTRIALIO_DMA_BUFFER_H__
#define __INDUSTRIALIO_DMA_BUFFER_H__

#include <linux/list.h>
#include <linux/kref.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/iio/buffer.h>

struct iio_dma_buffer_queue;
struct iio_dma_buffer_ops;
struct device;

struct iio_buffer_block {
	u32 size;
	u32 bytes_used;
};

/**
 * enum iio_block_state - State of a struct iio_dma_buffer_block
 * @IIO_BLOCK_STATE_DEQUEUED: Block is not queued
 * @IIO_BLOCK_STATE_QUEUED: Block is on the incoming queue
 * @IIO_BLOCK_STATE_ACTIVE: Block is currently being processed by the DMA
 * @IIO_BLOCK_STATE_DONE: Block is on the outgoing queue
 * @IIO_BLOCK_STATE_DEAD: Block has been marked to be freed
 */
enum iio_block_state {
	IIO_BLOCK_STATE_DEQUEUED,
	IIO_BLOCK_STATE_QUEUED,
	IIO_BLOCK_STATE_ACTIVE,
	IIO_BLOCK_STATE_DONE,
	IIO_BLOCK_STATE_DEAD,
};
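
/*
 * Typical lifecycle of a block, following the states above: DEQUEUED (owned
 * by the block's owner) -> QUEUED (on the incoming queue) -> ACTIVE (being
 * processed by the DMA controller) -> DONE (on the outgoing queue) ->
 * DEQUEUED again once the data has been consumed. A block marked DEAD is
 * freed once its last kref reference is dropped.
 */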

/**
 * struct iio_dma_buffer_block - IIO buffer block
 * @head: List head
 * @size: Total size of the block in bytes
 * @bytes_used: Number of bytes that contain valid data
 * @vaddr: Virtual address of the block's memory
 * @phys_addr: Physical address of the block's memory
 * @queue: Parent DMA buffer queue
 * @kref: kref used to manage the lifetime of the block
 * @state: Current state of the block
 */
struct iio_dma_buffer_block {
	/* May only be accessed by the owner of the block */
	struct list_head head;
	size_t bytes_used;

	/*
	 * Set during allocation, constant thereafter. May be accessed
	 * read-only by anybody holding a reference to the block.
	 */
	void *vaddr;
	dma_addr_t phys_addr;
	size_t size;
	struct iio_dma_buffer_queue *queue;

	/* Must not be accessed outside the core. */
	struct kref kref;
	/*
	 * Must not be accessed outside the core. Access needs to hold
	 * queue->list_lock if the block is not owned by the core.
	 */
	enum iio_block_state state;
};

/**
 * struct iio_dma_buffer_queue_fileio - FileIO state for the DMA buffer
 * @blocks: Buffer blocks used for fileio
 * @active_block: Block being used in read()
 * @pos: Read offset in the active block
 * @block_size: Size of each block
 */
struct iio_dma_buffer_queue_fileio {
	struct iio_dma_buffer_block *blocks[2];
	struct iio_dma_buffer_block *active_block;
	size_t pos;
	size_t block_size;
};
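
/*
 * Note: the two fileio blocks allow simple double buffering: one block can be
 * queued for DMA while the data in the other is copied to userspace through
 * read().
 */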

/**
 * struct iio_dma_buffer_queue - DMA buffer base structure
 * @buffer: IIO buffer base structure
 * @dev: Parent device
 * @ops: DMA buffer callbacks
 * @lock: Protects the incoming list, @active and the fields in the @fileio
 *	substruct
 * @list_lock: Protects lists that contain blocks which can be modified in
 *	atomic context, as well as the blocks on those lists. This is the
 *	outgoing queue list and typically also a list of active blocks in the
 *	part that handles the DMA controller
 * @incoming: List of buffers on the incoming queue
 * @outgoing: List of buffers on the outgoing queue
 * @active: Whether the buffer is currently active
 * @fileio: FileIO state
 */
struct iio_dma_buffer_queue {
	struct iio_buffer buffer;
	struct device *dev;
	const struct iio_dma_buffer_ops *ops;

	struct mutex lock;
	spinlock_t list_lock;
	struct list_head incoming;
	struct list_head outgoing;

	bool active;

	struct iio_dma_buffer_queue_fileio fileio;
};

/**
 * struct iio_dma_buffer_ops - DMA buffer callback operations
 * @submit: Called when a block is submitted to the DMA controller
 * @abort: Should abort all pending transfers
 */
struct iio_dma_buffer_ops {
	int (*submit)(struct iio_dma_buffer_queue *queue,
		struct iio_dma_buffer_block *block);
	void (*abort)(struct iio_dma_buffer_queue *queue);
};
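
/*
 * Example: a minimal, hypothetical submit()/abort() pair built on top of the
 * dmaengine API. All my_* names below are assumptions for illustration only;
 * a real driver embeds struct iio_dma_buffer_queue in its own state struct
 * and calls iio_dma_buffer_block_done() from the DMA completion callback.
 *
 *	static void my_dma_done(void *data)
 *	{
 *		iio_dma_buffer_block_done(data);
 *	}
 *
 *	static int my_submit(struct iio_dma_buffer_queue *queue,
 *		struct iio_dma_buffer_block *block)
 *	{
 *		struct my_state *st = my_state_from_queue(queue);
 *		struct dma_async_tx_descriptor *desc;
 *
 *		desc = dmaengine_prep_slave_single(st->chan, block->phys_addr,
 *			block->size, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *		if (!desc)
 *			return -ENOMEM;
 *
 *		desc->callback = my_dma_done;
 *		desc->callback_param = block;
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(st->chan);
 *
 *		return 0;
 *	}
 *
 *	static void my_abort(struct iio_dma_buffer_queue *queue)
 *	{
 *		struct my_state *st = my_state_from_queue(queue);
 *
 *		dmaengine_terminate_all(st->chan);
 *	}
 *
 *	static const struct iio_dma_buffer_ops my_dma_buffer_ops = {
 *		.submit = my_submit,
 *		.abort = my_abort,
 *	};
 */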

void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block);
void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
	struct list_head *list);

int iio_dma_buffer_enable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev);
int iio_dma_buffer_disable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev);
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
	char __user *user_buffer);
size_t iio_dma_buffer_data_available(struct iio_buffer *buffer);
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length);
int iio_dma_buffer_request_update(struct iio_buffer *buffer);

int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
	struct device *dma_dev, const struct iio_dma_buffer_ops *ops);
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue);
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue);
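
/*
 * Example: hypothetical setup and teardown of the queue from a driver's
 * probe()/remove() paths (a sketch; the my_* names are assumptions):
 *
 *	struct my_state {
 *		struct iio_dma_buffer_queue queue;
 *		struct dma_chan *chan;
 *	};
 *
 * In probe, after the DMA channel has been requested:
 *
 *	ret = iio_dma_buffer_init(&st->queue, st->chan->device->dev,
 *		&my_dma_buffer_ops);
 *
 * In remove, once the buffer has been disabled:
 *
 *	iio_dma_buffer_exit(&st->queue);
 *
 * iio_dma_buffer_release() frees the resources still held by the queue and
 * is called once the last reference to the buffer has been dropped.
 */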
#endif