1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
|
/*
* Copyright (c) 2016 Chelsio Communications, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __CXGBIT_H__
#define __CXGBIT_H__
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/completion.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/inet.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <net/net_namespace.h>
#include <target/iscsi/iscsi_transport.h>
#include <iscsi_target_parameters.h>
#include <iscsi_target_login.h>
#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "l2t.h"
#include "libcxgb_ppm.h"
#include "cxgbit_lro.h"
extern struct mutex cdev_list_lock;
extern struct list_head cdev_list_head;
struct cxgbit_np;
struct cxgbit_sock;
/*
 * Per-iSCSI-command driver private state (DDP bookkeeping).
 */
struct cxgbit_cmd {
	struct scatterlist sg;			/* single-entry SG for this cmd */
	struct cxgbi_task_tag_info ttinfo;	/* hw task-tag / PPOD info */
	bool setup_ddp;		/* request DDP setup for this cmd — TODO confirm against users */
	bool release;		/* DDP resources need release on completion — TODO confirm */
};
/*
 * Largest ISO (segmentation-offload) payload per skb: bounded both by the
 * number of page frags an skb can carry and by the 16-bit length limit.
 */
#define CXGBIT_MAX_ISO_PAYLOAD	\
	min_t(u32, MAX_SKB_FRAGS * PAGE_SIZE, 65535)
/*
 * Parameters describing one ISO (multi-PDU) transmit burst.
 */
struct cxgbit_iso_info {
	u8 flags;	/* ISO CPL flag bits — TODO confirm which CPL field */
	u32 mpdu;	/* bytes per PDU within the burst — TODO confirm */
	u32 len;	/* total payload length of the burst */
	u32 burst_len;	/* burst size limit — TODO confirm vs. MaxBurstLength */
};
/* Flag bits kept in cxgbit_skcb_flags(skb) (union cxgbit_skb_cb.flags). */
enum cxgbit_skcb_flags {
	SKCBF_TX_NEED_HDR	= (1 << 0), /* packet needs a header */
	SKCBF_TX_FLAG_COMPL	= (1 << 1), /* wr completion flag */
	SKCBF_TX_ISO		= (1 << 2), /* iso cpl in tx skb */
	SKCBF_RX_LRO		= (1 << 3), /* lro skb */
};
/* Receive-side skb control-block state. */
struct cxgbit_skb_rx_cb {
	u8 opcode;	/* CPL opcode of this message */
	void *pdu_cb;	/* per-PDU LRO control block — see cxgbit_rx_pdu_cb() */
	/* Deferred handler run when the skb is processed off the backlog. */
	void (*backlog_fn)(struct cxgbit_sock *, struct sk_buff *);
};
/* Transmit-side skb control-block state. */
struct cxgbit_skb_tx_cb {
	u8 submode;	/* digest submode bits (CXGBIT_SUBMODE_*) */
	u32 extra_len;	/* extra bytes beyond skb->len (e.g. digests) — TODO confirm */
};
/*
 * Driver-private view of skb->cb.  The first variant carries flags plus
 * direction-specific (tx/rx) state; the second overlays the L2T control
 * block and the pending-WR list linkage used by the wr_pending queue.
 */
union cxgbit_skb_cb {
	struct {
		u8 flags;
		union {
			struct cxgbit_skb_tx_cb tx;
			struct cxgbit_skb_rx_cb rx;
		};
	};

	struct {
		/* This member must be first. */
		struct l2t_skb_cb l2t;
		struct sk_buff *wr_next;	/* next skb on wr_pending list */
	};
};
/* Accessors for the cxgbit control block stored in skb->cb. */
#define CXGBIT_SKB_CB(skb)	((union cxgbit_skb_cb *)&((skb)->cb[0]))
#define cxgbit_skcb_flags(skb)		(CXGBIT_SKB_CB(skb)->flags)
#define cxgbit_skcb_submode(skb)	(CXGBIT_SKB_CB(skb)->tx.submode)
#define cxgbit_skcb_tx_wr_next(skb)	(CXGBIT_SKB_CB(skb)->wr_next)
#define cxgbit_skcb_tx_extralen(skb)	(CXGBIT_SKB_CB(skb)->tx.extra_len)
#define cxgbit_skcb_rx_opcode(skb)	(CXGBIT_SKB_CB(skb)->rx.opcode)
#define cxgbit_skcb_rx_backlog_fn(skb)	(CXGBIT_SKB_CB(skb)->rx.backlog_fn)
#define cxgbit_rx_pdu_cb(skb)		(CXGBIT_SKB_CB(skb)->rx.pdu_cb)
/* The CPL message header sits at the start of the skb payload. */
static inline void *cplhdr(struct sk_buff *skb)
{
	void *hdr = skb->data;

	return hdr;
}
/* Bit numbers for cxgbit_device.flags. */
enum cxgbit_cdev_flags {
	CDEV_STATE_UP = 0,	/* adapter is up and usable */
	CDEV_ISO_ENABLE,	/* ISO (segmentation offload) supported */
	CDEV_DDP_ENABLE,	/* DDP (direct data placement) supported */
};
#define NP_INFO_HASH_SIZE	32

/*
 * Hash-chain node mapping a hardware server TID (stid) to its listening
 * endpoint; chained off cxgbit_device.np_hash_tab.
 */
struct np_info {
	struct np_info *next;		/* next entry on the hash chain */
	struct cxgbit_np *cnp;		/* listening endpoint for this stid */
	unsigned int stid;		/* hw server tid */
};
/* A list head paired with the spinlock that protects it. */
struct cxgbit_list_head {
	struct list_head list;
	/* device lock */
	spinlock_t lock;
};
/*
 * Per-adapter state; one instance per cxgb4 device on cdev_list_head.
 * Lifetime managed via @kref (cxgbit_get_cdev/cxgbit_put_cdev).
 */
struct cxgbit_device {
	struct list_head list;		/* entry on cdev_list_head */
	struct cxgb4_lld_info lldi;	/* lower-level driver info from cxgb4 */
	struct np_info *np_hash_tab[NP_INFO_HASH_SIZE];	/* stid -> cnp map */
	/* np lock */
	spinlock_t np_lock;
	u8 selectq[MAX_NPORTS][2];	/* queue selection — TODO confirm semantics */
	struct cxgbit_list_head cskq;	/* connected sockets on this device */
	u32 mdsl;			/* max data segment length — TODO confirm */
	struct kref kref;
	unsigned long flags;		/* CDEV_* bits (enum cxgbit_cdev_flags) */
};
/* Completion + result used to wait for a firmware work-request reply. */
struct cxgbit_wr_wait {
	struct completion completion;
	int ret;		/* status reported by the reply */
};
/* Connection state machine for a cxgbit_sock. */
enum cxgbit_csk_state {
	CSK_STATE_IDLE = 0,
	CSK_STATE_LISTEN,
	CSK_STATE_CONNECTING,
	CSK_STATE_ESTABLISHED,
	CSK_STATE_ABORTING,
	CSK_STATE_CLOSING,
	CSK_STATE_MORIBUND,	/* close in progress, awaiting hw — TODO confirm */
	CSK_STATE_DEAD,
};
/* Bit numbers for cxgbit_sock_common.flags. */
enum cxgbit_csk_flags {
	CSK_TX_DATA_SENT = 0,	/* first tx data (flowc) already sent */
	CSK_LOGIN_PDU_DONE,
	CSK_LOGIN_DONE,
	CSK_DDP_ENABLE,		/* DDP enabled on this connection */
	CSK_ABORT_RPL_WAIT,	/* waiting for ABORT_RPL from hw */
};
/* State shared by listening endpoints (cxgbit_np) and connections. */
struct cxgbit_sock_common {
	struct cxgbit_device *cdev;		/* owning adapter */
	struct sockaddr_storage local_addr;
	struct sockaddr_storage remote_addr;
	struct cxgbit_wr_wait wr_wait;		/* for synchronous hw replies */
	enum cxgbit_csk_state state;
	unsigned long flags;			/* CSK_* bits */
};
/*
 * Offloaded listening endpoint backing an iscsi_np.
 * Lifetime managed via @kref (cxgbit_get_cnp/cxgbit_put_cnp).
 */
struct cxgbit_np {
	struct cxgbit_sock_common com;
	wait_queue_head_t accept_wait;		/* waiters in accept path */
	struct iscsi_np *np;			/* core iSCSI network portal */
	struct completion accept_comp;
	struct list_head np_accept_list;	/* sockets ready to accept */
	/* np accept lock */
	spinlock_t np_accept_lock;
	struct kref kref;
	unsigned int stid;			/* hw server tid */
};
/*
 * Per-connection (offloaded TCP) state.  Lifetime managed via @kref
 * (cxgbit_get_csk/cxgbit_put_csk); @lock protects the socket.
 */
struct cxgbit_sock {
	struct cxgbit_sock_common com;
	struct cxgbit_np *cnp;		/* listening endpoint we arrived on */
	struct iscsi_conn *conn;	/* core iSCSI connection */
	struct l2t_entry *l2t;		/* L2 table entry for tx */
	struct dst_entry *dst;		/* cached route */
	struct list_head list;		/* entry on cdev->cskq */
	struct sk_buff_head rxq;
	struct sk_buff_head txq;
	struct sk_buff_head ppodq;	/* pending PPOD (DDP page-pod) skbs */
	struct sk_buff_head backlogq;	/* deferred rx, run via backlog_fn */
	struct sk_buff_head skbq;
	struct sk_buff *wr_pending_head; /* unacked WRs, linked via wr_next */
	struct sk_buff *wr_pending_tail; /* stale once list drains — see dequeue */
	struct sk_buff *skb;
	struct sk_buff *lro_skb;
	struct sk_buff *lro_hskb;
	struct list_head accept_node;	/* entry on cnp->np_accept_list */
	/* socket lock */
	spinlock_t lock;
	wait_queue_head_t waitq;
	wait_queue_head_t ack_waitq;
	bool lock_owner;
	struct kref kref;
	u32 max_iso_npdu;	/* max PDUs per ISO burst */
	u32 wr_cred;		/* available WR credits */
	u32 wr_una_cred;	/* credits consumed by unacked WRs */
	u32 wr_max_cred;
	u32 snd_una;		/* TCP snd_una per hw feedback */
	u32 tid;		/* hw connection tid */
	u32 snd_nxt;
	u32 rcv_nxt;
	u32 smac_idx;
	u32 tx_chan;
	u32 mtu;
	u32 write_seq;
	u32 rx_credits;		/* rx bytes not yet returned to hw */
	u32 snd_win;
	u32 rcv_win;
	u16 mss;
	u16 emss;		/* effective mss — TODO confirm derivation */
	u16 plen;
	u16 rss_qid;		/* rx queue for this tid */
	u16 txq_idx;
	u16 ctrlq_idx;
	u8 tos;
	u8 port_id;
#define CXGBIT_SUBMODE_HCRC 0x1
#define CXGBIT_SUBMODE_DCRC 0x2
	u8 submode;		/* digest mode: HCRC/DCRC bits above */
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 dcb_priority;
#endif
	u8 snd_wscale;		/* negotiated TCP window scale */
};
/* kref release callbacks, invoked by the cxgbit_put_*() helpers below. */
void _cxgbit_free_cdev(struct kref *kref);
void _cxgbit_free_csk(struct kref *kref);
void _cxgbit_free_cnp(struct kref *kref);
/* Take an additional reference on the adapter device. */
static inline void cxgbit_get_cdev(struct cxgbit_device *cdev)
{
	struct kref *ref = &cdev->kref;

	kref_get(ref);
}
/* Drop a device reference; frees the device when the last one goes. */
static inline void cxgbit_put_cdev(struct cxgbit_device *cdev)
{
	struct kref *ref = &cdev->kref;

	kref_put(ref, _cxgbit_free_cdev);
}
/* Take an additional reference on the connection socket. */
static inline void cxgbit_get_csk(struct cxgbit_sock *csk)
{
	struct kref *ref = &csk->kref;

	kref_get(ref);
}
/* Drop a socket reference; frees the socket when the last one goes. */
static inline void cxgbit_put_csk(struct cxgbit_sock *csk)
{
	struct kref *ref = &csk->kref;

	kref_put(ref, _cxgbit_free_csk);
}
/* Take an additional reference on the listening endpoint. */
static inline void cxgbit_get_cnp(struct cxgbit_np *cnp)
{
	struct kref *ref = &cnp->kref;

	kref_get(ref);
}
/* Drop an endpoint reference; frees it when the last one goes. */
static inline void cxgbit_put_cnp(struct cxgbit_np *cnp)
{
	struct kref *ref = &cnp->kref;

	kref_put(ref, _cxgbit_free_cnp);
}
/* Empty the pending-WR list (drops the head/tail pointers only). */
static inline void cxgbit_sock_reset_wr_list(struct cxgbit_sock *csk)
{
	csk->wr_pending_head = NULL;
	csk->wr_pending_tail = NULL;
}
/* Front of the pending-WR list (NULL if empty); list is not modified. */
static inline struct sk_buff *cxgbit_sock_peek_wr(const struct cxgbit_sock *csk)
{
	struct sk_buff *head = csk->wr_pending_head;

	return head;
}
/*
 * Append @skb to the tail of the pending-WR list, taking a reference
 * that is held until the WR is acknowledged and the skb dequeued.
 * Emptiness is keyed off wr_pending_head: the tail pointer is left
 * stale when the list drains via dequeue.
 */
static inline void
cxgbit_sock_enqueue_wr(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	cxgbit_skcb_tx_wr_next(skb) = NULL;

	skb_get(skb);

	if (csk->wr_pending_head)
		cxgbit_skcb_tx_wr_next(csk->wr_pending_tail) = skb;
	else
		csk->wr_pending_head = skb;
	csk->wr_pending_tail = skb;
}
/*
 * Pop and return the head of the pending-WR list, or NULL if empty.
 * The reference taken at enqueue time is transferred to the caller.
 */
static inline struct sk_buff *cxgbit_sock_dequeue_wr(struct cxgbit_sock *csk)
{
	struct sk_buff *skb = csk->wr_pending_head;

	if (unlikely(!skb))
		return NULL;

	csk->wr_pending_head = cxgbit_skcb_tx_wr_next(skb);
	cxgbit_skcb_tx_wr_next(skb) = NULL;
	return skb;
}
/* CPL message handler, dispatched per opcode via cxgbit_cplhandlers[]. */
typedef void (*cxgbit_cplhandler_func)(struct cxgbit_device *,
				       struct sk_buff *);

/* Connection management / iscsit transport entry points. */
int cxgbit_setup_np(struct iscsi_np *, struct sockaddr_storage *);
int cxgbit_setup_conn_digest(struct cxgbit_sock *);
int cxgbit_accept_np(struct iscsi_np *, struct iscsi_conn *);
void cxgbit_free_np(struct iscsi_np *);
void cxgbit_abort_conn(struct cxgbit_sock *csk);
void cxgbit_free_conn(struct iscsi_conn *);
extern cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS];

/* Data path: login, tx/rx PDU handling, credits and work requests. */
int cxgbit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
int cxgbit_rx_data_ack(struct cxgbit_sock *);
int cxgbit_l2t_send(struct cxgbit_device *, struct sk_buff *,
		    struct l2t_entry *);
void cxgbit_push_tx_frames(struct cxgbit_sock *);
int cxgbit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
int cxgbit_xmit_pdu(struct iscsi_conn *, struct iscsi_cmd *,
		    struct iscsi_datain_req *, const void *, u32);
void cxgbit_get_r2t_ttt(struct iscsi_conn *, struct iscsi_cmd *,
			struct iscsi_r2t *);
u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *);
int cxgbit_ofld_send(struct cxgbit_device *, struct sk_buff *);
void cxgbit_get_rx_pdu(struct iscsi_conn *);
int cxgbit_validate_params(struct iscsi_conn *);
struct cxgbit_device *cxgbit_find_device(struct net_device *, u8 *);

/* DDP */
int cxgbit_ddp_init(struct cxgbit_device *);
int cxgbit_setup_conn_pgidx(struct cxgbit_sock *, u32);
int cxgbit_reserve_ttt(struct cxgbit_sock *, struct iscsi_cmd *);
void cxgbit_release_cmd(struct iscsi_conn *, struct iscsi_cmd *);
/* Page-pod manager shared with cxgb4; lldi.iscsi_ppm points at it. */
static inline
struct cxgbi_ppm *cdev2ppm(struct cxgbit_device *cdev)
{
	void *ppm = *cdev->lldi.iscsi_ppm;

	return (struct cxgbi_ppm *)ppm;
}
#endif /* __CXGBIT_H__ */
|