/*
* Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
* Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef QIB_VERBS_H
#define QIB_VERBS_H
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/rdma_vt.h>
struct qib_ctxtdata;
struct qib_pportdata;
struct qib_devdata;
struct qib_verbs_txreq;
#define QIB_MAX_RDMA_ATOMIC 16
#define QIB_GUIDS_PER_PORT 5
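/*
 * QP numbers are 24 bits. The QPN allocator tracks them in a bitmap made
 * up of qpn_map pages of PAGE_SIZE bytes (PAGE_SIZE * BITS_PER_BYTE QPNs
 * per page), so QPNMAP_ENTRIES pages cover the whole QPN space.
 */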
#define QPN_MAX (1 << 24)
#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
/*
* Increment this value if any changes that break userspace ABI
* compatibility are made.
*/
#define QIB_UVERBS_ABI_VERSION 2
/*
* Define an ib_cq_notify value that is not valid so we know when CQ
* notifications are armed.
*/
#define IB_CQ_NONE (IB_CQ_NEXT_COMP + 1)
#define IB_SEQ_NAK (3 << 29)
/* AETH NAK opcode values */
#define IB_RNR_NAK 0x20
#define IB_NAK_PSN_ERROR 0x60
#define IB_NAK_INVALID_REQUEST 0x61
#define IB_NAK_REMOTE_ACCESS_ERROR 0x62
#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
#define IB_NAK_INVALID_RD_REQUEST 0x64
/* Flags for checking QP state (see ib_qib_state_ops[]) */
#define QIB_POST_SEND_OK 0x01
#define QIB_POST_RECV_OK 0x02
#define QIB_PROCESS_RECV_OK 0x04
#define QIB_PROCESS_SEND_OK 0x08
#define QIB_PROCESS_NEXT_SEND_OK 0x10
#define QIB_FLUSH_SEND 0x20
#define QIB_FLUSH_RECV 0x40
#define QIB_PROCESS_OR_FLUSH_SEND \
(QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND)
/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE 0x00
#define IB_PMA_SAMPLE_STATUS_STARTED 0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING 0x02
/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA cpu_to_be16(0x0001)
#define IB_PMA_PORT_RCV_DATA cpu_to_be16(0x0002)
#define IB_PMA_PORT_XMIT_PKTS cpu_to_be16(0x0003)
#define IB_PMA_PORT_RCV_PKTS cpu_to_be16(0x0004)
#define IB_PMA_PORT_XMIT_WAIT cpu_to_be16(0x0005)
#define QIB_VENDOR_IPG cpu_to_be16(0xFFA0)
#define IB_BTH_REQ_ACK (1 << 31)
#define IB_BTH_SOLICITED (1 << 23)
#define IB_BTH_MIG_REQ (1 << 22)
/* XXX Should be defined in ib_verbs.h enum ib_port_cap_flags */
#define IB_PORT_OTHER_LOCAL_CHANGES_SUP (1 << 26)
#define IB_GRH_VERSION 6
#define IB_GRH_VERSION_MASK 0xF
#define IB_GRH_VERSION_SHIFT 28
#define IB_GRH_TCLASS_MASK 0xFF
#define IB_GRH_TCLASS_SHIFT 20
#define IB_GRH_FLOW_MASK 0xFFFFF
#define IB_GRH_FLOW_SHIFT 0
#define IB_GRH_NEXT_HDR 0x1B
#define IB_DEFAULT_GID_PREFIX cpu_to_be64(0xfe80000000000000ULL)
/* Values for set/get portinfo VLCap OperationalVLs */
#define IB_VL_VL0 1
#define IB_VL_VL0_1 2
#define IB_VL_VL0_3 3
#define IB_VL_VL0_7 4
#define IB_VL_VL0_14 5
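/*
 * Convert the PortInfo VLCap/OperationalVLs encoding above into the
 * number of virtual lanes it represents.
 */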
static inline int qib_num_vls(int vls)
{
switch (vls) {
default:
case IB_VL_VL0:
return 1;
case IB_VL_VL0_1:
return 2;
case IB_VL_VL0_3:
return 4;
case IB_VL_VL0_7:
return 8;
case IB_VL_VL0_14:
return 15;
}
}
struct ib_reth {
__be64 vaddr;
__be32 rkey;
__be32 length;
} __packed;
struct ib_atomic_eth {
__be32 vaddr[2]; /* unaligned so access as 2 32-bit words */
__be32 rkey;
__be64 swap_data;
__be64 compare_data;
} __packed;
struct qib_other_headers {
__be32 bth[3];
union {
struct {
__be32 deth[2];
__be32 imm_data;
} ud;
struct {
struct ib_reth reth;
__be32 imm_data;
} rc;
struct {
__be32 aeth;
__be32 atomic_ack_eth[2];
} at;
__be32 imm_data;
__be32 aeth;
struct ib_atomic_eth atomic_eth;
} u;
} __packed;
/*
* Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
* long (72 w/ imm_data). Only the first 56 bytes of the IB header
* will be in the eager header buffer. The remaining 12 or 16 bytes
* are in the data buffer.
*/
struct qib_ib_header {
__be16 lrh[4];
union {
struct {
struct ib_grh grh;
struct qib_other_headers oth;
} l;
struct qib_other_headers oth;
} u;
} __packed;
struct qib_pio_header {
__le32 pbc[2];
struct qib_ib_header hdr;
} __packed;
/*
* There is one struct qib_mcast for each multicast GID.
* All attached QPs are then stored as a list of
* struct qib_mcast_qp.
*/
struct qib_mcast_qp {
struct list_head list;
struct qib_qp *qp;
};
struct qib_mcast {
struct rb_node rb_node;
union ib_gid mgid;
struct list_head qp_list;
wait_queue_head_t wait;
atomic_t refcount;
int n_attached;
};
/* Protection domain */
struct qib_pd {
struct ib_pd ibpd;
int user; /* non-zero if created from user space */
};
/* Address Handle */
struct qib_ah {
struct ib_ah ibah;
struct ib_ah_attr attr;
atomic_t refcount;
};
/*
* This structure is used by qib_mmap() to validate an offset
* when an mmap() request is made. The vm_area_struct then uses
* this as its vm_private_data.
*/
struct qib_mmap_info {
struct list_head pending_mmaps;
struct ib_ucontext *context;
void *obj;
__u64 offset;
struct kref ref;
unsigned size;
};
/*
* This structure is used to contain the head pointer, tail pointer,
* and completion queue entries as a single memory allocation so
* it can be mmap'ed into user space.
*/
struct qib_cq_wc {
u32 head; /* index of next entry to fill */
u32 tail; /* index of next ib_poll_cq() entry */
union {
/* these are actually size ibcq.cqe + 1 */
struct ib_uverbs_wc uqueue[0];
struct ib_wc kqueue[0];
};
};
/*
* The completion queue structure.
*/
struct qib_cq {
struct ib_cq ibcq;
struct kthread_work comptask;
struct qib_devdata *dd;
spinlock_t lock; /* protect changes in this struct */
u8 notify;
u8 triggered;
struct qib_cq_wc *queue;
struct qib_mmap_info *ip;
};
/*
* A segment is a linear region of low physical memory.
* XXX Maybe we should use phys addr here and kmap()/kunmap().
* Used by the verbs layer.
*/
struct qib_seg {
void *vaddr;
size_t length;
};
/* The number of qib_segs that fit in a page. */
#define QIB_SEGSZ (PAGE_SIZE / sizeof(struct qib_seg))
struct qib_segarray {
struct qib_seg segs[QIB_SEGSZ];
};
struct qib_mregion {
struct ib_pd *pd; /* shares refcnt of ibmr.pd */
u64 user_base; /* User's address for this region */
u64 iova; /* IB start address of this region */
size_t length;
u32 lkey;
u32 offset; /* offset (bytes) to start of region */
int access_flags;
u32 max_segs; /* number of qib_segs in all the arrays */
u32 mapsz; /* size of the map array */
u8 page_shift; /* 0 - non-uniform or non-power-of-2 sizes */
u8 lkey_published; /* in global table */
struct completion comp; /* complete when refcount goes to zero */
struct rcu_head list;
atomic_t refcount;
struct qib_segarray *map[0]; /* the segments */
};
/*
* These keep track of the copy progress within a memory region.
* Used by the verbs layer.
*/
struct qib_sge {
struct qib_mregion *mr;
void *vaddr; /* kernel virtual address of segment */
u32 sge_length; /* length of the SGE */
u32 length; /* remaining length of the segment */
u16 m; /* current index: mr->map[m] */
u16 n; /* current index: mr->map[m]->segs[n] */
};
/* Memory region */
struct qib_mr {
struct ib_mr ibmr;
struct ib_umem *umem;
u64 *pages;
u32 npages;
struct qib_mregion mr; /* must be last */
};
/*
* Send work request queue entry.
* The size of the sg_list is determined when the QP is created and stored
* in qp->s_max_sge.
*/
struct qib_swqe {
union {
struct ib_send_wr wr; /* don't use wr.sg_list */
struct ib_ud_wr ud_wr;
struct ib_reg_wr reg_wr;
struct ib_rdma_wr rdma_wr;
struct ib_atomic_wr atomic_wr;
};
u32 psn; /* first packet sequence number */
u32 lpsn; /* last packet sequence number */
u32 ssn; /* send sequence number */
u32 length; /* total length of data in sg_list */
struct qib_sge sg_list[0];
};
/*
* Receive work request queue entry.
* The size of the sg_list is determined when the QP (or SRQ) is created
* and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
*/
struct qib_rwqe {
u64 wr_id;
u8 num_sge;
struct ib_sge sg_list[0];
};
/*
* This structure is used to contain the head pointer, tail pointer,
* and receive work queue entries as a single memory allocation so
* it can be mmap'ed into user space.
* Note that the wq array elements are variable size so you can't
* just index into the array to get the N'th element;
* use get_rwqe_ptr() instead.
*/
struct qib_rwq {
u32 head; /* new work requests posted to the head */
u32 tail; /* receives pull requests from here. */
struct qib_rwqe wq[0];
};
struct qib_rq {
struct qib_rwq *wq;
u32 size; /* size of RWQE array */
u8 max_sge;
spinlock_t lock /* protect changes in this struct */
____cacheline_aligned_in_smp;
};
struct qib_srq {
struct ib_srq ibsrq;
struct qib_rq rq;
struct qib_mmap_info *ip;
/* send signal when number of RWQEs < limit */
u32 limit;
};
struct qib_sge_state {
struct qib_sge *sg_list; /* next SGE to be used if any */
struct qib_sge sge; /* progress state for the current SGE */
u32 total_len;
u8 num_sge;
};
/*
* This structure holds the information that the send tasklet needs
* to send a RDMA read response or atomic operation.
*/
struct qib_ack_entry {
u8 opcode;
u8 sent;
u32 psn;
u32 lpsn;
union {
struct qib_sge rdma_sge;
u64 atomic_data;
};
};
/*
* Variables prefixed with s_ are for the requester (sender).
* Variables prefixed with r_ are for the responder (receiver).
* Variables prefixed with ack_ are for responder replies.
*
* Common variables are protected by both r_rq.lock and s_lock, in that order,
* which only happens in modify_qp() or when changing the QP 'state'.
*/
struct qib_qp {
struct ib_qp ibqp;
/* read mostly fields above and below */
struct ib_ah_attr remote_ah_attr;
struct ib_ah_attr alt_ah_attr;
struct qib_qp __rcu *next; /* link list for QPN hash table */
struct qib_swqe *s_wq; /* send work queue */
struct qib_mmap_info *ip;
struct qib_ib_header *s_hdr; /* next packet header to send */
unsigned long timeout_jiffies; /* computed from timeout */
enum ib_mtu path_mtu;
u32 remote_qpn;
u32 pmtu; /* decoded from path_mtu */
u32 qkey; /* QKEY for this QP (for UD or RD) */
u32 s_size; /* send work queue size */
u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */
u8 state; /* QP state */
u8 qp_access_flags;
u8 alt_timeout; /* Alternate path timeout for this QP */
u8 timeout; /* Timeout for this QP */
u8 s_srate;
u8 s_mig_state;
u8 port_num;
u8 s_pkey_index; /* PKEY index to use */
u8 s_alt_pkey_index; /* Alternate path PKEY index to use */
u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */
u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */
u8 s_retry_cnt; /* number of times to retry */
u8 s_rnr_retry_cnt;
u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */
u8 s_max_sge; /* size of s_wq->sg_list */
u8 s_draining;
/* start of read/write fields */
atomic_t refcount ____cacheline_aligned_in_smp;
wait_queue_head_t wait;
struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1]
____cacheline_aligned_in_smp;
struct qib_sge_state s_rdma_read_sge;
spinlock_t r_lock ____cacheline_aligned_in_smp; /* used for APM */
unsigned long r_aflags;
u64 r_wr_id; /* ID for current receive WQE */
u32 r_ack_psn; /* PSN for next ACK or atomic ACK */
u32 r_len; /* total length of r_sge */
u32 r_rcv_len; /* receive data len processed */
u32 r_psn; /* expected rcv packet sequence number */
u32 r_msn; /* message sequence number */
u8 r_state; /* opcode of last packet received */
u8 r_flags;
u8 r_head_ack_queue; /* index into s_ack_queue[] */
struct list_head rspwait; /* link for waiting to respond */
struct qib_sge_state r_sge; /* current receive data */
struct qib_rq r_rq; /* receive work queue */
spinlock_t s_lock ____cacheline_aligned_in_smp;
struct qib_sge_state *s_cur_sge;
u32 s_flags;
struct qib_verbs_txreq *s_tx;
struct qib_swqe *s_wqe;
struct qib_sge_state s_sge; /* current send request data */
struct qib_mregion *s_rdma_mr;
atomic_t s_dma_busy;
u32 s_cur_size; /* size of send packet in bytes */
u32 s_len; /* total length of s_sge */
u32 s_rdma_read_len; /* total length of s_rdma_read_sge */
u32 s_next_psn; /* PSN for next request */
u32 s_last_psn; /* last response PSN processed */
u32 s_sending_psn; /* lowest PSN that is being sent */
u32 s_sending_hpsn; /* highest PSN that is being sent */
u32 s_psn; /* current packet sequence number */
u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */
u32 s_ack_psn; /* PSN for acking sends and RDMA writes */
u32 s_head; /* new entries added here */
u32 s_tail; /* next entry to process */
u32 s_cur; /* current work queue entry */
u32 s_acked; /* last un-ACK'ed entry */
u32 s_last; /* last completed entry */
u32 s_ssn; /* SSN of tail entry */
u32 s_lsn; /* limit sequence number (credit) */
u16 s_hdrwords; /* size of s_hdr in 32 bit words */
u16 s_rdma_ack_cnt;
u8 s_state; /* opcode of last packet sent */
u8 s_ack_state; /* opcode of packet to ACK */
u8 s_nak_state; /* non-zero if NAK is pending */
u8 r_nak_state; /* non-zero if NAK is pending */
u8 s_retry; /* requester retry counter */
u8 s_rnr_retry; /* requester RNR retry counter */
u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */
u8 s_tail_ack_queue; /* index into s_ack_queue[] */
struct qib_sge_state s_ack_rdma_sge;
struct timer_list s_timer;
struct list_head iowait; /* link for wait PIO buf */
struct work_struct s_work;
wait_queue_head_t wait_dma;
struct qib_sge r_sg_list[0] /* verified SGEs */
____cacheline_aligned_in_smp;
};
/*
* Atomic bit definitions for r_aflags.
*/
#define QIB_R_WRID_VALID 0
#define QIB_R_REWIND_SGE 1
/*
* Bit definitions for r_flags.
*/
#define QIB_R_REUSE_SGE 0x01
#define QIB_R_RDMAR_SEQ 0x02
#define QIB_R_RSP_NAK 0x04
#define QIB_R_RSP_SEND 0x08
#define QIB_R_COMM_EST 0x10
/*
* Bit definitions for s_flags.
*
* QIB_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
* QIB_S_BUSY - send tasklet is processing the QP
* QIB_S_TIMER - the RC retry timer is active
* QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
* QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
* before processing the next SWQE
* QIB_S_WAIT_RDMAR - waiting for a RDMA read or atomic SWQE to complete
* before processing the next SWQE
* QIB_S_WAIT_RNR - waiting for RNR timeout
* QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
* QIB_S_WAIT_DMA - waiting for send DMA queue to drain before generating
* next send completion entry not via send DMA
* QIB_S_WAIT_PIO - waiting for a send buffer to be available
* QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available
* QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
* QIB_S_WAIT_KMEM - waiting for kernel memory to be available
* QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
* QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests
* QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK
*/
#define QIB_S_SIGNAL_REQ_WR 0x0001
#define QIB_S_BUSY 0x0002
#define QIB_S_TIMER 0x0004
#define QIB_S_RESP_PENDING 0x0008
#define QIB_S_ACK_PENDING 0x0010
#define QIB_S_WAIT_FENCE 0x0020
#define QIB_S_WAIT_RDMAR 0x0040
#define QIB_S_WAIT_RNR 0x0080
#define QIB_S_WAIT_SSN_CREDIT 0x0100
#define QIB_S_WAIT_DMA 0x0200
#define QIB_S_WAIT_PIO 0x0400
#define QIB_S_WAIT_TX 0x0800
#define QIB_S_WAIT_DMA_DESC 0x1000
#define QIB_S_WAIT_KMEM 0x2000
#define QIB_S_WAIT_PSN 0x4000
#define QIB_S_WAIT_ACK 0x8000
#define QIB_S_SEND_ONE 0x10000
#define QIB_S_UNLIMITED_CREDIT 0x20000
/*
* Wait flags that would prevent any packet type from being sent.
*/
#define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \
QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM)
/*
* Wait flags that would prevent send work requests from making progress.
*/
#define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \
QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \
QIB_S_WAIT_PSN | QIB_S_WAIT_ACK)
#define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND)
#define QIB_PSN_CREDIT 16
/*
* Since struct qib_swqe is not a fixed size, we can't simply index into
* struct qib_qp.s_wq. This function does the array index computation.
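* Each entry is a struct qib_swqe followed by qp->s_max_sge struct qib_sge
* entries, which determines the byte stride used below.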
*/
static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp,
unsigned n)
{
return (struct qib_swqe *)((char *)qp->s_wq +
(sizeof(struct qib_swqe) +
qp->s_max_sge *
sizeof(struct qib_sge)) * n);
}
/*
* Since struct qib_rwqe is not a fixed size, we can't simply index into
* struct qib_rwq.wq. This function does the array index computation.
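* Each entry is a struct qib_rwqe followed by rq->max_sge struct ib_sge
* entries, which determines the byte stride used below.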
*/
static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n)
{
return (struct qib_rwqe *)
((char *) rq->wq->wq +
(sizeof(struct qib_rwqe) +
rq->max_sge * sizeof(struct ib_sge)) * n);
}
/*
* QPN-map pages start out as NULL; they get allocated upon
* first use and are never deallocated. This way,
* large bitmaps are not allocated unless large numbers of QPs are used.
*/
struct qpn_map {
void *page;
};
struct qib_qpn_table {
spinlock_t lock; /* protect changes in this struct */
unsigned flags; /* flags for QP0/1 allocated for each port */
u32 last; /* last QP number allocated */
u32 nmaps; /* size of the map table */
u16 limit;
u16 mask;
/* bit map of free QP numbers other than 0/1 */
struct qpn_map map[QPNMAP_ENTRIES];
};
#define MAX_LKEY_TABLE_BITS 23
struct qib_lkey_table {
spinlock_t lock; /* protect changes in this struct */
u32 next; /* next unused index (speeds search) */
u32 gen; /* generation count */
u32 max; /* size of the table */
struct qib_mregion __rcu **table;
};
struct qib_opcode_stats {
u64 n_packets; /* number of packets */
u64 n_bytes; /* total number of bytes */
};
struct qib_opcode_stats_perctx {
struct qib_opcode_stats stats[128];
};
struct qib_pma_counters {
u64 n_unicast_xmit; /* total unicast packets sent */
u64 n_unicast_rcv; /* total unicast packets received */
u64 n_multicast_xmit; /* total multicast packets sent */
u64 n_multicast_rcv; /* total multicast packets received */
};
struct qib_ibport {
struct qib_qp __rcu *qp0;
struct qib_qp __rcu *qp1;
struct ib_mad_agent *send_agent; /* agent for SMI (traps) */
struct qib_ah *sm_ah;
struct qib_ah *smi_ah;
struct rb_root mcast_tree;
spinlock_t lock; /* protect changes in this struct */
/* non-zero when timer is set */
unsigned long mkey_lease_timeout;
unsigned long trap_timeout;
__be64 gid_prefix; /* in network order */
__be64 mkey;
__be64 guids[QIB_GUIDS_PER_PORT - 1]; /* writable GUIDs */
u64 tid; /* TID for traps */
struct qib_pma_counters __percpu *pmastats;
u64 z_unicast_xmit; /* starting count for PMA */
u64 z_unicast_rcv; /* starting count for PMA */
u64 z_multicast_xmit; /* starting count for PMA */
u64 z_multicast_rcv; /* starting count for PMA */
u64 z_symbol_error_counter; /* starting count for PMA */
u64 z_link_error_recovery_counter; /* starting count for PMA */
u64 z_link_downed_counter; /* starting count for PMA */
u64 z_port_rcv_errors; /* starting count for PMA */
u64 z_port_rcv_remphys_errors; /* starting count for PMA */
u64 z_port_xmit_discards; /* starting count for PMA */
u64 z_port_xmit_data; /* starting count for PMA */
u64 z_port_rcv_data; /* starting count for PMA */
u64 z_port_xmit_packets; /* starting count for PMA */
u64 z_port_rcv_packets; /* starting count for PMA */
u32 z_local_link_integrity_errors; /* starting count for PMA */
u32 z_excessive_buffer_overrun_errors; /* starting count for PMA */
u32 z_vl15_dropped; /* starting count for PMA */
u32 n_rc_resends;
u32 n_rc_acks;
u32 n_rc_qacks;
u32 n_rc_delayed_comp;
u32 n_seq_naks;
u32 n_rdma_seq;
u32 n_rnr_naks;
u32 n_other_naks;
u32 n_loop_pkts;
u32 n_pkt_drops;
u32 n_vl15_dropped;
u32 n_rc_timeouts;
u32 n_dmawait;
u32 n_unaligned;
u32 n_rc_dupreq;
u32 n_rc_seqnak;
u32 port_cap_flags;
u32 pma_sample_start;
u32 pma_sample_interval;
__be16 pma_counter_select[5];
u16 pma_tag;
u16 pkey_violations;
u16 qkey_violations;
u16 mkey_violations;
u16 mkey_lease_period;
u16 sm_lid;
u16 repress_traps;
u8 sm_sl;
u8 mkeyprot;
u8 subnet_timeout;
u8 vl_high_limit;
u8 sl_to_vl[16];
};
struct qib_ibdev {
struct rvt_dev_info rdi;
struct list_head pending_mmaps;
spinlock_t mmap_offset_lock; /* protect mmap_offset */
u32 mmap_offset;
struct qib_mregion __rcu *dma_mr;
/* QP numbers are shared by all IB ports */
struct qib_qpn_table qpn_table;
struct qib_lkey_table lk_table;
struct list_head piowait; /* list for wait PIO buf */
struct list_head dmawait; /* list for wait DMA */
struct list_head txwait; /* list for wait qib_verbs_txreq */
struct list_head memwait; /* list for wait kernel memory */
struct list_head txreq_free;
struct timer_list mem_timer;
struct qib_qp __rcu **qp_table;
struct qib_pio_header *pio_hdrs;
dma_addr_t pio_hdrs_phys;
/* list of QPs waiting for RNR timer */
spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */
u32 qp_table_size; /* size of the hash table */
u32 qp_rnd; /* random bytes for hash */
spinlock_t qpt_lock;
u32 n_piowait;
u32 n_txwait;
u32 n_pds_allocated; /* number of PDs allocated for device */
spinlock_t n_pds_lock;
u32 n_ahs_allocated; /* number of AHs allocated for device */
spinlock_t n_ahs_lock;
u32 n_cqs_allocated; /* number of CQs allocated for device */
spinlock_t n_cqs_lock;
u32 n_qps_allocated; /* number of QPs allocated for device */
spinlock_t n_qps_lock;
u32 n_srqs_allocated; /* number of SRQs allocated for device */
spinlock_t n_srqs_lock;
u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
spinlock_t n_mcast_grps_lock;
#ifdef CONFIG_DEBUG_FS
/* per HCA debugfs */
struct dentry *qib_ibdev_dbg;
#endif
};
struct qib_verbs_counters {
u64 symbol_error_counter;
u64 link_error_recovery_counter;
u64 link_downed_counter;
u64 port_rcv_errors;
u64 port_rcv_remphys_errors;
u64 port_xmit_discards;
u64 port_xmit_data;
u64 port_rcv_data;
u64 port_xmit_packets;
u64 port_rcv_packets;
u32 local_link_integrity_errors;
u32 excessive_buffer_overrun_errors;
u32 vl15_dropped;
};
static inline struct qib_mr *to_imr(struct ib_mr *ibmr)
{
return container_of(ibmr, struct qib_mr, ibmr);
}
static inline struct qib_pd *to_ipd(struct ib_pd *ibpd)
{
return container_of(ibpd, struct qib_pd, ibpd);
}
static inline struct qib_ah *to_iah(struct ib_ah *ibah)
{
return container_of(ibah, struct qib_ah, ibah);
}
static inline struct qib_cq *to_icq(struct ib_cq *ibcq)
{
return container_of(ibcq, struct qib_cq, ibcq);
}
static inline struct qib_srq *to_isrq(struct ib_srq *ibsrq)
{
return container_of(ibsrq, struct qib_srq, ibsrq);
}
static inline struct qib_qp *to_iqp(struct ib_qp *ibqp)
{
return container_of(ibqp, struct qib_qp, ibqp);
}
static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
{
struct rvt_dev_info *rdi;
rdi = container_of(ibdev, struct rvt_dev_info, ibdev);
return container_of(rdi, struct qib_ibdev, rdi);
}
/*
* Send if not busy or waiting for I/O, and either
* an RC response is pending or we can process send work requests.
*/
static inline int qib_send_ok(struct qib_qp *qp)
{
return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) &&
(qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) ||
!(qp->s_flags & QIB_S_ANY_WAIT_SEND));
}
/*
* This must be called with s_lock held.
*/
void qib_schedule_send(struct qib_qp *qp);
static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
{
u16 p1 = pkey1 & 0x7FFF;
u16 p2 = pkey2 & 0x7FFF;
/*
* Low 15 bits must be non-zero and match, and
* one of the two must be a full member.
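* Bit 15 is the full-membership bit, so casting a key to __s16 yields a
* negative value exactly when it is a full-member key.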
*/
return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0);
}
void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
void qib_cap_mask_chg(struct qib_ibport *ibp);
void qib_sys_guid_chg(struct qib_ibport *ibp);
void qib_node_desc_chg(struct qib_ibport *ibp);
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad_hdr *in, size_t in_mad_size,
struct ib_mad_hdr *out, size_t *out_mad_size,
u16 *out_mad_pkey_index);
int qib_create_agents(struct qib_ibdev *dev);
void qib_free_agents(struct qib_ibdev *dev);
/*
* Compare the lower 24 bits of the two values.
* Returns an integer less than, equal to, or greater than zero.
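* The left shift by 8 drops the upper 8 bits of the 32-bit difference, so
* the sign of the result comes from bit 23 of (a - b), i.e. the comparison
* wraps modulo 2^24 (PSN-style).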
*/
static inline int qib_cmp24(u32 a, u32 b)
{
return (((int) a) - ((int) b)) << 8;
}
struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid);
int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
u64 *rwords, u64 *spkts, u64 *rpkts,
u64 *xmit_wait);
int qib_get_counters(struct qib_pportdata *ppd,
struct qib_verbs_counters *cntrs);
int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
int qib_mcast_tree_empty(struct qib_ibport *ibp);
__be32 qib_compute_aeth(struct qib_qp *qp);
struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn);
struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
int qib_destroy_qp(struct ib_qp *ibqp);
int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err);
int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_qp_init_attr *init_attr);
unsigned qib_free_all_qps(struct qib_devdata *dd);
void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt);
void qib_free_qpn_table(struct qib_qpn_table *qpt);
#ifdef CONFIG_DEBUG_FS
struct qib_qp_iter;
struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev);
int qib_qp_iter_next(struct qib_qp_iter *iter);
void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter);
#endif
void qib_get_credit(struct qib_qp *qp, u32 aeth);
unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);
void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);
void qib_put_txreq(struct qib_verbs_txreq *tx);
int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
u32 hdrwords, struct qib_sge_state *ss, u32 len);
void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length,
int release);
void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release);
void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
int has_grh, void *data, u32 tlen, struct qib_qp *qp);
void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
int has_grh, void *data, u32 tlen, struct qib_qp *qp);
int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);
struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid);
void qib_rc_rnr_retry(unsigned long arg);
void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr);
void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err);
int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);
void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
int has_grh, void *data, u32 tlen, struct qib_qp *qp);
int qib_alloc_lkey(struct qib_mregion *mr, int dma_region);
void qib_free_lkey(struct qib_mregion *mr);
int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
struct qib_sge *isge, struct ib_sge *sge, int acc);
int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
u32 len, u64 vaddr, u32 rkey, int acc);
int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr);
struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
struct ib_srq_init_attr *srq_init_attr,
struct ib_udata *udata);
int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask,
struct ib_udata *udata);
int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
int qib_destroy_srq(struct ib_srq *ibsrq);
int qib_cq_init(struct qib_devdata *dd);
void qib_cq_exit(struct qib_devdata *dd);
void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);
int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
struct ib_cq *qib_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata);
int qib_destroy_cq(struct ib_cq *ibcq);
int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
struct ib_udata *udata);
int qib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *qib_alloc_mr(struct ib_pd *pd,
enum ib_mr_type mr_type,
u32 max_entries);
int qib_map_mr_sg(struct ib_mr *ibmr,
struct scatterlist *sg,
int sg_nents);
int qib_reg_mr(struct qib_qp *qp, struct ib_reg_wr *wr);
struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
struct ib_fmr_attr *fmr_attr);
int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
int list_len, u64 iova);
int qib_unmap_fmr(struct list_head *fmr_list);
int qib_dealloc_fmr(struct ib_fmr *ibfmr);
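/*
 * Memory region reference counting: qib_get_mr() takes a reference and
 * qib_put_mr() drops one. The final put defers the actual free to an RCU
 * callback so that RCU readers (e.g. of the lkey table) can finish first.
 */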
static inline void qib_get_mr(struct qib_mregion *mr)
{
atomic_inc(&mr->refcount);
}
void mr_rcu_callback(struct rcu_head *list);
static inline void qib_put_mr(struct qib_mregion *mr)
{
if (unlikely(atomic_dec_and_test(&mr->refcount)))
call_rcu(&mr->list, mr_rcu_callback);
}
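/*
 * Drop the memory region references still held by an SGE state, typically
 * after the transfer that was using it has completed.
 */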
static inline void qib_put_ss(struct qib_sge_state *ss)
{
while (ss->num_sge) {
qib_put_mr(ss->sge.mr);
if (--ss->num_sge)
ss->sge = *ss->sg_list++;
}
}
void qib_release_mmap_info(struct kref *ref);
struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
struct ib_ucontext *context,
void *obj);
void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
u32 size, void *obj);
int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
int qib_get_rwqe(struct qib_qp *qp, int wr_id_only);
void qib_migrate_qp(struct qib_qp *qp);
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
int has_grh, struct qib_qp *qp, u32 bth0);
u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
struct ib_global_route *grh, u32 hwords, u32 nwords);
void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
u32 bth0, u32 bth2);
void qib_do_send(struct work_struct *work);
void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
enum ib_wc_status status);
void qib_send_rc_ack(struct qib_qp *qp);
int qib_make_rc_req(struct qib_qp *qp);
int qib_make_uc_req(struct qib_qp *qp);
int qib_make_ud_req(struct qib_qp *qp);
int qib_register_ib_device(struct qib_devdata *);
void qib_unregister_ib_device(struct qib_devdata *);
void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32);
void qib_ib_piobufavail(struct qib_devdata *);
unsigned qib_get_npkeys(struct qib_devdata *);
unsigned qib_get_pkey(struct qib_ibport *, unsigned);
extern const enum ib_wc_opcode ib_qib_wc_opcode[];
/*
* Below are the HCA-independent IB PhysPortState values, returned
* by the f_ibphys_portstate() routine.
*/
#define IB_PHYSPORTSTATE_SLEEP 1
#define IB_PHYSPORTSTATE_POLL 2
#define IB_PHYSPORTSTATE_DISABLED 3
#define IB_PHYSPORTSTATE_CFG_TRAIN 4
#define IB_PHYSPORTSTATE_LINKUP 5
#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
#define IB_PHYSPORTSTATE_CFG_DEBOUNCE 8
#define IB_PHYSPORTSTATE_CFG_IDLE 0xB
#define IB_PHYSPORTSTATE_RECOVERY_RETRAIN 0xC
#define IB_PHYSPORTSTATE_RECOVERY_WAITRMT 0xE
#define IB_PHYSPORTSTATE_RECOVERY_IDLE 0xF
#define IB_PHYSPORTSTATE_CFG_ENH 0x10
#define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13
extern const int ib_qib_state_ops[];
extern __be64 ib_qib_sys_image_guid; /* in network order */
extern unsigned int ib_qib_lkey_table_size;
extern unsigned int ib_qib_max_cqes;
extern unsigned int ib_qib_max_cqs;
extern unsigned int ib_qib_max_qp_wrs;
extern unsigned int ib_qib_max_qps;
extern unsigned int ib_qib_max_sges;
extern unsigned int ib_qib_max_mcast_grps;
extern unsigned int ib_qib_max_mcast_qp_attached;
extern unsigned int ib_qib_max_srqs;
extern unsigned int ib_qib_max_srq_sges;
extern unsigned int ib_qib_max_srq_wrs;
extern const u32 ib_qib_rnr_table[];
extern struct ib_dma_mapping_ops qib_dma_mapping_ops;
#endif /* QIB_VERBS_H */