/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FSL_QMAN_H
#define __FSL_QMAN_H
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/types.h>
/* Hardware constants */
#define QM_CHANNEL_SWPORTAL0 0
#define QMAN_CHANNEL_POOL1 0x21
#define QMAN_CHANNEL_POOL1_REV3 0x401
extern u16 qm_channel_pool1;
/* Portal processing (interrupt) sources */
#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */
#define QM_PIRQ_EQCI 0x00080000 /* Enqueue Command Committed */
#define QM_PIRQ_EQRI 0x00040000 /* EQCR Ring (below threshold) */
#define QM_PIRQ_DQRI 0x00020000 /* DQRR Ring (non-empty) */
#define QM_PIRQ_MRI 0x00010000 /* MR Ring (non-empty) */
/*
* This mask contains all the interrupt sources that need handling except DQRI,
* ie. that if present should trigger slow-path processing.
*/
#define QM_PIRQ_SLOW (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
QM_PIRQ_MRI)
/* For qman_static_dequeue_*** APIs */
#define QM_SDQCR_CHANNELS_POOL_MASK 0x00007fff
/* for n in [1,15] */
#define QM_SDQCR_CHANNELS_POOL(n) (0x00008000 >> (n))
/* for conversion from n of qm_channel */
static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
{
return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
}
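/*
 * Example (illustrative): for the third pool channel, i.e. channel ==
 * qm_channel_pool1 + 2, the conversion yields
 * QM_SDQCR_CHANNELS_POOL(3) == 0x8000 >> 3 == 0x1000.
 */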
/* --- QMan data structures (and associated constants) --- */
/* "Frame Descriptor (FD)" */
struct qm_fd {
union {
struct {
u8 cfg8b_w1;
u8 bpid; /* Buffer Pool ID */
u8 cfg8b_w3;
u8 addr_hi; /* high 8-bits of 40-bit address */
__be32 addr_lo; /* low 32-bits of 40-bit address */
} __packed;
__be64 data;
};
__be32 cfg; /* format, offset, length / congestion */
union {
__be32 cmd;
__be32 status;
};
} __aligned(8);
#define QM_FD_FORMAT_SG BIT(31)
#define QM_FD_FORMAT_LONG BIT(30)
#define QM_FD_FORMAT_COMPOUND BIT(29)
#define QM_FD_FORMAT_MASK GENMASK(31, 29)
#define QM_FD_OFF_SHIFT 20
#define QM_FD_OFF_MASK GENMASK(28, 20)
#define QM_FD_LEN_MASK GENMASK(19, 0)
#define QM_FD_LEN_BIG_MASK GENMASK(28, 0)
enum qm_fd_format {
/*
* 'contig' implies a contiguous buffer, whereas 'sg' implies a
* scatter-gather table. 'big' implies a 29-bit length with no offset
* field, otherwise length is 20-bit and offset is 9-bit. 'compound'
* implies a s/g-like table, where each entry itself represents a frame
* (contiguous or scatter-gather) and the 29-bit "length" is
* interpreted purely for congestion calculations, ie. a "congestion
* weight".
*/
qm_fd_contig = 0,
qm_fd_contig_big = QM_FD_FORMAT_LONG,
qm_fd_sg = QM_FD_FORMAT_SG,
qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
qm_fd_compound = QM_FD_FORMAT_COMPOUND
};
static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
{
return be64_to_cpu(fd->data) & 0xffffffffffLLU;
}
static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
{
return be64_to_cpu(fd->data) & 0xffffffffffLLU;
}
static inline void qm_fd_addr_set64(struct qm_fd *fd, u64 addr)
{
fd->addr_hi = upper_32_bits(addr);
fd->addr_lo = cpu_to_be32(lower_32_bits(addr));
}
/*
* The 'format' field indicates the interpretation of the remaining
* 29 bits of the 32-bit word.
* If 'format' is _contig or _sg, 20b length and 9b offset.
* If 'format' is _contig_big or _sg_big, 29b length.
* If 'format' is _compound, 29b "congestion weight".
*/
static inline enum qm_fd_format qm_fd_get_format(const struct qm_fd *fd)
{
return be32_to_cpu(fd->cfg) & QM_FD_FORMAT_MASK;
}
static inline int qm_fd_get_offset(const struct qm_fd *fd)
{
return (be32_to_cpu(fd->cfg) & QM_FD_OFF_MASK) >> QM_FD_OFF_SHIFT;
}
static inline int qm_fd_get_length(const struct qm_fd *fd)
{
return be32_to_cpu(fd->cfg) & QM_FD_LEN_MASK;
}
static inline int qm_fd_get_len_big(const struct qm_fd *fd)
{
return be32_to_cpu(fd->cfg) & QM_FD_LEN_BIG_MASK;
}
static inline void qm_fd_set_param(struct qm_fd *fd, enum qm_fd_format fmt,
int off, int len)
{
fd->cfg = cpu_to_be32(fmt | (len & QM_FD_LEN_BIG_MASK) |
((off << QM_FD_OFF_SHIFT) & QM_FD_OFF_MASK));
}
#define qm_fd_set_contig(fd, off, len) \
qm_fd_set_param(fd, qm_fd_contig, off, len)
#define qm_fd_set_sg(fd, off, len) qm_fd_set_param(fd, qm_fd_sg, off, len)
#define qm_fd_set_contig_big(fd, len) \
qm_fd_set_param(fd, qm_fd_contig_big, 0, len)
#define qm_fd_set_sg_big(fd, len) qm_fd_set_param(fd, qm_fd_sg_big, 0, len)
static inline void qm_fd_clear_fd(struct qm_fd *fd)
{
fd->data = 0;
fd->cfg = 0;
fd->cmd = 0;
}
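/*
 * Example (illustrative sketch): preparing an FD for a single contiguous
 * buffer. 'dma' (a DMA-mapped 40-bit address) and the 64-byte offset and
 * 1500-byte length are hypothetical values.
 *
 *     struct qm_fd fd;
 *
 *     qm_fd_clear_fd(&fd);
 *     qm_fd_addr_set64(&fd, dma);
 *     qm_fd_set_contig(&fd, 64, 1500); // 9-bit offset, 20-bit length
 */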
/* Scatter/Gather table entry */
struct qm_sg_entry {
union {
struct {
u8 __reserved1[3];
u8 addr_hi; /* high 8-bits of 40-bit address */
__be32 addr_lo; /* low 32-bits of 40-bit address */
};
__be64 data;
};
__be32 cfg; /* E bit, F bit, length */
u8 __reserved2;
u8 bpid;
	__be16 offset; /* 13-bit, _res[13-15] */
} __packed;
#define QM_SG_LEN_MASK GENMASK(29, 0)
#define QM_SG_OFF_MASK GENMASK(12, 0)
#define QM_SG_FIN BIT(30)
#define QM_SG_EXT BIT(31)
static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
{
return be64_to_cpu(sg->data) & 0xffffffffffLLU;
}
static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)
{
return be64_to_cpu(sg->data) & 0xffffffffffLLU;
}
static inline void qm_sg_entry_set64(struct qm_sg_entry *sg, u64 addr)
{
sg->addr_hi = upper_32_bits(addr);
sg->addr_lo = cpu_to_be32(lower_32_bits(addr));
}
static inline bool qm_sg_entry_is_final(const struct qm_sg_entry *sg)
{
return be32_to_cpu(sg->cfg) & QM_SG_FIN;
}
static inline bool qm_sg_entry_is_ext(const struct qm_sg_entry *sg)
{
return be32_to_cpu(sg->cfg) & QM_SG_EXT;
}
static inline int qm_sg_entry_get_len(const struct qm_sg_entry *sg)
{
return be32_to_cpu(sg->cfg) & QM_SG_LEN_MASK;
}
static inline void qm_sg_entry_set_len(struct qm_sg_entry *sg, int len)
{
sg->cfg = cpu_to_be32(len & QM_SG_LEN_MASK);
}
static inline void qm_sg_entry_set_f(struct qm_sg_entry *sg, int len)
{
sg->cfg = cpu_to_be32(QM_SG_FIN | (len & QM_SG_LEN_MASK));
}
static inline int qm_sg_entry_get_off(const struct qm_sg_entry *sg)
{
return be32_to_cpu(sg->offset) & QM_SG_OFF_MASK;
}
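/*
 * Example (illustrative sketch): building a two-entry scatter/gather table.
 * The table must live in DMA-able memory; 'dma0', 'dma1' and the lengths are
 * hypothetical. An FD would then reference the table's address with format
 * qm_fd_sg.
 *
 *     struct qm_sg_entry sg[2];
 *
 *     memset(sg, 0, sizeof(sg));
 *     qm_sg_entry_set64(&sg[0], dma0);
 *     qm_sg_entry_set_len(&sg[0], len0);
 *     qm_sg_entry_set64(&sg[1], dma1);
 *     qm_sg_entry_set_f(&sg[1], len1); // final entry: F bit + length
 */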
/* "Frame Dequeue Response" */
struct qm_dqrr_entry {
u8 verb;
u8 stat;
	__be16 seqnum; /* 15-bit */
	u8 tok;
	u8 __reserved2[3];
	__be32 fqid; /* 24-bit */
	__be32 context_b;
struct qm_fd fd;
u8 __reserved4[32];
} __packed;
#define QM_DQRR_VERB_VBIT 0x80
#define QM_DQRR_VERB_MASK 0x7f /* where the verb contains; */
#define QM_DQRR_VERB_FRAME_DEQUEUE 0x60 /* "this format" */
#define QM_DQRR_STAT_FQ_EMPTY 0x80 /* FQ empty */
#define QM_DQRR_STAT_FQ_HELDACTIVE 0x40 /* FQ held active */
#define QM_DQRR_STAT_FQ_FORCEELIGIBLE 0x20 /* FQ was force-eligible'd */
#define QM_DQRR_STAT_FD_VALID 0x10 /* has a non-NULL FD */
#define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */
#define QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired */
/* 'fqid' is a 24-bit field in every h/w descriptor */
#define QM_FQID_MASK GENMASK(23, 0)
#define qm_fqid_set(p, v) ((p)->fqid = cpu_to_be32((v) & QM_FQID_MASK))
#define qm_fqid_get(p) (be32_to_cpu((p)->fqid) & QM_FQID_MASK)
/* "ERN Message Response" */
/* "FQ State Change Notification" */
union qm_mr_entry {
struct {
u8 verb;
u8 __reserved[63];
};
struct {
u8 verb;
u8 dca;
		__be16 seqnum;
		u8 rc; /* Rej Code: 8-bit */
		u8 __reserved[3];
		__be32 fqid; /* 24-bit */
		__be32 tag;
struct qm_fd fd;
u8 __reserved1[32];
} __packed ern;
struct {
u8 verb;
u8 fqs; /* Frame Queue Status */
u8 __reserved1[6];
		__be32 fqid; /* 24-bit */
		__be32 context_b;
u8 __reserved2[48];
} __packed fq; /* FQRN/FQRNI/FQRL/FQPN */
};
#define QM_MR_VERB_VBIT 0x80
/*
* ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
* which would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished
* from the other MR types by noting if the 0x20 bit is unset.
*/
#define QM_MR_VERB_TYPE_MASK 0x27
#define QM_MR_VERB_DC_ERN 0x20
#define QM_MR_VERB_FQRN 0x21
#define QM_MR_VERB_FQRNI 0x22
#define QM_MR_VERB_FQRL 0x23
#define QM_MR_VERB_FQPN 0x24
#define QM_MR_RC_MASK 0xf0 /* contains one of; */
#define QM_MR_RC_CGR_TAILDROP 0x00
#define QM_MR_RC_WRED 0x10
#define QM_MR_RC_ERROR 0x20
#define QM_MR_RC_ORPWINDOW_EARLY 0x30
#define QM_MR_RC_ORPWINDOW_LATE 0x40
#define QM_MR_RC_FQ_TAILDROP 0x50
#define QM_MR_RC_ORPWINDOW_RETIRED 0x60
#define QM_MR_RC_ORP_ZERO 0x70
#define QM_MR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
#define QM_MR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
/*
* An identical structure of FQD fields is present in the "Init FQ" command and
* the "Query FQ" result, it's suctioned out into the "struct qm_fqd" type.
* Within that, the 'stashing' and 'taildrop' pieces are also factored out, the
* latter has two inlines to assist with converting to/from the mant+exp
* representation.
*/
struct qm_fqd_stashing {
/* See QM_STASHING_EXCL_<...> */
u8 exclusive;
/* Numbers of cachelines */
u8 cl; /* _res[6-7], as[4-5], ds[2-3], cs[0-1] */
};
struct qm_fqd_oac {
/* "Overhead Accounting Control", see QM_OAC_<...> */
u8 oac; /* oac[6-7], _res[0-5] */
/* Two's-complement value (-128 to +127) */
s8 oal; /* "Overhead Accounting Length" */
};
struct qm_fqd {
/* _res[6-7], orprws[3-5], oa[2], olws[0-1] */
u8 orpc;
u8 cgid;
__be16 fq_ctrl; /* See QM_FQCTRL_<...> */
__be16 dest_wq; /* channel[3-15], wq[0-2] */
__be16 ics_cred; /* 15-bit */
/*
* For "Initialize Frame Queue" commands, the write-enable mask
* determines whether 'td' or 'oac_init' is observed. For query
* commands, this field is always 'td', and 'oac_query' (below) reflects
* the Overhead ACcounting values.
*/
union {
__be16 td; /* "Taildrop": _res[13-15], mant[5-12], exp[0-4] */
struct qm_fqd_oac oac_init;
};
__be32 context_b;
union {
/* Treat it as 64-bit opaque */
__be64 opaque;
struct {
__be32 hi;
__be32 lo;
};
/* Treat it as s/w portal stashing config */
/* see "FQD Context_A field used for [...]" */
struct {
struct qm_fqd_stashing stashing;
/*
* 48-bit address of FQ context to
* stash, must be cacheline-aligned
*/
__be16 context_hi;
__be32 context_lo;
} __packed;
} context_a;
struct qm_fqd_oac oac_query;
} __packed;
#define QM_FQD_CHAN_OFF 3
#define QM_FQD_WQ_MASK GENMASK(2, 0)
#define QM_FQD_TD_EXP_MASK GENMASK(4, 0)
#define QM_FQD_TD_MANT_OFF 5
#define QM_FQD_TD_MANT_MASK GENMASK(12, 5)
#define QM_FQD_TD_MAX 0xe0000000
#define QM_FQD_TD_MANT_MAX 0xff
#define QM_FQD_OAC_OFF 6
#define QM_FQD_AS_OFF 4
#define QM_FQD_DS_OFF 2
#define QM_FQD_XS_MASK 0x3
/* 64-bit converters for context_hi/lo */
static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
{
return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL;
}
static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
{
return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL;
}
static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
{
return qm_fqd_stashing_get64(fqd);
}
static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
{
	fqd->context_a.context_hi = cpu_to_be16(upper_32_bits(addr));
	fqd->context_a.context_lo = cpu_to_be32(lower_32_bits(addr));
}
static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
{
fqd->context_a.hi = cpu_to_be32(upper_32_bits(addr));
fqd->context_a.lo = cpu_to_be32(lower_32_bits(addr));
}
/* convert a threshold value into mant+exp representation */
static inline int qm_fqd_set_taildrop(struct qm_fqd *fqd, u32 val,
int roundup)
{
u32 e = 0;
int td, oddbit = 0;
if (val > QM_FQD_TD_MAX)
return -ERANGE;
while (val > QM_FQD_TD_MANT_MAX) {
oddbit = val & 1;
val >>= 1;
e++;
if (roundup && oddbit)
val++;
}
td = (val << QM_FQD_TD_MANT_OFF) & QM_FQD_TD_MANT_MASK;
td |= (e & QM_FQD_TD_EXP_MASK);
fqd->td = cpu_to_be16(td);
return 0;
}
/* and the other direction */
static inline int qm_fqd_get_taildrop(const struct qm_fqd *fqd)
{
int td = be16_to_cpu(fqd->td);
return ((td & QM_FQD_TD_MANT_MASK) >> QM_FQD_TD_MANT_OFF)
<< (td & QM_FQD_TD_EXP_MASK);
}
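/*
 * Worked example (illustrative): qm_fqd_set_taildrop(fqd, 1000, 1) halves
 * 1000 twice to fit the 8-bit mantissa, giving mant = 250, exp = 2 (no odd
 * bits are shifted out, so no rounding occurs); qm_fqd_get_taildrop() then
 * reconstructs 250 << 2 = 1000 exactly.
 */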
static inline void qm_fqd_set_stashing(struct qm_fqd *fqd, u8 as, u8 ds, u8 cs)
{
struct qm_fqd_stashing *st = &fqd->context_a.stashing;
st->cl = ((as & QM_FQD_XS_MASK) << QM_FQD_AS_OFF) |
((ds & QM_FQD_XS_MASK) << QM_FQD_DS_OFF) |
(cs & QM_FQD_XS_MASK);
}
static inline u8 qm_fqd_get_stashing(const struct qm_fqd *fqd)
{
return fqd->context_a.stashing.cl;
}
static inline void qm_fqd_set_oac(struct qm_fqd *fqd, u8 val)
{
fqd->oac_init.oac = val << QM_FQD_OAC_OFF;
}
static inline void qm_fqd_set_oal(struct qm_fqd *fqd, s8 val)
{
fqd->oac_init.oal = val;
}
static inline void qm_fqd_set_destwq(struct qm_fqd *fqd, int ch, int wq)
{
fqd->dest_wq = cpu_to_be16((ch << QM_FQD_CHAN_OFF) |
(wq & QM_FQD_WQ_MASK));
}
static inline int qm_fqd_get_chan(const struct qm_fqd *fqd)
{
return be16_to_cpu(fqd->dest_wq) >> QM_FQD_CHAN_OFF;
}
static inline int qm_fqd_get_wq(const struct qm_fqd *fqd)
{
return be16_to_cpu(fqd->dest_wq) & QM_FQD_WQ_MASK;
}
/* See "Frame Queue Descriptor (FQD)" */
/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
#define QM_FQCTRL_MASK 0x07ff /* 'fq_ctrl' flags; */
#define QM_FQCTRL_CGE 0x0400 /* Congestion Group Enable */
#define QM_FQCTRL_TDE 0x0200 /* Tail-Drop Enable */
#define QM_FQCTRL_CTXASTASHING 0x0080 /* Context-A stashing */
#define QM_FQCTRL_CPCSTASH 0x0040 /* CPC Stash Enable */
#define QM_FQCTRL_FORCESFDR 0x0008 /* High-priority SFDRs */
#define QM_FQCTRL_AVOIDBLOCK 0x0004 /* Don't block active */
#define QM_FQCTRL_HOLDACTIVE 0x0002 /* Hold active in portal */
#define QM_FQCTRL_PREFERINCACHE 0x0001 /* Aggressively cache FQD */
#define QM_FQCTRL_LOCKINCACHE QM_FQCTRL_PREFERINCACHE /* older naming */
/* See "FQD Context_A field used for [...] */
/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
#define QM_STASHING_EXCL_ANNOTATION 0x04
#define QM_STASHING_EXCL_DATA 0x02
#define QM_STASHING_EXCL_CTX 0x01
/* See "Intra Class Scheduling" */
/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
#define QM_OAC_ICS 0x2 /* Accounting for Intra-Class Scheduling */
#define QM_OAC_CG 0x1 /* Accounting for Congestion Groups */
/*
* This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
* and associated commands/responses. The WRED parameters are calculated from
* these fields as follows;
* MaxTH = MA * (2 ^ Mn)
* Slope = SA / (2 ^ Sn)
* MaxP = 4 * (Pn + 1)
*/
struct qm_cgr_wr_parm {
/* MA[24-31], Mn[19-23], SA[12-18], Sn[6-11], Pn[0-5] */
	__be32 word;
};
/*
* This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
* management commands, this is padded to a 16-bit structure field, so that's
* how we represent it here. The congestion state threshold is calculated from
* these fields as follows;
* CS threshold = TA * (2 ^ Tn)
*/
struct qm_cgr_cs_thres {
/* _res[13-15], TA[5-12], Tn[0-4] */
	__be16 word;
};
/*
* This identical structure of CGR fields is present in the "Init/Modify CGR"
* commands and the "Query CGR" result. It's suctioned out here into its own
* struct.
*/
struct __qm_mc_cgr {
struct qm_cgr_wr_parm wr_parm_g;
struct qm_cgr_wr_parm wr_parm_y;
struct qm_cgr_wr_parm wr_parm_r;
u8 wr_en_g; /* boolean, use QM_CGR_EN */
u8 wr_en_y; /* boolean, use QM_CGR_EN */
u8 wr_en_r; /* boolean, use QM_CGR_EN */
u8 cscn_en; /* boolean, use QM_CGR_EN */
union {
struct {
			__be16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_* */
			__be16 cscn_targ_dcp_low;
		};
		__be32 cscn_targ; /* use QM_CGR_TARG_* */
};
u8 cstd_en; /* boolean, use QM_CGR_EN */
u8 cs; /* boolean, only used in query response */
struct qm_cgr_cs_thres cs_thres; /* use qm_cgr_cs_thres_set64() */
u8 mode; /* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
} __packed;
#define QM_CGR_EN 0x01 /* For wr_en_*, cscn_en, cstd_en */
#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT 0x8000 /* value written to portal bit*/
#define QM_CGR_TARG_UDP_CTRL_DCP 0x4000 /* 0: SWP, 1: DCP */
#define QM_CGR_TARG_PORTAL(n) (0x80000000 >> (n)) /* s/w portal, 0-9 */
#define QM_CGR_TARG_FMAN0 0x00200000 /* direct-connect portal: fman0 */
#define QM_CGR_TARG_FMAN1 0x00100000 /* : fman1 */
/* Convert CGR thresholds to/from "cs_thres" format */
static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
{
	int thres = be16_to_cpu(th->word);
	return ((thres >> 5) & 0xff) << (thres & 0x1f);
}
static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
int roundup)
{
u32 e = 0;
int oddbit = 0;
while (val > 0xff) {
oddbit = val & 1;
val >>= 1;
e++;
if (roundup && oddbit)
val++;
}
	th->word = cpu_to_be16(((val & 0xff) << 5) | (e & 0x1f));
return 0;
}
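/*
 * Worked example (illustrative): qm_cgr_cs_thres_set64(th, 0x10000, 0)
 * shifts 65536 down nine times to fit the 8-bit mantissa, giving TA = 128,
 * Tn = 9; qm_cgr_cs_thres_get64() then reconstructs 128 * 2^9 = 65536
 * exactly.
 */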
/* "Initialize FQ" */
struct qm_mcc_initfq {
u8 __reserved1[2];
	__be16 we_mask; /* Write Enable Mask */
	__be32 fqid; /* 24-bit */
	__be16 count; /* Initialises 'count+1' FQDs */
struct qm_fqd fqd; /* the FQD fields go here */
u8 __reserved2[30];
} __packed;
/* "Initialize/Modify CGR" */
struct qm_mcc_initcgr {
u8 __reserve1[2];
	__be16 we_mask; /* Write Enable Mask */
struct __qm_mc_cgr cgr; /* CGR fields */
u8 __reserved2[2];
u8 cgid;
u8 __reserved3[32];
} __packed;
/* INITFQ-specific flags */
#define QM_INITFQ_WE_MASK 0x01ff /* 'Write Enable' flags; */
#define QM_INITFQ_WE_OAC 0x0100
#define QM_INITFQ_WE_ORPC 0x0080
#define QM_INITFQ_WE_CGID 0x0040
#define QM_INITFQ_WE_FQCTRL 0x0020
#define QM_INITFQ_WE_DESTWQ 0x0010
#define QM_INITFQ_WE_ICSCRED 0x0008
#define QM_INITFQ_WE_TDTHRESH 0x0004
#define QM_INITFQ_WE_CONTEXTB 0x0002
#define QM_INITFQ_WE_CONTEXTA 0x0001
/* INITCGR/MODIFYCGR-specific flags */
#define QM_CGR_WE_MASK 0x07ff /* 'Write Enable Mask'; */
#define QM_CGR_WE_WR_PARM_G 0x0400
#define QM_CGR_WE_WR_PARM_Y 0x0200
#define QM_CGR_WE_WR_PARM_R 0x0100
#define QM_CGR_WE_WR_EN_G 0x0080
#define QM_CGR_WE_WR_EN_Y 0x0040
#define QM_CGR_WE_WR_EN_R 0x0020
#define QM_CGR_WE_CSCN_EN 0x0010
#define QM_CGR_WE_CSCN_TARG 0x0008
#define QM_CGR_WE_CSTD_EN 0x0004
#define QM_CGR_WE_CS_THRES 0x0002
#define QM_CGR_WE_MODE 0x0001
#define QMAN_CGR_FLAG_USE_INIT 0x00000001
/* Portal and Frame Queues */
/* Represents a managed portal */
struct qman_portal;
/*
* This object type represents QMan frame queue descriptors (FQD), it is
* cacheline-aligned, and initialised by qman_create_fq(). The structure is
* defined further down.
*/
struct qman_fq;
/*
* This object type represents a QMan congestion group, it is defined further
* down.
*/
struct qman_cgr;
/*
* This enum, and the callback type that returns it, are used when handling
* dequeued frames via DQRR. Note that for "null" callbacks registered with the
* portal object (for handling dequeues that do not demux because context_b is
* NULL), the return value *MUST* be qman_cb_dqrr_consume.
*/
enum qman_cb_dqrr_result {
/* DQRR entry can be consumed */
qman_cb_dqrr_consume,
/* Like _consume, but requests parking - FQ must be held-active */
qman_cb_dqrr_park,
/* Does not consume, for DCA mode only. */
qman_cb_dqrr_defer,
/*
* Stop processing without consuming this ring entry. Exits the current
* qman_p_poll_dqrr() or interrupt-handling, as appropriate. If within
* an interrupt handler, the callback would typically call
* qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
* otherwise the interrupt will reassert immediately.
*/
qman_cb_dqrr_stop,
/* Like qman_cb_dqrr_stop, but consumes the current entry. */
qman_cb_dqrr_consume_stop
};
typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
struct qman_fq *fq,
const struct qm_dqrr_entry *dqrr);
/*
* This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
* are always consumed after the callback returns.
*/
typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
const union qm_mr_entry *msg);
/*
* s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
* held-active + held-suspended are just "sched". Things like "retired" will not
* be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until
* then, to indicate it's completing and to gate attempts to retry the retire
* command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
* technically impossible in the case of enqueue DCAs (which refer to DQRR ring
* index rather than the FQ that ring entry corresponds to), so repeated park
* commands are allowed (if you're silly enough to try) but won't change FQ
* state, and the resulting park notifications move FQs from "sched" to
* "parked".
*/
enum qman_fq_state {
qman_fq_state_oos,
qman_fq_state_parked,
qman_fq_state_sched,
qman_fq_state_retired
};
#define QMAN_FQ_STATE_CHANGING 0x80000000 /* 'state' is changing */
#define QMAN_FQ_STATE_NE 0x40000000 /* retired FQ isn't empty */
#define QMAN_FQ_STATE_ORL 0x20000000 /* retired FQ has ORL */
#define QMAN_FQ_STATE_BLOCKOOS 0xe0000000 /* if any are set, no OOS */
#define QMAN_FQ_STATE_CGR_EN 0x10000000 /* CGR enabled */
#define QMAN_FQ_STATE_VDQCR 0x08000000 /* being volatile dequeued */
/*
* Frame queue objects (struct qman_fq) are stored within memory passed to
* qman_create_fq(), as this allows stashing of caller-provided demux callback
* pointers at no extra cost to stashing of (driver-internal) FQ state. If the
* caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
* they should;
*
* (a) extend the qman_fq structure with their state; eg.
*
* // myfq is allocated and driver_fq callbacks filled in;
* struct my_fq {
* struct qman_fq base;
* int an_extra_field;
* [ ... add other fields to be associated with each FQ ...]
* } *myfq = some_my_fq_allocator();
* struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
*
* // in a dequeue callback, access extra fields from 'fq' via a cast;
* struct my_fq *myfq = (struct my_fq *)fq;
* do_something_with(myfq->an_extra_field);
* [...]
*
* (b) when and if configuring the FQ for context stashing, specify how ever
* many cachelines are required to stash 'struct my_fq', to accelerate not
* only the QMan driver but the callback as well.
*/
struct qman_fq_cb {
qman_cb_dqrr dqrr; /* for dequeued frames */
qman_cb_mr ern; /* for s/w ERNs */
qman_cb_mr fqs; /* frame-queue state changes*/
};
struct qman_fq {
/* Caller of qman_create_fq() provides these demux callbacks */
struct qman_fq_cb cb;
/*
* These are internal to the driver, don't touch. In particular, they
* may change, be removed, or extended (so you shouldn't rely on
* sizeof(qman_fq) being a constant).
*/
u32 fqid, idx;
unsigned long flags;
enum qman_fq_state state;
int cgr_groupid;
};
/*
* This callback type is used when handling congestion group entry/exit.
* 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
*/
typedef void (*qman_cb_cgr)(struct qman_portal *qm,
struct qman_cgr *cgr, int congested);
struct qman_cgr {
/* Set these prior to qman_create_cgr() */
u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/
qman_cb_cgr cb;
/* These are private to the driver */
u16 chan; /* portal channel this object is created on */
struct list_head node;
};
/* Flags to qman_create_fq() */
#define QMAN_FQ_FLAG_NO_ENQUEUE 0x00000001 /* can't enqueue */
#define QMAN_FQ_FLAG_NO_MODIFY 0x00000002 /* can only enqueue */
#define QMAN_FQ_FLAG_TO_DCPORTAL 0x00000004 /* consumed by CAAM/PME/Fman */
#define QMAN_FQ_FLAG_DYNAMIC_FQID 0x00000020 /* (de)allocate fqid */
/* Flags to qman_init_fq() */
#define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */
#define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */
/* Portal Management */
/**
* qman_p_irqsource_add - add processing sources to be interrupt-driven
* @bits: bitmask of QM_PIRQ_**I processing sources
*
* Adds processing sources that should be interrupt-driven (rather than
* processed via qman_poll_***() functions).
*/
void qman_p_irqsource_add(struct qman_portal *p, u32 bits);
/**
* qman_p_irqsource_remove - remove processing sources from being int-driven
* @bits: bitmask of QM_PIRQ_**I processing sources
*
* Removes processing sources from being interrupt-driven, so that they will
* instead be processed via qman_poll_***() functions.
*/
void qman_p_irqsource_remove(struct qman_portal *p, u32 bits);
/**
* qman_affine_cpus - return a mask of cpus that have affine portals
*/
const cpumask_t *qman_affine_cpus(void);
/**
 * qman_affine_channel - return the channel ID of a portal
* @cpu: the cpu whose affine portal is the subject of the query
*
* If @cpu is -1, the affine portal for the current CPU will be used. It is a
* bug to call this function for any value of @cpu (other than -1) that is not a
* member of the mask returned from qman_affine_cpus().
*/
u16 qman_affine_channel(int cpu);
/**
* qman_get_affine_portal - return the portal pointer affine to cpu
* @cpu: the cpu whose affine portal is the subject of the query
*/
struct qman_portal *qman_get_affine_portal(int cpu);
/**
* qman_p_poll_dqrr - process DQRR (fast-path) entries
* @limit: the maximum number of DQRR entries to process
*
* Use of this function requires that DQRR processing not be interrupt-driven.
* The return value represents the number of DQRR entries processed.
*/
int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit);
/**
* qman_p_static_dequeue_add - Add pool channels to the portal SDQCR
* @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
*
* Adds a set of pool channels to the portal's static dequeue command register
* (SDQCR). The requested pools are limited to those the portal has dequeue
* access to.
*/
void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools);
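/*
 * Example (illustrative sketch): direct a portal to also dequeue from a pool
 * channel; 'p' and 'channel' (e.g. from qman_alloc_pool()) are hypothetical.
 *
 *     qman_p_static_dequeue_add(p, QM_SDQCR_CHANNELS_POOL_CONV(channel));
 */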
/* FQ management */
/**
* qman_create_fq - Allocates a FQ
* @fqid: the index of the FQD to encapsulate, must be "Out of Service"
* @flags: bit-mask of QMAN_FQ_FLAG_*** options
* @fq: memory for storing the 'fq', with callbacks filled in
*
* Creates a frame queue object for the given @fqid, unless the
* QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
* dynamically allocated (or the function fails if none are available). Once
* created, the caller should not touch the memory at 'fq' except as extended to
* adjacent memory for user-defined fields (see the definition of "struct
* qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
 * pre-existing frame-queues that aren't to be otherwise interfered with; it
* prevents all other modifications to the frame queue. The TO_DCPORTAL flag
* causes the driver to honour any context_b modifications requested in the
* qm_init_fq() API, as this indicates the frame queue will be consumed by a
* direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
* software portals, the context_b field is controlled by the driver and can't
* be modified by the caller.
*/
int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
/**
* qman_destroy_fq - Deallocates a FQ
* @fq: the frame queue object to release
*
* The memory for this frame queue object ('fq' provided in qman_create_fq()) is
* not deallocated but the caller regains ownership, to do with as desired. The
* FQ must be in the 'out-of-service' or in the 'parked' state.
*/
void qman_destroy_fq(struct qman_fq *fq);
/**
* qman_fq_fqid - Queries the frame queue ID of a FQ object
* @fq: the frame queue object to query
*/
u32 qman_fq_fqid(struct qman_fq *fq);
/**
* qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
* @fq: the frame queue object to modify, must be 'parked' or new.
* @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
* @opts: the FQ-modification settings, as defined in the low-level API
*
* The @opts parameter comes from the low-level portal API. Select
* QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
* rather than parked. NB, @opts can be NULL.
*
* Note that some fields and options within @opts may be ignored or overwritten
* by the driver;
* 1. the 'count' and 'fqid' fields are always ignored (this operation only
* affects one frame queue: @fq).
* 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
* 'fqd' structure's 'context_b' field are sometimes overwritten;
* - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
* initialised to a value used by the driver for demux.
* - if context_b is initialised for demux, so is context_a in case stashing
* is requested (see item 4).
* (So caller control of context_b is only possible for TO_DCPORTAL frame queue
* objects.)
* 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
* 'dest::channel' field will be overwritten to match the portal used to issue
* the command. If the WE_DESTWQ write-enable bit had already been set by the
* caller, the channel workqueue will be left as-is, otherwise the write-enable
* bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
* isn't set, the destination channel/workqueue fields and the write-enable bit
* are left as-is.
* 4. if the driver overwrites context_a/b for demux, then if
* QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
* context_a.address fields and will leave the stashing fields provided by the
* user alone, otherwise it will zero out the context_a.stashing fields.
*/
int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
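/*
 * Example (illustrative sketch): create a dynamically-allocated FQ and
 * schedule it on a pool channel. Error handling is elided; 'mycbs' and
 * 'channel' are hypothetical.
 *
 *     struct qman_fq *fq = &myfq->base; // see "struct my_fq" above
 *     struct qm_mcc_initfq opts;
 *
 *     fq->cb = mycbs; // dqrr/ern/fqs callbacks
 *     err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
 *     memset(&opts, 0, sizeof(opts));
 *     opts.we_mask = cpu_to_be16(QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL);
 *     qm_fqd_set_destwq(&opts.fqd, channel, 3);
 *     opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
 *     err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
 */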
/**
* qman_schedule_fq - Schedules a FQ
* @fq: the frame queue object to schedule, must be 'parked'
*
* Schedules the frame queue, which must be Parked, which takes it to
* Tentatively-Scheduled or Truly-Scheduled depending on its fill-level.
*/
int qman_schedule_fq(struct qman_fq *fq);
/**
* qman_retire_fq - Retires a FQ
* @fq: the frame queue object to retire
* @flags: FQ flags (QMAN_FQ_STATE*) if retirement completes immediately
*
* Retires the frame queue. This returns zero if it succeeds immediately, +1 if
* the retirement was started asynchronously, otherwise it returns negative for
* failure. When this function returns zero, @flags is set to indicate whether
* the retired FQ is empty and/or whether it has any ORL fragments (to show up
* as ERNs). Otherwise the corresponding flags will be known when a subsequent
* FQRN message shows up on the portal's message ring.
*
* NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
* Active state), the completion will be via the message ring as a FQRN - but
* the corresponding callback may occur before this function returns!! Ie. the
* caller should be prepared to accept the callback as the function is called,
* not only once it has returned.
*/
int qman_retire_fq(struct qman_fq *fq, u32 *flags);
/**
* qman_oos_fq - Puts a FQ "out of service"
* @fq: the frame queue object to be put out-of-service, must be 'retired'
*
* The frame queue must be retired and empty, and if any order restoration list
* was released as ERNs at the time of retirement, they must all be consumed.
*/
int qman_oos_fq(struct qman_fq *fq);
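/*
 * Example (illustrative sketch): typical FQ teardown. If qman_retire_fq()
 * returns +1, retirement completes asynchronously and the cb.fqs (FQRN)
 * callback fires first; waiting for that message is elided here.
 *
 *     u32 flags;
 *
 *     err = qman_retire_fq(fq, &flags); // 0, +1, or negative error
 *     // ... if err == 1, wait for the FQRN message on cb.fqs ...
 *     err = qman_oos_fq(fq); // FQ must now be retired and empty
 *     qman_destroy_fq(fq); // caller reclaims the 'fq' memory
 */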
/**
* qman_enqueue - Enqueue a frame to a frame queue
* @fq: the frame queue object to enqueue to
* @fd: a descriptor of the frame to be enqueued
*
 * Fills an entry in the EQCR of the portal affine to the current cpu to
 * enqueue the frame described by @fd. The descriptor details are copied from
 * @fd to the EQCR entry; the 'pid' field is ignored. The return value is
 * non-zero on error, such as ring full.
*/
int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd);
/**
* qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
* @result: is set by the API to the base FQID of the allocated range
* @count: the number of FQIDs required
*
* Returns 0 on success, or a negative error code.
*/
int qman_alloc_fqid_range(u32 *result, u32 count);
#define qman_alloc_fqid(result) qman_alloc_fqid_range(result, 1)
/**
* qman_release_fqid - Release the specified frame queue ID
* @fqid: the FQID to be released back to the resource pool
*
* This function can also be used to seed the allocator with
* FQID ranges that it can subsequently allocate from.
* Returns 0 on success, or a negative error code.
*/
int qman_release_fqid(u32 fqid);
/* Pool-channel management */
/**
* qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
* @result: is set by the API to the base pool-channel ID of the allocated range
* @count: the number of pool-channel IDs required
*
* Returns 0 on success, or a negative error code.
*/
int qman_alloc_pool_range(u32 *result, u32 count);
#define qman_alloc_pool(result) qman_alloc_pool_range(result, 1)
/**
* qman_release_pool - Release the specified pool-channel ID
* @id: the pool-chan ID to be released back to the resource pool
*
* This function can also be used to seed the allocator with
* pool-channel ID ranges that it can subsequently allocate from.
* Returns 0 on success, or a negative error code.
*/
int qman_release_pool(u32 id);
/* CGR management */
/**
* qman_create_cgr - Register a congestion group object
* @cgr: the 'cgr' object, with fields filled in
* @flags: QMAN_CGR_FLAG_* values
* @opts: optional state of CGR settings
*
 * Registers this object to receive congestion entry/exit callbacks on the
 * portal affine to the cpu on which this API is executed. If opts is
 * NULL then only the callback (cgr->cb) function is registered. If @flags
 * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
 * any unspecified parameters) will be used rather than a modify hw command
 * (which only modifies the specified parameters).
*/
int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
struct qm_mcc_initcgr *opts);
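/*
 * Example (illustrative sketch): register a CGR with congestion-state
 * notifications and a 64KiB congestion threshold; 'my_cgr_cb' and 'cgrid'
 * (e.g. from qman_alloc_cgrid()) are hypothetical.
 *
 *     struct qman_cgr cgr = { .cgrid = cgrid, .cb = my_cgr_cb };
 *     struct qm_mcc_initcgr opts;
 *
 *     memset(&opts, 0, sizeof(opts));
 *     opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
 *     opts.cgr.cscn_en = QM_CGR_EN;
 *     qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, 0x10000, 1);
 *     err = qman_create_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
 */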
/**
* qman_delete_cgr - Deregisters a congestion group object
* @cgr: the 'cgr' object to deregister
*
* "Unplugs" this CGR object from the portal affine to the cpu on which this API
 * is executed. This must be executed on the same affine portal on which it was
* created.
*/
int qman_delete_cgr(struct qman_cgr *cgr);
/**
* qman_delete_cgr_safe - Deregisters a congestion group object from any CPU
* @cgr: the 'cgr' object to deregister
*
 * This will select the proper CPU and run qman_delete_cgr() there.
*/
void qman_delete_cgr_safe(struct qman_cgr *cgr);
/**
* qman_query_cgr_congested - Queries CGR's congestion status
* @cgr: the 'cgr' object to query
* @result: returns 'cgr's congestion status, 1 (true) if congested
*/
int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result);
/**
* qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
* @result: is set by the API to the base CGR ID of the allocated range
* @count: the number of CGR IDs required
*
* Returns 0 on success, or a negative error code.
*/
int qman_alloc_cgrid_range(u32 *result, u32 count);
#define qman_alloc_cgrid(result) qman_alloc_cgrid_range(result, 1)
/**
* qman_release_cgrid - Release the specified CGR ID
* @id: the CGR ID to be released back to the resource pool
*
* This function can also be used to seed the allocator with
* CGR ID ranges that it can subsequently allocate from.
* Returns 0 on success, or a negative error code.
*/
int qman_release_cgrid(u32 id);
#endif /* __FSL_QMAN_H */