/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (c) 2016-2017 Hisilicon Limited. */
#ifndef __HCLGEVF_CMD_H
#define __HCLGEVF_CMD_H
#include <linux/io.h>
#include <linux/types.h>
#include "hnae3.h"
#include "hclge_comm_cmd.h"
#define HCLGEVF_CMDQ_TX_TIMEOUT 30000
#define HCLGEVF_CMDQ_CLEAR_WAIT_TIME 200
#define HCLGEVF_CMDQ_RX_INVLD_B 0
#define HCLGEVF_CMDQ_RX_OUTVLD_B 1
struct hclgevf_hw;
struct hclgevf_dev;
#define HCLGEVF_SYNC_RX_RING_HEAD_EN_B 4
struct hclgevf_firmware_compat_cmd {
__le32 compat;
u8 rsv[20];
};
struct hclgevf_desc_cb {
dma_addr_t dma;
void *va;
u32 length;
};
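/* Ring layout shared by the command send queue (CSQ, driver to firmware)
 * and the command receive queue (CRQ, messages delivered to the driver);
 * next_to_use and next_to_clean are the software producer and consumer
 * indices.
 */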
struct hclgevf_cmq_ring {
dma_addr_t desc_dma_addr;
struct hclge_desc *desc;
struct hclgevf_desc_cb *desc_cb;
struct hclgevf_dev *dev;
u32 head;
u32 tail;
u16 buf_size;
u16 desc_num;
int next_to_use;
int next_to_clean;
u8 flag;
spinlock_t lock; /* Command queue lock */
};
enum hclgevf_cmd_return_status {
HCLGEVF_CMD_EXEC_SUCCESS = 0,
HCLGEVF_CMD_NO_AUTH = 1,
HCLGEVF_CMD_NOT_SUPPORTED = 2,
HCLGEVF_CMD_QUEUE_FULL = 3,
HCLGEVF_CMD_NEXT_ERR = 4,
HCLGEVF_CMD_UNEXE_ERR = 5,
HCLGEVF_CMD_PARA_ERR = 6,
HCLGEVF_CMD_RESULT_ERR = 7,
HCLGEVF_CMD_TIMEOUT = 8,
HCLGEVF_CMD_HILINK_ERR = 9,
HCLGEVF_CMD_QUEUE_ILLEGAL = 10,
HCLGEVF_CMD_INVALID = 11,
};
enum hclgevf_cmd_status {
HCLGEVF_STATUS_SUCCESS = 0,
HCLGEVF_ERR_CSQ_FULL = -1,
HCLGEVF_ERR_CSQ_TIMEOUT = -2,
HCLGEVF_ERR_CSQ_ERROR = -3
};
struct hclgevf_cmq {
struct hclgevf_cmq_ring csq;
struct hclgevf_cmq_ring crq;
u16 tx_timeout; /* Tx timeout */
enum hclgevf_cmd_status last_status;
};
#define HCLGEVF_CMD_FLAG_IN_VALID_SHIFT 0
#define HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT 1
#define HCLGEVF_CMD_FLAG_NEXT_SHIFT 2
#define HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT 3
#define HCLGEVF_CMD_FLAG_NO_INTR_SHIFT 4
#define HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT 5
#define HCLGEVF_CMD_FLAG_IN BIT(HCLGEVF_CMD_FLAG_IN_VALID_SHIFT)
#define HCLGEVF_CMD_FLAG_OUT BIT(HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT)
#define HCLGEVF_CMD_FLAG_NEXT BIT(HCLGEVF_CMD_FLAG_NEXT_SHIFT)
#define HCLGEVF_CMD_FLAG_WR BIT(HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT)
#define HCLGEVF_CMD_FLAG_NO_INTR BIT(HCLGEVF_CMD_FLAG_NO_INTR_SHIFT)
#define HCLGEVF_CMD_FLAG_ERR_INTR BIT(HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT)
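/* Sketch of how the flag bits above are typically combined when a
 * descriptor is prepared (see hclgevf_cmd_setup_basic_desc() below);
 * shown for illustration only:
 *
 *	desc->flag = cpu_to_le16(HCLGEVF_CMD_FLAG_NO_INTR | HCLGEVF_CMD_FLAG_IN);
 *	if (is_read)
 *		desc->flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_WR);
 *	else
 *		desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
 */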
enum hclgevf_opcode_type {
/* Generic command */
HCLGEVF_OPC_QUERY_FW_VER = 0x0001,
HCLGEVF_OPC_QUERY_VF_RSRC = 0x0024,
HCLGEVF_OPC_QUERY_DEV_SPECS = 0x0050,
/* TQP command */
HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
/* GRO command */
HCLGEVF_OPC_GRO_GENERIC_CONFIG = 0x0C10,
/* RSS cmd */
HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01,
HCLGEVF_OPC_RSS_INPUT_TUPLE = 0x0D02,
HCLGEVF_OPC_RSS_INDIR_TABLE = 0x0D07,
HCLGEVF_OPC_RSS_TC_MODE = 0x0D08,
/* Mailbox cmd */
HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001,
/* IMP stats command */
HCLGEVF_OPC_IMP_COMPAT_CFG = 0x701A,
};
#define HCLGEVF_TQP_REG_OFFSET 0x80000
#define HCLGEVF_TQP_REG_SIZE 0x200
#define HCLGEVF_TQP_MAX_SIZE_DEV_V2 1024
#define HCLGEVF_TQP_EXT_REG_OFFSET 0x100
struct hclgevf_tqp_map {
__le16 tqp_id; /* Absolute tqp id in this pf */
u8 tqp_vf; /* VF id */
#define HCLGEVF_TQP_MAP_TYPE_PF 0
#define HCLGEVF_TQP_MAP_TYPE_VF 1
#define HCLGEVF_TQP_MAP_TYPE_B 0
#define HCLGEVF_TQP_MAP_EN_B 1
u8 tqp_flag; /* Indicates whether this is a pf or vf tqp */
__le16 tqp_vid; /* Virtual id in this pf/vf */
u8 rsv[18];
};
#define HCLGEVF_VECTOR_ELEMENTS_PER_CMD 10
enum hclgevf_int_type {
HCLGEVF_INT_TX = 0,
HCLGEVF_INT_RX,
HCLGEVF_INT_EVENT,
};
struct hclgevf_ctrl_vector_chain {
u8 int_vector_id;
u8 int_cause_num;
#define HCLGEVF_INT_TYPE_S 0
#define HCLGEVF_INT_TYPE_M 0x3
#define HCLGEVF_TQP_ID_S 2
#define HCLGEVF_TQP_ID_M (0x3fff << HCLGEVF_TQP_ID_S)
__le16 tqp_type_and_id[HCLGEVF_VECTOR_ELEMENTS_PER_CMD];
u8 vfid;
u8 resv;
};
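/* Illustrative sketch (not verbatim driver code) of packing one
 * tqp_type_and_id entry with the hnae3_set_field() helper from hnae3.h;
 * 'tqp_index', 'chain' and 'i' are assumed to come from the caller:
 *
 *	u16 val = 0;
 *
 *	hnae3_set_field(val, HCLGEVF_INT_TYPE_M, HCLGEVF_INT_TYPE_S,
 *			HCLGEVF_INT_RX);
 *	hnae3_set_field(val, HCLGEVF_TQP_ID_M, HCLGEVF_TQP_ID_S, tqp_index);
 *	chain->tqp_type_and_id[i] = cpu_to_le16(val);
 */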
enum HCLGEVF_CAP_BITS {
HCLGEVF_CAP_UDP_GSO_B,
HCLGEVF_CAP_QB_B,
HCLGEVF_CAP_FD_FORWARD_TC_B,
HCLGEVF_CAP_PTP_B,
HCLGEVF_CAP_INT_QL_B,
HCLGEVF_CAP_HW_TX_CSUM_B,
HCLGEVF_CAP_TX_PUSH_B,
HCLGEVF_CAP_PHY_IMP_B,
HCLGEVF_CAP_TQP_TXRX_INDEP_B,
HCLGEVF_CAP_HW_PAD_B,
HCLGEVF_CAP_STASH_B,
HCLGEVF_CAP_UDP_TUNNEL_CSUM_B,
HCLGEVF_CAP_RXD_ADV_LAYOUT_B = 15,
};
enum HCLGEVF_API_CAP_BITS {
HCLGEVF_API_CAP_FLEX_RSS_TBL_B,
};
#define HCLGEVF_QUERY_CAP_LENGTH 3
struct hclgevf_query_version_cmd {
__le32 firmware;
__le32 hardware;
__le32 api_caps;
__le32 caps[HCLGEVF_QUERY_CAP_LENGTH]; /* capabilities of device */
};
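/* Sketch of testing a capability bit reported by firmware; 'resp' is
 * assumed to point at the data of a completed query descriptor and
 * 'ae_dev' at the hnae3 device (illustration only):
 *
 *	u32 caps = le32_to_cpu(resp->caps[0]);
 *
 *	if (hnae3_get_bit(caps, HCLGEVF_CAP_TQP_TXRX_INDEP_B))
 *		set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps);
 */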
#define HCLGEVF_MSIX_OFT_ROCEE_S 0
#define HCLGEVF_MSIX_OFT_ROCEE_M (0xffff << HCLGEVF_MSIX_OFT_ROCEE_S)
#define HCLGEVF_VEC_NUM_S 0
#define HCLGEVF_VEC_NUM_M (0xff << HCLGEVF_VEC_NUM_S)
struct hclgevf_query_res_cmd {
__le16 tqp_num;
__le16 reserved;
__le16 msixcap_localid_ba_nic;
__le16 msixcap_localid_ba_rocee;
__le16 vf_intr_vector_number;
__le16 rsv[7];
};
#define HCLGEVF_GRO_EN_B 0
struct hclgevf_cfg_gro_status_cmd {
u8 gro_en;
u8 rsv[23];
};
#define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4
#define HCLGEVF_RSS_HASH_KEY_OFFSET_B 4
#define HCLGEVF_RSS_HASH_KEY_NUM 16
struct hclgevf_rss_config_cmd {
u8 hash_config;
u8 rsv[7];
u8 hash_key[HCLGEVF_RSS_HASH_KEY_NUM];
};
struct hclgevf_rss_input_tuple_cmd {
u8 ipv4_tcp_en;
u8 ipv4_udp_en;
u8 ipv4_sctp_en;
u8 ipv4_fragment_en;
u8 ipv6_tcp_en;
u8 ipv6_udp_en;
u8 ipv6_sctp_en;
u8 ipv6_fragment_en;
u8 rsv[16];
};
#define HCLGEVF_RSS_CFG_TBL_SIZE 16
struct hclgevf_rss_indirection_table_cmd {
__le16 start_table_index;
__le16 rss_set_bitmap;
u8 rsv[4];
u8 rss_result[HCLGEVF_RSS_CFG_TBL_SIZE];
};
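/* The indirection table is programmed HCLGEVF_RSS_CFG_TBL_SIZE entries at
 * a time; rss_set_bitmap selects which of those entries are written
 * (0xffff updates all sixteen). Sketch, assuming 'indir' is the driver's
 * u8 indirection array and 'i'/'j' are loop counters (illustration only):
 *
 *	req->start_table_index = cpu_to_le16(i * HCLGEVF_RSS_CFG_TBL_SIZE);
 *	req->rss_set_bitmap = cpu_to_le16(0xffff);
 *	for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
 *		req->rss_result[j] = indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];
 */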
#define HCLGEVF_RSS_TC_OFFSET_S 0
#define HCLGEVF_RSS_TC_OFFSET_M GENMASK(10, 0)
#define HCLGEVF_RSS_TC_SIZE_MSB_B 11
#define HCLGEVF_RSS_TC_SIZE_S 12
#define HCLGEVF_RSS_TC_SIZE_M GENMASK(14, 12)
#define HCLGEVF_RSS_TC_VALID_B 15
#define HCLGEVF_MAX_TC_NUM 8
#define HCLGEVF_RSS_TC_SIZE_MSB_OFFSET 3
struct hclgevf_rss_tc_mode_cmd {
__le16 rss_tc_mode[HCLGEVF_MAX_TC_NUM];
u8 rsv[8];
};
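/* Sketch of packing one rss_tc_mode entry with the hnae3 helpers; the
 * tc_valid/tc_size/tc_offset arrays are assumed to be supplied by the
 * caller (illustration only):
 *
 *	u16 mode = 0;
 *
 *	hnae3_set_bit(mode, HCLGEVF_RSS_TC_VALID_B, tc_valid[i] & 0x1);
 *	hnae3_set_field(mode, HCLGEVF_RSS_TC_SIZE_M, HCLGEVF_RSS_TC_SIZE_S,
 *			tc_size[i]);
 *	hnae3_set_bit(mode, HCLGEVF_RSS_TC_SIZE_MSB_B,
 *		      tc_size[i] >> HCLGEVF_RSS_TC_SIZE_MSB_OFFSET & 0x1);
 *	hnae3_set_field(mode, HCLGEVF_RSS_TC_OFFSET_M, HCLGEVF_RSS_TC_OFFSET_S,
 *			tc_offset[i]);
 *	req->rss_tc_mode[i] = cpu_to_le16(mode);
 */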
#define HCLGEVF_LINK_STS_B 0
#define HCLGEVF_LINK_STATUS BIT(HCLGEVF_LINK_STS_B)
struct hclgevf_link_status_cmd {
u8 status;
u8 rsv[23];
};
#define HCLGEVF_RING_ID_MASK 0x3ff
#define HCLGEVF_TQP_ENABLE_B 0
struct hclgevf_cfg_com_tqp_queue_cmd {
__le16 tqp_id;
__le16 stream_id;
u8 enable;
u8 rsv[19];
};
struct hclgevf_cfg_tx_queue_pointer_cmd {
__le16 tqp_id;
__le16 tx_tail;
__le16 tx_head;
__le16 fbd_num;
__le16 ring_offset;
u8 rsv[14];
};
#define HCLGEVF_TYPE_CRQ 0
#define HCLGEVF_TYPE_CSQ 1
/* this bit indicates that the driver is ready for hardware reset */
#define HCLGEVF_NIC_SW_RST_RDY_B 16
#define HCLGEVF_NIC_SW_RST_RDY BIT(HCLGEVF_NIC_SW_RST_RDY_B)
#define HCLGEVF_NIC_CMQ_DESC_NUM 1024
#define HCLGEVF_NIC_CMQ_DESC_NUM_S 3
#define HCLGEVF_QUERY_DEV_SPECS_BD_NUM 4
struct hclgevf_dev_specs_0_cmd {
__le32 rsv0;
__le32 mac_entry_num;
__le32 mng_entry_num;
__le16 rss_ind_tbl_size;
__le16 rss_key_size;
__le16 int_ql_max;
u8 max_non_tso_bd_num;
u8 rsv1[5];
};
#define HCLGEVF_DEF_MAX_INT_GL 0x1FE0U
struct hclgevf_dev_specs_1_cmd {
__le16 max_frm_size;
__le16 rsv0;
__le16 max_int_gl;
u8 rsv1[18];
};
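/* The device specs query spans HCLGEVF_QUERY_DEV_SPECS_BD_NUM buffer
 * descriptors; all but the last are chained with HCLGEVF_CMD_FLAG_NEXT.
 * Sketch of the query sequence (illustration only; error handling
 * omitted, 'hdev' assumed to be the VF device structure):
 *
 *	struct hclge_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM];
 *	int i;
 *
 *	for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
 *		hclgevf_cmd_setup_basic_desc(&desc[i],
 *					     HCLGEVF_OPC_QUERY_DEV_SPECS, true);
 *		desc[i].flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_NEXT);
 *	}
 *	hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS, true);
 *	ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM);
 */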
/* capability bit mapping between IMP firmware and the local driver */
struct hclgevf_caps_bit_map {
u16 imp_bit;
u16 local_bit;
};
static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value)
{
writel(value, base + reg);
}
static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg)
{
u8 __iomem *reg_addr = READ_ONCE(base);
return readl(reg_addr + reg);
}
#define hclgevf_write_dev(a, reg, value) \
hclgevf_write_reg((a)->io_base, reg, value)
#define hclgevf_read_dev(a, reg) \
hclgevf_read_reg((a)->io_base, reg)
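/* Usage sketch: 'hw' is assumed to have an io_base member and 0x27000 is
 * only a placeholder register offset, not a real hardware address:
 *
 *	u32 val = hclgevf_read_dev(hw, 0x27000);
 *
 *	hclgevf_write_dev(hw, 0x27000, val | BIT(0));
 */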
#define HCLGEVF_SEND_SYNC(flag) \
((flag) & HCLGEVF_CMD_FLAG_NO_INTR)
int hclgevf_cmd_init(struct hclgevf_dev *hdev);
void hclgevf_cmd_uninit(struct hclgevf_dev *hdev);
int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev);
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num);
void hclgevf_cmd_setup_basic_desc(struct hclge_desc *desc,
enum hclgevf_opcode_type opcode,
bool is_read);
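/* Typical call sequence (sketch; error handling omitted, 'hdev' assumed
 * to be the VF device structure):
 *
 *	struct hclgevf_query_version_cmd *resp;
 *	struct hclge_desc desc;
 *
 *	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, true);
 *	resp = (struct hclgevf_query_version_cmd *)desc.data;
 *	if (!hclgevf_cmd_send(&hdev->hw, &desc, 1))
 *		fw_version = le32_to_cpu(resp->firmware);
 */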
#endif