/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _ENIC_H_
#define _ENIC_H_
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_nic.h"
#include "vnic_rss.h"
#include <linux/irq.h>
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
#define ENIC_BARS_MAX 6
#define ENIC_WQ_MAX 8
#define ENIC_RQ_MAX 8
#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
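/* One interrupt per completion queue, plus two extra slots that are used
 * below for the dedicated error and notify interrupts.
 */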
#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
#define ENIC_WQ_NAPI_BUDGET 256
#define ENIC_AIC_LARGE_PKT_DIFF 3
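
/* Per-MSI-X-vector bookkeeping: whether the vector has been requested, the
 * name and handler used with request_irq(), the handler cookie and the CPU
 * affinity mask for the vector.
 */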
struct enic_msix_entry {
	int requested;
	char devname[IFNAMSIZ + 8];
	irqreturn_t (*isr)(int, void *);
	void *devid;
	cpumask_var_t affinity_mask;
};
/* Store only the lower range. Higher range is given by fw. */
struct enic_intr_mod_range {
	u32 small_pkt_range_start;
	u32 large_pkt_range_start;
};
struct enic_intr_mod_table {
	u32 rx_rate;
	u32 range_percent;
};
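
/* Known link speeds and the table indices used by the adaptive interrupt
 * coalescing code to pick per-speed moderation ranges.
 */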
#define ENIC_MAX_LINK_SPEEDS 3
#define ENIC_LINK_SPEED_10G 10000
#define ENIC_LINK_SPEED_4G 4000
#define ENIC_LINK_40G_INDEX 2
#define ENIC_LINK_10G_INDEX 1
#define ENIC_LINK_4G_INDEX 0
#define ENIC_RX_COALESCE_RANGE_END 125
#define ENIC_AIC_TS_BREAK 100
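
/* Adaptive RX coalescing state: the small/large packet moderation range
 * (in usecs) and whether adaptive RX coalescing is currently enabled.
 */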
struct enic_rx_coal {
	u32 small_pkt_range_start;
	u32 large_pkt_range_start;
	u32 range_end;
	u32 use_adaptive_rx_coalesce;
};
/* priv_flags */
#define ENIC_SRIOV_ENABLED (1 << 0)
/* enic port profile set flags */
#define ENIC_PORT_REQUEST_APPLIED (1 << 0)
#define ENIC_SET_REQUEST (1 << 1)
#define ENIC_SET_NAME (1 << 2)
#define ENIC_SET_INSTANCE (1 << 3)
#define ENIC_SET_HOST (1 << 4)
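
/* Port profile state; the ENIC_SET_* bits in @set record which of the
 * fields below have been supplied.
 */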
struct enic_port_profile {
	u32 set;
	u8 request;
	char name[PORT_PROFILE_MAX];
	u8 instance_uuid[PORT_UUID_MAX];
	u8 host_uuid[PORT_UUID_MAX];
	u8 vf_mac[ETH_ALEN];
	u8 mac_addr[ETH_ALEN];
};
/* enic_rfs_fltr_node - rfs filter node in hash table
 * @keys: IPv4 5 tuple
 * @flow_id: flow_id of clsf filter provided by kernel
 * @fltr_id: filter id of clsf filter returned by adaptor
 * @rq_id: desired rq index
 * @node: hlist_node
 */
struct enic_rfs_fltr_node {
	struct flow_keys keys;
	u32 flow_id;
	u16 fltr_id;
	u16 rq_id;
	struct hlist_node node;
};
/* enic_rfs_flw_tbl - rfs flow table
 * @max: Maximum number of filters vNIC supports
 * @free: Number of free filters available
 * @toclean: hash table index to clean next
 * @ht_head: hash table list head
 * @lock: spin lock
 * @rfs_may_expire: timer function for enic_rps_may_expire_flow
 */
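/* Buckets are selected with the low ENIC_RFS_FLW_BITSHIFT bits of the flow
 * hash, i.e. ht_head[hash & ENIC_RFS_FLW_MASK] (illustrative; the exact hash
 * is computed in the classifier code).
 */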
struct enic_rfs_flw_tbl {
	u16 max;
	int free;

#define ENIC_RFS_FLW_BITSHIFT (10)
#define ENIC_RFS_FLW_MASK ((1 << ENIC_RFS_FLW_BITSHIFT) - 1)
	u16 toclean:ENIC_RFS_FLW_BITSHIFT;
	struct hlist_head ht_head[1 << ENIC_RFS_FLW_BITSHIFT];
	spinlock_t lock;
	struct timer_list rfs_may_expire;
};
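
/* State for hardware VXLAN offload: the UDP port configured in the adapter,
 * plus the patch level and feature flags reported for the offload.
 */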
struct vxlan_offload {
	u16 vxlan_udp_port_number;
	u8 patch_level;
	u8 flags;
};
/* Per-instance private data structure */
struct enic {
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct vnic_enet_config config;
	struct vnic_dev_bar bar[ENIC_BARS_MAX];
	struct vnic_dev *vdev;
	struct timer_list notify_timer;
	struct work_struct reset;
	struct work_struct tx_hang_reset;
	struct work_struct change_mtu_work;
	struct msix_entry msix_entry[ENIC_INTR_MAX];
	struct enic_msix_entry msix[ENIC_INTR_MAX];
	u32 msg_enable;
	spinlock_t devcmd_lock;
	u8 mac_addr[ETH_ALEN];
	unsigned int flags;
	unsigned int priv_flags;
	unsigned int mc_count;
	unsigned int uc_count;
	u32 port_mtu;
	struct enic_rx_coal rx_coalesce_setting;
	u32 rx_coalesce_usecs;
	u32 tx_coalesce_usecs;
#ifdef CONFIG_PCI_IOV
	u16 num_vfs;
#endif
	spinlock_t enic_api_lock;
	struct enic_port_profile *pp;

	/* work queue cache line section */
	____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
	spinlock_t wq_lock[ENIC_WQ_MAX];
	unsigned int wq_count;
	u16 loop_enable;
	u16 loop_tag;

	/* receive queue cache line section */
	____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
	unsigned int rq_count;
	struct vxlan_offload vxlan;
	u64 rq_truncated_pkts;
	u64 rq_bad_fcs;
	struct napi_struct napi[ENIC_RQ_MAX + ENIC_WQ_MAX];

	/* interrupt resource cache line section */
	____cacheline_aligned struct vnic_intr intr[ENIC_INTR_MAX];
	unsigned int intr_count;
	u32 __iomem *legacy_pba;	/* memory-mapped */

	/* completion queue cache line section */
	____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX];
	unsigned int cq_count;
	struct enic_rfs_flw_tbl rfs_h;
	u32 rx_copybreak;
	u8 rss_key[ENIC_RSS_LEN];
	struct vnic_gen_stats gen_stats;
};
static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev)
{
	struct enic *enic = vdev->priv;

	return enic->netdev;
}
/* Wrapper macros for kernel logging: the vdev_* variants log against the
 * underlying PCI device, the vdev_net* variants against the net_device.
 */
#define vdev_err(vdev, fmt, ...) \
	dev_err(&(vdev)->pdev->dev, fmt, ##__VA_ARGS__)
#define vdev_warn(vdev, fmt, ...) \
	dev_warn(&(vdev)->pdev->dev, fmt, ##__VA_ARGS__)
#define vdev_info(vdev, fmt, ...) \
	dev_info(&(vdev)->pdev->dev, fmt, ##__VA_ARGS__)

#define vdev_neterr(vdev, fmt, ...) \
	netdev_err(vnic_get_netdev(vdev), fmt, ##__VA_ARGS__)
#define vdev_netwarn(vdev, fmt, ...) \
	netdev_warn(vnic_get_netdev(vdev), fmt, ##__VA_ARGS__)
#define vdev_netinfo(vdev, fmt, ...) \
	netdev_info(vnic_get_netdev(vdev), fmt, ##__VA_ARGS__)
static inline struct device *enic_get_dev(struct enic *enic)
{
	return &(enic->pdev->dev);
}
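
/* Completion queues are laid out with the RQ CQs first (indices 0 to
 * rq_count - 1) followed by the WQ CQs (rq_count to rq_count + wq_count - 1),
 * as reflected by the two helpers below.
 */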
static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
{
	return rq;
}
static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
{
	return enic->rq_count + wq;
}
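
/* In legacy INTx mode the driver uses three fixed interrupt indices:
 * 0 for I/O completions, 1 for errors and 2 for notifications.
 */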
static inline unsigned int enic_legacy_io_intr(void)
{
	return 0;
}
static inline unsigned int enic_legacy_err_intr(void)
{
	return 1;
}
static inline unsigned int enic_legacy_notify_intr(void)
{
	return 2;
}
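
/* In MSI-X mode each queue's vector comes from its CQ's interrupt_offset,
 * while the error and notify interrupts take the two slots that follow the
 * per-queue vectors (rq_count + wq_count and rq_count + wq_count + 1).
 */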
static inline unsigned int enic_msix_rq_intr(struct enic *enic,
					     unsigned int rq)
{
	return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
}
static inline unsigned int enic_msix_wq_intr(struct enic *enic,
					     unsigned int wq)
{
	return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
}
static inline unsigned int enic_msix_err_intr(struct enic *enic)
{
	return enic->rq_count + enic->wq_count;
}
static inline unsigned int enic_msix_notify_intr(struct enic *enic)
{
	return enic->rq_count + enic->wq_count + 1;
}
static inline bool enic_is_err_intr(struct enic *enic, int intr)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		return intr == enic_legacy_err_intr();
	case VNIC_DEV_INTR_MODE_MSIX:
		return intr == enic_msix_err_intr(enic);
	case VNIC_DEV_INTR_MODE_MSI:
	default:
		return false;
	}
}
static inline bool enic_is_notify_intr(struct enic *enic, int intr)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		return intr == enic_legacy_notify_intr();
	case VNIC_DEV_INTR_MODE_MSIX:
		return intr == enic_msix_notify_intr(enic);
	case VNIC_DEV_INTR_MODE_MSI:
	default:
		return false;
	}
}
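
/* Checks the result of a DMA mapping, counting failures and emitting a
 * rate-limited warning.  A caller would typically do something like
 * (illustrative only):
 *
 *	dma_addr = pci_map_single(enic->pdev, buf, len, PCI_DMA_TODEVICE);
 *	if (enic_dma_map_check(enic, dma_addr))
 *		return -ENOMEM;
 */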
static inline int enic_dma_map_check(struct enic *enic, dma_addr_t dma_addr)
{
	if (unlikely(pci_dma_mapping_error(enic->pdev, dma_addr))) {
		net_warn_ratelimited("%s: PCI dma mapping failed!\n",
				     enic->netdev->name);
		enic->gen_stats.dma_map_error++;

		return -ENOMEM;
	}

	return 0;
}
void enic_reset_addr_lists(struct enic *enic);
int enic_sriov_enabled(struct enic *enic);
int enic_is_valid_vf(struct enic *enic, int vf);
int enic_is_dynamic(struct enic *enic);
void enic_set_ethtool_ops(struct net_device *netdev);
int __enic_set_rsskey(struct enic *enic);
#endif /* _ENIC_H_ */