/* SPDX-License-Identifier: GPL-2.0 */
/*
* Management Component Transport Protocol (MCTP)
*
* Copyright (c) 2021 Code Construct
* Copyright (c) 2021 Google
*/
#ifndef __NET_MCTP_H
#define __NET_MCTP_H
#include <linux/bits.h>
#include <linux/mctp.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
/* MCTP packet definitions */
struct mctp_hdr {
	u8	ver;
	u8	dest;
	u8	src;
	u8	flags_seq_tag;
};
#define MCTP_VER_MIN 1
#define MCTP_VER_MAX 1
/* Definitions for flags_seq_tag field */
#define MCTP_HDR_FLAG_SOM BIT(7)
#define MCTP_HDR_FLAG_EOM BIT(6)
#define MCTP_HDR_FLAG_TO BIT(3)
#define MCTP_HDR_FLAGS GENMASK(5, 3)
#define MCTP_HDR_SEQ_SHIFT 4
#define MCTP_HDR_SEQ_MASK GENMASK(1, 0)
#define MCTP_HDR_TAG_SHIFT 0
#define MCTP_HDR_TAG_MASK GENMASK(2, 0)
#define MCTP_HEADER_MAXLEN 4
#define MCTP_INITIAL_DEFAULT_NET 1
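
/* A minimal illustrative sketch (not part of the upstream API; the
 * example_* names are hypothetical): decoding the flags_seq_tag byte
 * with the masks and shifts defined above.
 */
static inline u8 example_mctp_hdr_seq(const struct mctp_hdr *hdr)
{
	return (hdr->flags_seq_tag >> MCTP_HDR_SEQ_SHIFT) & MCTP_HDR_SEQ_MASK;
}

static inline u8 example_mctp_hdr_tag(const struct mctp_hdr *hdr)
{
	return (hdr->flags_seq_tag >> MCTP_HDR_TAG_SHIFT) & MCTP_HDR_TAG_MASK;
}

static inline bool example_mctp_hdr_single_fragment(const struct mctp_hdr *hdr)
{
	/* a single-fragment message carries both SOM and EOM */
	return (hdr->flags_seq_tag & (MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM)) ==
	       (MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM);
}
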
static inline bool mctp_address_unicast(mctp_eid_t eid)
{
	return eid >= 8 && eid < 255;
}

static inline bool mctp_address_broadcast(mctp_eid_t eid)
{
	return eid == 255;
}

static inline bool mctp_address_null(mctp_eid_t eid)
{
	return eid == 0;
}

static inline bool mctp_address_matches(mctp_eid_t match, mctp_eid_t eid)
{
	return match == eid || match == MCTP_ADDR_ANY;
}

static inline struct mctp_hdr *mctp_hdr(struct sk_buff *skb)
{
	return (struct mctp_hdr *)skb_network_header(skb);
}
/* socket implementation */
struct mctp_sock {
	struct sock	sk;

	/* bind() params */
	unsigned int	bind_net;
	mctp_eid_t	bind_addr;
	__u8		bind_type;

	/* sendmsg()/recvmsg() uses struct sockaddr_mctp_ext */
	bool		addr_ext;

	/* list of mctp_sk_key, for incoming tag lookup. updates protected
	 * by sk->net->keys_lock
	 */
	struct hlist_head keys;

	/* mechanism for expiring allocated keys; will release an allocated
	 * tag, and any netdev state for a request/response pairing
	 */
	struct timer_list key_expiry;
};
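
/* Purely illustrative (userspace view, not kernel code): the bind()
 * parameters above correspond to fields of struct sockaddr_mctp from
 * <linux/mctp.h>. A sketch of a receiver binding to all local EIDs for
 * one message type ('type' is a stand-in for the wanted MCTP message
 * type) might look like:
 *
 *	struct sockaddr_mctp addr = {
 *		.smctp_family = AF_MCTP,
 *		.smctp_network = MCTP_NET_ANY,		// -> bind_net
 *		.smctp_addr.s_addr = MCTP_ADDR_ANY,	// -> bind_addr
 *		.smctp_type = type,			// -> bind_type
 *	};
 *
 *	bind(sd, (struct sockaddr *)&addr, sizeof(addr));
 */
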
/* Key for matching incoming packets to sockets or reassembly contexts.
* Packets are matched on (src,dest,tag).
*
* Lifetime / locking requirements:
*
* - individual key data (ie, the struct itself) is protected by key->lock;
* changes must be made with that lock held.
*
* - the lookup fields: peer_addr, local_addr and tag are set before the
* key is added to lookup lists, and never updated.
*
 * - A ref to the key must be held (through key->refs) if a pointer to the
* key is to be accessed after key->lock is released.
*
* - a mctp_sk_key contains a reference to a struct sock; this is valid
* for the life of the key. On sock destruction (through unhash), the key is
* removed from lists (see below), and marked invalid.
*
* - these mctp_sk_keys appear on two lists:
* 1) the struct mctp_sock->keys list
* 2) the struct netns_mctp->keys list
*
 * presence on these lists requires a (single) refcount to be held; both
* lists are updated as a single operation.
*
* Updates and lookups in either list are performed under the
* netns_mctp->keys lock. Lookup functions will need to lock the key and
* take a reference before unlocking the keys_lock. Consequently, the list's
* keys_lock *cannot* be acquired with the individual key->lock held.
*
* - a key may have a sk_buff attached as part of an in-progress message
* reassembly (->reasm_head). The reasm data is protected by the individual
* key->lock.
*
* - there are two destruction paths for a mctp_sk_key:
*
* - through socket unhash (see mctp_sk_unhash). This performs the list
* removal under keys_lock.
*
* - where a key is established to receive a reply message: after receiving
* the (complete) reply, or during reassembly errors. Here, we clean up
* the reassembly context (marking reasm_dead, to prevent another from
 * starting), and remove the key from the netns & socket lists.
*
* - through an expiry timeout, on a per-socket timer
*/
struct mctp_sk_key {
	mctp_eid_t	peer_addr;
	mctp_eid_t	local_addr; /* MCTP_ADDR_ANY for local owned tags */
	__u8		tag; /* incoming tag match; invert TO for local */

	/* we hold a ref to sk when set */
	struct sock	*sk;

	/* routing lookup list */
	struct hlist_node hlist;

	/* per-socket list */
	struct hlist_node sklist;

	/* lock protects against concurrent updates to the reassembly and
	 * expiry data below.
	 */
	spinlock_t	lock;

	/* Keys are referenced during the output path, which may sleep */
	refcount_t	refs;

	/* incoming fragment reassembly context */
	struct sk_buff	*reasm_head;
	struct sk_buff	**reasm_tailp;
	bool		reasm_dead;
	u8		last_seq;

	/* key validity */
	bool		valid;

	/* expiry timeout; valid (above) cleared on expiry */
	unsigned long	expiry;

	/* free to use for device flow state tracking. Initialised to
	 * zero on initial key creation
	 */
	unsigned long	dev_flow_state;
	struct mctp_dev	*dev;

	/* a tag allocated with SIOCMCTPALLOCTAG ioctl will not expire
	 * automatically on timeout or response, instead SIOCMCTPDROPTAG
	 * is used.
	 */
	bool		manual_alloc;
};
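
/* Illustrative sketch only (hypothetical helper, not upstream code):
 * the lookup pattern the locking rules above require. The list walk
 * holds net->mctp.keys_lock; a matching key is locked, validated and
 * referenced before keys_lock is dropped, and keys_lock is never
 * acquired while a key->lock is held.
 */
static inline struct mctp_sk_key *example_mctp_key_lookup(struct net *net,
							   mctp_eid_t peer,
							   mctp_eid_t local,
							   u8 tag)
{
	struct mctp_sk_key *key, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&net->mctp.keys_lock, flags);
	hlist_for_each_entry(key, &net->mctp.keys, hlist) {
		if (!mctp_address_matches(key->peer_addr, peer))
			continue;
		if (!mctp_address_matches(key->local_addr, local))
			continue;
		if (key->tag != tag)
			continue;
		spin_lock(&key->lock);
		if (key->valid) {
			refcount_inc(&key->refs);
			found = key;
		}
		spin_unlock(&key->lock);
		if (found)
			break;
	}
	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);

	return found;
}
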
struct mctp_skb_cb {
	unsigned int	magic;
	unsigned int	net;
	int		ifindex; /* extended/direct addressing if set */
	mctp_eid_t	src;
	unsigned char	halen;
	unsigned char	haddr[MAX_ADDR_LEN];
};
/* skb control-block accessors with a little extra debugging for initial
* development.
*
* TODO: remove checks & mctp_skb_cb->magic; replace callers of __mctp_cb
* with mctp_cb().
*
* __mctp_cb() is only for the initial ingress code; we should see ->magic set
* at all times after this.
*/
static inline struct mctp_skb_cb *__mctp_cb(struct sk_buff *skb)
{
	struct mctp_skb_cb *cb = (void *)skb->cb;

	cb->magic = 0x4d435450;
	return cb;
}

static inline struct mctp_skb_cb *mctp_cb(struct sk_buff *skb)
{
	struct mctp_skb_cb *cb = (void *)skb->cb;

	BUILD_BUG_ON(sizeof(struct mctp_skb_cb) > sizeof(skb->cb));
	WARN_ON(cb->magic != 0x4d435450);
	return (void *)(skb->cb);
}
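
/* Illustrative sketch only (hypothetical driver code): an ingress path
 * is expected to use __mctp_cb() to initialise the control block, for
 * example recording the sender's link-layer address for extended
 * addressing; later stages then use mctp_cb().
 */
static inline void example_mctp_rx_fill_cb(struct sk_buff *skb,
					   unsigned char lladdr)
{
	struct mctp_skb_cb *cb = __mctp_cb(skb);

	cb->halen = 1;
	cb->haddr[0] = lladdr;
}
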
/* If CONFIG_MCTP_FLOWS, we may add one of these as a SKB extension,
* indicating the flow to the device driver.
*/
struct mctp_flow {
	struct mctp_sk_key *key;
};
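
#ifdef CONFIG_MCTP_FLOWS
/* Illustrative sketch only (hypothetical helper, not upstream API):
 * a driver can look up the flow extension on a packet it is about to
 * transmit, and from it the key's dev_flow_state.
 */
static inline struct mctp_sk_key *example_mctp_flow_key(struct sk_buff *skb)
{
	struct mctp_flow *flow = skb_ext_find(skb, SKB_EXT_MCTP);

	return flow ? flow->key : NULL;
}
#endif
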
/* Route definition.
*
* These are held in the pernet->mctp.routes list, with RCU protection for
* removed routes. We hold a reference to the netdev; routes need to be
* dropped on NETDEV_UNREGISTER events.
*
* Updates to the route table are performed under rtnl; all reads under RCU,
 * so routes cannot be referenced over an RCU grace period. Specifically, a
 * caller cannot block between mctp_route_lookup() and mctp_route_release().
*/
struct mctp_route {
	mctp_eid_t	min, max;

	struct mctp_dev	*dev;
	unsigned int	mtu;
	unsigned char	type;
	int		(*output)(struct mctp_route *route,
				  struct sk_buff *skb);

	struct list_head list;
	refcount_t	refs;
	struct rcu_head	rcu;
};
/* route interfaces */
struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
				     mctp_eid_t daddr);
int mctp_local_output(struct sock *sk, struct mctp_route *rt,
		      struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag);
void mctp_key_unref(struct mctp_sk_key *key);
struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
					 mctp_eid_t daddr, mctp_eid_t saddr,
					 bool manual, u8 *tagp);
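
/* Illustrative sketch only (hypothetical caller, not upstream code): a
 * local transmit path built from the interfaces above. This assumes
 * mctp_local_output() consumes the route reference taken by
 * mctp_route_lookup(); per the locking notes on struct mctp_route, the
 * caller must not block between lookup and release.
 */
static inline int example_mctp_send(struct sock *sk, struct net *net,
				    unsigned int netid, mctp_eid_t daddr,
				    struct sk_buff *skb)
{
	struct mctp_route *rt;

	rt = mctp_route_lookup(net, netid, daddr);
	if (!rt)
		return -EHOSTUNREACH;

	/* MCTP_TAG_OWNER requests allocation of an owned tag for this
	 * message; a reply will match the resulting mctp_sk_key.
	 */
	return mctp_local_output(sk, rt, skb, daddr, MCTP_TAG_OWNER);
}
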
/* routing <--> device interface */
unsigned int mctp_default_net(struct net *net);
int mctp_default_net_set(struct net *net, unsigned int index);
int mctp_route_add_local(struct mctp_dev *mdev, mctp_eid_t addr);
int mctp_route_remove_local(struct mctp_dev *mdev, mctp_eid_t addr);
void mctp_route_remove_dev(struct mctp_dev *mdev);
/* neighbour definitions */
enum mctp_neigh_source {
	MCTP_NEIGH_STATIC,
	MCTP_NEIGH_DISCOVER,
};
struct mctp_neigh {
	struct mctp_dev	*dev;
	mctp_eid_t	eid;
	enum mctp_neigh_source source;
	unsigned char	ha[MAX_ADDR_LEN];

	struct list_head list;
	struct rcu_head	rcu;
};
int mctp_neigh_init(void);
void mctp_neigh_exit(void);
/* ret_hwaddr may be NULL, otherwise must have space for MAX_ADDR_LEN */
int mctp_neigh_lookup(struct mctp_dev *dev, mctp_eid_t eid,
		      void *ret_hwaddr);
void mctp_neigh_remove_dev(struct mctp_dev *mdev);
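
/* Illustrative sketch only (hypothetical helper): ret_hwaddr may be
 * passed as NULL when the caller only needs to know whether a
 * neighbour entry exists for the peer.
 */
static inline bool example_mctp_peer_has_neigh(struct mctp_dev *dev,
					       mctp_eid_t eid)
{
	return mctp_neigh_lookup(dev, eid, NULL) == 0;
}
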
int mctp_routes_init(void);
void mctp_routes_exit(void);
void mctp_device_init(void);
void mctp_device_exit(void);
#endif /* __NET_MCTP_H */