// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include "mlx5_core.h"
#include "lib/mlx5.h"
struct mlx5_st_idx_data {
	refcount_t usecount;
	u16 tag;
};

struct mlx5_st {
	/* serialize access upon alloc/free flows */
	struct mutex lock;
	struct xa_limit index_limit;
	struct xarray idx_xa; /* key == index, value == struct mlx5_st_idx_data */
};
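
/*
 * Set up the steering tag table state for the device. Returns NULL when TPH
 * is not supported or cannot be enabled; callers treat a NULL table as
 * "no steering tags available". SFs share the parent device's table.
 */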
struct mlx5_st *mlx5_st_create(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	struct mlx5_st *st;
	u16 num_entries;
	int ret;

	if (!MLX5_CAP_GEN(dev, mkey_pcie_tph))
		return NULL;

#ifdef CONFIG_MLX5_SF
	if (mlx5_core_is_sf(dev))
		return dev->priv.parent_mdev->st;
#endif

	/* Checking whether the device is capable */
	if (!pdev->tph_cap)
		return NULL;

	num_entries = pcie_tph_get_st_table_size(pdev);
	/* We need a reserved entry for non TPH cases */
	if (num_entries < 2)
		return NULL;

	/* The OS doesn't support ST */
	ret = pcie_enable_tph(pdev, PCI_TPH_ST_DS_MODE);
	if (ret)
		return NULL;

	st = kzalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto end;

	mutex_init(&st->lock);
	xa_init_flags(&st->idx_xa, XA_FLAGS_ALLOC);
	/* entry 0 is reserved for non TPH cases */
	st->index_limit.min = MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX + 1;
	st->index_limit.max = num_entries - 1;

	return st;

end:
	pcie_disable_tph(dev->pdev);
	return NULL;
}
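
/*
 * Tear down the steering tag table state. For SFs this is a no-op, since the
 * table belongs to the parent device. All indices are expected to have been
 * released by the time this is called.
 */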
void mlx5_st_destroy(struct mlx5_core_dev *dev)
{
	struct mlx5_st *st = dev->st;

	if (mlx5_core_is_sf(dev) || !st)
		return;

	pcie_disable_tph(dev->pdev);
	WARN_ON_ONCE(!xa_empty(&st->idx_xa));
	kfree(st);
}
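
/**
 * mlx5_st_alloc_index - allocate a steering tag table index
 * @dev: mlx5 core device
 * @mem_type: target memory type, as defined by enum tph_mem_type
 * @cpu_uid: identifier of the CPU whose steering tag is requested
 * @st_index: on success, the steering tag table index to use in the mkey
 *
 * Looks up the steering tag for the given CPU and memory type, then either
 * reuses an existing table entry that already holds the same tag (bumping its
 * refcount) or programs a new entry in the device's ST table.
 *
 * Returns 0 on success or a negative errno (-EOPNOTSUPP when TPH is not
 * enabled on this device).
 */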
int mlx5_st_alloc_index(struct mlx5_core_dev *dev, enum tph_mem_type mem_type,
			unsigned int cpu_uid, u16 *st_index)
{
	struct mlx5_st_idx_data *idx_data;
	struct mlx5_st *st = dev->st;
	unsigned long index;
	u32 xa_id;
	u16 tag;
	int ret;

	if (!st)
		return -EOPNOTSUPP;

	ret = pcie_tph_get_cpu_st(dev->pdev, mem_type, cpu_uid, &tag);
	if (ret)
		return ret;

	mutex_lock(&st->lock);

	/* Reuse an existing entry that already holds this tag */
	xa_for_each(&st->idx_xa, index, idx_data) {
		if (tag == idx_data->tag) {
			refcount_inc(&idx_data->usecount);
			*st_index = index;
			goto end;
		}
	}

	idx_data = kzalloc(sizeof(*idx_data), GFP_KERNEL);
	if (!idx_data) {
		ret = -ENOMEM;
		goto end;
	}

	refcount_set(&idx_data->usecount, 1);
	idx_data->tag = tag;

	ret = xa_alloc(&st->idx_xa, &xa_id, idx_data, st->index_limit, GFP_KERNEL);
	if (ret)
		goto clean_idx_data;

	ret = pcie_tph_set_st_entry(dev->pdev, xa_id, tag);
	if (ret)
		goto clean_idx_xa;

	*st_index = xa_id;
	goto end;

clean_idx_xa:
	xa_erase(&st->idx_xa, xa_id);
clean_idx_data:
	kfree(idx_data);
end:
	mutex_unlock(&st->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_st_alloc_index);
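
/**
 * mlx5_st_dealloc_index - release a steering tag table index
 * @dev: mlx5 core device
 * @st_index: index previously returned by mlx5_st_alloc_index()
 *
 * Drops one reference on the entry; the index is freed for reuse once the
 * last user releases it. The PCI ST table entry itself is left untouched,
 * since no mkey will reference the index anymore.
 *
 * Returns 0 on success, -EOPNOTSUPP when TPH is not enabled, or -EINVAL if
 * the index was never allocated.
 */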
int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index)
{
	struct mlx5_st_idx_data *idx_data;
	struct mlx5_st *st = dev->st;
	int ret = 0;

	if (!st)
		return -EOPNOTSUPP;

	mutex_lock(&st->lock);
	idx_data = xa_load(&st->idx_xa, st_index);
	if (WARN_ON_ONCE(!idx_data)) {
		ret = -EINVAL;
		goto end;
	}

	if (refcount_dec_and_test(&idx_data->usecount)) {
		xa_erase(&st->idx_xa, st_index);
		/* We leave PCI config space as it was; no mkey will refer to it anymore */
		kfree(idx_data);
	}

end:
	mutex_unlock(&st->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_st_dealloc_index);