author    David S. Miller <davem@davemloft.net>  2016-08-23 21:08:23 +0300
committer David S. Miller <davem@davemloft.net>  2016-08-23 21:08:23 +0300
commit    01555e64491438a8676f30dbd3fa464417a42e5b (patch)
tree      9c2b6eb215ea2f9dbdf32b0cb0361c5dfe2cc460
parent    2d03d4394c13d017968c707150d97ad251900084 (diff)
parent    575ddf5888eaf8f271cb3df7b0806cb2db2c333a (diff)
download  linux-01555e64491438a8676f30dbd3fa464417a42e5b.tar.xz
Merge tag 'shared-for-4.9-1' of git://git.kernel.org/pub/scm/linux/kernel/git/leon/linux-rdma
Saeed Mahameed says:

====================
Mellanox mlx5 core driver updates 2016-08-20

This series contains several low-level and API updates for the mlx5 core
commands interface and mlx5_ifc.h, to be shared as base code for the net-next
and rdma mlx5 4.9 submissions.

From Saeed, ten patches that refactor old layouts of firmware commands which
were manually generated before mlx5_ifc was introduced. All firmware command
inbox/outbox layouts now use mlx5_ifc, and the old manually generated
structures are removed. On top of those ten patches, two more patches unify
the mlx5 command execution interface and improve the driver log messages in
that area.

From Hadar and Ilya, the needed hardware bits and infrastructure for minimum
inline headers setting and encap/decap commands and capabilities, needed for
E-Switch offloads.

This series applies on top of the latest net-next and rdma/master, and merges
smoothly with the "Mellanox 100G mlx5 fixes 2016-08-16" series already applied
to the net branch.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
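[Editor's note] For context, the command-building pattern that this series converts the driver to looks roughly like the sketch below. It is an illustrative example only (the helper name example_destroy_cq is hypothetical, not code from the patch); the MLX5_ST_SZ_DW/MLX5_SET layout macros and mlx5_cmd_exec() are the real mlx5 interfaces that appear throughout the diffs that follow.

    /*
     * Illustrative sketch: the general shape of a firmware command after
     * this series.  Inbox/outbox layouts come from mlx5_ifc.h instead of
     * hand-built mailbox structs.
     */
    static int example_destroy_cq(struct mlx5_core_dev *dev, u32 cqn)
    {
            u32 in[MLX5_ST_SZ_DW(destroy_cq_in)]   = {0};
            u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};

            /* fields are set by name via the mlx5_ifc accessors */
            MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
            MLX5_SET(destroy_cq_in, in, cqn, cqn);

            /* mlx5_cmd_exec() now also validates the firmware status */
            return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
    }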
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c                        110
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c                       10
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h                     2
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c                        184
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c                        189
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c          261
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cq.c           109
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/debugfs.c       50
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_common.c     23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c     3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c       47
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c         4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c            80
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c       98
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c       178
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h         7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c       18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c            43
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mad.c           41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c          91
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mcg.c           72
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h     17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mr.c           189
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c    156
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pd.c            61
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c         149
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/qp.c           299
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/rl.c            11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sriov.c          7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/srq.c           49
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.c     183
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/uar.c           67
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c         94
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vxlan.c         29
-rw-r--r--  include/linux/mlx5/cq.h                                  6
-rw-r--r--  include/linux/mlx5/device.h                            429
-rw-r--r--  include/linux/mlx5/driver.h                             20
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h                          116
-rw-r--r--  include/linux/mlx5/qp.h                                128
-rw-r--r--  include/linux/mlx5/vport.h                               2
40 files changed, 1390 insertions, 2242 deletions
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 308a358e5b46..35a9f718e669 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -747,14 +747,16 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
struct ib_ucontext *context, struct mlx5_ib_cq *cq,
- int entries, struct mlx5_create_cq_mbox_in **cqb,
+ int entries, u32 **cqb,
int *cqe_size, int *index, int *inlen)
{
struct mlx5_ib_create_cq ucmd;
size_t ucmdlen;
int page_shift;
+ __be64 *pas;
int npages;
int ncont;
+ void *cqc;
int err;
ucmdlen =
@@ -792,14 +794,20 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);
- *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * ncont;
+ *inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
*cqb = mlx5_vzalloc(*inlen);
if (!*cqb) {
err = -ENOMEM;
goto err_db;
}
- mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0);
- (*cqb)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+
+ pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
+ mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0);
+
+ cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
+ MLX5_SET(cqc, cqc, log_page_size,
+ page_shift - MLX5_ADAPTER_PAGE_SHIFT);
*index = to_mucontext(context)->uuari.uars[0].index;
@@ -834,9 +842,10 @@ static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
int entries, int cqe_size,
- struct mlx5_create_cq_mbox_in **cqb,
- int *index, int *inlen)
+ u32 **cqb, int *index, int *inlen)
{
+ __be64 *pas;
+ void *cqc;
int err;
err = mlx5_db_alloc(dev->mdev, &cq->db);
@@ -853,15 +862,21 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
init_cq_buf(cq, &cq->buf);
- *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages;
+ *inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages;
*cqb = mlx5_vzalloc(*inlen);
if (!*cqb) {
err = -ENOMEM;
goto err_buf;
}
- mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);
- (*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+ pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
+ mlx5_fill_page_array(&cq->buf.buf, pas);
+
+ cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
+ MLX5_SET(cqc, cqc, log_page_size,
+ cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+
*index = dev->mdev->priv.uuari.uars[0].index;
return 0;
@@ -895,11 +910,12 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
{
int entries = attr->cqe;
int vector = attr->comp_vector;
- struct mlx5_create_cq_mbox_in *cqb = NULL;
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_cq *cq;
int uninitialized_var(index);
int uninitialized_var(inlen);
+ u32 *cqb = NULL;
+ void *cqc;
int cqe_size;
unsigned int irqn;
int eqn;
@@ -945,19 +961,20 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
}
- cq->cqe_size = cqe_size;
- cqb->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
-
- if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
- cqb->ctx.cqe_sz_flags |= (1 << 1);
-
- cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index);
err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
if (err)
goto err_cqb;
- cqb->ctx.c_eqn = cpu_to_be16(eqn);
- cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma);
+ cq->cqe_size = cqe_size;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
+ MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
+ MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
+ MLX5_SET(cqc, cqc, uar_page, index);
+ MLX5_SET(cqc, cqc, c_eqn, eqn);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
+ if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
+ MLX5_SET(cqc, cqc, oi, 1);
err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
if (err)
@@ -1088,27 +1105,15 @@ void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
- struct mlx5_modify_cq_mbox_in *in;
struct mlx5_ib_dev *dev = to_mdev(cq->device);
struct mlx5_ib_cq *mcq = to_mcq(cq);
int err;
- u32 fsel;
if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
return -ENOSYS;
- in = kzalloc(sizeof(*in), GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- in->cqn = cpu_to_be32(mcq->mcq.cqn);
- fsel = (MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);
- in->ctx.cq_period = cpu_to_be16(cq_period);
- in->ctx.cq_max_count = cpu_to_be16(cq_count);
- in->field_select = cpu_to_be32(fsel);
- err = mlx5_core_modify_cq(dev->mdev, &mcq->mcq, in, sizeof(*in));
- kfree(in);
-
+ err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
+ cq_period, cq_count);
if (err)
mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);
@@ -1241,9 +1246,11 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
struct mlx5_ib_cq *cq = to_mcq(ibcq);
- struct mlx5_modify_cq_mbox_in *in;
+ void *cqc;
+ u32 *in;
int err;
int npas;
+ __be64 *pas;
int page_shift;
int inlen;
int uninitialized_var(cqe_size);
@@ -1285,28 +1292,37 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
if (err)
goto ex;
- inlen = sizeof(*in) + npas * sizeof(in->pas[0]);
+ inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
+ MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;
+
in = mlx5_vzalloc(inlen);
if (!in) {
err = -ENOMEM;
goto ex_resize;
}
+ pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
if (udata)
mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
- in->pas, 0);
+ pas, 0);
else
- mlx5_fill_page_array(&cq->resize_buf->buf, in->pas);
-
- in->field_select = cpu_to_be32(MLX5_MODIFY_CQ_MASK_LOG_SIZE |
- MLX5_MODIFY_CQ_MASK_PG_OFFSET |
- MLX5_MODIFY_CQ_MASK_PG_SIZE);
- in->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
- in->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
- in->ctx.page_offset = 0;
- in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(entries) << 24);
- in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
- in->cqn = cpu_to_be32(cq->mcq.cqn);
+ mlx5_fill_page_array(&cq->resize_buf->buf, pas);
+
+ MLX5_SET(modify_cq_in, in,
+ modify_field_select_resize_field_select.resize_field_select.resize_field_select,
+ MLX5_MODIFY_CQ_MASK_LOG_SIZE |
+ MLX5_MODIFY_CQ_MASK_PG_OFFSET |
+ MLX5_MODIFY_CQ_MASK_PG_SIZE);
+
+ cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
+
+ MLX5_SET(cqc, cqc, log_page_size,
+ page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
+ MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
+
+ MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
+ MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);
err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
if (err)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index a84bb766fc62..6fb77d791045 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -233,23 +233,19 @@ static int set_roce_addr(struct ib_device *device, u8 port_num,
const union ib_gid *gid,
const struct ib_gid_attr *attr)
{
- struct mlx5_ib_dev *dev = to_mdev(device);
- u32 in[MLX5_ST_SZ_DW(set_roce_address_in)];
- u32 out[MLX5_ST_SZ_DW(set_roce_address_out)];
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
enum rdma_link_layer ll = mlx5_ib_port_link_layer(device, port_num);
if (ll != IB_LINK_LAYER_ETHERNET)
return -EINVAL;
- memset(in, 0, sizeof(in));
-
ib_gid_to_mlx5_roce_addr(gid, attr, in_addr);
MLX5_SET(set_roce_address_in, in, roce_address_index, index);
MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
-
- memset(out, 0, sizeof(out));
return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 372385d0f993..a59034aaa297 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -504,7 +504,7 @@ struct mlx5_ib_mr {
int umred;
int npages;
struct mlx5_ib_dev *dev;
- struct mlx5_create_mkey_mbox_out out;
+ u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
struct mlx5_core_sig_ctx *sig;
int live;
void *descs_alloc;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 4b021305c321..6f7e34753abc 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -135,20 +135,10 @@ static void reg_mr_callback(int status, void *context)
return;
}
- if (mr->out.hdr.status) {
- mlx5_ib_warn(dev, "failed - status %d, syndorme 0x%x\n",
- mr->out.hdr.status,
- be32_to_cpu(mr->out.hdr.syndrome));
- kfree(mr);
- dev->fill_delay = 1;
- mod_timer(&dev->delay_timer, jiffies + HZ);
- return;
- }
-
spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
key = dev->mdev->priv.mkey_key++;
spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
- mr->mmkey.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;
+ mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;
cache->last_add = jiffies;
@@ -170,16 +160,19 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent = &cache->ent[c];
- struct mlx5_create_mkey_mbox_in *in;
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
struct mlx5_ib_mr *mr;
int npages = 1 << ent->order;
+ void *mkc;
+ u32 *in;
int err = 0;
int i;
- in = kzalloc(sizeof(*in), GFP_KERNEL);
+ in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
for (i = 0; i < num; i++) {
if (ent->pending >= MAX_PENDING_REG_MR) {
err = -EAGAIN;
@@ -194,18 +187,22 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
mr->order = ent->order;
mr->umred = 1;
mr->dev = dev;
- in->seg.status = MLX5_MKEY_STATUS_FREE;
- in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
- in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
- in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN;
- in->seg.log2_page_size = 12;
+
+ MLX5_SET(mkc, mkc, free, 1);
+ MLX5_SET(mkc, mkc, umr_en, 1);
+ MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
+
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(mkc, mkc, translations_octword_size, (npages + 1) / 2);
+ MLX5_SET(mkc, mkc, log_page_size, 12);
spin_lock_irq(&ent->lock);
ent->pending++;
spin_unlock_irq(&ent->lock);
- err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in,
- sizeof(*in), reg_mr_callback,
- mr, &mr->out);
+ err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
+ in, inlen,
+ mr->out, sizeof(mr->out),
+ reg_mr_callback, mr);
if (err) {
spin_lock_irq(&ent->lock);
ent->pending--;
@@ -670,30 +667,38 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
struct mlx5_core_dev *mdev = dev->mdev;
- struct mlx5_create_mkey_mbox_in *in;
- struct mlx5_mkey_seg *seg;
struct mlx5_ib_mr *mr;
+ void *mkc;
+ u32 *in;
int err;
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
- in = kzalloc(sizeof(*in), GFP_KERNEL);
+ in = kzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err_free;
}
- seg = &in->seg;
- seg->flags = convert_access(acc) | MLX5_ACCESS_MODE_PA;
- seg->flags_pd = cpu_to_be32(to_mpd(pd)->pdn | MLX5_MKEY_LEN64);
- seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
- seg->start_addr = 0;
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+ MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
+ MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
+ MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
+ MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
+ MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
+ MLX5_SET(mkc, mkc, lr, 1);
- err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, sizeof(*in), NULL, NULL,
- NULL);
+ MLX5_SET(mkc, mkc, length64, 1);
+ MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET64(mkc, mkc, start_addr, 0);
+
+ err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
if (err)
goto err_in;
@@ -1063,9 +1068,11 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
int page_shift, int access_flags)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct mlx5_create_mkey_mbox_in *in;
struct mlx5_ib_mr *mr;
+ __be64 *pas;
+ void *mkc;
int inlen;
+ u32 *in;
int err;
bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
@@ -1073,31 +1080,41 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
if (!mr)
return ERR_PTR(-ENOMEM);
- inlen = sizeof(*in) + sizeof(*in->pas) * ((npages + 1) / 2) * 2;
+ inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
+ sizeof(*pas) * ((npages + 1) / 2) * 2;
in = mlx5_vzalloc(inlen);
if (!in) {
err = -ENOMEM;
goto err_1;
}
- mlx5_ib_populate_pas(dev, umem, page_shift, in->pas,
+ pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ mlx5_ib_populate_pas(dev, umem, page_shift, pas,
pg_cap ? MLX5_IB_MTT_PRESENT : 0);
- /* The MLX5_MKEY_INBOX_PG_ACCESS bit allows setting the access flags
+ /* The pg_access bit allows setting the access flags
* in the page list submitted with the command. */
- in->flags = pg_cap ? cpu_to_be32(MLX5_MKEY_INBOX_PG_ACCESS) : 0;
- in->seg.flags = convert_access(access_flags) |
- MLX5_ACCESS_MODE_MTT;
- in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
- in->seg.start_addr = cpu_to_be64(virt_addr);
- in->seg.len = cpu_to_be64(length);
- in->seg.bsfs_octo_size = 0;
- in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
- in->seg.log2_page_size = page_shift;
- in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
- in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
- 1 << page_shift));
- err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen, NULL,
- NULL, NULL);
+ MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
+ MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
+ MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
+ MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
+ MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
+ MLX5_SET(mkc, mkc, lr, 1);
+
+ MLX5_SET64(mkc, mkc, start_addr, virt_addr);
+ MLX5_SET64(mkc, mkc, len, length);
+ MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(mkc, mkc, bsf_octword_size, 0);
+ MLX5_SET(mkc, mkc, translations_octword_size,
+ get_octo_len(virt_addr, length, 1 << page_shift));
+ MLX5_SET(mkc, mkc, log_page_size, page_shift);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+ get_octo_len(virt_addr, length, 1 << page_shift));
+
+ err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
if (err) {
mlx5_ib_warn(dev, "create mkey failed\n");
goto err_2;
@@ -1523,30 +1540,32 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
u32 max_num_sg)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct mlx5_create_mkey_mbox_in *in;
- struct mlx5_ib_mr *mr;
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
int ndescs = ALIGN(max_num_sg, 4);
+ struct mlx5_ib_mr *mr;
+ void *mkc;
+ u32 *in;
int err;
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
- in = kzalloc(sizeof(*in), GFP_KERNEL);
+ in = kzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err_free;
}
- in->seg.status = MLX5_MKEY_STATUS_FREE;
- in->seg.xlt_oct_size = cpu_to_be32(ndescs);
- in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
- in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, free, 1);
+ MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
if (mr_type == IB_MR_TYPE_MEM_REG) {
- mr->access_mode = MLX5_ACCESS_MODE_MTT;
- in->seg.log2_page_size = PAGE_SHIFT;
-
+ mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
+ MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
err = mlx5_alloc_priv_descs(pd->device, mr,
ndescs, sizeof(u64));
if (err)
@@ -1555,7 +1574,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
mr->desc_size = sizeof(u64);
mr->max_descs = ndescs;
} else if (mr_type == IB_MR_TYPE_SG_GAPS) {
- mr->access_mode = MLX5_ACCESS_MODE_KLM;
+ mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
err = mlx5_alloc_priv_descs(pd->device, mr,
ndescs, sizeof(struct mlx5_klm));
@@ -1566,9 +1585,8 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
u32 psv_index[2];
- in->seg.flags_pd = cpu_to_be32(be32_to_cpu(in->seg.flags_pd) |
- MLX5_MKEY_BSF_EN);
- in->seg.bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
+ MLX5_SET(mkc, mkc, bsf_en, 1);
+ MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
if (!mr->sig) {
err = -ENOMEM;
@@ -1581,7 +1599,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
if (err)
goto err_free_sig;
- mr->access_mode = MLX5_ACCESS_MODE_KLM;
+ mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
mr->sig->psv_memory.psv_idx = psv_index[0];
mr->sig->psv_wire.psv_idx = psv_index[1];
@@ -1595,9 +1613,10 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
goto err_free_in;
}
- in->seg.flags = MLX5_PERM_UMR_EN | mr->access_mode;
- err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, sizeof(*in),
- NULL, NULL, NULL);
+ MLX5_SET(mkc, mkc, access_mode, mr->access_mode);
+ MLX5_SET(mkc, mkc, umr_en, 1);
+
+ err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
if (err)
goto err_destroy_psv;
@@ -1633,8 +1652,10 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct mlx5_create_mkey_mbox_in *in = NULL;
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
struct mlx5_ib_mw *mw = NULL;
+ u32 *in = NULL;
+ void *mkc;
int ndescs;
int err;
struct mlx5_ib_alloc_mw req = {};
@@ -1658,23 +1679,24 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);
mw = kzalloc(sizeof(*mw), GFP_KERNEL);
- in = kzalloc(sizeof(*in), GFP_KERNEL);
+ in = kzalloc(inlen, GFP_KERNEL);
if (!mw || !in) {
err = -ENOMEM;
goto free;
}
- in->seg.status = MLX5_MKEY_STATUS_FREE;
- in->seg.xlt_oct_size = cpu_to_be32(ndescs);
- in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
- in->seg.flags = MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_KLM |
- MLX5_PERM_LOCAL_READ;
- if (type == IB_MW_TYPE_2)
- in->seg.flags_pd |= cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
- in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
-
- err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, sizeof(*in),
- NULL, NULL, NULL);
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+ MLX5_SET(mkc, mkc, free, 1);
+ MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
+ MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(mkc, mkc, umr_en, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_KLMS);
+ MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+
+ err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
if (err)
goto free;
@@ -1811,7 +1833,7 @@ int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
mr->desc_size * mr->max_descs,
DMA_TO_DEVICE);
- if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
+ if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
else
n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 0dd7d93cac95..f3c943f6458e 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -726,7 +726,7 @@ err_umem:
static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_qp *qp, struct ib_udata *udata,
struct ib_qp_init_attr *attr,
- struct mlx5_create_qp_mbox_in **in,
+ u32 **in,
struct mlx5_ib_create_qp_resp *resp, int *inlen,
struct mlx5_ib_qp_base *base)
{
@@ -739,6 +739,8 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
u32 offset = 0;
int uuarn;
int ncont = 0;
+ __be64 *pas;
+ void *qpc;
int err;
err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
@@ -795,20 +797,24 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
ubuffer->umem = NULL;
}
- *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
+ *inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
+ MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont;
*in = mlx5_vzalloc(*inlen);
if (!*in) {
err = -ENOMEM;
goto err_umem;
}
+
+ pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
if (ubuffer->umem)
- mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift,
- (*in)->pas, 0);
- (*in)->ctx.log_pg_sz_remote_qpn =
- cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
- (*in)->ctx.params2 = cpu_to_be32(offset << 6);
+ mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, pas, 0);
+
+ qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
- (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
+ MLX5_SET(qpc, qpc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(qpc, qpc, page_offset, offset);
+
+ MLX5_SET(qpc, qpc, uar_page, uar_index);
resp->uuar_index = uuarn;
qp->uuarn = uuarn;
@@ -857,12 +863,13 @@ static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp,
static int create_kernel_qp(struct mlx5_ib_dev *dev,
struct ib_qp_init_attr *init_attr,
struct mlx5_ib_qp *qp,
- struct mlx5_create_qp_mbox_in **in, int *inlen,
+ u32 **in, int *inlen,
struct mlx5_ib_qp_base *base)
{
enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
struct mlx5_uuar_info *uuari;
int uar_index;
+ void *qpc;
int uuarn;
int err;
@@ -902,25 +909,29 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
}
qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
- *inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
+ *inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
+ MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages;
*in = mlx5_vzalloc(*inlen);
if (!*in) {
err = -ENOMEM;
goto err_buf;
}
- (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
- (*in)->ctx.log_pg_sz_remote_qpn =
- cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
+
+ qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
+ MLX5_SET(qpc, qpc, uar_page, uar_index);
+ MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+
/* Set "fast registration enabled" for all kernel QPs */
- (*in)->ctx.params1 |= cpu_to_be32(1 << 11);
- (*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);
+ MLX5_SET(qpc, qpc, fre, 1);
+ MLX5_SET(qpc, qpc, rlky, 1);
if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1()) {
- (*in)->ctx.deth_sqpn = cpu_to_be32(1);
+ MLX5_SET(qpc, qpc, deth_sqpn, 1);
qp->flags |= MLX5_IB_QP_SQPN_QP1;
}
- mlx5_fill_page_array(&qp->buf, (*in)->pas);
+ mlx5_fill_page_array(&qp->buf,
+ (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas));
err = mlx5_db_alloc(dev->mdev, &qp->db);
if (err) {
@@ -974,15 +985,15 @@ static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
}
-static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
+static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
(attr->qp_type == IB_QPT_XRC_INI))
- return cpu_to_be32(MLX5_SRQ_RQ);
+ return MLX5_SRQ_RQ;
else if (!qp->has_rq)
- return cpu_to_be32(MLX5_ZERO_LEN_RQ);
+ return MLX5_ZERO_LEN_RQ;
else
- return cpu_to_be32(MLX5_NON_ZERO_RQ);
+ return MLX5_NON_ZERO_RQ;
}
static int is_connected(enum ib_qp_type qp_type)
@@ -996,13 +1007,10 @@ static int is_connected(enum ib_qp_type qp_type)
static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq, u32 tdn)
{
- u32 in[MLX5_ST_SZ_DW(create_tis_in)];
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
- memset(in, 0, sizeof(in));
-
MLX5_SET(tisc, tisc, transport_domain, tdn);
-
return mlx5_core_create_tis(dev->mdev, in, sizeof(in), &sq->tisn);
}
@@ -1191,7 +1199,7 @@ static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
}
static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
- struct mlx5_create_qp_mbox_in *in,
+ u32 *in,
struct ib_pd *pd)
{
struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
@@ -1461,18 +1469,18 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct ib_udata *udata, struct mlx5_ib_qp *qp)
{
struct mlx5_ib_resources *devr = &dev->devr;
+ int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
struct mlx5_core_dev *mdev = dev->mdev;
- struct mlx5_ib_qp_base *base;
struct mlx5_ib_create_qp_resp resp;
- struct mlx5_create_qp_mbox_in *in;
- struct mlx5_ib_create_qp ucmd;
struct mlx5_ib_cq *send_cq;
struct mlx5_ib_cq *recv_cq;
unsigned long flags;
- int inlen = sizeof(*in);
- int err;
u32 uidx = MLX5_IB_DEFAULT_UIDX;
+ struct mlx5_ib_create_qp ucmd;
+ struct mlx5_ib_qp_base *base;
void *qpc;
+ u32 *in;
+ int err;
base = init_attr->qp_type == IB_QPT_RAW_PACKET ?
&qp->raw_packet_qp.rq.base :
@@ -1600,7 +1608,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (err)
return err;
} else {
- in = mlx5_vzalloc(sizeof(*in));
+ in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
@@ -1610,26 +1618,29 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (is_sqp(init_attr->qp_type))
qp->port = init_attr->port_num;
- in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 |
- MLX5_QP_PM_MIGRATED << 11);
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+
+ MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type));
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
- in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn);
+ MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);
else
- in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE);
+ MLX5_SET(qpc, qpc, latency_sensitive, 1);
+
if (qp->wq_sig)
- in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);
+ MLX5_SET(qpc, qpc, wq_signature, 1);
if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
- in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST);
+ MLX5_SET(qpc, qpc, block_lb_mc, 1);
if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
- in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_MASTER);
+ MLX5_SET(qpc, qpc, cd_master, 1);
if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
- in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_SLAVE_SEND);
+ MLX5_SET(qpc, qpc, cd_slave_send, 1);
if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
- in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_SLAVE_RECV);
+ MLX5_SET(qpc, qpc, cd_slave_receive, 1);
if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
int rcqe_sz;
@@ -1639,71 +1650,68 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);
if (rcqe_sz == 128)
- in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE;
+ MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
else
- in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE;
+ MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE);
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
if (scqe_sz == 128)
- in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE;
+ MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
else
- in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE;
+ MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
}
}
if (qp->rq.wqe_cnt) {
- in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4);
- in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3;
+ MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
+ MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
}
- in->ctx.rq_type_srqn = get_rx_type(qp, init_attr);
+ MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));
if (qp->sq.wqe_cnt)
- in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11);
+ MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
else
- in->ctx.sq_crq_size |= cpu_to_be16(0x8000);
+ MLX5_SET(qpc, qpc, no_sq, 1);
/* Set default resources */
switch (init_attr->qp_type) {
case IB_QPT_XRC_TGT:
- in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
- in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
- in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
- in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn);
+ MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
+ MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
+ MLX5_SET(qpc, qpc, xrcd, to_mxrcd(init_attr->xrcd)->xrcdn);
break;
case IB_QPT_XRC_INI:
- in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
- in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
- in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
+ MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
+ MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
break;
default:
if (init_attr->srq) {
- in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn);
- in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
+ MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn);
} else {
- in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
- in->ctx.rq_type_srqn |=
- cpu_to_be32(to_msrq(devr->s1)->msrq.srqn);
+ MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn);
}
}
if (init_attr->send_cq)
- in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);
+ MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn);
if (init_attr->recv_cq)
- in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);
+ MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(init_attr->recv_cq)->mcq.cqn);
- in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);
+ MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
- if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) {
- qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
- /* 0xffffff means we ask to work with cqe version 0 */
+ /* 0xffffff means we ask to work with cqe version 0 */
+ if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
MLX5_SET(qpc, qpc, user_index, uidx);
- }
+
/* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */
if (init_attr->qp_type == IB_QPT_UD &&
(init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) {
- qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
qp->flags |= MLX5_IB_QP_LSO;
}
@@ -1860,7 +1868,6 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
struct mlx5_ib_cq *send_cq, *recv_cq;
struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
- struct mlx5_modify_qp_mbox_in *in;
unsigned long flags;
int err;
@@ -1873,16 +1880,12 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
&qp->raw_packet_qp.rq.base :
&qp->trans_qp.base;
- in = kzalloc(sizeof(*in), GFP_KERNEL);
- if (!in)
- return;
-
if (qp->state != IB_QPS_RESET) {
if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET) {
mlx5_ib_qp_disable_pagefaults(qp);
err = mlx5_core_qp_modify(dev->mdev,
- MLX5_CMD_OP_2RST_QP, in, 0,
- &base->mqp);
+ MLX5_CMD_OP_2RST_QP, 0,
+ NULL, &base->mqp);
} else {
err = modify_raw_packet_qp(dev, qp,
MLX5_CMD_OP_2RST_QP);
@@ -1924,8 +1927,6 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
base->mqp.qpn);
}
- kfree(in);
-
if (qp->create_type == MLX5_QP_KERNEL)
destroy_qp_kernel(dev, qp);
else if (qp->create_type == MLX5_QP_USER)
@@ -2511,7 +2512,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
struct mlx5_ib_cq *send_cq, *recv_cq;
struct mlx5_qp_context *context;
- struct mlx5_modify_qp_mbox_in *in;
struct mlx5_ib_pd *pd;
enum mlx5_qp_state mlx5_cur, mlx5_new;
enum mlx5_qp_optpar optpar;
@@ -2520,11 +2520,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
int err;
u16 op;
- in = kzalloc(sizeof(*in), GFP_KERNEL);
- if (!in)
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
return -ENOMEM;
- context = &in->ctx;
err = to_mlx5_st(ibqp->qp_type);
if (err < 0) {
mlx5_ib_dbg(dev, "unsupported qp type %d\n", ibqp->qp_type);
@@ -2689,12 +2688,11 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
op = optab[mlx5_cur][mlx5_new];
optpar = ib_mask_to_mlx5_opt(attr_mask);
optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
- in->optparam = cpu_to_be32(optpar);
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
err = modify_raw_packet_qp(dev, qp, op);
else
- err = mlx5_core_qp_modify(dev->mdev, op, in, sqd_event,
+ err = mlx5_core_qp_modify(dev->mdev, op, optpar, context,
&base->mqp);
if (err)
goto out;
@@ -2735,7 +2733,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
}
out:
- kfree(in);
+ kfree(context);
return err;
}
@@ -2968,7 +2966,7 @@ static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
memset(umr, 0, sizeof(*umr));
- if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
+ if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
/* KLMs take twice the size of MTTs */
ndescs *= 2;
@@ -3111,9 +3109,9 @@ static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
memset(seg, 0, sizeof(*seg));
- if (mr->access_mode == MLX5_ACCESS_MODE_MTT)
+ if (mr->access_mode == MLX5_MKC_ACCESS_MODE_MTT)
seg->log2_page_size = ilog2(mr->ibmr.page_size);
- else if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
+ else if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
/* KLMs take twice the size of MTTs */
ndescs *= 2;
@@ -3454,7 +3452,7 @@ static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
memset(seg, 0, sizeof(*seg));
seg->flags = get_umr_flags(wr->access_flags) |
- MLX5_ACCESS_MODE_KLM;
+ MLX5_MKC_ACCESS_MODE_KLMS;
seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
MLX5_MKEY_BSF_EN | pdn);
@@ -4320,21 +4318,24 @@ static int query_raw_packet_qp_state(struct mlx5_ib_dev *dev,
static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
struct ib_qp_attr *qp_attr)
{
- struct mlx5_query_qp_mbox_out *outb;
+ int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
struct mlx5_qp_context *context;
int mlx5_state;
+ u32 *outb;
int err = 0;
- outb = kzalloc(sizeof(*outb), GFP_KERNEL);
+ outb = kzalloc(outlen, GFP_KERNEL);
if (!outb)
return -ENOMEM;
- context = &outb->ctx;
err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb,
- sizeof(*outb));
+ outlen);
if (err)
goto out;
+ /* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
+ context = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, outb, qpc);
+
mlx5_state = be32_to_cpu(context->flags) >> 28;
qp->state = to_ib_qp_state(mlx5_state);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index bf682252130b..e9f6afb8c19b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -280,7 +280,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
case MLX5_CMD_OP_DEALLOC_PD:
case MLX5_CMD_OP_DEALLOC_UAR:
- case MLX5_CMD_OP_DETTACH_FROM_MCG:
+ case MLX5_CMD_OP_DETACH_FROM_MCG:
case MLX5_CMD_OP_DEALLOC_XRCD:
case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
@@ -301,6 +301,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
+ case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -402,6 +403,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
+ case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -EIO;
@@ -490,7 +492,7 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
MLX5_COMMAND_STR_CASE(ACCESS_REG);
MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
- MLX5_COMMAND_STR_CASE(DETTACH_FROM_MCG);
+ MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
MLX5_COMMAND_STR_CASE(MAD_IFC);
MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
@@ -550,15 +552,130 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
+ MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER);
+ MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER);
default: return "unknown command opcode";
}
}
+static const char *cmd_status_str(u8 status)
+{
+ switch (status) {
+ case MLX5_CMD_STAT_OK:
+ return "OK";
+ case MLX5_CMD_STAT_INT_ERR:
+ return "internal error";
+ case MLX5_CMD_STAT_BAD_OP_ERR:
+ return "bad operation";
+ case MLX5_CMD_STAT_BAD_PARAM_ERR:
+ return "bad parameter";
+ case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
+ return "bad system state";
+ case MLX5_CMD_STAT_BAD_RES_ERR:
+ return "bad resource";
+ case MLX5_CMD_STAT_RES_BUSY:
+ return "resource busy";
+ case MLX5_CMD_STAT_LIM_ERR:
+ return "limits exceeded";
+ case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
+ return "bad resource state";
+ case MLX5_CMD_STAT_IX_ERR:
+ return "bad index";
+ case MLX5_CMD_STAT_NO_RES_ERR:
+ return "no resources";
+ case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
+ return "bad input length";
+ case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
+ return "bad output length";
+ case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
+ return "bad QP state";
+ case MLX5_CMD_STAT_BAD_PKT_ERR:
+ return "bad packet (discarded)";
+ case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
+ return "bad size too many outstanding CQEs";
+ default:
+ return "unknown status";
+ }
+}
+
+static int cmd_status_to_err(u8 status)
+{
+ switch (status) {
+ case MLX5_CMD_STAT_OK: return 0;
+ case MLX5_CMD_STAT_INT_ERR: return -EIO;
+ case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
+ case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
+ case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
+ case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN;
+ case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO;
+ case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO;
+ case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL;
+ default: return -EIO;
+ }
+}
+
+struct mlx5_ifc_mbox_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_mbox_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
+{
+ *status = MLX5_GET(mbox_out, out, status);
+ *syndrome = MLX5_GET(mbox_out, out, syndrome);
+}
+
+static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
+{
+ u32 syndrome;
+ u8 status;
+ u16 opcode;
+ u16 op_mod;
+
+ mlx5_cmd_mbox_status(out, &status, &syndrome);
+ if (!status)
+ return 0;
+
+ opcode = MLX5_GET(mbox_in, in, opcode);
+ op_mod = MLX5_GET(mbox_in, in, op_mod);
+
+ mlx5_core_err(dev,
+ "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
+ mlx5_command_str(opcode),
+ opcode, op_mod,
+ cmd_status_str(status),
+ status,
+ syndrome);
+
+ return cmd_status_to_err(status);
+}
+
static void dump_command(struct mlx5_core_dev *dev,
struct mlx5_cmd_work_ent *ent, int input)
{
- u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
+ u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
struct mlx5_cmd_mailbox *next = msg->next;
int data_only;
u32 offset = 0;
@@ -608,9 +725,7 @@ static void dump_command(struct mlx5_core_dev *dev,
static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
- struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
-
- return be16_to_cpu(hdr->opcode);
+ return MLX5_GET(mbox_in, in->first.data, opcode);
}
static void cb_timeout_handler(struct work_struct *work)
@@ -749,16 +864,6 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
return err;
}
-static __be32 *get_synd_ptr(struct mlx5_outbox_hdr *out)
-{
- return &out->syndrome;
-}
-
-static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
-{
- return &out->status;
-}
-
/* Notes:
* 1. Callback functions may not sleep
* 2. page queue commands do not support asynchrous completion
@@ -804,7 +909,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
goto out_free;
ds = ent->ts2 - ent->ts1;
- op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
+ op = MLX5_GET(mbox_in, in->first.data, opcode);
if (op < ARRAY_SIZE(cmd->stats)) {
stats = &cmd->stats[op];
spin_lock_irq(&stats->lock);
@@ -1290,11 +1395,16 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
callback = ent->callback;
context = ent->context;
err = ent->ret;
- if (!err)
+ if (!err) {
err = mlx5_copy_from_msg(ent->uout,
ent->out,
ent->uout_size);
+ err = err ? err : mlx5_cmd_check(dev,
+ ent->in->first.data,
+ ent->uout);
+ }
+
mlx5_free_cmd_msg(dev, ent->out);
free_msg(dev, ent->in);
@@ -1346,14 +1456,9 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
return msg;
}
-static u16 opcode_from_in(struct mlx5_inbox_hdr *in)
-{
- return be16_to_cpu(in->opcode);
-}
-
-static int is_manage_pages(struct mlx5_inbox_hdr *in)
+static int is_manage_pages(void *in)
{
- return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
+ return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
@@ -1369,9 +1474,11 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
if (pci_channel_offline(dev->pdev) ||
dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
- err = mlx5_internal_err_ret_value(dev, opcode_from_in(in), &drv_synd, &status);
- *get_synd_ptr(out) = cpu_to_be32(drv_synd);
- *get_status_ptr(out) = status;
+ u16 opcode = MLX5_GET(mbox_in, in, opcode);
+
+ err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
+ MLX5_SET(mbox_out, out, status, status);
+ MLX5_SET(mbox_out, out, syndrome, drv_synd);
return err;
}
@@ -1423,7 +1530,10 @@ out_in:
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size)
{
- return cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
+ int err;
+
+ err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
+ return err ? : mlx5_cmd_check(dev, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec);
@@ -1660,96 +1770,3 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
pci_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);
-
-static const char *cmd_status_str(u8 status)
-{
- switch (status) {
- case MLX5_CMD_STAT_OK:
- return "OK";
- case MLX5_CMD_STAT_INT_ERR:
- return "internal error";
- case MLX5_CMD_STAT_BAD_OP_ERR:
- return "bad operation";
- case MLX5_CMD_STAT_BAD_PARAM_ERR:
- return "bad parameter";
- case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
- return "bad system state";
- case MLX5_CMD_STAT_BAD_RES_ERR:
- return "bad resource";
- case MLX5_CMD_STAT_RES_BUSY:
- return "resource busy";
- case MLX5_CMD_STAT_LIM_ERR:
- return "limits exceeded";
- case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
- return "bad resource state";
- case MLX5_CMD_STAT_IX_ERR:
- return "bad index";
- case MLX5_CMD_STAT_NO_RES_ERR:
- return "no resources";
- case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
- return "bad input length";
- case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
- return "bad output length";
- case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
- return "bad QP state";
- case MLX5_CMD_STAT_BAD_PKT_ERR:
- return "bad packet (discarded)";
- case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
- return "bad size too many outstanding CQEs";
- default:
- return "unknown status";
- }
-}
-
-static int cmd_status_to_err(u8 status)
-{
- switch (status) {
- case MLX5_CMD_STAT_OK: return 0;
- case MLX5_CMD_STAT_INT_ERR: return -EIO;
- case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL;
- case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL;
- case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
- case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
- case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
- case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
- case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
- case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
- case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN;
- case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO;
- case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO;
- case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL;
- case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL;
- case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL;
- default: return -EIO;
- }
-}
-
-/* this will be available till all the commands use set/get macros */
-int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
-{
- if (!hdr->status)
- return 0;
-
- pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
- cmd_status_str(hdr->status), hdr->status,
- be32_to_cpu(hdr->syndrome));
-
- return cmd_status_to_err(hdr->status);
-}
-
-int mlx5_cmd_status_to_err_v2(void *ptr)
-{
- u32 syndrome;
- u8 status;
-
- status = be32_to_cpu(*(__be32 *)ptr) >> 24;
- if (!status)
- return 0;
-
- syndrome = be32_to_cpu(*(__be32 *)(ptr + 4));
-
- pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
- cmd_status_str(status), status, syndrome);
-
- return cmd_status_to_err(status);
-}
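[Editor's note] With the cmd.c changes above, callers of mlx5_cmd_exec() no longer parse outbox headers or call mlx5_cmd_status_to_err() themselves; status and syndrome are checked inside mlx5_cmd_exec()/mlx5_cmd_check(). A minimal caller-side sketch of the resulting pattern (the helper name example_read_created_cqn is hypothetical; the macros and opcode mirror the mlx5_core_create_cq() conversion in the next hunk):

    /*
     * Illustrative sketch: on success the caller only extracts output
     * fields from the outbox with MLX5_GET(); firmware status handling
     * is already folded into mlx5_cmd_exec().
     */
    static int example_read_created_cqn(struct mlx5_core_dev *dev,
                                        u32 *in, int inlen, u32 *cqn)
    {
            u32 out[MLX5_ST_SZ_DW(create_cq_out)] = {0};
            int err;

            MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
            err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
            if (err)
                    return err;     /* already reflects translated FW status */

            *cqn = MLX5_GET(create_cq_out, out, cqn);
            return 0;
    }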
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 873a631ad155..32d4af9b594d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -134,33 +134,29 @@ void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
complete(&cq->free);
}
-
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- struct mlx5_create_cq_mbox_in *in, int inlen)
+ u32 *in, int inlen)
{
- int err;
struct mlx5_cq_table *table = &dev->priv.cq_table;
- struct mlx5_create_cq_mbox_out out;
- struct mlx5_destroy_cq_mbox_in din;
- struct mlx5_destroy_cq_mbox_out dout;
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ u32 din[MLX5_ST_SZ_DW(destroy_cq_in)];
+ u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
c_eqn);
struct mlx5_eq *eq;
+ int err;
eq = mlx5_eqn2eq(dev, eqn);
if (IS_ERR(eq))
return PTR_ERR(eq);
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ);
- memset(&out, 0, sizeof(out));
- err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
+ memset(out, 0, sizeof(out));
+ MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (err)
return err;
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
- cq->cqn = be32_to_cpu(out.cqn) & 0xffffff;
+ cq->cqn = MLX5_GET(create_cq_out, out, cqn);
cq->cons_index = 0;
cq->arm_sn = 0;
atomic_set(&cq->refcount, 1);
@@ -186,10 +182,11 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
return 0;
err_cmd:
- memset(&din, 0, sizeof(din));
- memset(&dout, 0, sizeof(dout));
- din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
- mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
+ memset(din, 0, sizeof(din));
+ memset(dout, 0, sizeof(dout));
+ MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
+ MLX5_SET(destroy_cq_in, din, cqn, cq->cqn);
+ mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
return err;
}
EXPORT_SYMBOL(mlx5_core_create_cq);
@@ -197,8 +194,8 @@ EXPORT_SYMBOL(mlx5_core_create_cq);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
struct mlx5_cq_table *table = &dev->priv.cq_table;
- struct mlx5_destroy_cq_mbox_in in;
- struct mlx5_destroy_cq_mbox_out out;
+ u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
struct mlx5_core_cq *tmp;
int err;
@@ -214,17 +211,12 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
return -EINVAL;
}
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
- in.cqn = cpu_to_be32(cq->cqn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
+ MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
synchronize_irq(cq->irqn);
mlx5_debug_cq_remove(dev, cq);
@@ -237,44 +229,23 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
EXPORT_SYMBOL(mlx5_core_destroy_cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- struct mlx5_query_cq_mbox_out *out)
+ u32 *out, int outlen)
{
- struct mlx5_query_cq_mbox_in in;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(out, 0, sizeof(*out));
-
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_CQ);
- in.cqn = cpu_to_be32(cq->cqn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
- if (err)
- return err;
-
- if (out->hdr.status)
- return mlx5_cmd_status_to_err(&out->hdr);
+ u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {0};
- return err;
+ MLX5_SET(query_cq_in, in, opcode, MLX5_CMD_OP_QUERY_CQ);
+ MLX5_SET(query_cq_in, in, cqn, cq->cqn);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL(mlx5_core_query_cq);
-
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- struct mlx5_modify_cq_mbox_in *in, int in_sz)
+ u32 *in, int inlen)
{
- struct mlx5_modify_cq_mbox_out out;
- int err;
-
- memset(&out, 0, sizeof(out));
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ);
- err = mlx5_cmd_exec(dev, in, in_sz, &out, sizeof(out));
- if (err)
- return err;
+ u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0};
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
- return 0;
+ MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ);
+ return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_modify_cq);
@@ -283,18 +254,20 @@ int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
u16 cq_period,
u16 cq_max_count)
{
- struct mlx5_modify_cq_mbox_in in;
-
- memset(&in, 0, sizeof(in));
-
- in.cqn = cpu_to_be32(cq->cqn);
- in.ctx.cq_period = cpu_to_be16(cq_period);
- in.ctx.cq_max_count = cpu_to_be16(cq_max_count);
- in.field_select = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD |
- MLX5_CQ_MODIFY_COUNT);
-
- return mlx5_core_modify_cq(dev, cq, &in, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {0};
+ void *cqc;
+
+ MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
+ cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
+ MLX5_SET(cqc, cqc, cq_period, cq_period);
+ MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
+ MLX5_SET(modify_cq_in, in,
+ modify_field_select_resize_field_select.modify_field_select.modify_field_select,
+ MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);
+
+ return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
}
+EXPORT_SYMBOL(mlx5_core_modify_cq_moderation);
int mlx5_init_cq_table(struct mlx5_core_dev *dev)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 5210d92e6bc7..e94a9532e218 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -277,24 +277,28 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
int index, int *is_str)
{
- struct mlx5_query_qp_mbox_out *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
struct mlx5_qp_context *ctx;
u64 param = 0;
+ u32 *out;
int err;
int no_sq;
- out = kzalloc(sizeof(*out), GFP_KERNEL);
+ out = kzalloc(outlen, GFP_KERNEL);
if (!out)
return param;
- err = mlx5_core_qp_query(dev, qp, out, sizeof(*out));
+ err = mlx5_core_qp_query(dev, qp, out, outlen);
if (err) {
- mlx5_core_warn(dev, "failed to query qp\n");
+ mlx5_core_warn(dev, "failed to query qp err=%d\n", err);
goto out;
}
*is_str = 0;
- ctx = &out->ctx;
+
+ /* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
+ ctx = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, out, qpc);
+
switch (index) {
case QP_PID:
param = qp->pid;
@@ -358,32 +362,32 @@ out:
static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
int index)
{
- struct mlx5_query_eq_mbox_out *out;
- struct mlx5_eq_context *ctx;
+ int outlen = MLX5_ST_SZ_BYTES(query_eq_out);
u64 param = 0;
+ void *ctx;
+ u32 *out;
int err;
- out = kzalloc(sizeof(*out), GFP_KERNEL);
+ out = kzalloc(outlen, GFP_KERNEL);
if (!out)
return param;
- ctx = &out->ctx;
-
- err = mlx5_core_eq_query(dev, eq, out, sizeof(*out));
+ err = mlx5_core_eq_query(dev, eq, out, outlen);
if (err) {
mlx5_core_warn(dev, "failed to query eq\n");
goto out;
}
+ ctx = MLX5_ADDR_OF(query_eq_out, out, eq_context_entry);
switch (index) {
case EQ_NUM_EQES:
- param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f);
+ param = 1 << MLX5_GET(eqc, ctx, log_eq_size);
break;
case EQ_INTR:
- param = ctx->intr;
+ param = MLX5_GET(eqc, ctx, intr);
break;
case EQ_LOG_PG_SZ:
- param = (ctx->log_page_size & 0x1f) + 12;
+ param = MLX5_GET(eqc, ctx, log_page_size) + 12;
break;
}
@@ -395,37 +399,37 @@ out:
static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
int index)
{
- struct mlx5_query_cq_mbox_out *out;
- struct mlx5_cq_context *ctx;
+ int outlen = MLX5_ST_SZ_BYTES(query_cq_out);
u64 param = 0;
+ void *ctx;
+ u32 *out;
int err;
- out = kzalloc(sizeof(*out), GFP_KERNEL);
+ out = mlx5_vzalloc(outlen);
if (!out)
return param;
- ctx = &out->ctx;
-
- err = mlx5_core_query_cq(dev, cq, out);
+ err = mlx5_core_query_cq(dev, cq, out, outlen);
if (err) {
mlx5_core_warn(dev, "failed to query cq\n");
goto out;
}
+ ctx = MLX5_ADDR_OF(query_cq_out, out, cq_context);
switch (index) {
case CQ_PID:
param = cq->pid;
break;
case CQ_NUM_CQES:
- param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f);
+ param = 1 << MLX5_GET(cqc, ctx, log_cq_size);
break;
case CQ_LOG_PG_SZ:
- param = (ctx->log_pg_sz & 0x1f) + 12;
+ param = MLX5_GET(cqc, ctx, log_page_size);
break;
}
out:
- kfree(out);
+ kvfree(out);
return param;
}
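The debugfs readers above decode query responses with MLX5_ADDR_OF() and
MLX5_GET() instead of open-coded be32_to_cpu()/shift/mask arithmetic. A short
sketch of that read path, assuming the query_cq_out layout and the new
mlx5_core_query_cq(dev, cq, out, outlen) signature from the hunks above; the
helper name is hypothetical:

#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/mlx5_ifc.h>

/* Illustrative only: locate the cq context inside the response and pull
 * one field out of it with MLX5_GET().
 */
static int example_read_cq_size(struct mlx5_core_dev *dev,
				struct mlx5_core_cq *cq, u32 *num_cqes)
{
	int outlen = MLX5_ST_SZ_BYTES(query_cq_out);
	void *cqc;
	u32 *out;
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_core_query_cq(dev, cq, out, outlen);
	if (!err) {
		cqc = MLX5_ADDR_OF(query_cq_out, out, cq_context);
		*num_cqes = 1 << MLX5_GET(cqc, cqc, log_cq_size);
	}

	kvfree(out);
	return err;
}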
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 673043ccd76c..3247c3c543a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -60,24 +60,27 @@ void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
struct mlx5_core_mkey *mkey)
{
- struct mlx5_create_mkey_mbox_in *in;
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ void *mkc;
+ u32 *in;
int err;
- in = mlx5_vzalloc(sizeof(*in));
+ in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
- in->seg.flags = MLX5_PERM_LOCAL_WRITE |
- MLX5_PERM_LOCAL_READ |
- MLX5_ACCESS_MODE_PA;
- in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
- in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
- err = mlx5_core_create_mkey(mdev, mkey, in, sizeof(*in), NULL, NULL,
- NULL);
+ MLX5_SET(mkc, mkc, pd, pdn);
+ MLX5_SET(mkc, mkc, length64, 1);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
- kvfree(in);
+ err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
+ kvfree(in);
return err;
}
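mlx5e_create_mkey() above also shows the new four-argument
mlx5_core_create_mkey(dev, mkey, in, inlen) signature: the mkey context is
addressed inside create_mkey_in with MLX5_ADDR_OF() and filled with MLX5_SET(),
so the old flags/flags_pd/qpn_mkey7_0 bit packing disappears. A sketch of a
caller under those assumptions (the function name is hypothetical, the field
values mirror the hunk above):

#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>

/* Illustrative only: build a PA-mode, length64 mkey in the mlx5_ifc layout. */
static int example_create_pa_mkey(struct mlx5_core_dev *mdev, u32 pdn,
				  struct mlx5_core_mkey *mkey)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, pd, pdn);
	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
	kvfree(in);
	return err;
}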
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 4a3757e60441..9561fca494bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -726,7 +726,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
u32 eth_proto_cap;
u32 eth_proto_admin;
u32 eth_proto_lp;
@@ -736,7 +736,6 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
int err;
err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
-
if (err) {
netdev_err(netdev, "%s: query port ptys failed: %d\n",
__func__, err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 870bea37c57c..10fa12aa063f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -180,18 +180,15 @@ static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
{
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
u32 *out = (u32 *)priv->stats.vport.query_vport_out;
- u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
+ u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
struct mlx5_core_dev *mdev = priv->mdev;
- memset(in, 0, sizeof(in));
-
MLX5_SET(query_vport_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_VPORT_COUNTER);
MLX5_SET(query_vport_counter_in, in, op_mod, 0);
MLX5_SET(query_vport_counter_in, in, other_vport, 0);
memset(out, 0, outlen);
-
mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
@@ -2022,14 +2019,11 @@ static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u32 in[MLX5_ST_SZ_DW(create_tis_in)];
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
- memset(in, 0, sizeof(in));
-
MLX5_SET(tisc, tisc, prio, tc << 1);
MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
-
return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
}
@@ -3228,35 +3222,34 @@ static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5_create_mkey_mbox_in *in;
- struct mlx5_mkey_seg *mkc;
- int inlen = sizeof(*in);
- u64 npages =
- priv->profile->max_nch(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS;
+ u64 npages = priv->profile->max_nch(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS;
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ void *mkc;
+ u32 *in;
int err;
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
- mkc = &in->seg;
- mkc->status = MLX5_MKEY_STATUS_FREE;
- mkc->flags = MLX5_PERM_UMR_EN |
- MLX5_PERM_LOCAL_READ |
- MLX5_PERM_LOCAL_WRITE |
- MLX5_ACCESS_MODE_MTT;
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
- mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
- mkc->flags_pd = cpu_to_be32(mdev->mlx5e_res.pdn);
- mkc->len = cpu_to_be64(npages << PAGE_SHIFT);
- mkc->xlt_oct_size = cpu_to_be32(mlx5e_get_mtt_octw(npages));
- mkc->log2_page_size = PAGE_SHIFT;
+ MLX5_SET(mkc, mkc, free, 1);
+ MLX5_SET(mkc, mkc, umr_en, 1);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
- err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL,
- NULL, NULL);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
+ MLX5_SET64(mkc, mkc, len, npages << PAGE_SHIFT);
+ MLX5_SET(mkc, mkc, translations_octword_size,
+ mlx5e_get_mtt_octw(npages));
+ MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
- kvfree(in);
+ err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen);
+ kvfree(in);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 1c7d8b8314bf..681c12c14005 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -416,8 +416,8 @@ int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
{
rep->priv_data = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rep);
if (!rep->priv_data) {
- pr_warn("Failed to create representor for vport %d\n",
- rep->vport);
+ mlx5_core_warn(esw->dev, "Failed to create representor for vport %d\n",
+ rep->vport);
return -EINVAL;
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 0e30602ef76d..aaca09002ca6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -86,23 +86,12 @@ struct cre_des_eq {
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
- struct mlx5_destroy_eq_mbox_in in;
- struct mlx5_destroy_eq_mbox_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_EQ);
- in.eqn = eqn;
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (!err)
- goto ex;
-
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
+ u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {0};
-ex:
- return err;
+ MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
+ MLX5_SET(destroy_eq_in, in, eq_number, eqn);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
@@ -351,11 +340,13 @@ static void init_eq_buf(struct mlx5_eq *eq)
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
int nent, u64 mask, const char *name, struct mlx5_uar *uar)
{
+ u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
struct mlx5_priv *priv = &dev->priv;
- struct mlx5_create_eq_mbox_in *in;
- struct mlx5_create_eq_mbox_out out;
- int err;
+ __be64 *pas;
+ void *eqc;
int inlen;
+ u32 *in;
+ int err;
eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
eq->cons_index = 0;
@@ -365,35 +356,36 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
init_eq_buf(eq);
- inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages;
+ inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
+ MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;
+
in = mlx5_vzalloc(inlen);
if (!in) {
err = -ENOMEM;
goto err_buf;
}
- memset(&out, 0, sizeof(out));
- mlx5_fill_page_array(&eq->buf, in->pas);
+ pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
+ mlx5_fill_page_array(&eq->buf, pas);
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ);
- in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index);
- in->ctx.intr = vecidx;
- in->ctx.log_page_size = eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
- in->events_mask = cpu_to_be64(mask);
+ MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
+ MLX5_SET64(create_eq_in, in, event_bitmask, mask);
- err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
- if (err)
- goto err_in;
+ eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
+ MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
+ MLX5_SET(eqc, eqc, uar_page, uar->index);
+ MLX5_SET(eqc, eqc, intr, vecidx);
+ MLX5_SET(eqc, eqc, log_page_size,
+ eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
- if (out.hdr.status) {
- err = mlx5_cmd_status_to_err(&out.hdr);
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ if (err)
goto err_in;
- }
snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
name, pci_name(dev->pdev));
- eq->eqn = out.eq_number;
+ eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
eq->irqn = priv->msix_arr[vecidx].vector;
eq->dev = dev;
eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
@@ -547,22 +539,12 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev)
}
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
- struct mlx5_query_eq_mbox_out *out, int outlen)
+ u32 *out, int outlen)
{
- struct mlx5_query_eq_mbox_in in;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(out, 0, outlen);
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_EQ);
- in.eqn = eq->eqn;
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
- if (err)
- return err;
+ u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0};
- if (out->hdr.status)
- err = mlx5_cmd_status_to_err(&out->hdr);
-
- return err;
+ MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
+ MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_eq_query);
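mlx5_create_map_eq() above also demonstrates the variable-length input case:
the buffer is sized as the base create_eq_in layout plus one pas[] element per
page and allocated with mlx5_vzalloc(), since it no longer fits a fixed struct.
A trimmed sketch of that sizing and allocation, assuming only the create_eq
layout shown above (the helper name is hypothetical and the eq_context_entry
fields are elided for brevity):

#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>

/* Illustrative only: size the input as layout + npages PAS entries, fill the
 * page array, execute, and read the assigned EQ number from the output.
 */
static int example_create_eq(struct mlx5_core_dev *dev, struct mlx5_buf *buf,
			     int npages, u8 *eqn)
{
	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
		    MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * npages;
	u32 *in;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
	mlx5_fill_page_array(buf, (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas));
	/* eq_context_entry fields (log_eq_size, uar_page, intr, ...) would be
	 * set here, as in the hunk above. */

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (!err)
		*eqn = MLX5_GET(create_eq_out, out, eq_number);

	kvfree(in);
	return err;
}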
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index f6d667797ee1..7c4935993b99 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -87,13 +87,9 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
u32 events_mask)
{
- int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)];
- int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
+ int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
+ int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
void *nic_vport_ctx;
- int err;
-
- memset(out, 0, sizeof(out));
- memset(in, 0, sizeof(in));
MLX5_SET(modify_nic_vport_context_in, in,
opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
@@ -116,45 +112,31 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
MLX5_SET(nic_vport_context, nic_vport_ctx,
event_on_promisc_change, 1);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
- if (err)
- goto ex;
- err = mlx5_cmd_status_to_err_v2(out);
- if (err)
- goto ex;
- return 0;
-ex:
- return err;
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
/* E-Switch vport context HW commands */
static int query_esw_vport_context_cmd(struct mlx5_core_dev *mdev, u32 vport,
u32 *out, int outlen)
{
- u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {0};
MLX5_SET(query_nic_vport_context_in, in, opcode,
MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
-
MLX5_SET(query_esw_vport_context_in, in, vport_number, vport);
if (vport)
MLX5_SET(query_esw_vport_context_in, in, other_vport, 1);
-
- return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
static int query_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
u16 *vlan, u8 *qos)
{
- u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)];
+ u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {0};
int err;
bool cvlan_strip;
bool cvlan_insert;
- memset(out, 0, sizeof(out));
-
*vlan = 0;
*qos = 0;
@@ -188,27 +170,20 @@ out:
static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
void *in, int inlen)
{
- u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)];
-
- memset(out, 0, sizeof(out));
+ u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
+ MLX5_SET(modify_esw_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
if (vport)
MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
-
- MLX5_SET(modify_esw_vport_context_in, in, opcode,
- MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
-
- return mlx5_cmd_exec_check_status(dev, in, inlen,
- out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
u16 vlan, u8 qos, bool set)
{
- u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};
if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
!MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
@@ -216,7 +191,6 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%d\n",
vport, vlan, qos, set);
-
if (set) {
MLX5_SET(modify_esw_vport_context_in, in,
esw_vport_context.vport_cvlan_strip, 1);
@@ -241,13 +215,10 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index,
u8 *mac, u8 vlan_valid, u16 vlan)
{
- u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)];
- u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)];
+ u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)] = {0};
u8 *in_mac_addr;
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
-
MLX5_SET(set_l2_table_entry_in, in, opcode,
MLX5_CMD_OP_SET_L2_TABLE_ENTRY);
MLX5_SET(set_l2_table_entry_in, in, table_index, index);
@@ -257,23 +228,18 @@ static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index,
in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address);
ether_addr_copy(&in_mac_addr[2], mac);
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
- out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static int del_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index)
{
- u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)];
- u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)] = {0};
MLX5_SET(delete_l2_table_entry_in, in, opcode,
MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
MLX5_SET(delete_l2_table_entry_in, in, table_index, index);
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
- out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static int alloc_l2_table_index(struct mlx5_l2_table *l2_table, u32 *ix)
@@ -340,7 +306,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec) {
- pr_warn("FDB: Failed to alloc match parameters\n");
+ esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
return NULL;
}
dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
@@ -374,8 +340,8 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest);
if (IS_ERR(flow_rule)) {
- pr_warn(
- "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
+ esw_warn(esw->dev,
+ "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
flow_rule = NULL;
}
@@ -1352,8 +1318,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
0, NULL);
if (IS_ERR(vport->ingress.allow_rule)) {
err = PTR_ERR(vport->ingress.allow_rule);
- pr_warn("vport[%d] configure ingress allow rule, err(%d)\n",
- vport->vport, err);
+ esw_warn(esw->dev,
+ "vport[%d] configure ingress allow rule, err(%d)\n",
+ vport->vport, err);
vport->ingress.allow_rule = NULL;
goto out;
}
@@ -1365,8 +1332,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
0, NULL);
if (IS_ERR(vport->ingress.drop_rule)) {
err = PTR_ERR(vport->ingress.drop_rule);
- pr_warn("vport[%d] configure ingress drop rule, err(%d)\n",
- vport->vport, err);
+ esw_warn(esw->dev,
+ "vport[%d] configure ingress drop rule, err(%d)\n",
+ vport->vport, err);
vport->ingress.drop_rule = NULL;
goto out;
}
@@ -1418,8 +1386,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
0, NULL);
if (IS_ERR(vport->egress.allowed_vlan)) {
err = PTR_ERR(vport->egress.allowed_vlan);
- pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
- vport->vport, err);
+ esw_warn(esw->dev,
+ "vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
+ vport->vport, err);
vport->egress.allowed_vlan = NULL;
goto out;
}
@@ -1432,8 +1401,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
0, NULL);
if (IS_ERR(vport->egress.drop_rule)) {
err = PTR_ERR(vport->egress.drop_rule);
- pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n",
- vport->vport, err);
+ esw_warn(esw->dev,
+ "vport[%d] configure egress drop rule failed, err(%d)\n",
+ vport->vport, err);
vport->egress.drop_rule = NULL;
}
out:
@@ -1903,7 +1873,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
struct ifla_vf_stats *vf_stats)
{
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
- u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
+ u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
int err = 0;
u32 *out;
@@ -1916,8 +1886,6 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
if (!out)
return -ENOMEM;
- memset(in, 0, sizeof(in));
-
MLX5_SET(query_vport_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_VPORT_COUNTER);
MLX5_SET(query_vport_counter_in, in, op_mod, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 9134010e2921..7aaefa9aaf1c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -41,10 +41,8 @@
int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft)
{
- u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)];
- u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
MLX5_SET(set_flow_table_root_in, in, opcode,
MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
@@ -55,9 +53,7 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
}
- memset(out, 0, sizeof(out));
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
@@ -66,12 +62,10 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
unsigned int log_size, struct mlx5_flow_table
*next_ft, unsigned int *table_id)
{
- u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
- u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
+ u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
int err;
- memset(in, 0, sizeof(in));
-
MLX5_SET(create_flow_table_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_TABLE);
@@ -87,10 +81,7 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
MLX5_SET(create_flow_table_in, in, other_vport, 1);
}
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
-
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
*table_id = MLX5_GET(create_flow_table_out, out,
table_id);
@@ -100,11 +91,8 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft)
{
- u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
MLX5_SET(destroy_flow_table_in, in, opcode,
MLX5_CMD_OP_DESTROY_FLOW_TABLE);
@@ -115,19 +103,15 @@ int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
}
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft)
{
- u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)];
- u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
MLX5_SET(modify_flow_table_in, in, opcode,
MLX5_CMD_OP_MODIFY_FLOW_TABLE);
@@ -146,8 +130,7 @@ int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0);
}
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
@@ -155,12 +138,10 @@ int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
u32 *in,
unsigned int *group_id)
{
+ u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
int err;
- memset(out, 0, sizeof(out));
-
MLX5_SET(create_flow_group_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_GROUP);
MLX5_SET(create_flow_group_in, in, table_type, ft->type);
@@ -170,13 +151,10 @@ int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
MLX5_SET(create_flow_group_in, in, other_vport, 1);
}
- err = mlx5_cmd_exec_check_status(dev, in,
- inlen, out,
- sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*group_id = MLX5_GET(create_flow_group_out, out,
group_id);
-
return err;
}
@@ -184,11 +162,8 @@ int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
unsigned int group_id)
{
- u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];
- u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0};
MLX5_SET(destroy_flow_group_in, in, opcode,
MLX5_CMD_OP_DESTROY_FLOW_GROUP);
@@ -200,8 +175,7 @@ int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
}
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
@@ -212,7 +186,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
{
unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
- u32 out[MLX5_ST_SZ_DW(set_fte_out)];
+ u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
struct mlx5_flow_rule *dst;
void *in_flow_context;
void *in_match_value;
@@ -290,11 +264,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
list_size);
}
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(dev, in, inlen, out,
- sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
kvfree(in);
-
return err;
}
@@ -303,7 +274,7 @@ int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
unsigned group_id,
struct fs_fte *fte)
{
- return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
+ return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
}
int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
@@ -327,12 +298,8 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
unsigned int index)
{
- u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
- u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
- int err;
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};
MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
MLX5_SET(delete_fte_in, in, table_type, ft->type);
@@ -343,74 +310,55 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
MLX5_SET(delete_fte_in, in, other_vport, 1);
}
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
-
- return err;
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id)
{
- u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)];
- u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)];
+ u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
int err;
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
-
MLX5_SET(alloc_flow_counter_in, in, opcode,
MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
- if (err)
- return err;
-
- *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
-
- return 0;
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
+ return err;
}
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id)
{
- u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)];
- u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0};
MLX5_SET(dealloc_flow_counter_in, in, opcode,
MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
-
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
u64 *packets, u64 *bytes)
{
u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
- MLX5_ST_SZ_BYTES(traffic_counter)];
- u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)];
+ MLX5_ST_SZ_BYTES(traffic_counter)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
void *stats;
int err = 0;
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
-
MLX5_SET(query_flow_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_FLOW_COUNTER);
MLX5_SET(query_flow_counter_in, in, op_mod, 0);
MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
-
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
*packets = MLX5_GET64(traffic_counter, stats, packets);
*bytes = MLX5_GET64(traffic_counter, stats, octets);
-
return 0;
}
@@ -448,18 +396,14 @@ void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
int
mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
{
- u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
MLX5_SET(query_flow_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_FLOW_COUNTER);
MLX5_SET(query_flow_counter_in, in, op_mod, 0);
MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
-
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
- b->out, b->outlen);
+ return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
}
void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
@@ -480,3 +424,51 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
*packets = MLX5_GET64(traffic_counter, stats, packets);
*bytes = MLX5_GET64(traffic_counter, stats, octets);
}
+
+#define MAX_ENCAP_SIZE (128)
+
+int mlx5_cmd_alloc_encap(struct mlx5_core_dev *dev,
+ int header_type,
+ size_t size,
+ void *encap_header,
+ u32 *encap_id)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)];
+ u32 in[MLX5_ST_SZ_DW(alloc_encap_header_in) +
+ (MAX_ENCAP_SIZE / sizeof(u32))];
+ void *encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in,
+ encap_header);
+ void *header = MLX5_ADDR_OF(encap_header_in, encap_header_in,
+ encap_header);
+ int inlen = header - (void *)in + size;
+ int err;
+
+ if (size > MAX_ENCAP_SIZE)
+ return -EINVAL;
+
+ memset(in, 0, inlen);
+ MLX5_SET(alloc_encap_header_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_ENCAP_HEADER);
+ MLX5_SET(encap_header_in, encap_header_in, encap_header_size, size);
+ MLX5_SET(encap_header_in, encap_header_in, header_type, header_type);
+ memcpy(header, encap_header, size);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+
+ *encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id);
+ return err;
+}
+
+void mlx5_cmd_dealloc_encap(struct mlx5_core_dev *dev, u32 encap_id)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)];
+ u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)];
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(dealloc_encap_header_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
+ MLX5_SET(dealloc_encap_header_in, in, encap_id, encap_id);
+
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
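The two functions above add the encap header alloc/dealloc commands whose
prototypes land in fs_cmd.h below. A hypothetical caller, assuming only those
two signatures (the surrounding function, header contents and header_type value
are placeholders, not part of this patch):

#include <linux/mlx5/driver.h>
#include "fs_cmd.h"

/* Illustrative only: allocate an encap header id, use it, then release it. */
static int example_use_encap(struct mlx5_core_dev *dev, int header_type,
			     void *encap_header, size_t size)
{
	u32 encap_id;
	int err;

	err = mlx5_cmd_alloc_encap(dev, header_type, size,
				   encap_header, &encap_id);
	if (err)
		return err;

	/* ... install flow rules that reference encap_id ... */

	mlx5_cmd_dealloc_encap(dev, encap_id);
	return 0;
}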
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 158844cef82b..ac52fdfb5096 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -88,4 +88,11 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
struct mlx5_cmd_fc_bulk *b, u16 id,
u64 *packets, u64 *bytes);
+int mlx5_cmd_alloc_encap(struct mlx5_core_dev *dev,
+ int header_type,
+ size_t size,
+ void *encap_header,
+ u32 *encap_id);
+void mlx5_cmd_dealloc_encap(struct mlx5_core_dev *dev, u32 encap_id);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 75bb8c864557..243efc5a8bd1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -345,7 +345,7 @@ static void del_flow_table(struct fs_node *node)
err = mlx5_cmd_destroy_flow_table(dev, ft);
if (err)
- pr_warn("flow steering can't destroy ft\n");
+ mlx5_core_warn(dev, "flow steering can't destroy ft\n");
fs_get_obj(prio, ft->node.parent);
prio->num_ft--;
}
@@ -364,7 +364,7 @@ static void del_rule(struct fs_node *node)
match_value = mlx5_vzalloc(match_len);
if (!match_value) {
- pr_warn("failed to allocate inbox\n");
+ mlx5_core_warn(dev, "failed to allocate inbox\n");
return;
}
@@ -387,8 +387,9 @@ static void del_rule(struct fs_node *node)
modify_mask,
fte);
if (err)
- pr_warn("%s can't del rule fg id=%d fte_index=%d\n",
- __func__, fg->id, fte->index);
+ mlx5_core_warn(dev,
+ "%s can't del rule fg id=%d fte_index=%d\n",
+ __func__, fg->id, fte->index);
}
kvfree(match_value);
}
@@ -409,8 +410,9 @@ static void del_fte(struct fs_node *node)
err = mlx5_cmd_delete_fte(dev, ft,
fte->index);
if (err)
- pr_warn("flow steering can't delete fte in index %d of flow group id %d\n",
- fte->index, fg->id);
+ mlx5_core_warn(dev,
+ "flow steering can't delete fte in index %d of flow group id %d\n",
+ fte->index, fg->id);
fte->status = 0;
fg->num_ftes--;
@@ -427,8 +429,8 @@ static void del_flow_group(struct fs_node *node)
dev = get_dev(&ft->node);
if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
- pr_warn("flow steering can't destroy fg %d of ft %d\n",
- fg->id, ft->id);
+ mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
+ fg->id, ft->id);
}
static struct fs_fte *alloc_fte(u8 action,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 77fc1aa26114..5718aada6605 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -38,13 +38,10 @@
static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
int outlen)
{
- u32 in[MLX5_ST_SZ_DW(query_adapter_in)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {0};
MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
-
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
int mlx5_query_board_id(struct mlx5_core_dev *dev)
@@ -162,38 +159,18 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
{
- struct mlx5_cmd_init_hca_mbox_in in;
- struct mlx5_cmd_init_hca_mbox_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_INIT_HCA);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
+ u32 out[MLX5_ST_SZ_DW(init_hca_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {0};
- return err;
+ MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
{
- struct mlx5_cmd_teardown_hca_mbox_in in;
- struct mlx5_cmd_teardown_hca_mbox_out out;
- int err;
+ u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_TEARDOWN_HCA);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
-
- return err;
+ MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mad.c b/drivers/net/ethernet/mellanox/mlx5/core/mad.c
index 1368dac00da0..3a3b0005fd2b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mad.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mad.c
@@ -39,36 +39,33 @@
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port)
{
- struct mlx5_mad_ifc_mbox_in *in = NULL;
- struct mlx5_mad_ifc_mbox_out *out = NULL;
- int err;
+ int outlen = MLX5_ST_SZ_BYTES(mad_ifc_out);
+ int inlen = MLX5_ST_SZ_BYTES(mad_ifc_in);
+ int err = -ENOMEM;
+ void *data;
+ void *resp;
+ u32 *out;
+ u32 *in;
- in = kzalloc(sizeof(*in), GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- out = kzalloc(sizeof(*out), GFP_KERNEL);
- if (!out) {
- err = -ENOMEM;
+ in = kzalloc(inlen, GFP_KERNEL);
+ out = kzalloc(outlen, GFP_KERNEL);
+ if (!in || !out)
goto out;
- }
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MAD_IFC);
- in->hdr.opmod = cpu_to_be16(opmod);
- in->port = port;
+ MLX5_SET(mad_ifc_in, in, opcode, MLX5_CMD_OP_MAD_IFC);
+ MLX5_SET(mad_ifc_in, in, op_mod, opmod);
+ MLX5_SET(mad_ifc_in, in, port, port);
- memcpy(in->data, inb, sizeof(in->data));
+ data = MLX5_ADDR_OF(mad_ifc_in, in, mad);
+ memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad));
- err = mlx5_cmd_exec(dev, in, sizeof(*in), out, sizeof(*out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
if (err)
goto out;
- if (out->hdr.status) {
- err = mlx5_cmd_status_to_err(&out->hdr);
- goto out;
- }
-
- memcpy(outb, out->data, sizeof(out->data));
+ resp = MLX5_ADDR_OF(mad_ifc_out, out, response_mad_packet);
+ memcpy(outb, resp,
+ MLX5_FLD_SZ_BYTES(mad_ifc_out, response_mad_packet));
out:
kfree(out);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 4f491d43e77d..d6446a3623b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -324,7 +324,7 @@ enum {
MLX5_DEV_CAP_FLAG_DCT,
};
-static u16 to_fw_pkey_sz(u32 size)
+static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
{
switch (size) {
case 128:
@@ -340,7 +340,7 @@ static u16 to_fw_pkey_sz(u32 size)
case 4096:
return 5;
default:
- pr_warn("invalid pkey table size %d\n", size);
+ mlx5_core_warn(dev, "invalid pkey table size %d\n", size);
return 0;
}
}
@@ -363,10 +363,6 @@ static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
- if (err)
- goto query_ex;
-
- err = mlx5_cmd_status_to_err_v2(out);
if (err) {
mlx5_core_warn(dev,
"QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
@@ -409,20 +405,11 @@ int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod)
{
- u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)];
- int err;
-
- memset(out, 0, sizeof(out));
+ u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0};
MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1);
- err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
- if (err)
- return err;
-
- err = mlx5_cmd_status_to_err_v2(out);
-
- return err;
+ return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
}
static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
@@ -490,7 +477,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
128);
/* we limit the size of the pkey table to 128 entries for now */
MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
- to_fw_pkey_sz(128));
+ to_fw_pkey_sz(dev, 128));
if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
@@ -528,37 +515,22 @@ static int set_hca_ctrl(struct mlx5_core_dev *dev)
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
- u32 out[MLX5_ST_SZ_DW(enable_hca_out)];
- u32 in[MLX5_ST_SZ_DW(enable_hca_in)];
- int err;
+ u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};
- memset(in, 0, sizeof(in));
MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
MLX5_SET(enable_hca_in, in, function_id, func_id);
- memset(out, 0, sizeof(out));
-
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- return mlx5_cmd_status_to_err_v2(out);
+ return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
}
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
- u32 out[MLX5_ST_SZ_DW(disable_hca_out)];
- u32 in[MLX5_ST_SZ_DW(disable_hca_in)];
- int err;
+ u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {0};
- memset(in, 0, sizeof(in));
MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
MLX5_SET(disable_hca_in, in, function_id, func_id);
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
- if (err)
- return err;
-
- return mlx5_cmd_status_to_err_v2(out);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev)
@@ -758,44 +730,40 @@ clean:
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
- u32 query_in[MLX5_ST_SZ_DW(query_issi_in)];
- u32 query_out[MLX5_ST_SZ_DW(query_issi_out)];
- u32 set_in[MLX5_ST_SZ_DW(set_issi_in)];
- u32 set_out[MLX5_ST_SZ_DW(set_issi_out)];
- int err;
+ u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0};
+ u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
u32 sup_issi;
-
- memset(query_in, 0, sizeof(query_in));
- memset(query_out, 0, sizeof(query_out));
+ int err;
MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
-
- err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in),
- query_out, sizeof(query_out));
+ err = mlx5_cmd_exec(dev, query_in, sizeof(query_in),
+ query_out, sizeof(query_out));
if (err) {
- if (((struct mlx5_outbox_hdr *)query_out)->status ==
- MLX5_CMD_STAT_BAD_OP_ERR) {
+ u32 syndrome;
+ u8 status;
+
+ mlx5_cmd_mbox_status(query_out, &status, &syndrome);
+ if (status == MLX5_CMD_STAT_BAD_OP_ERR) {
pr_debug("Only ISSI 0 is supported\n");
return 0;
}
- pr_err("failed to query ISSI\n");
+ pr_err("failed to query ISSI err(%d)\n", err);
return err;
}
sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);
if (sup_issi & (1 << 1)) {
- memset(set_in, 0, sizeof(set_in));
- memset(set_out, 0, sizeof(set_out));
+ u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0};
+ u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};
MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
MLX5_SET(set_issi_in, set_in, current_issi, 1);
-
- err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in),
- set_out, sizeof(set_out));
+ err = mlx5_cmd_exec(dev, set_in, sizeof(set_in),
+ set_out, sizeof(set_out));
if (err) {
- pr_err("failed to set ISSI=1\n");
+ pr_err("failed to set ISSI=1 err(%d)\n", err);
return err;
}
@@ -1344,8 +1312,9 @@ static int init_one(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) {
- pr_warn("selected profile out of range, selecting default (%d)\n",
- MLX5_DEFAULT_PROF);
+ mlx5_core_warn(dev,
+ "selected profile out of range, selecting default (%d)\n",
+ MLX5_DEFAULT_PROF);
prof_sel = MLX5_DEFAULT_PROF;
}
dev->profile = &profile[prof_sel];
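The ISSI negotiation above also shows the replacement for peeking at the old
outbox header on failure: mlx5_cmd_mbox_status() extracts the firmware status
and syndrome from the output mailbox so the caller can special-case
MLX5_CMD_STAT_BAD_OP_ERR. A condensed sketch of that error handling, assuming
only the calls visible in the hunk above (the wrapper name is hypothetical):

#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>

/* Illustrative only: treat "bad opcode" as "ISSI not supported" and report
 * any other failure as a real error.
 */
static int example_query_issi(struct mlx5_core_dev *dev, u32 *sup_issi)
{
	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0};
	int err;

	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
	err = mlx5_cmd_exec(dev, query_in, sizeof(query_in),
			    query_out, sizeof(query_out));
	if (err) {
		u32 syndrome;
		u8 status;

		mlx5_cmd_mbox_status(query_out, &status, &syndrome);
		if (status == MLX5_CMD_STAT_BAD_OP_ERR) {
			*sup_issi = 0;	/* only ISSI 0 supported */
			return 0;
		}
		return err;
	}

	*sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);
	return 0;
}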
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
index d5a0c2d61a18..ba2b09cc192f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
@@ -37,70 +37,30 @@
#include <rdma/ib_verbs.h>
#include "mlx5_core.h"
-struct mlx5_attach_mcg_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 qpn;
- __be32 rsvd;
- u8 gid[16];
-};
-
-struct mlx5_attach_mcg_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvf[8];
-};
-
-struct mlx5_detach_mcg_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 qpn;
- __be32 rsvd;
- u8 gid[16];
-};
-
-struct mlx5_detach_mcg_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvf[8];
-};
-
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
{
- struct mlx5_attach_mcg_mbox_in in;
- struct mlx5_attach_mcg_mbox_out out;
- int err;
+ u32 out[MLX5_ST_SZ_DW(attach_to_mcg_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {0};
+ void *gid;
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ATTACH_TO_MCG);
- memcpy(in.gid, mgid, sizeof(*mgid));
- in.qpn = cpu_to_be32(qpn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
-
- return err;
+ MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
+ MLX5_SET(attach_to_mcg_in, in, qpn, qpn);
+ gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid);
+ memcpy(gid, mgid, sizeof(*mgid));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_attach_mcg);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
{
- struct mlx5_detach_mcg_mbox_in in;
- struct mlx5_detach_mcg_mbox_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETTACH_FROM_MCG);
- memcpy(in.gid, mgid, sizeof(*mgid));
- in.qpn = cpu_to_be32(qpn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
+ u32 out[MLX5_ST_SZ_DW(detach_from_mcg_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {0};
+ void *gid;
- return err;
+ MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
+ MLX5_SET(detach_from_mcg_in, in, qpn, qpn);
+ gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid);
+ memcpy(gid, mgid, sizeof(*mgid));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_detach_mcg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 2f86ec6fcf25..ef0eeedcceb6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -58,8 +58,8 @@ do { \
} while (0)
#define mlx5_core_err(__dev, format, ...) \
- dev_err(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format, \
- (__dev)->priv.name, __func__, __LINE__, current->pid, \
+ dev_err(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, \
##__VA_ARGS__)
#define mlx5_core_warn(__dev, format, ...) \
@@ -75,19 +75,6 @@ enum {
MLX5_CMD_TIME, /* print command execution time */
};
-static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in,
- int in_size, u32 *out,
- int out_size)
-{
- int err;
-
- err = mlx5_cmd_exec(dev, in, in_size, out, out_size);
- if (err)
- return err;
-
- return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out);
-}
-
int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 77a7293921d5..b9736f505bdf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -49,48 +49,43 @@ void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev)
{
}
-int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey,
- struct mlx5_create_mkey_mbox_in *in, int inlen,
- mlx5_cmd_cbk_t callback, void *context,
- struct mlx5_create_mkey_mbox_out *out)
+int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
+ struct mlx5_core_mkey *mkey,
+ u32 *in, int inlen,
+ u32 *out, int outlen,
+ mlx5_cmd_cbk_t callback, void *context)
{
struct mlx5_mkey_table *table = &dev->priv.mkey_table;
- struct mlx5_create_mkey_mbox_out lout;
+ u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
+ u32 mkey_index;
+ void *mkc;
int err;
u8 key;
- memset(&lout, 0, sizeof(lout));
spin_lock_irq(&dev->priv.mkey_lock);
key = dev->priv.mkey_key++;
spin_unlock_irq(&dev->priv.mkey_lock);
- in->seg.qpn_mkey7_0 |= cpu_to_be32(key);
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_MKEY);
- if (callback) {
- err = mlx5_cmd_exec_cb(dev, in, inlen, out, sizeof(*out),
- callback, context);
- return err;
- } else {
- err = mlx5_cmd_exec(dev, in, inlen, &lout, sizeof(lout));
- }
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
- if (err) {
- mlx5_core_dbg(dev, "cmd exec failed %d\n", err);
- return err;
- }
+ MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
+ MLX5_SET(mkc, mkc, mkey_7_0, key);
- if (lout.hdr.status) {
- mlx5_core_dbg(dev, "status %d\n", lout.hdr.status);
- return mlx5_cmd_status_to_err(&lout.hdr);
- }
+ if (callback)
+ return mlx5_cmd_exec_cb(dev, in, inlen, out, outlen,
+ callback, context);
+
+ err = mlx5_cmd_exec(dev, in, inlen, lout, sizeof(lout));
+ if (err)
+ return err;
- mkey->iova = be64_to_cpu(in->seg.start_addr);
- mkey->size = be64_to_cpu(in->seg.len);
- mkey->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key;
- mkey->pd = be32_to_cpu(in->seg.flags_pd) & 0xffffff;
+ mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
+ mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
+ mkey->size = MLX5_GET64(mkc, mkc, len);
+ mkey->key = mlx5_idx_to_mkey(mkey_index) | key;
+ mkey->pd = MLX5_GET(mkc, mkc, pd);
mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
- be32_to_cpu(lout.mkey), key, mkey->key);
+ mkey_index, key, mkey->key);
/* connect to mkey tree */
write_lock_irq(&table->lock);
@@ -104,20 +99,25 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
return err;
}
+EXPORT_SYMBOL(mlx5_core_create_mkey_cb);
+
+int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
+ struct mlx5_core_mkey *mkey,
+ u32 *in, int inlen)
+{
+ return mlx5_core_create_mkey_cb(dev, mkey, in, inlen,
+ NULL, 0, NULL, NULL);
+}
EXPORT_SYMBOL(mlx5_core_create_mkey);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey)
{
struct mlx5_mkey_table *table = &dev->priv.mkey_table;
- struct mlx5_destroy_mkey_mbox_in in;
- struct mlx5_destroy_mkey_mbox_out out;
+ u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {0};
struct mlx5_core_mkey *deleted_mkey;
unsigned long flags;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
write_lock_irqsave(&table->lock, flags);
deleted_mkey = radix_tree_delete(&table->tree, mlx5_base_mkey(mkey->key));
@@ -128,94 +128,71 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
return -ENOENT;
}
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY);
- in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mkey->key));
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
- return err;
+ MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
+ MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
- struct mlx5_query_mkey_mbox_out *out, int outlen)
+ u32 *out, int outlen)
{
- struct mlx5_query_mkey_mbox_in in;
- int err;
+ u32 in[MLX5_ST_SZ_DW(query_mkey_in)] = {0};
- memset(&in, 0, sizeof(in));
memset(out, 0, outlen);
-
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_MKEY);
- in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mkey->key));
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
- if (err)
- return err;
-
- if (out->hdr.status)
- return mlx5_cmd_status_to_err(&out->hdr);
-
- return err;
+ MLX5_SET(query_mkey_in, in, opcode, MLX5_CMD_OP_QUERY_MKEY);
+ MLX5_SET(query_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL(mlx5_core_query_mkey);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
u32 *mkey)
{
- struct mlx5_query_special_ctxs_mbox_in in;
- struct mlx5_query_special_ctxs_mbox_out out;
+ u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {0};
int err;
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
-
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
- *mkey = be32_to_cpu(out.dump_fill_mkey);
-
+ MLX5_SET(query_special_contexts_in, in, opcode,
+ MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *mkey = MLX5_GET(query_special_contexts_out, out,
+ dump_fill_mkey);
return err;
}
EXPORT_SYMBOL(mlx5_core_dump_fill_mkey);
+static inline u32 mlx5_get_psv(u32 *out, int psv_index)
+{
+ switch (psv_index) {
+ case 1: return MLX5_GET(create_psv_out, out, psv1_index);
+ case 2: return MLX5_GET(create_psv_out, out, psv2_index);
+ case 3: return MLX5_GET(create_psv_out, out, psv3_index);
+ default: return MLX5_GET(create_psv_out, out, psv0_index);
+ }
+}
+
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
int npsvs, u32 *sig_index)
{
- struct mlx5_allocate_psv_in in;
- struct mlx5_allocate_psv_out out;
+ u32 out[MLX5_ST_SZ_DW(create_psv_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_psv_in)] = {0};
int i, err;
if (npsvs > MLX5_MAX_PSVS)
return -EINVAL;
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
+ MLX5_SET(create_psv_in, in, opcode, MLX5_CMD_OP_CREATE_PSV);
+ MLX5_SET(create_psv_in, in, pd, pdn);
+ MLX5_SET(create_psv_in, in, num_psv, npsvs);
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_PSV);
- in.npsv_pd = cpu_to_be32((npsvs << 28) | pdn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err) {
- mlx5_core_err(dev, "cmd exec failed %d\n", err);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
return err;
- }
-
- if (out.hdr.status) {
- mlx5_core_err(dev, "create_psv bad status %d\n",
- out.hdr.status);
- return mlx5_cmd_status_to_err(&out.hdr);
- }
for (i = 0; i < npsvs; i++)
- sig_index[i] = be32_to_cpu(out.psv_idx[i]) & 0xffffff;
+ sig_index[i] = mlx5_get_psv(out, i);
return err;
}
@@ -223,29 +200,11 @@ EXPORT_SYMBOL(mlx5_core_create_psv);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
{
- struct mlx5_destroy_psv_in in;
- struct mlx5_destroy_psv_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
+ u32 out[MLX5_ST_SZ_DW(destroy_psv_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_psv_in)] = {0};
- in.psv_number = cpu_to_be32(psv_num);
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_PSV);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err) {
- mlx5_core_err(dev, "destroy_psv cmd exec failed %d\n", err);
- goto out;
- }
-
- if (out.hdr.status) {
- mlx5_core_err(dev, "destroy_psv bad status %d\n",
- out.hdr.status);
- err = mlx5_cmd_status_to_err(&out.hdr);
- goto out;
- }
-
-out:
- return err;
+ MLX5_SET(destroy_psv_in, in, opcode, MLX5_CMD_OP_DESTROY_PSV);
+ MLX5_SET(destroy_psv_in, in, psvn, psv_num);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_psv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 32dea3524cee..673a7c96479a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -44,12 +44,6 @@ enum {
MLX5_PAGES_TAKE = 2
};
-enum {
- MLX5_BOOT_PAGES = 1,
- MLX5_INIT_PAGES = 2,
- MLX5_POST_INIT_PAGES = 3
-};
-
struct mlx5_pages_req {
struct mlx5_core_dev *dev;
u16 func_id;
@@ -67,33 +61,6 @@ struct fw_page {
unsigned free_count;
};
-struct mlx5_query_pages_inbox {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_query_pages_outbox {
- struct mlx5_outbox_hdr hdr;
- __be16 rsvd;
- __be16 func_id;
- __be32 num_pages;
-};
-
-struct mlx5_manage_pages_inbox {
- struct mlx5_inbox_hdr hdr;
- __be16 rsvd;
- __be16 func_id;
- __be32 num_entries;
- __be64 pas[0];
-};
-
-struct mlx5_manage_pages_outbox {
- struct mlx5_outbox_hdr hdr;
- __be32 num_entries;
- u8 rsvd[4];
- __be64 pas[0];
-};
-
enum {
MAX_RECLAIM_TIME_MSECS = 5000,
MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60,
@@ -167,24 +134,21 @@ static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
s32 *npages, int boot)
{
- struct mlx5_query_pages_inbox in;
- struct mlx5_query_pages_outbox out;
+ u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0};
int err;
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
- in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES);
+ MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
+ MLX5_SET(query_pages_in, in, op_mod, boot ?
+ MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
+ MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
- *npages = be32_to_cpu(out.num_pages);
- *func_id = be16_to_cpu(out.func_id);
+ *npages = MLX5_GET(query_pages_out, out, num_pages);
+ *func_id = MLX5_GET(query_pages_out, out, function_id);
return err;
}
@@ -280,46 +244,37 @@ out_alloc:
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
{
- struct mlx5_manage_pages_inbox *in;
- struct mlx5_manage_pages_outbox out;
+ u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
int err;
- in = kzalloc(sizeof(*in), GFP_KERNEL);
- if (!in)
- return;
-
- memset(&out, 0, sizeof(out));
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
- in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
- in->func_id = cpu_to_be16(func_id);
- err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
- if (!err)
- err = mlx5_cmd_status_to_err(&out.hdr);
+ MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
+ MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
+ MLX5_SET(manage_pages_in, in, function_id, func_id);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
- mlx5_core_warn(dev, "page notify failed\n");
-
- kfree(in);
+ mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
+ func_id, err);
}
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int notify_fail)
{
- struct mlx5_manage_pages_inbox *in;
- struct mlx5_manage_pages_outbox out;
- int inlen;
+ u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
+ int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
u64 addr;
int err;
+ u32 *in;
int i;
- inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
+ inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
in = mlx5_vzalloc(inlen);
if (!in) {
err = -ENOMEM;
mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
goto out_free;
}
- memset(&out, 0, sizeof(out));
for (i = 0; i < npages; i++) {
retry:
@@ -332,27 +287,21 @@ retry:
goto retry;
}
- in->pas[i] = cpu_to_be64(addr);
+ MLX5_SET64(manage_pages_in, in, pas[i], addr);
}
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
- in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
- in->func_id = cpu_to_be16(func_id);
- in->num_entries = cpu_to_be32(npages);
- err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
+ MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
+ MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
+ MLX5_SET(manage_pages_in, in, function_id, func_id);
+ MLX5_SET(manage_pages_in, in, input_num_entries, npages);
+
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (err) {
mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
func_id, npages, err);
goto out_4k;
}
- err = mlx5_cmd_status_to_err(&out.hdr);
- if (err) {
- mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
- func_id, npages, out.hdr.status);
- goto out_4k;
- }
-
dev->priv.fw_pages += npages;
if (func_id)
dev->priv.vfs_pages += npages;
@@ -364,7 +313,7 @@ retry:
out_4k:
for (i--; i >= 0; i--)
- free_4k(dev, be64_to_cpu(in->pas[i]));
+ free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
out_free:
kvfree(in);
if (notify_fail)
@@ -373,8 +322,7 @@ out_free:
}
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
- struct mlx5_manage_pages_inbox *in, int in_size,
- struct mlx5_manage_pages_outbox *out, int out_size)
+ u32 *in, int in_size, u32 *out, int out_size)
{
struct fw_page *fwp;
struct rb_node *p;
@@ -382,55 +330,54 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
u32 i = 0;
if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
- return mlx5_cmd_exec_check_status(dev, (u32 *)in, in_size,
- (u32 *)out, out_size);
+ return mlx5_cmd_exec(dev, in, in_size, out, out_size);
- npages = be32_to_cpu(in->num_entries);
+ /* No hard feelings, we want our pages back! */
+ npages = MLX5_GET(manage_pages_in, in, input_num_entries);
p = rb_first(&dev->priv.page_root);
while (p && i < npages) {
fwp = rb_entry(p, struct fw_page, rb_node);
- out->pas[i] = cpu_to_be64(fwp->addr);
+ MLX5_SET64(manage_pages_out, out, pas[i], fwp->addr);
p = rb_next(p);
i++;
}
- out->num_entries = cpu_to_be32(i);
+ MLX5_SET(manage_pages_out, out, output_num_entries, i);
return 0;
}
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
int *nclaimed)
{
- struct mlx5_manage_pages_inbox in;
- struct mlx5_manage_pages_outbox *out;
+ int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
+ u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
int num_claimed;
- int outlen;
- u64 addr;
+ u32 *out;
int err;
int i;
if (nclaimed)
*nclaimed = 0;
- memset(&in, 0, sizeof(in));
- outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
+ outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
out = mlx5_vzalloc(outlen);
if (!out)
return -ENOMEM;
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
- in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
- in.func_id = cpu_to_be16(func_id);
- in.num_entries = cpu_to_be32(npages);
+ MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
+ MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
+ MLX5_SET(manage_pages_in, in, function_id, func_id);
+ MLX5_SET(manage_pages_in, in, input_num_entries, npages);
+
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
- err = reclaim_pages_cmd(dev, &in, sizeof(in), out, outlen);
+ err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
if (err) {
mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
goto out_free;
}
- num_claimed = be32_to_cpu(out->num_entries);
+ num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
if (num_claimed > npages) {
mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
num_claimed, npages);
@@ -438,10 +385,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
goto out_free;
}
- for (i = 0; i < num_claimed; i++) {
- addr = be64_to_cpu(out->pas[i]);
- free_4k(dev, addr);
- }
+ for (i = 0; i < num_claimed; i++)
+ free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));
+
if (nclaimed)
*nclaimed = num_claimed;
@@ -518,8 +464,8 @@ static int optimal_reclaimed_pages(void)
int ret;
ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
- sizeof(struct mlx5_manage_pages_outbox)) /
- FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]);
+ MLX5_ST_SZ_BYTES(manage_pages_out)) /
+ MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
return ret;
}
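/*
 * Editor's note: give_pages() and reclaim_pages() above show the
 * variable-length variant of the conversion: the mailbox carries a trailing
 * pas[] array, so the buffer is sized as the fixed mlx5_ifc struct size plus
 * one 64-bit slot per page and allocated with mlx5_vzalloc(). A minimal
 * sketch of building such an inbox, assuming kernel context; the example_*
 * name and the addrs[] argument are illustrative and not part of this patch:
 */
static int example_give_pages(struct mlx5_core_dev *dev, u16 func_id,
			      u64 *addrs, int npages)
{
	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in) +
		    npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
	u32 *in;
	int err, i;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
	for (i = 0; i < npages; i++)
		MLX5_SET64(manage_pages_in, in, pas[i], addrs[i]);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	kvfree(in);
	return err;
}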
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pd.c b/drivers/net/ethernet/mellanox/mlx5/core/pd.c
index f2d3aee909e8..bd830d8d6c5f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pd.c
@@ -36,66 +36,27 @@
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
-struct mlx5_alloc_pd_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_alloc_pd_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 pdn;
- u8 rsvd[4];
-};
-
-struct mlx5_dealloc_pd_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 pdn;
- u8 rsvd[4];
-};
-
-struct mlx5_dealloc_pd_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn)
{
- struct mlx5_alloc_pd_mbox_in in;
- struct mlx5_alloc_pd_mbox_out out;
+ u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {0};
int err;
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_PD);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
- *pdn = be32_to_cpu(out.pdn) & 0xffffff;
+ MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *pdn = MLX5_GET(alloc_pd_out, out, pd);
return err;
}
EXPORT_SYMBOL(mlx5_core_alloc_pd);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn)
{
- struct mlx5_dealloc_pd_mbox_in in;
- struct mlx5_dealloc_pd_mbox_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_PD);
- in.pdn = cpu_to_be32(pdn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
+ u32 out[MLX5_ST_SZ_DW(dealloc_pd_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {0};
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
- return err;
+ MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
+ MLX5_SET(dealloc_pd_in, in, pd, pdn);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_dealloc_pd);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 752c08127138..2ee28ad8f918 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -38,45 +38,42 @@
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int size_in, void *data_out, int size_out,
- u16 reg_num, int arg, int write)
+ u16 reg_id, int arg, int write)
{
- struct mlx5_access_reg_mbox_in *in = NULL;
- struct mlx5_access_reg_mbox_out *out = NULL;
+ int outlen = MLX5_ST_SZ_BYTES(access_register_out) + size_out;
+ int inlen = MLX5_ST_SZ_BYTES(access_register_in) + size_in;
int err = -ENOMEM;
+ u32 *out = NULL;
+ u32 *in = NULL;
+ void *data;
- in = mlx5_vzalloc(sizeof(*in) + size_in);
- if (!in)
- return -ENOMEM;
-
- out = mlx5_vzalloc(sizeof(*out) + size_out);
- if (!out)
- goto ex1;
-
- memcpy(in->data, data_in, size_in);
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ACCESS_REG);
- in->hdr.opmod = cpu_to_be16(!write);
- in->arg = cpu_to_be32(arg);
- in->register_id = cpu_to_be16(reg_num);
- err = mlx5_cmd_exec(dev, in, sizeof(*in) + size_in, out,
- sizeof(*out) + size_out);
- if (err)
- goto ex2;
+ in = mlx5_vzalloc(inlen);
+ out = mlx5_vzalloc(outlen);
+ if (!in || !out)
+ goto out;
- if (out->hdr.status)
- err = mlx5_cmd_status_to_err(&out->hdr);
+ data = MLX5_ADDR_OF(access_register_in, in, register_data);
+ memcpy(data, data_in, size_in);
- if (!err)
- memcpy(data_out, out->data, size_out);
+ MLX5_SET(access_register_in, in, opcode, MLX5_CMD_OP_ACCESS_REG);
+ MLX5_SET(access_register_in, in, op_mod, !write);
+ MLX5_SET(access_register_in, in, argument, arg);
+ MLX5_SET(access_register_in, in, register_id, reg_id);
+
+ err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
+ if (err)
+ goto out;
-ex2:
+ data = MLX5_ADDR_OF(access_register_out, out, register_data);
+ memcpy(data_out, data, size_out);
+
+out:
kvfree(out);
-ex1:
kvfree(in);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_access_reg);
-
struct mlx5_reg_pcap {
u8 rsvd0;
u8 port_num;
@@ -104,12 +101,10 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask, u8 local_port)
{
- u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+ u32 in[MLX5_ST_SZ_DW(ptys_reg)] = {0};
- memset(in, 0, sizeof(in));
MLX5_SET(ptys_reg, in, local_port, local_port);
MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
-
return mlx5_core_access_reg(dev, in, sizeof(in), ptys,
ptys_size, MLX5_REG_PTYS, 0, 0);
}
@@ -117,13 +112,11 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration)
{
+ u32 in[MLX5_ST_SZ_DW(mlcr_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(mlcr_reg)];
- u32 in[MLX5_ST_SZ_DW(mlcr_reg)];
- memset(in, 0, sizeof(in));
MLX5_SET(mlcr_reg, in, local_port, 1);
MLX5_SET(mlcr_reg, in, beacon_duration, beacon_duration);
-
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_MLCR, 0, 1);
}
@@ -246,15 +239,12 @@ EXPORT_SYMBOL_GPL(mlx5_toggle_port_link);
int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status status)
{
- u32 in[MLX5_ST_SZ_DW(paos_reg)];
+ u32 in[MLX5_ST_SZ_DW(paos_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(paos_reg)];
- memset(in, 0, sizeof(in));
-
MLX5_SET(paos_reg, in, local_port, 1);
MLX5_SET(paos_reg, in, admin_status, status);
MLX5_SET(paos_reg, in, ase, 1);
-
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PAOS, 0, 1);
}
@@ -263,19 +253,15 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_admin_status);
int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status *status)
{
- u32 in[MLX5_ST_SZ_DW(paos_reg)];
+ u32 in[MLX5_ST_SZ_DW(paos_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(paos_reg)];
int err;
- memset(in, 0, sizeof(in));
-
MLX5_SET(paos_reg, in, local_port, 1);
-
err = mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PAOS, 0, 0);
if (err)
return err;
-
*status = MLX5_GET(paos_reg, out, admin_status);
return 0;
}
@@ -284,13 +270,10 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
u16 *max_mtu, u16 *oper_mtu, u8 port)
{
- u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+ u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
- memset(in, 0, sizeof(in));
-
MLX5_SET(pmtu_reg, in, local_port, port);
-
mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PMTU, 0, 0);
@@ -304,14 +287,11 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
{
- u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+ u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
- memset(in, 0, sizeof(in));
-
MLX5_SET(pmtu_reg, in, admin_mtu, mtu);
MLX5_SET(pmtu_reg, in, local_port, port);
-
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PMTU, 0, 1);
}
@@ -333,15 +313,12 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu);
static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
{
+ u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pmlp_reg)];
- u32 in[MLX5_ST_SZ_DW(pmlp_reg)];
int module_mapping;
int err;
- memset(in, 0, sizeof(in));
-
MLX5_SET(pmlp_reg, in, local_port, 1);
-
err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
MLX5_REG_PMLP, 0, 0);
if (err)
@@ -410,11 +387,9 @@ EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom);
static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
int pvlc_size, u8 local_port)
{
- u32 in[MLX5_ST_SZ_DW(pvlc_reg)];
+ u32 in[MLX5_ST_SZ_DW(pvlc_reg)] = {0};
- memset(in, 0, sizeof(in));
MLX5_SET(pvlc_reg, in, local_port, local_port);
-
return mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
pvlc_size, MLX5_REG_PVLC, 0, 0);
}
@@ -460,10 +435,9 @@ EXPORT_SYMBOL_GPL(mlx5_core_query_ib_ppcnt);
int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
{
- u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
- memset(in, 0, sizeof(in));
MLX5_SET(pfcc_reg, in, local_port, 1);
MLX5_SET(pfcc_reg, in, pptx, tx_pause);
MLX5_SET(pfcc_reg, in, pprx, rx_pause);
@@ -476,13 +450,11 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
int mlx5_query_port_pause(struct mlx5_core_dev *dev,
u32 *rx_pause, u32 *tx_pause)
{
- u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
int err;
- memset(in, 0, sizeof(in));
MLX5_SET(pfcc_reg, in, local_port, 1);
-
err = mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PFCC, 0, 0);
if (err)
@@ -500,10 +472,9 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_pause);
int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx)
{
- u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
- memset(in, 0, sizeof(in));
MLX5_SET(pfcc_reg, in, local_port, 1);
MLX5_SET(pfcc_reg, in, pfctx, pfc_en_tx);
MLX5_SET(pfcc_reg, in, pfcrx, pfc_en_rx);
@@ -517,13 +488,11 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_pfc);
int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
{
- u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
int err;
- memset(in, 0, sizeof(in));
MLX5_SET(pfcc_reg, in, local_port, 1);
-
err = mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PFCC, 0, 0);
if (err)
@@ -567,12 +536,11 @@ int mlx5_max_tc(struct mlx5_core_dev *mdev)
int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc)
{
- u32 in[MLX5_ST_SZ_DW(qtct_reg)];
+ u32 in[MLX5_ST_SZ_DW(qtct_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(qtct_reg)];
int err;
int i;
- memset(in, 0, sizeof(in));
for (i = 0; i < 8; i++) {
if (prio_tc[i] > mlx5_max_tc(mdev))
return -EINVAL;
@@ -617,11 +585,9 @@ static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group)
{
- u32 in[MLX5_ST_SZ_DW(qetc_reg)];
+ u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
int i;
- memset(in, 0, sizeof(in));
-
for (i = 0; i <= mlx5_max_tc(mdev); i++) {
MLX5_SET(qetc_reg, in, tc_configuration[i].g, 1);
MLX5_SET(qetc_reg, in, tc_configuration[i].group, tc_group[i]);
@@ -633,11 +599,9 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group);
int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
{
- u32 in[MLX5_ST_SZ_DW(qetc_reg)];
+ u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
int i;
- memset(in, 0, sizeof(in));
-
for (i = 0; i <= mlx5_max_tc(mdev); i++) {
MLX5_SET(qetc_reg, in, tc_configuration[i].b, 1);
MLX5_SET(qetc_reg, in, tc_configuration[i].bw_allocation, tc_bw[i]);
@@ -651,12 +615,10 @@ int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
u8 *max_bw_value,
u8 *max_bw_units)
{
- u32 in[MLX5_ST_SZ_DW(qetc_reg)];
+ u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
void *ets_tcn_conf;
int i;
- memset(in, 0, sizeof(in));
-
MLX5_SET(qetc_reg, in, port_number, 1);
for (i = 0; i <= mlx5_max_tc(mdev); i++) {
@@ -701,35 +663,24 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_ets_rate_limit);
int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
{
- u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)];
- u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)] = {0};
MLX5_SET(set_wol_rol_in, in, opcode, MLX5_CMD_OP_SET_WOL_ROL);
MLX5_SET(set_wol_rol_in, in, wol_mode_valid, 1);
MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode);
-
- return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
- out, sizeof(out));
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_set_port_wol);
int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
{
- u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)];
- u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)];
+ u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)] = {0};
int err;
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
-
MLX5_SET(query_wol_rol_in, in, opcode, MLX5_CMD_OP_QUERY_WOL_ROL);
-
- err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
- out, sizeof(out));
-
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (!err)
*wol_mode = MLX5_GET(query_wol_rol_out, out, wol_mode);
@@ -740,11 +691,9 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_wol);
static int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out,
int outlen)
{
- u32 in[MLX5_ST_SZ_DW(pcmr_reg)];
+ u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0};
- memset(in, 0, sizeof(in));
MLX5_SET(pcmr_reg, in, local_port, 1);
-
return mlx5_core_access_reg(mdev, in, sizeof(in), out,
outlen, MLX5_REG_PCMR, 0, 0);
}
@@ -759,12 +708,10 @@ static int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen)
int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable)
{
- u32 in[MLX5_ST_SZ_DW(pcmr_reg)];
+ u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0};
- memset(in, 0, sizeof(in));
MLX5_SET(pcmr_reg, in, local_port, 1);
MLX5_SET(pcmr_reg, in, fcs_chk, enable);
-
return mlx5_set_ports_check(mdev, in, sizeof(in));
}
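/*
 * Editor's note: the port helpers above all funnel through
 * mlx5_core_access_reg(), whose body now wraps the raw register payload in
 * the access_register_in/out layouts. A sketch of a register read through
 * that helper, mirroring mlx5_query_port_ptys() above; the example_* name is
 * illustrative and the caller must supply an output buffer of
 * MLX5_ST_SZ_BYTES(pmtu_reg) bytes:
 */
static int example_read_pmtu(struct mlx5_core_dev *dev, u8 port, u32 *pmtu)
{
	u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0};

	MLX5_SET(pmtu_reg, in, local_port, port);
	/* write == 0 selects a read; op_mod is derived from !write above */
	return mlx5_core_access_reg(dev, in, sizeof(in), pmtu,
				    MLX5_ST_SZ_BYTES(pmtu_reg),
				    MLX5_REG_PMTU, 0, 0);
}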
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index b82d65802d96..d0a4005fe63a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -271,30 +271,20 @@ static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
- struct mlx5_create_qp_mbox_in *in,
- int inlen)
+ u32 *in, int inlen)
{
- struct mlx5_create_qp_mbox_out out;
- struct mlx5_destroy_qp_mbox_in din;
- struct mlx5_destroy_qp_mbox_out dout;
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
+ u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)];
+ u32 din[MLX5_ST_SZ_DW(destroy_qp_in)];
int err;
- memset(&out, 0, sizeof(out));
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);
+ MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
- err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
- if (err) {
- mlx5_core_warn(dev, "ret %d\n", err);
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ if (err)
return err;
- }
-
- if (out.hdr.status) {
- mlx5_core_warn(dev, "current num of QPs 0x%x\n",
- atomic_read(&dev->num_qps));
- return mlx5_cmd_status_to_err(&out.hdr);
- }
- qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
+ qp->qpn = MLX5_GET(create_qp_out, out, qpn);
mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
@@ -311,12 +301,11 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
return 0;
err_cmd:
- memset(&din, 0, sizeof(din));
- memset(&dout, 0, sizeof(dout));
- din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
- din.qpn = cpu_to_be32(qp->qpn);
- mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout));
-
+ memset(din, 0, sizeof(din));
+ memset(dout, 0, sizeof(dout));
+	MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
+	MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
+ mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
@@ -324,45 +313,145 @@ EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp)
{
- struct mlx5_destroy_qp_mbox_in in;
- struct mlx5_destroy_qp_mbox_out out;
+ u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {0};
int err;
mlx5_debug_qp_remove(dev, qp);
destroy_qprqsq_common(dev, qp);
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
- in.qpn = cpu_to_be32(qp->qpn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
atomic_dec(&dev->num_qps);
return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
-int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation,
- struct mlx5_modify_qp_mbox_in *in, int sqd_event,
+struct mbox_info {
+ u32 *in;
+ u32 *out;
+ int inlen;
+ int outlen;
+};
+
+static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
+{
+ mbox->inlen = inlen;
+ mbox->outlen = outlen;
+ mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
+ mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
+ if (!mbox->in || !mbox->out) {
+ kfree(mbox->in);
+ kfree(mbox->out);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void mbox_free(struct mbox_info *mbox)
+{
+ kfree(mbox->in);
+ kfree(mbox->out);
+}
+
+static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
+ u32 opt_param_mask, void *qpc,
+ struct mbox_info *mbox)
+{
+ mbox->out = NULL;
+ mbox->in = NULL;
+
+#define MBOX_ALLOC(mbox, typ) \
+ mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))
+
+#define MOD_QP_IN_SET(typ, in, _opcode, _qpn) \
+ MLX5_SET(typ##_in, in, opcode, _opcode); \
+ MLX5_SET(typ##_in, in, qpn, _qpn)
+
+#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc) \
+ MOD_QP_IN_SET(typ, in, _opcode, _qpn); \
+ MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
+ memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, MLX5_ST_SZ_BYTES(qpc))
+
+ switch (opcode) {
+ /* 2RST & 2ERR */
+ case MLX5_CMD_OP_2RST_QP:
+ if (MBOX_ALLOC(mbox, qp_2rst))
+ return -ENOMEM;
+ MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn);
+ break;
+ case MLX5_CMD_OP_2ERR_QP:
+ if (MBOX_ALLOC(mbox, qp_2err))
+ return -ENOMEM;
+ MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn);
+ break;
+
+ /* MODIFY with QPC */
+ case MLX5_CMD_OP_RST2INIT_QP:
+ if (MBOX_ALLOC(mbox, rst2init_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc);
+ break;
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ if (MBOX_ALLOC(mbox, init2rtr_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc);
+ break;
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ if (MBOX_ALLOC(mbox, rtr2rts_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc);
+ break;
+ case MLX5_CMD_OP_RTS2RTS_QP:
+ if (MBOX_ALLOC(mbox, rts2rts_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc);
+ break;
+ case MLX5_CMD_OP_SQERR2RTS_QP:
+ if (MBOX_ALLOC(mbox, sqerr2rts_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc);
+ break;
+ case MLX5_CMD_OP_INIT2INIT_QP:
+ if (MBOX_ALLOC(mbox, init2init_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc);
+ break;
+ default:
+ mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
+ opcode, qpn);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
+ u32 opt_param_mask, void *qpc,
struct mlx5_core_qp *qp)
{
- struct mlx5_modify_qp_mbox_out out;
- int err = 0;
+ struct mbox_info mbox;
+ int err;
- memset(&out, 0, sizeof(out));
- in->hdr.opcode = cpu_to_be16(operation);
- in->qpn = cpu_to_be32(qp->qpn);
- err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
+ err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
+ opt_param_mask, qpc, &mbox);
if (err)
return err;
- return mlx5_cmd_status_to_err(&out.hdr);
+ err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
+ mbox_free(&mbox);
+ return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
@@ -382,66 +471,38 @@ void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
}
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
- struct mlx5_query_qp_mbox_out *out, int outlen)
+ u32 *out, int outlen)
{
- struct mlx5_query_qp_mbox_in in;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(out, 0, outlen);
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP);
- in.qpn = cpu_to_be32(qp->qpn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
- if (err)
- return err;
+ u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};
- if (out->hdr.status)
- return mlx5_cmd_status_to_err(&out->hdr);
-
- return err;
+ MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
+ MLX5_SET(query_qp_in, in, qpn, qp->qpn);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
- struct mlx5_alloc_xrcd_mbox_in in;
- struct mlx5_alloc_xrcd_mbox_out out;
+ u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0};
int err;
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_XRCD);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
- else
- *xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff;
-
+ MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
{
- struct mlx5_dealloc_xrcd_mbox_in in;
- struct mlx5_dealloc_xrcd_mbox_out out;
- int err;
+ u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0};
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_XRCD);
- in.xrcdn = cpu_to_be32(xrcdn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
-
- return err;
+ MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
+ MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
@@ -449,28 +510,23 @@ EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
u8 flags, int error)
{
- struct mlx5_page_fault_resume_mbox_in in;
- struct mlx5_page_fault_resume_mbox_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_PAGE_FAULT_RESUME);
- in.hdr.opmod = 0;
- flags &= (MLX5_PAGE_FAULT_RESUME_REQUESTOR |
- MLX5_PAGE_FAULT_RESUME_WRITE |
- MLX5_PAGE_FAULT_RESUME_RDMA);
- flags |= (error ? MLX5_PAGE_FAULT_RESUME_ERROR : 0);
- in.flags_qpn = cpu_to_be32((qpn & MLX5_QPN_MASK) |
- (flags << MLX5_QPN_BITS));
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
-
- return err;
+ u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {0};
+
+ MLX5_SET(page_fault_resume_in, in, opcode,
+ MLX5_CMD_OP_PAGE_FAULT_RESUME);
+ MLX5_SET(page_fault_resume_in, in, qpn, qpn);
+
+ if (flags & MLX5_PAGE_FAULT_RESUME_REQUESTOR)
+ MLX5_SET(page_fault_resume_in, in, req_res, 1);
+ if (flags & MLX5_PAGE_FAULT_RESUME_WRITE)
+ MLX5_SET(page_fault_resume_in, in, read_write, 1);
+ if (flags & MLX5_PAGE_FAULT_RESUME_RDMA)
+ MLX5_SET(page_fault_resume_in, in, rdma, 1);
+ if (error)
+ MLX5_SET(page_fault_resume_in, in, error, 1);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
#endif
@@ -541,15 +597,12 @@ EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
{
- u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)];
- u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)];
+ u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
int err;
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
-
MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
*counter_id = MLX5_GET(alloc_q_counter_out, out,
counter_set_id);
@@ -559,31 +612,25 @@ EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);
int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
{
- u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)];
- u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};
MLX5_SET(dealloc_q_counter_in, in, opcode,
MLX5_CMD_OP_DEALLOC_Q_COUNTER);
MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);
int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
int reset, void *out, int out_size)
{
- u32 in[MLX5_ST_SZ_DW(query_q_counter_in)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};
MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
MLX5_SET(query_q_counter_in, in, clear, reset);
MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_size);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}
EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);
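/*
 * Editor's note: mlx5_core_qp_modify() above no longer takes a raw mailbox
 * struct; modify_qp_mbox_alloc() selects the per-transition mlx5_ifc layout
 * and copies the caller's QPC in only for transitions that carry one. A
 * caller-side sketch, assuming kernel context; the 2RST transition is chosen
 * because it needs no QPC, so 0/NULL are passed for opt_param_mask/qpc:
 */
static int example_qp_to_reset(struct mlx5_core_dev *dev,
			       struct mlx5_core_qp *qp)
{
	return mlx5_core_qp_modify(dev, MLX5_CMD_OP_2RST_QP, 0, NULL, qp);
}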
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index c07c28bd3d55..104902a93a0b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -63,19 +63,14 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev,
u32 rate, u16 index)
{
- u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)];
- u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0};
MLX5_SET(set_rate_limit_in, in, opcode,
MLX5_CMD_OP_SET_RATE_LIMIT);
MLX5_SET(set_rate_limit_in, in, rate_limit_index, index);
MLX5_SET(set_rate_limit_in, in, rate_limit, rate);
-
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
- out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index b380a6bc1f85..0680dfb4bb1e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -155,13 +155,13 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!pci_vfs_assigned(pdev))
pci_disable_sriov(pdev);
else
- pr_info("unloading PF driver while leaving orphan VFs\n");
+ mlx5_core_info(dev, "unloading PF driver while leaving orphan VFs\n");
return 0;
}
err = mlx5_core_sriov_enable(pdev, num_vfs);
if (err) {
- dev_warn(&pdev->dev, "mlx5_core_sriov_enable failed %d\n", err);
+ mlx5_core_warn(dev, "mlx5_core_sriov_enable failed %d\n", err);
return err;
}
@@ -180,7 +180,8 @@ static int sync_required(struct pci_dev *pdev)
int cur_vfs = pci_num_vf(pdev);
if (cur_vfs != sriov->num_vfs) {
- pr_info("current VFs %d, registered %d - sync needed\n", cur_vfs, sriov->num_vfs);
+ mlx5_core_warn(dev, "current VFs %d, registered %d - sync needed\n",
+ cur_vfs, sriov->num_vfs);
return 1;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index c07f4d01b70e..3099630015d7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -175,8 +175,8 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(create_srq_in, create_in, opcode,
MLX5_CMD_OP_CREATE_SRQ);
- err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out,
- sizeof(create_out));
+ err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
+ sizeof(create_out));
kvfree(create_in);
if (!err)
srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
@@ -194,8 +194,8 @@ static int destroy_srq_cmd(struct mlx5_core_dev *dev,
MLX5_CMD_OP_DESTROY_SRQ);
MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
- return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
- srq_out, sizeof(srq_out));
+ return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
+ srq_out, sizeof(srq_out));
}
static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
@@ -209,8 +209,8 @@ static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn);
MLX5_SET(arm_xrc_srq_in, srq_in, lwm, lwm);
- return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
- srq_out, sizeof(srq_out));
+ return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
+ srq_out, sizeof(srq_out));
}
static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
@@ -228,9 +228,8 @@ static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(query_srq_in, srq_in, opcode,
MLX5_CMD_OP_QUERY_SRQ);
MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
- err = mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
- srq_out,
- MLX5_ST_SZ_BYTES(query_srq_out));
+ err = mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
+ srq_out, MLX5_ST_SZ_BYTES(query_srq_out));
if (err)
goto out;
@@ -272,8 +271,8 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
MLX5_CMD_OP_CREATE_XRC_SRQ);
memset(create_out, 0, sizeof(create_out));
- err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out,
- sizeof(create_out));
+ err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
+ sizeof(create_out));
if (err)
goto out;
@@ -286,36 +285,30 @@ out:
static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq)
{
- u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
- u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
-
- memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
- memset(xrcsrq_out, 0, sizeof(xrcsrq_out));
+ u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0};
+ u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};
MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
MLX5_CMD_OP_DESTROY_XRC_SRQ);
MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
- return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
- xrcsrq_out, sizeof(xrcsrq_out));
+ return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
+ xrcsrq_out, sizeof(xrcsrq_out));
}
static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq, u16 lwm)
{
- u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
- u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];
-
- memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
- memset(xrcsrq_out, 0, sizeof(xrcsrq_out));
+ u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
+ u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);
- return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
- xrcsrq_out, sizeof(xrcsrq_out));
+ return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
+ xrcsrq_out, sizeof(xrcsrq_out));
}
static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
@@ -335,9 +328,9 @@ static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
MLX5_CMD_OP_QUERY_XRC_SRQ);
MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
- err = mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
- xrcsrq_out,
- MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+
+ err = mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), xrcsrq_out,
+ MLX5_ST_SZ_BYTES(query_xrc_srq_out));
if (err)
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 28274a6fbafe..a00ff49eec18 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -36,17 +36,14 @@
int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn)
{
- u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)];
- u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)];
+ u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
int err;
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
-
MLX5_SET(alloc_transport_domain_in, in, opcode,
MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
*tdn = MLX5_GET(alloc_transport_domain_out, out,
transport_domain);
@@ -57,29 +54,23 @@ EXPORT_SYMBOL(mlx5_core_alloc_transport_domain);
void mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn)
{
- u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)];
- u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)] = {0};
MLX5_SET(dealloc_transport_domain_in, in, opcode,
MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
-
- mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_dealloc_transport_domain);
int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
{
- u32 out[MLX5_ST_SZ_DW(create_rq_out)];
+ u32 out[MLX5_ST_SZ_DW(create_rq_out)] = {0};
int err;
MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
-
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*rqn = MLX5_GET(create_rq_out, out, rqn);
@@ -95,21 +86,18 @@ int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen)
MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
memset(out, 0, sizeof(out));
- return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_modify_rq);
void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rq_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_rq_out)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {0};
MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
MLX5_SET(destroy_rq_in, in, rqn, rqn);
-
- mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_rq);
@@ -121,19 +109,17 @@ int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out)
MLX5_SET(query_rq_in, in, opcode, MLX5_CMD_OP_QUERY_RQ);
MLX5_SET(query_rq_in, in, rqn, rqn);
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL(mlx5_core_query_rq);
int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
{
- u32 out[MLX5_ST_SZ_DW(create_sq_out)];
+ u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
int err;
MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
-
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*sqn = MLX5_GET(create_sq_out, out, sqn);
@@ -142,27 +128,22 @@ int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen)
{
- u32 out[MLX5_ST_SZ_DW(modify_sq_out)];
+ u32 out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
MLX5_SET(modify_sq_in, in, sqn, sqn);
MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
-
- memset(out, 0, sizeof(out));
- return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_modify_sq);
void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_sq_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_sq_out)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {0};
MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
MLX5_SET(destroy_sq_in, in, sqn, sqn);
-
- mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out)
@@ -172,21 +153,20 @@ int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out)
MLX5_SET(query_sq_in, in, opcode, MLX5_CMD_OP_QUERY_SQ);
MLX5_SET(query_sq_in, in, sqn, sqn);
-
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL(mlx5_core_query_sq);
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tirn)
{
- u32 out[MLX5_ST_SZ_DW(create_tir_out)];
+ u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {0};
int err;
MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*tirn = MLX5_GET(create_tir_out, out, tirn);
@@ -197,39 +177,32 @@ EXPORT_SYMBOL(mlx5_core_create_tir);
int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
int inlen)
{
- u32 out[MLX5_ST_SZ_DW(modify_tir_out)];
+ u32 out[MLX5_ST_SZ_DW(modify_tir_out)] = {0};
MLX5_SET(modify_tir_in, in, tirn, tirn);
MLX5_SET(modify_tir_in, in, opcode, MLX5_CMD_OP_MODIFY_TIR);
-
- memset(out, 0, sizeof(out));
- return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_tir_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_tir_out)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_tir_out)] = {0};
MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
MLX5_SET(destroy_tir_in, in, tirn, tirn);
-
- mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_tir);
int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tisn)
{
- u32 out[MLX5_ST_SZ_DW(create_tis_out)];
+ u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {0};
int err;
MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
-
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*tisn = MLX5_GET(create_tis_out, out, tisn);
@@ -245,34 +218,29 @@ int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in,
MLX5_SET(modify_tis_in, in, tisn, tisn);
MLX5_SET(modify_tis_in, in, opcode, MLX5_CMD_OP_MODIFY_TIS);
- return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_modify_tis);
void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_tis_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_tis_out)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_tis_out)] = {0};
MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
MLX5_SET(destroy_tis_in, in, tisn, tisn);
-
- mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_tis);
int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rmpn)
{
- u32 out[MLX5_ST_SZ_DW(create_rmp_out)];
+ u32 out[MLX5_ST_SZ_DW(create_rmp_out)] = {0};
int err;
MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP);
-
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*rmpn = MLX5_GET(create_rmp_out, out, rmpn);
@@ -281,38 +249,31 @@ int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen)
{
- u32 out[MLX5_ST_SZ_DW(modify_rmp_out)];
+ u32 out[MLX5_ST_SZ_DW(modify_rmp_out)] = {0};
MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
-
- memset(out, 0, sizeof(out));
- return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {0};
MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
MLX5_SET(destroy_rmp_in, in, rmpn, rmpn);
-
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ return mlx5_cmd_exec(dev, in, sizeof(in), out,
sizeof(out));
}
int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out)
{
- u32 in[MLX5_ST_SZ_DW(query_rmp_in)];
+ u32 in[MLX5_ST_SZ_DW(query_rmp_in)] = {0};
int outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
- memset(in, 0, sizeof(in));
MLX5_SET(query_rmp_in, in, opcode, MLX5_CMD_OP_QUERY_RMP);
MLX5_SET(query_rmp_in, in, rmpn, rmpn);
-
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm)
@@ -347,13 +308,11 @@ int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm)
int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *xsrqn)
{
- u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
+ u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)] = {0};
int err;
MLX5_SET(create_xrc_srq_in, in, opcode, MLX5_CMD_OP_CREATE_XRC_SRQ);
-
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*xsrqn = MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
@@ -362,33 +321,25 @@ int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};
MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, xsrqn);
-
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out)
{
- u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
+ u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {0};
void *srqc;
void *xrc_srqc;
int err;
- memset(in, 0, sizeof(in));
MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ);
MLX5_SET(query_xrc_srq_in, in, xrc_srqn, xsrqn);
-
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in),
- out,
- MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out,
+ MLX5_ST_SZ_BYTES(query_xrc_srq_out));
if (!err) {
xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, out,
xrc_srq_context_entry);
@@ -401,32 +352,25 @@ int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out)
int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
{
- u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
- u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, xsrqn);
MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);
MLX5_SET(arm_xrc_srq_in, in, op_mod,
MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
-
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rqtn)
{
- u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
+ u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
int err;
MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
-
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*rqtn = MLX5_GET(create_rqt_out, out, rqtn);
@@ -437,25 +381,20 @@ EXPORT_SYMBOL(mlx5_core_create_rqt);
int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
int inlen)
{
- u32 out[MLX5_ST_SZ_DW(modify_rqt_out)];
+ u32 out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0};
MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
-
- memset(out, 0, sizeof(out));
- return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0};
MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
-
- mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_rqt);
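/*
 * Editor's note: the transobj.c wrappers keep their signatures; only the
 * executor changed from mlx5_cmd_exec_check_status() to mlx5_cmd_exec().
 * A caller-side sketch of one of them, assuming kernel context; the TIS
 * context fields set here (prio, transport_domain) and the "ctx" field name
 * reflect the editor's reading of mlx5_ifc and are illustrative only:
 */
static int example_create_tis(struct mlx5_core_dev *dev, u32 tdn, u32 *tisn)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(tisc, tisc, prio, 0);
	MLX5_SET(tisc, tisc, transport_domain, tdn);
	return mlx5_core_create_tis(dev, in, sizeof(in), tisn);
}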
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 5ff8af472bf5..ab0b896621a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -42,73 +42,28 @@ enum {
NUM_LOW_LAT_UUARS = 4,
};
-
-struct mlx5_alloc_uar_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_alloc_uar_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 uarn;
- u8 rsvd[4];
-};
-
-struct mlx5_free_uar_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 uarn;
- u8 rsvd[4];
-};
-
-struct mlx5_free_uar_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
{
- struct mlx5_alloc_uar_mbox_in in;
- struct mlx5_alloc_uar_mbox_out out;
+ u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {0};
int err;
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_UAR);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- goto ex;
-
- if (out.hdr.status) {
- err = mlx5_cmd_status_to_err(&out.hdr);
- goto ex;
- }
-
- *uarn = be32_to_cpu(out.uarn) & 0xffffff;
-
-ex:
+ MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *uarn = MLX5_GET(alloc_uar_out, out, uar);
return err;
}
EXPORT_SYMBOL(mlx5_cmd_alloc_uar);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
{
- struct mlx5_free_uar_mbox_in in;
- struct mlx5_free_uar_mbox_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_UAR);
- in.uarn = cpu_to_be32(uarn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- goto ex;
+ u32 out[MLX5_ST_SZ_DW(dealloc_uar_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {0};
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
-
-ex:
- return err;
+ MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
+ MLX5_SET(dealloc_uar_in, in, uar, uarn);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_cmd_free_uar);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 21365d06982b..525f17af108e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -39,10 +39,7 @@
static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u32 *out, int outlen)
{
- int err;
- u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};
MLX5_SET(query_vport_state_in, in, opcode,
MLX5_CMD_OP_QUERY_VPORT_STATE);
@@ -51,11 +48,7 @@ static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
if (vport)
MLX5_SET(query_vport_state_in, in, other_vport, 1);
- err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
- if (err)
- mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
-
- return err;
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
@@ -81,58 +74,43 @@ EXPORT_SYMBOL_GPL(mlx5_query_vport_admin_state);
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u8 state)
{
- u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)];
- u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)];
- int err;
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};
MLX5_SET(modify_vport_state_in, in, opcode,
MLX5_CMD_OP_MODIFY_VPORT_STATE);
MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
MLX5_SET(modify_vport_state_in, in, vport_number, vport);
-
if (vport)
MLX5_SET(modify_vport_state_in, in, other_vport, 1);
-
MLX5_SET(modify_vport_state_in, in, admin_state, state);
- err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
- sizeof(out));
- if (err)
- mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");
-
- return err;
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_modify_vport_admin_state);
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
u32 *out, int outlen)
{
- u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
MLX5_SET(query_nic_vport_context_in, in, opcode,
MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
-
MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
if (vport)
MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
- return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
int inlen)
{
- u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
+ u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
MLX5_SET(modify_nic_vport_context_in, in, opcode,
MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
-
- memset(out, 0, sizeof(out));
- return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
}
void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
@@ -147,6 +125,26 @@ void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
+int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+ u16 vport, u8 min_inline)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ void *nic_vport_ctx;
+
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.min_inline, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+
+ nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
+ in, nic_vport_context);
+ MLX5_SET(nic_vport_context, nic_vport_ctx,
+ min_wqe_inline_mode, min_inline);
+
+ return mlx5_modify_nic_vport_context(mdev, in, inlen);
+}
+
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr)
{
@@ -254,7 +252,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
u8 addr_list[][ETH_ALEN],
int *list_size)
{
- u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
+ u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
void *nic_vport_ctx;
int max_list_size;
int req_list_size;
@@ -278,7 +276,6 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
- memset(in, 0, sizeof(in));
out = kzalloc(out_sz, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -291,7 +288,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
if (vport)
MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
if (err)
goto out;
@@ -361,7 +358,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
ether_addr_copy(curr_mac, addr_list[i]);
}
- err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
kfree(in);
return err;
}
@@ -406,7 +403,7 @@ int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
if (vport)
MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
if (err)
goto out;
@@ -473,7 +470,7 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
}
- err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
kfree(in);
return err;
}
@@ -631,10 +628,6 @@ int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
if (err)
goto out;
- err = mlx5_cmd_status_to_err_v2(out);
- if (err)
- goto out;
-
tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
gid->global.subnet_prefix = tmp->global.subnet_prefix;
gid->global.interface_id = tmp->global.interface_id;
@@ -700,10 +693,6 @@ int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
if (err)
goto out;
- err = mlx5_cmd_status_to_err_v2(out);
- if (err)
- goto out;
-
pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
*pkey = MLX5_GET_PR(pkey, pkarr, pkey);
@@ -721,7 +710,7 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
struct mlx5_hca_vport_context *rep)
{
int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
- int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
+ int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};
int is_group_manager;
void *out;
void *ctx;
@@ -729,7 +718,6 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
- memset(in, 0, sizeof(in));
out = kzalloc(out_sz, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -752,9 +740,6 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
if (err)
goto ex;
- err = mlx5_cmd_status_to_err_v2(out);
- if (err)
- goto ex;
ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
@@ -969,10 +954,6 @@ int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
MLX5_SET(query_vport_counter_in, in, port_num, port_num);
err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
- if (err)
- goto free;
- err = mlx5_cmd_status_to_err_v2(out);
-
free:
kvfree(in);
return err;
@@ -1035,11 +1016,6 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter);
MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter);
err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
- if (err)
- goto ex;
-
- err = mlx5_cmd_status_to_err_v2(out);
-
ex:
kfree(in);
return err;
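
The new mlx5_modify_nic_vport_min_inline() pairs with the existing query helper. A hedged usage sketch follows; copying the PF's minimum inline mode to vport 1 is an illustrative assumption, not taken from this patch.

	u8 min_inline;
	int err;

	mlx5_query_nic_vport_min_inline(mdev, &min_inline);
	/* other_vport is set inside the helper, so vport 1 addresses another vport */
	err = mlx5_modify_nic_vport_min_inline(mdev, 1, min_inline);
	if (err)
		mlx5_core_warn(mdev, "failed to set min inline on vport 1\n");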
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
index e25a73ed2981..07a9ba6cfc70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
@@ -46,41 +46,24 @@ void mlx5e_vxlan_init(struct mlx5e_priv *priv)
static int mlx5e_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
{
- struct mlx5_outbox_hdr *hdr;
- int err;
-
- u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)];
- u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0};
MLX5_SET(add_vxlan_udp_dport_in, in, opcode,
MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT);
MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port);
-
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
- if (err)
- return err;
-
- hdr = (struct mlx5_outbox_hdr *)out;
- return hdr->status ? -ENOMEM : 0;
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
{
- u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)];
- u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0};
MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port);
-
- return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
- sizeof(out));
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 2566f6d6444f..7c3c0d3aca37 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -170,12 +170,12 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
int mlx5_init_cq_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- struct mlx5_create_cq_mbox_in *in, int inlen);
+ u32 *in, int inlen);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- struct mlx5_query_cq_mbox_out *out);
+ u32 *out, int outlen);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- struct mlx5_modify_cq_mbox_in *in, int in_sz);
+ u32 *in, int inlen);
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
struct mlx5_core_cq *cq, u16 cq_period,
u16 cq_max_count);
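
With the CQ entry points now taking raw mlx5_ifc buffers, a caller builds create_cq_in with MLX5_SET()/MLX5_ADDR_OF() and passes it straight through. A simplified sketch; the cqc fields, buffer sizing, allocation and the local names (npages, ncqe, uar_index, eqn) are chosen for illustration only.

	int inlen = MLX5_ST_SZ_BYTES(create_cq_in) + npages * sizeof(u64);
	void *cqc;
	u32 *in;
	int err;

	in = kzalloc(inlen, GFP_KERNEL);	/* large PAS arrays are usually vzalloc'ed */
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(ncqe));
	MLX5_SET(cqc, cqc, uar_page, uar_index);
	MLX5_SET(cqc, cqc, c_eqn, eqn);
	/* fill MLX5_ADDR_OF(create_cq_in, in, pas) with the CQ buffer pages */

	err = mlx5_core_create_cq(dev, cq, in, inlen);
	kfree(in);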
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 0b6d15cddb2f..2575070c836e 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -198,19 +198,6 @@ enum {
};
enum {
- MLX5_ACCESS_MODE_PA = 0,
- MLX5_ACCESS_MODE_MTT = 1,
- MLX5_ACCESS_MODE_KLM = 2
-};
-
-enum {
- MLX5_MKEY_REMOTE_INVAL = 1 << 24,
- MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
- MLX5_MKEY_BSF_EN = 1 << 30,
- MLX5_MKEY_LEN64 = 1 << 31,
-};
-
-enum {
MLX5_EN_RD = (u64)1,
MLX5_EN_WR = (u64)2
};
@@ -411,33 +398,6 @@ enum {
MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
};
-struct mlx5_inbox_hdr {
- __be16 opcode;
- u8 rsvd[4];
- __be16 opmod;
-};
-
-struct mlx5_outbox_hdr {
- u8 status;
- u8 rsvd[3];
- __be32 syndrome;
-};
-
-struct mlx5_cmd_query_adapter_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_cmd_query_adapter_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[24];
- u8 intapin;
- u8 rsvd1[13];
- __be16 vsd_vendor_id;
- u8 vsd[208];
- u8 vsd_psid[16];
-};
-
enum mlx5_odp_transport_cap_bits {
MLX5_ODP_SUPPORT_SEND = 1 << 31,
MLX5_ODP_SUPPORT_RECV = 1 << 30,
@@ -455,30 +415,6 @@ struct mlx5_odp_caps {
char reserved2[0xe4];
};
-struct mlx5_cmd_init_hca_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd0[2];
- __be16 profile;
- u8 rsvd1[4];
-};
-
-struct mlx5_cmd_init_hca_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_cmd_teardown_hca_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd0[2];
- __be16 profile;
- u8 rsvd1[4];
-};
-
-struct mlx5_cmd_teardown_hca_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
struct mlx5_cmd_layout {
u8 type;
u8 rsvd0[3];
@@ -494,7 +430,6 @@ struct mlx5_cmd_layout {
u8 status_own;
};
-
struct health_buffer {
__be32 assert_var[5];
__be32 rsvd0[3];
@@ -856,245 +791,15 @@ struct mlx5_cqe128 {
struct mlx5_cqe64 cqe64;
};
-struct mlx5_srq_ctx {
- u8 state_log_sz;
- u8 rsvd0[3];
- __be32 flags_xrcd;
- __be32 pgoff_cqn;
- u8 rsvd1[4];
- u8 log_pg_sz;
- u8 rsvd2[7];
- __be32 pd;
- __be16 lwm;
- __be16 wqe_cnt;
- u8 rsvd3[8];
- __be64 db_record;
-};
-
-struct mlx5_create_srq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 input_srqn;
- u8 rsvd0[4];
- struct mlx5_srq_ctx ctx;
- u8 rsvd1[208];
- __be64 pas[0];
-};
-
-struct mlx5_create_srq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 srqn;
- u8 rsvd[4];
-};
-
-struct mlx5_destroy_srq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 srqn;
- u8 rsvd[4];
-};
-
-struct mlx5_destroy_srq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_query_srq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 srqn;
- u8 rsvd0[4];
-};
-
-struct mlx5_query_srq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[8];
- struct mlx5_srq_ctx ctx;
- u8 rsvd1[32];
- __be64 pas[0];
-};
-
-struct mlx5_arm_srq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 srqn;
- __be16 rsvd;
- __be16 lwm;
-};
-
-struct mlx5_arm_srq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_cq_context {
- u8 status;
- u8 cqe_sz_flags;
- u8 st;
- u8 rsvd3;
- u8 rsvd4[6];
- __be16 page_offset;
- __be32 log_sz_usr_page;
- __be16 cq_period;
- __be16 cq_max_count;
- __be16 rsvd20;
- __be16 c_eqn;
- u8 log_pg_sz;
- u8 rsvd25[7];
- __be32 last_notified_index;
- __be32 solicit_producer_index;
- __be32 consumer_counter;
- __be32 producer_counter;
- u8 rsvd48[8];
- __be64 db_record_addr;
-};
-
-struct mlx5_create_cq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 input_cqn;
- u8 rsvdx[4];
- struct mlx5_cq_context ctx;
- u8 rsvd6[192];
- __be64 pas[0];
-};
-
-struct mlx5_create_cq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 cqn;
- u8 rsvd0[4];
-};
-
-struct mlx5_destroy_cq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 cqn;
- u8 rsvd0[4];
-};
-
-struct mlx5_destroy_cq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[8];
-};
-
-struct mlx5_query_cq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 cqn;
- u8 rsvd0[4];
-};
-
-struct mlx5_query_cq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[8];
- struct mlx5_cq_context ctx;
- u8 rsvd6[16];
- __be64 pas[0];
-};
-
-struct mlx5_modify_cq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 cqn;
- __be32 field_select;
- struct mlx5_cq_context ctx;
- u8 rsvd[192];
- __be64 pas[0];
-};
-
-struct mlx5_modify_cq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_enable_hca_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_enable_hca_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_disable_hca_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_disable_hca_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_eq_context {
- u8 status;
- u8 ec_oi;
- u8 st;
- u8 rsvd2[7];
- __be16 page_pffset;
- __be32 log_sz_usr_page;
- u8 rsvd3[7];
- u8 intr;
- u8 log_page_size;
- u8 rsvd4[15];
- __be32 consumer_counter;
- __be32 produser_counter;
- u8 rsvd5[16];
-};
-
-struct mlx5_create_eq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd0[3];
- u8 input_eqn;
- u8 rsvd1[4];
- struct mlx5_eq_context ctx;
- u8 rsvd2[8];
- __be64 events_mask;
- u8 rsvd3[176];
- __be64 pas[0];
-};
-
-struct mlx5_create_eq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[3];
- u8 eq_number;
- u8 rsvd1[4];
-};
-
-struct mlx5_destroy_eq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd0[3];
- u8 eqn;
- u8 rsvd1[4];
-};
-
-struct mlx5_destroy_eq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_map_eq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be64 mask;
- u8 mu;
- u8 rsvd0[2];
- u8 eqn;
- u8 rsvd1[24];
-};
-
-struct mlx5_map_eq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_query_eq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd0[3];
- u8 eqn;
- u8 rsvd1[4];
-};
-
-struct mlx5_query_eq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
- struct mlx5_eq_context ctx;
+enum {
+ MLX5_MKEY_STATUS_FREE = 1 << 6,
};
enum {
- MLX5_MKEY_STATUS_FREE = 1 << 6,
+ MLX5_MKEY_REMOTE_INVAL = 1 << 24,
+ MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
+ MLX5_MKEY_BSF_EN = 1 << 30,
+ MLX5_MKEY_LEN64 = 1 << 31,
};
struct mlx5_mkey_seg {
@@ -1119,134 +824,12 @@ struct mlx5_mkey_seg {
u8 rsvd4[4];
};
-struct mlx5_query_special_ctxs_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_query_special_ctxs_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 dump_fill_mkey;
- __be32 reserved_lkey;
-};
-
-struct mlx5_create_mkey_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 input_mkey_index;
- __be32 flags;
- struct mlx5_mkey_seg seg;
- u8 rsvd1[16];
- __be32 xlat_oct_act_size;
- __be32 rsvd2;
- u8 rsvd3[168];
- __be64 pas[0];
-};
-
-struct mlx5_create_mkey_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 mkey;
- u8 rsvd[4];
-};
-
-struct mlx5_destroy_mkey_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 mkey;
- u8 rsvd[4];
-};
-
-struct mlx5_destroy_mkey_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_query_mkey_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 mkey;
-};
-
-struct mlx5_query_mkey_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be64 pas[0];
-};
-
-struct mlx5_modify_mkey_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 mkey;
- __be64 pas[0];
-};
-
-struct mlx5_modify_mkey_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_dump_mkey_mbox_in {
- struct mlx5_inbox_hdr hdr;
-};
-
-struct mlx5_dump_mkey_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 mkey;
-};
-
-struct mlx5_mad_ifc_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be16 remote_lid;
- u8 rsvd0;
- u8 port;
- u8 rsvd1[4];
- u8 data[256];
-};
-
-struct mlx5_mad_ifc_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
- u8 data[256];
-};
-
-struct mlx5_access_reg_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd0[2];
- __be16 register_id;
- __be32 arg;
- __be32 data[0];
-};
-
-struct mlx5_access_reg_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
- __be32 data[0];
-};
-
#define MLX5_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)
enum {
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0
};
-struct mlx5_allocate_psv_in {
- struct mlx5_inbox_hdr hdr;
- __be32 npsv_pd;
- __be32 rsvd_psv0;
-};
-
-struct mlx5_allocate_psv_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
- __be32 psv_idx[4];
-};
-
-struct mlx5_destroy_psv_in {
- struct mlx5_inbox_hdr hdr;
- __be32 psv_number;
- u8 rsvd[4];
-};
-
-struct mlx5_destroy_psv_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
enum {
VPORT_STATE_DOWN = 0x0,
VPORT_STATE_UP = 0x1,
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index ccea6fb16482..ebe57abf3324 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -771,14 +771,15 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
-int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
-int mlx5_cmd_status_to_err_v2(void *ptr);
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
+
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
void *out, int out_size, mlx5_cmd_cbk_t callback,
void *context);
+void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
+
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
@@ -807,15 +808,18 @@ int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq);
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
+int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
+ struct mlx5_core_mkey *mkey,
+ u32 *in, int inlen,
+ u32 *out, int outlen,
+ mlx5_cmd_cbk_t callback, void *context);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey,
- struct mlx5_create_mkey_mbox_in *in, int inlen,
- mlx5_cmd_cbk_t callback, void *context,
- struct mlx5_create_mkey_mbox_out *out);
+ u32 *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
- struct mlx5_query_mkey_mbox_out *out, int outlen);
+ u32 *out, int outlen);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
@@ -865,7 +869,7 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
- struct mlx5_query_eq_mbox_out *out, int outlen);
+ u32 *out, int outlen);
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
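
The new mlx5_cmd_mbox_status() lets a caller read the raw firmware status and syndrome out of a command outbox, for instance for logging. A minimal sketch, assuming out is an outbox that was just filled by a command:

	u8 status;
	u32 syndrome;

	mlx5_cmd_mbox_status(out, &status, &syndrome);
	if (status)
		mlx5_core_warn(dev, "command failed, status 0x%x, syndrome 0x%x\n",
			       status, syndrome);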
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 21bc4557b67a..3766110e13ea 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -152,7 +152,7 @@ enum {
MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804,
MLX5_CMD_OP_ACCESS_REG = 0x805,
MLX5_CMD_OP_ATTACH_TO_MCG = 0x806,
- MLX5_CMD_OP_DETTACH_FROM_MCG = 0x807,
+ MLX5_CMD_OP_DETACH_FROM_MCG = 0x807,
MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a,
MLX5_CMD_OP_MAD_IFC = 0x50d,
MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b,
@@ -212,6 +212,8 @@ enum {
MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a,
MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
+ MLX5_CMD_OP_ALLOC_ENCAP_HEADER = 0x93d,
+ MLX5_CMD_OP_DEALLOC_ENCAP_HEADER = 0x93e,
MLX5_CMD_OP_MAX
};
@@ -281,7 +283,9 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 modify_root[0x1];
u8 identified_miss_table_mode[0x1];
u8 flow_table_modify[0x1];
- u8 reserved_at_7[0x19];
+ u8 encap[0x1];
+ u8 decap[0x1];
+ u8 reserved_at_9[0x17];
u8 reserved_at_20[0x2];
u8 log_max_ft_size[0x6];
@@ -512,7 +516,15 @@ struct mlx5_ifc_e_switch_cap_bits {
u8 nic_vport_node_guid_modify[0x1];
u8 nic_vport_port_guid_modify[0x1];
- u8 reserved_at_20[0x7e0];
+ u8 vxlan_encap_decap[0x1];
+ u8 nvgre_encap_decap[0x1];
+ u8 reserved_at_22[0x9];
+ u8 log_max_encap_headers[0x5];
+ u8 reserved_2b[0x6];
+ u8 max_encap_header_size[0xa];
+
+ u8 reserved_40[0x7c0];
+
};
struct mlx5_ifc_qos_cap_bits {
@@ -1966,7 +1978,10 @@ struct mlx5_ifc_qpc_bits {
u8 reserved_at_3e0[0x8];
u8 cqn_snd[0x18];
- u8 reserved_at_400[0x40];
+ u8 reserved_at_400[0x8];
+ u8 deth_sqpn[0x18];
+
+ u8 reserved_at_420[0x20];
u8 reserved_at_440[0x8];
u8 last_acked_psn[0x18];
@@ -2064,6 +2079,8 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4,
MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8,
+ MLX5_FLOW_CONTEXT_ACTION_ENCAP = 0x10,
+ MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20,
};
struct mlx5_ifc_flow_context_bits {
@@ -2083,7 +2100,9 @@ struct mlx5_ifc_flow_context_bits {
u8 reserved_at_a0[0x8];
u8 flow_counter_list_size[0x18];
- u8 reserved_at_c0[0x140];
+ u8 encap_id[0x20];
+
+ u8 reserved_at_e0[0x120];
struct mlx5_ifc_fte_match_param_bits match_value;
@@ -3489,7 +3508,7 @@ struct mlx5_ifc_query_special_contexts_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x20];
+ u8 dump_fill_mkey[0x20];
u8 resd_lkey[0x20];
};
@@ -4213,6 +4232,85 @@ struct mlx5_ifc_query_eq_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_encap_header_in_bits {
+ u8 reserved_at_0[0x5];
+ u8 header_type[0x3];
+ u8 reserved_at_8[0xe];
+ u8 encap_header_size[0xa];
+
+ u8 reserved_at_20[0x10];
+ u8 encap_header[2][0x8];
+
+ u8 more_encap_header[0][0x8];
+};
+
+struct mlx5_ifc_query_encap_header_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0xa0];
+
+ struct mlx5_ifc_encap_header_in_bits encap_header[0];
+};
+
+struct mlx5_ifc_query_encap_header_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 encap_id[0x20];
+
+ u8 reserved_at_60[0xa0];
+};
+
+struct mlx5_ifc_alloc_encap_header_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 encap_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_encap_header_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0xa0];
+
+ struct mlx5_ifc_encap_header_in_bits encap_header;
+};
+
+struct mlx5_ifc_dealloc_encap_header_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_encap_header_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 encap_id[0x20];
+
+ u8 reserved_60[0x20];
+};
+
struct mlx5_ifc_query_dct_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -4721,7 +4819,7 @@ struct mlx5_ifc_modify_nic_vport_field_select_bits {
u8 reserved_at_0[0x16];
u8 node_guid[0x1];
u8 port_guid[0x1];
- u8 reserved_at_18[0x1];
+ u8 min_inline[0x1];
u8 mtu[0x1];
u8 change_event[0x1];
u8 promisc[0x1];
@@ -6099,7 +6197,9 @@ struct mlx5_ifc_create_flow_table_in_bits {
u8 reserved_at_a0[0x20];
- u8 reserved_at_c0[0x4];
+ u8 encap_en[0x1];
+ u8 decap_en[0x1];
+ u8 reserved_at_c2[0x2];
u8 table_miss_mode[0x4];
u8 level[0x8];
u8 reserved_at_d0[0x8];
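
The new ALLOC_ENCAP_HEADER command carries a variable-length encap_header_in block inside its inbox. A hedged sketch of allocating an encap header, with buffer sizing and error handling simplified for illustration; dev, header, header_size, header_type and encap_id are assumed caller-provided.

	u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(alloc_encap_header_in) + header_size;
	void *encap;
	u32 *in;
	int err;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(alloc_encap_header_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_ENCAP_HEADER);
	encap = MLX5_ADDR_OF(alloc_encap_header_in, in, encap_header);
	MLX5_SET(encap_header_in, encap, header_type, header_type);
	MLX5_SET(encap_header_in, encap, encap_header_size, header_size);
	memcpy(MLX5_ADDR_OF(encap_header_in, encap, encap_header),
	       header, header_size);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (!err)
		*encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id);
	kfree(in);
	return err;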
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 7879bf411891..0aacb2a7480d 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -123,12 +123,13 @@ enum {
};
enum {
- MLX5_NON_ZERO_RQ = 0 << 24,
- MLX5_SRQ_RQ = 1 << 24,
- MLX5_CRQ_RQ = 2 << 24,
- MLX5_ZERO_LEN_RQ = 3 << 24
+ MLX5_NON_ZERO_RQ = 0x0,
+ MLX5_SRQ_RQ = 0x1,
+ MLX5_CRQ_RQ = 0x2,
+ MLX5_ZERO_LEN_RQ = 0x3
};
+/* TODO REM */
enum {
/* params1 */
MLX5_QP_BIT_SRE = 1 << 15,
@@ -178,12 +179,6 @@ enum {
};
enum {
- MLX5_QP_LAT_SENSITIVE = 1 << 28,
- MLX5_QP_BLOCK_MCAST = 1 << 30,
- MLX5_QP_ENABLE_SIG = 1 << 31,
-};
-
-enum {
MLX5_RCV_DBR = 0,
MLX5_SND_DBR = 1,
};
@@ -484,6 +479,7 @@ struct mlx5_qp_path {
u8 rmac[6];
};
+/* FIXME: use mlx5_ifc.h qpc */
struct mlx5_qp_context {
__be32 flags;
__be32 flags_pd;
@@ -525,99 +521,6 @@ struct mlx5_qp_context {
u8 rsvd1[24];
};
-struct mlx5_create_qp_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 input_qpn;
- u8 rsvd0[4];
- __be32 opt_param_mask;
- u8 rsvd1[4];
- struct mlx5_qp_context ctx;
- u8 rsvd3[16];
- __be64 pas[0];
-};
-
-struct mlx5_create_qp_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 qpn;
- u8 rsvd0[4];
-};
-
-struct mlx5_destroy_qp_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 qpn;
- u8 rsvd0[4];
-};
-
-struct mlx5_destroy_qp_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[8];
-};
-
-struct mlx5_modify_qp_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 qpn;
- u8 rsvd0[4];
- __be32 optparam;
- u8 rsvd1[4];
- struct mlx5_qp_context ctx;
- u8 rsvd2[16];
-};
-
-struct mlx5_modify_qp_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[8];
-};
-
-struct mlx5_query_qp_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 qpn;
- u8 rsvd[4];
-};
-
-struct mlx5_query_qp_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd1[8];
- __be32 optparam;
- u8 rsvd0[4];
- struct mlx5_qp_context ctx;
- u8 rsvd2[16];
- __be64 pas[0];
-};
-
-struct mlx5_conf_sqp_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 qpn;
- u8 rsvd[3];
- u8 type;
-};
-
-struct mlx5_conf_sqp_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_alloc_xrcd_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_alloc_xrcd_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 xrcdn;
- u8 rsvd[4];
-};
-
-struct mlx5_dealloc_xrcd_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 xrcdn;
- u8 rsvd[4];
-};
-
-struct mlx5_dealloc_xrcd_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
{
return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
@@ -628,28 +531,17 @@ static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev,
return radix_tree_lookup(&dev->priv.mkey_table.tree, key);
}
-struct mlx5_page_fault_resume_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 flags_qpn;
- u8 reserved[4];
-};
-
-struct mlx5_page_fault_resume_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
- struct mlx5_create_qp_mbox_in *in,
+ u32 *in,
int inlen);
-int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation,
- struct mlx5_modify_qp_mbox_in *in, int sqd_event,
+int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
+ u32 opt_param_mask, void *qpc,
struct mlx5_core_qp *qp);
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp);
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
- struct mlx5_query_qp_mbox_out *out, int outlen);
+ u32 *out, int outlen);
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
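
mlx5_core_qp_modify() now takes the transition opcode, an optional-parameter mask and a raw mlx5_ifc qpc instead of the old mailbox struct. A hedged usage sketch; the INIT2RTR transition, the rre attribute and MLX5_QP_OPTPAR_RRE are chosen purely for illustration.

	void *qpc;
	int err;

	qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
	if (!qpc)
		return -ENOMEM;

	MLX5_SET(qpc, qpc, rre, 1);	/* example qpc attribute */
	err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_INIT2RTR_QP,
				  MLX5_QP_OPTPAR_RRE, qpc, qp);
	kfree(qpc);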
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index e087b7d047ac..451b0bde9083 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -45,6 +45,8 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr);
void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u8 *min_inline);
+int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+ u16 vport, u8 min_inline);
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
u16 vport, u8 *addr);
int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);