author		Parav Pandit <parav@nvidia.com>		2021-10-26 20:55:13 +0300
committer	Michael S. Tsirkin <mst@redhat.com>	2021-11-01 12:26:49 +0300
commit		ad69dd0bf26b88ec6ab26f8bbe5cd74fbed7672a (patch)
tree		f375c7c9319917f9c300fc2a50589cea4107e14c /drivers/vdpa
parent		6dbb1f1687a2ccdfc5b84b0a35bbc6dfefc4de3b (diff)
download	linux-ad69dd0bf26b88ec6ab26f8bbe5cd74fbed7672a.tar.xz
vdpa: Introduce query of device config layout
Introduce a command to query a device config layout.
An example query of a network vdpa device:
$ vdpa dev add name bar mgmtdev vdpasim_net
$ vdpa dev config show
bar: mac 00:35:09:19:48:05 link up link_announce false mtu 1500
$ vdpa dev config show -jp
{
    "config": {
        "bar": {
            "mac": "00:35:09:19:48:05",
            "link ": "up",
            "link_announce ": false,
            "mtu": 1500,
        }
    }
}
Signed-off-by: Parav Pandit <parav@nvidia.com>
Signed-off-by: Eli Cohen <elic@nvidia.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Link: https://lore.kernel.org/r/20211026175519.87795-3-parav@nvidia.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
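
For context, the "vdpa dev config show" output above comes from the iproute2 vdpa tool talking to the new generic netlink command added by this patch. The following is a minimal user-space sketch of the same query, assuming libnl-genl-3 and the installed uapi header <linux/vdpa.h>; it is illustrative only (not the in-tree client) and error handling is elided.

/* Illustrative only: a minimal libnl-genl-3 client that issues the new
 * VDPA_CMD_DEV_CONFIG_GET command as a dump, roughly what
 * "vdpa dev config show" does. Error handling is elided. */
#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/vdpa.h>

static int config_show_cb(struct nl_msg *msg, void *arg)
{
	struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg));
	struct nlattr *tb[VDPA_ATTR_MAX + 1];

	nla_parse(tb, VDPA_ATTR_MAX, genlmsg_attrdata(gnlh, 0),
		  genlmsg_attrlen(gnlh, 0), NULL);

	if (tb[VDPA_ATTR_DEV_NAME])
		printf("%s:", nla_get_string(tb[VDPA_ATTR_DEV_NAME]));
	if (tb[VDPA_ATTR_DEV_NET_CFG_MTU])
		printf(" mtu %u", nla_get_u16(tb[VDPA_ATTR_DEV_NET_CFG_MTU]));
	printf("\n");
	return NL_OK;
}

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, VDPA_GENL_NAME);

	msg = nlmsg_alloc();
	/* NLM_F_DUMP reaches the new dumpit handler and walks every device. */
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, NLM_F_DUMP,
		    VDPA_CMD_DEV_CONFIG_GET, 0);
	nl_send_auto(sk, msg);
	nlmsg_free(msg);

	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, config_show_cb, NULL);
	nl_recvmsgs_default(sk);

	nl_socket_free(sk);
	return 0;
}

Build against libnl (-lnl-3 -lnl-genl-3) and matching kernel headers; the attributes read in the callback are the ones vdpa_dev_net_config_fill() below puts into the reply.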
Diffstat (limited to 'drivers/vdpa')
-rw-r--r--	drivers/vdpa/vdpa.c	176
1 file changed, 176 insertions, 0 deletions
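
Because the diffstat on this page is limited to drivers/vdpa, the companion uapi change that adds the new command and attributes to include/uapi/linux/vdpa.h is not shown here. The block below is only a reference summary inferred from the handlers in the diff, not the verbatim uapi hunk.

/* Summary of the netlink pieces the new handlers rely on; the authoritative
 * definitions live in include/uapi/linux/vdpa.h, outside this diffstat. */
#include <linux/vdpa.h>

static const unsigned int vdpa_dev_config_reply_attrs[] = {
	VDPA_ATTR_DEV_NAME,		/* string: vdpa device name */
	VDPA_ATTR_DEV_ID,		/* u32: virtio device id, e.g. VIRTIO_ID_NET */
	VDPA_ATTR_DEV_NET_CFG_MACADDR,	/* binary: MAC from config space */
	VDPA_ATTR_DEV_NET_STATUS,	/* u16: link status */
	VDPA_ATTR_DEV_NET_CFG_MTU,	/* u16: MTU */
	VDPA_ATTR_DEV_NET_CFG_MAX_VQP,	/* u16: only when VIRTIO_NET_F_MQ is offered */
};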
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index cbc8fc69cf9b..8fcbdda8590c 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -14,6 +14,8 @@
 #include <uapi/linux/vdpa.h>
 #include <net/genetlink.h>
 #include <linux/mod_devicetable.h>
+#include <linux/virtio_net.h>
+#include <linux/virtio_ids.h>
 
 static LIST_HEAD(mdev_head);
 /* A global mutex that protects vdpa management device and device level operations. */
@@ -66,6 +68,7 @@ static void vdpa_release_dev(struct device *d)
 		ops->free(vdev);
 
 	ida_simple_remove(&vdpa_index_ida, vdev->index);
+	mutex_destroy(&vdev->cf_mutex);
 	kfree(vdev);
 }
 
@@ -127,6 +130,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
 	if (err)
 		goto err_name;
 
+	mutex_init(&vdev->cf_mutex);
 	device_initialize(&vdev->dev);
 
 	return vdev;
@@ -309,6 +313,7 @@ void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
 {
 	const struct vdpa_config_ops *ops = vdev->config;
 
+	mutex_lock(&vdev->cf_mutex);
 	/*
 	 * Config accesses aren't supposed to trigger before features are set.
 	 * If it does happen we assume a legacy guest.
@@ -316,6 +321,7 @@ void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
 	if (!vdev->features_valid)
 		vdpa_set_features(vdev, 0);
 	ops->get_config(vdev, offset, buf, len);
+	mutex_unlock(&vdev->cf_mutex);
 }
 EXPORT_SYMBOL_GPL(vdpa_get_config);
 
@@ -329,7 +335,9 @@ EXPORT_SYMBOL_GPL(vdpa_get_config);
 void vdpa_set_config(struct vdpa_device *vdev, unsigned int offset,
 		     const void *buf, unsigned int length)
 {
+	mutex_lock(&vdev->cf_mutex);
 	vdev->config->set_config(vdev, offset, buf, length);
+	mutex_unlock(&vdev->cf_mutex);
 }
 EXPORT_SYMBOL_GPL(vdpa_set_config);
 
@@ -661,6 +669,168 @@ static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callba
 	return msg->len;
 }
 
+static int vdpa_dev_net_mq_config_fill(struct vdpa_device *vdev,
+				       struct sk_buff *msg, u64 features,
+				       const struct virtio_net_config *config)
+{
+	u16 val_u16;
+
+	if ((features & (1ULL << VIRTIO_NET_F_MQ)) == 0)
+		return 0;
+
+	val_u16 = le16_to_cpu(config->max_virtqueue_pairs);
+	return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, val_u16);
+}
+
+static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *msg)
+{
+	struct virtio_net_config config = {};
+	u64 features;
+	u16 val_u16;
+
+	vdpa_get_config(vdev, 0, &config, sizeof(config));
+
+	if (nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR, sizeof(config.mac),
+		    config.mac))
+		return -EMSGSIZE;
+
+	val_u16 = le16_to_cpu(config.status);
+	if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_STATUS, val_u16))
+		return -EMSGSIZE;
+
+	val_u16 = le16_to_cpu(config.mtu);
+	if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16))
+		return -EMSGSIZE;
+
+	features = vdev->config->get_features(vdev);
+
+	return vdpa_dev_net_mq_config_fill(vdev, msg, features, &config);
+}
+
+static int
+vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
+		     int flags, struct netlink_ext_ack *extack)
+{
+	u32 device_id;
+	void *hdr;
+	int err;
+
+	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
+			  VDPA_CMD_DEV_CONFIG_GET);
+	if (!hdr)
+		return -EMSGSIZE;
+
+	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
+		err = -EMSGSIZE;
+		goto msg_err;
+	}
+
+	device_id = vdev->config->get_device_id(vdev);
+	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
+		err = -EMSGSIZE;
+		goto msg_err;
+	}
+
+	switch (device_id) {
+	case VIRTIO_ID_NET:
+		err = vdpa_dev_net_config_fill(vdev, msg);
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+	if (err)
+		goto msg_err;
+
+	genlmsg_end(msg, hdr);
+	return 0;
+
+msg_err:
+	genlmsg_cancel(msg, hdr);
+	return err;
+}
+
+static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+	struct vdpa_device *vdev;
+	struct sk_buff *msg;
+	const char *devname;
+	struct device *dev;
+	int err;
+
+	if (!info->attrs[VDPA_ATTR_DEV_NAME])
+		return -EINVAL;
+	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	mutex_lock(&vdpa_dev_mutex);
+	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
+	if (!dev) {
+		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
+		err = -ENODEV;
+		goto dev_err;
+	}
+	vdev = container_of(dev, struct vdpa_device, dev);
+	if (!vdev->mdev) {
+		NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
+		err = -EINVAL;
+		goto mdev_err;
+	}
+	err = vdpa_dev_config_fill(vdev, msg, info->snd_portid, info->snd_seq,
+				   0, info->extack);
+	if (!err)
+		err = genlmsg_reply(msg, info);
+
+mdev_err:
+	put_device(dev);
+dev_err:
+	mutex_unlock(&vdpa_dev_mutex);
+	if (err)
+		nlmsg_free(msg);
+	return err;
+}
+
+static int vdpa_dev_config_dump(struct device *dev, void *data)
+{
+	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
+	struct vdpa_dev_dump_info *info = data;
+	int err;
+
+	if (!vdev->mdev)
+		return 0;
+	if (info->idx < info->start_idx) {
+		info->idx++;
+		return 0;
+	}
+	err = vdpa_dev_config_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
+				   info->cb->nlh->nlmsg_seq, NLM_F_MULTI,
+				   info->cb->extack);
+	if (err)
+		return err;
+
+	info->idx++;
+	return 0;
+}
+
+static int
+vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
+{
+	struct vdpa_dev_dump_info info;
+
+	info.msg = msg;
+	info.cb = cb;
+	info.start_idx = cb->args[0];
+	info.idx = 0;
+
+	mutex_lock(&vdpa_dev_mutex);
+	bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_config_dump);
+	mutex_unlock(&vdpa_dev_mutex);
+	cb->args[0] = info.idx;
+	return msg->len;
+}
+
 static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
 	[VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
 	[VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
@@ -692,6 +862,12 @@ static const struct genl_ops vdpa_nl_ops[] = {
 		.doit = vdpa_nl_cmd_dev_get_doit,
 		.dumpit = vdpa_nl_cmd_dev_get_dumpit,
 	},
+	{
+		.cmd = VDPA_CMD_DEV_CONFIG_GET,
+		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+		.doit = vdpa_nl_cmd_dev_config_get_doit,
+		.dumpit = vdpa_nl_cmd_dev_config_get_dumpit,
+	},
 };
 
 static struct genl_family vdpa_nl_family __ro_after_init = {
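
One note on the dump path: vdpa_dev_config_dump() reuses struct vdpa_dev_dump_info, which is declared earlier in drivers/vdpa/vdpa.c and therefore not part of this hunk. Inferred from the fields the new code dereferences, its shape is roughly:

/* Presumed layout of the dump cursor reused above; it lives earlier in
 * drivers/vdpa/vdpa.c (outside this hunk) and is inferred from the fields
 * vdpa_dev_config_dump() and the dumpit handler touch. */
struct vdpa_dev_dump_info {
	struct sk_buff *msg;		/* netlink reply being filled */
	struct netlink_callback *cb;	/* dump callback state (portid, seq, extack) */
	int start_idx;			/* resume point taken from cb->args[0] */
	int idx;			/* index of the device being visited */
};

The dumpit handler stores info.idx back into cb->args[0], so a multi-part dump resumes at the next device on the following netlink callback invocation.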