// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDPA bus.
 *
 * Copyright (c) 2020, Red Hat. All rights reserved.
 * Author: Jason Wang <jasowang@redhat.com>
 */
/* List of registered vdpa management devices. */
static LIST_HEAD(mdev_head);
/* A global mutex that protects vdpa management device and device level operations. */
static DECLARE_RWSEM(vdpa_dev_lock);
/* Allocator for unique vdpa device indices. */
static DEFINE_IDA(vdpa_index_ida);
/**
 * __vdpa_alloc_device - allocate and initialize a vDPA device
 * This allows the driver to do some preparation after the device is
 * initialized but before it is registered.
 * @parent: the parent device
 * @config: the bus operations that is supported by this device
 * @ngroups: number of groups supported by this device
 * @nas: number of address spaces supported by this device
 * @size: size of the parent structure that contains private data
 * @name: name of the vdpa device; optional.
 * @use_va: indicate whether virtual address must be used by this device
 *
 * Driver should use the vdpa_alloc_device() wrapper macro instead of
 * using this directly.
 *
 * Return: Returns an error when parent/config/dma_dev is not set or it
 * fails to get an ida.
 */
/* NOTE(review): the body below looks like a mangled merge of
 * __vdpa_alloc_device() and __vdpa_register_device(): `dev` is used without
 * a declaration, `vdev` is never allocated, the `err:` label for the gotos
 * is missing, and an int is returned from a pointer-returning function.
 * `conststruct`/`unsignedint`/`constchar` also lost their separating
 * spaces. Restore from upstream drivers/vdpa/vdpa.c before building. */
struct vdpa_device *__vdpa_alloc_device(struct device *parent, conststruct vdpa_config_ops *config, unsignedint ngroups, unsignedint nas,
size_t size, constchar *name, bool use_va)
{ struct vdpa_device *vdev; int err = -EINVAL;
/* A config ops table is mandatory. */
if (!config) goto err;
/* dma_map and dma_unmap must be provided as a pair, or not at all. */
if (!!config->dma_map != !!config->dma_unmap) goto err;
/* It should only work for the device that use on-chip IOMMU */ if (use_va && !(config->dma_map || config->set_map)) goto err;
lockdep_assert_held(&vdpa_dev_lock);
/* Reject a duplicate device name on the vdpa bus before device_add(). */
dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match); if (dev) {
put_device(dev); return -EEXIST;
} return device_add(&vdev->dev);
}
/**
 * _vdpa_register_device - register a vDPA device with vdpa lock held
 * Caller must have a succeed call of vdpa_alloc_device() before.
 * Caller must invoke this routine in the management device dev_add()
 * callback after setting up valid mgmtdev for this vdpa device.
 * @vdev: the vdpa device to be registered to vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Return: Returns an error when fail to add device to vDPA bus
 */
int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
/* Only devices created through a management device may use this path. */
{ if (!vdev->mdev) return -EINVAL;
/* NOTE(review): truncated — the nvqs assignment, the
 * __vdpa_register_device() call and the closing brace are missing from
 * this chunk; restore from upstream drivers/vdpa/vdpa.c. */
/**
 * vdpa_register_device - register a vDPA device
 * Callers must have a succeed call of vdpa_alloc_device() before.
 * @vdev: the vdpa device to be registered to vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Return: Returns an error when fail to add to vDPA bus
 */
int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{ int err;
/* NOTE(review): truncated — upstream takes vdpa_dev_lock, calls
 * __vdpa_register_device() and drops the lock; the body and closing brace
 * are missing from this chunk. */
/**
 * _vdpa_unregister_device - unregister a vDPA device with vdpa lock held
 * Caller must invoke this routine as part of the management device dev_del()
 * callback.
 * @vdev: the vdpa device to be unregistered from the vDPA bus
 */
void _vdpa_unregister_device(struct vdpa_device *vdev)
{
	struct device *dev = &vdev->dev;

	/* Device-level operations require vdpa_dev_lock to already be held. */
	lockdep_assert_held(&vdpa_dev_lock);
	/* This path is reserved for devices created via a management device. */
	WARN_ON(!vdev->mdev);
	device_unregister(dev);
}
EXPORT_SYMBOL_GPL(_vdpa_unregister_device);
/**
 * vdpa_unregister_device - unregister a vDPA device
 * @vdev: the vdpa device to be unregistered from the vDPA bus
 */
void vdpa_unregister_device(struct vdpa_device *vdev)
{
	struct device *dev = &vdev->dev;

	/* Serialize against other management/device level operations. */
	down_write(&vdpa_dev_lock);
	device_unregister(dev);
	up_write(&vdpa_dev_lock);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_device);
/**
 * __vdpa_register_driver - register a vDPA device driver
 * @drv: the vdpa device driver to be registered
 * @owner: module owner of the driver
 *
 * Return: Returns an err when fail to do the registration
 */
int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner)
{
drv->driver.bus = &vdpa_bus;
drv->driver.owner = owner;
/* NOTE(review): mangled merge — the driver_register() call and return
 * statement are missing, and the lines below (where vdev, ops, offset,
 * buf and len are all undeclared) belong upstream to
 * vdpa_get_config_unlocked(). Restore from upstream drivers/vdpa/vdpa.c. */
/*
 * Config accesses aren't supposed to trigger before features are set.
 * If it does happen we assume a legacy guest.
 */ if (!vdev->features_valid)
vdpa_set_features_unlocked(vdev, 0);
ops->get_config(vdev, offset, buf, len);
}
/** * vdpa_get_config - Get one or more device configuration fields. * @vdev: vdpa device to operate on * @offset: starting byte offset of the field * @buf: buffer pointer to read to * @len: length of the configuration fields in bytes
*/ void vdpa_get_config(struct vdpa_device *vdev, unsignedint offset, void *buf, unsignedint len)
{
down_read(&vdev->cf_lock);
vdpa_get_config_unlocked(vdev, offset, buf, len);
up_read(&vdev->cf_lock);
}
EXPORT_SYMBOL_GPL(vdpa_get_config);
/** * vdpa_set_config - Set one or more device configuration fields. * @vdev: vdpa device to operate on * @offset: starting byte offset of the field * @buf: buffer pointer to read from * @length: length of the configuration fields in bytes
*/ void vdpa_set_config(struct vdpa_device *vdev, unsignedint offset, constvoid *buf, unsignedint length)
{
down_write(&vdev->cf_lock);
vdev->config->set_config(vdev, offset, buf, length);
up_write(&vdev->cf_lock);
}
EXPORT_SYMBOL_GPL(vdpa_set_config);
/* Match a management device against an optional bus name and a device name. */
/* NOTE(review): truncated — the bus-name comparison branch, the final
 * return and the closing brace are missing; `staticbool`, `conststruct`,
 * `constchar`, `returnfalse` and `returntrue` also lost their separating
 * spaces during extraction. Restore from upstream drivers/vdpa/vdpa.c. */
staticbool mgmtdev_handle_match(conststruct vdpa_mgmt_dev *mdev, constchar *busname, constchar *devname)
{ /* Bus name is optional for simulated management device, so ignore the
 * device with bus if bus attribute is provided.
 */ if ((busname && !mdev->device->bus) || (!busname && mdev->device->bus)) returnfalse;
/* No bus attribute: match on the device name alone. */
if (!busname && strcmp(dev_name(mdev->device), devname) == 0) returntrue;
/*
 * Bitmask for all per-device features: feature bits VIRTIO_TRANSPORT_F_START
 * through VIRTIO_TRANSPORT_F_END are unset, i.e. 0xfffffc000fffffff for
 * all 64bit features. If the features are extended beyond 64 bits, or new
 * "holes" are reserved for other type of features than per-device, this
 * macro would have to be updated.
 */
#define VIRTIO_DEVICE_F_MASK (~0ULL << (VIRTIO_TRANSPORT_F_END + 1) | \
((1ULL << VIRTIO_TRANSPORT_F_START) - 1))
/* NOTE(review): orphaned body fragment — the enclosing function (upstream:
 * vdpa_nl_cmd_dev_add_set_doit()) lost its signature, opening brace and
 * local declarations (name, macaddr, config, nl_attrs, mdev, classes, ncls,
 * err, skb, info) in this chunk. Restore the frame from upstream before
 * building. */
/* A device name attribute is mandatory. */
if (!info->attrs[VDPA_ATTR_DEV_NAME]) return -EINVAL;
name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
/* Collect the optional net-class attributes into `config`, recording each
 * provided one in config.mask. */
if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]) {
macaddr = nla_data(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]);
memcpy(config.net.mac, macaddr, sizeof(config.net.mac));
config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
} if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]) {
config.net.mtu =
nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]);
config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU);
} if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]) {
config.net.max_vq_pairs =
nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]); if (!config.net.max_vq_pairs) {
NL_SET_ERR_MSG_MOD(info->extack, "At least one pair of VQs is required"); return -EINVAL;
}
config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
} if (nl_attrs[VDPA_ATTR_DEV_FEATURES]) {
u64 missing = 0x0ULL;
/* Each supplied net attribute requires its matching feature bit in
 * the user-provided device features; violations are accumulated in
 * `missing` so they can all be reported in one message. */
config.device_features =
nla_get_u64(nl_attrs[VDPA_ATTR_DEV_FEATURES]); if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR] &&
!(config.device_features & BIT_ULL(VIRTIO_NET_F_MAC)))
missing |= BIT_ULL(VIRTIO_NET_F_MAC); if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU] &&
!(config.device_features & BIT_ULL(VIRTIO_NET_F_MTU)))
missing |= BIT_ULL(VIRTIO_NET_F_MTU); if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP] &&
config.net.max_vq_pairs > 1 &&
!(config.device_features & BIT_ULL(VIRTIO_NET_F_MQ)))
missing |= BIT_ULL(VIRTIO_NET_F_MQ); if (missing) {
NL_SET_ERR_MSG_FMT_MOD(info->extack, "Missing features 0x%llx for provided attributes",
missing); return -EINVAL;
}
config.mask |= BIT_ULL(VDPA_ATTR_DEV_FEATURES);
}
/* Skip checking capability if user didn't prefer to configure any
 * device networking attributes. It is likely that user might have used
 * a device specific method to configure such attributes or using device
 * default attributes.
 */ if ((config.mask & VDPA_DEV_NET_ATTRS_MASK) &&
!netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM;
/* Resolve the management device under the global write lock. */
down_write(&vdpa_dev_lock);
mdev = vdpa_mgmtdev_get_from_attr(info->attrs); if (IS_ERR(mdev)) {
NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified management device");
err = PTR_ERR(mdev); goto err;
}
/* Reject attributes this management device does not support. */
if ((config.mask & mdev->config_attr_mask) != config.mask) {
NL_SET_ERR_MSG_FMT_MOD(info->extack, "Some provided attributes are not supported: 0x%llx",
config.mask & ~mdev->config_attr_mask);
err = -EOPNOTSUPP; goto err;
}
/* Net attributes only make sense if the mgmtdev can create net devices. */
classes = vdpa_mgmtdev_get_classes(mdev, &ncls); if (config.mask & VDPA_DEV_NET_ATTRS_MASK &&
!(classes & BIT_ULL(VIRTIO_ID_NET))) {
NL_SET_ERR_MSG_MOD(info->extack, "Network class attributes provided on unsupported management device");
err = -EINVAL; goto err;
} if (!(config.mask & VDPA_DEV_NET_ATTRS_MASK) &&
config.mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES) &&
classes & BIT_ULL(VIRTIO_ID_NET) && ncls > 1 &&
config.device_features & VIRTIO_DEVICE_F_MASK) {
NL_SET_ERR_MSG_MOD(info->extack, "Management device supports multi-class while device features specified are ambiguous");
err = -EINVAL; goto err;
}
/* NOTE(review): orphaned body fragment — belongs upstream to
 * vdpa_dev_config_fill(); the function signature, the msg/status/device_id
 * locals, the attribute puts before this point and the msg_err label are
 * all missing from this chunk. */
/* only read driver features after the feature negotiation is done */
status = vdev->config->get_status(vdev); if (status & VIRTIO_CONFIG_S_FEATURES_OK) {
features_driver = vdev->config->get_driver_features(vdev); if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features_driver,
VDPA_ATTR_PAD)) {
err = -EMSGSIZE; goto msg_err;
}
}
/* Dispatch to the per-class config fill helper. */
switch (device_id) { case VIRTIO_ID_NET:
err = vdpa_dev_net_config_fill(vdev, msg); break; case VIRTIO_ID_BLOCK:
err = vdpa_dev_blk_config_fill(vdev, msg); break; default:
err = -EOPNOTSUPP; break;
} if (err) goto msg_err;
/* NOTE(review): further orphaned fragments — the lines through the
 * mq-config fill appear to belong upstream to vdpa_dev_net_config_fill(),
 * and the queue-index/vendor-stats lines to vdpa_dev_vendor_stats_fill();
 * both enclosing function frames (signatures, locals, braces) are missing
 * from this chunk. Restore from upstream drivers/vdpa/vdpa.c. */
/* Net config is only meaningful once feature negotiation has completed. */
status = vdev->config->get_status(vdev); if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
NL_SET_ERR_MSG_MOD(info->extack, "feature negotiation not complete"); return -EAGAIN;
}
vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));
features = vdev->config->get_driver_features(vdev); if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES,
features, VDPA_ATTR_PAD)) return -EMSGSIZE;
err = vdpa_dev_net_mq_config_fill(msg, features, &config); if (err) return err;
/* Vendor per-queue statistics for the requested virtqueue index. */
if (nla_put_u32(msg, VDPA_ATTR_DEV_QUEUE_INDEX, index)) return -EMSGSIZE;
err = vdev->config->get_vendor_vq_stats(vdev, index, msg, info->extack); if (err) return err;
/*
 * NOTE(review): the following disclaimer is unrelated web-page boilerplate
 * that leaked into the file during extraction; it is not part of the
 * source and should be removed. English translation kept for the record:
 * "The information on this web page was compiled carefully to the best of
 * our knowledge. However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed. Note: the colored
 * syntax highlighting and the measurement are still experimental."
 */