driver_features = pds_vdpa_get_driver_features(vdpa_dev); if (driver_features & BIT_ULL(VIRTIO_F_RING_PACKED))
invert_idx = PDS_VDPA_PACKED_INVERT_IDX;
if (ready) { /* Pass vq setup info to DSC using adminq to gather up and * send all info at once so FW can do its full set up in * one easy operation
*/
err = pds_vdpa_cmd_init_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]); if (err) {
dev_err(dev, "Failed to init vq %d: %pe\n",
qid, ERR_PTR(err));
ready = false;
}
} else {
err = pds_vdpa_cmd_reset_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]); if (err)
dev_err(dev, "%s: reset_vq failed qid %d: %pe\n",
__func__, qid, ERR_PTR(err));
}
/* The avail and used index are stored with the packed wrap * counter bit inverted. This way, in case set_vq_state is * not called, the initial value can be set to zero prior to * feature negotiation, and it is good for both packed and * split vq.
*/
avail ^= PDS_VDPA_PACKED_INVERT_IDX;
used ^= PDS_VDPA_PACKED_INVERT_IDX;
} else {
avail = state->split.avail_index; /* state->split does not provide a used_index: * the vq will be set to "empty" here, and the vq will read * the current used index the next time the vq is kicked.
*/
used = avail;
}
if (used != avail) {
dev_dbg(dev, "Setting used equal to avail, for interoperability\n");
used = avail;
}
if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) {
dev_err(dev, "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n"); return -EOPNOTSUPP;
}
/* Check for valid feature bits */
nego_features = features & pdsv->supported_features;
missing = features & ~nego_features; if (missing) {
dev_err(dev, "Can't support all requested features in %#llx, missing %#llx features\n",
features, missing); return -EOPNOTSUPP;
}
/* if we're faking the F_MAC, strip it before writing to device */
hw_features = le64_to_cpu(pdsv->vdpa_aux->ident.hw_features); if (!(hw_features & BIT_ULL(VIRTIO_NET_F_MAC)))
nego_features &= ~BIT_ULL(VIRTIO_NET_F_MAC);
for (i = 0; i < pdsv->num_vqs; i++) {
pdsv->vqs[i].avail_idx = 0;
pdsv->vqs[i].used_idx = 0;
}
pds_vdpa_cmd_set_mac(pdsv, pdsv->mac);
}
if (status & ~old_status & VIRTIO_CONFIG_S_FEATURES_OK) { for (i = 0; i < pdsv->num_vqs; i++) {
pdsv->vqs[i].notify =
vp_modern_map_vq_notify(&pdsv->vdpa_aux->vd_mdev,
i, &pdsv->vqs[i].notify_pa);
}
}
if (old_status & ~status & VIRTIO_CONFIG_S_DRIVER_OK)
pds_vdpa_release_irqs(pdsv);
}
dev = &pdsv->vdpa_aux->padev->aux_dev.dev;
status = pds_vdpa_get_status(vdpa_dev);
if (status == 0) return 0;
if (status & VIRTIO_CONFIG_S_DRIVER_OK) { /* Reset the vqs */ for (i = 0; i < pdsv->num_vqs && !err; i++) {
err = pds_vdpa_cmd_reset_vq(pdsv, i, 0, &pdsv->vqs[i]); if (err)
dev_err(dev, "%s: reset_vq failed qid %d: %pe\n",
__func__, i, ERR_PTR(err));
}
}
pds_vdpa_set_status(vdpa_dev, 0);
if (status & VIRTIO_CONFIG_S_DRIVER_OK) { /* Reset the vq info */ for (i = 0; i < pdsv->num_vqs && !err; i++)
pds_vdpa_init_vqs_entry(pdsv, i, pdsv->vqs[i].notify);
}
/* Make sure we have the queues being requested */ if (add_config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MAX_VQP))
vq_pairs = add_config->net.max_vq_pairs;
pdsv->num_vqs = 2 * vq_pairs; if (pdsv->supported_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
pdsv->num_vqs++;
if (pdsv->num_vqs > fw_max_vqs) {
dev_err(dev, "%s: queue count requested %u greater than max %u\n",
__func__, pdsv->num_vqs, fw_max_vqs);
err = -ENOSPC; goto err_unmap;
}
if (pdsv->num_vqs != fw_max_vqs) {
err = pds_vdpa_cmd_set_max_vq_pairs(pdsv, vq_pairs); if (err) {
dev_err(dev, "Failed to set max_vq_pairs: %pe\n",
ERR_PTR(err)); goto err_unmap;
}
}
/* Set a mac, either from the user config if provided * or use the device's mac if not 00:..:00 * or set a random mac
*/ if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR)) {
ether_addr_copy(pdsv->mac, add_config->net.mac);
} else { struct virtio_net_config __iomem *vc;
vc = pdsv->vdpa_aux->vd_mdev.device;
memcpy_fromio(pdsv->mac, vc->mac, sizeof(pdsv->mac)); if (is_zero_ether_addr(pdsv->mac) &&
(pdsv->supported_features & BIT_ULL(VIRTIO_NET_F_MAC))) {
eth_random_addr(pdsv->mac);
dev_info(dev, "setting random mac %pM\n", pdsv->mac);
}
}
pds_vdpa_cmd_set_mac(pdsv, pdsv->mac);
for (i = 0; i < pdsv->num_vqs; i++) { void __iomem *notify;
notify = vp_modern_map_vq_notify(&pdsv->vdpa_aux->vd_mdev,
i, &pdsv->vqs[i].notify_pa);
pds_vdpa_init_vqs_entry(pdsv, i, notify);
}
pdsv->vdpa_dev.mdev = &vdpa_aux->vdpa_mdev;
err = pds_vdpa_register_event_handler(pdsv); if (err) {
dev_err(dev, "Failed to register for PDS events: %pe\n", ERR_PTR(err)); goto err_unmap;
}
/* We use the _vdpa_register_device() call rather than the * vdpa_register_device() to avoid a deadlock because our * dev_add() is called with the vdpa_dev_lock already set * by vdpa_nl_cmd_dev_add_set_doit()
*/
err = _vdpa_register_device(&pdsv->vdpa_dev, pdsv->num_vqs); if (err) {
dev_err(dev, "Failed to register to vDPA bus: %pe\n", ERR_PTR(err)); goto err_unevent;
}
dev = &vdpa_aux->padev->aux_dev.dev;
pdev = vdpa_aux->padev->vf_pdev;
mgmt = &vdpa_aux->vdpa_mdev;
/* Get resource info through the PF's adminq. It is a block of info, * so we need to map some memory for PF to make available to the * firmware for writing the data.
*/
pf_pdev = pci_physfn(vdpa_aux->padev->vf_pdev);
pf_dev = &pf_pdev->dev;
ident_pa = dma_map_single(pf_dev, &vdpa_aux->ident, sizeof(vdpa_aux->ident), DMA_FROM_DEVICE); if (dma_mapping_error(pf_dev, ident_pa)) {
dev_err(dev, "Failed to map ident space\n"); return -ENOMEM;
}
/*
 * NOTE(review): the text below is website-extraction residue, not driver
 * source. English translation of the original German boilerplate:
 * "The information on this website has been carefully compiled to the best
 * of our knowledge. However, no guarantee is given for the completeness,
 * correctness, or quality of the information provided.
 * Remark: the colored syntax highlighting and the measurement are still
 * experimental."
 */