voff = pci_find_vsec_capability(pcidev, PCI_VENDOR_ID_INTEL,
PCI_VSEC_ID_INTEL_DFLS); if (!voff) {
dev_dbg(&pcidev->dev, "%s no DFL VSEC found\n", __func__); return -ENODEV;
}
dfl_cnt = 0;
pci_read_config_dword(pcidev, voff + PCI_VNDR_DFLS_CNT, &dfl_cnt); if (dfl_cnt > PCI_STD_NUM_BARS) {
dev_err(&pcidev->dev, "%s too many DFLs %d > %d\n",
__func__, dfl_cnt, PCI_STD_NUM_BARS); return -EINVAL;
}
dfl_res_off = voff + PCI_VNDR_DFLS_RES; if (dfl_res_off + (dfl_cnt * sizeof(u32)) > PCI_CFG_SPACE_EXP_SIZE) {
dev_err(&pcidev->dev, "%s DFL VSEC too big for PCIe config space\n",
__func__); return -EINVAL;
}
for (i = 0, bars = 0; i < dfl_cnt; i++, dfl_res_off += sizeof(u32)) {
dfl_res = GENMASK(31, 0);
pci_read_config_dword(pcidev, dfl_res_off, &dfl_res);
bir = dfl_res & PCI_VNDR_DFLS_RES_BAR_MASK; if (bir >= PCI_STD_NUM_BARS) {
dev_err(&pcidev->dev, "%s bad bir number %d\n",
__func__, bir); return -EINVAL;
}
if (bars & BIT(bir)) {
dev_err(&pcidev->dev, "%s DFL for BAR %d already specified\n",
__func__, bir); return -EINVAL;
}
bars |= BIT(bir);
len = pci_resource_len(pcidev, bir);
offset = dfl_res & PCI_VNDR_DFLS_RES_OFF_MASK; if (offset >= len) {
dev_err(&pcidev->dev, "%s bad offset %u >= %pa\n",
__func__, offset, &len); return -EINVAL;
}
dev_dbg(&pcidev->dev, "%s BAR %d offset 0x%x\n", __func__, bir, offset);
len -= offset;
start = pci_resource_start(pcidev, bir) + offset;
dfl_fpga_enum_info_add_dfl(info, start, len);
}
return 0;
}
/* default method of finding dfls starting at offset 0 of bar 0 */ staticint find_dfls_by_default(struct pci_dev *pcidev, struct dfl_fpga_enum_info *info)
{ int port_num, bar, i, ret = 0;
resource_size_t start, len; void __iomem *base;
u32 offset;
u64 v;
/* start to find Device Feature List from Bar 0 */
base = pcim_iomap_region(pcidev, 0, DRV_NAME); if (IS_ERR(base)) return PTR_ERR(base);
/* * PF device has FME and Ports/AFUs, and VF device only has one * Port/AFU. Check them and add related "Device Feature List" info * for the next step enumeration.
*/ if (dfl_feature_is_fme(base)) {
start = pci_resource_start(pcidev, 0);
len = pci_resource_len(pcidev, 0);
dfl_fpga_enum_info_add_dfl(info, start, len);
/* * find more Device Feature Lists (e.g. Ports) per information * indicated by FME module.
*/
v = readq(base + FME_HDR_CAP);
port_num = FIELD_GET(FME_CAP_NUM_PORTS, v);
WARN_ON(port_num > MAX_DFL_FPGA_PORT_NUM);
for (i = 0; i < port_num; i++) {
v = readq(base + FME_HDR_PORT_OFST(i));
/* skip ports which are not implemented. */ if (!(v & FME_PORT_OFST_IMP)) continue;
/* * add Port's Device Feature List information for next * step enumeration.
*/
bar = FIELD_GET(FME_PORT_OFST_BAR_ID, v);
offset = FIELD_GET(FME_PORT_OFST_DFH_OFST, v); if (bar == FME_PORT_OFST_BAR_SKIP) { continue;
} elseif (bar >= PCI_STD_NUM_BARS) {
dev_err(&pcidev->dev, "bad BAR %d for port %d\n",
bar, i);
ret = -EINVAL; break;
}
dfl_fpga_enum_info_add_dfl(info, start, len);
} else {
ret = -ENODEV;
}
/* release I/O mappings for next step enumeration */
pcim_iounmap_region(pcidev, 0);
return ret;
}
/*
 * enumerate feature devices under pci device: build the enumeration info
 * (IRQs plus DFL locations found via VSEC or the default BAR-0 probe),
 * then hand it to the DFL core. On success the resulting cdev is stored
 * in drvdata; on failure all acquired resources are released.
 */
static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
{
	struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
	struct dfl_fpga_enum_info *info;
	struct dfl_fpga_cdev *cdev;
	int nvec, ret = 0;
	int *irq_table;

	/* allocate enumeration info via pci_dev */
	info = dfl_fpga_enum_info_alloc(&pcidev->dev);
	if (!info)
		return -ENOMEM;

	/* add irq info for enumeration if the device support irq */
	nvec = cci_pci_alloc_irq(pcidev);
	if (nvec < 0) {
		dev_err(&pcidev->dev, "Fail to alloc irq %d.\n", nvec);
		ret = nvec;
		goto enum_info_free_exit;
	} else if (nvec) {
		irq_table = cci_pci_create_irq_table(pcidev, nvec);
		if (!irq_table) {
			ret = -ENOMEM;
			goto irq_free_exit;
		}

		/* info keeps its own copy, so the table is freed right away */
		ret = dfl_fpga_enum_info_add_irq(info, nvec, irq_table);
		kfree(irq_table);
		if (ret)
			goto irq_free_exit;
	}

	/* prefer the DFL VSEC; fall back to BAR 0 only if no VSEC exists */
	ret = find_dfls_by_vsec(pcidev, info);
	if (ret == -ENODEV)
		ret = find_dfls_by_default(pcidev, info);

	if (ret)
		goto irq_free_exit;

	/* start enumeration with prepared enumeration information */
	cdev = dfl_fpga_feature_devs_enumerate(info);
	if (IS_ERR(cdev)) {
		dev_err(&pcidev->dev, "Enumeration failure\n");
		ret = PTR_ERR(cdev);
		goto irq_free_exit;
	}

	drvdata->cdev = cdev;

irq_free_exit:
	if (ret)
		cci_pci_free_irq(pcidev);
enum_info_free_exit:
	dfl_fpga_enum_info_free(info);
	return ret;
}
static int cci_pci_probe(struct pci_dev *pcidev, conststruct pci_device_id *pcidevid)
{ int ret;
ret = pcim_enable_device(pcidev); if (ret < 0) {
dev_err(&pcidev->dev, "Failed to enable device %d.\n", ret); return ret;
}
pci_set_master(pcidev);
ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64)); if (ret)
ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)); if (ret) {
dev_err(&pcidev->dev, "No suitable DMA support available.\n"); return ret;
}
ret = cci_init_drvdata(pcidev); if (ret) {
dev_err(&pcidev->dev, "Fail to init drvdata %d.\n", ret); return ret;
}
ret = cci_enumerate_feature_devs(pcidev); if (ret) {
dev_err(&pcidev->dev, "enumeration failure %d.\n", ret); return ret;
}
if (!num_vfs) { /* * disable SRIOV and then put released ports back to default * PF access mode.
*/
pci_disable_sriov(pcidev);
dfl_fpga_cdev_config_ports_pf(cdev);
} else { int ret;
/* * before enable SRIOV, put released ports into VF access mode * first of all.
*/
ret = dfl_fpga_cdev_config_ports_vf(cdev, num_vfs); if (ret) return ret;
ret = pci_enable_sriov(pcidev, num_vfs); if (ret) {
dfl_fpga_cdev_config_ports_pf(cdev); return ret;
}
}
return num_vfs;
}
staticvoid cci_pci_remove(struct pci_dev *pcidev)
{ if (dev_is_pf(&pcidev->dev))
cci_pci_sriov_configure(pcidev, 0);
/*
 * NOTE(review): the following German website disclaimer was accidentally
 * pasted into this source file and is not part of the driver — it should
 * be deleted. Translation: "The information on this website has been
 * compiled carefully to the best of our knowledge. However, neither
 * completeness, correctness, nor quality of the provided information is
 * guaranteed. Note: the colored syntax highlighting and the measurement
 * are still experimental."
 */