/**
 * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
 * data between PCIe EP and remote PCIe RC
 * @epf_test: the EPF test device that performs the data transfer operation
 * @dma_dst: The destination address of the data transfer. It can be a physical
 *           address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @dma_src: The source address of the data transfer. It can be a physical
 *           address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @len: The size of the data transfer
 * @dma_remote: remote RC physical address
 * @dir: DMA transfer direction
 *
 * Function that uses dmaengine API to transfer data between PCIe EP and remote
 * PCIe RC. The source and destination address can be a physical address given
 * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
 *
 * The function returns '0' on success and negative value on failure.
 */
staticint pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
dma_addr_t dma_dst, dma_addr_t dma_src,
size_t len, dma_addr_t dma_remote, enum dma_transfer_direction dir)
/* Writes to the host use the TX channel, reads from it use the RX channel. */
{ struct dma_chan *chan = (dir == DMA_MEM_TO_DEV) ?
epf_test->dma_chan_tx : epf_test->dma_chan_rx;
/* The EP-local end of the transfer follows the transfer direction. */
dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst; enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; struct pci_epf *epf = epf_test->epf; struct dma_async_tx_descriptor *tx; struct dma_slave_config sconf = {}; struct device *dev = &epf->dev; int ret;
/* NOTE(review): the remainder of this function body (descriptor prep,
 * submit, completion wait) is missing from this excerpt - the text was
 * truncated during extraction; do not assume the declarations above are
 * the whole implementation. The fused "staticint" token is also an
 * extraction artifact and must read "static int" in the real source. */
/**
 * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Function to initialize EPF test DMA channel.
 */
staticint pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
{ struct pci_epf *epf = epf_test->epf; struct device *dev = &epf->dev; struct epf_dma_filter filter; struct dma_chan *dma_chan;
dma_cap_mask_t mask; int ret;
/* NOTE(review): 'mask' is passed to dma_request_chan_by_mask() below
 * without a visible dma_cap_zero()/dma_cap_set() initialization - those
 * lines appear to have been lost in this excerpt; confirm against the
 * full source before treating this as a real uninitialized-use bug. */
dma_chan = dma_request_chan_by_mask(&mask); if (IS_ERR(dma_chan)) {
ret = PTR_ERR(dma_chan); if (ret != -EPROBE_DEFER)
/* -EPROBE_DEFER is expected while the DMA provider probes; stay quiet. */
dev_err(dev, "Failed to get DMA channel\n"); return ret;
}
init_completion(&epf_test->transfer_complete);
/* NOTE(review): the rest of this function (channel bookkeeping, error
 * unwinding, return) is missing from this excerpt. */
/**
 * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Helper to cleanup EPF test DMA channel.
 */
staticvoid pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
/* Nothing to release when DMA was never successfully initialized. */
{ if (!epf_test->dma_supported) return;
/* NOTE(review): the rest of this function (channel release) is missing
 * from this excerpt. */
/*
 * NOTE(review): fragment of the EPF-test read handler (data pulled from the
 * host into a local buffer, DMA_DEV_TO_MEM). The function header and the
 * tail of the body are missing from this excerpt; only comments are added.
 */
src_buf = kzalloc(src_size, GFP_KERNEL); if (!src_buf) {
ret = -ENOMEM; goto set_status;
}
buf = src_buf;
/* Walk the host source window in map-sized chunks. */
while (src_size) {
ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
src_addr, src_size, &map); if (ret) {
dev_err(dev, "Failed to map address\n");
status = STATUS_SRC_ADDR_INVALID; goto free_buf;
}
/* pci_epc_mem_map() may map less than requested; honor map.pci_size. */
map_size = map.pci_size; if (flags & FLAG_USE_DMA) {
/* DMA path: map the local buffer as the DMA destination. */
dst_phys_addr = dma_map_single(dma_dev, buf, map_size,
DMA_FROM_DEVICE); if (dma_mapping_error(dma_dev, dst_phys_addr)) {
dev_err(dev, "Failed to map destination buffer addr\n");
ret = -ENOMEM; goto unmap;
}
/* start/end timestamps bracket the transfer for throughput reporting. */
ktime_get_ts64(&start);
ret = pci_epf_test_data_transfer(epf_test,
dst_phys_addr, map.phys_addr,
map_size, src_addr, DMA_DEV_TO_MEM); if (ret)
dev_err(dev, "Data transfer failed\n");
ktime_get_ts64(&end);
/* NOTE(review): the dma_unmap_single()/memcpy fallback and the loop
 * tail for this path are missing from this excerpt. */
/*
 * NOTE(review): fragment of the EPF-test write handler (data pushed from a
 * local buffer to the host, DMA_MEM_TO_DEV). The function header and the
 * loop tail are missing from this excerpt; only comments are added.
 */
/* Walk the host destination window in map-sized chunks. */
while (dst_size) {
ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
dst_addr, dst_size, &map); if (ret) {
dev_err(dev, "Failed to map address\n");
status = STATUS_DST_ADDR_INVALID; goto free_buf;
}
/* pci_epc_mem_map() may map less than requested; honor map.pci_size. */
map_size = map.pci_size; if (flags & FLAG_USE_DMA) {
/* DMA path: map the local buffer as the DMA source. */
src_phys_addr = dma_map_single(dma_dev, buf, map_size,
DMA_TO_DEVICE); if (dma_mapping_error(dma_dev, src_phys_addr)) {
dev_err(dev, "Failed to map source buffer addr\n");
ret = -ENOMEM; goto unmap;
}
/* start/end timestamps bracket the transfer for throughput reporting. */
ktime_get_ts64(&start);
ret = pci_epf_test_data_transfer(epf_test,
map.phys_addr, src_phys_addr,
map_size, dst_addr,
DMA_MEM_TO_DEV); if (ret)
dev_err(dev, "Data transfer failed\n");
ktime_get_ts64(&end);
/* NOTE(review): the dma_unmap_single()/memcpy fallback and the loop
 * tail for this path are missing from this excerpt. */
/*
 * NOTE(review): this span stitches together two fragments - the first two
 * statements (status-before-IRQ ordering) appear to belong to the IRQ-raise
 * helper, while the remainder is the tail of the doorbell-enable handler.
 * Confirm against the full source.
 */
/*
 * Set the status before raising the IRQ to ensure that the host sees
 * the updated value when it gets the IRQ.
 */
status |= STATUS_IRQ_RAISED;
WRITE_ONCE(reg->status, cpu_to_le32(status));
/* Re-point the BAR's inbound translation at the doorbell address. */
ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf_test->db_bar); if (ret) goto err_doorbell_cleanup;
status |= STATUS_DOORBELL_ENABLE_SUCCESS;
reg->status = cpu_to_le32(status); return;
err_doorbell_cleanup:
pci_epf_test_doorbell_cleanup(epf_test);
/* Falls through from the cleanup path: report failure to the host. */
set_status_err:
status |= STATUS_DOORBELL_ENABLE_FAIL;
reg->status = cpu_to_le32(status);
}
/**
 * pci_epf_test_disable_doorbell() - Disable the doorbell feature
 * @epf_test: the EPF test device
 * @reg: the test register region shared with the host
 *
 * Tears down the doorbell and restores the inbound translation of the
 * doorbell BAR to the memory originally allocated for it, then reports
 * success or failure to the host via reg->status.
 */
static void pci_epf_test_disable_doorbell(struct pci_epf_test *epf_test,
					  struct pci_epf_test_reg *reg)
{
	enum pci_barno bar = le32_to_cpu(reg->doorbell_bar);
	u32 status = le32_to_cpu(reg->status);
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	int ret;

	/* A BAR number below BAR_0 means the doorbell was never enabled. */
	if (bar < BAR_0)
		goto set_status_err;

	pci_epf_test_doorbell_cleanup(epf_test);

	/*
	 * The doorbell feature temporarily overrides the inbound translation
	 * to point to the address stored in epf_test->db_bar.phys_addr, i.e.,
	 * it calls set_bar() twice without ever calling clear_bar(), as
	 * calling clear_bar() would clear the BAR's PCI address assigned by
	 * the host. Thus, when disabling the doorbell, restore the inbound
	 * translation to point to the memory allocated for the BAR.
	 */
	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf->bar[bar]);
	if (ret)
		goto set_status_err;

	status |= STATUS_DOORBELL_DISABLE_SUCCESS;
	reg->status = cpu_to_le32(status);

	return;

set_status_err:
	status |= STATUS_DOORBELL_DISABLE_FAIL;
	reg->status = cpu_to_le32(status);
}
/*
 * NOTE(review): fragment of the command handler; the function header, the
 * reads of 'command'/'irq_type', and the reset_handler label are outside
 * this excerpt.
 */
/* Reject DMA-flagged commands when no DMA channel was initialized. */
if ((le32_to_cpu(READ_ONCE(reg->flags)) & FLAG_USE_DMA) &&
!epf_test->dma_supported) {
dev_err(dev, "Cannot transfer data using DMA\n"); goto reset_handler;
}
if (irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Failed to detect IRQ type\n"); goto reset_handler;
}
/* Dispatch the host-requested test command; every recognized command
 * completes by raising an IRQ back to the host. */
switch (command) { case COMMAND_RAISE_INTX_IRQ: case COMMAND_RAISE_MSI_IRQ: case COMMAND_RAISE_MSIX_IRQ:
pci_epf_test_raise_irq(epf_test, reg); break; case COMMAND_WRITE:
pci_epf_test_write(epf_test, reg);
pci_epf_test_raise_irq(epf_test, reg); break; case COMMAND_READ:
pci_epf_test_read(epf_test, reg);
pci_epf_test_raise_irq(epf_test, reg); break; case COMMAND_COPY:
pci_epf_test_copy(epf_test, reg);
pci_epf_test_raise_irq(epf_test, reg); break; case COMMAND_ENABLE_DOORBELL:
pci_epf_test_enable_doorbell(epf_test, reg);
pci_epf_test_raise_irq(epf_test, reg); break; case COMMAND_DISABLE_DOORBELL:
pci_epf_test_disable_doorbell(epf_test, reg);
pci_epf_test_raise_irq(epf_test, reg); break; default:
dev_err(dev, "Invalid command 0x%x\n", command); break;
}
/*
 * NOTE(review): two stitched fragments - a set-BAR loop and the tail of a
 * BAR-space allocation loop; the enclosing function headers are missing
 * from this excerpt.
 */
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { if (!epf_test->reg[bar]) continue;
ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
&epf->bar[bar]); if (ret) {
/* On failure release the space; only failure of the test-register
 * BAR is fatal, other BARs are skipped. */
pci_epf_free_space(epf, epf_test->reg[bar], bar,
PRIMARY_INTERFACE);
epf_test->reg[bar] = NULL;
dev_err(dev, "Failed to set BAR%d\n", bar); if (bar == test_reg_bar) return ret;
}
}
/* Allocation failure is logged but not fatal; reg[bar] stays NULL. */
base = pci_epf_alloc_space(epf, test_reg_size, bar,
epc_features, PRIMARY_INTERFACE); if (!base)
dev_err(dev, "Failed to allocate space for BAR%d\n",
bar);
epf_test->reg[bar] = base;
}
/**
 * pci_epf_test_init() - Module entry point for the PCI EPF test driver
 *
 * Allocates the "kpcitest" workqueue used to service host commands and
 * registers the endpoint-function driver. On registration failure the
 * workqueue is destroyed again so the module leaves no state behind.
 *
 * Returns: 0 on success, negative errno on failure.
 */
static int __init pci_epf_test_init(void)
{
	int ret;

	/* WQ_MEM_RECLAIM | WQ_HIGHPRI: command handling must make progress
	 * even under memory pressure, with low latency. */
	kpcitest_workqueue = alloc_workqueue("kpcitest",
					     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kpcitest_workqueue) {
		pr_err("Failed to allocate the kpcitest work queue\n");
		return -ENOMEM;
	}

	ret = pci_epf_register_driver(&test_driver);
	if (ret) {
		/* Undo the workqueue allocation on registration failure. */
		destroy_workqueue(kpcitest_workqueue);
		pr_err("Failed to register pci epf test driver --> %d\n", ret);
		return ret;
	}

	return 0;
}
module_init(pci_epf_test_init);
/**
 * pci_epf_test_exit() - Module exit point for the PCI EPF test driver
 *
 * Destroys the command workqueue and unregisters the endpoint-function
 * driver. The NULL check on the workqueue is defensive: it is always
 * non-NULL when module init succeeded.
 */
static void __exit pci_epf_test_exit(void)
{
	if (kpcitest_workqueue)
		destroy_workqueue(kpcitest_workqueue);
	pci_epf_unregister_driver(&test_driver);
}
module_exit(pci_epf_test_exit);
MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
/* NOTE(review): the author's email address was stripped from this string
 * (trailing space, likely an HTML-extraction artifact dropping the
 * angle-bracketed address); restored per the upstream kernel source. */
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");
/*
 * NOTE(review): the following disclaimer is extraneous scraped web content,
 * not part of the driver source; preserved here as a comment (translated
 * from German) so the file remains valid C:
 * "The information on this website has been carefully compiled to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the information provided is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */