queue_work(snic_glob->event_q, &snic->link_work);
} /* end of snic_handle_link_event */
/* * snic_notify_set : sets notification area * This notification area is to receive events from fw * Note: snic supports only MSIX interrupts, in which we can just call * svnic_dev_notify_set directly
*/ staticint
snic_notify_set(struct snic *snic)
{ int ret = 0; enum vnic_dev_intr_mode intr_mode;
intr_mode = svnic_dev_get_intr_mode(snic->vdev);
if (intr_mode == VNIC_DEV_INTR_MODE_MSIX) {
ret = svnic_dev_notify_set(snic->vdev, SNIC_MSIX_ERR_NOTIFY);
} else {
SNIC_HOST_ERR(snic->shost, "Interrupt mode should be setup before devcmd notify set %d\n",
intr_mode);
ret = -1;
}
return ret;
} /* end of snic_notify_set */
/*
 * snic_dev_wait : polls vnic open status.
 */
static int
snic_dev_wait(struct vnic_dev *vdev,
	      int (*start)(struct vnic_dev *, int),
	      int (*finished)(struct vnic_dev *, int *),
	      int arg)
{
	unsigned long deadline;
	int retries = 0;
	int err, done;

	err = start(vdev, arg);
	if (err)
		return err;

	/*
	 * Wait for func to complete...2 seconds max.
	 *
	 * Sometimes schedule_timeout_uninterruptible take long time
	 * to wakeup, which results skipping retry. The retry counter
	 * ensures to retry at least two times.
	 */
	deadline = jiffies + (HZ * 2);

	do {
		err = finished(vdev, &done);
		if (err)
			return err;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);
		retries++;
	} while (time_after(deadline, jiffies) || (retries < 3));

	return -ETIMEDOUT;
} /* end of snic_dev_wait */
/* * snic_cleanup: called by snic_remove * Stops the snic device, masks all interrupts, Completed CQ entries are * drained. Posted WQ/RQ/Copy-WQ entries are cleanup
*/ staticint
snic_cleanup(struct snic *snic)
{ unsignedint i; int ret;
svnic_dev_disable(snic->vdev); for (i = 0; i < snic->intr_count; i++)
svnic_intr_mask(&snic->intr[i]);
for (i = 0; i < snic->wq_count; i++) {
ret = svnic_wq_disable(&snic->wq[i]); if (ret) return ret;
}
/* Clean up completed IOs */
snic_fwcq_cmpl_handler(snic, -1);
snic_wq_cmpl_handler(snic, -1);
/* Clean up the IOs that have not completed */ for (i = 0; i < snic->wq_count; i++)
svnic_wq_clean(&snic->wq[i], snic_free_wq_buf);
for (i = 0; i < snic->cq_count; i++)
svnic_cq_clean(&snic->cq[i]);
for (i = 0; i < snic->intr_count; i++)
svnic_intr_clean(&snic->intr[i]);
/* Cleanup snic specific requests */
snic_free_all_untagged_reqs(snic);
/* * Allocate SCSI Host and setup association between host, and snic
*/
shost = scsi_host_alloc(&snic_host_template, sizeof(struct snic)); if (!shost) {
SNIC_ERR("Unable to alloc scsi_host\n");
ret = -ENOMEM;
ret = pci_enable_device(pdev); if (ret) {
SNIC_HOST_ERR(shost, "Cannot enable PCI Resources, aborting : %d\n",
ret);
goto err_free_snic;
}
ret = pci_request_regions(pdev, SNIC_DRV_NAME); if (ret) {
SNIC_HOST_ERR(shost, "Cannot obtain PCI Resources, aborting : %d\n",
ret);
goto err_pci_disable;
}
pci_set_master(pdev);
/* * Query PCI Controller on system for DMA addressing * limitation for the device. Try 43-bit first, and * fail to 32-bit.
*/
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(43)); if (ret) {
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (ret) {
SNIC_HOST_ERR(shost, "No Usable DMA Configuration, aborting %d\n",
ret); goto err_rel_regions;
}
}
/* Map vNIC resources from BAR0 */ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n");
ret = -ENODEV; goto err_rel_regions;
}
snic->bar0.vaddr = pci_iomap(pdev, 0, 0); if (!snic->bar0.vaddr) {
SNIC_HOST_ERR(shost, "Cannot memory map BAR0 res hdr aborting.\n");
shost->max_cmd_len = MAX_COMMAND_SIZE; /*defined in scsi_cmnd.h*/
snic_get_res_counts(snic);
/* * Assumption: Only MSIx is supported
*/
ret = snic_set_intr_mode(snic); if (ret) {
SNIC_HOST_ERR(shost, "Failed to set intr mode aborting. %d\n",
ret);
goto err_dev_close;
}
ret = snic_alloc_vnic_res(snic); if (ret) {
SNIC_HOST_ERR(shost, "Failed to alloc vNIC resources aborting. %d\n",
ret);
goto err_clear_intr;
}
/* Initialize specific lists */
INIT_LIST_HEAD(&snic->list);
/* * spl_cmd_list for maintaining snic specific cmds * such as EXCH_VER_REQ, REPORT_TARGETS etc
*/
INIT_LIST_HEAD(&snic->spl_cmd_list);
spin_lock_init(&snic->spl_cmd_lock);
/* initialize all snic locks */
spin_lock_init(&snic->snic_lock);
for (i = 0; i < SNIC_WQ_MAX; i++)
spin_lock_init(&snic->wq_lock[i]);
for (i = 0; i < SNIC_IO_LOCKS; i++)
spin_lock_init(&snic->io_req_lock[i]);
pool = mempool_create_slab_pool(2,
snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]); if (!pool) {
SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n");
ret = -ENOMEM; goto err_free_res;
}
snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL] = pool;
pool = mempool_create_slab_pool(2,
snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]); if (!pool) {
SNIC_HOST_ERR(shost, "max sgl pool creation failed\n");
ret = -ENOMEM; goto err_free_dflt_sgl_pool;
}
snic->req_pool[SNIC_REQ_CACHE_MAX_SGL] = pool;
pool = mempool_create_slab_pool(2,
snic_glob->req_cache[SNIC_REQ_TM_CACHE]); if (!pool) {
SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n");
ret = -ENOMEM; goto err_free_max_sgl_pool;
}
snic->req_pool[SNIC_REQ_TM_CACHE] = pool;
/* Initialize snic state */
atomic_set(&snic->state, SNIC_INIT);
atomic_set(&snic->ios_inflight, 0);
/* Setup notification buffer area */
ret = snic_notify_set(snic); if (ret) {
SNIC_HOST_ERR(shost, "Failed to alloc notify buffer aborting. %d\n",
ret);
/* * snic_remove : invoked on unbinding the interface to cleanup the * resources allocated in snic_probe on initialization.
*/ staticvoid
snic_remove(struct pci_dev *pdev)
{ struct snic *snic = pci_get_drvdata(pdev); unsignedlong flags;
if (!snic) {
SNIC_INFO("sNIC dev: bus %d slot %d fn %d snic inst is null.\n",
pdev->bus->number, PCI_SLOT(pdev->devfn),
PCI_FUNC(pdev->devfn));
return;
}
/* * Mark state so that the workqueue thread stops forwarding * received frames and link events. ISR and other threads * that can queue work items will also stop creating work * items on the snic workqueue
*/
snic_set_state(snic, SNIC_OFFLINE);
spin_lock_irqsave(&snic->snic_lock, flags);
snic->stop_link_events = 1;
spin_unlock_irqrestore(&snic->snic_lock, flags);
/* * This stops the snic device, masks all interrupts, Completed * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are * cleanup
*/
snic_cleanup(snic);
/* this frees Scsi_Host and snic memory (continuous chunk) */
scsi_host_put(snic->shost);
} /* end of snic_remove */
struct snic_global *snic_glob;
/* * snic_global_data_init: Initialize SNIC Global Data * Notes: All the global lists, variables should be part of global data * this helps in debugging.
*/ staticint
snic_global_data_init(void)
{ int ret = 0; struct kmem_cache *cachep;
ssize_t len = 0;
/* Create a cache for allocation of max size Extended SGLs */
len = sizeof(struct snic_req_info);
len += sizeof(struct snic_host_req) + sizeof(struct snic_max_sgl);
cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN,
SLAB_HWCACHE_ALIGN, NULL); if (!cachep) {
SNIC_ERR("Failed to create snic max sgl slab\n");
ret = -ENOMEM;
/*
 * NOTE(review): the following text is extraneous web-page boilerplate (a
 * German disclaimer from the page this file was extracted from) and is NOT
 * part of the driver source; wrapped in a comment so it cannot be parsed
 * as code. Translation: "The information on this web page was carefully
 * compiled to the best of our knowledge. However, neither completeness,
 * nor correctness, nor quality of the provided information is guaranteed.
 * Note: the colored syntax display and the measurement are still
 * experimental."
 */