/* Tunable: number of scatterlist entries pre-allocated per command. */
module_param_cb(inline_sg_cnt, &vhost_scsi_inline_sg_cnt_op, NULL, 0644);
MODULE_PARM_DESC(inline_sg_cnt, "Set the number of scatterlist entries to pre-allocate. The default is 2048.");
/*
 * Max number of requests before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * request.
 */
#define VHOST_SCSI_WEIGHT 256
/* Tracks requests in flight so a flush can wait for them to drain. */
struct vhost_scsi_inflight {
	/* Wait for the flush operation to finish */
	struct completion comp;
	/* Refcount for the inflight reqs */
	struct kref kref;
};
struct vhost_scsi_cmd { /* Descriptor from vhost_get_vq_desc() for virt_queue segment */ int tvc_vq_desc; /* The number of scatterlists associated with this cmd */
u32 tvc_sgl_count;
u32 tvc_prot_sgl_count;
u32 copied_iov:1; constvoid *read_iov; struct iov_iter *read_iter; struct scatterlist *sgl; struct sg_table table; struct scatterlist *prot_sgl; struct sg_table prot_table; /* Fast path response header iovec used when only one vec is needed */ struct iovec tvc_resp_iov; /* Number of iovs for response */ unsignedint tvc_resp_iovs_cnt; /* Pointer to response header iovecs if more than one is needed */ struct iovec *tvc_resp_iovs; /* Pointer to vhost_virtqueue for the cmd */ struct vhost_virtqueue *tvc_vq; /* The TCM I/O descriptor that is accessed via container_of() */ struct se_cmd tvc_se_cmd; /* Sense buffer that will be mapped into outgoing status */ unsignedchar tvc_sense_buf[TRANSPORT_SENSE_BUFFER]; /* * Dirty write descriptors of this command.
*/ struct vhost_log *tvc_log; unsignedint tvc_log_num; /* Completed commands list, serviced from vhost worker thread */ struct llist_node tvc_completion_list; /* Used to track inflight cmd */ struct vhost_scsi_inflight *inflight;
};
/* Wraps the TCM session representing the initiator-target (I_T) nexus. */
struct vhost_scsi_nexus {
	/* Pointer to TCM session for I_T Nexus */
	struct se_session *tvn_se_sess;
};
/* One target portal group (TPG) exported to the guest. */
struct vhost_scsi_tpg {
	/* Vhost port target portal group tag for TCM */
	u16 tport_tpgt;
	/* Used to track number of TPG Port/Lun Links wrt to explicit I_T Nexus shutdown */
	int tv_tpg_port_count;
	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
	int tv_tpg_vhost_count;
	/* Used for enabling T10-PI with legacy devices */
	int tv_fabric_prot_type;
	/* list for vhost_scsi_list */
	struct list_head tv_tpg_list;
	/* Used to protect access for tpg_nexus */
	struct mutex tv_tpg_mutex;
	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
	struct vhost_scsi_nexus *tpg_nexus;
	/* Pointer back to vhost_scsi_tport */
	struct vhost_scsi_tport *tport;
	/* Returned by vhost_scsi_make_tpg() */
	struct se_portal_group se_tpg;
	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
	struct vhost_scsi *vhost_scsi;
};
/* One vhost target port, identified by its WWPN. */
struct vhost_scsi_tport {
	/* SCSI protocol the tport is providing */
	u8 tport_proto_id;
	/* Binary World Wide unique Port Name for Vhost Target port */
	u64 tport_wwpn;
	/* ASCII formatted WWPN for Vhost Target port */
	char tport_name[VHOST_SCSI_NAMELEN];
	/* Returned by vhost_scsi_make_tport() */
	struct se_wwn tport_wwn;
};
/* A queued virtio-scsi event (hotplug etc.) awaiting delivery to the guest. */
struct vhost_scsi_evt {
	/* event to be sent to guest */
	struct virtio_scsi_event event;
	/* event list, serviced from vhost worker thread */
	struct llist_node list;
};
/*
 * Tunable: max number of IO virtqueues per vhost-scsi device.
 *
 * Fix: the fused token "staticunsigned" was a compile error; restored to
 * "static unsigned".
 */
static unsigned vhost_scsi_max_io_vqs = 128;
module_param_named(max_io_vqs, vhost_scsi_max_io_vqs, uint, 0644);
MODULE_PARM_DESC(max_io_vqs, "Set the max number of IO virtqueues a vhost scsi device can support. The default is 128. The max is 1024.");
/*
 * Per-virtqueue state wrapping the generic vhost_virtqueue.
 *
 * Fix: the fused token "unsignedint" was a compile error; restored to
 * "unsigned int".
 */
struct vhost_scsi_virtqueue {
	struct vhost_virtqueue vq;
	struct vhost_scsi *vs;
	/*
	 * Reference counting for inflight reqs, used for flush operation. At
	 * each time, one reference tracks new commands submitted, while we
	 * wait for another one to reach 0.
	 */
	struct vhost_scsi_inflight inflights[2];
	/*
	 * Indicate current inflight in use, protected by vq->mutex.
	 * Writers must also take dev mutex and flush under it.
	 */
	int inflight_idx;
	struct vhost_scsi_cmd *scsi_cmds;
	struct sbitmap scsi_tags;
	int max_cmds;
	struct page **upages;
	/*
	 * NOTE(review): the fields from se_cmd through tmf_log_num below read
	 * like per-TMF bookkeeping rather than per-virtqueue state — confirm
	 * they genuinely belong to this struct and were not spliced in from a
	 * neighbouring definition when this file was assembled.
	 */
	struct se_cmd se_cmd;
	u8 scsi_resp;
	struct vhost_scsi_inflight *inflight;
	struct iovec resp_iov;
	int in_iovs;
	int vq_desc;
	/*
	 * Dirty write descriptors of this command.
	 */
	struct vhost_log *tmf_log;
	unsigned int tmf_log_num;
};
/*
 * Context for processing request and control queue operations.
 *
 * Fix: the fused token "unsignedint" was a compile error; restored to
 * "unsigned int".
 */
struct vhost_scsi_ctx {
	/* Descriptor index returned by vhost_get_vq_desc() */
	int head;
	/* Counts of outgoing (guest->host) and incoming (host->guest) iovecs */
	unsigned int out, in;
	/* Sizes of the request and response headers */
	size_t req_size, rsp_size;
	/* Total byte lengths of the out and in iovec runs */
	size_t out_size, in_size;
	/* Pointers into the request header: target id and LUN byte 0 */
	u8 *target, *lunp;
	/* Request header buffer (virtio_scsi_cmd_req or the T10-PI variant) */
	void *req;
	/* Iterator over the outgoing iovecs, advanced past the header */
	struct iov_iter out_iter;
};
/*
 * Global mutex to protect vhost_scsi TPG list for vhost IOCTLs and LIO
 * configfs management operations.
 */
static DEFINE_MUTEX(vhost_scsi_mutex);
/* All registered TPGs; traversal/modification guarded by vhost_scsi_mutex. */
static LIST_HEAD(vhost_scsi_list);
	/*
	 * NOTE(review): fragment — the enclosing function's head is not
	 * visible in this chunk. Copies each populated sg entry's page into
	 * the caller-supplied iov_iter; entries with no page are skipped.
	 */
	for_each_sgtable_sg(&cmd->table, sg, i) {
		page = sg_page(sg);
		if (!page)
			continue;

		len = sg->length;

		if (copy_page_to_iter(page, 0, len, iter) != len) {
			/* Short copy: guest buffer was exhausted or faulted. */
			pr_err("Could not copy data while handling misaligned cmd. Error %zu\n",
			       len);
			return -1;
		}
	}
	return 0;
}
/* Fill in status and signal that we are done processing this command * * This is scheduled in the vhost work queue so we are called with the owner * process mm and can access the vring.
*/ staticvoid vhost_scsi_complete_cmd_work(struct vhost_work *work)
{ struct vhost_scsi_virtqueue *svq = container_of(work, struct vhost_scsi_virtqueue, completion_work); struct virtio_scsi_cmd_resp v_rsp; struct vhost_scsi_cmd *cmd, *t; struct llist_node *llnode; struct se_cmd *se_cmd; struct iov_iter iov_iter; bool signal = false; int ret;
if (page) {
put_page(page);
revert_bytes += curr->length;
} /* Clear so we can re-use it for the copy path */
sg_set_page(curr, NULL, 0, 0);
curr = sg_next(curr);
}
iov_iter_revert(iter, revert_bytes);
}
/* * Map a user memory range into a scatterlist * * Returns the number of scatterlist entries used or -errno on error.
*/ staticint
vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter, struct sg_table *sg_table, struct scatterlist **sgl, bool is_prot)
{ struct vhost_scsi_virtqueue *svq = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq); struct page **pages = svq->upages; struct scatterlist *sg = *sgl;
ssize_t bytes;
size_t offset; unsignedint n, npages = 0;
bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
VHOST_SCSI_PREALLOC_UPAGES, &offset); /* No pages were pinned */ if (bytes <= 0) return bytes < 0 ? bytes : -EFAULT;
while (bytes) {
n = min_t(unsignedint, PAGE_SIZE - offset, bytes); /* * The block layer requires bios/requests to be a multiple of * 512 bytes, but Windows can send us vecs that are misaligned. * This can result in bios and later requests with misaligned * sizes if we have to break up a cmd/scatterlist into multiple * bios. * * We currently only break up a command into multiple bios if * we hit the vec/seg limit, so check if our sgl_count is * greater than the max and if a vec in the cmd has a * misaligned offset/size.
*/ if (!is_prot &&
(offset & (SECTOR_SIZE - 1) || n & (SECTOR_SIZE - 1)) &&
cmd->tvc_sgl_count > BIO_MAX_VECS) {
WARN_ONCE(true, "vhost-scsi detected misaligned IO. Performance may be degraded."); goto revert_iter_get_pages;
}
	/*
	 * NOTE(review): fragment — the enclosing function's head is not
	 * visible in this chunk; this is the tail of a descriptor-fetch
	 * helper that fills in a vhost_scsi_ctx.
	 */
	/* On error, stop handling until the next kick. */
	if (unlikely(vc->head < 0))
		goto done;
	/* Nothing new? Wait for eventfd to tell us they refilled. */
	if (vc->head == vq->num) {
		if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
			/* Raced with new work: disable notify and retry. */
			vhost_disable_notify(&vs->dev, vq);
			ret = -EAGAIN;
		}
		goto done;
	}

	/*
	 * Get the size of request and response buffers.
	 * FIXME: Not correct for BIDI operation
	 */
	vc->out_size = iov_length(vq->iov, vc->out);
	vc->in_size = iov_length(&vq->iov[vc->out], vc->in);

	/*
	 * Copy over the virtio-scsi request header, which for a
	 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
	 * single iovec may contain both the header + outgoing
	 * WRITE payloads.
	 *
	 * copy_from_iter() will advance out_iter, so that it will
	 * point at the start of the outgoing WRITE payload, if
	 * DMA_TO_DEVICE is set.
	 */
	iov_iter_init(&vc->out_iter, ITER_SOURCE, vq->iov, vc->out, vc->out_size);
	ret = 0;
done:
	return ret;
}
staticint
vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
{ if (unlikely(vc->in_size < vc->rsp_size)) {
vq_err(vq, "Response buf too small, need min %zu bytes got %zu",
vc->rsp_size, vc->in_size); return -EINVAL;
} elseif (unlikely(vc->out_size < vc->req_size)) {
vq_err(vq, "Request buf too small, need min %zu bytes got %zu",
vc->req_size, vc->out_size); return -EIO;
}
return 0;
}
staticint
vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc, struct vhost_scsi_tpg **tpgp)
{ int ret = -EIO;
if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
&vc->out_iter))) {
vq_err(vq, "Faulted on copy_from_iter_full\n");
} elseif (unlikely(*vc->lunp != 1)) { /* virtio-scsi spec requires byte 0 of the lun to be 1 */
vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
} else { struct vhost_scsi_tpg **vs_tpg, *tpg = NULL;
if (vc->target) { /* validated at handler entry */
vs_tpg = vhost_vq_get_backend(vq);
tpg = READ_ONCE(vs_tpg[*vc->target]); if (unlikely(!tpg)) goto out;
}
if (tpgp)
*tpgp = tpg;
ret = 0;
}
out: return ret;
}
staticint
vhost_scsi_setup_resp_iovs(struct vhost_scsi_cmd *cmd, struct iovec *in_iovs, unsignedint in_iovs_cnt)
{ int i, cnt;
if (!in_iovs_cnt) return 0; /* * Initiators normally just put the virtio_scsi_cmd_resp in the first * iov, but just in case they wedged in some data with it we check for * greater than or equal to the response struct.
*/ if (in_iovs[0].iov_len >= sizeof(struct virtio_scsi_cmd_resp)) {
cmd->tvc_resp_iovs = &cmd->tvc_resp_iov;
cmd->tvc_resp_iovs_cnt = 1;
} else { /* * Legacy descriptor layouts didn't specify that we must put * the entire response in one iov. Worst case we have a * iov per byte.
*/
cnt = min(VHOST_SCSI_MAX_RESP_IOVS, in_iovs_cnt);
cmd->tvc_resp_iovs = kcalloc(cnt, sizeof(struct iovec),
GFP_KERNEL); if (!cmd->tvc_resp_iovs) return -ENOMEM;
cmd->tvc_resp_iovs_cnt = cnt;
}
for (i = 0; i < cmd->tvc_resp_iovs_cnt; i++)
cmd->tvc_resp_iovs[i] = in_iovs[i];
mutex_lock(&vq->mutex); /* * We can handle the vq only after the endpoint is setup by calling the * VHOST_SCSI_SET_ENDPOINT ioctl.
*/
vs_tpg = vhost_vq_get_backend(vq); if (!vs_tpg) goto out;
do {
ret = vhost_scsi_get_desc(vs, vq, &vc, vq_log, &log_num); if (ret) goto err;
/* * Setup pointers and values based upon different virtio-scsi * request header if T10_PI is enabled in KVM guest.
*/ if (t10_pi) {
vc.req = &v_req_pi;
vc.req_size = sizeof(v_req_pi);
vc.lunp = &v_req_pi.lun[0];
vc.target = &v_req_pi.lun[1];
} else {
vc.req = &v_req;
vc.req_size = sizeof(v_req);
vc.lunp = &v_req.lun[0];
vc.target = &v_req.lun[1];
}
/* * Validate the size of request and response buffers. * Check for a sane response buffer so we can report * early errors back to the guest.
*/
ret = vhost_scsi_chk_size(vq, &vc); if (ret) goto err;
ret = vhost_scsi_get_req(vq, &vc, &tpg); if (ret) goto err;
ret = -EIO; /* bad target on any error from here on */
/* * Determine data_direction by calculating the total outgoing * iovec sizes + incoming iovec sizes vs. virtio-scsi request + * response headers respectively. * * For DMA_TO_DEVICE this is out_iter, which is already pointing * to the right place. * * For DMA_FROM_DEVICE, the iovec will be just past the end * of the virtio-scsi response header in either the same * or immediately following iovec. * * Any associated T10_PI bytes for the outgoing / incoming * payloads are included in calculation of exp_data_len here.
*/
prot_bytes = 0;
iov_iter_init(&in_iter, ITER_DEST, &vq->iov[vc.out], vc.in,
vc.rsp_size + exp_data_len);
iov_iter_advance(&in_iter, vc.rsp_size);
data_iter = in_iter;
} else {
data_direction = DMA_NONE;
exp_data_len = 0;
} /* * If T10_PI header + payload is present, setup prot_iter values * and recalculate data_iter for vhost_scsi_mapal() mapping to * host scatterlists via get_user_pages_fast().
*/ if (t10_pi) { if (v_req_pi.pi_bytesout) { if (data_direction != DMA_TO_DEVICE) {
vq_err(vq, "Received non zero pi_bytesout," " but wrong data_direction\n"); goto err;
}
prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
} elseif (v_req_pi.pi_bytesin) { if (data_direction != DMA_FROM_DEVICE) {
vq_err(vq, "Received non zero pi_bytesin," " but wrong data_direction\n"); goto err;
}
prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
} /* * Set prot_iter to data_iter and truncate it to * prot_bytes, and advance data_iter past any * preceding prot_bytes that may be present. * * Also fix up the exp_data_len to reflect only the * actual data payload length.
*/ if (prot_bytes) {
exp_data_len -= prot_bytes;
prot_iter = data_iter;
iov_iter_truncate(&prot_iter, prot_bytes);
iov_iter_advance(&data_iter, prot_bytes);
}
tag = vhost64_to_cpu(vq, v_req_pi.tag);
task_attr = v_req_pi.task_attr;
cdb = &v_req_pi.cdb[0];
lun = vhost_buf_to_lun(v_req_pi.lun);
} else {
tag = vhost64_to_cpu(vq, v_req.tag);
task_attr = v_req.task_attr;
cdb = &v_req.cdb[0];
lun = vhost_buf_to_lun(v_req.lun);
} /* * Check that the received CDB size does not exceeded our * hardcoded max for vhost-scsi, then get a pre-allocated * cmd descriptor for the new virtio-scsi tag. * * TODO what if cdb was too small for varlen cdb header?
*/ if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
vq_err(vq, "Received SCSI CDB with command_size: %d that" " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE); goto err;
}
nexus = tpg->tpg_nexus; if (!nexus) {
vq_err(vq, "Unable to locate active struct vhost_scsi_nexus\n");
ret = -EIO; goto err;
}
if (data_direction != DMA_NONE) {
ret = vhost_scsi_mapal(vs, cmd, prot_bytes, &prot_iter,
exp_data_len, &data_iter,
data_direction); if (unlikely(ret)) {
vq_err(vq, "Failed to map iov to sgl\n");
vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd); goto err;
}
} /* * Save the descriptor from vhost_get_vq_desc() to be used to * complete the virtio-scsi request in TCM callback context via * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
*/
cmd->tvc_vq_desc = vc.head;
vhost_scsi_target_queue_cmd(nexus, cmd, cdb, lun, task_attr,
data_direction,
exp_data_len + prot_bytes);
ret = 0;
err: /* * ENXIO: No more requests, or read error, wait for next kick * EINVAL: Invalid response buffer, drop the request * EIO: Respond with bad target * EAGAIN: Pending request * ENOMEM: Could not allocate resources for request
*/ if (ret == -ENXIO) break; elseif (ret == -EIO) {
vhost_scsi_send_bad_target(vs, vq, &vc, TYPE_IO_CMD);
vhost_scsi_log_write(vq, vq_log, log_num);
} elseif (ret == -ENOMEM) {
vhost_scsi_send_status(vs, vq, &vc,
SAM_STAT_TASK_SET_FULL);
vhost_scsi_log_write(vq, vq_log, log_num);
}
} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
mutex_unlock(&vq->mutex);
}
staticvoid
vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq, int in_iovs, int vq_desc, struct iovec *resp_iov, int tmf_resp_code)
{ struct virtio_scsi_ctrl_tmf_resp rsp; struct iov_iter iov_iter; int ret;
/*
 * Deferred TMF completion: flush the device so responses for earlier
 * commands go out first, then queue the TMF's own response work. If the
 * vq work can no longer be queued (vq torn down), release the TMF here.
 *
 * Fix: the fused token "staticvoid" was a compile error; restored to
 * "static void".
 */
static void vhost_scsi_tmf_flush_work(struct work_struct *work)
{
	struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
						  flush_work);
	struct vhost_virtqueue *vq = &tmf->svq->vq;

	/*
	 * Make sure we have sent responses for other commands before we
	 * send our response.
	 */
	vhost_dev_flush(vq->dev);
	if (!vhost_vq_work_queue(vq, &tmf->vwork))
		vhost_scsi_release_tmf_res(tmf);
}
mutex_lock(&vq->mutex); /* * We can handle the vq only after the endpoint is setup by calling the * VHOST_SCSI_SET_ENDPOINT ioctl.
*/ if (!vhost_vq_get_backend(vq)) goto out;
do {
ret = vhost_scsi_get_desc(vs, vq, &vc, vq_log, &log_num); if (ret) goto err;
/* * Get the request type first in order to setup * other parameters dependent on the type.
*/
vc.req = &v_req.type;
typ_size = sizeof(v_req.type);
if (unlikely(!copy_from_iter_full(vc.req, typ_size,
&vc.out_iter))) {
vq_err(vq, "Faulted on copy_from_iter tmf type\n"); /* * The size of the response buffer depends on the * request type and must be validated against it. * Since the request type is not known, don't send * a response.
*/ continue;
}
/* * Validate the size of request and response buffers. * Check for a sane response buffer so we can report * early errors back to the guest.
*/
ret = vhost_scsi_chk_size(vq, &vc); if (ret) goto err;
/* * Get the rest of the request now that its size is known.
*/
vc.req += typ_size;
vc.req_size -= typ_size;
ret = vhost_scsi_get_req(vq, &vc, &tpg); if (ret) goto err;
if (v_req.type == VIRTIO_SCSI_T_TMF)
vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc,
vq_log, log_num); else {
vhost_scsi_send_an_resp(vs, vq, &vc);
vhost_scsi_log_write(vq, vq_log, log_num);
}
err: /* * ENXIO: No more requests, or read error, wait for next kick * EINVAL: Invalid response buffer, drop the request * EIO: Respond with bad target * EAGAIN: Pending request
*/ if (ret == -ENXIO) break; elseif (ret == -EIO) {
vhost_scsi_send_bad_target(vs, vq, &vc,
v_req.type == VIRTIO_SCSI_T_TMF ?
TYPE_CTRL_TMF :
TYPE_CTRL_AN);
vhost_scsi_log_write(vq, vq_log, log_num);
}
} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
mutex_unlock(&vq->mutex);
}
	/*
	 * NOTE(review): fragment — the enclosing function's head is not
	 * visible in this chunk. Allocates an event, encodes the LUN
	 * address when tpg/lun are given, and hands it to the event worker.
	 */
	evt = vhost_scsi_allocate_evt(vs, event, reason);
	if (!evt)
		return;

	if (tpg && lun) {
		/* TODO: share lun setup code with virtio-scsi.ko */
		/*
		 * Note: evt->event is zeroed when we allocate it and
		 * lun[4-7] need to be zero according to virtio-scsi spec.
		 */
		evt->event.lun[0] = 0x01;
		evt->event.lun[1] = tpg->tport_tpgt;
		/* Flat-addressing format (0x40) for LUNs >= 256 */
		if (lun->unpacked_lun >= 256)
			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
	}

	llist_add(&evt->list, &vs->vs_event_list);
	/* If the event work can't be queued, complete events inline. */
	if (!vhost_vq_work_queue(vq, &vs->vs_event_work))
		vhost_scsi_complete_events(vs, true);
}
/*
 * Wait for all in-flight requests to finish.
 *
 * Swaps in fresh inflight counters, drops the initial reference on the
 * old ones so they can hit zero, flushes the vhost worker, and then
 * blocks until every pre-flush request has completed.
 *
 * Callers must hold dev mutex.
 *
 * Fix: the fused token "staticvoid" was a compile error; restored to
 * "static void".
 */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
	int i;

	/* Init new inflight and remember the old inflight */
	vhost_scsi_init_inflight(vs, vs->old_inflight);

	/*
	 * The inflight->kref was initialized to 1. We decrement it here to
	 * indicate the start of the flush operation so that it will reach 0
	 * when all the reqs are finished.
	 */
	for (i = 0; i < vs->dev.nvqs; i++)
		kref_put(&vs->old_inflight[i]->kref, vhost_scsi_done_inflight);

	/* Flush both the vhost poll and vhost work */
	vhost_dev_flush(&vs->dev);

	/* Wait for all reqs issued before the flush to be finished */
	for (i = 0; i < vs->dev.nvqs; i++)
		wait_for_completion(&vs->old_inflight[i]->comp);
}
/* * Called from vhost_scsi_ioctl() context to walk the list of available * vhost_scsi_tpg with an active struct vhost_scsi_nexus * * The lock nesting rule is: * vs->dev.mutex -> vhost_scsi_mutex -> tpg->tv_tpg_mutex -> vq->mutex
*/ staticint
vhost_scsi_set_endpoint(struct vhost_scsi *vs, struct vhost_scsi_target *t)
{ struct se_portal_group *se_tpg; struct vhost_scsi_tport *tv_tport; struct vhost_scsi_tpg *tpg; struct vhost_scsi_tpg **vs_tpg; struct vhost_virtqueue *vq; int index, ret, i, len; bool match = false;
mutex_lock(&vs->dev.mutex);
/* Verify that ring has been setup correctly. */ for (index = 0; index < vs->dev.nvqs; ++index) { /* Verify that ring has been setup correctly. */ if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
ret = -EFAULT; goto out;
}
}
if (vs->vs_tpg) {
pr_err("vhost-scsi endpoint already set for %s.\n",
vs->vs_vhost_wwpn);
ret = -EEXIST; goto out;
}
len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
vs_tpg = kzalloc(len, GFP_KERNEL); if (!vs_tpg) {
ret = -ENOMEM; goto out;
}
if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) { /* * In order to ensure individual vhost-scsi configfs * groups cannot be removed while in use by vhost ioctl, * go ahead and take an explicit se_tpg->tpg_group.cg_item * dependency now.
*/
se_tpg = &tpg->se_tpg;
ret = target_depend_item(&se_tpg->tpg_group.cg_item); if (ret) {
pr_warn("target_depend_item() failed: %d\n", ret);
mutex_unlock(&tpg->tv_tpg_mutex);
mutex_unlock(&vhost_scsi_mutex); goto undepend;
}
tpg->tv_tpg_vhost_count++;
tpg->vhost_scsi = vs;
vs_tpg[tpg->tport_tpgt] = tpg;
match = true;
}
mutex_unlock(&tpg->tv_tpg_mutex);
}
mutex_unlock(&vhost_scsi_mutex);
if (match) {
memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn, sizeof(vs->vs_vhost_wwpn));
for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq; if (!vhost_vq_is_setup(vq)) continue;
ret = vhost_scsi_setup_vq_cmds(vq, vq->num); if (ret) goto destroy_vq_cmds;
}
for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
vhost_vq_set_backend(vq, vs_tpg);
vhost_vq_init_access(vq);
mutex_unlock(&vq->mutex);
}
ret = 0;
} else {
ret = -ENODEV; goto free_tpg;
}
/* * Act as synchronize_rcu to make sure requests after this point * see a fully setup device.
*/
vhost_scsi_flush(vs);
vs->vs_tpg = vs_tpg; goto out;
destroy_vq_cmds: for (i--; i >= VHOST_SCSI_VQ_IO; i--) { if (!vhost_vq_get_backend(&vs->vqs[i].vq))
vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
}
undepend: for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
tpg = vs_tpg[i]; if (tpg) {
mutex_lock(&tpg->tv_tpg_mutex);
tpg->vhost_scsi = NULL;
tpg->tv_tpg_vhost_count--;
mutex_unlock(&tpg->tv_tpg_mutex);
target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
}
}
free_tpg:
kfree(vs_tpg);
out:
mutex_unlock(&vs->dev.mutex); return ret;
}
mutex_lock(&vs->dev.mutex); /* Verify that ring has been setup correctly. */ for (index = 0; index < vs->dev.nvqs; ++index) { if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
ret = -EFAULT; goto err_dev;
}
}
if (!vs->vs_tpg) {
ret = 0; goto err_dev;
}
for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
target = i;
tpg = vs->vs_tpg[target]; if (!tpg) continue;
tv_tport = tpg->tport; if (!tv_tport) {
ret = -ENODEV; goto err_dev;
}
if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu" " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
tv_tport->tport_name, tpg->tport_tpgt,
t->vhost_wwpn, t->vhost_tpgt);
ret = -EINVAL; goto err_dev;
}
match = true;
} if (!match) goto free_vs_tpg;
/* Prevent new cmds from starting and accessing the tpgs/sessions */ for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
vhost_vq_set_backend(vq, NULL);
mutex_unlock(&vq->mutex);
} /* Make sure cmds are not running before tearing them down. */
vhost_scsi_flush(vs);
for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
vhost_scsi_destroy_vq_cmds(vq);
}
/* * We can now release our hold on the tpg and sessions and userspace * can free them after this point.
*/ for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
target = i;
tpg = vs->vs_tpg[target]; if (!tpg) continue;
free_vs_tpg: /* * Act as synchronize_rcu to make sure access to * old vs->vs_tpg is finished.
*/
vhost_scsi_flush(vs);
kfree(vs->vs_tpg);
vs->vs_tpg = NULL;
memset(vs->vs_vhost_wwpn, 0, sizeof(vs->vs_vhost_wwpn));
WARN_ON(vs->vs_events_nr);
mutex_unlock(&vs->dev.mutex); return 0;
is_log = features & (1 << VHOST_F_LOG_ALL); /* * All VQs should have same feature.
*/
was_log = vhost_has_feature(&vs->vqs[0].vq, VHOST_F_LOG_ALL);
for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
vq->acked_features = features;
mutex_unlock(&vq->mutex);
}
/* * If VHOST_F_LOG_ALL is removed, free tvc_log after * vq->acked_features is committed.
*/ if (!is_log && was_log) { for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) { if (!vs->vqs[i].scsi_cmds) continue;
if (plug)
reason = VIRTIO_SCSI_EVT_RESET_RESCAN; else
reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
mutex_lock(&vq->mutex); /* * We can't queue events if the backend has been cleared, because * we could end up queueing an event after the flush.
*/ if (!vhost_vq_get_backend(vq)) goto unlock;
/*
 * NOTE(review): the following website-disclaimer text (originally German)
 * was pasted into this file as bare prose, which is not valid C; it is
 * preserved here as a comment, translated to English:
 * "The information on this web page was compiled carefully and to the best
 *  of our knowledge. However, neither completeness, nor correctness, nor
 *  quality of the provided information is guaranteed.
 *  Remark: the syntax highlighting and the measurement are still
 *  experimental."
 */