/* * Xen SCSI frontend driver * * Copyright (c) 2008, FUJITSU Limited * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE.
*/
struct vscsifrnt_shadow { /* command between backend and frontend */ unsignedchar act;
uint8_t nr_segments;
uint16_t rqid;
uint16_t ref_rqid;
bool inflight;
unsignedint nr_grants; /* number of grants in gref[] */ struct scsiif_request_segment *sg; /* scatter/gather elements */ struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE];
/* Do reset or abort function. */
wait_queue_head_t wq_reset; /* reset work queue */ int wait_reset; /* reset work queue condition */
int32_t rslt_reset; /* reset response status: */ /* SUCCESS or FAILED or: */ #define RSLT_RESET_WAITING 0 #define RSLT_RESET_ERR -1
/* Requested struct scsi_cmnd is stored from kernel. */ struct scsi_cmnd *sc; int gref[vscsiif_grants_sg(SG_ALL) + SG_ALL];
};
/*
 * Release the grant references of a finished request and free the
 * dynamically allocated scatter/gather element array.
 *
 * If the backend still holds any grant, the page must not be reused by
 * the frontend: mark the whole connection as broken and bail out early
 * (note: shadow->sg is deliberately NOT freed in that case).
 */
static void scsifront_gnttab_done(struct vscsifrnt_info *info,
				  struct vscsifrnt_shadow *shadow)
{
	int i;

	/* No data phase: no grants were set up for this request. */
	if (shadow->sc->sc_data_direction == DMA_NONE)
		return;

	for (i = 0; i < shadow->nr_grants; i++) {
		if (unlikely(!gnttab_try_end_foreign_access(shadow->gref[i]))) {
			scsifront_set_error(info, "grant still in use by backend");
			return;
		}
	}

	kfree(shadow->sg);
}
/*
 * Translate the host-byte portion of a backend result code into the
 * kernel's DID_* host status values. Unknown codes map to DID_ERROR.
 */
static unsigned int scsifront_host_byte(int32_t rslt)
{
	switch (XEN_VSCSIIF_RSLT_HOST(rslt)) {
	case XEN_VSCSIIF_RSLT_HOST_OK:
		return DID_OK;
	case XEN_VSCSIIF_RSLT_HOST_NO_CONNECT:
		return DID_NO_CONNECT;
	case XEN_VSCSIIF_RSLT_HOST_BUS_BUSY:
		return DID_BUS_BUSY;
	case XEN_VSCSIIF_RSLT_HOST_TIME_OUT:
		return DID_TIME_OUT;
	case XEN_VSCSIIF_RSLT_HOST_BAD_TARGET:
		return DID_BAD_TARGET;
	case XEN_VSCSIIF_RSLT_HOST_ABORT:
		return DID_ABORT;
	case XEN_VSCSIIF_RSLT_HOST_PARITY:
		return DID_PARITY;
	case XEN_VSCSIIF_RSLT_HOST_ERROR:
		return DID_ERROR;
	case XEN_VSCSIIF_RSLT_HOST_RESET:
		return DID_RESET;
	case XEN_VSCSIIF_RSLT_HOST_BAD_INTR:
		return DID_BAD_INTR;
	case XEN_VSCSIIF_RSLT_HOST_PASSTHROUGH:
		return DID_PASSTHROUGH;
	case XEN_VSCSIIF_RSLT_HOST_SOFT_ERROR:
		return DID_SOFT_ERROR;
	case XEN_VSCSIIF_RSLT_HOST_IMM_RETRY:
		return DID_IMM_RETRY;
	case XEN_VSCSIIF_RSLT_HOST_REQUEUE:
		return DID_REQUEUE;
	case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_DISRUPTED:
		return DID_TRANSPORT_DISRUPTED;
	case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_FAILFAST:
		return DID_TRANSPORT_FAILFAST;
	case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_MARGINAL:
		return DID_TRANSPORT_MARGINAL;
	default:
		return DID_ERROR;
	}
}
if (ring_rsp->rqid >= VSCSIIF_MAX_REQS ||
!info->shadow[ring_rsp->rqid]->inflight) {
scsifront_set_error(info, "illegal rqid returned by backend!"); return;
}
shadow = info->shadow[ring_rsp->rqid];
shadow->inflight = false;
if (shadow->act == VSCSIIF_ACT_SCSI_CDB)
scsifront_cdb_cmd_done(info, ring_rsp); else
scsifront_sync_cmd_done(info, ring_rsp);
}
staticint scsifront_ring_drain(struct vscsifrnt_info *info, unsignedint *eoiflag)
{ struct vscsiif_response ring_rsp;
RING_IDX i, rp; int more_to_do = 0;
rp = READ_ONCE(info->ring.sring->rsp_prod);
virt_rmb(); /* ordering required respective to backend */ if (RING_RESPONSE_PROD_OVERFLOW(&info->ring, rp)) {
scsifront_set_error(info, "illegal number of responses"); return 0;
} for (i = info->ring.rsp_cons; i != rp; i++) {
RING_COPY_RESPONSE(&info->ring, i, &ring_rsp);
scsifront_do_response(info, &ring_rsp); if (info->host_active == STATE_ERROR) return 0;
*eoiflag &= ~XEN_EOI_FLAG_SPURIOUS;
}
info->ring.rsp_cons = i;
if (i != info->ring.req_prod_pvt)
RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); else
info->ring.sring->rsp_event = i + 1;
page++;
len -= bytes;
off = 0;
ref_cnt++;
}
BUG_ON(seg_grants < ref_cnt);
seg_grants = ref_cnt;
}
scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i) {
page = sg_page(sg);
off = sg->offset;
len = sg->length;
while (len > 0 && data_len > 0) { /* * sg sends a scatterlist that is larger than * the data_len it wants transferred for certain * IO sizes.
*/
bytes = min_t(unsignedint, len, PAGE_SIZE - off);
bytes = min(bytes, data_len);
/* * Any exception handling (reset or abort) must be forwarded to the backend. * We have to wait until an answer is returned. This answer contains the * result to be returned to the requestor.
*/ staticint scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
{ struct Scsi_Host *host = sc->device->host; struct vscsifrnt_info *info = shost_priv(host); struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc); int err = 0;
if (info->host_active == STATE_ERROR) return FAILED;
shadow = kzalloc(sizeof(*shadow), GFP_NOIO); if (!shadow) return FAILED;
/* * Front device state path, used in sdev_configure called * on successfull scsi_add_device, and in sdev_destroy called * on remove of a device.
*/
snprintf(info->dev_state_path, sizeof(info->dev_state_path), "vscsi-devs/%s/state", dir[i]);
switch (op) { case VSCSIFRONT_OP_ADD_LUN: if (device_state != XenbusStateInitialised) break;
if (scsi_add_device(info->host, chn, tgt, lun)) {
dev_err(&dev->dev, "scsi_add_device\n");
err = xenbus_printf(XBT_NIL, dev->nodename,
info->dev_state_path, "%d", XenbusStateClosed); if (err)
xenbus_dev_error(dev, err, "%s: writing dev_state_path", __func__);
} break; case VSCSIFRONT_OP_DEL_LUN: if (device_state != XenbusStateClosing) break;
if (!info->pause && sg_grant)
dev_info(&dev->dev, "using up to %d SG entries\n", nr_segs); elseif (info->pause && nr_segs < host->sg_tablesize)
dev_warn(&dev->dev, "SG entries decreased from %d to %u - device may not work properly anymore\n",
host->sg_tablesize, nr_segs);
if (xenbus_read_driver_state(dev->nodename) ==
XenbusStateInitialised)
scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
if (dev->state != XenbusStateConnected)
xenbus_switch_state(dev, XenbusStateConnected); break;
case XenbusStateClosed: if (dev->state == XenbusStateClosed) break;
fallthrough; /* Missed the backend's Closing state */ case XenbusStateClosing:
scsifront_disconnect(info); break;
case XenbusStateReconfiguring:
scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN);
xenbus_switch_state(dev, XenbusStateReconfiguring); break;
case XenbusStateReconfigured:
scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
xenbus_switch_state(dev, XenbusStateConnected); break;
}
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.