/* * The last request could free the device from softirq context and * xen_blkif_free() can sleep.
*/ staticvoid xen_blkif_deferred_free(struct work_struct *work)
{ struct xen_blkif *blkif;
/* * Because freeing back to the cache may be deferred, it is not * safe to unload the module (and hence destroy the cache) until * this has completed. To prevent premature unloading, take an * extra module reference here and release only when the object * has been freed back to the cache.
*/
__module_get(THIS_MODULE);
INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);
for (r = 0; r < blkif->nr_rings; r++) { struct xen_blkif_ring *ring = &blkif->rings[r]; unsignedint i = 0;
if (!ring->active) continue;
if (ring->xenblkd) {
kthread_stop(ring->xenblkd);
ring->xenblkd = NULL;
wake_up(&ring->shutdown_wq);
}
/* The above kthread_stop() guarantees that at this point we * don't have any discard_io or other_io requests. So, checking * for inflight IO is enough.
*/ if (atomic_read(&ring->inflight) > 0) {
busy = true; continue;
}
if (ring->irq) {
unbind_from_irqhandler(ring->irq, ring);
ring->irq = 0;
}
if (ring->blk_rings.common.sring) {
xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
ring->blk_rings.common.sring = NULL;
}
/* Remove all persistent grants and the cache of ballooned pages. */
xen_blkbk_free_caches(ring);
/* Check that there is no request in use */
list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
list_del(&req->free_list);
for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
kfree(req->segments[j]);
for (j = 0; j < MAX_INDIRECT_PAGES; j++)
kfree(req->indirect_pages[j]);
blkif->nr_ring_pages = 0; /* * blkif->rings was allocated in connect_ring, so we should free it in * here.
*/
kfree(blkif->rings);
blkif->rings = NULL;
blkif->nr_rings = 0;
if (cdrom || disk_to_cdi(file_bdev(vbd->bdev_file)->bd_disk))
vbd->type |= VDISK_CDROM; if (file_bdev(vbd->bdev_file)->bd_disk->flags & GENHD_FL_REMOVABLE)
vbd->type |= VDISK_REMOVABLE;
if (bdev_write_cache(file_bdev(bdev_file)))
vbd->flush_support = true; if (bdev_max_secure_erase_sectors(file_bdev(bdev_file)))
vbd->discard_secure = true;
pr_debug("Successful creation of handle=%04x (dom=%u)\n",
handle, blkif->domid); return 0;
}
/* * Entry point to this code when a new device is created. Allocate the basic * structures, and watch the store waiting for the hotplug scripts to tell us * the device's physical major and minor numbers. Switch to InitWait.
*/ staticint xen_blkbk_probe(struct xenbus_device *dev, conststruct xenbus_device_id *id)
{ int err; struct backend_info *be = kzalloc(sizeof(struct backend_info),
GFP_KERNEL);
/* match the pr_debug in xen_blkbk_remove */
pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);
/* Multi-queue: advertise how many queues are supported by us.*/
err = xenbus_printf(XBT_NIL, dev->nodename, "multi-queue-max-queues", "%u", xenblk_max_queues); if (err)
pr_warn("Error writing multi-queue-max-queues\n");
/* * Callback received when the hotplug scripts have placed the physical-device * node. Read it and the mode node, and create a vbd. If the frontend is * ready, connect.
*/ staticvoid backend_changed(struct xenbus_watch *watch, constchar *path, constchar *token)
{ int err; unsigned major; unsigned minor; struct backend_info *be
= container_of(watch, struct backend_info, backend_watch); struct xenbus_device *dev = be->dev; int cdrom = 0; unsignedlong handle; char *device_type;
err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
&major, &minor); if (XENBUS_EXIST_ERR(err)) { /* * Since this watch will fire once immediately after it is * registered, we expect this. Ignore it, and wait for the * hotplug scripts.
*/ return;
} if (err != 2) {
xenbus_dev_fatal(dev, err, "reading physical-device"); return;
}
if (be->major | be->minor) { if (be->major != major || be->minor != minor)
pr_warn("changing physical device (from %x:%x to %x:%x) not supported.\n",
be->major, be->minor, major, minor); return;
}
/* Front end dir is a number, which is used as the handle. */
err = kstrtoul(strrchr(dev->otherend, '/') + 1, 0, &handle); if (err) {
kfree(be->mode);
be->mode = NULL; return;
}
/*
 * Callback received when the frontend's state changes.
 *
 * Drives the backend half of the xenbus handshake: reacts to each
 * frontend state by tearing down or (re)building the ring connection
 * and mirroring an appropriate state back into the store.
 */
static void frontend_changed(struct xenbus_device *dev,
			     enum xenbus_state frontend_state)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);
	int err;

	switch (frontend_state) {
	case XenbusStateInitialising:
		if (dev->state == XenbusStateClosed) {
			pr_info("%s: prepare for reconnect\n", dev->nodename);
			xenbus_switch_state(dev, XenbusStateInitWait);
		}
		break;

	case XenbusStateInitialised:
	case XenbusStateConnected:
		/*
		 * Ensure we connect even when two watches fire in
		 * close succession and we miss the intermediate value
		 * of frontend_state.
		 */
		if (dev->state == XenbusStateConnected)
			break;

		/*
		 * Enforce precondition before potential leak point.
		 * xen_blkif_disconnect() is idempotent.
		 */
		err = xen_blkif_disconnect(be->blkif);
		if (err) {
			xenbus_dev_fatal(dev, err, "pending I/O");
			break;
		}

		err = connect_ring(be);
		if (err) {
			/*
			 * Clean up so that memory resources can be used by
			 * other devices. connect_ring reported already error.
			 */
			xen_blkif_disconnect(be->blkif);
			break;
		}
		xen_update_blkif_status(be->blkif);
		break;

	case XenbusStateClosing:
		xenbus_switch_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		xen_blkif_disconnect(be->blkif);
		xenbus_switch_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		fallthrough;
		/* if not online */
	case XenbusStateUnknown:
		/* implies xen_blkif_disconnect() via xen_blkbk_remove() */
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}
/* Once a memory pressure is detected, squeeze free page pools for a while. */ staticunsignedint buffer_squeeze_duration_ms = 10;
module_param_named(buffer_squeeze_duration_ms,
buffer_squeeze_duration_ms, int, 0644);
MODULE_PARM_DESC(buffer_squeeze_duration_ms, "Duration in ms to squeeze pages buffer when a memory pressure is detected");
/*
 * Callback received when the memory pressure is detected.
 *
 * Records a deadline (now + buffer_squeeze_duration_ms) on the blkif;
 * until that time the I/O paths squeeze their free-page pools. Bails
 * out quietly if the device has no backend_info attached yet.
 */
static void reclaim_memory(struct xenbus_device *dev)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	if (!be)
		return;
	be->blkif->buffer_squeeze_end = jiffies +
		msecs_to_jiffies(buffer_squeeze_duration_ms);
}
/* ** Connection ** */
/* * Write the physical details regarding the block device to the store, and * switch to Connected state.
*/ staticvoid connect(struct backend_info *be)
{ struct xenbus_transaction xbt; int err; struct xenbus_device *dev = be->dev;
pr_debug("%s %s\n", __func__, dev->otherend);
/* Supply the information about the device the frontend needs */
again:
err = xenbus_transaction_start(&xbt); if (err) {
xenbus_dev_fatal(dev, err, "starting transaction"); return;
}
/* If we can't advertise it is OK. */
xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);
xen_blkbk_discard(xbt, be);
xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.