/*
 * One list to contain all tape devices of all disciplines, so
 * we can assign the devices to minor numbers of the same major.
 * The list is protected by the rwlock below.
 */
static LIST_HEAD(tape_device_list);
static DEFINE_RWLOCK(tape_device_lock);
/* * Some channel attached tape specific attributes. * * FIXME: In the future the first_minor and blocksize attribute should be * replaced by a link to the cdev tree.
*/ static ssize_t
tape_medium_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{ struct tape_device *tdev;
oldstate = device->medium_state; if (oldstate == newstate) return;
device->medium_state = newstate; switch(newstate){ case MS_UNLOADED:
device->tape_generic_status |= GMT_DR_OPEN(~0); if (oldstate == MS_LOADED)
tape_med_state_work(device, MS_UNLOADED); break; case MS_LOADED:
device->tape_generic_status &= ~GMT_DR_OPEN(~0); if (oldstate == MS_UNLOADED)
tape_med_state_work(device, MS_LOADED); break; default: break;
}
wake_up(&device->state_change_wq);
}
/* * Stop running ccw. Has to be called with the device lock held.
*/ staticint
__tape_cancel_io(struct tape_device *device, struct tape_request *request)
{ int retries; int rc;
/* Check if interrupt has already been processed */ if (request->callback == NULL) return 0;
switch (rc) { case 0:
request->status = TAPE_REQUEST_DONE; return 0; case -EBUSY:
request->status = TAPE_REQUEST_CANCEL;
schedule_delayed_work(&device->tape_dnr, 0); return 0; case -ENODEV:
DBF_EXCEPTION(2, "device gone, retry\n"); break; case -EIO:
DBF_EXCEPTION(2, "I/O error, retry\n"); break; default:
BUG();
}
}
return rc;
}
/* * Add device into the sorted list, giving it the first * available minor number.
*/ staticint
tape_assign_minor(struct tape_device *device)
{ struct tape_device *tmp; int minor;
minor = 0;
write_lock(&tape_device_lock);
list_for_each_entry(tmp, &tape_device_list, node) { if (minor < tmp->first_minor) break;
minor += TAPE_MINORS_PER_DEV;
} if (minor >= 256) {
write_unlock(&tape_device_lock); return -ENODEV;
}
device->first_minor = minor;
list_add_tail(&device->node, &tmp->node);
write_unlock(&tape_device_lock); return 0;
}
/*
 * Remove the device from the device list and give up its minor
 * number range again.
 */
static void
tape_remove_minor(struct tape_device *device)
{
	write_lock(&tape_device_lock);
	list_del_init(&device->node);
	device->first_minor = -1;	/* mark minor range as unassigned */
	write_unlock(&tape_device_lock);
}
/* * Set a device online. * * This function is called by the common I/O layer to move a device from the * detected but offline into the online state. * If we return an error (RC < 0) the device remains in the offline state. This * can happen if the device is assigned somewhere else, for example.
*/ int
tape_generic_online(struct tape_device *device, struct tape_discipline *discipline)
{ int rc;
/* * Set device offline. * * Called by the common I/O layer if the drive should set offline on user * request. We may prevent this by returning an error. * Manual offline is only allowed while the drive is not in use.
*/ int
tape_generic_offline(struct ccw_device *cdev)
{ struct tape_device *device;
device = dev_get_drvdata(&cdev->dev); if (!device) { return -ENODEV;
}
/* * Get a reference to an existing device structure. This will automatically * increment the reference count.
*/ struct tape_device *
tape_get_device(struct tape_device *device)
{ int count;
/* * Decrease the reference counter of a devices structure. If the * reference counter reaches zero free the device structure. * The function returns a NULL pointer to be used by the caller * for clearing reference pointers.
*/ void
tape_put_device(struct tape_device *device)
{ int count;
/* * Driverfs tape remove function. * * This function is called whenever the common I/O layer detects the device * gone. This can happen at any time and we cannot refuse.
*/ void
tape_generic_remove(struct ccw_device *cdev)
{ struct tape_device * device;
spin_lock_irq(get_ccwdev_lock(device->cdev)); switch (device->tape_state) { case TS_INIT:
tape_state_set(device, TS_NOT_OPER);
fallthrough; case TS_NOT_OPER: /* * Nothing to do.
*/
spin_unlock_irq(get_ccwdev_lock(device->cdev)); break; case TS_UNUSED: /* * Need only to release the device.
*/
tape_state_set(device, TS_NOT_OPER);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
tape_cleanup_device(device); break; default: /* * There may be requests on the queue. We will not get * an interrupt for a request that was running. So we * just post them all as I/O errors.
*/
DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
device->cdev_id);
pr_warn("%s: A tape unit was detached while in use\n",
dev_name(&device->cdev->dev));
tape_state_set(device, TS_NOT_OPER);
__tape_discard_requests(device);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
tape_cleanup_device(device);
}
DBF_LH(6, "__tape_start_next_request(%p)\n", device); /* * Try to start each request on request queue until one is * started successful.
*/
list_for_each_safe(l, n, &device->req_queue) {
request = list_entry(l, struct tape_request, list);
/* * Avoid race condition if bottom-half was triggered more than * once.
*/ if (request->status == TAPE_REQUEST_IN_IO) return; /* * Request has already been stopped. We have to wait until * the request is removed from the queue in the interrupt * handling.
*/ if (request->status == TAPE_REQUEST_DONE) return;
/* * We wanted to cancel the request but the common I/O layer * was busy at that time. This can only happen if this * function is called by delayed_next_request. * Otherwise we start the next request on the queue.
*/ if (request->status == TAPE_REQUEST_CANCEL) {
rc = __tape_cancel_io(device, request);
} else {
rc = __tape_start_io(device, request);
} if (rc == 0) return;
/* * I/O helper function. Adds the request to the request queue * and starts it if the tape is idle. Has to be called with * the device lock held.
*/ staticint
__tape_start_request(struct tape_device *device, struct tape_request *request)
{ int rc;
switch (request->op) { case TO_MSEN: case TO_ASSIGN: case TO_UNASSIGN: case TO_READ_ATTMSG: case TO_RDC: if (device->tape_state == TS_INIT) break; if (device->tape_state == TS_UNUSED) break;
fallthrough; default: if (device->tape_state == TS_BLKUSE) break; if (device->tape_state != TS_IN_USE) return -ENODEV;
}
/* Increase use count of device for the added request. */
request->device = tape_get_device(device);
if (list_empty(&device->req_queue)) { /* No other requests are on the queue. Start this one. */
rc = __tape_start_io(device, request); if (rc) return rc;
/*
 * Add the request to the request queue, try to start it if the
 * tape is idle. Return without waiting for end of i/o.
 */
int
tape_do_io_async(struct tape_device *device, struct tape_request *request)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Queue the request and start it if the device is idle. */
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	return rc;
}
/*
 * tape_do_io/__tape_wake_up
 * Completion callback used by tape_do_io: clear the callback pointer
 * (tape_do_io waits for request->callback == NULL as its completion
 * condition) and wake up the waiter sleeping on the device wait queue.
 */
static void
__tape_wake_up(struct tape_request *request, void *data)
{
	request->callback = NULL;
	wake_up((wait_queue_head_t *) data);
}
int
tape_do_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Install the wake-up callback before queueing the request. */
	request->callback = __tape_wake_up;
	request->callback_data = &device->wait_queue;
	/* Add request to request queue and try to start it. */
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc)
		return rc;
	/* Request added to the queue. Wait for its completion. */
	wait_event(device->wait_queue, (request->callback == NULL));
	/* The interrupt handler has filled in request->rc by now. */
	return request->rc;
}
/*
 * tape_do_io_interruptible/__tape_wake_up_interruptible
 * Completion callback used by tape_do_io_interruptible: clear the
 * callback pointer (the completion condition the waiter sleeps on)
 * and wake up the interruptibly sleeping waiter.
 */
static void
__tape_wake_up_interruptible(struct tape_request *request, void *data)
{
	request->callback = NULL;
	wake_up_interruptible((wait_queue_head_t *) data);
}
int
tape_do_io_interruptible(struct tape_device *device, struct tape_request *request)
{ int rc;
spin_lock_irq(get_ccwdev_lock(device->cdev)); /* Setup callback */
request->callback = __tape_wake_up_interruptible;
request->callback_data = &device->wait_queue;
rc = __tape_start_request(device, request);
spin_unlock_irq(get_ccwdev_lock(device->cdev)); if (rc) return rc; /* Request added to the queue. Wait for its completion. */
rc = wait_event_interruptible(device->wait_queue,
(request->callback == NULL)); if (rc != -ERESTARTSYS) /* Request finished normally. */ return request->rc;
/* Interrupted by a signal. We have to stop the current request. */
spin_lock_irq(get_ccwdev_lock(device->cdev));
rc = __tape_cancel_io(device, request);
spin_unlock_irq(get_ccwdev_lock(device->cdev)); if (rc == 0) { /* Wait for the interrupt that acknowledges the halt. */ do {
rc = wait_event_interruptible(
device->wait_queue,
(request->callback == NULL)
);
} while (rc == -ERESTARTSYS);
/* On special conditions irb is an error pointer */ if (IS_ERR(irb)) { /* FIXME: What to do with the request? */ switch (PTR_ERR(irb)) { case -ETIMEDOUT:
DBF_LH(1, "(%08x): Request timed out\n",
device->cdev_id);
fallthrough; case -EIO:
__tape_end_request(device, request, -EIO); break; default:
DBF_LH(1, "(%08x): Unexpected i/o error %li\n",
device->cdev_id, PTR_ERR(irb));
} return;
}
/* * If the condition code is not zero and the start function bit is * still set, this is an deferred error and the last start I/O did * not succeed. At this point the condition that caused the deferred * error might still apply. So we just schedule the request to be * started later.
*/ if (irb->scsw.cmd.cc != 0 &&
(irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
(request->status == TAPE_REQUEST_IN_IO)) {
DBF_EVENT(3,"(%08x): deferred cc=%i, fctl=%i. restarting\n",
device->cdev_id, irb->scsw.cmd.cc, irb->scsw.cmd.fctl);
request->status = TAPE_REQUEST_QUEUED;
schedule_delayed_work(&device->tape_dnr, HZ); return;
}
/* May be an unsolicited irq */ if(request != NULL)
request->rescnt = irb->scsw.cmd.count; elseif ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) &&
!list_empty(&device->req_queue)) { /* Not Ready to Ready after long busy ? */ struct tape_request *req;
req = list_entry(device->req_queue.next, struct tape_request, list); if (req->status == TAPE_REQUEST_LONG_BUSY) {
DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id); if (timer_delete(&device->lb_timeout)) {
tape_put_device(device);
__tape_start_next_request(device);
} return;
}
} if (irb->scsw.cmd.dstat != 0x0c) { /* Set the 'ONLINE' flag depending on sense byte 1 */ if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
device->tape_generic_status |= GMT_ONLINE(~0); else
device->tape_generic_status &= ~GMT_ONLINE(~0);
/* * Any request that does not come back with channel end * and device end is unusual. Log the sense data.
*/
DBF_EVENT(3,"-- Tape Interrupthandler --\n");
tape_dump_sense_dbf(device, request, irb);
} else { /* Upon normal completion the device _is_ online */
device->tape_generic_status |= GMT_ONLINE(~0);
} if (device->tape_state == TS_NOT_OPER) {
DBF_EVENT(6, "tape:device is not operational\n"); return;
}
/* * Request that were canceled still come back with an interrupt. * To detect these request the state will be set to TAPE_REQUEST_DONE.
*/ if(request != NULL && request->status == TAPE_REQUEST_DONE) {
__tape_end_request(device, request, -EIO); return;
}
rc = device->discipline->irq(device, request, irb); /* * rc < 0 : request finished unsuccessfully. * rc == TAPE_IO_SUCCESS: request finished successfully. * rc == TAPE_IO_PENDING: request is still running. Ignore rc. * rc == TAPE_IO_RETRY: request finished but needs another go. * rc == TAPE_IO_STOP: request needs to get terminated.
*/ switch (rc) { case TAPE_IO_SUCCESS: /* Upon normal completion the device _is_ online */
device->tape_generic_status |= GMT_ONLINE(~0);
__tape_end_request(device, request, rc); break; case TAPE_IO_PENDING: break; case TAPE_IO_LONG_BUSY:
device->lb_timeout.expires = jiffies +
LONG_BUSY_TIMEOUT * HZ;
DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
add_timer(&device->lb_timeout);
request->status = TAPE_REQUEST_LONG_BUSY; break; case TAPE_IO_RETRY:
rc = __tape_start_io(device, request); if (rc)
__tape_end_request(device, request, rc); break; case TAPE_IO_STOP:
rc = __tape_cancel_io(device, request); if (rc)
__tape_end_request(device, request, rc); break; default: if (rc > 0) {
DBF_EVENT(6, "xunknownrc\n");
__tape_end_request(device, request, -EIO);
} else {
__tape_end_request(device, request, rc);
} break;
}
}
/* * Tape device open function used by tape_char frontend.
*/ int
tape_open(struct tape_device *device)
{ int rc;
/*
 * Tape device release function used by tape_char frontend.
 * Leaves the TS_IN_USE state and drops the reference on the
 * discipline module taken at open time.
 */
int
tape_release(struct tape_device *device)
{
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	if (device->tape_state == TS_IN_USE)
		tape_state_set(device, TS_UNUSED);
	module_put(device->discipline->owner);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	return 0;
}
/* * Execute a magnetic tape command a number of times.
*/ int
tape_mtop(struct tape_device *device, int mt_op, int mt_count)
{
tape_mtop_fn fn; int rc;
/* Get rid of the frontends */
tapechar_exit();
tape_proc_cleanup();
debug_unregister (TAPE_DBF_AREA);
}
/* Module metadata for modinfo and license checking. */
MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and " "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)");
MODULE_DESCRIPTION("Linux on zSeries channel attached tape device driver");
MODULE_LICENSE("GPL");
/*
 * NOTE(review): The text below is non-source residue from the web page
 * this file was extracted from; it is not part of the driver. English
 * translation of the original German disclaimer: "The information on
 * this website was compiled carefully to the best of our knowledge.
 * However, neither completeness, nor correctness, nor quality of the
 * provided information is guaranteed. Note: the colored syntax
 * highlighting and the measurement are still experimental."
 */