/* * Allocate memory for a new device structure.
*/ struct dasd_block *dasd_alloc_block(void)
{ struct dasd_block *block;
block = kzalloc(sizeof(*block), GFP_ATOMIC); if (!block) return ERR_PTR(-ENOMEM); /* open_count = 0 means device online but not in use */
atomic_set(&block->open_count, -1);
/*
 * Release the memory that was allocated for a device structure.
 * Passing NULL is harmless (kfree(NULL) is a no-op).
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}
EXPORT_SYMBOL_GPL(dasd_free_block);
/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);
	device->state = DASD_STATE_KNOWN;
	return 0;
}
/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	device->state = DASD_STATE_NEW;

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}
if (!base_dentry) return NULL;
pde = debugfs_create_dir(name, base_dentry); if (!pde || IS_ERR(pde)) return NULL; return pde;
}
/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	struct dasd_block *block = device->block;
	int rc = 0;

	/* Allocate and register gendisk structure. */
	if (block) {
		rc = dasd_gendisk_alloc(block);
		if (rc)
			return rc;
		block->debugfs_dentry =
			dasd_debugfs_setup(block->gdp->disk_name,
					   dasd_debugfs_root_entry);
		dasd_profile_init(&block->profile, block->debugfs_dentry);
		if (dasd_global_profile_level == DASD_PROFILE_ON)
			dasd_profile_on(&device->block->profile);
	}
	device->debugfs_dentry =
		dasd_debugfs_setup(dev_name(&device->cdev->dev),
				   dasd_debugfs_root_entry);
	dasd_profile_init(&device->profile, device->debugfs_dentry);
	dasd_hosts_init(device->debugfs_dentry, device);

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;

	return rc;
}
/* * Release the irq line for the device. Terminate any running i/o.
*/ staticint dasd_state_basic_to_known(struct dasd_device *device)
{ int rc;
if (device->discipline->basic_to_known) {
rc = device->discipline->basic_to_known(device); if (rc) return rc;
}
/* * Do the initial analysis. The do_analysis function may return * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC * until the discipline decides to continue the startup sequence * by calling the function dasd_change_state. The eckd disciplines * uses this to start a ccw that detects the format. The completion * interrupt for this detection ccw uses the kernel event daemon to * trigger the call to dasd_change_state. All this is done in the * discipline code, see dasd_eckd.c. * After the analysis ccw is done (do_analysis returned 0) the block * device is setup. * In case the analysis returns an error, the device setup is stopped * (a fake disk was already added to allow formatting).
*/ staticint dasd_state_basic_to_ready(struct dasd_device *device)
{ struct dasd_block *block = device->block; struct queue_limits lim; int rc = 0;
/* make disk known with correct capacity */ if (!block) {
device->state = DASD_STATE_READY; goto out;
}
if (block->base->discipline->do_analysis != NULL)
rc = block->base->discipline->do_analysis(block); if (rc) { if (rc == -EAGAIN) return rc;
device->state = DASD_STATE_UNFMT;
kobject_uevent(&disk_to_dev(device->block->gdp)->kobj,
KOBJ_CHANGE); goto out;
}
lim = queue_limits_start_update(block->gdp->queue);
lim.max_dev_sectors = device->discipline->max_sectors(block);
lim.max_hw_sectors = lim.max_dev_sectors;
lim.logical_block_size = block->bp_block; /* * Adjust dma_alignment to match block_size - 1 * to ensure proper buffer alignment checks in the block layer.
*/
lim.dma_alignment = lim.logical_block_size - 1;
if (device->discipline->has_discard) { unsignedint max_bytes;
lim.discard_granularity = block->bp_block;
/* Calculate max_discard_sectors and make it PAGE aligned */
max_bytes = USHRT_MAX * block->bp_block;
max_bytes = ALIGN_DOWN(max_bytes, PAGE_SIZE);
out: if (device->discipline->basic_to_ready)
rc = device->discipline->basic_to_ready(device); return rc;
}
/*
 * Return non-zero when the device ccw queue - and, if a block layer is
 * attached, the block ccw queue as well - is empty.
 */
static inline int _wait_for_empty_queues(struct dasd_device *device)
{
	if (device->block)
		return list_empty(&device->ccw_queue) &&
			list_empty(&device->block->ccw_queue);
	else
		return list_empty(&device->ccw_queue);
}
/* * Remove device from block device layer. Destroy dirty buffers. * Forget format information. Check if the target level is basic * and if it is create fake disk for formatting.
*/ staticint dasd_state_ready_to_basic(struct dasd_device *device)
{ int rc;
/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}
/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if ((device->features & DASD_FEATURE_USERAW)) {
			kobject_uevent(&disk_to_dev(device->block->gdp)->kobj,
				       KOBJ_CHANGE);
			return 0;
		}
		disk_uevent(file_bdev(device->block->bdev_file)->bd_disk,
			    KOBJ_CHANGE);
	}
	return 0;
}
/* * Stop the requeueing of requests again.
*/ staticint dasd_state_online_to_ready(struct dasd_device *device)
{ int rc;
if (device->discipline->online_to_ready) {
rc = device->discipline->online_to_ready(device); if (rc) return rc;
}
/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc == -EAGAIN)
		return;
	if (rc)
		device->target = device->state;

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);
}
/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);

	mutex_lock(&device->state_mutex);
	dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_schedule_device_bh(device);
	/* Drop the reference taken by dasd_kick_device. */
	dasd_put_device(device);
}
/*
 * Schedule do_kick_device via the kernel event daemon. The device
 * reference taken here is released by the worker, or immediately when
 * the work item was already pending.
 */
void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
	if (!schedule_work(&device->kick_work))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_kick_device);
/*
 * dasd_reload_device will schedule a call to do_reload_device to the kernel
 * event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  reload_device);

	device->discipline->reload(device);
	/* Drop the reference taken by dasd_reload_device. */
	dasd_put_device(device);
}
/*
 * Schedule do_reload_device via the kernel event daemon. The reference
 * taken here is released by the worker, or right away when the work
 * item was already queued.
 */
void dasd_reload_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_reload_device to the kernel event daemon. */
	if (!schedule_work(&device->reload_device))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_reload_device);
/*
 * Set the target state for a device and start the state change.
 * Runs under the device state mutex; holds a temporary device
 * reference for the duration of the transition.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	dasd_get_device(device);
	mutex_lock(&device->state_mutex);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		/* Wake waiters if we are already at the new target. */
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_put_device(device);
}
/*
 * Enable devices with device numbers in [from..to].
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}
/*
 * Drive a device towards DASD_STATE_ONLINE and wait until the state
 * machine has settled. Falls back to DASD_STATE_NEW when no discipline
 * could be found for the device.
 */
void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_reload_device(device);
	if (device->discipline->kick_validate)
		device->discipline->kick_validate(device);
}
EXPORT_SYMBOL(dasd_enable_device);
/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;
	struct dasd_device *device;

	/* count the length of the chanq for statistics */
	counter = 0;
	if (dasd_global_profile_level || block->profile.data)
		list_for_each(l, &block->ccw_queue)
			if (++counter >= 31)
				break;

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		dasd_global_profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			dasd_global_profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		block->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			block->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&block->profile.lock);

	/*
	 * We count the request for the start device, even though it may run on
	 * some other device due to error recovery. This way we make sure that
	 * we count each request only once.
	 */
	device = cqr->startdev;
	if (!device->profile.data)
		return;

	spin_lock(get_ccwdev_lock(device->cdev));
	counter = 1; /* request is not yet queued on the start device */
	list_for_each(l, &device->ccw_queue)
		if (++counter >= 31)
			break;
	spin_unlock(get_ccwdev_lock(device->cdev));

	spin_lock(&device->profile.lock);
	device->profile.data->dasd_io_nr_req[counter]++;
	if (rq_data_dir(req) == READ)
		device->profile.data->dasd_read_nr_req[counter]++;
	spin_unlock(&device->profile.lock);
}
/*
 * Add profiling information for cqr after execution.
 */

/*
 * Compute the histogram bucket index (0..31) for 'value':
 * the position of the highest set bit above bit 1.
 * NOTE: 'value' is evaluated on every loop iteration - do not pass
 * expressions with side effects.
 */
#define dasd_profile_counter(value, index)			   \
{								   \
	for (index = 0; index < 31 && value >> (2+index); index++) \
		;						   \
}
staticvoid dasd_profile_end_add_data(struct dasd_profile_info *data, int is_alias, int is_tpm, int is_read, long sectors, int sectors_ind, int tottime_ind, int tottimeps_ind, int strtime_ind, int irqtime_ind, int irqtimeps_ind, int endtime_ind)
{ /* in case of an overflow, reset the whole profile */ if (data->dasd_io_reqs == UINT_MAX) {
memset(data, 0, sizeof(*data));
ktime_get_real_ts64(&data->starttod);
}
data->dasd_io_reqs++;
data->dasd_io_sects += sectors; if (is_alias)
data->dasd_io_alias++; if (is_tpm)
data->dasd_io_tpm++;
/*
 * Terminate the current i/o and set the request to clear_pending.
 * The timer keeps the device running. Note that ccw_device_clear can
 * fail if the i/o subsystem is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;

	/* Refuse to operate on an invalid cqr. */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;

	device = (struct dasd_device *) cqr->startdev;
	retries = 0;
	/* Give the clear a handful of attempts while the request is in I/O. */
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:
			/* termination successful */
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EINVAL:
			/*
			 * device not valid so no I/O could be running
			 * handle CQR as termination successful
			 */
			cqr->status = DASD_CQR_CLEARED;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			/* no retries for invalid devices */
			cqr->retries = -1;
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "EINVAL, handle as terminated");
			/* fake rc to success */
			rc = 0;
			break;
		default:
			dev_err(&device->cdev->dev,
				"Unexpected error during request termination %d\n", rc);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_device_bh(device);
	return rc;
}
EXPORT_SYMBOL(dasd_term_IO);
/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc) {
		cqr->intrc = rc;
		return rc;
	}
	device = (struct dasd_device *) cqr->startdev;
	/* Reject requests that would run with a stolen device lock. */
	if (((cqr->block &&
	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
			      "because of stolen lock", cqr);
		cqr->status = DASD_CQR_ERROR;
		cqr->intrc = -EPERM;
		return -EPERM;
	}
	if (cqr->retries < 0) {
		dev_err(&device->cdev->dev,
			"Start I/O ran out of retries\n");
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_tod_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
		cqr->lpm &= dasd_path_get_opm(device);
		if (!cqr->lpm)
			cqr->lpm = dasd_path_get_opm(device);
	}
	/*
	 * remember the amount of formatted tracks to prevent double format on
	 * ESE devices
	 */
	if (cqr->block)
		cqr->trkcount = atomic_read(&cqr->block->trkcount);

	if (cqr->cpmode == 1) {
		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
					 (long) cqr, cqr->lpm);
	} else {
		rc = ccw_device_start(device->cdev, cqr->cpaddr,
				      (long) cqr, cqr->lpm, 0);
	}
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a subset of the
		 * available paths and all these paths are gone. If the lpm of
		 * this request was only a subset of the opm (e.g. the ppm) then
		 * we just do a retry with all available paths.
		 * If we already use the full opm, something is amiss, and we
		 * need a full path verification.
		 */
		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
			DBF_DEV_EVENT(DBF_WARNING, device,
				      "start_IO: selected paths gone (%x)",
				      cqr->lpm);
		} else if (cqr->lpm != dasd_path_get_opm(device)) {
			cqr->lpm = dasd_path_get_opm(device);
			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
				      "start_IO: selected paths gone,"
				      " retry on all paths");
		} else {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "start_IO: all paths in opm gone,"
				      " do path verification");
			dasd_generic_last_path_gone(device);
			dasd_path_no_path(device);
			dasd_path_set_tbvpm(device,
					    ccw_device_get_path_mask(
						    device->cdev));
		}
		break;
	case -ENODEV:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -ENODEV device gone, retry");
		/* this is equivalent to CC=3 for SSCH report this to EER */
		dasd_handle_autoquiesce(device, cqr, DASD_EER_STARTIO);
		break;
	case -EIO:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EIO device gone, retry");
		break;
	case -EINVAL:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EINVAL device currently "
			      "not accessible");
		break;
	default:
		dev_err(&device->cdev->dev,
			"Unexpected error during request start %d", rc);
		BUG();
		break;
	}
	cqr->intrc = rc;
	return rc;
}
EXPORT_SYMBOL(dasd_start_IO);
/* * Timeout function for dasd devices. This is used for different purposes * 1) missing interrupt handler for normal operation * 2) delayed start of request where start_IO failed with -EBUSY * 3) timeout for missing state change interrupts * The head of the ccw queue will have status DASD_CQR_IN_IO for 1), * DASD_CQR_QUEUED for 2) and 3).
*/ staticvoid dasd_device_timeout(struct timer_list *t)
{ unsignedlong flags; struct dasd_device *device;
now = get_tod_clock(); /* check for conditions that should be handled immediately */ if (!cqr ||
!(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
scsw_cstat(&irb->scsw) == 0)) { if (cqr)
memcpy(&cqr->irb, irb, sizeof(*irb));
device = dasd_device_from_cdev_locked(cdev); if (IS_ERR(device)) return; /* ignore unsolicited interrupts for DIAG discipline */ if (device->discipline == dasd_diag_discipline_pointer) {
dasd_put_device(device); return;
}
/* * In some cases 'File Protected' or 'No Record Found' errors * might be expected and debug log messages for the * corresponding interrupts shouldn't be written then. * Check if either of the according suppress bits is set.
*/
sense = dasd_get_sense(irb); if (sense) {
it_suppressed = (sense[1] & SNS1_INV_TRACK_FORMAT) &&
!(sense[2] & SNS2_ENV_DATA_PRESENT) &&
test_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags);
nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
/* * Extent pool probably out-of-space. * Stop device and check exhaust level.
*/ if (dasd_ese_oos_cond(sense)) {
dasd_generic_space_exhaust(device, cqr);
device->discipline->ext_pool_exhaust(device, cqr);
dasd_put_device(device); return;
}
} if (!(it_suppressed || nrf_suppressed))
device->discipline->dump_sense_dbf(device, irb, "int");
/* * If we have an error on a dasd_block layer request then we cancel * and return all further requests from the same dasd_block as well.
*/ staticvoid __dasd_device_recovery(struct dasd_device *device, struct dasd_ccw_req *ref_cqr)
{ struct list_head *l, *n; struct dasd_ccw_req *cqr;
/* * only requeue request that came from the dasd_block layer
*/ if (!ref_cqr->block) return;
/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
					    struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);

		/* Skip any non-final request. */
		if (cqr->status == DASD_CQR_QUEUED ||
		    cqr->status == DASD_CQR_IN_IO ||
		    cqr->status == DASD_CQR_CLEAR_PENDING)
			continue;
		if (cqr->status == DASD_CQR_ERROR) {
			__dasd_device_recovery(device, cqr);
		}
		/* Rechain finished requests to final queue */
		list_move_tail(&cqr->devlist, final_queue);
	}
}
/*
 * Map a finished request's internal completion state to its final CQR
 * status and invoke the completion callback, if any.
 */
static void __dasd_process_cqr(struct dasd_device *device,
			       struct dasd_ccw_req *cqr)
{
	switch (cqr->status) {
	case DASD_CQR_SUCCESS:
		cqr->status = DASD_CQR_DONE;
		break;
	case DASD_CQR_ERROR:
		cqr->status = DASD_CQR_NEED_ERP;
		break;
	case DASD_CQR_CLEARED:
		cqr->status = DASD_CQR_TERMINATED;
		break;
	default:
		dev_err(&device->cdev->dev,
			"Unexpected CQR status %02x", cqr->status);
		BUG();
	}
	if (cqr->callback)
		cqr->callback(cqr, cqr->callback_data);
}
/* * the cqrs from the final queue are returned to the upper layer * by setting a dasd_block state and calling the callback function
*/ staticvoid __dasd_device_process_final_queue(struct dasd_device *device, struct list_head *final_queue)
{ struct list_head *l, *n; struct dasd_ccw_req *cqr; struct dasd_block *block;
/*
 * check if device should be autoquiesced due to too many timeouts
 */
static void __dasd_device_check_autoquiesce_timeout(struct dasd_device *device,
						    struct dasd_ccw_req *cqr)
{
	if ((device->default_retries - cqr->retries) >= device->aq_timeouts)
		dasd_handle_autoquiesce(device, cqr, DASD_EER_TIMEOUTS);
}
/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static void __dasd_device_check_expire(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
		if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
			/*
			 * IO in safe offline processing should not
			 * run out of retries
			 */
			cqr->retries++;
		}
		if (device->discipline->term_IO(cqr) != 0) {
			/* Hmpf, try again in 5 sec */
			dev_err(&device->cdev->dev,
				"CQR timed out (%lus) but cannot be ended, retrying in 5s\n",
				(cqr->expires / HZ));
			cqr->expires += 5*HZ;
			dasd_device_set_timer(device, 5*HZ);
		} else {
			dev_err(&device->cdev->dev,
				"CQR timed out (%lus), %i retries remaining\n",
				(cqr->expires / HZ), cqr->retries);
		}
		__dasd_device_check_autoquiesce_timeout(device, cqr);
	}
}
/*
 * return 1 when device is not eligible for IO
 */
static int __dasd_device_is_unusable(struct dasd_device *device,
				     struct dasd_ccw_req *cqr)
{
	int mask = ~(DASD_STOPPED_DC_WAIT | DASD_STOPPED_NOSPC);

	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
	    !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
		/*
		 * dasd is being set offline
		 * but it is no safe offline where we have to allow I/O
		 */
		return 1;
	}
	if (device->stopped) {
		if (device->stopped & mask) {
			/* stopped and CQR will not change that. */
			return 1;
		}
		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
			/* CQR is not able to change device to
			 * operational. */
			return 1;
		}
		/* CQR required to get device operational. */
	}
	return 0;
}
/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static void __dasd_device_start_head(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if (cqr->status != DASD_CQR_QUEUED)
		return;
	/* if device is not usable return request to upper layer */
	if (__dasd_device_is_unusable(device, cqr)) {
		cqr->intrc = -EAGAIN;
		cqr->status = DASD_CQR_CLEARED;
		dasd_schedule_device_bh(device);
		return;
	}

	rc = device->discipline->start_IO(cqr);
	if (rc == 0)
		dasd_device_set_timer(device, cqr->expires);
	else if (rc == -EACCES) {
		dasd_schedule_device_bh(device);
	} else
		/* Hmpf, try again in 1/2 sec */
		dasd_device_set_timer(device, 50);
}
staticvoid __dasd_device_check_path_events(struct dasd_device *device)
{
__u8 tbvpm, fcsecpm; int rc;
/*
 * Go through all request on the dasd_device request queue,
 * terminate them on the cdev if necessary, and return them to the
 * submitting layer via callback.
 * Note:
 * Make sure that all 'submitting layers' still exist when
 * this function is called!. In other words, when 'device' is a base
 * device then all block layer requests must have been removed before
 * via dasd_flush_block_queue.
 */
int dasd_flush_device_queue(struct dasd_device *device)
{
	struct list_head flush_queue;
	struct dasd_ccw_req *cqr, *n;
	int rc = 0;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Flushing the DASD request queue failed\n");
				/* stop flush processing */
				goto finished;
			}
			break;
		case DASD_CQR_QUEUED:
			cqr->stopclk = get_tod_clock();
			cqr->status = DASD_CQR_CLEARED;
			break;
		default:
			/* no need to modify the others */
			break;
		}
		list_move_tail(&cqr->devlist, &flush_queue);
	}
finished:
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/*
	 * After this point all requests must be in state CLEAR_PENDING,
	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
	 * one of the others.
	 */
	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
	/*
	 * Now set each request back to TERMINATED, DONE or NEED_ERP
	 * and call the callback function of flushed requests
	 */
	__dasd_device_process_final_queue(device, &flush_queue);
	return rc;
}
EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
/* * Acquire the device lock and process queues for the device.
*/ staticvoid dasd_device_tasklet(unsignedlong data)
{ struct dasd_device *device = (struct dasd_device *) data; struct list_head final_queue;
atomic_set (&device->tasklet_scheduled, 0);
INIT_LIST_HEAD(&final_queue);
spin_lock_irq(get_ccwdev_lock(device->cdev)); /* Check expire time of first request on the ccw queue. */
__dasd_device_check_expire(device); /* find final requests on ccw queue */
__dasd_device_process_ccw_queue(device, &final_queue);
__dasd_device_check_path_events(device);
spin_unlock_irq(get_ccwdev_lock(device->cdev)); /* Now call the callback function of requests with final status */
__dasd_device_process_final_queue(device, &final_queue);
spin_lock_irq(get_ccwdev_lock(device->cdev)); /* Now check if the head of the ccw queue needs to be started. */
__dasd_device_start_head(device);
spin_unlock_irq(get_ccwdev_lock(device->cdev)); if (waitqueue_active(&shutdown_waitq))
wake_up(&shutdown_waitq);
dasd_put_device(device);
}
/*
 * Schedules a call to dasd_tasklet over the device tasklet.
 */
void dasd_schedule_device_bh(struct dasd_device *device)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
		return;
	/* The reference is dropped by the tasklet after it has run. */
	dasd_get_device(device);
	tasklet_hi_schedule(&device->tasklet);
}
EXPORT_SYMBOL(dasd_schedule_device_bh);
/*
 * Clear the given stop bits on the device and wake up waiters once no
 * stop reason remains.
 */
void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
{
	device->stopped &= ~bits;
	if (!device->stopped)
		wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
/*
 * Queue a request to the head of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_head(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
EXPORT_SYMBOL(dasd_add_request_head);
/*
 * Queue a request to the tail of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_tail(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
EXPORT_SYMBOL(dasd_add_request_tail);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.