// SPDX-License-Identifier: GPL-2.0-or-later /* * Parallel SCSI (SPI) transport specific attributes exported to sysfs. * * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved. * Copyright (c) 2004, 2005 James Bottomley <James.Bottomley@SteelEye.com>
*/ #include <linux/ctype.h> #include <linux/init.h> #include <linux/module.h> #include <linux/workqueue.h> #include <linux/blkdev.h> #include <linux/mutex.h> #include <linux/sysfs.h> #include <linux/slab.h> #include <linux/suspend.h> #include <scsi/scsi.h> #include"scsi_priv.h" #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_spi.h>
#define SPI_NUM_ATTRS 14 /* increase this if you add attributes */ #define SPI_OTHER_ATTRS 1 /* Increase this if you add "always
* on" attributes */ #define SPI_HOST_ATTRS 1
#define SPI_MAX_ECHO_BUFFER_SIZE 4096
#define DV_LOOPS 3 #define DV_TIMEOUT (10*HZ) #define DV_RETRIES 3 /* should only need at most
* two cc/ua clears */
/* Private data accessors (keep these out of the header file) */ #define spi_dv_in_progress(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_in_progress) #define spi_dv_mutex(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_mutex)
staticinlineconstchar *spi_signal_to_string(enum spi_signal_type type)
{ int i;
for (i = 0; i < ARRAY_SIZE(signal_types); i++) { if (type == signal_types[i].value) return signal_types[i].name;
} return NULL;
} staticinlineenum spi_signal_type spi_signal_to_value(constchar *name)
{ int i, len;
for (i = 0; i < ARRAY_SIZE(signal_types); i++) {
len = strlen(signal_types[i].name); if (strncmp(name, signal_types[i].name, len) == 0 &&
(name[len] == '\n' || name[len] == '\0')) return signal_types[i].value;
} return SPI_SIGNAL_UNKNOWN;
}
/* we only care about the first child device that's a real SCSI device
* so we return 1 to terminate the iteration when we find it */ staticint child_iter(struct device *dev, void *data)
{ if (!scsi_is_sdev_device(dev)) return 0;
/* Translate the period into ns according to the current spec
* for SDTR/PPR messages */ staticint period_to_str(char *buf, int period)
{ int len, picosec;
if (period < 0 || period > 0xff) {
picosec = -1;
} elseif (period <= SPI_STATIC_PPR) {
picosec = ppr_to_ps[period];
} else {
picosec = period * 4000;
}
if (picosec == -1) {
len = sprintf(buf, "reserved");
} else {
len = sprint_frac(buf, picosec, 1000);
}
return len;
}
/*
 * Format a period factor into @buf followed by a newline, for a sysfs
 * show routine.  Returns the total length written (excluding the NUL).
 */
static ssize_t
show_spi_transport_period_helper(char *buf, int period)
{
	int n;

	n = period_to_str(buf, period);
	buf[n] = '\n';
	buf[n + 1] = '\0';
	return n + 1;
}
static ssize_t
store_spi_transport_period_helper(struct device *dev, constchar *buf,
size_t count, int *periodp)
{ int j, picosec, period = -1; char *endp;
picosec = simple_strtoul(buf, &endp, 10) * 1000; if (*endp == '.') { int mult = 100; do {
endp++; if (!isdigit(*endp)) break;
picosec += (*endp - '0') * mult;
mult /= 10;
} while (mult > 0);
}
for (j = 0; j <= SPI_STATIC_PPR; j++) { if (ppr_to_ps[j] < picosec) continue;
period = j; break;
}
/* This is for read/write Domain Validation: If the device supports
* an echo buffer, we do read/write tests to it */ staticenum spi_compare_returns
spi_dv_device_echo_buffer(struct scsi_device *sdev, u8 *buffer,
u8 *ptr, constint retries)
{ int len = ptr - buffer; int j, k, r, result; unsignedint pattern = 0x0000ffff; struct scsi_sense_hdr sshdr;
constchar spi_write_buffer[] = {
WRITE_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0
}; constchar spi_read_buffer[] = {
READ_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0
};
/* set up the pattern buffer. Doesn't matter if we spill
* slightly beyond since that's where the read buffer is */ for (j = 0; j < len; ) {
/* fill the buffer with counting (test a) */ for ( ; j < min(len, 32); j++)
buffer[j] = j;
k = j; /* fill the buffer with alternating words of 0x0 and
* 0xffff (test b) */ for ( ; j < min(len, k + 32); j += 2) {
u16 *word = (u16 *)&buffer[j];
*word = (j & 0x02) ? 0x0000 : 0xffff;
}
k = j; /* fill with crosstalk (alternating 0x5555 0xaaa)
* (test c) */ for ( ; j < min(len, k + 32); j += 2) {
u16 *word = (u16 *)&buffer[j];
*word = (j & 0x02) ? 0x5555 : 0xaaaa;
}
k = j; /* fill with shifting bits (test d) */ for ( ; j < min(len, k + 32); j += 4) {
u32 *word = (unsignedint *)&buffer[j];
u32 roll = (pattern & 0x80000000) ? 1 : 0;
*word = pattern;
pattern = (pattern << 1) | roll;
} /* don't bother with random data (test e) */
}
for (r = 0; r < retries; r++) {
result = spi_execute(sdev, spi_write_buffer, REQ_OP_DRV_OUT,
buffer, len, &sshdr); if (result || !scsi_device_online(sdev)) {
scsi_device_set_state(sdev, SDEV_QUIESCE); if (result > 0 && scsi_sense_valid(&sshdr)
&& sshdr.sense_key == ILLEGAL_REQUEST /* INVALID FIELD IN CDB */
&& sshdr.asc == 0x24 && sshdr.ascq == 0x00) /* This would mean that the drive lied * to us about supporting an echo * buffer (unfortunately some Western * Digital drives do precisely this)
*/ return SPI_COMPARE_SKIP_TEST;
/* This is for the simplest form of Domain Validation: a read test
* on the inquiry data from the device */ staticenum spi_compare_returns
spi_dv_device_compare_inquiry(struct scsi_device *sdev, u8 *buffer,
u8 *ptr, constint retries)
{ int r, result; constint len = sdev->inquiry_len; constchar spi_inquiry[] = {
INQUIRY, 0, 0, 0, len, 0
};
for (r = 0; r < retries; r++) {
memset(ptr, 0, len);
result = spi_execute(sdev, spi_inquiry, REQ_OP_DRV_IN,
ptr, len, NULL);
for (;;) { int newperiod;
retval = compare_fn(sdev, buffer, ptr, DV_LOOPS);
if (retval == SPI_COMPARE_SUCCESS
|| retval == SPI_COMPARE_SKIP_TEST) break;
/* OK, retrain, fallback */ if (i->f->get_iu)
i->f->get_iu(starget); if (i->f->get_qas)
i->f->get_qas(starget); if (i->f->get_period)
i->f->get_period(sdev->sdev_target);
/* Here's the fallback sequence; first try turning off * IU, then QAS (if we can control them), then finally
* fall down the periods */ if (i->f->set_iu && spi_iu(starget)) {
starget_printk(KERN_ERR, starget, "Domain Validation Disabling Information Units\n");
DV_SET(iu, 0);
} elseif (i->f->set_qas && spi_qas(starget)) {
starget_printk(KERN_ERR, starget, "Domain Validation Disabling Quick Arbitration and Selection\n");
DV_SET(qas, 0);
} else {
newperiod = spi_period(starget);
period = newperiod > period ? newperiod : period; if (period < 0x0d)
period++; else
period += period >> 1;
if (unlikely(period > 0xff || period == prevperiod)) { /* Total failure; set to async and return */
starget_printk(KERN_ERR, starget, "Domain Validation Failure, dropping back to Asynchronous\n");
DV_SET(offset, 0); return SPI_COMPARE_FAILURE;
}
starget_printk(KERN_ERR, starget, "Domain Validation detected failure, dropping back\n");
DV_SET(period, period);
prevperiod = period;
}
} return retval;
}
staticint
spi_dv_device_get_echo_buffer(struct scsi_device *sdev, u8 *buffer)
{ int l, result;
/* first off do a test unit ready. This can error out * because of reservations or some other reason. If it * fails, the device won't let us write to the echo buffer
* so just return failure */
/* We send a set of three TURs to clear any outstanding * unit attention conditions if they exist (Otherwise the * buffer tests won't be happy). If the TUR still fails * (reservation conflict, device not ready, etc) just
* skip the write tests */ for (l = 0; ; l++) {
result = spi_execute(sdev, spi_test_unit_ready, REQ_OP_DRV_IN,
NULL, 0, NULL);
result = spi_execute(sdev, spi_read_buffer_descriptor,
REQ_OP_DRV_IN, buffer, 4, NULL);
if (result) /* Device has no echo buffer */ return 0;
return buffer[3] + ((buffer[2] & 0x1f) << 8);
}
/*
 * Perform the actual Domain Validation sequence on @sdev: start narrow
 * and asynchronous, verify basic INQUIRY comparison, probe wide
 * transfers, then step up to the maximum negotiated parameters and
 * retrain downwards on failure.  Write (echo buffer) tests are only
 * attempted when DT was actually negotiated and the device reports an
 * echo buffer.
 */
static void
spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
{
	struct spi_internal *i = to_spi_internal(sdev->host->transportt);
	struct scsi_target *starget = sdev->sdev_target;
	struct Scsi_Host *shost = sdev->host;
	int len = sdev->inquiry_len;
	int min_period = spi_min_period(starget);
	int max_width = spi_max_width(starget);

	/* first set us up for narrow async */
	DV_SET(offset, 0);
	DV_SET(width, 0);

	if (spi_dv_device_compare_inquiry(sdev, buffer, buffer, DV_LOOPS)
	    != SPI_COMPARE_SUCCESS) {
		starget_printk(KERN_ERR, starget, "Domain Validation Initial Inquiry Failed\n");
		/* FIXME: should probably offline the device here? */
		return;
	}

	if (!spi_support_wide(starget)) {
		spi_max_width(starget) = 0;
		max_width = 0;
	}

	/* test width */
	if (i->f->set_width && max_width) {
		i->f->set_width(starget, 1);

		if (spi_dv_device_compare_inquiry(sdev, buffer,
						  buffer + len,
						  DV_LOOPS)
		    != SPI_COMPARE_SUCCESS) {
			starget_printk(KERN_ERR, starget, "Wide Transfers Fail\n");
			i->f->set_width(starget, 0);
			/* Make sure we don't force wide back on by asking
			 * for a transfer period that requires it */
			max_width = 0;
			if (min_period < 10)
				min_period = 10;
		}
	}

	/* len == -1 is the signal that we need to ascertain the
	 * presence of an echo buffer before trying to use it.  len ==
	 * 0 means we don't have an echo buffer */
	len = -1;

 retry:

	/* now set up to the maximum */
	DV_SET(offset, spi_max_offset(starget));
	DV_SET(period, min_period);

	/* try QAS requests; this should be harmless to set if the
	 * target supports it */
	if (spi_support_qas(starget) && spi_max_qas(starget)) {
		DV_SET(qas, 1);
	} else {
		DV_SET(qas, 0);
	}

	if (spi_support_ius(starget) && spi_max_iu(starget) &&
	    min_period < 9) {
		/* This u320 (or u640). Set IU transfers */
		DV_SET(iu, 1);
		/* Then set the optional parameters */
		DV_SET(rd_strm, 1);
		DV_SET(wr_flow, 1);
		DV_SET(rti, 1);
		if (min_period == 8)
			DV_SET(pcomp_en, 1);
	} else {
		DV_SET(iu, 0);
	}

	/* now that we've done all this, actually check the bus
	 * signal type (if known).  Some devices are stupid on
	 * a SE bus and still claim they can try LVD only settings */
	if (i->f->get_signalling)
		i->f->get_signalling(shost);
	if (spi_signalling(shost) == SPI_SIGNAL_SE ||
	    spi_signalling(shost) == SPI_SIGNAL_HVD ||
	    !spi_support_dt(starget)) {
		DV_SET(dt, 0);
	} else {
		DV_SET(dt, 1);
	}
	/* set width last because it will pull all the other
	 * parameters down to required values */
	DV_SET(width, max_width);

	/* Do the read only INQUIRY tests */
	spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len,
		       spi_dv_device_compare_inquiry);
	/* See if we actually managed to negotiate and sustain DT */
	if (i->f->get_dt)
		i->f->get_dt(starget);

	/* see if the device has an echo buffer.  If it does we can do
	 * the SPI pattern write tests.  Because of some broken
	 * devices, we *only* try this on a device that has actually
	 * negotiated DT */
	if (len == -1 && spi_dt(starget))
		len = spi_dv_device_get_echo_buffer(sdev, buffer);

	/* No (usable) echo buffer: the write tests cannot run.  Without
	 * this guard the echo-buffer retrain below would be attempted
	 * with len == 0 or len == -1. */
	if (len <= 0) {
		starget_printk(KERN_INFO, starget, "Domain Validation skipping write tests\n");
		return;
	}

	if (len > SPI_MAX_ECHO_BUFFER_SIZE) {
		starget_printk(KERN_WARNING, starget, "Echo buffer size %d is too big, trimming to %d\n", len, SPI_MAX_ECHO_BUFFER_SIZE);
		len = SPI_MAX_ECHO_BUFFER_SIZE;
	}

	if (spi_dv_retrain(sdev, buffer, buffer + len,
			   spi_dv_device_echo_buffer)
	    == SPI_COMPARE_SKIP_TEST) {
		/* OK, the stupid drive can't do a write echo buffer
		 * test after all, fall back to the read tests */
		len = 0;
		goto retry;
	}
}
/** * spi_dv_device - Do Domain Validation on the device * @sdev: scsi device to validate * * Performs the domain validation on the given device in the * current execution thread. Since DV operations may sleep, * the current thread must have user context. Also no SCSI * related locks that would deadlock I/O issued by the DV may * be held.
*/ void
spi_dv_device(struct scsi_device *sdev)
{ struct scsi_target *starget = sdev->sdev_target; constint len = SPI_MAX_ECHO_BUFFER_SIZE*2; unsignedint sleep_flags;
u8 *buffer;
/* * Because this function and the power management code both call * scsi_device_quiesce(), it is not safe to perform domain validation * while suspend or resume is in progress. Hence the * lock/unlock_system_sleep() calls.
*/
sleep_flags = lock_system_sleep();
if (scsi_autopm_get_device(sdev)) goto unlock_system_sleep;
if (unlikely(spi_dv_in_progress(starget))) goto put_autopm;
if (unlikely(scsi_device_get(sdev))) goto put_autopm;
spi_dv_in_progress(starget) = 1;
buffer = kzalloc(len, GFP_KERNEL);
if (unlikely(!buffer)) goto put_sdev;
/* We need to verify that the actual device will quiesce; the
* later target quiesce is just a nice to have */ if (unlikely(scsi_device_quiesce(sdev))) goto free_buffer;
/** * spi_schedule_dv_device - schedule domain validation to occur on the device * @sdev: The device to validate * * Identical to spi_dv_device() above, except that the DV will be * scheduled to occur in a workqueue later. All memory allocations * are atomic, so may be called from any context including those holding * SCSI locks.
*/ void
spi_schedule_dv_device(struct scsi_device *sdev)
{ struct work_queue_wrapper *wqw =
kmalloc(sizeof(struct work_queue_wrapper), GFP_ATOMIC);
if (unlikely(!wqw)) return;
if (unlikely(spi_dv_pending(sdev->sdev_target))) {
kfree(wqw); return;
} /* Set pending early (dv_device doesn't check it, only sets it) */
spi_dv_pending(sdev->sdev_target) = 1; if (unlikely(scsi_device_get(sdev))) {
kfree(wqw);
spi_dv_pending(sdev->sdev_target) = 0; return;
}
/** * spi_display_xfer_agreement - Print the current target transfer agreement * @starget: The target for which to display the agreement * * Each SPI port is required to maintain a transfer agreement for each * other port on the bus. This function prints a one-line summary of * the current agreement; more detailed information is available in sysfs.
*/ void spi_display_xfer_agreement(struct scsi_target *starget)
{ struct spi_transport_attrs *tp;
tp = (struct spi_transport_attrs *)&starget->starget_data;
int spi_populate_sync_msg(unsignedchar *msg, int period, int offset)
{
msg[0] = EXTENDED_MESSAGE;
msg[1] = 3;
msg[2] = EXTENDED_SDTR;
msg[3] = period;
msg[4] = offset; return 5;
}
EXPORT_SYMBOL_GPL(spi_populate_sync_msg);
int spi_populate_ppr_msg(unsignedchar *msg, int period, int offset, int width, int options)
{
msg[0] = EXTENDED_MESSAGE;
msg[1] = 6;
msg[2] = EXTENDED_PPR;
msg[3] = period;
msg[4] = 0;
msg[5] = offset;
msg[6] = width;
msg[7] = options; return 8;
}
EXPORT_SYMBOL_GPL(spi_populate_ppr_msg);
/** * spi_populate_tag_msg - place a tag message in a buffer * @msg: pointer to the area to place the tag * @cmd: pointer to the scsi command for the tag * * Notes: * designed to create the correct type of tag message for the * particular request. Returns the size of the tag message. * May return 0 if TCQ is disabled for this device.
**/ int spi_populate_tag_msg(unsignedchar *msg, struct scsi_cmnd *cmd)
{ if (cmd->flags & SCMD_TAGGED) {
*msg++ = SIMPLE_QUEUE_TAG;
*msg++ = scsi_cmd_to_rq(cmd)->tag; return 2;
}
sdev = to_scsi_device(dev);
shost = sdev->host; if (!shost->transportt || shost->transportt->host_attrs.ac.class
!= &spi_host_class.class) return 0; /* Note: this class has no device attributes, so it has * no per-HBA allocation and thus we don't need to distinguish
* the attribute containers for the device */
i = to_spi_internal(shost->transportt); if (i->f->deny_binding && i->f->deny_binding(sdev->sdev_target)) return 0; return 1;
}
if (si->f->set_signalling)
rc = sysfs_chmod_file(kobj, attr, attr->mode | S_IWUSR);
return rc;
}
/* Compute the sysfs mode bits for a target attribute: readable
 * (S_IRUGO) when the transport template supplies a show_<name>
 * accessor, writeable (S_IWUSR) when it supplies set_<name>.
 * Expects a `si` (struct spi_internal *) in the enclosing scope. */
#define TARGET_ATTRIBUTE_HELPER(name) \
	(si->f->show_##name ? S_IRUGO : 0) | \
	(si->f->set_##name ? S_IWUSR : 0)
static __init int spi_transport_init(void)
{ int error = scsi_dev_info_add_list(SCSI_DEVINFO_SPI, "SCSI Parallel Transport Class"); if (!error) { int i;
for (i = 0; spi_static_device_list[i].vendor; i++)
scsi_dev_info_list_add_keyed(1, /* compatible */
spi_static_device_list[i].vendor,
spi_static_device_list[i].model,
NULL,
spi_static_device_list[i].flags,
SCSI_DEVINFO_SPI);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.