#define GENERIC /* let probe() bind using module params */
/* Some devices that can be used for testing will have "real" drivers.
 * Entries for those need to be enabled here by hand, after disabling
 * that "real" driver.
 */
//#define	IBOT2		/* grab iBOT2 webcams */
//#define	KEYSPAN_19Qi	/* grab un-renumerated serial adapter */
/* this is accessed only through usbfs ioctl calls. * one ioctl to issue a test ... one lock per device. * tests create other threads if they need them. * urbs and buffers are allocated dynamically, * and data generated deterministically.
*/ struct usbtest_dev { struct usb_interface *intf; struct usbtest_info *info; int in_pipe; int out_pipe; int in_iso_pipe; int out_iso_pipe; int in_int_pipe; int out_int_pipe; struct usb_endpoint_descriptor *iso_in, *iso_out; struct usb_endpoint_descriptor *int_in, *int_out; struct mutex lock;
in = out = NULL;
iso_in = iso_out = NULL;
int_in = int_out = NULL;
alt = intf->altsetting + tmp;
if (override_alt >= 0 &&
override_alt != alt->desc.bAlternateSetting) continue;
/* take the first altsetting with in-bulk + out-bulk; * ignore other endpoints and altsettings.
*/ for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) { struct usb_host_endpoint *e; int edi;
e = alt->endpoint + ep;
edi = usb_endpoint_dir_in(&e->desc);
switch (usb_endpoint_type(&e->desc)) { case USB_ENDPOINT_XFER_BULK:
endpoint_update(edi, &in, &out, e); continue; case USB_ENDPOINT_XFER_INT: if (dev->info->intr)
endpoint_update(edi, &int_in, &int_out, e); continue; case USB_ENDPOINT_XFER_ISOC: if (dev->info->iso)
endpoint_update(edi, &iso_in, &iso_out, e);
fallthrough; default: continue;
}
} if ((in && out) || iso_in || iso_out || int_in || int_out) goto found;
} return -EINVAL;
/* Support for testing basic non-queued I/O streams. * * These just package urbs as requests that can be easily canceled. * Each urb's data buffer is dynamically allocated; callers can fill * them with non-zero test data (or test for it) when appropriate.
*/
if (!urb->transfer_buffer) {
usb_free_urb(urb); return NULL;
}
/* To test unaligned transfers add an offset and fill the
unused memory with a guard value */ if (offset) {
memset(urb->transfer_buffer, GUARD_BYTE, offset);
urb->transfer_buffer += offset; if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
urb->transfer_dma += offset;
}
/* For inbound transfers use guard byte so that test fails if
data not correctly copied */
memset(urb->transfer_buffer,
usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
bytes); return urb;
}
int ret = check_guard_bytes(tdev, urb); if (ret) return ret;
for (i = 0; i < len; i++, buf++) { switch (pattern) { /* all-zeroes has no synchronization issues */ case 0:
expected = 0; break; /* mod63 stays in sync with short-terminated transfers, * or otherwise when host and gadget agree on how large * each usb transfer request should be. resync is done * with set_interface or set_config.
*/ case 1: /* mod63 */
expected = (i % maxpacket) % 63; break; /* always fail unsupported patterns */ default:
expected = !*buf; break;
} if (*buf == expected) continue;
ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected); return -EINVAL;
} return 0;
}
if (retval) break;
mod_timer(&timeout.timer, jiffies +
msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
usb_sg_wait(req); if (!timer_delete_sync(&timeout.timer))
retval = -ETIMEDOUT; else
retval = req->status;
timer_destroy_on_stack(&timeout.timer);
/* FIXME check resulting data pattern */
/* FIXME if endpoint halted, clear halt (and log) */
}
/* FIXME for unlink or fault handling tests, don't report * failure if retval is as we expected ...
*/ if (retval)
ERROR(tdev, "perform_sglist failed, " "iterations left %d, status %d\n",
iterations, retval); return retval;
}
/* unqueued control message testing * * there's a nice set of device functional requirements in chapter 9 of the * usb 2.0 spec, which we can apply to ANY device, even ones that don't use * special test firmware. * * we know the device is configured (or suspended) by the time it's visible * through usbfs. we can't change that, so we won't test enumeration (which * worked 'well enough' to get here, this time), power management (ditto), * or remote wakeup (which needs human interaction).
*/
/* * only bit[1] of bmAttributes is used for LTM and others are * reserved
*/ if (ss->bmAttributes & ~0x02) { /* reserved == 0 */
ERROR(tdev, "reserved bits set in bmAttributes\n"); return 0;
}
/* bits[0:3] of wSpeedSupported is used and others are reserved */ if (le16_to_cpu(ss->wSpeedSupported) & ~0x0f) { /* reserved == 0 */
ERROR(tdev, "reserved bits set in wSpeedSupported\n"); return 0;
}
/* sanity test for standard requests working with usb_control_mesg() and some * of the utility functions which use it. * * this doesn't test how endpoint halts behave or data toggles get set, since * we won't do I/O to bulk/interrupt endpoints here (which is how to change * halt or toggle). toggle testing is impractical without support from hcds. * * this avoids failing devices linux would normally work with, by not testing * config/altsetting operations for devices that only support their defaults. * such devices rarely support those needless operations. * * NOTE that since this is a sanity test, it's not examining boundary cases * to see if usbcore, hcd, and device all behave right. such testing would * involve varied read sizes and other operation sequences.
*/ staticint ch9_postconfig(struct usbtest_dev *dev)
{ struct usb_interface *iface = dev->intf; struct usb_device *udev = interface_to_usbdev(iface); int i, alt, retval;
/* [9.2.3] if there's more than one altsetting, we need to be able to * set and get each one. mostly trusts the descriptors from usbcore.
*/ for (i = 0; i < iface->num_altsetting; i++) {
/* 9.2.3 constrains the range here */
alt = iface->altsetting[i].desc.bAlternateSetting; if (alt < 0 || alt >= iface->num_altsetting) {
dev_err(&iface->dev, "invalid alt [%d].bAltSetting = %d\n",
i, alt);
}
/* [real world] get/set unimplemented if there's only one */ if (realworld && iface->num_altsetting == 1) continue;
/* [9.4.4] get_interface always works */
retval = get_altsetting(dev); if (retval != alt) {
dev_err(&iface->dev, "get alt should be %d, was %d\n",
alt, retval); return (retval < 0) ? retval : -EDOM;
}
}
/* [real world] get_config unimplemented if there's only one */ if (!realworld || udev->descriptor.bNumConfigurations != 1) { int expected = udev->actconfig->desc.bConfigurationValue;
/* [9.4.2] get_configuration always works * ... although some cheap devices (like one TI Hub I've got) * won't return config descriptors except before set_config.
*/
retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
USB_REQ_GET_CONFIGURATION,
USB_DIR_IN | USB_RECIP_DEVICE,
0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT); if (retval != 1 || dev->buf[0] != expected) {
dev_err(&iface->dev, "get config --> %d %d (1 %d)\n",
retval, dev->buf[0], expected); return (retval < 0) ? retval : -EDOM;
}
}
switch (header->bDevCapabilityType) { case USB_CAP_TYPE_EXT: if (buf + USB_DT_USB_EXT_CAP_SIZE >
dev->buf + total ||
!is_good_ext(dev, buf)) {
dev_err(&iface->dev, "bogus usb 2.0 extension descriptor\n"); return -EDOM;
} break; case USB_SS_CAP_TYPE: if (buf + USB_DT_USB_SS_CAP_SIZE >
dev->buf + total ||
!is_good_ss_cap(dev, buf)) {
dev_err(&iface->dev, "bogus superspeed device capability descriptor\n"); return -EDOM;
} break; case CONTAINER_ID_TYPE: if (buf + USB_DT_USB_SS_CONTN_ID_SIZE >
dev->buf + total ||
!is_good_con_id(dev, buf)) {
dev_err(&iface->dev, "bogus container id descriptor\n"); return -EDOM;
} break; default: break;
}
}
}
/* there's always [9.4.3] at least one config descriptor [9.6.3] */ for (i = 0; i < udev->descriptor.bNumConfigurations; i++) {
retval = usb_get_descriptor(udev, USB_DT_CONFIG, i,
dev->buf, TBUF_SIZE); if (!is_good_config(dev, retval)) {
dev_err(&iface->dev, "config [%d] descriptor --> %d\n",
i, retval); return (retval < 0) ? retval : -EDOM;
}
/* FIXME cross-checking udev->config[i] to make sure usbcore * parsed it right (etc) would be good testing paranoia
*/
}
/* and sometimes [9.2.6.6] speed dependent descriptors */ if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) { struct usb_qualifier_descriptor *d = NULL;
/* device qualifier [9.6.2] */
retval = usb_get_descriptor(udev,
USB_DT_DEVICE_QUALIFIER, 0, dev->buf, sizeof(struct usb_qualifier_descriptor)); if (retval == -EPIPE) { if (udev->speed == USB_SPEED_HIGH) {
dev_err(&iface->dev, "hs dev qualifier --> %d\n",
retval); return retval;
} /* usb2.0 but not high-speed capable; fine */
} elseif (retval != sizeof(struct usb_qualifier_descriptor)) {
dev_err(&iface->dev, "dev qualifier --> %d\n", retval); return (retval < 0) ? retval : -EDOM;
} else
d = (struct usb_qualifier_descriptor *) dev->buf;
/* might not have [9.6.2] any other-speed configs [9.6.4] */ if (d) { unsigned max = d->bNumConfigurations; for (i = 0; i < max; i++) {
retval = usb_get_descriptor(udev,
USB_DT_OTHER_SPEED_CONFIG, i,
dev->buf, TBUF_SIZE); if (!is_good_config(dev, retval)) {
dev_err(&iface->dev, "other speed config --> %d\n",
retval); return (retval < 0) ? retval : -EDOM;
}
}
}
} /* FIXME fetch strings from at least the device descriptor */
/* [9.4.5] get_status always works */
retval = usb_get_std_status(udev, USB_RECIP_DEVICE, 0, dev->buf); if (retval) {
dev_err(&iface->dev, "get dev status --> %d\n", retval); return retval;
}
/* FIXME configuration.bmAttributes says if we could try to set/clear * the device's remote wakeup feature ... if we can, test that here
*/
retval = usb_get_std_status(udev, USB_RECIP_INTERFACE,
iface->altsetting[0].desc.bInterfaceNumber, dev->buf); if (retval) {
dev_err(&iface->dev, "get interface status --> %d\n", retval); return retval;
} /* FIXME get status for each endpoint in the interface */
/* use ch9 requests to test whether: * (a) queues work for control, keeping N subtests queued and * active (auto-resubmit) for M loops through the queue. * (b) protocol stalls (control-only) will autorecover. * it's not like bulk/intr; no halt clearing. * (c) short control reads are reported and handled. * (d) queues are always processed in-order
*/
/* queue must transfer and complete in fifo order, unless * usb_unlink_urb() is used to unlink something not at the * physical queue head (not tested).
*/ if (subcase->number > 0) { if ((subcase->number - ctx->last) != 1) {
ERROR(ctx->dev, "subcase %d completed out of order, last %d\n",
subcase->number, ctx->last);
status = -EDOM;
ctx->last = subcase->number; goto error;
}
}
ctx->last = subcase->number;
/* succeed or fault in only one way? */ if (status == subcase->expected)
status = 0;
/* some faults are allowed, not required */ if (subcase->expected > 0 && (
((status == -subcase->expected /* happened */
|| status == 0)))) /* didn't */
status = 0; /* sometimes more than one fault is allowed */ elseif (subcase->number == 12 && status == -EPIPE)
status = 0; else
ERROR(ctx->dev, "subtest %d error, status %d\n",
subcase->number, status);
}
/* unexpected status codes mean errors; ideally, in hardware */ if (status) {
error: if (ctx->status == 0) { int i;
/* allocate and init the urbs we'll queue. * as with bulk/intr sglists, sglen is the queue depth; it also * controls which subtests run (more tests than sglen) or rerun.
*/
urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL); if (!urb) return -ENOMEM; for (i = 0; i < param->sglen; i++) { int pipe = usb_rcvctrlpipe(udev, 0); unsigned len; struct urb *u; struct usb_ctrlrequest req; struct subcase *reqp;
/* sign of this variable means: * -: tested code must return this (negative) error code * +: tested code may return this (negative too) error code
*/ int expected = 0;
/* requests here are mostly expected to succeed on any * device, but some are chosen to trigger protocol stalls * or short reads.
*/
memset(&req, 0, sizeof(req));
req.bRequest = USB_REQ_GET_DESCRIPTOR;
req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
switch (i % NUM_SUBCASES) { case 0: /* get device descriptor */
req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
len = sizeof(struct usb_device_descriptor); break; case 1: /* get first config descriptor (only) */
req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
len = sizeof(struct usb_config_descriptor); break; case 2: /* get altsetting (OFTEN STALLS) */
req.bRequest = USB_REQ_GET_INTERFACE;
req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE; /* index = 0 means first interface */
len = 1;
expected = EPIPE; break; case 3: /* get interface status */
req.bRequest = USB_REQ_GET_STATUS;
req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE; /* interface 0 */
len = 2; break; case 4: /* get device status */
req.bRequest = USB_REQ_GET_STATUS;
req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
len = 2; break; case 5: /* get device qualifier (MAY STALL) */
req.wValue = cpu_to_le16 (USB_DT_DEVICE_QUALIFIER << 8);
len = sizeof(struct usb_qualifier_descriptor); if (udev->speed != USB_SPEED_HIGH)
expected = EPIPE; break; case 6: /* get first config descriptor, plus interface */
req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
len = sizeof(struct usb_config_descriptor);
len += sizeof(struct usb_interface_descriptor); break; case 7: /* get interface descriptor (ALWAYS STALLS) */
req.wValue = cpu_to_le16 (USB_DT_INTERFACE << 8); /* interface == 0 */
len = sizeof(struct usb_interface_descriptor);
expected = -EPIPE; break; /* NOTE: two consecutive stalls in the queue here.
* that tests fault recovery a bit more aggressively. */ case 8: /* clear endpoint halt (MAY STALL) */
req.bRequest = USB_REQ_CLEAR_FEATURE;
req.bRequestType = USB_RECIP_ENDPOINT; /* wValue 0 == ep halt */ /* wIndex 0 == ep0 (shouldn't halt!) */
len = 0;
pipe = usb_sndctrlpipe(udev, 0);
expected = EPIPE; break; case 9: /* get endpoint status */
req.bRequest = USB_REQ_GET_STATUS;
req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT; /* endpoint 0 */
len = 2; break; case 10: /* trigger short read (EREMOTEIO) */
req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
len = 1024;
expected = -EREMOTEIO; break; /* NOTE: two consecutive _different_ faults in the queue. */ case 11: /* get endpoint descriptor (ALWAYS STALLS) */
req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8); /* endpoint == 0 */
len = sizeof(struct usb_interface_descriptor);
expected = EPIPE; break; /* NOTE: sometimes even a third fault in the queue! */ case 12: /* get string 0 descriptor (MAY STALL) */
req.wValue = cpu_to_le16(USB_DT_STRING << 8); /* string == 0, for language IDs */
len = sizeof(struct usb_interface_descriptor); /* may succeed when > 4 languages */
expected = EREMOTEIO; /* or EPIPE, if no strings */ break; case 13: /* short read, resembling case 10 */
req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0); /* last data packet "should" be DATA1, not DATA0 */ if (udev->speed == USB_SPEED_SUPER)
len = 1024 - 512; else
len = 1024 - udev->descriptor.bMaxPacketSize0;
expected = -EREMOTEIO; break; case 14: /* short read; try to fill the last packet */
req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0); /* device descriptor size == 18 bytes */
len = udev->descriptor.bMaxPacketSize0; if (udev->speed == USB_SPEED_SUPER)
len = 512; switch (len) { case 8:
len = 24; break; case 16:
len = 32; break;
}
expected = -EREMOTEIO; break; case 15:
req.wValue = cpu_to_le16(USB_DT_BOS << 8); if (udev->bos)
len = le16_to_cpu(udev->bos->desc->wTotalLength); else
len = sizeof(struct usb_bos_descriptor); if (le16_to_cpu(udev->descriptor.bcdUSB) < 0x0201)
expected = -EPIPE; break; default:
ERROR(dev, "bogus number of ctrl queue testcases!\n");
context.status = -EINVAL; goto cleanup;
}
req.wLength = cpu_to_le16(len);
urb[i] = u = simple_alloc_urb(udev, pipe, len, 0); if (!u) goto cleanup;
staticvoid unlink1_callback(struct urb *urb)
{ int status = urb->status;
/* we "know" -EPIPE (stall) never happens */ if (!status)
status = usb_submit_urb(urb, GFP_ATOMIC); if (status) {
urb->status = status;
complete(urb->context);
}
}
staticint unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
{ struct urb *urb; struct completion completion; int retval = 0;
if (usb_pipeout(urb->pipe)) {
simple_fill_buf(urb);
urb->transfer_flags |= URB_ZERO_PACKET;
}
/* keep the endpoint busy. there are lots of hc/hcd-internal * states, and testing should get to all of them over time. * * FIXME want additional tests for when endpoint is STALLing * due to errors, or is just NAKing requests.
*/
retval = usb_submit_urb(urb, GFP_KERNEL); if (retval != 0) {
dev_err(&dev->intf->dev, "submit fail %d\n", retval); return retval;
}
/* unlinking that should always work. variable delay tests more * hcd states and code paths, even with little other system load.
*/
msleep(jiffies % (2 * INTERRUPT_RATE)); if (async) { while (!completion_done(&completion)) {
retval = usb_unlink_urb(urb);
if (retval == 0 && usb_pipein(urb->pipe))
retval = simple_check_buf(dev, urb);
switch (retval) { case -EBUSY: case -EIDRM: /* we can't unlink urbs while they're completing * or if they've completed, and we haven't * resubmitted. "normal" drivers would prevent * resubmission, but since we're testing unlink * paths, we can't.
*/
ERROR(dev, "unlink retry\n"); continue; case 0: case -EINPROGRESS: break;
staticvoid unlink_queued_callback(struct urb *urb)
{ int status = urb->status; struct queued_ctx *ctx = urb->context;
if (ctx->status) goto done; if (urb == ctx->urbs[ctx->num - 4] || urb == ctx->urbs[ctx->num - 2]) { if (status == -ECONNRESET) goto done; /* What error should we report if the URB completed normally? */
} if (status != 0)
ctx->status = status;
done: if (atomic_dec_and_test(&ctx->pending))
complete(&ctx->complete);
}
staticint unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num, unsigned size)
{ struct queued_ctx ctx; struct usb_device *udev = testdev_to_usbdev(dev); void *buf;
dma_addr_t buf_dma; int i; int retval = -ENOMEM;
init_completion(&ctx.complete);
atomic_set(&ctx.pending, 1); /* One more than the actual value */
ctx.num = num;
ctx.status = 0;
/* Allocate and init the urbs we'll queue */
ctx.urbs = kcalloc(num, sizeof(struct urb *), GFP_KERNEL); if (!ctx.urbs) goto free_buf; for (i = 0; i < num; i++) {
ctx.urbs[i] = usb_alloc_urb(0, GFP_KERNEL); if (!ctx.urbs[i]) goto free_urbs;
usb_fill_bulk_urb(ctx.urbs[i], udev, pipe, buf, size,
unlink_queued_callback, &ctx);
ctx.urbs[i]->transfer_dma = buf_dma;
ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
if (usb_pipeout(ctx.urbs[i]->pipe)) {
simple_fill_buf(ctx.urbs[i]);
ctx.urbs[i]->transfer_flags |= URB_ZERO_PACKET;
}
}
/* Submit all the URBs and then unlink URBs num - 4 and num - 2. */ for (i = 0; i < num; i++) {
atomic_inc(&ctx.pending);
retval = usb_submit_urb(ctx.urbs[i], GFP_KERNEL); if (retval != 0) {
dev_err(&dev->intf->dev, "submit urbs[%d] fail %d\n",
i, retval);
atomic_dec(&ctx.pending);
ctx.status = retval; break;
}
} if (i == num) {
usb_unlink_urb(ctx.urbs[num - 4]);
usb_unlink_urb(ctx.urbs[num - 2]);
} else { while (--i >= 0)
usb_unlink_urb(ctx.urbs[i]);
}
if (atomic_dec_and_test(&ctx.pending)) /* The extra count */
complete(&ctx.complete);
wait_for_completion(&ctx.complete);
retval = ctx.status;
free_urbs: for (i = 0; i < num; i++)
usb_free_urb(ctx.urbs[i]);
kfree(ctx.urbs);
free_buf:
usb_free_coherent(udev, size, buf, buf_dma); return retval;
}
staticint verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
{ int retval;
u16 status;
/* should look and act halted */
retval = usb_get_std_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status); if (retval < 0) {
ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
ep, retval); return retval;
} if (status != 1) {
ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status); return -EINVAL;
}
retval = simple_io(tdev, urb, 1, 0, -EPIPE, __func__); if (retval != -EPIPE) return -EINVAL;
retval = simple_io(tdev, urb, 1, 0, -EPIPE, "verify_still_halted"); if (retval != -EPIPE) return -EINVAL; return 0;
}
staticint test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
{ int retval;
/* shouldn't look or act halted now */
retval = verify_not_halted(tdev, ep, urb); if (retval < 0) return retval;
/* set halt (protocol test only), verify it worked */
retval = usb_control_msg(urb->dev, usb_sndctrlpipe(urb->dev, 0),
USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT,
USB_ENDPOINT_HALT, ep,
NULL, 0, USB_CTRL_SET_TIMEOUT); if (retval < 0) {
ERROR(tdev, "ep %02x couldn't set halt, %d\n", ep, retval); return retval;
}
retval = verify_halted(tdev, ep, urb); if (retval < 0) { int ret;
/* clear halt anyways, else further tests will fail */
ret = usb_clear_halt(urb->dev, urb->pipe); if (ret)
ERROR(tdev, "ep %02x couldn't clear halt, %d\n",
ep, ret);
return retval;
}
/* clear halt (tests API + protocol), verify it worked */
retval = usb_clear_halt(urb->dev, urb->pipe); if (retval < 0) {
ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval); return retval;
}
retval = verify_not_halted(tdev, ep, urb); if (retval < 0) return retval;
/* NOTE: could also verify SET_INTERFACE clear halts ... */
return 0;
}
staticint test_toggle_sync(struct usbtest_dev *tdev, int ep, struct urb *urb)
{ int retval;
/* clear initial data toggle to DATA0 */
retval = usb_clear_halt(urb->dev, urb->pipe); if (retval < 0) {
ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval); return retval;
}
/* transfer 3 data packets, should be DATA0, DATA1, DATA0 */
retval = simple_io(tdev, urb, 1, 0, 0, __func__); if (retval != 0) return -EINVAL;
/* clear halt resets device side data toggle, host should react to it */
retval = usb_clear_halt(urb->dev, urb->pipe); if (retval < 0) {
ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval); return retval;
}
/* host should use DATA0 again after clear halt */
retval = simple_io(tdev, urb, 1, 0, 0, __func__);
return retval;
}
staticint halt_simple(struct usbtest_dev *dev)
{ int ep; int retval = 0; struct urb *urb; struct usb_device *udev = testdev_to_usbdev(dev);
/* * Create a URB that causes a transfer of uneven amount of data packets * This way the clear toggle has an impact on the data toggle sequence. * Use 2 maxpacket length packets and one zero packet.
*/
urb = simple_alloc_urb(udev, 0, 2 * maxp, 0); if (urb == NULL) return -ENOMEM;
/* Control OUT tests use the vendor control requests from Intel's * USB 2.0 compliance test device: write a buffer, read it back. * * Intel's spec only _requires_ that it work for one packet, which * is pretty weak. Some HCDs place limits here; most devices will * need to be able to handle more than one OUT data packet. We'll * try whatever we're told to try.
*/ staticint ctrl_out(struct usbtest_dev *dev, unsigned count, unsigned length, unsigned vary, unsigned offset)
{ unsigned i, j, len; int retval;
u8 *buf; char *what = "?"; struct usb_device *udev;
/* NOTE: hardware might well act differently if we pushed it * with lots back-to-back queued requests.
*/ for (i = 0; i < count; i++) { /* write patterned data */ for (j = 0; j < len; j++)
buf[j] = (u8)(i + j);
retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x5b, USB_DIR_OUT|USB_TYPE_VENDOR,
0, 0, buf, len, USB_CTRL_SET_TIMEOUT); if (retval != len) {
what = "write"; if (retval >= 0) {
ERROR(dev, "ctrl_out, wlen %d (expected %d)\n",
retval, len);
retval = -EBADMSG;
} break;
}
/* read it back -- assuming nothing intervened!! */
retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
0x5c, USB_DIR_IN|USB_TYPE_VENDOR,
0, 0, buf, len, USB_CTRL_GET_TIMEOUT); if (retval != len) {
what = "read"; if (retval >= 0) {
ERROR(dev, "ctrl_out, rlen %d (expected %d)\n",
retval, len);
retval = -EBADMSG;
} break;
}
/* fail if we can't verify */ for (j = 0; j < len; j++) { if (buf[j] != (u8)(i + j)) {
ERROR(dev, "ctrl_out, byte %d is %d not %d\n",
j, buf[j], (u8)(i + j));
retval = -EBADMSG; break;
}
} if (retval < 0) {
what = "verify"; break;
}
len += vary;
/* [real world] the "zero bytes IN" case isn't really used. * hardware can easily trip up in this weird case, since its * status stage is IN, not OUT like other ep0in transfers.
*/ if (len > length)
len = realworld ? 1 : 0;
}
if (urb->status == 0 && ctx->count > (ctx->pending - 1)
&& !ctx->submit_error) { int status = usb_submit_urb(urb, GFP_ATOMIC); switch (status) { case 0: goto done; default:
dev_err(&ctx->dev->intf->dev, "resubmit err %d\n",
status);
fallthrough; case -ENODEV: /* disconnected */ case -ESHUTDOWN: /* endpoint disabled */
ctx->submit_error = 1; break;
}
}
ctx->pending--; if (ctx->pending == 0) { if (ctx->errors)
dev_err(&ctx->dev->intf->dev, "during the test, %lu errors out of %lu\n",
ctx->errors, ctx->packet_count);
complete(&ctx->done);
}
done:
spin_unlock_irqrestore(&ctx->lock, flags);
}
staticstruct urb *iso_alloc_urb( struct usb_device *udev, int pipe, struct usb_endpoint_descriptor *desc, long bytes, unsigned offset
)
{ struct urb *urb; unsigned i, maxp, packets;
urb->number_of_packets = packets;
urb->transfer_buffer_length = bytes;
urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
GFP_KERNEL,
&urb->transfer_dma); if (!urb->transfer_buffer) {
usb_free_urb(urb); return NULL;
} if (offset) {
memset(urb->transfer_buffer, GUARD_BYTE, offset);
urb->transfer_buffer += offset;
urb->transfer_dma += offset;
} /* For inbound transfers use guard byte so that test fails if
data not correctly copied */
memset(urb->transfer_buffer,
usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
bytes);
for (i = 0; i < packets; i++) { /* here, only the last packet will be short */
urb->iso_frame_desc[i].length = min_t(unsignedint,
bytes, maxp);
bytes -= urb->iso_frame_desc[i].length;
spin_lock_irq(&context.lock); for (i = 0; i < param->sglen; i++) {
++context.pending;
status = usb_submit_urb(urbs[i], GFP_ATOMIC); if (status < 0) {
ERROR(dev, "submit iso[%d], error %d\n", i, status); if (i == 0) {
spin_unlock_irq(&context.lock); goto fail;
}
for (i = 0; i < param->sglen; i++) { if (urbs[i])
simple_free_urb(urbs[i]);
} /* * Isochronous transfers are expected to fail sometimes. As an * arbitrary limit, we will report an error if any submissions * fail or if the transfer failure rate is > 10%.
*/ if (status != 0)
; elseif (context.submit_error)
status = -EACCES; elseif (context.errors >
(context.is_iso ? context.packet_count / 10 : 0))
status = -EIO;
kfree(urbs); return status;
fail: for (i = 0; i < param->sglen; i++) { if (urbs[i])
simple_free_urb(urbs[i]);
}
kfree(urbs); return status;
}
staticint test_unaligned_bulk( struct usbtest_dev *tdev, int pipe, unsigned length, int iterations, unsigned transfer_flags, constchar *label)
{ int retval; struct urb *urb = usbtest_alloc_urb(testdev_to_usbdev(tdev),
pipe, length, transfer_flags, 1, 0, simple_callback);
if (param->iterations <= 0) return -EINVAL; if (param->sglen > MAX_SGLEN) return -EINVAL; /* * Just a bunch of test cases that every HCD is expected to handle. * * Some may need specific firmware, though it'd be good to have * one firmware image to handle all the test cases. * * FIXME add more tests! cancel requests, verify the data, control * queueing, concurrent read+write threads, and so on.
*/ switch (param->test_num) {
case 0:
dev_info(&intf->dev, "TEST 0: NOP\n");
retval = 0; break;
/* Tests for bulk I/O using DMA mapping by core and odd address */ case 17: if (dev->out_pipe == 0) break;
dev_info(&intf->dev, "TEST 17: write odd addr %d bytes %u times core map\n",
param->length, param->iterations);
/* NOTE(review): the following trailing text is extraction residue (German
 * website boilerplate), not part of this driver.  Wrapped in a comment so
 * it cannot break compilation; translated: "The information on this page
 * was compiled carefully to the best of our knowledge.  However, no
 * guarantee is given of its completeness, correctness, or quality.
 * Note: the colored syntax display and the measurement are still
 * experimental."  It should simply be removed.
 */