// SPDX-License-Identifier: GPL-2.0+ /* * f_fs.c -- user mode file system API for USB composite function controllers * * Copyright (C) 2010 Samsung Electronics * Author: Michal Nazarewicz <mina86@mina86.com> * * Based on inode.c (GadgetFS) which was: * Copyright (C) 2003-2004 David Brownell * Copyright (C) 2003 Agilent Technologies
*/
/* * Buffer for holding data from partial reads which may happen since * we’re rounding user read requests to a multiple of a max packet size. * * The pointer is initialised with NULL value and may be set by * __ffs_epfile_read_data function to point to a temporary buffer. * * In normal operation, calls to __ffs_epfile_read_buffered will consume * data from said buffer and eventually free it. Importantly, while the * function is using the buffer, it sets the pointer to NULL. This is * all right since __ffs_epfile_read_data and __ffs_epfile_read_buffered * can never run concurrently (they are synchronised by epfile->mutex) * so the latter will not assign a new value to the pointer. * * Meanwhile ffs_func_eps_disable frees the buffer (if the pointer is * valid) and sets the pointer to READ_BUFFER_DROP value. This special * value is crux of the synchronisation between ffs_func_eps_disable and * __ffs_epfile_read_data. * * Once __ffs_epfile_read_data is about to finish it will try to set the * pointer back to its old value (as described above), but seeing as the * pointer is not-NULL (namely READ_BUFFER_DROP) it will instead free * the buffer. 
* * == State transitions == * * • ptr == NULL: (initial state) * ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP * ◦ __ffs_epfile_read_buffered: nop * ◦ __ffs_epfile_read_data allocates temp buffer: go to ptr == buf * ◦ reading finishes: n/a, not in ‘and reading’ state * • ptr == DROP: * ◦ __ffs_epfile_read_buffer_free: nop * ◦ __ffs_epfile_read_buffered: go to ptr == NULL * ◦ __ffs_epfile_read_data allocates temp buffer: free buf, nop * ◦ reading finishes: n/a, not in ‘and reading’ state * • ptr == buf: * ◦ __ffs_epfile_read_buffer_free: free buf, go to ptr == DROP * ◦ __ffs_epfile_read_buffered: go to ptr == NULL and reading * ◦ __ffs_epfile_read_data: n/a, __ffs_epfile_read_buffered * is always called first * ◦ reading finishes: n/a, not in ‘and reading’ state * • ptr == NULL and reading: * ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP and reading * ◦ __ffs_epfile_read_buffered: n/a, mutex is held * ◦ __ffs_epfile_read_data: n/a, mutex is held * ◦ reading finishes and … * … all data read: free buf, go to ptr == NULL * … otherwise: go to ptr == buf and reading * • ptr == DROP and reading: * ◦ __ffs_epfile_read_buffer_free: nop * ◦ __ffs_epfile_read_buffered: n/a, mutex is held * ◦ __ffs_epfile_read_data: n/a, mutex is held * ◦ reading finishes: free buf, go to ptr == DROP
*/ struct ffs_buffer *read_buffer; #define READ_BUFFER_DROP ((struct ffs_buffer *)ERR_PTR(-ESHUTDOWN))
if (!req) {
spin_unlock_irq(&ffs->ev.waitq.lock); return -EINVAL;
}
req->zero = len < le16_to_cpu(ffs->ev.setup.wLength);
spin_unlock_irq(&ffs->ev.waitq.lock);
req->buf = data;
req->length = len;
/* * UDC layer requires to provide a buffer even for ZLP, but should * not use it at all. Let's provide some poisoned pointer to catch * possible bug in the driver.
*/ if (req->buf == NULL)
req->buf = (void *)0xDEADBABE;
reinit_completion(&ffs->ep0req_completion);
ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC); if (ret < 0) return ret;
ret = wait_for_completion_interruptible(&ffs->ep0req_completion); if (ret) {
usb_ep_dequeue(ffs->gadget->ep0, req); return -EINTR;
}
ret = ffs_ready(ffs); if (ret < 0) {
ffs->state = FFS_CLOSING; return ret;
}
return len;
} break;
case FFS_ACTIVE:
data = NULL; /* * We're called from user space, we can use _irq * rather then _irqsave
*/
spin_lock_irq(&ffs->ev.waitq.lock); switch (ffs_setup_state_clear_cancelled(ffs)) { case FFS_SETUP_CANCELLED:
ret = -EIDRM; goto done_spin;
case FFS_NO_SETUP:
ret = -ESRCH; goto done_spin;
case FFS_SETUP_PENDING: break;
}
/* FFS_SETUP_PENDING */ if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) {
spin_unlock_irq(&ffs->ev.waitq.lock);
ret = __ffs_ep0_stall(ffs); break;
}
/* FFS_SETUP_PENDING and not stall */
len = min_t(size_t, len, le16_to_cpu(ffs->ev.setup.wLength));
spin_unlock_irq(&ffs->ev.waitq.lock);
data = ffs_prepare_buffer(buf, len); if (IS_ERR(data)) {
ret = PTR_ERR(data); break;
}
spin_lock_irq(&ffs->ev.waitq.lock);
/* * We are guaranteed to be still in FFS_ACTIVE state * but the state of setup could have changed from * FFS_SETUP_PENDING to FFS_SETUP_CANCELLED so we need * to check for that. If that happened we copied data * from user space in vain but it's unlikely. * * For sure we are not in FFS_NO_SETUP since this is * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP * transition can be performed and it's protected by * mutex.
*/ if (ffs_setup_state_clear_cancelled(ffs) ==
FFS_SETUP_CANCELLED) {
ret = -EIDRM;
done_spin:
spin_unlock_irq(&ffs->ev.waitq.lock);
} else { /* unlocks spinlock */
ret = __ffs_ep0_queue_wait(ffs, data, len);
}
kfree(data); break;
default:
ret = -EBADFD; break;
}
mutex_unlock(&ffs->mutex); return ret;
}
/* Called with ffs->ev.waitq.lock and ffs->mutex held, both released on exit. */ static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
size_t n)
__releases(&ffs->ev.waitq.lock)
{ /* * n cannot be bigger than ffs->ev.count, which cannot be bigger than * size of ffs->ev.types array (which is four) so that's how much space * we reserve.
*/ struct usb_functionfs_event events[ARRAY_SIZE(ffs->ev.types)]; const size_t size = n * sizeof *events; unsigned i = 0;
memset(events, 0, size);
do {
events[i].type = ffs->ev.types[i]; if (events[i].type == FUNCTIONFS_SETUP) {
events[i].u.setup = ffs->ev.setup;
ffs->setup_state = FFS_SETUP_PENDING;
}
} while (++i < n);
ffs->ev.count -= n; if (ffs->ev.count)
memmove(ffs->ev.types, ffs->ev.types + n,
ffs->ev.count * sizeof *ffs->ev.types);
/* Fast check if setup was canceled */ if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED) return -EIDRM;
/* Acquire mutex */
ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK); if (ret < 0) return ret;
/* Check state */ if (ffs->state != FFS_ACTIVE) {
ret = -EBADFD; goto done_mutex;
}
/* * We're called from user space, we can use _irq rather than * _irqsave
*/
spin_lock_irq(&ffs->ev.waitq.lock);
switch (ffs_setup_state_clear_cancelled(ffs)) { case FFS_SETUP_CANCELLED:
ret = -EIDRM; break;
case FFS_NO_SETUP:
n = len / sizeof(struct usb_functionfs_event); if (!n) {
ret = -EINVAL; break;
}
if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) {
ret = -EAGAIN; break;
}
if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq,
ffs->ev.count)) {
ret = -EINTR; break;
}
/* unlocks spinlock */ return __ffs_ep0_read_events(ffs, buf,
min_t(size_t, n, ffs->ev.count));
case FFS_SETUP_PENDING: if (ffs->ev.setup.bRequestType & USB_DIR_IN) {
spin_unlock_irq(&ffs->ev.waitq.lock);
ret = __ffs_ep0_stall(ffs); goto done_mutex;
}
len = min_t(size_t, len, le16_to_cpu(ffs->ev.setup.wLength));
spin_unlock_irq(&ffs->ev.waitq.lock);
if (len) {
data = kmalloc(len, GFP_KERNEL); if (!data) {
ret = -ENOMEM; goto done_mutex;
}
}
spin_lock_irq(&ffs->ev.waitq.lock);
/* See ffs_ep0_write() */ if (ffs_setup_state_clear_cancelled(ffs) ==
FFS_SETUP_CANCELLED) {
ret = -EIDRM; break;
}
/* unlocks spinlock */
ret = __ffs_ep0_queue_wait(ffs, data, len); if ((ret > 0) && (copy_to_user(buf, data, len)))
ret = -EFAULT; goto done_mutex;
if (req->status)
io_data->status = req->status; else
io_data->status = req->actual;
complete(&io_data->done);
}
/*
 * Copy @data_len bytes from the kernel buffer @data into the user
 * iov_iter @iter.
 *
 * Returns the number of bytes actually copied, or -EFAULT if the copy
 * stopped short while the iterator still had room (a fault on the user
 * buffer).  If instead the iterator simply ran out of space, the excess
 * data is dropped and an error is logged asking user space to align its
 * read buffers.
 */
static ssize_t ffs_copy_to_iter(void *data, int data_len, struct iov_iter *iter)
{
	ssize_t copied = copy_to_iter(data, data_len, iter);

	if (copied == data_len)
		return copied;

	/*
	 * A short copy with space left in the iterator means we faulted
	 * while accessing user memory.
	 */
	if (iov_iter_count(iter))
		return -EFAULT;

	/*
	 * Dear user space developer!
	 *
	 * TL;DR: To stop getting the error message below in your kernel
	 * log, change user space code using functionfs to align read
	 * buffers to a max packet size.
	 *
	 * Some UDCs (e.g. dwc3) require request sizes to be a multiple of
	 * a max packet size.  When an unaligned buffer is passed to
	 * functionfs, it internally uses a larger, aligned buffer so that
	 * such UDCs are happy.
	 *
	 * Unfortunately, this means the host may send more data than was
	 * requested in the read(2) system call.  f_fs doesn't know what to
	 * do with that excess data so it simply drops it.
	 *
	 * Had the buffer been aligned in the first place, no such problem
	 * would happen.
	 *
	 * Data may be dropped only in AIO reads.  Synchronous reads are
	 * handled by splitting a request into multiple parts.  This
	 * splitting may still be a problem though, so it's likely best to
	 * align the buffer regardless of it being AIO or not.
	 *
	 * This only affects OUT endpoints, i.e. reading data with read(2),
	 * aio_read(2) etc. system calls.  Writing data to an IN endpoint
	 * is not affected.
	 */
	pr_err("functionfs read size %d > requested size %zd, dropping excess data. "
	       "Align read buffer size to max packet size to avoid the problem.\n",
	       data_len, copied);

	return copied;
}
/* * allocate a virtually contiguous buffer and create a scatterlist describing it * @sg_table - pointer to a place to be filled with sg_table contents * @size - required buffer size
*/ staticvoid *ffs_build_sg_list(struct sg_table *sgt, size_t sz)
{ struct page **pages; void *vaddr, *ptr; unsignedint n_pages; int i;
if (io_data->read && ret > 0) {
kthread_use_mm(io_data->mm);
ret = ffs_copy_to_iter(io_data->buf, ret, &io_data->data);
kthread_unuse_mm(io_data->mm);
}
io_data->kiocb->ki_complete(io_data->kiocb, ret);
if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
eventfd_signal(io_data->ffs->ffs_eventfd);
usb_ep_free_request(io_data->ep, io_data->req);
if (io_data->read)
kfree(io_data->to_free);
ffs_free_buffer(io_data);
kfree(io_data);
}
/*
 * Free epfile's buffered partial-read data (if any) and install the
 * READ_BUFFER_DROP sentinel in its place.
 *
 * The xchg() makes the swap atomic with respect to a concurrent reader:
 * whichever side ends up holding the real buffer pointer is responsible
 * for freeing it.  See the comment in struct ffs_epfile for the full
 * read_buffer pointer synchronisation story.
 *
 * Fix: the original had "staticvoid" (missing space between the storage
 * class and return type), which does not compile.
 */
static void __ffs_epfile_read_buffer_free(struct ffs_epfile *epfile)
{
	struct ffs_buffer *buf = xchg(&epfile->read_buffer, READ_BUFFER_DROP);

	/* Only free a real buffer — not NULL and not the sentinel itself. */
	if (buf && buf != READ_BUFFER_DROP)
		kfree(buf);
}
/* Assumes epfile->mutex is held. */ static ssize_t __ffs_epfile_read_buffered(struct ffs_epfile *epfile, struct iov_iter *iter)
{ /* * Null out epfile->read_buffer so ffs_func_eps_disable does not free * the buffer while we are using it. See comment in struct ffs_epfile * for full read_buffer pointer synchronisation story.
*/ struct ffs_buffer *buf = xchg(&epfile->read_buffer, NULL);
ssize_t ret; if (!buf || buf == READ_BUFFER_DROP) return 0;
ret = copy_to_iter(buf->data, buf->length, iter); if (buf->length == ret) {
kfree(buf); return ret;
}
if (iov_iter_count(iter)) {
ret = -EFAULT;
} else {
buf->length -= ret;
buf->data += ret;
}
if (cmpxchg(&epfile->read_buffer, NULL, buf))
kfree(buf);
/* * At this point read_buffer is NULL or READ_BUFFER_DROP (if * ffs_func_eps_disable has been called in the meanwhile). See comment * in struct ffs_epfile for full read_buffer pointer synchronisation * story.
*/ if (cmpxchg(&epfile->read_buffer, NULL, buf))
kfree(buf);
/* * Do we have buffered data from previous partial read? Check * that for synchronous case only because we do not have * facility to ‘wake up’ a pending asynchronous read and push * buffered data to it which we would need to make things behave * consistently.
*/ if (!io_data->aio && io_data->read) {
ret = __ffs_epfile_read_buffered(epfile, &io_data->data); if (ret) goto error_mutex;
}
/* * if we _do_ wait above, the epfile->ffs->gadget might be NULL * before the waiting completes, so do not assign to 'gadget' * earlier
*/
gadget = epfile->ffs->gadget;
spin_lock_irq(&epfile->ffs->eps_lock); /* In the meantime, endpoint got disabled or changed. */ if (epfile->ep != ep) {
ret = -ESHUTDOWN; goto error_lock;
}
data_len = iov_iter_count(&io_data->data); /* * Controller may require buffer size to be aligned to * maxpacketsize of an out endpoint.
*/ if (io_data->read)
data_len = usb_ep_align_maybe(gadget, ep->ep, data_len);
data = ffs_alloc_buffer(io_data, data_len); if (!data) {
ret = -ENOMEM; goto error_mutex;
} if (!io_data->read &&
!copy_from_iter_full(data, data_len, &io_data->data)) {
ret = -EFAULT; goto error_mutex;
}
}
spin_lock_irq(&epfile->ffs->eps_lock);
if (epfile->ep != ep) { /* In the meantime, endpoint got disabled or changed. */
ret = -ESHUTDOWN;
} elseif (halt) {
ret = usb_ep_set_halt(ep->ep); if (!ret)
ret = -EBADMSG;
} elseif (data_len == -EINVAL) { /* * Sanity Check: even though data_len can't be used * uninitialized at the time I write this comment, some * compilers complain about this situation. * In order to keep the code clean from warnings, data_len is * being initialized to -EINVAL during its declaration, which * means we can't rely on compiler anymore to warn no future * changes won't result in data_len being used uninitialized. * For such reason, we're adding this redundant sanity check * here.
*/
WARN(1, "%s: data_len == -EINVAL\n", __func__);
ret = -EINVAL;
} elseif (!io_data->aio) { bool interrupted = false;
ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC); if (ret < 0) goto error_lock;
spin_unlock_irq(&epfile->ffs->eps_lock);
if (wait_for_completion_interruptible(&io_data->done)) {
spin_lock_irq(&epfile->ffs->eps_lock); if (epfile->ep != ep) {
ret = -ESHUTDOWN; goto error_lock;
} /* * To avoid race condition with ffs_epfile_io_complete, * dequeue the request first then check * status. usb_ep_dequeue API should guarantee no race * condition with req->complete callback.
*/
usb_ep_dequeue(ep->ep, req);
spin_unlock_irq(&epfile->ffs->eps_lock);
wait_for_completion(&io_data->done);
interrupted = io_data->status < 0;
}
/* Close all attached DMABUFs */
list_for_each_entry_safe(priv, tmp, &epfile->dmabufs, entry) { /* Cancel any pending transfer */
spin_lock_irq(&ffs->eps_lock); if (priv->ep && priv->req)
usb_ep_dequeue(priv->ep, priv->req);
spin_unlock_irq(&ffs->eps_lock);
/* * The fence will be unref'd in ffs_dmabuf_cleanup. * It can't be done here, as the unref functions might try to lock * the resv object, which would deadlock.
*/
INIT_WORK(&dma_fence->work, ffs_dmabuf_cleanup);
queue_work(priv->ffs->io_completion_wq, &dma_fence->work);
}
if (req->flags & ~USB_FFS_DMABUF_TRANSFER_MASK) return -EINVAL;
dmabuf = dma_buf_get(req->fd); if (IS_ERR(dmabuf)) return PTR_ERR(dmabuf);
if (req->length > dmabuf->size || req->length == 0) {
ret = -EINVAL; goto err_dmabuf_put;
}
attach = ffs_dmabuf_find_attachment(epfile, dmabuf); if (IS_ERR(attach)) {
ret = PTR_ERR(attach); goto err_dmabuf_put;
}
priv = attach->importer_priv;
ep = ffs_epfile_wait_ep(file); if (IS_ERR(ep)) {
ret = PTR_ERR(ep); goto err_attachment_put;
}
ret = ffs_dma_resv_lock(dmabuf, nonblock); if (ret) goto err_attachment_put;
/* Make sure we don't have writers */
timeout = nonblock ? 0 : msecs_to_jiffies(DMABUF_ENQUEUE_TIMEOUT_MS);
retl = dma_resv_wait_timeout(dmabuf->resv,
dma_resv_usage_rw(epfile->in), true, timeout); if (retl == 0)
retl = -EBUSY; if (retl < 0) {
ret = (int)retl; goto err_resv_unlock;
}
ret = dma_resv_reserve_fences(dmabuf->resv, 1); if (ret) goto err_resv_unlock;
fence = kmalloc(sizeof(*fence), GFP_KERNEL); if (!fence) {
ret = -ENOMEM; goto err_resv_unlock;
}
fence->priv = priv;
spin_lock_irq(&epfile->ffs->eps_lock);
/* In the meantime, endpoint got disabled or changed. */ if (epfile->ep != ep) {
ret = -ESHUTDOWN; goto err_fence_put;
}
usb_req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC); if (!usb_req) {
ret = -ENOMEM; goto err_fence_put;
}
/* * usb_ep_queue() guarantees that all transfers are processed in the * order they are enqueued, so we can use a simple incrementing * sequence number for the dma_fence.
*/
seqno = atomic_add_return(1, &epfile->seqno);
/* Wait for endpoint to be enabled */
ep = ffs_epfile_wait_ep(file); if (IS_ERR(ep)) return PTR_ERR(ep);
spin_lock_irq(&epfile->ffs->eps_lock);
/* In the meantime, endpoint got disabled or changed. */ if (epfile->ep != ep) {
spin_unlock_irq(&epfile->ffs->eps_lock); return -ESHUTDOWN;
}
switch (code) { case FUNCTIONFS_FIFO_STATUS:
ret = usb_ep_fifo_status(epfile->ep->ep); break; case FUNCTIONFS_FIFO_FLUSH:
usb_ep_fifo_flush(epfile->ep->ep);
ret = 0; break; case FUNCTIONFS_CLEAR_HALT:
ret = usb_ep_clear_halt(epfile->ep->ep); break; case FUNCTIONFS_ENDPOINT_REVMAP:
ret = epfile->ep->num; break; case FUNCTIONFS_ENDPOINT_DESC:
{ int desc_idx; struct usb_endpoint_descriptor desc1, *desc;
switch (epfile->ffs->gadget->speed) { case USB_SPEED_SUPER: case USB_SPEED_SUPER_PLUS:
desc_idx = 2; break; case USB_SPEED_HIGH:
desc_idx = 1; break; default:
desc_idx = 0;
}
/* * Set up the superblock for a mount.
*/ staticint ffs_fs_get_tree(struct fs_context *fc)
{ struct ffs_sb_fill_data *ctx = fc->fs_private; struct ffs_data *ffs; int ret;
if (!fc->source) return invalf(fc, "No source specified");
/* * potential race possible between ffs_func_eps_disable * & ffs_epfile_release therefore maintaining a local * copy of epfile will save us from use-after-free.
*/ if (epfiles) {
ffs_epfiles_destroy(epfiles, ffs->eps_count);
ffs->epfiles = NULL;
}
if (ffs->ffs_eventfd) {
eventfd_ctx_put(ffs->ffs_eventfd);
ffs->ffs_eventfd = NULL;
}
lang = ffs->stringtabs; if (lang) { for (; *lang; ++lang) { struct usb_string *str = (*lang)->strings; int id = first_id; for (; str->s; ++id, ++str)
str->id = id;
}
}
/* Parsing and building descriptors and strings *****************************/
/* * This validates if data pointed by data is a valid USB descriptor as * well as record how many interfaces, endpoints and strings are * required by given configuration. Returns address after the * descriptor or NULL if data is invalid.
*/
staticint __must_check ffs_do_single_desc(char *data, unsigned len,
ffs_entity_callback entity, void *priv, int *current_class, int *current_subclass)
{ struct usb_descriptor_header *_ds = (void *)data;
u8 length; int ret;
/* At least two bytes are required: length and type */ if (len < 2) {
pr_vdebug("descriptor too short\n"); return -EINVAL;
}
/* If we have at least as many bytes as the descriptor takes? */
length = _ds->bLength; if (len < length) {
pr_vdebug("descriptor longer then available data\n"); return -EINVAL;
}
/* Parse descriptor depending on type. */ switch (_ds->bDescriptorType) { case USB_DT_DEVICE: case USB_DT_CONFIG: case USB_DT_STRING: case USB_DT_DEVICE_QUALIFIER: /* function can't have any of those */
pr_vdebug("descriptor reserved for gadget: %d\n",
_ds->bDescriptorType); return -EINVAL;
case USB_DT_INTERFACE: { struct usb_interface_descriptor *ds = (void *)_ds;
pr_vdebug("interface descriptor\n"); if (length != sizeof *ds) goto inv_length;
case USB_TYPE_CLASS | 0x01: if (*current_class == USB_INTERFACE_CLASS_HID) {
pr_vdebug("hid descriptor\n"); if (length != sizeof(struct hid_descriptor)) goto inv_length; break;
} elseif (*current_class == USB_INTERFACE_CLASS_CCID) {
pr_vdebug("ccid descriptor\n"); if (length != sizeof(struct ccid_descriptor)) goto inv_length; break;
} elseif (*current_class == USB_CLASS_APP_SPEC &&
*current_subclass == USB_SUBCLASS_DFU) {
pr_vdebug("dfu functional descriptor\n"); if (length != sizeof(struct usb_dfu_functional_descriptor)) goto inv_length; break;
} else {
pr_vdebug("unknown descriptor: %d for class %d\n",
_ds->bDescriptorType, *current_class); return -EINVAL;
}
case USB_DT_OTG: if (length != sizeof(struct usb_otg_descriptor)) goto inv_length; break;
case USB_DT_INTERFACE_ASSOCIATION: { struct usb_interface_assoc_descriptor *ds = (void *)_ds;
pr_vdebug("interface association descriptor\n"); if (length != sizeof *ds) goto inv_length; if (ds->iFunction)
__entity(STRING, ds->iFunction);
} break;
case USB_DT_SS_ENDPOINT_COMP:
pr_vdebug("EP SS companion descriptor\n"); if (length != sizeof(struct usb_ss_ep_comp_descriptor)) goto inv_length; break;
case USB_DT_OTHER_SPEED_CONFIG: case USB_DT_INTERFACE_POWER: case USB_DT_DEBUG: case USB_DT_SECURITY: case USB_DT_CS_RADIO_CONTROL: /* TODO */
pr_vdebug("unimplemented descriptor: %d\n", _ds->bDescriptorType); return -EINVAL;
default: /* We should never be here */
pr_vdebug("unknown descriptor: %d\n", _ds->bDescriptorType); return -EINVAL;
case FFS_INTERFACE: /* * Interfaces are indexed from zero so if we * encountered interface "n" then there are at least * "n+1" interfaces.
*/ if (*valuep >= helper->interfaces_count)
helper->interfaces_count = *valuep + 1; break;
case FFS_STRING: /* * Strings are indexed from 1 (0 is reserved * for languages list)
*/ if (*valuep > helper->ffs->strings_count)
helper->ffs->strings_count = *valuep; break;
case FFS_ENDPOINT:
d = (void *)desc;
helper->eps_count++; if (helper->eps_count >= FFS_MAX_EPS_COUNT) return -EINVAL; /* Check if descriptors for any speed were already parsed */ if (!helper->ffs->eps_count && !helper->ffs->interfaces_count)
helper->ffs->eps_addrmap[helper->eps_count] =
d->bEndpointAddress; elseif (helper->ffs->eps_addrmap[helper->eps_count] !=
d->bEndpointAddress) return -EINVAL; break;
}
/*
 * NOTE(review): The following text is extraneous web-page residue (a German
 * site disclaimer) that is not part of f_fs.c and should be removed.
 * Translation: "The information on this web page was compiled carefully to
 * the best of our knowledge.  However, neither completeness, correctness,
 * nor quality of the provided information is guaranteed.  Note: the colored
 * syntax highlighting and the measurement are still experimental."
 */