// SPDX-License-Identifier: GPL-2.0
/*
 * The USB Monitor, inspired by Dave Harding's USBMon.
 *
 * This is a binary format reader.
 *
 * Copyright (C) 2006 Paolo Abeni (paolo.abeni@email.it)
 * Copyright (C) 2006,2007 Pete Zaitcev (zaitcev@redhat.com)
 */

/*
 * Some architectures have enormous basic pages (16KB for ia64, 64KB for ppc).
 * But it's all right. Just use a simple way to make sure the chunk is never
 * smaller than a page.
 *
 * N.B. An application does not know our chunk size.
 *
 * Woops, get_zeroed_page() returns a single page. I guess we're stuck with
 * page-sized chunks for the time being.
 */
#define CHUNK_SIZE   PAGE_SIZE
#define CHUNK_ALIGN(x)   (((x)+CHUNK_SIZE-1) & ~(CHUNK_SIZE-1))

/*
 * The magic limit was calculated so that it allows the monitoring
 * application to pick data once in two ticks. This way, another application,
 * which presumably drives the bus, gets to hog CPU, yet we collect our data.
 *
 * Originally, for a 480 Mbit/s bus this required a buffer of about 1 MB. For
 * modern 20 Gbps buses, this value increases to over 50 MB. The maximum
 * buffer size is set to 64 MiB to accommodate this.
 *
 * This is still too much for most cases, where we just snoop a few
 * descriptor fetches for enumeration. So, the default is a "reasonable"
 * amount for typical, low-throughput use cases.
 *
 * XXX What about multi-megabyte URBs which take minutes to transfer?
 */
#define BUFF_MAX  CHUNK_ALIGN(64*1024*1024)
#define BUFF_DFL   CHUNK_ALIGN(300*1024)
#define BUFF_MIN     CHUNK_ALIGN(8*1024)
/*
 * The per-event API header (2 per URB).
 *
 * This structure is seen in userland as defined by the documentation.
 */
struct mon_bin_hdr {
	u64 id;			/* URB ID - from submission to callback */
	unsigned char type;	/* Same as in text API; extensible. */
	unsigned char xfer_type; /* ISO, Intr, Control, Bulk */
	unsigned char epnum;	/* Endpoint number and transfer direction */
	unsigned char devnum;	/* Device address */
	unsigned short busnum;	/* Bus number */
	char flag_setup;
	char flag_data;
	s64 ts_sec;		/* ktime_get_real_ts64 */
	s32 ts_usec;		/* ktime_get_real_ts64 */
	int status;
	unsigned int len_urb;	/* Length of data (submitted or actual) */
	unsigned int len_cap;	/* Delivered length */
	union {
		unsigned char setup[SETUP_LEN];	/* Only for Control S-type */
		struct iso_rec {
			int error_count;
			int numdesc;
		} iso;
	} s;
	int interval;
	int start_frame;
	unsigned int xfer_flags;
	unsigned int ndesc;	/* Actual number of ISO descriptors */
};
/*
 * ISO vector, packed into the head of data stream.
 * This has to take 16 bytes to make sure that the end of buffer
 * wrap is not happening in the middle of a descriptor.
 */
struct mon_bin_isodesc {
	int iso_status;
	unsigned int iso_off;
	unsigned int iso_len;
	u32 _pad;		/* Pad the record out to exactly 16 bytes */
};
/* Argument block for the MON_IOCX_GET/GETX ioctls: one event copy-out. */
struct mon_bin_get {
	struct mon_bin_hdr __user *hdr;	/* Can be 48 bytes or 64. */
	void __user *data;
	size_t alloc;		/* Length of data (can be zero) */
};
/* Argument block for the MON_IOCX_MFETCH ioctl: batch fetch/flush. */
struct mon_bin_mfetch {
	u32 __user *offvec;	/* Vector of events fetched */
	u32 nfetch;		/* Number of events to fetch (out: fetched) */
	u32 nflush;		/* Number of events to flush */
};
/* Having these two values same prevents wrapping of the mon_bin_hdr */
#define PKT_ALIGN	64
#define PKT_SIZE	64

#define PKT_SZ_API0	48	/* API 0 (2.6.20) size */
#define PKT_SZ_API1	64	/* API 1 size: extra fields */

#define ISODESC_MAX	128	/* Same number as usbfs allows, 2048 bytes. */

/* max number of USB bus supported */
#define MON_BIN_MAX_MINOR 128
/*
 * The buffer: map of used pages.
 */
struct mon_pgmap {
	struct page *pg;
	unsigned char *ptr;	/* XXX just use page_to_virt everywhere? */
};
/*
 * This gets associated with an open file struct.
 */
struct mon_reader_bin {
	/* The buffer: one per open. */
	spinlock_t b_lock;		/* Protect b_cnt, b_in */
	unsigned int b_size;		/* Current size of the buffer - bytes */
	unsigned int b_cnt;		/* Bytes used */
	unsigned int b_in, b_out;	/* Offsets into buffer - bytes */
	unsigned int b_read;		/* Amount of read data in curr. pkt. */
	struct mon_pgmap *b_vec;	/* The map array */
	wait_queue_head_t b_wait;	/* Wait for data here */

	struct mutex fetch_lock;	/* Protect b_read, b_out */
	int mmap_active;

	/* A list of these is needed for "bus 0". Some time later. */
	struct mon_reader r;

	/* Stats */
	/* NOTE(review): tail of this struct was lost in extraction; the
	 * closing brace and cnt_lost are restored from upstream usbmon. */
	unsigned int cnt_lost;
};
/* * This is a "chunked memcpy". It does not manipulate any counters.
*/ staticunsignedint mon_copy_to_buff(conststruct mon_reader_bin *this, unsignedint off, constunsignedchar *from, unsignedint length)
{ unsignedint step_len; unsignedchar *buf; unsignedint in_page;
/* * Copy data and advance pointers.
*/
buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
memcpy(buf, from, step_len); if ((off += step_len) >= this->b_size) off = 0;
from += step_len;
length -= step_len;
} return off;
}
/*
 * This is a little worse than the above because it's "chunked copy_to_user".
 * The return value is an error code, not an offset.
 *
 * Copies @length bytes out of the ring starting at @off into the user
 * buffer @to, chunk by chunk, wrapping at b_size.
 * Returns 0 on success, -EINVAL if copy_to_user faults.
 *
 * NOTE(review): the loop header and step_len computation were lost in
 * extraction (step_len was used uninitialized); restored from upstream.
 */
static int copy_from_buf(const struct mon_reader_bin *this, unsigned int off,
    char __user *to, int length)
{
	unsigned int step_len;
	unsigned char *buf;
	unsigned int in_page;

	while (length > 0) {
		/*
		 * Determine step_len: never cross a chunk boundary.
		 */
		step_len = length;
		in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
		if (in_page < step_len)
			step_len = in_page;

		/*
		 * Copy data and advance pointers.
		 */
		buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
		if (copy_to_user(to, buf, step_len))
			return -EINVAL;
		if ((off += step_len) >= this->b_size)
			off = 0;
		to += step_len;
		length -= step_len;
	}
	return 0;
}
/* * Allocate an (aligned) area in the buffer. * This is called under b_lock. * Returns ~0 on failure.
*/ staticunsignedint mon_buff_area_alloc(struct mon_reader_bin *rp, unsignedint size)
{ unsignedint offset;
/* * This is the same thing as mon_buff_area_alloc, only it does not allow * buffers to wrap. This is needed by applications which pass references * into mmap-ed buffers up their stacks (libpcap can do that). * * Currently, we always have the header stuck with the data, although * it is not strictly speaking necessary. * * When a buffer would wrap, we place a filler packet to mark the space.
*/ staticunsignedint mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp, unsignedint size)
{ unsignedint offset; unsignedint fill_size;
size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1); if (rp->b_cnt + size > rp->b_size) return ~0; if (rp->b_in + size > rp->b_size) { /* * This would wrap. Find if we still have space after * skipping to the end of the buffer. If we do, place * a filler packet and allocate a new packet.
*/
fill_size = rp->b_size - rp->b_in; if (rp->b_cnt + size + fill_size > rp->b_size) return ~0;
mon_buff_area_fill(rp, rp->b_in, fill_size);
/* * Return a few (kilo-)bytes to the head of the buffer. * This is used if a data fetch fails.
*/ staticvoid mon_buff_area_shrink(struct mon_reader_bin *rp, unsignedint size)
{
/* size &= ~(PKT_ALIGN-1); -- we're called with aligned size */
rp->b_cnt -= size; if (rp->b_in < size)
rp->b_in += rp->b_size;
rp->b_in -= size;
}
/* * This has to be called under both b_lock and fetch_lock, because * it accesses both b_cnt and b_out.
*/ staticvoid mon_buff_area_free(struct mon_reader_bin *rp, unsignedint size)
{
} else { /* If IOMMU coalescing occurred, we cannot trust sg_page */ if (urb->transfer_flags & URB_DMA_SG_COMBINED) {
*flag = 'D'; return length;
}
/* Copy up to the first non-addressable segment */
for_each_sg(urb->sg, sg, urb->num_sgs, i) { if (length == 0 || PageHighMem(sg_page(sg))) break;
this_len = min_t(unsignedint, sg->length, length);
offset = mon_copy_to_buff(rp, offset, sg_virt(sg),
this_len);
length -= this_len;
} if (i == 0)
*flag = 'D';
}
return length;
}
/* * This is the look-ahead pass in case of 'C Zi', when actual_length cannot * be used to determine the length of the whole contiguous buffer.
*/ staticunsignedint mon_bin_collate_isodesc(conststruct mon_reader_bin *rp, struct urb *urb, unsignedint ndesc)
{ struct usb_iso_packet_descriptor *fp; unsignedint length;
/* not an issue unless there's a subtle bug in a HCD somewhere */ if (length >= urb->transfer_buffer_length)
length = urb->transfer_buffer_length;
if (length >= rp->b_size/5)
length = rp->b_size/5;
if (usb_urb_dir_in(urb)) { if (ev_type == 'S') {
length = 0;
data_tag = '<';
} /* Cannot rely on endpoint number in case of control ep.0 */
dir = USB_DIR_IN;
} else { if (ev_type == 'C') {
length = 0;
data_tag = '>';
}
dir = 0;
}
/*
 * Extract an event from buffer and copy it to user space.
 * Wait if there is no event ready.
 * Returns zero or error.
 *
 * NOTE(review): in the extracted text this span fused mon_bin_get_event's
 * signature with the tail of a different function (it referenced an
 * undefined 'done'); the real body is restored from upstream usbmon.
 */
static int mon_bin_get_event(struct file *file, struct mon_reader_bin *rp,
    struct mon_bin_hdr __user *hdr, unsigned int hdrbytes,
    void __user *data, unsigned int nbytes)
{
	unsigned long flags;
	struct mon_bin_hdr *ep;
	size_t step_len;
	unsigned int offset;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	ep = MON_OFF2HDR(rp, rp->b_out);

	if (copy_to_user(hdr, ep, hdrbytes)) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	/* Data follows the header in the ring; cap at what the user asked. */
	step_len = min(ep->len_cap, nbytes);
	if ((offset = rp->b_out + PKT_SIZE) >= rp->b_size)
		offset = 0;

	if (copy_from_buf(rp, offset, data, step_len)) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	spin_lock_irqsave(&rp->b_lock, flags);
	mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
	spin_unlock_irqrestore(&rp->b_lock, flags);
	rp->b_read = 0;

	mutex_unlock(&rp->fetch_lock);
	return 0;
}
/* * Remove at most nevents from chunked buffer. * Returns the number of removed events.
*/ staticint mon_bin_flush(struct mon_reader_bin *rp, unsigned nevents)
{ unsignedlong flags; struct mon_bin_hdr *ep; int i;
mutex_lock(&rp->fetch_lock);
spin_lock_irqsave(&rp->b_lock, flags); for (i = 0; i < nevents; ++i) { if (MON_RING_EMPTY(rp)) break;
/*
 * Fetch at most max event offsets into the buffer and put them into vec.
 * The events are usually freed later with mon_bin_flush.
 * Return the effective number of events fetched.
 *
 * NOTE(review): the body after the locals was lost in extraction;
 * restored from upstream usbmon.
 */
static int mon_bin_fetch(struct file *file, struct mon_reader_bin *rp,
    u32 __user *vec, unsigned int max)
{
	unsigned int cur_out;
	unsigned int bytes, avail;
	unsigned int size;
	unsigned int nevents;
	struct mon_bin_hdr *ep;
	unsigned long flags;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	spin_lock_irqsave(&rp->b_lock, flags);
	avail = rp->b_cnt;
	spin_unlock_irqrestore(&rp->b_lock, flags);

	cur_out = rp->b_out;
	nevents = 0;
	bytes = 0;
	while (bytes < avail) {
		if (nevents >= max)
			break;

		ep = MON_OFF2HDR(rp, cur_out);
		if (put_user(cur_out, &vec[nevents])) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}

		nevents++;
		size = ep->len_cap + PKT_SIZE;
		size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
		if ((cur_out += size) >= rp->b_size)
			cur_out -= rp->b_size;
		bytes += size;
	}

	mutex_unlock(&rp->fetch_lock);
	return nevents;
}
/* * Count events. This is almost the same as the above mon_bin_fetch, * only we do not store offsets into user vector, and we have no limit.
*/ staticint mon_bin_queued(struct mon_reader_bin *rp)
{ unsignedint cur_out; unsignedint bytes, avail; unsignedint size; unsignedint nevents; struct mon_bin_hdr *ep; unsignedlong flags;
case MON_IOCQ_URB_LEN: /* * N.B. This only returns the size of data, without the header.
*/
spin_lock_irqsave(&rp->b_lock, flags); if (!MON_RING_EMPTY(rp)) {
ep = MON_OFF2HDR(rp, rp->b_out);
ret = ep->len_cap;
}
spin_unlock_irqrestore(&rp->b_lock, flags); break;
case MON_IOCQ_RING_SIZE:
mutex_lock(&rp->fetch_lock);
ret = rp->b_size;
mutex_unlock(&rp->fetch_lock); break;
case MON_IOCT_RING_SIZE: /* * Changing the buffer size will flush it's contents; the new * buffer is allocated before releasing the old one to be sure * the device will stay functional also in case of memory * pressure.
*/
{ int size; struct mon_pgmap *vec;
if (arg < BUFF_MIN || arg > BUFF_MAX) return -EINVAL;
/* * open and close: just keep track of how many times the device is * mapped, to use the proper memory allocation function.
*/ staticvoid mon_bin_vma_open(struct vm_area_struct *vma)
{ struct mon_reader_bin *rp = vma->vm_private_data; unsignedlong flags;
spin_lock_irqsave(&rp->b_lock, flags); while (MON_RING_EMPTY(rp)) {
spin_unlock_irqrestore(&rp->b_lock, flags);
if (file->f_flags & O_NONBLOCK) {
set_current_state(TASK_RUNNING);
remove_wait_queue(&rp->b_wait, &waita); return -EWOULDBLOCK; /* Same as EAGAIN in Linux */
}
schedule(); if (signal_pending(current)) {
remove_wait_queue(&rp->b_wait, &waita); return -EINTR;
}
set_current_state(TASK_INTERRUPTIBLE);
/*
 * NOTE(review): the following is German website boilerplate that was
 * accidentally captured along with this source; translated to English
 * and wrapped in a comment so it no longer breaks the file:
 * "The information on this website has been carefully compiled to the
 * best of our knowledge. However, no guarantee is given as to the
 * completeness, correctness, or quality of the information provided.
 * Note: the color syntax highlighting and the measurement are still
 * experimental."
 */