/*
 * Return a human-readable name for a UVC control query code, for use in
 * log messages. Unknown query codes map to an empty string.
 */
static const char *uvc_query_name(u8 query)
{
	switch (query) {
	case UVC_SET_CUR:
		return "SET_CUR";
	case UVC_GET_CUR:
		return "GET_CUR";
	case UVC_GET_MIN:
		return "GET_MIN";
	case UVC_GET_MAX:
		return "GET_MAX";
	case UVC_GET_RES:
		return "GET_RES";
	case UVC_GET_LEN:
		return "GET_LEN";
	case UVC_GET_INFO:
		return "GET_INFO";
	case UVC_GET_DEF:
		return "GET_DEF";
	default:
		return "";
	}
}
/*
 * Issue a UVC control query and translate failures into errno values.
 *
 * Returns 0 when the device answered with the expected number of bytes
 * (short compliant-enough answers are zero-padded and accepted, see the
 * quirk below). On a STALL (-EPIPE), the VC request error code control is
 * queried to map the device-reported error onto a meaningful errno.
 */
int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
		   u8 intfnum, u8 cs, void *data, u16 size)
{
	int ret;
	u8 error;
	u8 tmp;

	ret = __uvc_query_ctrl(dev, query, unit, intfnum, cs, data, size,
			       UVC_CTRL_CONTROL_TIMEOUT);
	if (likely(ret == size))
		return 0;

	/*
	 * Some devices return shorter USB control packets than expected if the
	 * returned value can fit in less bytes. Zero all the bytes that the
	 * device has not written.
	 *
	 * This quirk is applied to all controls, regardless of their data type.
	 * Most controls are little-endian integers, in which case the missing
	 * bytes become 0 MSBs. For other data types, a different heuristic
	 * could be implemented if a device is found needing it.
	 *
	 * We exclude UVC_GET_INFO from the quirk. UVC_GET_LEN does not need
	 * to be excluded because its size is always 1.
	 */
	if (ret > 0 && query != UVC_GET_INFO) {
		memset(data + ret, 0, size - ret);
		dev_warn_once(&dev->udev->dev,
			      "UVC non compliance: %s control %u on unit %u returned %d bytes when we expected %u.\n",
			      uvc_query_name(query), cs, unit, ret, size);
		return 0;
	}

	if (ret != -EPIPE) {
		dev_err(&dev->udev->dev,
			"Failed to query (%s) UVC control %u on unit %u: %d (exp. %u).\n",
			uvc_query_name(query), cs, unit, ret, size);
		return ret < 0 ? ret : -EPIPE;
	}

	/* Reuse data[0] to request the error code. */
	tmp = *(u8 *)data;

	ret = __uvc_query_ctrl(dev, UVC_GET_CUR, 0, intfnum,
			       UVC_VC_REQUEST_ERROR_CODE_CONTROL, data, 1,
			       UVC_CTRL_CONTROL_TIMEOUT);

	error = *(u8 *)data;
	*(u8 *)data = tmp;

	if (ret != 1) {
		dev_err_ratelimited(&dev->udev->dev,
				    "Failed to query (%s) UVC error code control %u on unit %u: %d (exp. 1).\n",
				    uvc_query_name(query), cs, unit, ret);
		return ret < 0 ? ret : -EPIPE;
	}

	switch (error) {
	case 0:
		/* Cannot happen - we received a STALL */
		return -EPIPE;
	case 1: /* Not ready */
		return -EBUSY;
	case 2: /* Wrong state */
		return -EACCES;
	case 3: /* Power */
		return -EREMOTE;
	case 4: /* Out of range */
		return -ERANGE;
	case 5: /* Invalid unit */
	case 6: /* Invalid control */
	case 7: /* Invalid Request */
		/*
		 * The firmware has not properly implemented
		 * the control or there has been a HW error.
		 */
		return -EIO;
	case 8: /* Invalid value within range */
		return -EINVAL;
	default: /* reserved or unknown */
		break;
	}

	/*
	 * The device stalled the request but reported a reserved or unknown
	 * error code: fall back to -EPIPE. (The original chunk fell off the
	 * end of this non-void function here, which is undefined behavior.)
	 */
	return -EPIPE;
}
/* * The response of the Elgato Cam Link 4K is incorrect: The second byte * contains bFormatIndex (instead of being the second byte of bmHint). * The first byte is always zero. The third byte is always 1. * * The UVC 1.5 class specification defines the first five bits in the * bmHint bitfield. The remaining bits are reserved and should be zero. * Therefore a valid bmHint will be less than 32. * * Latest Elgato Cam Link 4K firmware as of 2021-03-23 needs this fix. * MCU: 20.02.19, FPGA: 67
*/ if (usb_match_one_id(stream->dev->intf, &elgato_cam_link_4k) &&
ctrl->bmHint > 255) {
u8 corrected_format_index = ctrl->bmHint >> 8;
uvc_dbg(stream->dev, VIDEO, "Correct USB video probe response from {bmHint: 0x%04x, bFormatIndex: %u} to {bmHint: 0x%04x, bFormatIndex: %u}\n",
ctrl->bmHint, ctrl->bFormatIndex,
1, corrected_format_index);
ctrl->bmHint = 1;
ctrl->bFormatIndex = corrected_format_index;
}
for (i = 0; i < stream->nformats; ++i) { if (stream->formats[i].index == ctrl->bFormatIndex) {
format = &stream->formats[i]; break;
}
}
if (format == NULL) return;
for (i = 0; i < format->nframes; ++i) { if (format->frames[i].bFrameIndex == ctrl->bFrameIndex) {
frame = &format->frames[i]; break;
}
}
/* * The "TOSHIBA Web Camera - 5M" Chicony device (04f2:b50b) seems to * compute the bandwidth on 16 bits and erroneously sign-extend it to * 32 bits, resulting in a huge bandwidth value. Detect and fix that * condition by setting the 16 MSBs to 0 when they're all equal to 1.
*/ if ((ctrl->dwMaxPayloadTransferSize & 0xffff0000) == 0xffff0000)
ctrl->dwMaxPayloadTransferSize &= ~0xffff0000;
/* * Compute a bandwidth estimation by multiplying the frame * size by the number of video frames per second, divide the * result by the number of USB frames (or micro-frames for * high- and super-speed devices) per second and add the UVC * header size (assumed to be 12 bytes long).
*/
bandwidth = frame->wWidth * frame->wHeight / 8 * format->bpp;
bandwidth *= 10000000 / interval + 1;
bandwidth /= 1000; if (stream->dev->udev->speed >= USB_SPEED_HIGH)
bandwidth /= 8;
bandwidth += 12;
/* * The bandwidth estimate is too low for many cameras. Don't use * maximum packet sizes lower than 1024 bytes to try and work * around the problem. According to measurements done on two * different camera models, the value is high enough to get most * resolutions working while not preventing two simultaneous * VGA streams at 15 fps.
*/
bandwidth = max_t(u32, bandwidth, 1024);
ctrl->dwMaxPayloadTransferSize = bandwidth;
}
if (stream->intf->num_altsetting > 1 &&
ctrl->dwMaxPayloadTransferSize > stream->maxpsize) {
dev_warn_ratelimited(&stream->intf->dev, "UVC non compliance: the max payload transmission size (%u) exceeds the size of the ep max packet (%u). Using the max size.\n",
ctrl->dwMaxPayloadTransferSize,
stream->maxpsize);
ctrl->dwMaxPayloadTransferSize = stream->maxpsize;
}
}
static size_t uvc_video_ctrl_size(struct uvc_streaming *stream)
{
	/*
	 * Return the size of the video probe and commit controls, which depends
	 * on the protocol version.
	 */
	if (stream->dev->uvc_version < 0x0110)
		return 26;
	else if (stream->dev->uvc_version < 0x0150)
		return 34;
	else
		return 48;
}
/*
 * Query a video probe or commit control and decode the returned block
 * into @ctrl. @probe selects the probe (1) or commit (0) control, @query
 * is the UVC_GET_* request to issue.
 *
 * Returns 0 on success or a negative errno value on failure.
 */
static int uvc_get_video_ctrl(struct uvc_streaming *stream,
			      struct uvc_streaming_control *ctrl,
			      int probe, u8 query)
{
	u16 size = uvc_video_ctrl_size(stream);
	u8 *data;
	int ret;

	if ((stream->dev->quirks & UVC_QUIRK_PROBE_DEF) &&
	    query == UVC_GET_DEF)
		return -EIO;

	data = kmalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	/*
	 * NOTE(review): the query below and the field parsing further down
	 * were missing from this copy of the file (ret was tested while
	 * uninitialized and *ctrl was never filled); restored per the UVC
	 * probe/commit control wire layout.
	 */
	ret = __uvc_query_ctrl(stream->dev, query, 0, stream->intfnum,
			       probe ? UVC_VS_PROBE_CONTROL : UVC_VS_COMMIT_CONTROL,
			       data, size, uvc_timeout_param);

	if ((query == UVC_GET_MIN || query == UVC_GET_MAX) && ret == 2) {
		/*
		 * Some cameras, mostly based on Bison Electronics chipsets,
		 * answer a GET_MIN or GET_MAX request with the wCompQuality
		 * field only.
		 */
		uvc_warn_once(stream->dev, UVC_WARN_MINMAX, "UVC non "
			      "compliance - GET_MIN/MAX(PROBE) incorrectly "
			      "supported. Enabling workaround.\n");
		memset(ctrl, 0, sizeof(*ctrl));
		ctrl->wCompQuality = le16_to_cpup((__le16 *)data);
		ret = 0;
		goto out;
	} else if (query == UVC_GET_DEF && probe == 1 && ret != size) {
		/*
		 * Many cameras don't support the GET_DEF request on their
		 * video probe control. Warn once and return, the caller will
		 * fall back to GET_CUR.
		 */
		uvc_warn_once(stream->dev, UVC_WARN_PROBE_DEF, "UVC non "
			      "compliance - GET_DEF(PROBE) not supported. "
			      "Enabling workaround.\n");
		ret = -EIO;
		goto out;
	} else if (ret != size) {
		dev_err(&stream->intf->dev,
			"Failed to query (%s) UVC %s control : %d (exp. %u).\n",
			uvc_query_name(query), probe ? "probe" : "commit",
			ret, size);
		ret = (ret == -EPROTO) ? -EPROTO : -EIO;
		goto out;
	}

	/* Decode the little-endian wire format into the host structure. */
	ctrl->bmHint = le16_to_cpup((__le16 *)&data[0]);
	ctrl->bFormatIndex = data[2];
	ctrl->bFrameIndex = data[3];
	ctrl->dwFrameInterval = le32_to_cpup((__le32 *)&data[4]);
	ctrl->wKeyFrameRate = le16_to_cpup((__le16 *)&data[8]);
	ctrl->wPFrameRate = le16_to_cpup((__le16 *)&data[10]);
	ctrl->wCompQuality = le16_to_cpup((__le16 *)&data[12]);
	ctrl->wCompWindowSize = le16_to_cpup((__le16 *)&data[14]);
	ctrl->wDelay = le16_to_cpup((__le16 *)&data[16]);
	ctrl->dwMaxVideoFrameSize = get_unaligned_le32(&data[18]);
	ctrl->dwMaxPayloadTransferSize = get_unaligned_le32(&data[22]);

	/* The extra fields only exist in UVC >= 1.1 sized control blocks. */
	if (size >= 34) {
		ctrl->dwClockFrequency = get_unaligned_le32(&data[26]);
		ctrl->bmFramingInfo = data[30];
		ctrl->bPreferedVersion = data[31];
		ctrl->bMinVersion = data[32];
		ctrl->bMaxVersion = data[33];
	}

	/*
	 * Some broken devices return null or wrong dwMaxVideoFrameSize and
	 * dwMaxPayloadTransferSize fields. Try to get the value from the
	 * format and frame descriptors.
	 */
	uvc_fixup_video_ctrl(stream, ctrl);
	ret = 0;

out:
	kfree(data);
	return ret;
}
staticint uvc_set_video_ctrl(struct uvc_streaming *stream, struct uvc_streaming_control *ctrl, int probe)
{
u16 size = uvc_video_ctrl_size(stream);
u8 *data; int ret;
data = kzalloc(size, GFP_KERNEL); if (data == NULL) return -ENOMEM;
ret = __uvc_query_ctrl(stream->dev, UVC_SET_CUR, 0, stream->intfnum,
probe ? UVC_VS_PROBE_CONTROL : UVC_VS_COMMIT_CONTROL, data,
size, uvc_timeout_param); if (ret != size) {
dev_err(&stream->intf->dev, "Failed to set UVC %s control : %d (exp. %u).\n",
probe ? "probe" : "commit", ret, size);
ret = -EIO;
}
kfree(data); return ret;
}
int uvc_probe_video(struct uvc_streaming *stream, struct uvc_streaming_control *probe)
{ struct uvc_streaming_control probe_min, probe_max; unsignedint i; int ret;
/* * Perform probing. The device should adjust the requested values * according to its capabilities. However, some devices, namely the * first generation UVC Logitech webcams, don't implement the Video * Probe control properly, and just return the needed bandwidth. For * that reason, if the needed bandwidth exceeds the maximum available * bandwidth, try to lower the quality.
*/
ret = uvc_set_video_ctrl(stream, probe, 1); if (ret < 0) goto done;
/* Get the minimum and maximum values for compression settings. */ if (!(stream->dev->quirks & UVC_QUIRK_PROBE_MINMAX)) {
ret = uvc_get_video_ctrl(stream, &probe_min, 1, UVC_GET_MIN); if (ret < 0) goto done;
ret = uvc_get_video_ctrl(stream, &probe_max, 1, UVC_GET_MAX); if (ret < 0) goto done;
probe->wCompQuality = probe_max.wCompQuality;
}
for (i = 0; i < 2; ++i) {
ret = uvc_set_video_ctrl(stream, probe, 1); if (ret < 0) goto done;
ret = uvc_get_video_ctrl(stream, probe, 1, UVC_GET_CUR); if (ret < 0) goto done;
if (stream->intf->num_altsetting == 1) break;
if (probe->dwMaxPayloadTransferSize <= stream->maxpsize) break;
if (stream->dev->quirks & UVC_QUIRK_PROBE_MINMAX) {
ret = -ENOSPC; goto done;
}
/* * If we write new data on the position where we had the last * overflow, remove the overflow pointer. There is no SOF overflow * in the whole circular buffer.
*/ if (clock->head == clock->last_sof_overflow)
clock->last_sof_overflow = -1;
spin_lock_irqsave(&clock->lock, flags);
if (clock->count > 0 && clock->last_sof > sample->dev_sof) { /* * Remove data from the circular buffer that is older than the * last SOF overflow. We only support one SOF overflow per * circular buffer.
*/ if (clock->last_sof_overflow != -1)
clock->count = (clock->head - clock->last_sof_overflow
+ clock->size) % clock->size;
clock->last_sof_overflow = clock->head;
}
/* Check for invalid headers. */ if (len < header_size) return;
/* * Extract the timestamps: * * - store the frame PTS in the buffer structure * - if the SCR field is present, retrieve the host SOF counter and * kernel timestamps and store them with the SCR STC and SOF fields * in the ring buffer
*/ if (has_pts && buf != NULL)
buf->pts = get_unaligned_le32(&data[2]);
if (!has_scr) return;
/* * To limit the amount of data, drop SCRs with an SOF identical to the * previous one. This filtering is also needed to support UVC 1.5, where * all the data packets of the same frame contains the same SOF. In that * case only the first one will match the host_sof.
*/
sample.dev_sof = get_unaligned_le16(&data[header_size - 2]); if (sample.dev_sof == stream->clock.last_sof) return;
/* * STC (Source Time Clock) is the clock used by the camera. The UVC 1.5 * standard states that it "must be captured when the first video data * of a video frame is put on the USB bus". This is generally understood * as requiring devices to clear the payload header's SCR bit before * the first packet containing video data. * * Most vendors follow that interpretation, but some (namely SunplusIT * on some devices) always set the `UVC_STREAM_SCR` bit, fill the SCR * field with 0's,and expect that the driver only processes the SCR if * there is data in the packet. * * Ignore all the hardware timestamp information if we haven't received * any data for this frame yet, the packet contains no data, and both * STC and SOF are zero. This heuristics should be safe on compliant * devices. This should be safe with compliant devices, as in the very * unlikely case where a UVC 1.1 device would send timing information * only before the first packet containing data, and both STC and SOF * happen to be zero for a particular frame, we would only miss one * clock sample from many and the clock recovery algorithm wouldn't * suffer from this condition.
*/ if (buf && buf->bytesused == 0 && len == header_size &&
sample.dev_stc == 0 && sample.dev_sof == 0) return;
/* * On some devices, like the Logitech C922, the device SOF does not run * at a stable rate of 1kHz. For those devices use the host SOF instead. * In the tests performed so far, this improves the timestamp precision. * This is probably explained by a small packet handling jitter from the * host, but the exact reason hasn't been fully determined.
*/ if (stream->dev->quirks & UVC_QUIRK_INVALID_DEVICE_SOF)
sample.dev_sof = sample.host_sof;
sample.host_time = uvc_video_get_time();
/* * The UVC specification allows device implementations that can't obtain * the USB frame number to keep their own frame counters as long as they * match the size and frequency of the frame number associated with USB * SOF tokens. The SOF values sent by such devices differ from the USB * SOF tokens by a fixed offset that needs to be estimated and accounted * for to make timestamp recovery as accurate as possible. * * The offset is estimated the first time a device SOF value is received * as the difference between the host and device SOF values. As the two * SOF values can differ slightly due to transmission delays, consider * that the offset is null if the difference is not higher than 10 ms * (negative differences can not happen and are thus considered as an * offset). The video commit control wDelay field should be used to * compute a dynamic threshold instead of using a fixed 10 ms value, but * devices don't report reliable wDelay values. * * See uvc_video_clock_host_sof() for an explanation regarding why only * the 8 LSBs of the delta are kept.
*/ if (stream->clock.sof_offset == (u16)-1) {
u16 delta_sof = (sample.host_sof - sample.dev_sof) & 255; if (delta_sof >= 10)
stream->clock.sof_offset = delta_sof; else
stream->clock.sof_offset = 0;
}
/* * uvc_video_clock_host_sof - Return the host SOF value for a clock sample * * Host SOF counters reported by usb_get_current_frame_number() usually don't * cover the whole 11-bits SOF range (0-2047) but are limited to the HCI frame * schedule window. They can be limited to 8, 9 or 10 bits depending on the host * controller and its configuration. * * We thus need to recover the SOF value corresponding to the host frame number. * As the device and host frame numbers are sampled in a short interval, the * difference between their values should be equal to a small delta plus an * integer multiple of 256 caused by the host frame number limited precision. * * To obtain the recovered host SOF value, compute the small delta by masking * the high bits of the host frame counter and device SOF difference and add it * to the device SOF value.
*/ static u16 uvc_video_clock_host_sof(conststruct uvc_clock_sample *sample)
{ /* The delta value can be negative. */
s8 delta_sof;
/* * uvc_video_clock_update - Update the buffer timestamp * * This function converts the buffer PTS timestamp to the host clock domain by * going through the USB SOF clock domain and stores the result in the V4L2 * buffer timestamp field. * * The relationship between the device clock and the host clock isn't known. * However, the device and the host share the common USB SOF clock which can be * used to recover that relationship. * * The relationship between the device clock and the USB SOF clock is considered * to be linear over the clock samples sliding window and is given by * * SOF = m * PTS + p * * Several methods to compute the slope (m) and intercept (p) can be used. As * the clock drift should be small compared to the sliding window size, we * assume that the line that goes through the points at both ends of the window * is a good approximation. Naming those points P1 and P2, we get * * SOF = (SOF2 - SOF1) / (STC2 - STC1) * PTS * + (SOF1 * STC2 - SOF2 * STC1) / (STC2 - STC1) * * or * * SOF = ((SOF2 - SOF1) * PTS + SOF1 * STC2 - SOF2 * STC1) / (STC2 - STC1) (1) * * to avoid losing precision in the division. Similarly, the host timestamp is * computed with * * TS = ((TS2 - TS1) * SOF + TS1 * SOF2 - TS2 * SOF1) / (SOF2 - SOF1) (2) * * SOF values are coded on 11 bits by USB. We extend their precision with 16 * decimal bits, leading to a 11.16 coding. * * TODO: To avoid surprises with device clock values, PTS/STC timestamps should * be normalized using the nominal device clock frequency reported through the * UVC descriptors. * * Both the PTS/STC and SOF counters roll over, after a fixed but device * specific amount of time for PTS/STC and after 2048ms for SOF. As long as the * sliding window size is smaller than the rollover period, differences computed * on unsigned integers will produce the correct result. However, the p term in * the linear relations will be miscomputed. 
* * To fix the issue, we subtract a constant from the PTS and STC values to bring * PTS to half the 32 bit STC range. The sliding window STC values then fit into * the 32 bit range without any rollover. * * Similarly, we add 2048 to the device SOF values to make sure that the SOF * computed by (1) will never be smaller than 0. This offset is then compensated * by adding 2048 to the SOF values used in (2). However, this doesn't prevent * rollovers between (1) and (2): the SOF value computed by (1) can be slightly * lower than 4096, and the host SOF counters can have rolled over to 2048. This * case is handled by subtracting 2048 from the SOF value if it exceeds the host * SOF value at the end of the sliding window. * * Finally we subtract a constant from the host timestamps to bring the first * timestamp of the sliding window to 1s.
*/ void uvc_video_clock_update(struct uvc_streaming *stream, struct vb2_v4l2_buffer *vbuf, struct uvc_buffer *buf)
{ struct uvc_clock *clock = &stream->clock; struct uvc_clock_sample *first; struct uvc_clock_sample *last; unsignedlong flags;
u64 timestamp;
u32 delta_stc;
u32 y1;
u32 x1, x2;
u32 mean;
u32 sof;
u64 y, y2;
if (!uvc_hw_timestamps_param) return;
/* * We will get called from __vb2_queue_cancel() if there are buffers * done but not dequeued by the user, but the sample array has already * been released at that time. Just bail out in that case.
*/ if (!clock->samples) return;
spin_lock_irqsave(&clock->lock, flags);
if (clock->count < 2) goto done;
first = &clock->samples[(clock->head - clock->count + clock->size) % clock->size];
last = &clock->samples[(clock->head - 1 + clock->size) % clock->size];
/* First step, PTS to SOF conversion. */
delta_stc = buf->pts - (1UL << 31);
x1 = first->dev_stc - delta_stc;
x2 = last->dev_stc - delta_stc; if (x1 == x2) goto done;
/* * Have at least 1/4 of a second of timestamps before we * try to do any calculation. Otherwise we do not have enough * precision. This value was determined by running Android CTS * on different devices. * * dev_sof runs at 1KHz, and we have a fixed point precision of * 16 bits.
*/ if ((y2 - y1) < ((1000 / 4) << 16)) goto done;
/* * Interpolated and host SOF timestamps can wrap around at slightly * different times. Handle this by adding or removing 2048 to or from * the computed SOF value to keep it close to the SOF samples mean * value.
*/
mean = (x1 + x2) / 2; if (mean - (1024 << 16) > sof)
sof += 2048 << 16; elseif (sof > mean + (1024 << 16))
sof -= 2048 << 16;
/* Is PTS constant through the whole frame ? */ if (has_pts && stream->stats.frame.nb_pts) { if (stream->stats.frame.pts != pts) {
stream->stats.frame.nb_pts_diffs++;
stream->stats.frame.last_pts_diff =
stream->stats.frame.nb_packets;
}
}
if (has_pts) {
stream->stats.frame.nb_pts++;
stream->stats.frame.pts = pts;
}
/* * Do all frames have a PTS in their first non-empty packet, or before * their first empty packet ?
*/ if (stream->stats.frame.size == 0) { if (len > header_size)
stream->stats.frame.has_initial_pts = has_pts; if (len == header_size && has_pts)
stream->stats.frame.has_early_pts = true;
}
/* Do the SCR.STC and SCR.SOF fields vary through the frame ? */ if (has_scr && stream->stats.frame.nb_scr) { if (stream->stats.frame.scr_stc != scr_stc)
stream->stats.frame.nb_scr_diffs++;
}
if (has_scr) { /* Expand the SOF counter to 32 bits and store its value. */ if (stream->stats.stream.nb_frames > 0 ||
stream->stats.frame.nb_scr > 0)
stream->stats.stream.scr_sof_count +=
(scr_sof - stream->stats.stream.scr_sof) % 2048;
stream->stats.stream.scr_sof = scr_sof;
if (scr_sof < stream->stats.stream.min_sof)
stream->stats.stream.min_sof = scr_sof; if (scr_sof > stream->stats.stream.max_sof)
stream->stats.stream.max_sof = scr_sof;
}
/* Record the first non-empty packet number. */ if (stream->stats.frame.size == 0 && len > header_size)
stream->stats.frame.first_data = stream->stats.frame.nb_packets;
/* Update the frame size. */
stream->stats.frame.size += len - header_size;
/* Update the packets counters. */
stream->stats.frame.nb_packets++; if (len <= header_size)
stream->stats.frame.nb_empty++;
if (data[1] & UVC_STREAM_ERR)
stream->stats.frame.nb_errors++;
}
if (frame->has_early_pts)
stream->stats.stream.nb_pts_early++; if (frame->has_initial_pts)
stream->stats.stream.nb_pts_initial++; if (frame->last_pts_diff <= frame->first_data)
stream->stats.stream.nb_pts_constant++; if (frame->nb_scr >= frame->nb_packets - frame->nb_empty)
stream->stats.stream.nb_scr_count_ok++; if (frame->nb_scr_diffs + 1 == frame->nb_scr)
stream->stats.stream.nb_scr_diffs_ok++;
/* * Compute the SCR.SOF frequency estimate. At the nominal 1kHz SOF * frequency this will not overflow before more than 1h.
*/
duration = ktime_ms_delta(stream->stats.stream.stop_ts,
stream->stats.stream.start_ts); if (duration != 0)
scr_sof_freq = stream->stats.stream.scr_sof_count * 1000
/ duration; else
scr_sof_freq = 0;
/* ------------------------------------------------------------------------ * Video codecs
*/
/* * Video payload decoding is handled by uvc_video_decode_start(), * uvc_video_decode_data() and uvc_video_decode_end(). * * uvc_video_decode_start is called with URB data at the start of a bulk or * isochronous payload. It processes header data and returns the header size * in bytes if successful. If an error occurs, it returns a negative error * code. The following error codes have special meanings. * * - EAGAIN informs the caller that the current video buffer should be marked * as done, and that the function should be called again with the same data * and a new video buffer. This is used when end of frame conditions can be * reliably detected at the beginning of the next frame only. * * If an error other than -EAGAIN is returned, the caller will drop the current * payload. No call to uvc_video_decode_data and uvc_video_decode_end will be * made until the next payload. -ENODATA can be used to drop the current * payload if no other error code is appropriate. * * uvc_video_decode_data is called for every URB with URB data. It copies the * data to the video buffer. * * uvc_video_decode_end is called with header data at the end of a bulk or * isochronous payload. It performs any additional header data processing and * returns 0 or a negative error code if an error occurred. As header data have * already been processed by uvc_video_decode_start, this functions isn't * required to perform sanity checks a second time. * * For isochronous transfers where a payload is always transferred in a single * URB, the three functions will be called in a row. * * To let the decoder process header data and update its internal state even * when no video buffer is available, uvc_video_decode_start must be prepared * to be called with a NULL buf parameter. uvc_video_decode_data and * uvc_video_decode_end will never be called with a NULL buffer.
*/ staticint uvc_video_decode_start(struct uvc_streaming *stream, struct uvc_buffer *buf, const u8 *data, int len)
{
u8 header_len;
u8 fid;
/* * Sanity checks: * - packet must be at least 2 bytes long * - bHeaderLength value must be at least 2 bytes (see above) * - bHeaderLength value can't be larger than the packet size.
*/ if (len < 2 || data[0] < 2 || data[0] > len) {
stream->stats.frame.nb_invalid++; return -EINVAL;
}
/* * Increase the sequence number regardless of any buffer states, so * that discontinuous sequence numbers always indicate lost frames.
*/ if (stream->last_fid != fid) {
stream->sequence++; if (stream->sequence)
uvc_video_stats_update(stream);
}
/* * Store the payload FID bit and return immediately when the buffer is * NULL.
*/ if (buf == NULL) {
stream->last_fid = fid; return -ENODATA;
}
/* Mark the buffer as bad if the error bit is set. */ if (data[1] & UVC_STREAM_ERR) {
uvc_dbg(stream->dev, FRAME, "Marking buffer as bad (error bit set)\n");
buf->error = 1;
}
/* * Synchronize to the input stream by waiting for the FID bit to be * toggled when the buffer state is not UVC_BUF_STATE_ACTIVE. * stream->last_fid is initialized to -1, so the first isochronous * frame will always be in sync. * * If the device doesn't toggle the FID bit, invert stream->last_fid * when the EOF bit is set to force synchronisation on the next packet.
*/ if (buf->state != UVC_BUF_STATE_ACTIVE) { if (fid == stream->last_fid) {
uvc_dbg(stream->dev, FRAME, "Dropping payload (out of sync)\n"); if ((stream->dev->quirks & UVC_QUIRK_STREAM_NO_FID) &&
(data[1] & UVC_STREAM_EOF))
stream->last_fid ^= UVC_STREAM_FID; return -ENODATA;
}
/* * Mark the buffer as done if we're at the beginning of a new frame. * End of frame detection is better implemented by checking the EOF * bit (FID bit toggling is delayed by one frame compared to the EOF * bit), but some devices don't set the bit at end of frame (and the * last payload can be lost anyway). We thus must check if the FID has * been toggled. * * stream->last_fid is initialized to -1, so the first isochronous * frame will never trigger an end of frame detection. * * Empty buffers (bytesused == 0) don't trigger end of frame detection * as it doesn't make sense to return an empty buffer. This also * avoids detecting end of frame conditions at FID toggling if the * previous payload had the EOF bit set.
*/ if (fid != stream->last_fid && buf->bytesused != 0) {
uvc_dbg(stream->dev, FRAME, "Frame complete (FID bit toggled)\n");
buf->state = UVC_BUF_STATE_READY; return -EAGAIN;
}
/* * Some cameras, when running two parallel streams (one MJPEG alongside * another non-MJPEG stream), are known to lose the EOF packet for a frame. * We can detect the end of a frame by checking for a new SOI marker, as * the SOI always lies on the packet boundary between two frames for * these devices.
*/ if (stream->dev->quirks & UVC_QUIRK_MJPEG_NO_EOF &&
(stream->cur_format->fcc == V4L2_PIX_FMT_MJPEG ||
stream->cur_format->fcc == V4L2_PIX_FMT_JPEG)) { const u8 *packet = data + header_len;
/*
 * uvc_video_copy_data_work: Asynchronous memcpy processing
 *
 * Copy URB data to video buffers in process context, releasing buffer
 * references and requeuing the URB when done.
 *
 * (The comment previously named this uvc_video_decode_data_work, which does
 * not match the function below.)
 */
static void uvc_video_copy_data_work(struct work_struct *work)
{
	struct uvc_urb *uvc_urb = container_of(work, struct uvc_urb, work);
	unsigned int i;
	int ret;

	for (i = 0; i < uvc_urb->async_operations; i++) {
		struct uvc_copy_op *op = &uvc_urb->copy_operations[i];

		memcpy(op->dst, op->src, op->len);

		/* Release reference taken on this buffer. */
		uvc_queue_buffer_release(op->buf);
	}

	ret = usb_submit_urb(uvc_urb->urb, GFP_KERNEL);
	if (ret < 0)
		dev_err(&uvc_urb->stream->intf->dev,
			"Failed to resubmit video URB (%d).\n", ret);
}
/* Complete the current frame if the buffer size was exceeded. */ if (len > maxlen) {
uvc_dbg(uvc_urb->stream->dev, FRAME, "Frame complete (overflow)\n");
buf->error = 1;
buf->state = UVC_BUF_STATE_READY;
}
uvc_urb->async_operations++;
}
/*
 * Process the payload header at the end of a payload: mark the buffer as
 * ready when the EOF bit is set and data has been received, and toggle the
 * stored FID for devices that never toggle it themselves (STREAM_NO_FID
 * quirk) so the next payload resynchronizes correctly.
 */
static void uvc_video_decode_end(struct uvc_streaming *stream,
				 struct uvc_buffer *buf, const u8 *data,
				 int len)
{
	/* Mark the buffer as done if the EOF marker is set. */
	if (data[1] & UVC_STREAM_EOF && buf->bytesused != 0) {
		uvc_dbg(stream->dev, FRAME, "Frame complete (EOF found)\n");
		if (data[0] == len)
			uvc_dbg(stream->dev, FRAME, "EOF in empty payload\n");
		buf->state = UVC_BUF_STATE_READY;
		if (stream->dev->quirks & UVC_QUIRK_STREAM_NO_FID)
			stream->last_fid ^= UVC_STREAM_FID;
	}
}
/*
 * Video payload encoding is handled by uvc_video_encode_header() and
 * uvc_video_encode_data(). Only bulk transfers are currently supported.
 *
 * uvc_video_encode_header is called at the start of a payload. It adds header
 * data to the transfer buffer and returns the header size. As the only known
 * UVC output device transfers a whole frame in a single payload, the EOF bit
 * is always set in the header.
 *
 * uvc_video_encode_data is called for every URB and copies the data from the
 * video buffer to the transfer buffer.
 */
static int uvc_video_encode_header(struct uvc_streaming *stream,
				   struct uvc_buffer *buf, u8 *data, int len)
{
	data[0] = 2;	/* Header length */
	data[1] = UVC_STREAM_EOH | UVC_STREAM_EOF
		| (stream->last_fid & UVC_STREAM_FID);
	return 2;
}
/* * Additionally to the payload headers we also want to provide the user with USB * Frame Numbers and system time values. The resulting buffer is thus composed * of blocks, containing a 64-bit timestamp in nanoseconds, a 16-bit USB Frame * Number, and a copy of the payload header. * * Ideally we want to capture all payload headers for each frame. However, their * number is unknown and unbound. We thus drop headers that contain no vendor * data and that either contain no SCR value or an SCR value identical to the * previous header.
*/ staticvoid uvc_video_decode_meta(struct uvc_streaming *stream, struct uvc_buffer *meta_buf, const u8 *mem, unsignedint length)
{ struct uvc_meta_buf *meta;
size_t len_std = 2; bool has_pts, has_scr; unsignedlong flags; unsignedint sof;
ktime_t time; const u8 *scr;
for (i = 0; i < urb->number_of_packets; ++i) { if (urb->iso_frame_desc[i].status < 0) {
uvc_dbg(stream->dev, FRAME, "USB isochronous frame lost (%d)\n",
urb->iso_frame_desc[i].status); /* Mark the buffer as faulty. */ if (buf != NULL)
buf->error = 1; continue;
}
/* Decode the payload header. */
mem = urb->transfer_buffer + urb->iso_frame_desc[i].offset; do {
ret = uvc_video_decode_start(stream, buf, mem,
urb->iso_frame_desc[i].actual_length); if (ret == -EAGAIN)
uvc_video_next_buffers(stream, &buf, &meta_buf);
} while (ret == -EAGAIN);
/* * Ignore ZLPs if they're not part of a frame, otherwise process them * to trigger the end of payload detection.
*/ if (urb->actual_length == 0 && stream->bulk.header_size == 0) return;
mem = urb->transfer_buffer;
len = urb->actual_length;
stream->bulk.payload_size += len;
/* * If the URB is the first of its payload, decode and save the * header.
*/ if (stream->bulk.header_size == 0 && !stream->bulk.skip_payload) { do {
ret = uvc_video_decode_start(stream, buf, mem, len); if (ret == -EAGAIN)
uvc_video_next_buffers(stream, &buf, &meta_buf);
} while (ret == -EAGAIN);
/* If an error occurred skip the rest of the payload. */ if (ret < 0 || buf == NULL) {
stream->bulk.skip_payload = 1;
} else {
memcpy(stream->bulk.header, mem, ret);
stream->bulk.header_size = ret;
/* * The buffer queue might have been cancelled while a bulk transfer * was in progress, so we can reach here with buf equal to NULL. Make * sure buf is never dereferenced if NULL.
*/
/* Prepare video data for processing. */ if (!stream->bulk.skip_payload && buf != NULL)
uvc_video_decode_data(uvc_urb, buf, mem, len);
/* * Detect the payload end by a URB smaller than the maximum size (or * a payload size equal to the maximum) and process the header again.
*/ if (urb->actual_length < urb->transfer_buffer_length ||
stream->bulk.payload_size >= stream->bulk.max_payload_size) { if (!stream->bulk.skip_payload && buf != NULL) {
uvc_video_decode_end(stream, buf, stream->bulk.header,
stream->bulk.payload_size); if (buf->state == UVC_BUF_STATE_READY)
uvc_video_next_buffers(stream, &buf, &meta_buf);
}
/*
 * NOTE(review): fragment of the bulk-mode URB *encode* path (output
 * direction). The function signature is missing from this chunk; mem, len
 * and ret below are its locals. The tail of the function is also missing.
 */
u8 *mem = urb->transfer_buffer; int len = stream->urb_size, ret;
if (buf == NULL) {
/* No buffer available: queue a zero-length URB. */
urb->transfer_buffer_length = 0; return;
}
/* If the URB is the first of its payload, add the header. */ if (stream->bulk.header_size == 0) {
ret = uvc_video_encode_header(stream, buf, mem, len);
stream->bulk.header_size = ret;
stream->bulk.payload_size += ret;
/* Skip past the header just written before encoding the video data. */
mem += ret;
len -= ret;
}
/* Process video data. */
ret = uvc_video_encode_data(stream, buf, mem, len);
/*
 * NOTE(review): fragment of the URB completion handler. The switch on
 * urb->status that these case labels belong to begins above this chunk.
 */
default:
dev_warn(&stream->intf->dev, "Non-zero status (%d) in video completion handler.\n",
urb->status);
fallthrough; case -ENOENT: /* usb_poison_urb() called. */ if (stream->frozen) return;
fallthrough; case -ECONNRESET: /* usb_unlink_urb() called. */ case -ESHUTDOWN: /* The endpoint is being disabled. */
uvc_queue_cancel(queue, urb->status == -ESHUTDOWN); if (vb2_qmeta)
uvc_queue_cancel(qmeta, urb->status == -ESHUTDOWN); return;
}
buf = uvc_queue_get_current_buffer(queue);
/* Pick up the first queued metadata buffer, if a metadata queue exists. */
if (vb2_qmeta) {
spin_lock_irqsave(&qmeta->irqlock, flags); if (!list_empty(&qmeta->irqqueue))
buf_meta = list_first_entry(&qmeta->irqqueue, struct uvc_buffer, queue);
spin_unlock_irqrestore(&qmeta->irqlock, flags);
}
/* Re-initialise the URB async work. */
uvc_urb->async_operations = 0;
/* * Process the URB headers, and optionally queue expensive memcpy tasks * to be deferred to a work queue.
*/
stream->decode(uvc_urb, buf, buf_meta);
/* If no async work is needed, resubmit the URB immediately. */ if (!uvc_urb->async_operations) {
ret = usb_submit_urb(uvc_urb->urb, GFP_ATOMIC); if (ret < 0)
dev_err(&stream->intf->dev, "Failed to resubmit video URB (%d).\n", ret); return;
}
/* * Allocate transfer buffers. This function can be called with buffers * already allocated when resuming from suspend, in which case it will * return without touching the buffers. * * Limit the buffer size to UVC_MAX_PACKETS bulk/isochronous packets. If the * system is too low on memory try successively smaller numbers of packets * until allocation succeeds. * * Return the number of allocated packets on success or 0 when out of memory.
*/ staticint uvc_alloc_urb_buffers(struct uvc_streaming *stream, unsignedint size, unsignedint psize, gfp_t gfp_flags)
{ unsignedint npackets; unsignedint i;
/* Buffers are already allocated, bail out. */ if (stream->urb_size) return stream->urb_size / psize;
/* * Compute the number of packets. Bulk endpoints might transfer UVC * payloads across multiple URBs.
*/
npackets = DIV_ROUND_UP(size, psize); if (npackets > UVC_MAX_PACKETS)
npackets = UVC_MAX_PACKETS;
/* Retry allocations until one succeed. */ for (; npackets > 1; npackets /= 2) {
stream->urb_size = psize * npackets;
for (i = 0; i < UVC_URBS; ++i) { struct uvc_urb *uvc_urb = &stream->uvc_urb[i];
if (!uvc_alloc_urb_buffer(stream, uvc_urb, gfp_flags)) {
uvc_free_urb_buffers(stream); break;
}
uvc_urb->stream = stream;
}
if (i == UVC_URBS) {
uvc_dbg(stream->dev, VIDEO, "Allocated %u URB buffers of %ux%u bytes each\n",
UVC_URBS, npackets, psize); return npackets;
}
}
uvc_dbg(stream->dev, VIDEO, "Failed to allocate URB buffers (%u bytes per packet)\n",
psize); return 0;
}
/* * Uninitialize isochronous/bulk URBs and free transfer buffers.
*/ staticvoid uvc_video_stop_transfer(struct uvc_streaming *stream, int free_buffers)
{ struct uvc_urb *uvc_urb;
/*
 * NOTE(review): this definition is truncated in this chunk — the tail
 * (URB kill/free and the use of the free_buffers argument) and the
 * closing brace are missing; "staticvoid" is an extraction artifact
 * for "static void". Restore from the original source.
 */
/* Close out streaming statistics before tearing the URBs down. */
uvc_video_stats_stop(stream);
/* * We must poison the URBs rather than kill them to ensure that even * after the completion handler returns, any asynchronous workqueues * will be prevented from resubmitting the URBs.
*/
for_each_uvc_urb(uvc_urb, stream)
usb_poison_urb(uvc_urb->urb);
/*
 * Compute the maximum number of bytes per interval for an endpoint.
 */
u16 uvc_endpoint_max_bpi(struct usb_device *dev, struct usb_host_endpoint *ep)
{
	u16 bytes;

	/*
	 * SuperSpeed and SuperSpeed Plus endpoints report their per-interval
	 * bandwidth directly in the endpoint companion descriptor.
	 */
	if (dev->speed == USB_SPEED_SUPER ||
	    dev->speed == USB_SPEED_SUPER_PLUS)
		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);

	/*
	 * For all other speeds, derive it from the maximum packet size and
	 * the high-bandwidth transaction multiplier.
	 */
	bytes = usb_endpoint_maxp(&ep->desc);
	bytes *= usb_endpoint_maxp_mult(&ep->desc);

	return bytes;
}
/* * Initialize isochronous URBs and allocate transfer buffers. The packet size * is given by the endpoint.
*/ staticint uvc_init_video_isoc(struct uvc_streaming *stream, struct usb_host_endpoint *ep, gfp_t gfp_flags)
{ struct urb *urb; struct uvc_urb *uvc_urb; unsignedint npackets, i;
u16 psize;
u32 size;
/*
 * NOTE(review): extraction damage — the body of uvc_init_video_isoc is
 * missing, and the code below (which references intfnum, altsetting, ret,
 * best_ep and intf, none of which are declared in this scope) belongs to
 * a separate function that starts the video transfer. "staticint" and
 * "unsignedint" are extraction artifacts. Restore from the original source.
 */
/* * Some devices, namely the Logitech C910 and B910, are unable * to recover from a USB autosuspend, unless the alternate * setting of the streaming interface is toggled.
*/ if (stream->dev->quirks & UVC_QUIRK_WAKE_AUTOSUSPEND) {
usb_set_interface(stream->dev->udev, intfnum,
altsetting);
usb_set_interface(stream->dev->udev, intfnum, 0);
}
ret = usb_set_interface(stream->dev->udev, intfnum, altsetting); if (ret < 0) return ret;
ret = uvc_init_video_isoc(stream, best_ep, gfp_flags);
} else { /* Bulk endpoint, proceed to URB initialization. */
ep = uvc_find_endpoint(&intf->altsetting[0],
stream->header.bEndpointAddress); if (ep == NULL) return -EIO;
ret = uvc_init_video_bulk(stream, ep, gfp_flags);
}
if (ret < 0) return ret;
/* Submit the URBs. */
for_each_uvc_urb(uvc_urb, stream) {
ret = usb_submit_urb(uvc_urb->urb, gfp_flags); if (ret < 0) {
dev_err(&stream->intf->dev, "Failed to submit URB %u (%d).\n",
uvc_urb_index(uvc_urb), ret);
/* Tear down every URB on a partial submit failure. */
uvc_video_stop_transfer(stream, 1); return ret;
}
}
/* * The Logitech C920 temporarily forgets that it should not be adjusting * Exposure Absolute during init so restore controls to stored values.
*/ if (stream->dev->quirks & UVC_QUIRK_RESTORE_CTRLS_ON_INIT)
uvc_ctrl_restore_values(stream->dev);
/* * Stop streaming without disabling the video queue. * * To let userspace applications resume without trouble, we must not touch the * video buffers in any way. We mark the device as frozen to make sure the URB * completion handler won't try to cancel the queue when we kill the URBs.
*/ int uvc_video_suspend(struct uvc_streaming *stream)
{ if (!uvc_queue_streaming(&stream->queue)) return 0;
/*
 * NOTE(review): this definition is truncated in this chunk — the lines
 * that mark the stream frozen and stop the transfer are missing, along
 * with the closing brace. Restore from the original source.
 */
/* * Reconfigure the video interface and restart streaming if it was enabled * before suspend. * * If an error occurs, disable the video queue. This will wake all pending * buffers, making sure userspace applications are notified of the problem * instead of waiting forever.
*/ int uvc_video_resume(struct uvc_streaming *stream, int reset)
{ int ret;
/*
 * NOTE(review): this definition is truncated in this chunk — the tail
 * that restarts the transfer and handles errors is missing, along with
 * the closing brace. Restore from the original source.
 */
/* * If the bus has been reset on resume, set the alternate setting to 0. * This should be the default value, but some devices crash or otherwise * misbehave if they don't receive a SET_INTERFACE request before any * other video control request.
*/ if (reset)
usb_set_interface(stream->dev->udev, stream->intfnum, 0);
stream->frozen = 0;
/* Clear the clock state accumulated before suspend. */
uvc_video_clock_reset(&stream->clock);
if (!uvc_queue_streaming(&stream->queue)) return 0;
ret = uvc_commit_video(stream, &stream->ctrl); if (ret < 0) return ret;
/* ------------------------------------------------------------------------ * Video device
*/
/* * Initialize the UVC video device by switching to alternate setting 0 and * retrieve the default format. * * Some cameras (namely the Fuji Finepix) set the format and frame * indexes to zero. The UVC standard doesn't clearly make this a spec * violation, so try to silently fix the values if possible. * * This function is called before registering the device with V4L. * * NOTE(review): this definition is cut off at the end of this chunk * ("conststruct" is an extraction artifact for "const struct"); the * remainder of the body is missing. Restore from the original source.
*/ int uvc_video_init(struct uvc_streaming *stream)
{ struct uvc_streaming_control *probe = &stream->ctrl; conststruct uvc_format *format = NULL; conststruct uvc_frame *frame = NULL; struct uvc_urb *uvc_urb; unsignedint i; int ret;
if (stream->nformats == 0) {
dev_info(&stream->intf->dev, "No supported video formats found.\n"); return -EINVAL;
}
atomic_set(&stream->active, 0);
/* * Alternate setting 0 should be the default, yet the XBox Live Vision * Cam (and possibly other devices) crash or otherwise misbehave if * they don't receive a SET_INTERFACE request before any other video * control request.
*/
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.59 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.