#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
/*
 * The Streams API is an experimental feature. To use the Streams API, set
 * 'v4l2_subdev_enable_streams_api' to 1 below.
 */
static bool v4l2_subdev_enable_streams_api;
#endif

/*
 * Maximum stream ID is 63 for now, as we use u64 bitmask to represent a set
 * of streams.
 *
 * Note that V4L2_FRAME_DESC_ENTRY_MAX is related: V4L2_FRAME_DESC_ENTRY_MAX
 * restricts the total number of streams in a pad, although the stream ID is
 * not restricted.
 */
#define V4L2_SUBDEV_MAX_STREAM_ID 63
/*
 * call_s_stream - invoke the subdev's .s_stream() video op with state tracking
 * @sd: the subdev whose streaming state is being changed
 * @enable: non-zero to start streaming, zero to stop
 *
 * Calls sd->ops->video->s_stream(), records the resulting stream state in
 * sd->s_stream_enabled, and toggles the privacy LED to match.
 *
 * Return: 0 on success or a negative error code. A failure to *disable*
 * streaming is warned about but reported as success, so the pipeline can
 * still be torn down.
 */
static int call_s_stream(struct v4l2_subdev *sd, int enable)
{
	int ret;

	/*
	 * The .s_stream() operation must never be called to start or stop an
	 * already started or stopped subdev. Catch offenders but don't return
	 * an error yet to avoid regressions.
	 */
	if (WARN_ON(sd->s_stream_enabled == !!enable))
		return 0;

	ret = sd->ops->video->s_stream(sd, enable);

	/* Ignore errors on disable: the stream must be stoppable regardless. */
	if (!enable && ret < 0) {
		dev_warn(sd->dev, "disabling streaming failed (%d)\n", ret);
		ret = 0;
	}

	if (!ret) {
		sd->s_stream_enabled = enable;

		/* Keep the privacy LED in sync with the streaming state. */
		if (enable)
			v4l2_subdev_enable_privacy_led(sd);
		else
			v4l2_subdev_disable_privacy_led(sd);
	}

	return ret;
}
#ifdef CONFIG_MEDIA_CONTROLLER

/*
 * Create state-management wrapper for pad ops dealing with subdev state. The
 * wrapper handles the case where the caller does not provide the called
 * subdev's state. This should be removed when all the callers are fixed.
 *
 * If the caller passes a NULL state, the wrapper locks and uses the subdev's
 * active state for the duration of the call, and unlocks it afterwards.
 */
#define DEFINE_STATE_WRAPPER(f, arg_type)                                  \
	static int call_##f##_state(struct v4l2_subdev *sd,                \
				    struct v4l2_subdev_state *_state,      \
				    arg_type *arg)                         \
	{                                                                  \
		struct v4l2_subdev_state *state = _state;                  \
		int ret;                                                   \
		if (!_state)                                               \
			state = v4l2_subdev_lock_and_get_active_state(sd); \
		ret = call_##f(sd, state, arg);                            \
		if (!_state && state)                                      \
			v4l2_subdev_unlock_state(state);                   \
		return ret;                                                \
	}
switch (cmd) { default: return NULL; case VIDIOC_SUBDEV_G_FMT: case VIDIOC_SUBDEV_S_FMT:
which = ((struct v4l2_subdev_format *)arg)->which; break; case VIDIOC_SUBDEV_G_CROP: case VIDIOC_SUBDEV_S_CROP:
which = ((struct v4l2_subdev_crop *)arg)->which; break; case VIDIOC_SUBDEV_ENUM_MBUS_CODE:
which = ((struct v4l2_subdev_mbus_code_enum *)arg)->which; break; case VIDIOC_SUBDEV_ENUM_FRAME_SIZE:
which = ((struct v4l2_subdev_frame_size_enum *)arg)->which; break; case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL:
which = ((struct v4l2_subdev_frame_interval_enum *)arg)->which; break; case VIDIOC_SUBDEV_G_SELECTION: case VIDIOC_SUBDEV_S_SELECTION:
which = ((struct v4l2_subdev_selection *)arg)->which; break; case VIDIOC_SUBDEV_G_FRAME_INTERVAL: case VIDIOC_SUBDEV_S_FRAME_INTERVAL: { struct v4l2_subdev_frame_interval *fi = arg;
if (!(subdev_fh->client_caps &
V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH))
fi->which = V4L2_SUBDEV_FORMAT_ACTIVE;
which = fi->which; break;
} case VIDIOC_SUBDEV_G_ROUTING: case VIDIOC_SUBDEV_S_ROUTING:
which = ((struct v4l2_subdev_routing *)arg)->which; break;
}
return which == V4L2_SUBDEV_FORMAT_TRY ?
subdev_fh->state :
v4l2_subdev_get_unlocked_active_state(sd);
}
/* * If the streams API is not enabled, remove V4L2_SUBDEV_CAP_STREAMS. * Remove this when the API is no longer experimental.
*/ if (!v4l2_subdev_enable_streams_api)
streams_subdev = false;
case VIDIOC_QUERYCTRL: /* * TODO: this really should be folded into v4l2_queryctrl (this * currently returns -EINVAL for NULL control handlers). * However, v4l2_queryctrl() is still called directly by * drivers as well and until that has been addressed I believe * it is safer to do the check here. The same is true for the * other control ioctls below.
*/ if (!vfh->ctrl_handler) return -ENOTTY; return v4l2_queryctrl(vfh->ctrl_handler, arg);
case VIDIOC_QUERY_EXT_CTRL: if (!vfh->ctrl_handler) return -ENOTTY; return v4l2_query_ext_ctrl(vfh->ctrl_handler, arg);
case VIDIOC_QUERYMENU: if (!vfh->ctrl_handler) return -ENOTTY; return v4l2_querymenu(vfh->ctrl_handler, arg);
case VIDIOC_G_CTRL: if (!vfh->ctrl_handler) return -ENOTTY; return v4l2_g_ctrl(vfh->ctrl_handler, arg);
case VIDIOC_S_CTRL: if (!vfh->ctrl_handler) return -ENOTTY; return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg);
case VIDIOC_G_EXT_CTRLS: if (!vfh->ctrl_handler) return -ENOTTY; return v4l2_g_ext_ctrls(vfh->ctrl_handler,
vdev, sd->v4l2_dev->mdev, arg);
case VIDIOC_S_EXT_CTRLS: if (!vfh->ctrl_handler) return -ENOTTY; return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler,
vdev, sd->v4l2_dev->mdev, arg);
case VIDIOC_TRY_EXT_CTRLS: if (!vfh->ctrl_handler) return -ENOTTY; return v4l2_try_ext_ctrls(vfh->ctrl_handler,
vdev, sd->v4l2_dev->mdev, arg);
case VIDIOC_DQEVENT: if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) return -ENOIOCTLCMD;
case VIDIOC_SUBSCRIBE_EVENT: if (v4l2_subdev_has_op(sd, core, subscribe_event)) return v4l2_subdev_call(sd, core, subscribe_event,
vfh, arg);
if ((sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS) &&
vfh->ctrl_handler) return v4l2_ctrl_subdev_subscribe_event(sd, vfh, arg);
return -ENOIOCTLCMD;
case VIDIOC_UNSUBSCRIBE_EVENT: if (v4l2_subdev_has_op(sd, core, unsubscribe_event)) return v4l2_subdev_call(sd, core, unsubscribe_event,
vfh, arg);
if (sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS) return v4l2_event_subdev_unsubscribe(sd, vfh, arg);
return -ENOIOCTLCMD;
#ifdef CONFIG_VIDEO_ADV_DEBUG case VIDIOC_DBG_G_REGISTER:
{ struct v4l2_dbg_register *p = arg;
if (!capable(CAP_SYS_ADMIN)) return -EPERM; return v4l2_subdev_call(sd, core, g_register, p);
} case VIDIOC_DBG_S_REGISTER:
{ struct v4l2_dbg_register *p = arg;
if (!capable(CAP_SYS_ADMIN)) return -EPERM; return v4l2_subdev_call(sd, core, s_register, p);
} case VIDIOC_DBG_G_CHIP_INFO:
{ struct v4l2_dbg_chip_info *p = arg;
if (p->match.type != V4L2_CHIP_MATCH_SUBDEV || p->match.addr) return -EINVAL; if (sd->ops->core && sd->ops->core->s_register)
p->flags |= V4L2_CHIP_FL_WRITABLE; if (sd->ops->core && sd->ops->core->g_register)
p->flags |= V4L2_CHIP_FL_READABLE;
strscpy(p->name, sd->name, sizeof(p->name)); return 0;
} #endif
case VIDIOC_LOG_STATUS: { int ret;
pr_info("%s: ================= START STATUS =================\n",
sd->name);
ret = v4l2_subdev_call(sd, core, log_status);
pr_info("%s: ================== END STATUS ==================\n",
sd->name); return ret;
}
case VIDIOC_SUBDEV_G_FMT: { struct v4l2_subdev_format *format = arg;
for (i = 0; i < routing->num_routes; ++i) { conststruct v4l2_subdev_route *route = &routes[i]; conststruct media_pad *pads = sd->entity.pads;
if (route->sink_stream > V4L2_SUBDEV_MAX_STREAM_ID ||
route->source_stream > V4L2_SUBDEV_MAX_STREAM_ID) return -EINVAL;
if (route->sink_pad >= sd->entity.num_pads) return -EINVAL;
if (!(pads[route->sink_pad].flags &
MEDIA_PAD_FL_SINK)) return -EINVAL;
if (route->source_pad >= sd->entity.num_pads) return -EINVAL;
if (!(pads[route->source_pad].flags &
MEDIA_PAD_FL_SOURCE)) return -EINVAL;
if (route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)
num_active_routes++;
}
/* * Drivers that implement routing need to report a frame * descriptor accordingly, with up to one entry per route. Until * the frame descriptors entries get allocated dynamically, * limit the number of active routes to * V4L2_FRAME_DESC_ENTRY_MAX.
*/ if (num_active_routes > V4L2_FRAME_DESC_ENTRY_MAX) return -E2BIG;
/* * If the driver doesn't support setting routing, just return * the routing table.
*/ if (!v4l2_subdev_has_op(sd, pad, set_routing)) {
memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
state->routing.routes,
min(state->routing.num_routes, routing->len_routes) * sizeof(*state->routing.routes));
routing->num_routes = state->routing.num_routes;
case VIDIOC_SUBDEV_S_CLIENT_CAP: { struct v4l2_subdev_client_capability *client_cap = arg;
/* * Clear V4L2_SUBDEV_CLIENT_CAP_STREAMS if streams API is not * enabled. Remove this when streams API is no longer * experimental.
*/ if (!v4l2_subdev_enable_streams_api)
client_cap->capabilities &= ~V4L2_SUBDEV_CLIENT_CAP_STREAMS;
/* The width, height and code must match. */ if (source_fmt->format.width != sink_fmt->format.width) {
dev_dbg(sd->entity.graph_obj.mdev->dev, "%s: width does not match (source %u, sink %u)\n",
__func__,
source_fmt->format.width, sink_fmt->format.width);
pass = false;
}
if (source_fmt->format.height != sink_fmt->format.height) {
dev_dbg(sd->entity.graph_obj.mdev->dev, "%s: height does not match (source %u, sink %u)\n",
__func__,
source_fmt->format.height, sink_fmt->format.height);
pass = false;
}
if (source_fmt->format.code != sink_fmt->format.code) {
dev_dbg(sd->entity.graph_obj.mdev->dev, "%s: media bus code does not match (source 0x%8.8x, sink 0x%8.8x)\n",
__func__,
source_fmt->format.code, sink_fmt->format.code);
pass = false;
}
/* The field order must match, or the sink field order must be NONE * to support interlaced hardware connected to bridges that support * progressive formats only.
*/ if (source_fmt->format.field != sink_fmt->format.field &&
sink_fmt->format.field != V4L2_FIELD_NONE) {
dev_dbg(sd->entity.graph_obj.mdev->dev, "%s: field does not match (source %u, sink %u)\n",
__func__,
source_fmt->format.field, sink_fmt->format.field);
pass = false;
}
if (pass) return 0;
dev_dbg(sd->entity.graph_obj.mdev->dev, "%s: link was \"%s\":%u -> \"%s\":%u\n", __func__,
link->source->entity->name, link->source->index,
link->sink->entity->name, link->sink->index);
/* * It is ok to have more source streams than sink streams as extra * source streams can just be ignored by the receiver, but having extra * sink streams is an error as streams must have a source.
*/
dangling_sink_streams = (source_streams_mask ^ sink_streams_mask) &
sink_streams_mask; if (dangling_sink_streams) {
dev_err(dev, "Dangling sink streams: mask %#llx\n",
dangling_sink_streams); return -EINVAL;
}
ret = v4l2_subdev_link_validate_get_format(link->source, stream,
&source_fmt, states_locked); if (ret < 0) {
dev_dbg(dev, "Failed to get format for \"%s\":%u:%u (but that's ok)\n",
link->source->entity->name, link->source->index,
stream); continue;
}
ret = v4l2_subdev_link_validate_get_format(link->sink, stream,
&sink_fmt, states_locked); if (ret < 0) {
dev_dbg(dev, "Failed to get format for \"%s\":%u:%u (but that's ok)\n",
link->sink->entity->name, link->sink->index,
stream); continue;
}
/* TODO: add stream number to link_validate() */
ret = v4l2_subdev_call(sink_subdev, pad, link_validate, link,
&source_fmt, &sink_fmt); if (!ret) continue;
if (ret != -ENOIOCTLCMD) return ret;
ret = v4l2_subdev_link_validate_default(sink_subdev, link,
&source_fmt, &sink_fmt);
if (ret) return ret;
}
return 0;
}
int v4l2_subdev_link_validate(struct media_link *link)
{ struct v4l2_subdev *source_sd, *sink_sd; struct v4l2_subdev_state *source_state, *sink_state; bool states_locked; int ret;
/* * Links are validated in the context of the sink entity. Usage of this * helper on a sink that is not a subdev is a clear driver bug.
*/ if (WARN_ON_ONCE(!is_media_entity_v4l2_subdev(link->sink->entity))) return -EINVAL;
/* * If the source is a video device, delegate link validation to it. This * allows usage of this helper for subdev connected to a video output * device, provided that the driver implement the video output device's * .link_validate() operation.
*/ if (is_media_entity_v4l2_video_device(link->source->entity)) { struct media_entity *source = link->source->entity;
if (!source->ops || !source->ops->link_validate) { /* * Many existing drivers do not implement the required * .link_validate() operation for their video devices. * Print a warning to get the drivers fixed, and return * 0 to avoid breaking userspace. This should * eventually be turned into a WARN_ON() when all * drivers will have been fixed.
*/
pr_warn_once("video device '%s' does not implement .link_validate(), driver bug!\n",
source->name); return 0;
}
/* * Avoid infinite loops in case a video device incorrectly uses * this helper function as its .link_validate() handler.
*/ if (WARN_ON(source->ops->link_validate == v4l2_subdev_link_validate)) return -EINVAL;
return source->ops->link_validate(link);
}
/* * If the source is still not a subdev, usage of this helper is a clear * driver bug.
*/ if (WARN_ON(!is_media_entity_v4l2_subdev(link->source->entity))) return -EINVAL;
/* Drivers that support streams do not need the legacy pad config */ if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS) && sd->entity.num_pads) {
state->pads = kvcalloc(sd->entity.num_pads, sizeof(*state->pads), GFP_KERNEL); if (!state->pads) {
ret = -ENOMEM; goto err;
}
}
if (sd->internal_ops && sd->internal_ops->init_state) { /* * There can be no race at this point, but we lock the state * anyway to satisfy lockdep checks.
*/
v4l2_subdev_lock_state(state);
ret = sd->internal_ops->init_state(sd, state);
v4l2_subdev_unlock_state(state);
if (ret) goto err;
}
return state;
err: if (state && state->pads)
kvfree(state->pads);
if (has_enable_streams != has_disable_streams) {
dev_err(dev, "subdev '%s' must implement both or neither of .enable_streams() and .disable_streams()\n",
sd->name); return -EINVAL;
}
if (sd->flags & V4L2_SUBDEV_FL_STREAMS) { if (has_s_stream && !has_enable_streams) {
dev_err(dev, "subdev '%s' must implement .enable/disable_streams()\n",
sd->name);
return -EINVAL;
}
}
if (sd->ctrl_handler)
sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
state = __v4l2_subdev_state_alloc(sd, name, key); if (IS_ERR(state)) return PTR_ERR(state);
if (pad >= state->sd->entity.num_pads) return NULL;
return &state->pads[pad].format;
}
lockdep_assert_held(state->lock);
stream_configs = &state->stream_configs;
for (i = 0; i < stream_configs->num_configs; ++i) { if (stream_configs->configs[i].pad == pad &&
stream_configs->configs[i].stream == stream) return &stream_configs->configs[i].fmt;
}
if (pad >= state->sd->entity.num_pads) return NULL;
return &state->pads[pad].crop;
}
lockdep_assert_held(state->lock);
stream_configs = &state->stream_configs;
for (i = 0; i < stream_configs->num_configs; ++i) { if (stream_configs->configs[i].pad == pad &&
stream_configs->configs[i].stream == stream) return &stream_configs->configs[i].crop;
}
if (pad >= state->sd->entity.num_pads) return NULL;
return &state->pads[pad].compose;
}
lockdep_assert_held(state->lock);
stream_configs = &state->stream_configs;
for (i = 0; i < stream_configs->num_configs; ++i) { if (stream_configs->configs[i].pad == pad &&
stream_configs->configs[i].stream == stream) return &stream_configs->configs[i].compose;
}
if (pad >= state->sd->entity.num_pads) return NULL;
return &state->pads[pad].interval;
}
lockdep_assert_held(state->lock);
stream_configs = &state->stream_configs;
for (i = 0; i < stream_configs->num_configs; ++i) { if (stream_configs->configs[i].pad == pad &&
stream_configs->configs[i].stream == stream) return &stream_configs->configs[i].interval;
}
/* Count number of formats needed */
for_each_active_route(routing, route) { /* * Each route needs a format on both ends of the route.
*/
new_configs.num_configs += 2;
}
if (new_configs.num_configs) {
new_configs.configs = kvcalloc(new_configs.num_configs, sizeof(*new_configs.configs),
GFP_KERNEL);
if (!new_configs.configs) return -ENOMEM;
}
/* * Fill in the 'pad' and stream' value for each item in the array from * the routing table
*/
idx = 0;
int v4l2_subdev_routing_validate(struct v4l2_subdev *sd, conststruct v4l2_subdev_krouting *routing, enum v4l2_subdev_routing_restriction disallow)
{
u32 *remote_pads = NULL; unsignedint i, j; int ret = -EINVAL;
if (disallow & (V4L2_SUBDEV_ROUTING_NO_STREAM_MIX |
V4L2_SUBDEV_ROUTING_NO_MULTIPLEXING)) {
remote_pads = kcalloc(sd->entity.num_pads, sizeof(*remote_pads),
GFP_KERNEL); if (!remote_pads) return -ENOMEM;
for (i = 0; i < sd->entity.num_pads; ++i)
remote_pads[i] = U32_MAX;
}
for (i = 0; i < routing->num_routes; ++i) { conststruct v4l2_subdev_route *route = &routing->routes[i];
/* Validate the sink and source pad numbers. */ if (route->sink_pad >= sd->entity.num_pads ||
!(sd->entity.pads[route->sink_pad].flags & MEDIA_PAD_FL_SINK)) {
dev_dbg(sd->dev, "route %u sink (%u) is not a sink pad\n",
i, route->sink_pad); goto out;
}
if (route->source_pad >= sd->entity.num_pads ||
!(sd->entity.pads[route->source_pad].flags & MEDIA_PAD_FL_SOURCE)) {
dev_dbg(sd->dev, "route %u source (%u) is not a source pad\n",
i, route->source_pad); goto out;
}
/* * V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX: all streams from a * sink pad must be routed to a single source pad.
*/ if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX) { if (remote_pads[route->sink_pad] != U32_MAX &&
remote_pads[route->sink_pad] != route->source_pad) {
dev_dbg(sd->dev, "route %u attempts to mix %s streams\n",
i, "sink"); goto out;
}
}
/* * V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX: all streams on a * source pad must originate from a single sink pad.
*/ if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX) { if (remote_pads[route->source_pad] != U32_MAX &&
remote_pads[route->source_pad] != route->sink_pad) {
dev_dbg(sd->dev, "route %u attempts to mix %s streams\n",
i, "source"); goto out;
}
}
/* * V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING: Pads on the sink * side can not do stream multiplexing, i.e. there can be only * a single stream in a sink pad.
*/ if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING) { if (remote_pads[route->sink_pad] != U32_MAX) {
dev_dbg(sd->dev, "route %u attempts to multiplex on %s pad %u\n",
i, "sink", route->sink_pad); goto out;
}
}
/* * V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING: Pads on the * source side can not do stream multiplexing, i.e. there can * be only a single stream in a source pad.
*/ if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING) { if (remote_pads[route->source_pad] != U32_MAX) {
dev_dbg(sd->dev, "route %u attempts to multiplex on %s pad %u\n",
i, "source", route->source_pad); goto out;
}
}
if (remote_pads) {
remote_pads[route->sink_pad] = route->source_pad;
remote_pads[route->source_pad] = route->sink_pad;
}
for (j = i + 1; j < routing->num_routes; ++j) { conststruct v4l2_subdev_route *r = &routing->routes[j];
/* * V4L2_SUBDEV_ROUTING_NO_1_TO_N: No two routes can * originate from the same (sink) stream.
*/ if ((disallow & V4L2_SUBDEV_ROUTING_NO_1_TO_N) &&
route->sink_pad == r->sink_pad &&
route->sink_stream == r->sink_stream) {
dev_dbg(sd->dev, "routes %u and %u originate from same sink (%u/%u)\n",
i, j, route->sink_pad,
route->sink_stream); goto out;
}
/* * V4L2_SUBDEV_ROUTING_NO_N_TO_1: No two routes can end * at the same (source) stream.
*/ if ((disallow & V4L2_SUBDEV_ROUTING_NO_N_TO_1) &&
route->source_pad == r->source_pad &&
route->source_stream == r->source_stream) {
dev_dbg(sd->dev, "routes %u and %u end at same source (%u/%u)\n",
i, j, route->source_pad,
route->source_stream); goto out;
}
}
}
/* A few basic sanity checks first. */ if (pad >= sd->entity.num_pads) return -EINVAL;
if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) return -EOPNOTSUPP;
/* * We use a 64-bit bitmask for tracking enabled pads, so only subdevices * with 64 pads or less can be supported.
*/ if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE) return -EOPNOTSUPP;
if (!streams_mask) return 0;
/* Fallback on .s_stream() if .enable_streams() isn't available. */
use_s_stream = !v4l2_subdev_has_op(sd, pad, enable_streams);
if (!use_s_stream)
state = v4l2_subdev_lock_and_get_active_state(sd); else
state = NULL;
/* * Verify that the requested streams exist and that they are not * already enabled.
*/
if (found_streams != streams_mask) {
dev_dbg(dev, "streams 0x%llx not found on %s:%u\n",
streams_mask & ~found_streams, sd->entity.name, pad);
ret = -EINVAL; goto done;
}
if (enabled_streams) {
dev_dbg(dev, "streams 0x%llx already enabled on %s:%u\n",
enabled_streams, sd->entity.name, pad);
ret = -EALREADY; goto done;
}
already_streaming = v4l2_subdev_is_streaming(sd);
if (!use_s_stream) { /* Call the .enable_streams() operation. */
ret = v4l2_subdev_call(sd, pad, enable_streams, state, pad,
streams_mask);
} else { /* Start streaming when the first pad is enabled. */ if (!already_streaming)
ret = v4l2_subdev_call(sd, video, s_stream, 1); else
ret = 0;
}
/* Mark the streams as enabled. */
v4l2_subdev_set_streams_enabled(sd, state, pad, streams_mask, true);
/* * TODO: When all the drivers have been changed to use * v4l2_subdev_enable_streams() and v4l2_subdev_disable_streams(), * instead of calling .s_stream() operation directly, we can remove * the privacy LED handling from call_s_stream() and do it here * for all cases.
*/ if (!use_s_stream && !already_streaming)
v4l2_subdev_enable_privacy_led(sd);
done: if (!use_s_stream)
v4l2_subdev_unlock_state(state);
/* A few basic sanity checks first. */ if (pad >= sd->entity.num_pads) return -EINVAL;
if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) return -EOPNOTSUPP;
/* * We use a 64-bit bitmask for tracking enabled pads, so only subdevices * with 64 pads or less can be supported.
*/ if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE) return -EOPNOTSUPP;
if (!streams_mask) return 0;
/* Fallback on .s_stream() if .disable_streams() isn't available. */
use_s_stream = !v4l2_subdev_has_op(sd, pad, disable_streams);
if (!use_s_stream)
state = v4l2_subdev_lock_and_get_active_state(sd); else
state = NULL;
/* * Verify that the requested streams exist and that they are not * already disabled.
*/
if (found_streams != streams_mask) {
dev_dbg(dev, "streams 0x%llx not found on %s:%u\n",
streams_mask & ~found_streams, sd->entity.name, pad);
ret = -EINVAL; goto done;
}
if (enabled_streams != streams_mask) {
dev_dbg(dev, "streams 0x%llx already disabled on %s:%u\n",
streams_mask & ~enabled_streams, sd->entity.name, pad);
ret = -EALREADY; goto done;
}
if (!use_s_stream) { /* Call the .disable_streams() operation. */
ret = v4l2_subdev_call(sd, pad, disable_streams, state, pad,
streams_mask);
} else { /* Stop streaming when the last streams are disabled. */
if (!(sd->enabled_pads & ~BIT_ULL(pad)))
ret = v4l2_subdev_call(sd, video, s_stream, 0); else
ret = 0;
}
int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable)
{ struct v4l2_subdev_state *state; struct v4l2_subdev_route *route; struct media_pad *pad;
u64 source_mask = 0; int pad_index = -1;
/* * Find the source pad. This helper is meant for subdevs that have a * single source pad, so failures shouldn't happen, but catch them * loudly nonetheless as they indicate a driver bug.
*/
media_entity_for_each_pad(&sd->entity, pad) { if (pad->flags & MEDIA_PAD_FL_SOURCE) {
pad_index = pad->index; break;
}
}
if (WARN_ON(pad_index == -1)) return -EINVAL;
if (sd->flags & V4L2_SUBDEV_FL_STREAMS) { /* * As there's a single source pad, just collect all the source * streams.
*/
state = v4l2_subdev_lock_and_get_active_state(sd);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.