/* * generic encoding-related code * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/* NOTE(review): tail of a struct whose opening line is not visible in this
 * chunk; only the two trailing members and the closing typedef are shown.
 * Code kept byte-identical; only this note added. */
/** * This is set to AV_PKT_FLAG_KEY for encoders that encode intra-only * formats (i.e. whose codec descriptor has AV_CODEC_PROP_INTRA_ONLY set). * This is used to set said flag generically for said encoders.
*/ int intra_only_flag;
/** * An audio frame with less than required samples has been submitted (and * potentially padded with silence). Reject all subsequent frames.
*/ int last_audio_frame;
} EncodeContext;
/* NOTE(review): fragment — the enclosing function's signature and the
 * declarations of avpkt/size/flags/ret are outside this chunk, and the
 * final return after the fail: block is missing. Code kept byte-identical;
 * only comments added. */
avpkt->size = size;
/* Let the user-installed callback allocate the packet buffer. */
ret = avctx->get_encode_buffer(avctx, avpkt, flags); if (ret < 0) goto fail;
if (!avpkt->data || !avpkt->buf) {
av_log(avctx, AV_LOG_ERROR, "No buffer returned by get_encode_buffer()\n");
ret = AVERROR(EINVAL); goto fail;
}
/* Zero the tail padding so overreads see deterministic bytes. */
memset(avpkt->data + avpkt->size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
ret = 0;
fail: if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "get_encode_buffer() failed\n");
av_packet_unref(avpkt);
}
/**
 * Encode one subtitle into a caller-provided buffer.
 *
 * @param avctx    codec context of an open subtitle encoder
 * @param buf      output buffer supplied by the caller
 * @param buf_size size of buf in bytes
 * @param sub      subtitle to encode; its start_display_time must be 0
 * @return the result of the codec's encode_sub callback, or -1 if
 *         start_display_time is nonzero
 */
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub)
{
    /* Display timing must be expressed via pts, not start_display_time. */
    if (sub->start_display_time) {
        av_log(avctx, AV_LOG_ERROR, "start_display_time must be 0.\n");
        return -1;
    }

    int ret = ffcodec(avctx->codec)->cb.encode_sub(avctx, buf, buf_size, sub);
    avctx->frame_num++;
    return ret;
}
/**
 * Hand the next buffered input frame to an encoder.
 *
 * Moves the frame stored in avci->buffer_frame into @p frame and mirrors
 * the deprecated key_frame / interlaced_frame / top_field_first fields
 * into the corresponding AV_FRAME_FLAG_* bits.
 *
 * @return 0 on success, AVERROR_EOF when draining,
 *         AVERROR(EAGAIN) when no frame is buffered
 */
int ff_encode_get_frame(AVCodecContext *avctx, AVFrame *frame)
{
    AVCodecInternal *const avci = avctx->internal;

    if (avci->draining)
        return AVERROR_EOF;
    if (!avci->buffer_frame->buf[0])
        return AVERROR(EAGAIN);

    av_frame_move_ref(frame, avci->buffer_frame);

#if FF_API_FRAME_KEY
FF_DISABLE_DEPRECATION_WARNINGS
    /* Keep the flags bit in sync with the deprecated field. */
    if (frame->key_frame)
        frame->flags |= AV_FRAME_FLAG_KEY;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
#if FF_API_INTERLACED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    if (frame->interlaced_frame)
        frame->flags |= AV_FRAME_FLAG_INTERLACED;
    if (frame->top_field_first)
        frame->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    return 0;
}
/**
 * Propagate a frame's opaque user data to the packet produced from it
 * when AV_CODEC_FLAG_COPY_OPAQUE is set; a no-op otherwise.
 *
 * @return 0 on success, a negative AVERROR on allocation failure
 */
int ff_encode_reordered_opaque(AVCodecContext *avctx,
                               AVPacket *pkt, const AVFrame *frame)
{
    if (!(avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE))
        return 0;

    int err = av_buffer_replace(&pkt->opaque_ref, frame->opaque_ref);
    if (err < 0)
        return err;
    pkt->opaque = frame->opaque;

    return 0;
}
/**
 * Invoke an FF_CODEC_CB_TYPE_ENCODE encoder callback and post-process its
 * output packet: make it ref-counted, fill in pts/duration/opaque for the
 * simple no-delay case, and derive dts where no reordering is possible.
 * On any failure the packet is unreferenced.
 *
 * Fixes in this revision: restored the "if" conditions on the timestamp and
 * dts paths that had been swallowed into line comments, un-fused the mangled
 * "elseif" token, and restored the missing final "return ret;".
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
int ff_encode_encode_cb(AVCodecContext *avctx, AVPacket *avpkt,
                        AVFrame *frame, int *got_packet)
{
    const FFCodec *const codec = ffcodec(avctx->codec);
    int ret;

    ret = codec->cb.encode(avctx, avpkt, frame, got_packet);
    emms_c();
    av_assert0(ret <= 0);

    if (!ret && *got_packet) {
        if (avpkt->data) {
            ret = encode_make_refcounted(avctx, avpkt);
            if (ret < 0)
                goto unref;
            // Data returned by encoders must always be ref-counted
            av_assert0(avpkt->buf);
        }

        // set the timestamps for the simple no-delay case
        // encoders with delay have to set the timestamps themselves
        if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY) ||
            (frame && (codec->caps_internal & FF_CODEC_CAP_EOF_FLUSH))) {
            if (avpkt->pts == AV_NOPTS_VALUE)
                avpkt->pts = frame->pts;

            if (!avpkt->duration) {
                if (frame->duration)
                    avpkt->duration = frame->duration;
                else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
                    avpkt->duration = ff_samples_to_time_base(avctx,
                                                              frame->nb_samples);
                }
            }

            ret = ff_encode_reordered_opaque(avctx, avpkt, frame);
            if (ret < 0)
                goto unref;
        }

        // dts equals pts unless there is reordering
        // there can be no reordering if there is no encoder delay
        if (!(avctx->codec_descriptor->props & AV_CODEC_PROP_REORDER) ||
            !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)       ||
            (codec->caps_internal & FF_CODEC_CAP_EOF_FLUSH))
            avpkt->dts = avpkt->pts;
    } else {
unref:
        av_packet_unref(avpkt);
    }

    return ret;
}
/* NOTE(review): fragment — this is the tail of a function whose signature
 * and local declarations (avci, avpkt, frame, got_packet, ret) are not
 * visible in this chunk. Code kept byte-identical; only comments added. */
if (CONFIG_FRAME_THREAD_ENCODER && avci->frame_thread_encoder) /* This will unref frame. */
ret = ff_thread_video_encode_frame(avctx, avpkt, frame, &got_packet); else {
ret = ff_encode_encode_cb(avctx, avpkt, frame, &got_packet);
}
/* No packet produced while draining means the encoder is fully flushed. */
if (avci->draining && !got_packet)
avci->draining_done = 1;
return ret;
}
/**
 * Run the simple (non-receive_packet) encode path repeatedly until the
 * encoder either fills @p avpkt with data or side data, or fails.
 *
 * Fixes in this revision: un-fused the mangled "staticint" token and
 * restored the missing success return / closing brace (the loop only
 * exits normally once avpkt holds output).
 *
 * @return 0 once avpkt holds output; a negative AVERROR (including
 *         AVERROR(EAGAIN)/AVERROR_EOF) propagated from the encode step
 */
static int encode_simple_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{
    int ret;

    while (!avpkt->data && !avpkt->side_data) {
        ret = encode_simple_internal(avctx, avpkt);
        if (ret < 0)
            return ret;
    }

    return 0;
}
/* NOTE(review): fragment — interior of a packet-receive function; the
 * enclosing signature and the declarations of avci/avpkt/ret are not
 * visible here. Code kept byte-identical; only comments added. */
if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) { if ((avctx->flags & AV_CODEC_FLAG_PASS1) && avctx->stats_out)
avctx->stats_out[0] = '\0'; if (av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx)) return AVERROR(EINVAL);
}
if (ffcodec(avctx->codec)->cb_type == FF_CODEC_CB_TYPE_RECEIVE_PACKET) {
ret = ffcodec(avctx->codec)->cb.receive_packet(avctx, avpkt); if (ret < 0)
av_packet_unref(avpkt); else // Encoders must always return ref-counted buffers. // Side-data only packets have no data and can be not ref-counted.
av_assert0(!avpkt->data || avpkt->buf);
} else
ret = encode_simple_receive_packet(avctx, avpkt); if (ret >= 0)
/* Intra-only codecs mark every produced packet as a keyframe. */
avpkt->flags |= encode_ctx(avci)->intra_only_flag;
/* NOTE(review): fragment — tail of the frame-submission path; the enclosing
 * signature and the declarations of src/dst/ec/avci/ret are not visible in
 * this chunk and are assumed to come from the function header (TODO confirm
 * against the full file).
 * Fix in this revision: the final AV_CODEC_FLAG_FRAME_DURATION condition had
 * been swallowed into a line comment, which made "dst->duration = 0;" run
 * unconditionally; the condition is restored as code. */
    if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
        /* extract audio service type metadata */
        AVFrameSideData *sd = av_frame_get_side_data(src, AV_FRAME_DATA_AUDIO_SERVICE_TYPE);
        if (sd && sd->size >= sizeof(enum AVAudioServiceType))
            avctx->audio_service_type = *(enum AVAudioServiceType*)sd->data;

        /* check for valid frame size */
        if (!(avctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) {
            /* if we already got an undersized frame, that must have been the last */
            if (ec->last_audio_frame) {
                av_log(avctx, AV_LOG_ERROR, "frame_size (%d) was not respected for a non-last frame\n", avctx->frame_size);
                return AVERROR(EINVAL);
            }
            if (src->nb_samples > avctx->frame_size) {
                av_log(avctx, AV_LOG_ERROR, "nb_samples (%d) > frame_size (%d)\n", src->nb_samples, avctx->frame_size);
                return AVERROR(EINVAL);
            }
            if (src->nb_samples < avctx->frame_size) {
                ec->last_audio_frame = 1;
                if (!(avctx->codec->capabilities & AV_CODEC_CAP_SMALL_LAST_FRAME)) {
                    int pad_samples = avci->pad_samples ? avci->pad_samples : avctx->frame_size;
                    int out_samples = (src->nb_samples + pad_samples - 1) / pad_samples * pad_samples;

                    if (out_samples != src->nb_samples) {
                        ret = pad_last_frame(avctx, dst, src, out_samples);
                        if (ret < 0)
                            return ret;
                        goto finish;
                    }
                }
            }
        }
    }

    ret = av_frame_ref(dst, src);
    if (ret < 0)
        return ret;

finish:
    if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
        ret = encode_generate_icc_profile(avctx, dst);
        if (ret < 0)
            return ret;
    }

    // unset frame duration unless AV_CODEC_FLAG_FRAME_DURATION is set,
    // since otherwise we cannot be sure that whatever value it has is in the
    // right timebase, so we would produce an incorrect value, which is worse
    // than none at all
    if (!(avctx->flags & AV_CODEC_FLAG_FRAME_DURATION))
        dst->duration = 0;

    return 0;
}
/**
 * Submit a frame to the encoder; a NULL frame enters draining mode.
 *
 * After accepting input, eagerly tries to encode into the internal buffer
 * packet so avcodec_receive_packet() can hand it out later.
 *
 * @return 0 on success; AVERROR(EINVAL) if the context is not an open
 *         encoder; AVERROR_EOF once draining; AVERROR(EAGAIN) if a frame
 *         is already buffered; other negative AVERROR codes on failure
 */
int attribute_align_arg avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
{
    AVCodecInternal *const avci = avctx->internal;
    int err;

    if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
        return AVERROR(EINVAL);
    if (avci->draining)
        return AVERROR_EOF;
    if (avci->buffer_frame->buf[0])
        return AVERROR(EAGAIN);

    if (frame) {
        err = encode_send_frame_internal(avctx, frame);
        if (err < 0)
            return err;
    } else {
        /* NULL input switches the encoder into flush/drain mode. */
        avci->draining = 1;
    }

    /* Kick off encoding into the internal packet buffer if it is empty. */
    if (!avci->buffer_pkt->data && !avci->buffer_pkt->side_data) {
        err = encode_receive_packet_internal(avctx, avci->buffer_pkt);
        if (err < 0 && err != AVERROR(EAGAIN) && err != AVERROR_EOF)
            return err;
    }

    avctx->frame_num++;

    return 0;
}
/**
 * Retrieve the next encoded packet: hand out the internally buffered
 * packet if one exists, otherwise encode one on demand.
 *
 * Fix in this revision: the function body was truncated (it fell through
 * into unrelated spliced code); the missing success return and closing
 * brace are restored.
 *
 * @return 0 on success, AVERROR(EINVAL) if the context is not an open
 *         encoder, or a negative AVERROR from the encode path
 */
int attribute_align_arg avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{
    AVCodecInternal *avci = avctx->internal;
    int ret;

    av_packet_unref(avpkt);

    if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
        return AVERROR(EINVAL);

    if (avci->buffer_pkt->data || avci->buffer_pkt->side_data) {
        av_packet_move_ref(avpkt, avci->buffer_pkt);
    } else {
        ret = encode_receive_packet_internal(avctx, avpkt);
        if (ret < 0)
            return ret;
    }

    return 0;
}
if (!av_get_pix_fmt_name(avctx->pix_fmt)) {
av_log(avctx, AV_LOG_ERROR, "Invalid video pixel format: %d\n",
avctx->pix_fmt); return AVERROR(EINVAL);
}
ret = avcodec_get_supported_config(avctx, NULL, AV_CODEC_CONFIG_PIX_FORMAT,
0, (constvoid **) &pix_fmts, &num_pix_fmts); if (ret < 0) return ret;
if (pix_fmts) { for (i = 0; i < num_pix_fmts; i++) if (avctx->pix_fmt == pix_fmts[i]) break; if (i == num_pix_fmts) {
av_log(avctx, AV_LOG_ERROR, "Specified pixel format %s is not supported by the %s encoder.\n",
av_get_pix_fmt_name(avctx->pix_fmt), c->name);
ret = avcodec_get_supported_config(avctx, NULL, AV_CODEC_CONFIG_SAMPLE_FORMAT,
0, (constvoid **) &sample_fmts,
&num_sample_fmts); if (ret < 0) return ret; if (sample_fmts) { for (i = 0; i < num_sample_fmts; i++) { if (avctx->sample_fmt == sample_fmts[i]) break; if (avctx->ch_layout.nb_channels == 1 &&
av_get_planar_sample_fmt(avctx->sample_fmt) ==
av_get_planar_sample_fmt(sample_fmts[i])) {
avctx->sample_fmt = sample_fmts[i]; break;
}
} if (i == num_sample_fmts) {
av_log(avctx, AV_LOG_ERROR, "Specified sample format %s is not supported by the %s encoder\n",
av_get_sample_fmt_name(avctx->sample_fmt), c->name);
ret = avcodec_get_supported_config(avctx, NULL, AV_CODEC_CONFIG_SAMPLE_RATE,
0, (constvoid **) &supported_samplerates,
&num_samplerates); if (ret < 0) return ret; if (supported_samplerates) { for (i = 0; i < num_samplerates; i++) if (avctx->sample_rate == supported_samplerates[i]) break; if (i == num_samplerates) {
av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported by the %s encoder\n",
avctx->sample_rate, c->name);
av_log(avctx, AV_LOG_ERROR, "Supported sample rates:\n"); for (int p = 0; supported_samplerates[p]; p++)
av_log(avctx, AV_LOG_ERROR, " %d\n", supported_samplerates[p]);
return AVERROR(EINVAL);
}
}
ret = avcodec_get_supported_config(avctx, NULL, AV_CODEC_CONFIG_CHANNEL_LAYOUT,
0, (constvoid **) &ch_layouts, &num_ch_layouts); if (ret < 0) return ret; if (ch_layouts) { for (i = 0; i < num_ch_layouts; i++) { if (!av_channel_layout_compare(&avctx->ch_layout, &ch_layouts[i])) break;
} if (i == num_ch_layouts) { char buf[512]; int ret = av_channel_layout_describe(&avctx->ch_layout, buf, sizeof(buf));
av_log(avctx, AV_LOG_ERROR, "Specified channel layout '%s' is not supported by the %s encoder\n",
ret > 0 ? buf : "?", c->name);
av_log(avctx, AV_LOG_ERROR, "Supported channel layouts:\n"); for (int p = 0; ch_layouts[p].nb_channels; p++) {
ret = av_channel_layout_describe(&ch_layouts[p], buf, sizeof(buf));
av_log(avctx, AV_LOG_ERROR, " %s\n", ret > 0 ? buf : "?");
} return AVERROR(EINVAL);
}
}
if (!avctx->bits_per_raw_sample)
avctx->bits_per_raw_sample = av_get_exact_bits_per_sample(avctx->codec_id); if (!avctx->bits_per_raw_sample)
avctx->bits_per_raw_sample = 8 * av_get_bytes_per_sample(avctx->sample_fmt);
return 0;
}
/**
 * Generic validation and setup run before an encoder's init() callback:
 * checks timebase, bitrate and COPY_OPAQUE support, dispatches to the
 * per-media-type preinit, derives rc_initial_buffer_occupancy and the
 * intra-only keyframe flag, and allocates helper frames.
 *
 * Fixes in this revision: restored the truncated success return / closing
 * brace and added an explicit default case to the media-type switch
 * (ret stays 0 for other media types, as before).
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
int ff_encode_preinit(AVCodecContext *avctx)
{
    AVCodecInternal *avci = avctx->internal;
    EncodeContext *ec = encode_ctx(avci);
    int ret = 0;

    if (avctx->time_base.num <= 0 || avctx->time_base.den <= 0) {
        av_log(avctx, AV_LOG_ERROR, "The encoder timebase is not set.\n");
        return AVERROR(EINVAL);
    }

    if (avctx->bit_rate < 0) {
        av_log(avctx, AV_LOG_ERROR, "The encoder bitrate is negative.\n");
        return AVERROR(EINVAL);
    }

    if (avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE &&
        !(avctx->codec->capabilities & AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE)) {
        av_log(avctx, AV_LOG_ERROR, "The copy_opaque flag is set, but the "
               "encoder does not support it.\n");
        return AVERROR(EINVAL);
    }

    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_VIDEO: ret = encode_preinit_video(avctx); break;
    case AVMEDIA_TYPE_AUDIO: ret = encode_preinit_audio(avctx); break;
    default: break;
    }
    if (ret < 0)
        return ret;

    if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
        avctx->bit_rate > 0 && avctx->bit_rate < 1000) {
        av_log(avctx, AV_LOG_WARNING, "Bitrate %"PRId64" is extremely low, maybe you mean %"PRId64"k\n", avctx->bit_rate, avctx->bit_rate);
    }

    if (!avctx->rc_initial_buffer_occupancy)
        avctx->rc_initial_buffer_occupancy = avctx->rc_buffer_size * 3LL / 4;

    /* Intra-only codecs produce only keyframes; mark every packet as such. */
    if (avctx->codec_descriptor->props & AV_CODEC_PROP_INTRA_ONLY)
        ec->intra_only_flag = AV_PKT_FLAG_KEY;

    if (ffcodec(avctx->codec)->cb_type == FF_CODEC_CB_TYPE_ENCODE) {
        avci->in_frame = av_frame_alloc();
        if (!avci->in_frame)
            return AVERROR(ENOMEM);
    }

    if ((avctx->flags & AV_CODEC_FLAG_RECON_FRAME)) {
        if (!(avctx->codec->capabilities & AV_CODEC_CAP_ENCODER_RECON_FRAME)) {
            av_log(avctx, AV_LOG_ERROR, "Reconstructed frame output requested "
                   "from an encoder not supporting it\n");
            return AVERROR(ENOSYS);
        }

        avci->recon_frame = av_frame_alloc();
        if (!avci->recon_frame)
            return AVERROR(ENOMEM);
    }

    /* NOTE(review): the original chunk was truncated here; restoring the
     * success return. */
    return 0;
}
/* NOTE(review): fragment — interior of a CPB-properties helper returning
 * AVCPBProperties*; the enclosing signature and the declarations of
 * i/props/size are not visible. Code kept byte-identical; comments added. */
/* Reuse existing CPB side data if present. */
for (i = 0; i < avctx->nb_coded_side_data; i++) if (avctx->coded_side_data[i].type == AV_PKT_DATA_CPB_PROPERTIES) return (AVCPBProperties *)avctx->coded_side_data[i].data;
props = av_cpb_properties_alloc(&size); if (!props) return NULL;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.