opencv - 如何使用 ffmpeg 和 opencv 编辑帧的内容?

标签 opencv ffmpeg

我将使用 OpenCV 和 ffmpeg 3.3 从 mp4 文件编辑一帧的内容。但是,我遇到了一些问题,例如视频的宽度和高度为零,某些功能已弃用。我已将旧功能更改为更新功能,但仍然无法提取正确的帧。请帮忙。

谁能展示使用 ffmpeg 3.3 从 mp4 文件中提取帧的示例?

#include "stdafx.h"
#include <iostream>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
// FFmpeg
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>
#include <libavutil/pixdesc.h>
#include <libswscale/swscale.h>
#include <libavutil/imgutils.h>

}
#define CODEC_TYPE_VIDEO      AVMEDIA_TYPE_VIDEO

int main(int argc, char* argv[])
{   
    // initialize FFmpeg library
    av_register_all();
    //  av_log_set_level(AV_LOG_DEBUG);
    int ret;

    // open input file context
    AVFormatContext* inctx = nullptr;
    //ret = avformat_open_input(&inctx, infile, nullptr, nullptr);
    ret = avformat_open_input(&inctx, "C:\\car.mp4", nullptr, nullptr);
    // retrive input stream information


    ret = avformat_find_stream_info(inctx, nullptr);
    if (ret < 0) {
    std::cerr << "fail to avformat_find_stream_info: ret=" << ret;
    return 2;
    }

    // find primary video stream


    AVCodec* vcodec = nullptr;
    vcodec = avcodec_find_decoder(AV_CODEC_ID_MPEG4);
    if (!vcodec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    const int vstrm_idx = ret;
    AVStream* vstrm = inctx->streams[vstrm_idx];

    // open video decoder context
    AVCodecContext *c = NULL;
    c = avcodec_alloc_context3(vcodec);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    if (avcodec_open2(c, vcodec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    // print input video stream informataion
    // initialize sample scaler

    c->pix_fmt = AV_PIX_FMT_YUV420P;
    c->width = 1280;
    c->height = 720;
    if (vcodec->capabilities & CODEC_CAP_TRUNCATED)
        c->flags |= CODEC_FLAG_TRUNCATED;

    c->flags2 |= CODEC_FLAG2_FAST;
    int width = 1280;
    int height = 720;
    SwsContext* swsctx = sws_getCachedContext(nullptr, width,
        height, AV_PIX_FMT_YUV420P, width, height, AV_PIX_FMT_RGB32,
        SWS_FAST_BILINEAR, NULL, NULL, NULL);
}

最佳答案

不确定是否要编写经过处理的帧,不记得了,但这似乎对我有用:

extern "C" {
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include "libavcodec/avcodec.h"
#include <libavutil/opt.h>
#include <libavdevice/avdevice.h>
#include <libswscale/swscale.h>
#include <libavutil/mathematics.h>
}
#include "opencv2/opencv.hpp"

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,28,1)
#define av_frame_alloc  avcodec_alloc_frame
#endif

using namespace std;
using namespace cv;

// Print one packet's timing (pts/dts/duration) both in stream time-base
// ticks and in seconds, plus its stream index. Debug aid only.
// BUGFIX: the original passed buf1 to every av_ts_make_string /
// av_ts_make_time_string call, so buf2..buf6 were never filled and the
// printout showed blank fields and a stale buf1 value.
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
{
    // time base of the packet's stream — needed to convert ticks to seconds
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;

    char buf1[AV_TS_MAX_STRING_SIZE] = { 0 };
    av_ts_make_string(buf1, pkt->pts);
    char buf2[AV_TS_MAX_STRING_SIZE] = { 0 };
    av_ts_make_string(buf2, pkt->dts);
    char buf3[AV_TS_MAX_STRING_SIZE] = { 0 };
    av_ts_make_string(buf3, pkt->duration);

    char buf4[AV_TS_MAX_STRING_SIZE] = { 0 };
    av_ts_make_time_string(buf4, pkt->pts, time_base);
    char buf5[AV_TS_MAX_STRING_SIZE] = { 0 };
    av_ts_make_time_string(buf5, pkt->dts, time_base);
    char buf6[AV_TS_MAX_STRING_SIZE] = { 0 };
    av_ts_make_time_string(buf6, pkt->duration, time_base);

    printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
        buf1, buf4,
        buf2, buf5,
        buf3, buf6,
        pkt->stream_index);

}


// Remux an mp4 into an avi while decoding each video frame, blurring it with
// OpenCV, re-encoding it, and passing audio packets through untouched.
// Fixes versus the original answer code:
//  - stream detection compared `coder_type` (entropy-coder selector, default 0)
//    against AVMEDIA_TYPE_VIDEO (also 0), so EVERY stream matched, including
//    audio; the correct field is `codec_type`;
//  - the `end:` cleanup guarded against NULL contexts (an early `goto end`
//    reaches it before avctx/o_avctx/ofmt are set);
//  - frames from av_frame_alloc are released with av_frame_free, not av_free
//    (av_free leaks the frame's internal buffers).
// NOTE(review): avcodec_decode_video2/avcodec_encode_video2 and the
// AVStream->codec accesses are deprecated in FFmpeg 3.x but still functional;
// kept as-is to preserve the answer's behavior.
int main(int argc, char **argv)
{
    AVOutputFormat *ofmt = NULL;
    AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
    AVPacket pkt;
    AVFrame *pFrame = NULL;
    AVFrame *pFrameRGB = NULL;
    int frameFinished = 0;
    pFrame = av_frame_alloc();
    pFrameRGB = av_frame_alloc();

    const char *in_filename, *out_filename;
    int ret, i;
    in_filename = "../../TestClips/Audio Video Sync Test.mp4";
    out_filename = "out.avi";

    // Initialize FFMPEG
    av_register_all();
    // Get input file format context
    if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0)
    {
        fprintf(stderr, "Could not open input file '%s'", in_filename);
        goto end;
    }
    // Extract streams description
    if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0)
    {
        fprintf(stderr, "Failed to retrieve input stream information");
        goto end;
    }
    // Print detailed information about the input or output format,
    // such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
    av_dump_format(ifmt_ctx, 0, in_filename, 0);

    // Allocate an AVFormatContext for an output format.
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx)
    {
        fprintf(stderr, "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }

    // The output container format.
    ofmt = ofmt_ctx->oformat;

    // Allocating output streams: mirror every input stream in the output
    // container, copying its codec context so the muxer knows the parameters.
    for (i = 0; i < ifmt_ctx->nb_streams; i++)
    {
        AVStream *in_stream = ifmt_ctx->streams[i];
        AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
        if (!out_stream)
        {
            fprintf(stderr, "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
        if (ret < 0)
        {
            fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
            goto end;
        }
        // Let the muxer pick its own codec tag (avi vs mp4 tags differ).
        out_stream->codec->codec_tag = 0;
        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        {
            out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
        }
    }

    // Open output file
    if (!(ofmt->flags & AVFMT_NOFILE))
    {
        ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
        if (ret < 0)
        {
            fprintf(stderr, "Could not open output file '%s'", out_filename);
            goto end;
        }
    }
    // Write output file header
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0)
    {
        fprintf(stderr, "Error occurred when opening output file\n");
        goto end;
    }

    // Search for input video codec info
    AVCodec *in_codec = NULL;
    AVCodecContext* avctx = NULL;

    int video_stream_index = -1;
    for (int i = 0; i < ifmt_ctx->nb_streams; i++)
    {
        // BUGFIX: was `coder_type == AVMEDIA_TYPE_VIDEO` — coder_type defaults
        // to 0 and AVMEDIA_TYPE_VIDEO is 0, so that test matched every stream.
        if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            video_stream_index = i;
            avctx = ifmt_ctx->streams[i]->codec;
            in_codec = avcodec_find_decoder(avctx->codec_id);
            if (!in_codec)
            {
                fprintf(stderr, "in codec not found\n");
                exit(1);
            }
            break;
        }
    }

    // Search for output video codec info
    AVCodec *out_codec = NULL;
    AVCodecContext* o_avctx = NULL;

    int o_video_stream_index = -1;
    for (int i = 0; i < ofmt_ctx->nb_streams; i++)
    {
        // BUGFIX: same coder_type -> codec_type correction as above.
        if (ofmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            o_video_stream_index = i;
            out_codec = avcodec_find_encoder(ofmt_ctx->streams[i]->codec->codec_id);

            if (!out_codec)
            {
                fprintf(stderr, "out codec not found\n");
                exit(1);
            }

            // Configure the encoder from the decoder's geometry/timing.
            o_avctx = avcodec_alloc_context3(out_codec);

            o_avctx->height = avctx->height;
            o_avctx->width = avctx->width;
            o_avctx->sample_aspect_ratio = avctx->sample_aspect_ratio;
            o_avctx->gop_size = 2;
            o_avctx->max_b_frames = 2;

           // Prefer the encoder's first supported pixel format; fall back to
           // the decoder's format if the encoder does not publish a list.
           if (out_codec->pix_fmts)
           {
               o_avctx->pix_fmt = out_codec->pix_fmts[0];
           }
           else
           {
                o_avctx->pix_fmt = avctx->pix_fmt;
            }

            o_avctx->time_base = avctx->time_base;

            if (avcodec_open2(o_avctx, out_codec, NULL) < 0)
            {
                fprintf(stderr, "cannot open encoder\n");
                exit(1);
            }

            break;
        }
    }

    // Show output format info
    av_dump_format(ofmt_ctx, 0, out_filename, 1);

    // openCV pixel format
    enum AVPixelFormat pFormat = AV_PIX_FMT_RGB24;
    // Data size (RGB24 and BGR24 have identical layout size: 3 bytes/pixel)
    int numBytes = avpicture_get_size(pFormat, avctx->width, avctx->height);
    // allocate buffer
    uint8_t *buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
    // fill frame structure
    avpicture_fill((AVPicture *)pFrameRGB, buffer, pFormat, avctx->width, avctx->height);

    // frame area
    int y_size = avctx->width * avctx->height;
    // Open input codec
    avcodec_open2(avctx, in_codec, NULL);
    // Main loop: read packets; video packets are decoded, blurred, re-encoded;
    // everything else (audio, subtitles) is remuxed as-is.
    while (1)
    {
        AVStream *in_stream, *out_stream;
        ret = av_read_frame(ifmt_ctx, &pkt);
        if (ret < 0)
        {
            break;
        }
        in_stream = ifmt_ctx->streams[pkt.stream_index];
        out_stream = ofmt_ctx->streams[pkt.stream_index];
        //log_packet(ifmt_ctx, &pkt, "in");
        // Rescale packet timing from input to output stream time base.
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;



        //log_packet(ofmt_ctx, &pkt, "out");
        if (pkt.stream_index == video_stream_index)
        {
            avcodec_decode_video2(avctx, pFrame, &frameFinished, &pkt);
            if (frameFinished)
            {
                // Decoder pix_fmt -> BGR24 (OpenCV's native channel order).
                struct SwsContext *img_convert_ctx;
                img_convert_ctx = sws_getCachedContext(NULL,
                    avctx->width,
                    avctx->height,
                    avctx->pix_fmt,
                    avctx->width,
                    avctx->height,
                    AV_PIX_FMT_BGR24,
                    SWS_BICUBIC,
                    NULL,
                    NULL,
                    NULL);
                sws_scale(img_convert_ctx,
                    ((AVPicture*)pFrame)->data,
                    ((AVPicture*)pFrame)->linesize,
                    0,
                    avctx->height,
                    ((AVPicture *)pFrameRGB)->data,
                    ((AVPicture *)pFrameRGB)->linesize);

                sws_freeContext(img_convert_ctx);

                // Do some image processing.
                // NOTE(review): Mat wraps pFrameRGB's buffer with AUTO_STEP;
                // assumes linesize[0] == width*3 (no row padding) — verify.
                cv::Mat img(pFrame->height, pFrame->width, CV_8UC3, pFrameRGB->data[0], false);
                cv::GaussianBlur(img, img, Size(5, 5), 3);
                cv::imshow("Display", img);
                cv::waitKey(5);
                // --------------------------------
                // Transform back to initial format
                // --------------------------------
                img_convert_ctx = sws_getCachedContext(NULL,
                    avctx->width,
                    avctx->height,
                    AV_PIX_FMT_BGR24,
                    avctx->width,
                    avctx->height,
                    avctx->pix_fmt,
                    SWS_BICUBIC,
                    NULL,
                    NULL,
                    NULL);
                sws_scale(img_convert_ctx,
                    ((AVPicture*)pFrameRGB)->data,
                    ((AVPicture*)pFrameRGB)->linesize,
                    0,
                    avctx->height,
                    ((AVPicture *)pFrame)->data,
                    ((AVPicture *)pFrame)->linesize);

                // Re-encode the processed frame and mux it.
                int got_packet = 0;
                AVPacket enc_pkt = { 0 };
                av_init_packet(&enc_pkt);
                avcodec_encode_video2(o_avctx, &enc_pkt, pFrame, &got_packet);

                if (o_avctx->coded_frame->pts != AV_NOPTS_VALUE)
                {
                    enc_pkt.pts = av_rescale_q(o_avctx->coded_frame->pts, o_avctx->time_base, ofmt_ctx->streams[video_stream_index]->time_base);
                }
                if (o_avctx->coded_frame->key_frame)
                {
                    enc_pkt.flags |= AV_PKT_FLAG_KEY;
                }

                av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
                av_packet_unref(&enc_pkt);
                sws_freeContext(img_convert_ctx);
            }
        }
        else // write sound frame
        {
            ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
        }
        if (ret < 0)
        {
            fprintf(stderr, "Error muxing packet\n");
            break;
        }
        // Decrease packet ref counter
        av_packet_unref(&pkt);
    }
    av_write_trailer(ofmt_ctx);
end:

    // Guard against early `goto end` before these contexts were created.
    if (avctx)
        avcodec_close(avctx);
    if (o_avctx)
        avcodec_close(o_avctx);
    // av_frame_free (not av_free) releases the frames' internal buffers too.
    av_frame_free(&pFrame);
    av_frame_free(&pFrameRGB);

    avformat_close_input(&ifmt_ctx);

    // close output
    if (ofmt_ctx && ofmt && !(ofmt->flags & AVFMT_NOFILE))
    {
        avio_closep(&ofmt_ctx->pb);
    }
    avformat_free_context(ofmt_ctx);

    if (ret < 0 && ret != AVERROR_EOF)
    {
        char buf_err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
        av_make_error_string(buf_err, AV_ERROR_MAX_STRING_SIZE, ret);
        fprintf(stderr, "Error occurred: %s\n", buf_err);
        return 1;
    }

    return 0;
}

关于opencv - 如何使用 ffmpeg 和 opencv 编辑帧的内容?,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/45672642/

相关文章:

c++ - OpenCV中重载函数的实例

opencv - 在opencv中计算相机的位置和方向

c# 如何从 nvlc 捕获音频并引发 Accord.Audio.NewFrameEventArgs

nginx - 使用 ffmpeg 在文件夹中流式传输视频

ffmpeg - 如何在一个命令行中提取视频文件的第一个视频流、英文音频和法文字幕?

javascript - 下载特定歌曲时 Ffmpeg 崩溃

c++ - 在 OpenCV 中将 SVD 应用于 YCbCr 图像

python - 在OpenCV中删除图像中的噪声而不会丢失数据

opencv - 从 Vec3b 转换为 Mat3b

ffmpeg速度编码问题