Displaying a PS stream file with FFmpeg and OpenCV


How do you display a stored PS (MPEG program stream) file?

The demo below uses FFmpeg to demux and decode the stream and OpenCV to display the decoded frames.
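
On Windows the #pragma comment lines below pull in the import libraries. If you build on Linux/macOS instead, a command along these lines should work, assuming pkg-config can locate your FFmpeg and OpenCV installs (module names and paths may differ on your system):

g++ showps.cpp -o showps $(pkg-config --cflags --libs libavformat libavcodec libswscale libavutil opencv4)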

// showps.cpp : This file contains the "main" function. Program execution begins and ends here.
//

#include <iostream>
#include <opencv2/opencv.hpp>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
}
#ifdef _WIN32
#pragma comment(lib,"avutil.lib")
#pragma comment(lib,"avcodec.lib")
#pragma comment(lib,"avformat.lib")
#pragma comment(lib,"swscale.lib")
#endif
#ifdef _DEBUG
#pragma comment(lib,"opencv_world490d.lib")
#else
#pragma comment(lib,"opencv_world490.lib")
#endif

// Print a human-readable FFmpeg error message for the current value of `ret`.
#define AV_ERROR_REPORT do { \
    char buffer[AV_ERROR_MAX_STRING_SIZE]; \
    av_strerror(ret, buffer, sizeof(buffer)); \
    std::cerr << "Decoder error: " << buffer << std::endl; \
} while (0)
int main() {
    // No explicit global initialization is required with recent FFmpeg
    // releases (av_register_all() is deprecated and has been removed).

    // Open video file
    AVFormatContext* format_context = avformat_alloc_context();
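    // "mpeg" is the name of FFmpeg's MPEG-PS (program stream) demuxer.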
    const AVInputFormat* input_format = av_find_input_format("mpeg");
    if (!input_format) {
        std::cerr << "Failed to find input format" << std::endl;
        return -1;
    }

    // Force the MPEG-PS demuxer instead of relying on format probing.
    if (avformat_open_input(&format_context, "o1.ps", input_format, nullptr) != 0) {
        std::cerr << "Failed to open input file" << std::endl;
        return -1;
    }
    if (avformat_find_stream_info(format_context, nullptr) < 0) {
        std::cerr << "Failed to find stream info" << std::endl;
        avformat_close_input(&format_context);
        return -1;
    }

    // Find video stream
    int video_stream_index = -1;
    for (unsigned int i = 0; i < format_context->nb_streams; ++i) {
        if (format_context->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_stream_index = i;
            break;
        }
    }
    if (video_stream_index == -1) {
        std::cerr << "No video stream found" << std::endl;
        avformat_close_input(&format_context);
        return -1;
    }

    // Find decoder for the stream
    const AVCodec* codec = avcodec_find_decoder(format_context->streams[video_stream_index]->codecpar->codec_id);
    if (!codec) {
        std::cerr << "Failed to find decoder for video stream" << std::endl;
        avformat_close_input(&format_context);
        return -1;
    }

    // Create codec context for the decoder
    AVCodecContext* codec_context = avcodec_alloc_context3(codec);
    if (!codec_context) {
        std::cerr << "Failed to allocate codec context" << std::endl;
        avformat_close_input(&format_context);
        return -1;
    }
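    // codecpar carries the demuxed stream's parameters (codec id, width/height,
    // extradata) into the decoder context so avcodec_open2() can use them.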
    if (avcodec_parameters_to_context(codec_context, format_context->streams[video_stream_index]->codecpar) < 0) {
        std::cerr << "Failed to copy codec parameters to codec context" << std::endl;
        avcodec_free_context(&codec_context);
        avformat_close_input(&format_context);
        return -1;
    }
    if (avcodec_open2(codec_context, codec, nullptr) < 0) {
        std::cerr << "Failed to open codec" << std::endl;
        avcodec_free_context(&codec_context);
        avformat_close_input(&format_context);
        return -1;
    }

    // Allocate video frame and initialize SwsContext
    AVFrame* frame = av_frame_alloc();
    if (!frame) {
        std::cerr << "Failed to allocate frame" << std::endl;
        avcodec_free_context(&codec_context);
        avformat_close_input(&format_context);
        return -1;
    }
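    // sws_scale will convert the decoder's native pixel format (typically a
    // planar YUV format) into packed BGR24, which is what cv::Mat expects.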
    struct SwsContext* sws_context = sws_getContext(codec_context->width, codec_context->height, codec_context->pix_fmt,
        codec_context->width, codec_context->height, AV_PIX_FMT_BGR24,
        SWS_BILINEAR, nullptr, nullptr, nullptr);
    if (!sws_context) {
        std::cerr << "Failed to initialize SwsContext" << std::endl;
        av_frame_free(&frame);
        avcodec_free_context(&codec_context);
        avformat_close_input(&format_context);
        return -1;
    }

    // Read frames from the stream and display using OpenCV
    AVPacket* packet = av_packet_alloc();
    while (packet && av_read_frame(format_context, packet) >= 0) {
        if (packet->stream_index == video_stream_index) {
            // Send the compressed packet to the decoder
            int ret = avcodec_send_packet(codec_context, packet);
            if (ret < 0) {
                AV_ERROR_REPORT;
                break;
            }

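            // One packet may produce zero or more frames, so keep draining the
            // decoder until it reports EAGAIN (needs more input) or EOF.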
            while (ret >= 0) {
                ret = avcodec_receive_frame(codec_context, frame);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                    break;
                else if (ret < 0) {
                    AV_ERROR_REPORT;
                    break;
                }

                // Convert frame to BGR
                AVFrame* bgr_frame = av_frame_alloc();
                if (!bgr_frame) {
                    std::cerr << "Failed to allocate BGR frame" << std::endl;
                    break;
                }
                bgr_frame->width = codec_context->width;
                bgr_frame->height = codec_context->height;
                bgr_frame->format = AV_PIX_FMT_BGR24;
                if (av_frame_get_buffer(bgr_frame, 0) < 0) {
                    std::cerr << "Failed to allocate BGR frame buffer" << std::endl;
                    av_frame_free(&bgr_frame);
                    break;
                }

                sws_scale(sws_context, frame->data, frame->linesize, 0, codec_context->height,
                    bgr_frame->data, bgr_frame->linesize);

                // Wrap the BGR buffer in a cv::Mat (no copy); pass the FFmpeg
                // line stride because av_frame_get_buffer may pad each row.
                cv::Mat bgrMat(codec_context->height, codec_context->width, CV_8UC3,
                    bgr_frame->data[0], bgr_frame->linesize[0]);

                // Display using OpenCV
                cv::imshow("Frame", bgrMat);
                cv::waitKey(1);
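                // waitKey() also pumps the HighGUI event loop; without it the
                // window would never repaint.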

                // Free resources
                av_frame_free(&bgr_frame);
            }
        }
        av_packet_unref(packet);
    }

    // Free resources
    av_packet_free(&packet);
    av_frame_free(&frame);
    sws_freeContext(sws_context);
    avcodec_free_context(&codec_context);
    avformat_close_input(&format_context);

    return 0;
}
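
If you don't already have a PS file to test with, one way to produce one (an assumed command line; adjust the input path and codecs to your setup) is to let the FFmpeg CLI write an MPEG program stream:

ffmpeg -i input.mp4 -c:v mpeg2video -c:a mp2 -f mpeg o1.ps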