Loading and Saving Video with OpenCV



Code examples (each block below is wrapped in #if 0 / #endif; change the guard to #if 1 to build the one you want):

//Two ways to load a local video file with OpenCV
//Method 1:
#if 0
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace std;
//using namespace cv;

int main()
{
	// Create a VideoCapture object
	cv::VideoCapture capture;

	// Open the video file
	capture.open("D:/opencv/videos/xxx.mp4");

	// Check whether the video was opened successfully
	if (!capture.isOpened())
	{
		std::cout << "Failed to open the video!" << std::endl;
		return -1;
	}

	std::cout << "Video loaded successfully" << std::endl << std::endl;

	// Video properties - frame width and height
	double frameWidth = capture.get(cv::CAP_PROP_FRAME_WIDTH);
	double frameHeight = capture.get(cv::CAP_PROP_FRAME_HEIGHT);
	std::cout << "Frame width: " << frameWidth << std::endl;
	std::cout << "Frame height: " << frameHeight << std::endl;

	// Video properties - frame rate
	double frameRate = capture.get(cv::CAP_PROP_FPS);
	std::cout << "Frame rate: " << frameRate << std::endl;

	// Video properties - total number of frames
	double totalFrames = capture.get(cv::CAP_PROP_FRAME_COUNT);
	std::cout << "Total frames: " << totalFrames << std::endl;

	// Read the video frame by frame
	cv::Mat frameImg;
	long nCount = 1;
	while (true)
	{
		// Grab the current frame
		capture >> frameImg;

		// Display the current frame
		if (!frameImg.empty())
		{
			cv::imshow("Current Frame", frameImg);
			if (char(cv::waitKey(int(1000 / frameRate))) == 'q') // press 'q' to quit
				break;
		}
		else
		{
			break;
		}

		nCount++;
	}

	// Release resources
	capture.release();
	cv::waitKey(1);
	cv::destroyAllWindows();
	return 0;
}
#endif

//Method 2:
#if 0
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace std;
using namespace cv;

int main()
{
	// Create a VideoCapture object
	VideoCapture capture;

	// Open the video file
	capture.open("D:/opencv/videos/big_buck_bunny.mp4");

	// Check whether the video was opened successfully
	if (!capture.isOpened())
	{
		std::cout << "Failed to open the video!" << std::endl;
		return -1;
	}

	std::cout << "Video loaded successfully" << std::endl << std::endl;

	// Video properties - frame width and height
	double frameWidth = capture.get(CAP_PROP_FRAME_WIDTH);
	double frameHeight = capture.get(CAP_PROP_FRAME_HEIGHT);
	std::cout << "Frame width: " << frameWidth << std::endl;
	std::cout << "Frame height: " << frameHeight << std::endl;

	// Video properties - frame rate
	double frameRate = capture.get(CAP_PROP_FPS);
	std::cout << "Frame rate: " << frameRate << std::endl;

	// Video properties - total number of frames
	double totalFrames = capture.get(CAP_PROP_FRAME_COUNT);
	std::cout << "Total frames: " << totalFrames << std::endl;

	// Read the video frame by frame
	Mat frameImg;
	int nCount = 1;
	while (true)
	{
		// Grab the current frame
		capture >> frameImg;

		// Display the current frame
		if (!frameImg.empty())
		{
			imshow("Current Frame", frameImg);
			if (char(waitKey(int(1000 / frameRate))) == 'q') // press 'q' to quit
				break;
		}
		else
		{
			break;
		}

		nCount++;
	}

	// Release resources
	capture.release();
	waitKey(1);
	destroyAllWindows();

	return 0;
}
#endif

//Read a video stream from a camera
#if 0
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace std;
using namespace cv;

int main()
{
	// Create a VideoCapture object
	VideoCapture capture;

	// Open the default camera (CAP_DSHOW selects the DirectShow backend on Windows)
	capture.open(0, CAP_DSHOW);

	// Check whether the camera was opened successfully
	if (!capture.isOpened())
	{
		std::cout << "Failed to open the camera!" << std::endl;
		return -1;
	}

	std::cout << "Camera opened successfully" << std::endl << std::endl;

	// Camera properties - frame width and height
	double frameWidth = capture.get(CAP_PROP_FRAME_WIDTH);
	double frameHeight = capture.get(CAP_PROP_FRAME_HEIGHT);
	std::cout << "Camera frame width: " << frameWidth << std::endl;
	std::cout << "Camera frame height: " << frameHeight << std::endl;

	// Camera properties - frame rate (many webcams report 0 here, so fall back to a fixed delay)
	double frameRate = capture.get(CAP_PROP_FPS);
	std::cout << "Camera frame rate: " << frameRate << std::endl;
	int delayMs = frameRate > 0 ? int(1000 / frameRate) : 30;

	// Read the camera frame by frame
	Mat frameImg;
	int nCount = 1;
	while (true)
	{
		// Grab the current frame
		capture >> frameImg;

		// Display the current frame
		if (!frameImg.empty())
		{
			imshow("Current Frame", frameImg);
			if (char(waitKey(delayMs)) == 'q') // press 'q' to quit
				break;
		}
		else
		{
			break;
		}

		nCount++;
	}

	// Release resources
	capture.release();
	waitKey(1);
	destroyAllWindows();

	return 0;
}
#endif

//Read a video stream from a URL
#if 0
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace std;
using namespace cv;

int main()
{
	// Create a VideoCapture object
	VideoCapture capture;

	// Open the network stream (CAP_ANY lets OpenCV pick a suitable backend, typically FFmpeg)
	capture.open("https://test-streams.mux.dev/x36xhzz/x36xhzz.m3u8", CAP_ANY);

	// Check whether the stream was opened successfully
	if (!capture.isOpened())
	{
		std::cout << "Failed to open the video!" << std::endl;
		return -1;
	}

	std::cout << "Video loaded successfully" << std::endl << std::endl;

	// Stream properties - frame width and height
	double frameWidth = capture.get(CAP_PROP_FRAME_WIDTH);
	double frameHeight = capture.get(CAP_PROP_FRAME_HEIGHT);
	std::cout << "Frame width: " << frameWidth << std::endl;
	std::cout << "Frame height: " << frameHeight << std::endl;

	// Stream properties - frame rate
	double frameRate = capture.get(CAP_PROP_FPS);
	std::cout << "Frame rate: " << frameRate << std::endl;

	// Stream properties - total number of frames (may be 0 for live streams)
	double totalFrames = capture.get(CAP_PROP_FRAME_COUNT);
	std::cout << "Total frames: " << totalFrames << std::endl;

	// Read the stream frame by frame
	Mat frameImg;
	int nCount = 1;
	while (true)
	{
		// Grab the current frame
		capture >> frameImg;

		// Display the current frame
		if (!frameImg.empty())
		{
			imshow("Current Frame", frameImg);
			if (char(waitKey(int(1000 / frameRate))) == 'q') // press 'q' to quit
				break;
		}
		else
		{
			break;
		}

		nCount++;
	}

	// Release resources
	capture.release();
	waitKey(1);
	destroyAllWindows();

	return 0;
}
#endif

//Save video recorded from a camera; saving a local video file or a URL stream works the same way
#if 0
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace std;
using namespace cv;

int main()
{
	// Create a VideoCapture object and open the default camera
	VideoCapture capture(0);

	// Check whether the camera was opened successfully
	if (!capture.isOpened())
	{
		std::cout << "Failed to open the camera!" << std::endl;
		return -1;
	}

	// Camera frame width and height
	double frameWidth = capture.get(CAP_PROP_FRAME_WIDTH);
	double frameHeight = capture.get(CAP_PROP_FRAME_HEIGHT);
	std::cout << "Camera frame width: " << frameWidth << std::endl;
	std::cout << "Camera frame height: " << frameHeight << std::endl;

	// Define the codec and the output file (the output FPS is fixed at 30 here; ideally it should match the camera's real rate)
	int fourcc = VideoWriter::fourcc('M', 'J', 'P', 'G'); // MJPEG codec
	VideoWriter writer("D:\\opencv\\videos\\output.avi", fourcc, 30, Size((int)frameWidth, (int)frameHeight), true);

	// Check whether the VideoWriter was opened successfully
	if (!writer.isOpened())
	{
		std::cout << "Failed to open the output file for writing!" << std::endl;
		return -1;
	}

	std::cout << "Recording started..." << std::endl;

	// Read the camera frame by frame
	Mat frameImg;
	while (true)
	{
		// Grab the current frame
		capture >> frameImg;

		// Check whether the frame is empty
		if (frameImg.empty())
		{
			std::cout << "Failed to read a frame!" << std::endl;
			break;
		}

		// Write the current frame to the output file
		writer.write(frameImg);

		// Display the current frame
		imshow("Camera", frameImg);

		// Press 'q' to quit
		if (char(waitKey(1)) == 'q')
			break;
	}

	// Release resources
	capture.release();
	writer.release();
	waitKey(1);
	destroyAllWindows();

	std::cout << "Recording finished; saved as output.avi" << std::endl;

	return 0;
}
#endif
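
The comment above notes that saving a local video file or a URL stream works the same way; as a concrete illustration, here is a minimal sketch of the local-file case (the input/output paths are placeholders I made up, not from the original post). The only real difference from the camera example is that the output FPS is taken from the source instead of being hardcoded:

//Sketch: re-encode a local video file to AVI, reusing the source's frame size and FPS
#if 0
#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
	// Open the input file (placeholder path - adjust to your own)
	cv::VideoCapture capture("D:/opencv/videos/input.mp4");
	if (!capture.isOpened())
	{
		std::cout << "Failed to open the input video!" << std::endl;
		return -1;
	}

	// Reuse the source properties; fall back to 25 FPS if the container reports none
	double fps = capture.get(cv::CAP_PROP_FPS);
	if (fps <= 0) fps = 25.0;
	cv::Size frameSize((int)capture.get(cv::CAP_PROP_FRAME_WIDTH),
	                   (int)capture.get(cv::CAP_PROP_FRAME_HEIGHT));

	// MJPEG-encoded AVI output (placeholder path)
	int fourcc = cv::VideoWriter::fourcc('M', 'J', 'P', 'G');
	cv::VideoWriter writer("D:/opencv/videos/copy.avi", fourcc, fps, frameSize, true);
	if (!writer.isOpened())
	{
		std::cout << "Failed to open the output file for writing!" << std::endl;
		return -1;
	}

	// Copy every frame from the input to the output
	cv::Mat frame;
	while (capture.read(frame)) // read() returns false at the end of the stream
		writer.write(frame);

	capture.release();
	writer.release();
	std::cout << "Done; saved as copy.avi" << std::endl;
	return 0;
}
#endif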

At first I ran into a compile error: "undeclared identifier" for CV_CAP_PROP_FRAME_WIDTH, CV_CAP_PROP_FRAME_HEIGHT, CV_CAP_PROP_FPS and CV_CAP_PROP_FRAME_COUNT.

Solution:

These identifiers are the constants OpenCV uses to access video-capture properties: the frame width, frame height, frame rate and total frame count. An "undeclared identifier" error for them usually has one of the following causes:

1. The correct header is not included

Check that the code includes the OpenCV header, for example:

#include <opencv2/opencv.hpp>

2. OpenCV version differences

Constant names can differ between OpenCV versions, and in newer releases some of them have been renamed. Check the documentation for the version you are using to confirm the correct names.

3. Namespace issues

In OpenCV 4.x and later the constants live in the cv namespace and are used like this:

cv::VideoCapture cap;
cap.get(cv::CAP_PROP_FRAME_WIDTH);

4. The OpenCV libraries are not linked

Make sure the OpenCV libraries are linked correctly at build time so the related functions and constants are available.

Tracing through the OpenCV source shows that my case was cause 2: the constant names changed between versions. The relevant enum in the current headers is:

/** @brief cv::VideoCapture generic properties identifier.

 Reading / writing properties involves many layers. Some unexpected result might happens along this chain.
 Effective behaviour depends from device hardware, driver and API Backend.
 @sa videoio_flags_others, VideoCapture::get(), VideoCapture::set()
*/
enum VideoCaptureProperties {
       CAP_PROP_POS_MSEC       =0, //!< Current position of the video file in milliseconds.
       CAP_PROP_POS_FRAMES     =1, //!< 0-based index of the frame to be decoded/captured next.
       CAP_PROP_POS_AVI_RATIO  =2, //!< Relative position of the video file: 0=start of the film, 1=end of the film.
       CAP_PROP_FRAME_WIDTH    =3, //!< Width of the frames in the video stream.
       CAP_PROP_FRAME_HEIGHT   =4, //!< Height of the frames in the video stream.
       CAP_PROP_FPS            =5, //!< Frame rate.
       CAP_PROP_FOURCC         =6, //!< 4-character code of codec. see VideoWriter::fourcc .
       CAP_PROP_FRAME_COUNT    =7, //!< Number of frames in the video file.
       CAP_PROP_FORMAT         =8, //!< Format of the %Mat objects (see Mat::type()) returned by VideoCapture::retrieve().
                                   //!< Set value -1 to fetch undecoded RAW video streams (as Mat 8UC1).
       CAP_PROP_MODE           =9, //!< Backend-specific value indicating the current capture mode.
       CAP_PROP_BRIGHTNESS    =10, //!< Brightness of the image (only for those cameras that support).
       CAP_PROP_CONTRAST      =11, //!< Contrast of the image (only for cameras).
       CAP_PROP_SATURATION    =12, //!< Saturation of the image (only for cameras).
       CAP_PROP_HUE           =13, //!< Hue of the image (only for cameras).
       CAP_PROP_GAIN          =14, //!< Gain of the image (only for those cameras that support).
       CAP_PROP_EXPOSURE      =15, //!< Exposure (only for those cameras that support).
       CAP_PROP_CONVERT_RGB   =16, //!< Boolean flags indicating whether images should be converted to RGB. <br/>
                                   //!< *GStreamer note*: The flag is ignored in case if custom pipeline is used. It's user responsibility to interpret pipeline output.
       CAP_PROP_WHITE_BALANCE_BLUE_U =17, //!< Currently unsupported.
       CAP_PROP_RECTIFICATION =18, //!< Rectification flag for stereo cameras (note: only supported by DC1394 v 2.x backend currently).
       CAP_PROP_MONOCHROME    =19,
       CAP_PROP_SHARPNESS     =20,
       CAP_PROP_AUTO_EXPOSURE =21, //!< DC1394: exposure control done by camera, user can adjust reference level using this feature.
       CAP_PROP_GAMMA         =22,
       CAP_PROP_TEMPERATURE   =23,
       CAP_PROP_TRIGGER       =24,
       CAP_PROP_TRIGGER_DELAY =25,
       CAP_PROP_WHITE_BALANCE_RED_V =26,
       CAP_PROP_ZOOM          =27,
       CAP_PROP_FOCUS         =28,
       CAP_PROP_GUID          =29,
       CAP_PROP_ISO_SPEED     =30,
       CAP_PROP_BACKLIGHT     =32,
       CAP_PROP_PAN           =33,
       CAP_PROP_TILT          =34,
       CAP_PROP_ROLL          =35,
       CAP_PROP_IRIS          =36,
       CAP_PROP_SETTINGS      =37, //!< Pop up video/camera filter dialog (note: only supported by DSHOW backend currently. The property value is ignored)
       CAP_PROP_BUFFERSIZE    =38,
       CAP_PROP_AUTOFOCUS     =39,
       CAP_PROP_SAR_NUM       =40, //!< Sample aspect ratio: num/den (num)
       CAP_PROP_SAR_DEN       =41, //!< Sample aspect ratio: num/den (den)
       CAP_PROP_BACKEND       =42, //!< Current backend (enum VideoCaptureAPIs). Read-only property
       CAP_PROP_CHANNEL       =43, //!< Video input or Channel Number (only for those cameras that support)
       CAP_PROP_AUTO_WB       =44, //!< enable/ disable auto white-balance
       CAP_PROP_WB_TEMPERATURE=45, //!< white-balance color temperature
       CAP_PROP_CODEC_PIXEL_FORMAT =46,    //!< (read-only) codec's pixel format. 4-character code - see VideoWriter::fourcc . Subset of [AV_PIX_FMT_*](https://github.com/FFmpeg/FFmpeg/blob/master/libavcodec/raw.c) or -1 if unknown
       CAP_PROP_BITRATE       =47, //!< (read-only) Video bitrate in kbits/s
       CAP_PROP_ORIENTATION_META=48, //!< (read-only) Frame rotation defined by stream meta (applicable for FFmpeg and AVFoundation back-ends only)
       CAP_PROP_ORIENTATION_AUTO=49, //!< if true - rotates output frames of CvCapture considering video file's metadata  (applicable for FFmpeg and AVFoundation back-ends only) (https://github.com/opencv/opencv/issues/15499)
       CAP_PROP_HW_ACCELERATION=50, //!< (**open-only**) Hardware acceleration type (see #VideoAccelerationType). Setting supported only via `params` parameter in cv::VideoCapture constructor / .open() method. Default value is backend-specific.
       CAP_PROP_HW_DEVICE      =51, //!< (**open-only**) Hardware device index (select GPU if multiple available). Device enumeration is acceleration type specific.
       CAP_PROP_HW_ACCELERATION_USE_OPENCL=52, //!< (**open-only**) If non-zero, create new OpenCL context and bind it to current thread. The OpenCL context created with Video Acceleration context attached it (if not attached yet) for optimized GPU data copy between HW accelerated decoder and cv::UMat.
       CAP_PROP_OPEN_TIMEOUT_MSEC=53, //!< (**open-only**) timeout in milliseconds for opening a video capture (applicable for FFmpeg and GStreamer back-ends only)
       CAP_PROP_READ_TIMEOUT_MSEC=54, //!< (**open-only**) timeout in milliseconds for reading from a video capture (applicable for FFmpeg and GStreamer back-ends only)
       CAP_PROP_STREAM_OPEN_TIME_USEC =55, //<! (read-only) time in microseconds since Jan 1 1970 when stream was opened. Applicable for FFmpeg backend only. Useful for RTSP and other live streams
       CAP_PROP_VIDEO_TOTAL_CHANNELS = 56, //!< (read-only) Number of video channels
       CAP_PROP_VIDEO_STREAM = 57, //!< (**open-only**) Specify video stream, 0-based index. Use -1 to disable video stream from file or IP cameras. Default value is 0.
       CAP_PROP_AUDIO_STREAM = 58, //!< (**open-only**) Specify stream in multi-language media files, -1 - disable audio processing or microphone. Default value is -1.
       CAP_PROP_AUDIO_POS = 59, //!< (read-only) Audio position is measured in samples. Accurate audio sample timestamp of previous grabbed fragment. See CAP_PROP_AUDIO_SAMPLES_PER_SECOND and CAP_PROP_AUDIO_SHIFT_NSEC.
       CAP_PROP_AUDIO_SHIFT_NSEC = 60, //!< (read only) Contains the time difference between the start of the audio stream and the video stream in nanoseconds. Positive value means that audio is started after the first video frame. Negative value means that audio is started before the first video frame.
       CAP_PROP_AUDIO_DATA_DEPTH = 61, //!< (open, read) Alternative definition to bits-per-sample, but with clear handling of 32F / 32S
       CAP_PROP_AUDIO_SAMPLES_PER_SECOND = 62, //!< (open, read) determined from file/codec input. If not specified, then selected audio sample rate is 44100
       CAP_PROP_AUDIO_BASE_INDEX = 63, //!< (read-only) Index of the first audio channel for .retrieve() calls. That audio channel number continues enumeration after video channels.
       CAP_PROP_AUDIO_TOTAL_CHANNELS = 64, //!< (read-only) Number of audio channels in the selected audio stream (mono, stereo, etc)
       CAP_PROP_AUDIO_TOTAL_STREAMS = 65, //!< (read-only) Number of audio streams.
       CAP_PROP_AUDIO_SYNCHRONIZE = 66, //!< (open, read) Enables audio synchronization.
       CAP_PROP_LRF_HAS_KEY_FRAME = 67, //!< FFmpeg back-end only - Indicates whether the Last Raw Frame (LRF), output from VideoCapture::read() when VideoCapture is initialized with VideoCapture::open(CAP_FFMPEG, {CAP_PROP_FORMAT, -1}) or VideoCapture::set(CAP_PROP_FORMAT,-1) is called before the first call to VideoCapture::read(), contains encoded data for a key frame.
       CAP_PROP_CODEC_EXTRADATA_INDEX = 68, //!< Positive index indicates that returning extra data is supported by the video back end.  This can be retrieved as cap.retrieve(data, <returned index>).  E.g. When reading from a h264 encoded RTSP stream, the FFmpeg backend could return the SPS and/or PPS if available (if sent in reply to a DESCRIBE request), from calls to cap.retrieve(data, <returned index>).
       CAP_PROP_FRAME_TYPE = 69, //!< (read-only) FFmpeg back-end only - Frame type ascii code (73 = 'I', 80 = 'P', 66 = 'B' or 63 = '?' if unknown) of the most recently read frame.
       CAP_PROP_N_THREADS = 70, //!< (**open-only**) Set the maximum number of threads to use. Use 0 to use as many threads as CPU cores (applicable for FFmpeg back-end only).
#ifndef CV_DOXYGEN
       CV__CAP_PROP_LATEST
#endif
     };
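
In short, on OpenCV 4.x the legacy CV_CAP_PROP_* spellings are no longer declared by the headers pulled in by opencv2/opencv.hpp (they were moved to a legacy header), so switching to the CAP_PROP_* names in the cv namespace fixes the error. A minimal before/after sketch (the variable names are just for illustration):

// Old spelling (OpenCV 2.x-era C API) - triggers "undeclared identifier" on newer OpenCV:
// double frameWidth = capture.get(CV_CAP_PROP_FRAME_WIDTH);

// New spelling (OpenCV 3.x/4.x C++ API):
double frameWidth = capture.get(cv::CAP_PROP_FRAME_WIDTH);
double frameRate  = capture.get(cv::CAP_PROP_FPS);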

Result of running the local-file example:

I also tested reading from the camera and reading from the URL stream; both work as well, so I won't paste screenshots here.