本文将详细介绍在Android平台上使用FFmpeg进行高效视频解码的实现方案,采用面向对象的设计思想。
一、架构设计
1.1 整体架构
采用三层架构设计:
• 应用层:提供用户接口和UI展示
• 业务逻辑层:管理解码流程和状态
• Native层:FFmpeg核心解码实现
1.2 状态管理方案
使用静态常量替代枚举类:
/**
 * Decoder lifecycle states as plain int constants (an enum is deliberately
 * avoided, per the article's compatibility goal). Values mirror
 * VideoDecoder's STATE_* constants.
 */
public class DecodeState {
    public static final int STATE_IDLE = 0;      // created, nothing loaded
    public static final int STATE_PREPARING = 1; // opening file on a worker thread
    public static final int STATE_READY = 2;     // prepared, can start decoding
    public static final int STATE_DECODING = 3;  // decode loop running
    public static final int STATE_PAUSED = 4;    // decode loop suspended
    public static final int STATE_STOPPED = 5;   // decode loop ended
    public static final int STATE_ERROR = 6;     // unrecoverable failure

    // Constants holder; not meant to be instantiated.
    private DecodeState() {}
}
二、核心类实现
2.1 视频帧数据封装类
public class VideoFrame {
private final byte[] videoData;
private final int width;
private final int height;
private final long pts;
private final int format;
private final int rotation;
public VideoFrame(byte[] videoData, int width, int height, long pts, int format, int rotation) {
this.videoData = videoData;
this.width = width;
this.height = height;
this.pts = pts;
this.format = format;
this.rotation = rotation;
}
// Getter方法
public byte[] getVideoData() {
return videoData;
}
public int getWidth() {
return width;
}
public int getHeight() {
return height;
}
public long getPts() {
return pts;
}
public int getFormat() {
return format;
}
public int getRotation() {
return rotation;
}
// 转换为Bitmap
public Bitmap toBitmap() {
YuvImage yuvImage = new YuvImage(videoData, ImageFormat.NV21, width, height, null);
ByteArrayOutputStream os = new ByteArrayOutputStream();
yuvImage.compressToJpeg(new Rect(0, 0, width, height), 100, os);
byte[] jpegByteArray = os.toByteArray();
Bitmap bitmap = BitmapFactory.decodeByteArray(jpegByteArray, 0, jpegByteArray.length);
// 处理旋转
if (rotation != 0) {
Matrix matrix = new Matrix();
matrix.postRotate(rotation);
bitmap = Bitmap.createBitmap(bitmap, 0, 0,
bitmap.getWidth(), bitmap.getHeight(),
matrix, true);
}
return bitmap;
}
}
2.2 视频解码器封装类
/**
 * Java-side facade over the native FFmpeg decoder.
 *
 * State transitions: IDLE -> PREPARING -> READY -> DECODING &lt;-&gt; PAUSED -> STOPPED.
 * All DecodeListener callbacks from this class are posted to the main thread.
 */
public class VideoDecoder {
    // Decoder lifecycle states (plain int constants instead of an enum).
    public static final int STATE_IDLE = 0;
    public static final int STATE_PREPARING = 1;
    public static final int STATE_READY = 2;
    public static final int STATE_DECODING = 3;
    public static final int STATE_PAUSED = 4;
    public static final int STATE_STOPPED = 5;
    public static final int STATE_ERROR = 6;

    // Error codes delivered through DecodeListener.onErrorOccurred.
    public static final int ERROR_CODE_FILE_NOT_FOUND = 1001;
    public static final int ERROR_CODE_UNSUPPORTED_FORMAT = 1002;
    public static final int ERROR_CODE_DECODE_FAILED = 1003;

    private volatile int currentState = STATE_IDLE;
    private long nativeHandle;
    private final Handler mainHandler;
    // Bug fix: setState()/notifyError() referenced this field but it was never
    // declared, so the original class did not compile. It is set via
    // setDecodeListener() and/or startDecoding().
    private volatile DecodeListener listener;

    /** Callback interface; invoked on the main (UI) thread. */
    public interface DecodeListener {
        void onFrameDecoded(VideoFrame frame);
        void onDecodeFinished();
        void onErrorOccurred(int errorCode, String message);
        void onStateChanged(int newState);
    }

    public VideoDecoder() {
        nativeHandle = nativeInit();
        mainHandler = new Handler(Looper.getMainLooper());
    }

    /**
     * Registers the listener. Call this before prepare() to also observe the
     * PREPARING/READY transitions; startDecoding() registers it as well.
     */
    public void setDecodeListener(DecodeListener listener) {
        this.listener = listener;
    }

    /** Opens the file asynchronously; IDLE -> PREPARING -> READY (or ERROR). */
    public void prepare(String filePath) {
        if (currentState != STATE_IDLE) {
            notifyError(ERROR_CODE_DECODE_FAILED, "Decoder is not in idle state");
            return;
        }
        setState(STATE_PREPARING);
        new Thread(() -> {
            boolean success = nativePrepare(nativeHandle, filePath);
            if (success) {
                setState(STATE_READY);
            } else {
                setState(STATE_ERROR);
                notifyError(ERROR_CODE_FILE_NOT_FOUND, "Failed to prepare decoder");
            }
        }).start();
    }

    /** Runs the native decode loop on a worker thread until EOF or stop(). */
    public void startDecoding(DecodeListener listener) {
        if (currentState != STATE_READY && currentState != STATE_PAUSED) {
            notifyError(ERROR_CODE_DECODE_FAILED, "Decoder is not ready");
            return;
        }
        this.listener = listener; // register before any state callback fires
        setState(STATE_DECODING);
        new Thread(() -> {
            nativeStartDecoding(nativeHandle, listener);
            setState(STATE_STOPPED);
        }).start();
    }

    /** Suspends the decode loop; only valid while DECODING. */
    public void pause() {
        if (currentState == STATE_DECODING) {
            setState(STATE_PAUSED);
            nativePause(nativeHandle);
        }
    }

    /** Resumes a paused decode loop. */
    public void resume() {
        if (currentState == STATE_PAUSED) {
            setState(STATE_DECODING);
            nativeResume(nativeHandle);
        }
    }

    /** Asks the native decode loop to exit. */
    public void stop() {
        setState(STATE_STOPPED);
        if (nativeHandle != 0) { // guard against use after release()
            nativeStop(nativeHandle);
        }
    }

    /** Releases all native resources; the instance must not be reused after this. */
    public void release() {
        setState(STATE_STOPPED);
        if (nativeHandle != 0) { // idempotent: safe to call release() twice
            nativeRelease(nativeHandle);
            nativeHandle = 0;
        }
        listener = null; // drop callback reference (avoids leaking e.g. an Activity)
    }

    public int getCurrentState() {
        return currentState;
    }

    // Updates the state and notifies the listener on the main thread.
    private void setState(int newState) {
        currentState = newState;
        mainHandler.post(() -> {
            if (listener != null) {
                listener.onStateChanged(newState);
            }
        });
    }

    // Reports an error to the listener on the main thread.
    private void notifyError(int errorCode, String message) {
        mainHandler.post(() -> {
            if (listener != null) {
                listener.onErrorOccurred(errorCode, message);
            }
        });
    }

    // Native methods (implemented in ffmpeg-wrapper).
    private native long nativeInit();
    private native boolean nativePrepare(long handle, String filePath);
    private native void nativeStartDecoding(long handle, DecodeListener listener);
    private native void nativePause(long handle);
    private native void nativeResume(long handle);
    private native void nativeStop(long handle);
    private native void nativeRelease(long handle);

    static {
        // FFmpeg shared libraries must be loaded before the wrapper that links them.
        System.loadLibrary("avcodec");
        System.loadLibrary("avformat");
        System.loadLibrary("avutil");
        System.loadLibrary("swscale");
        System.loadLibrary("ffmpeg-wrapper");
    }
}
三、Native层实现
3.1 上下文结构体
/* Per-decoder native state; allocated in nativeInit and owned by the Java
 * VideoDecoder through its nativeHandle field. */
typedef struct {
AVFormatContext *format_ctx;   /* demuxer context, opened in nativePrepare */
AVCodecContext *codec_ctx;     /* video decoder context */
int video_stream_idx;          /* index of the selected video stream; -1 = none */
SwsContext *sws_ctx;           /* pixel-format converter (decoder fmt -> RGB24) */
volatile int is_decoding;      /* decode-loop run flag; cleared by stop/release */
volatile int is_paused;        /* pause flag, polled by the decode loop */
int video_width;               /* taken from codec_ctx after avcodec_open2 */
int video_height;
int rotation;                  /* degrees, from the stream's "rotate" metadata */
} VideoDecodeContext;
3.2 JNI接口实现
// 初始化解码器
/*
 * Allocates and zero-initializes the native decoder context.
 * Returns the context as an opaque jlong handle, or 0 on allocation failure
 * (the Java side must treat a 0 handle as invalid).
 */
JNIEXPORT jlong JNICALL
Java_com_example_VideoDecoder_nativeInit(JNIEnv *env, jobject thiz) {
    /* calloc replaces malloc+memset; it also makes the explicit field zeroing
     * in the original redundant. Bug fix: the allocation is now checked. */
    VideoDecodeContext *ctx = (VideoDecodeContext *)calloc(1, sizeof(VideoDecodeContext));
    if (!ctx) {
        return 0;
    }
    return (jlong)ctx;
}
// 准备解码器
/*
 * Prepares the decoder for the given file: opens the container, finds the
 * first video stream (picking up its rotation metadata), then opens a
 * matching codec. Returns JNI_TRUE on success. On any failure every
 * partially-acquired FFmpeg resource is released before returning JNI_FALSE;
 * cleanup is centralized via goto to avoid the repeated release blocks of
 * the original.
 */
JNIEXPORT jboolean JNICALL
Java_com_example_VideoDecoder_nativePrepare(JNIEnv *env, jobject thiz,
                                            jlong handle, jstring file_path) {
    VideoDecodeContext *ctx = (VideoDecodeContext *)handle;
    if (!ctx || !file_path) {
        return JNI_FALSE;
    }

    const char *path = (*env)->GetStringUTFChars(env, file_path, NULL);
    if (!path) {
        return JNI_FALSE; /* OOM: a Java exception is already pending */
    }

    jboolean result = JNI_FALSE;

    /* Open the media file. */
    if (avformat_open_input(&ctx->format_ctx, path, NULL, NULL) != 0) {
        LOGE("Could not open file: %s", path);
        goto done;
    }

    /* Read stream information. */
    if (avformat_find_stream_info(ctx->format_ctx, NULL) < 0) {
        LOGE("Could not find stream information");
        goto fail_close_input;
    }

    /* Locate the first video stream and read its rotation metadata. */
    ctx->video_stream_idx = -1;
    for (unsigned int i = 0; i < ctx->format_ctx->nb_streams; i++) { /* nb_streams is unsigned */
        if (ctx->format_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            ctx->video_stream_idx = (int)i;
            AVDictionaryEntry *rotate_tag = av_dict_get(ctx->format_ctx->streams[i]->metadata,
                                                        "rotate", NULL, 0);
            if (rotate_tag && rotate_tag->value) {
                ctx->rotation = atoi(rotate_tag->value);
            }
            break;
        }
    }
    if (ctx->video_stream_idx == -1) {
        LOGE("Could not find video stream");
        goto fail_close_input;
    }

    /* Find a decoder for the stream. */
    AVCodecParameters *codec_params = ctx->format_ctx->streams[ctx->video_stream_idx]->codecpar;
    AVCodec *decoder = avcodec_find_decoder(codec_params->codec_id);
    if (!decoder) {
        LOGE("Unsupported codec");
        goto fail_close_input;
    }

    /* Create and configure the decoding context.
     * Bug fix: both the allocation and avcodec_parameters_to_context were
     * previously unchecked. */
    ctx->codec_ctx = avcodec_alloc_context3(decoder);
    if (!ctx->codec_ctx) {
        LOGE("Could not allocate codec context");
        goto fail_close_input;
    }
    if (avcodec_parameters_to_context(ctx->codec_ctx, codec_params) < 0) {
        LOGE("Could not copy codec parameters");
        goto fail_free_codec;
    }

    /* Open the decoder. */
    if (avcodec_open2(ctx->codec_ctx, decoder, NULL) < 0) {
        LOGE("Could not open codec");
        goto fail_free_codec;
    }

    /* Cache the video dimensions for the decode loop. */
    ctx->video_width = ctx->codec_ctx->width;
    ctx->video_height = ctx->codec_ctx->height;
    result = JNI_TRUE;
    goto done;

fail_free_codec:
    avcodec_free_context(&ctx->codec_ctx);
fail_close_input:
    avformat_close_input(&ctx->format_ctx);
done:
    (*env)->ReleaseStringUTFChars(env, file_path, path);
    return result;
}
3.3 核心解码逻辑
// 开始解码
JNIEXPORT void JNICALL
Java_com_example_VideoDecoder_nativeStartDecoding(JNIEnv *env, jobject thiz,
jlong handle, jobject listener) {
VideoDecodeContext *ctx = (VideoDecodeContext *)handle;
ctx->is_decoding = 1;
ctx->is_paused = 0;
// 获取Java回调方法和类
jclass listener_class = (*env)->GetObjectClass(env, listener);
jmethodID on_frame_method = (*env)->GetMethodID(env, listener_class,
"onFrameDecoded",
"(Lcom/example/VideoFrame;)V");
jmethodID on_finish_method = (*env)->GetMethodID(env, listener_class,
"onDecodeFinished", "()V");
jmethodID on_error_method = (*env)->GetMethodID(env, listener_class,
"onErrorOccurred", "(ILjava/lang/String;)V");
// 分配帧和包
AVFrame *frame = av_frame_alloc();
AVFrame *rgb_frame = av_frame_alloc();
AVPacket *packet = av_packet_alloc();
// 准备图像转换上下文 (转换为RGB24)
ctx->sws_ctx = sws_getContext(
ctx->video_width, ctx->video_height, ctx->codec_ctx->pix_fmt,
ctx->video_width, ctx->video_height, AV_PIX_FMT_RGB24,
SWS_BILINEAR, NULL, NULL, NULL);
if (!ctx->sws_ctx) {
(*env)->CallVoidMethod(env, listener, on_error_method,
VideoDecoder.ERROR_CODE_DECODE_FAILED,
(*env)->NewStringUTF(env, "Could not initialize sws context"));
goto end;
}
// 分配RGB缓冲区
int rgb_buffer_size = av_image_get_buffer_size(AV_PIX_FMT_RGB24,
ctx->video_width,
ctx->video_height, 1);
uint8_t *rgb_buffer = (uint8_t *)av_malloc(rgb_buffer_size);
av_image_fill_arrays(rgb_frame->data, rgb_frame->linesize, rgb_buffer,
AV_PIX_FMT_RGB24, ctx->video_width,
ctx->video_height, 1);
// 解码循环
while (ctx->is_decoding && av_read_frame(ctx->format_ctx, packet) >= 0) {
if (packet->stream_index == ctx->video_stream_idx) {
// 发送到解码器
if (avcodec_send_packet(ctx->codec_ctx, packet) == 0) {
// 接收解码后的帧
while (avcodec_receive_frame(ctx->codec_ctx, frame) == 0) {
if (!ctx->is_decoding) break;
// 等待暂停状态结束
while (ctx->is_paused && ctx->is_decoding) {
usleep(10000); // 10ms
}
if (!ctx->is_decoding) break;
// 转换像素格式
sws_scale(ctx->sws_ctx, (const uint8_t *const *)frame->data,
frame->linesize, 0, ctx->video_height,
rgb_frame->data, rgb_frame->linesize);
// 创建Java VideoFrame对象
jclass frame_class = (*env)->FindClass(env, "com/example/VideoFrame");
jmethodID frame_ctor = (*env)->GetMethodID(env, frame_class,
"<init>", "([BIIJI)V");
// 创建字节数组
jbyteArray rgb_array = (*env)->NewByteArray(env, rgb_buffer_size);
(*env)->SetByteArrayRegion(env, rgb_array, 0, rgb_buffer_size,
(jbyte *)rgb_buffer);
// 创建VideoFrame对象
jobject video_frame = (*env)->NewObject(env, frame_class, frame_ctor,
rgb_array,
ctx->video_width,
ctx->video_height,
frame->pts,
AV_PIX_FMT_RGB24,
ctx->rotation);
// 回调到Java层
(*env)->CallVoidMethod(env, listener, on_frame_method, video_frame);
// 释放本地引用
(*env)->DeleteLocalRef(env, video_frame);
(*env)->DeleteLocalRef(env, rgb_array);
}
}
}
av_packet_unref(packet);
}
// 解码完成回调
if (ctx->is_decoding) {
(*env)->CallVoidMethod(env, listener, on_finish_method);
}
end:
// 释放资源
if (rgb_buffer) av_free(rgb_buffer);
if (ctx->sws_ctx) sws_freeContext(ctx->sws_ctx);
av_frame_free(&frame);
av_frame_free(&rgb_frame);
av_packet_free(&packet);
}
四、使用示例
/**
 * Demo Activity driving VideoDecoder: play/pause/stop buttons plus an
 * ImageView that displays each decoded frame as a Bitmap.
 */
public class VideoPlayerActivity extends AppCompatActivity
        implements VideoDecoder.DecodeListener {

    private VideoDecoder videoDecoder;
    private ImageView videoView;
    private Button btnPlay, btnPause, btnStop;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_video_player);
        bindViews();
        videoDecoder = new VideoDecoder();
        // Demo source file under the app's external files directory.
        wireControls(getExternalFilesDir(null) + "/test.mp4");
    }

    /** Looks up the views declared in activity_video_player. */
    private void bindViews() {
        videoView = findViewById(R.id.video_view);
        btnPlay = findViewById(R.id.btn_play);
        btnPause = findViewById(R.id.btn_pause);
        btnStop = findViewById(R.id.btn_stop);
    }

    /** Attaches the click handlers that drive the decoder state machine. */
    private void wireControls(String videoPath) {
        btnPlay.setOnClickListener(v -> {
            int state = videoDecoder.getCurrentState();
            if (state == VideoDecoder.STATE_READY || state == VideoDecoder.STATE_PAUSED) {
                videoDecoder.startDecoding(this);
            } else if (state == VideoDecoder.STATE_IDLE) {
                videoDecoder.prepare(videoPath);
            }
        });
        btnPause.setOnClickListener(v -> {
            if (videoDecoder.getCurrentState() == VideoDecoder.STATE_DECODING) {
                videoDecoder.pause();
            }
        });
        btnStop.setOnClickListener(v -> {
            int state = videoDecoder.getCurrentState();
            if (state != VideoDecoder.STATE_IDLE && state != VideoDecoder.STATE_STOPPED) {
                videoDecoder.stop();
            }
        });
    }

    @Override
    public void onFrameDecoded(VideoFrame frame) {
        // Frames arrive off the UI thread; hop to it before touching views.
        runOnUiThread(() -> videoView.setImageBitmap(frame.toBitmap()));
    }

    @Override
    public void onDecodeFinished() {
        runOnUiThread(() -> {
            Toast.makeText(this, "解码完成", Toast.LENGTH_SHORT).show();
            videoView.setImageBitmap(null);
        });
    }

    @Override
    public void onErrorOccurred(int errorCode, String message) {
        runOnUiThread(() ->
                Toast.makeText(this, "错误(" + errorCode + "): " + message,
                        Toast.LENGTH_LONG).show());
    }

    @Override
    public void onStateChanged(int newState) {
        runOnUiThread(() -> updateUI(newState));
    }

    /** Enables/disables the buttons according to the decoder state. */
    private void updateUI(int state) {
        boolean canPlay = state == VideoDecoder.STATE_READY
                || state == VideoDecoder.STATE_PAUSED
                || state == VideoDecoder.STATE_IDLE;
        boolean running = state == VideoDecoder.STATE_DECODING;
        btnPlay.setEnabled(canPlay);
        btnPause.setEnabled(running);
        btnStop.setEnabled(running || state == VideoDecoder.STATE_PAUSED);
    }

    @Override
    protected void onDestroy() {
        super.onDestroy();
        // Free the native decoder with the Activity.
        videoDecoder.release();
    }
}
五、性能优化建议
使用Surface直接渲染:
• 通过ANativeWindow直接渲染YUV数据,避免格式转换
• 减少内存拷贝和Bitmap创建开销
硬解码优先:
// 在nativePrepare中检测硬件解码器 AVCodec *decoder = NULL; if (isHardwareDecodeSupported(codec_id)) { decoder = avcodec_find_decoder_by_name("h264_mediacodec"); } if (!decoder) { decoder = avcodec_find_decoder(codec_id); }
帧缓冲队列优化:
• 实现生产者-消费者模型
• 设置合理的队列大小(3-5帧)
• 丢帧策略处理视频不同步问题
多线程处理:
• 分离解码线程和渲染线程
• 使用线程池处理耗时操作
内存复用:
// 复用AVPacket和AVFrame static AVPacket *reuse_packet = NULL; if (!reuse_packet) { reuse_packet = av_packet_alloc(); } else { av_packet_unref(reuse_packet); }
精准帧率控制:
// 根据帧率控制解码速度 AVRational frame_rate = ctx->format_ctx->streams[ctx->video_stream_idx]->avg_frame_rate; double frame_delay = av_q2d(av_inv_q(frame_rate)) * 1000000; // 微秒 int64_t last_frame_time = av_gettime(); while (decoding) { // ...解码逻辑... int64_t current_time = av_gettime(); int64_t elapsed = current_time - last_frame_time; if (elapsed < frame_delay) { usleep(frame_delay - elapsed); } last_frame_time = av_gettime(); }
低功耗优化:
• 根据设备温度调整解码策略
• 在后台时降低帧率或暂停解码
六、兼容性处理
API版本适配:
private static boolean isSurfaceTextureSupported() { return Build.VERSION.SDK_INT >= Build.VERSION_CODES.ICE_CREAM_SANDWICH; }
权限处理:
private boolean checkStoragePermission() { if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) { return checkSelfPermission(Manifest.permission.READ_EXTERNAL_STORAGE) == PackageManager.PERMISSION_GRANTED; } return true; }
ABI兼容:
android { defaultConfig { ndk { abiFilters 'armeabi-v7a', 'arm64-v8a', 'x86', 'x86_64' } } }
七、错误处理与日志
完善的错误处理:
public void onErrorOccurred(int errorCode, String message) { switch (errorCode) { case VideoDecoder.ERROR_CODE_FILE_NOT_FOUND: // 处理文件不存在错误 break; case VideoDecoder.ERROR_CODE_UNSUPPORTED_FORMAT: // 处理不支持的格式错误 break; default: // 处理未知错误 } }
日志系统:
#define LOG_LEVEL_VERBOSE 1 #define LOG_LEVEL_DEBUG 2 #define LOG_LEVEL_INFO 3 #define LOG_LEVEL_WARN 4 #define LOG_LEVEL_ERROR 5 void log_print(int level, const char *tag, const char *fmt, ...) { if (level >= CURRENT_LOG_LEVEL) { va_list args; va_start(args, fmt); __android_log_vprint(level, tag, fmt, args); va_end(args); } }
八、扩展功能
视频信息获取:
public class VideoInfo { public int width; public int height; public long duration; public float frameRate; public int rotation; } // 在VideoDecoder中添加方法 public VideoInfo getVideoInfo() { return nativeGetVideoInfo(nativeHandle); }
视频截图功能:
public Bitmap captureFrame() { if (currentState == STATE_DECODING || currentState == STATE_PAUSED) { return nativeCaptureFrame(nativeHandle); } return null; }
视频缩放控制:
// 在native层实现缩放 sws_scale(ctx->sws_ctx, frame->data, frame->linesize, 0, ctx->video_height, scaled_frame->data, scaled_frame->linesize);
九、测试建议
单元测试:
@Test public void testDecoderStates() { VideoDecoder decoder = new VideoDecoder(); assertEquals(VideoDecoder.STATE_IDLE, decoder.getCurrentState()); decoder.prepare("test.mp4"); // 等待准备完成 assertEquals(VideoDecoder.STATE_READY, decoder.getCurrentState()); }
性能测试:
long startTime = System.currentTimeMillis(); // 执行解码操作 long endTime = System.currentTimeMillis(); Log.d("Performance", "解码耗时: " + (endTime - startTime) + "ms");
内存泄漏检测:
• 使用Android Profiler监控内存使用
• 重复创建释放解码器检查内存增长
十、总结
本文实现的Android FFmpeg视频解码方案具有以下特点:
- 高性能:通过Native层优化和合理的内存管理实现高效解码
- 高兼容性:避免使用枚举类,支持广泛的Android设备
- 可扩展性:模块化设计便于添加新功能
- 稳定性:完善的状态管理和错误处理机制
- 易用性:清晰的API接口和完整的文档
开发者可以根据实际需求在此基础框架上进行扩展,如添加音频解码、视频滤镜等功能,构建更完整的媒体播放解决方案。