A simple FFmpeg-based video player on Android: time synchronization

The previous posts covered video decoding and audio decoding; the next step is to synchronize the two.

The Java code:

setContentView(R.layout.activity_main);
SurfaceView surfaceView = findViewById(R.id.surface_view);
surfaceView.getHolder().addCallback(new SurfaceHolder.Callback() {
    @Override
    public void surfaceCreated(SurfaceHolder holder) {
    }

    @Override
    public void surfaceChanged(final SurfaceHolder holder, int format, int width, int height) {
        Thread audioThread = new Thread() {
            @Override
            public void run() {
                super.run();
                String videoPath = "/storage/emulated/0/baiduNetdisk/season09.mp4";
                audioPlay(videoPath);
            }
        };
        audioThread.start();
        Thread videoThread = new Thread() {
            @Override
            public void run() {
                super.run();
                String videoPath = "/storage/emulated/0/baiduNetdisk/season09.mp4";
                videoPlay(videoPath, holder.getSurface());
            }
        };
        videoThread.start();
    }

    @Override
    public void surfaceDestroyed(SurfaceHolder holder) {
    }
});
All it does is start one thread for audio and one for video; the real work is in the C++ code.
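Note that besides the two native methods audioPlay() and videoPlay(), MainActivity also has to expose a createAudio(int sampleRate, int channels) method returning an android.media.AudioTrack: the native audio code looks it up through JNI (signature "(II)Landroid/media/AudioTrack;") to create the player, presumably the same helper used in the earlier audio-decoding post.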

Since the two threads have to be synchronized, the faster one clearly must wait for the slower one. From the earlier code we know the video thread runs ahead of the audio thread, so it is the video thread that has to do the waiting, and for that we need a lock:

pthread_mutex_t video_mutex;
pthread_cond_t video_cond;
We also need some timing state: the timestamp of the most recently played audio frame and the wall-clock time at which it was recorded:

long audio_time = 0;
long start_time = 0;

A helper that returns the current wall-clock time in milliseconds:

long getCurrentTime() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000 + tv.tv_usec / 1000;
}
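One caveat: on 32-bit Android a long is 32 bits, so tv_sec * 1000 overflows. The code gets away with it because only differences between two getCurrentTime() results are ever used and the wraparound cancels out, but since signed overflow is technically undefined behavior, returning int64_t would be the cleaner choice.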

And a function that converts a relative timeout in milliseconds into the absolute timespec that pthread_cond_timedwait() expects:

timespec waitTime(long timeout_ms) {
    struct timespec abstime;
    struct timeval now;
    gettimeofday(&now, NULL);
    long nsec = now.tv_usec * 1000 + (timeout_ms % 1000) * 1000000;
    abstime.tv_sec = now.tv_sec + nsec / 1000000000 + timeout_ms / 1000;
    abstime.tv_nsec = nsec % 1000000000;
    return abstime;
}
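gettimeofday() is the right clock to build the deadline from here: by default (that is, without a pthread_condattr_setclock() call) pthread_cond_timedwait() measures its absolute timeout against CLOCK_REALTIME, which is exactly the clock gettimeofday() reads.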
First, in the audio thread, record the audio clock: the timestamp of the frame being played, plus the wall-clock moment it was recorded:

double nowTime = frame->pts * av_q2d(avStream->time_base);
long t = (long) (nowTime * 1000);
audio_time = t;
start_time = getCurrentTime();
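Note that audio_time and start_time are plain longs written by the audio thread and read by the video thread without any locking. In a toy player this works in practice, but it is strictly speaking a data race; guarding them with a mutex or making them atomic would be the careful fix.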
Then, in the video thread, work out how far the current video frame is ahead of the audio clock:

double nowTime = yuvFrame->pts * av_q2d(avStream->time_base);
long t = (long) (nowTime * 1000);
long time = getCurrentTime() - start_time;
long wait = t - time - audio_time;
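For example, if the video frame's timestamp works out to t = 2000 ms, the audio thread last set audio_time = 1950 ms, and 30 ms of wall-clock time have passed since it did (time = 30), then wait = 2000 - 30 - 1950 = 20 ms: this frame should be held back for 20 ms.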
Then let the video thread wait that long:

struct timespec abstime = waitTime(wait);
pthread_mutex_lock(&video_mutex);
pthread_cond_timedwait(&video_cond, &video_mutex, &abstime);
pthread_mutex_unlock(&video_mutex);
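If the video ever falls behind, wait comes out negative, waitTime() produces a deadline that is already in the past (or not even a valid timespec), and pthread_cond_timedwait() returns immediately, so late frames are rendered without extra delay. Waiting on a condition variable rather than calling usleep() also leaves the door open to interrupting the wait later, for example when implementing stop or seek.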
That is the whole synchronization. Some error is unavoidable, on the order of a few milliseconds up to ten-odd milliseconds.
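This is the classic audio-as-master-clock scheme: audio plays back at its natural rate, and video paces itself against the audio timestamps.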

Here is the complete native code:

#include <jni.h>
#include <pthread.h>
#include <string.h>
#include <sys/time.h>
#include <android/native_window_jni.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswresample/swresample.h>
}
// EGLUtils and OpenGLUtils are the rendering helpers from the earlier
// video-decoding post; the header names here are assumed.
#include "EGLUtils.h"
#include "OpenGLUtils.h"

pthread_mutex_t video_mutex;
pthread_cond_t video_cond;
// Audio clock: PTS of the last audio frame played (in ms) and the
// wall-clock time at which it was recorded.
long audio_time = 0;
long start_time = 0;

long getCurrentTime() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000 + tv.tv_usec / 1000;
}

timespec waitTime(long timeout_ms) {
    struct timespec abstime;
    struct timeval now;
    gettimeofday(&now, NULL);
    long nsec = now.tv_usec * 1000 + (timeout_ms % 1000) * 1000000;
    abstime.tv_sec = now.tv_sec + nsec / 1000000000 + timeout_ms / 1000;
    abstime.tv_nsec = nsec % 1000000000;
    return abstime;
}

extern "C"
JNIEXPORT void JNICALL
Java_com_example_ffmpegrun_MainActivity_videoPlay(JNIEnv *env, jobject instance, jstring path_,
                                                  jobject surface) {
    const char *path = env->GetStringUTFChars(path_, 0);
    pthread_mutex_init(&video_mutex, NULL);
    pthread_cond_init(&video_cond, NULL);
    av_register_all();
    AVFormatContext *fmt_ctx = avformat_alloc_context();
    if (avformat_open_input(&fmt_ctx, path, NULL, NULL) < 0) {
        return;
    }
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        return;
    }
    AVStream *avStream = NULL;
    int video_stream_index = -1;
    for (int i = 0; i < fmt_ctx->nb_streams; i++) {
        if (fmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            avStream = fmt_ctx->streams[i];
            video_stream_index = i;
            break;
        }
    }
    if (video_stream_index == -1) {
        return;
    }
    AVCodecContext *codec_ctx = avcodec_alloc_context3(NULL);
    avcodec_parameters_to_context(codec_ctx, avStream->codecpar);
    AVCodec *avCodec = avcodec_find_decoder(codec_ctx->codec_id);
    if (avcodec_open2(codec_ctx, avCodec, NULL) < 0) {
        return;
    }
    ANativeWindow *nativeWindow = ANativeWindow_fromSurface(env, surface);
    AVFrame *yuvFrame = av_frame_alloc();
    EGLUtils *eglUtils = new EGLUtils();
    eglUtils->initEGL(nativeWindow);
    OpenGLUtils *openGLUtils = new OpenGLUtils();
    openGLUtils->surfaceCreated();
    openGLUtils->surfaceChanged(eglUtils->getWidth(), eglUtils->getHeight());
    openGLUtils->initTexture(codec_ctx->width, codec_ctx->height);
    int y_size = codec_ctx->width * codec_ctx->height;
    AVPacket *pkt = (AVPacket *) malloc(sizeof(AVPacket));
    av_new_packet(pkt, y_size);
    int ret;
    while (1) {
        if (av_read_frame(fmt_ctx, pkt) < 0) {
            av_packet_unref(pkt);
            break;
        }
        if (pkt->stream_index == video_stream_index) {
            ret = avcodec_send_packet(codec_ctx, pkt);
            if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                av_packet_unref(pkt);
                continue;
            }
            ret = avcodec_receive_frame(codec_ctx, yuvFrame);
            if (ret < 0 && ret != AVERROR_EOF) {
                av_packet_unref(pkt);
                continue;
            }
            // How far is this frame ahead of the audio clock?
            double nowTime = yuvFrame->pts * av_q2d(avStream->time_base);
            long t = (long) (nowTime * 1000);
            long time = getCurrentTime() - start_time;
            long wait = t - time - audio_time;
            // Hold the frame back until it is due.
            struct timespec abstime = waitTime(wait);
            pthread_mutex_lock(&video_mutex);
            pthread_cond_timedwait(&video_cond, &video_mutex, &abstime);
            pthread_mutex_unlock(&video_mutex);
            openGLUtils->updateTexture(yuvFrame->width, yuvFrame->height,
                                       yuvFrame->data[0], yuvFrame->data[1], yuvFrame->data[2]);
            openGLUtils->surfaceDraw();
            eglUtils->drawEGL();
            av_packet_unref(pkt);
        }
        av_packet_unref(pkt);
    }
    av_frame_free(&yuvFrame);
    avcodec_close(codec_ctx);
    avformat_close_input(&fmt_ctx);
    pthread_cond_destroy(&video_cond);
    pthread_mutex_destroy(&video_mutex);
    env->ReleaseStringUTFChars(path_, path);
}

#define MAX_AUDIO_FRME_SIZE 48000 * 4

extern "C"
JNIEXPORT void JNICALL
Java_com_example_ffmpegrun_MainActivity_audioPlay(JNIEnv *env, jobject instance, jstring path_) {
    const char *path = env->GetStringUTFChars(path_, 0);
    av_register_all();
    AVFormatContext *fmt_ctx = avformat_alloc_context();
    if (avformat_open_input(&fmt_ctx, path, NULL, NULL) < 0) {
        return;
    }
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        return;
    }
    AVStream *avStream = NULL;
    int audio_stream_index = -1;
    for (int i = 0; i < fmt_ctx->nb_streams; i++) {
        if (fmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            avStream = fmt_ctx->streams[i];
            audio_stream_index = i;
            break;
        }
    }
    if (audio_stream_index == -1) {
        return;
    }
    AVCodecContext *codec_ctx = avcodec_alloc_context3(NULL);
    avcodec_parameters_to_context(codec_ctx, avStream->codecpar);
    AVCodec *avCodec = avcodec_find_decoder(codec_ctx->codec_id);
    if (avcodec_open2(codec_ctx, avCodec, NULL) < 0) {
        return;
    }
    SwrContext *swr_ctx = swr_alloc();
    enum AVSampleFormat in_sample_fmt = codec_ctx->sample_fmt;
    enum AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;
    int in_sample_rate = codec_ctx->sample_rate;
    int out_sample_rate = in_sample_rate;
    uint64_t in_ch_layout = codec_ctx->channel_layout;
    uint64_t out_ch_layout = AV_CH_LAYOUT_STEREO;
    swr_alloc_set_opts(swr_ctx,
                       out_ch_layout, out_sample_fmt, out_sample_rate,
                       in_ch_layout, in_sample_fmt, in_sample_rate,
                       0, NULL);
    swr_init(swr_ctx);
    int out_channel_nb = av_get_channel_layout_nb_channels(out_ch_layout);
    jclass player_class = env->GetObjectClass(instance);
    jmethodID create_audio_track_mid = env->GetMethodID(player_class, "createAudio",
                                                        "(II)Landroid/media/AudioTrack;");
    jobject audio_track = env->CallObjectMethod(instance, create_audio_track_mid,
                                                out_sample_rate, out_channel_nb);
    jclass audio_track_class = env->GetObjectClass(audio_track);
    jmethodID audio_track_play_mid = env->GetMethodID(audio_track_class, "play", "()V");
    jmethodID audio_track_stop_mid = env->GetMethodID(audio_track_class, "stop", "()V");
    env->CallVoidMethod(audio_track, audio_track_play_mid);
    jmethodID audio_track_write_mid = env->GetMethodID(audio_track_class, "write", "([BII)I");
    uint8_t *out_buffer = (uint8_t *) av_malloc(MAX_AUDIO_FRME_SIZE);
    AVPacket *pkt = (AVPacket *) malloc(sizeof(AVPacket));
    int ret;
    while (1) {
        if (av_read_frame(fmt_ctx, pkt) < 0) {
            av_packet_unref(pkt);
            break;
        }
        ret = avcodec_send_packet(codec_ctx, pkt);
        if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
            av_packet_unref(pkt);
            continue;
        }
        AVFrame *frame = av_frame_alloc();
        ret = avcodec_receive_frame(codec_ctx, frame);
        if (ret < 0 && ret != AVERROR_EOF) {
            av_packet_unref(pkt);
            av_frame_free(&frame);
            continue;
        }
        // Update the audio clock that the video thread syncs against.
        double nowTime = frame->pts * av_q2d(avStream->time_base);
        long t = (long) (nowTime * 1000);
        audio_time = t;
        start_time = getCurrentTime();
        swr_convert(swr_ctx, &out_buffer, MAX_AUDIO_FRME_SIZE,
                    (const uint8_t **) frame->data, frame->nb_samples);
        int out_buffer_size = av_samples_get_buffer_size(NULL, out_channel_nb,
                                                         frame->nb_samples, out_sample_fmt, 1);
        jbyteArray audio_sample_array = env->NewByteArray(out_buffer_size);
        jbyte *sample_bytep = env->GetByteArrayElements(audio_sample_array, NULL);
        memcpy(sample_bytep, out_buffer, (size_t) out_buffer_size);
        env->ReleaseByteArrayElements(audio_sample_array, sample_bytep, 0);
        env->CallIntMethod(audio_track, audio_track_write_mid,
                           audio_sample_array, 0, out_buffer_size);
        env->DeleteLocalRef(audio_sample_array);
        av_frame_free(&frame);
        av_packet_unref(pkt);
    }
    env->CallVoidMethod(audio_track, audio_track_stop_mid);
    av_free(out_buffer);
    swr_free(&swr_ctx);
    avcodec_close(codec_ctx);
    avformat_close_input(&fmt_ctx);
    env->ReleaseStringUTFChars(path_, path);
}
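A few loose ends worth noting: the early error returns leave fmt_ctx open and the JNI string unreleased, ANativeWindow_fromSurface() is never balanced with ANativeWindow_release(), and codec_ctx is closed but never freed with avcodec_free_context(). A production player would clean all of these up.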

Update, 2017/11/6

I found a bug; here is the fix.

In the audio decoding loop, add a check right after av_read_frame():

if (pkt->stream_index == audio_stream_index)

and only then go on to the playback handling. av_read_frame() returns both video and audio packets, and since this thread handles nothing but audio, the video packets have to be filtered out, just as the video thread filters out the audio packets.
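Applied to the audio loop in the full listing, the fix looks like this (a sketch; written as a skip-and-continue, which is equivalent to wrapping the whole loop body in the if):

while (1) {
    if (av_read_frame(fmt_ctx, pkt) < 0) {
        av_packet_unref(pkt);
        break;
    }
    if (pkt->stream_index != audio_stream_index) {
        // Not an audio packet: discard it and read the next one.
        av_packet_unref(pkt);
        continue;
    }
    // ... send the packet to the decoder, resample, and write to
    // the AudioTrack exactly as in the listing above ...
    av_packet_unref(pkt);
}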
