Streaming H.264 and AAC Files over RTP with FFmpeg


This article covers how to write an RTP streaming program with FFmpeg, and the issues involved in timestamping the video and audio packets.
PS: the code in this article targets Linux.

I. Opening the files and the output streams

Open the video and audio files separately with avformat_open_input, and open the RTP output streams with avformat_alloc_output_context2. Note that the format name used here is rtp_mpegts, which means the audio/video data is packed into an MPEG-TS stream before being sent over RTP. A single RTP port can carry only one stream, so two RTP ports are opened here, one for video and one for audio.

avformat_open_input(&ifmt_ctx_v, in_filename_v, 0, 0);
avformat_open_input(&ifmt_ctx_a, in_filename_a, 0, 0);
avformat_alloc_output_context2(ofmt_ctx, NULL, "rtp_mpegts", NULL);
avformat_alloc_output_context2(ofmt_ctx + 1, NULL, "rtp_mpegts", NULL);
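Each output context is then bound to its own RTP URL with avio_open; simplified from the full source at the end of this article (error checks omitted):

/* One RTP port per stream: video on 6666, audio on 6668
 * (addresses as used in the complete source below). */
avio_open(&ofmt_ctx[0]->pb, "rtp://192.168.1.121:6666", AVIO_FLAG_WRITE);
avio_open(&ofmt_ctx[1]->pb, "rtp://192.168.1.121:6668", AVIO_FLAG_WRITE);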

II. Reading the audio/video data and timestamping it

av_compare_ts compares two timestamps that are in different time bases and tells you which one is larger.
It is used here to decide whether to read audio or video data next: when the audio timestamp is larger, read video data; when the video timestamp is larger, read audio data.
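Concretely, the decision at the top of the main loop boils down to this (simplified from the full source below):

/* Whichever stream currently has the smaller PTS is read next,
 * so audio and video are sent in step with each other. */
if (av_compare_ts(cur_pts_v, ifmt_ctx_v->streams[0]->time_base,
                  cur_pts_a, ifmt_ctx_a->streams[0]->time_base) <= 0) {
    /* video PTS <= audio PTS: read and send a video packet */
    ret = av_read_frame(ifmt_ctx_v, &pkt);
} else {
    /* video PTS > audio PTS: read and send an audio packet */
    ret = av_read_frame(ifmt_ctx_a, &pkt);
}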

1. Video timestamps

AVRational time_base = in_stream->time_base; // { 1, 1000 }
AVRational r_framerate1 = in_stream->r_frame_rate;
AVRational time_base_q = { 1, AV_TIME_BASE };
// Duration between 2 frames (us), in the internal time base
int64_t calc_duration = (double)(AV_TIME_BASE) / av_q2d(r_framerate1);
pkt.pts = av_rescale_q(vframe_index * calc_duration, time_base_q, time_base);
pkt.dts = pkt.pts;
pkt.duration = av_rescale_q(calc_duration, time_base_q, time_base);
pkt.pos = -1;
pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
pkt.pos = -1;

av_q2d(a): converts the AVRational a into a double.
av_rescale_q(a, b, c): rescales a from time base b into time base c.
What the big block of code above actually does is stamp the packet in one time base and then convert the result into another.
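As a quick illustration of av_rescale_q (the concrete values here are assumed for the example): converting 40 000 µs, i.e. one frame at 25 fps, from the internal microsecond time base into the 90 kHz clock used by MPEG-TS over RTP:

AVRational src = { 1, AV_TIME_BASE }; // internal time base: microseconds
AVRational dst = { 1, 90000 };        // 90 kHz MPEG-TS/RTP clock
int64_t ticks = av_rescale_q(40000, src, dst); // 0.04 s * 90000 = 3600 ticks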

First, this code computes each frame's duration in the internal microsecond time base and stamps the packet in the video stream's own time base:

AVRational time_base = in_stream->time_base; // { 1, 1000 }
AVRational r_framerate1 = in_stream->r_frame_rate;
AVRational time_base_q = { 1, AV_TIME_BASE };
// Duration between 2 frames (us)
int64_t calc_duration = (double)(AV_TIME_BASE) / av_q2d(r_framerate1);
pkt.pts = av_rescale_q(vframe_index * calc_duration, time_base_q, time_base);
pkt.dts = pkt.pts;
pkt.duration = av_rescale_q(calc_duration, time_base_q, time_base);
pkt.pos = -1;

This stamps the timestamp against the video file's own clock; the next step then converts the timestamp into the RTP time base:

pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
pkt.pos = -1;
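av_rescale_q_rnd is av_rescale_q with an explicit rounding mode: AV_ROUND_NEAR_INF rounds to the nearest value (halfway cases away from zero), and AV_ROUND_PASS_MINMAX passes INT64_MIN and INT64_MAX through unchanged, so a packet whose pts is AV_NOPTS_VALUE stays AV_NOPTS_VALUE instead of being rescaled into a garbage value.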

Strip away the time-base conversion steps and it boils down to:

pkt.pts = (double)frame_index * 1000 / av_q2d(ifmt_ctx->streams[videoindex]->r_frame_rate);
pkt.dts = pkt.pts;
pkt.duration = 1000 / av_q2d(ifmt_ctx->streams[videoindex]->r_frame_rate);
pkt.pos = -1;

In other words, the formula for the video timestamp is: pts = frame_index++ * (1000 / fps);
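For example, at an assumed 25 fps in a { 1, 1000 } (millisecond) time base, each frame adds 1000 / 25 = 40 ms: frame 0 gets pts 0, frame 1 gets pts 40, frame 2 gets pts 80, and so on.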

2. Audio timestamps

The audio timestamp formula works as follows (a worked example follows the list):
1. The interval between two frames is duration = frame_size / sample_rate * 1000 (in milliseconds).
2. frame_size is the number of samples in one audio frame; sample_rate is the audio sample rate, i.e. how many samples there are per second.
3. frame_size / sample_rate is therefore the length of one frame, converted here into milliseconds.
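As a worked example (typical AAC values, assumed here): with frame_size = 1024 samples and sample_rate = 44100 Hz, duration = 1024 / 44100 * 1000 ≈ 23.22 ms per frame, so the n-th audio packet is stamped with pts ≈ n * 23.22 ms.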

AVRational time_base1 = in_stream->time_base;
AVRational time_base_q = { 1, AV_TIME_BASE };
double frame_size = in_stream->codec->frame_size;
double sample_rate = in_stream->codec->sample_rate;
// Duration between 2 frames (us)
int64_t calc_duration = (double)(AV_TIME_BASE) * (frame_size / sample_rate);
pkt.pts = av_rescale_q(aframe_index * calc_duration, time_base_q, time_base1);
pkt.dts = pkt.pts;
pkt.duration = av_rescale_q(calc_duration, time_base_q, time_base1);
pkt.pos = -1;
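Stripping the time-base conversions just as we did for video, the audio stamping reduces to the following (my simplification, assuming a { 1, 1000 } millisecond time base):

pkt.pts = (double)aframe_index * frame_size / sample_rate * 1000;
pkt.dts = pkt.pts;
pkt.duration = frame_size / sample_rate * 1000;
pkt.pos = -1;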

The SDP file for playback in VLC

The SDP file used by my program is as follows:

v=0
c=IN IP4 192.168.1.121
t=0 0
a=tool:libavformat 57.25.100
m=video 6666 RTP/AVP 33
a=rtpmap:33 MP2T
m=video 6668 RTP/AVP 33

The line c=IN IP4 192.168.1.121 must be changed to match your own IP address.
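Payload type 33 is the static RTP/AVP assignment for MP2T (an MPEG-2 transport stream), which matches what the rtp_mpegts muxer sends; that is why both m= lines can be declared as video even though the second port actually carries the audio TS. To test, save the text above as, say, play.sdp (the file name is arbitrary), start the streaming program, and then open the file in VLC, either via Media > Open File or with vlc play.sdp from a terminal.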

The complete source code:

#include <stdio.h>
#include <libavformat/avformat.h>
#include <libavutil/mathematics.h>
#include <libavutil/time.h>
#include <libavcodec/avcodec.h>

int main(int argc, char* argv[])
{
    // One AVFormatContext per input and one per output
    AVFormatContext *ifmt_ctx_a = NULL, *ifmt_ctx_v = NULL, *ifmt_ctx;
    AVFormatContext *ofmt_ctx[2] = { NULL, NULL };
    AVPacket pkt;
    const char *in_filename_a, *in_filename_v, *out_filename_a, *out_filename_v;
    int ret, i;
    int videoindex = -1, audioindex = -1;
    int vframe_index = 0, aframe_index = 0, index = 0;
    int64_t start_time = 0;
    int64_t cur_pts_v = 0, cur_pts_a = 0;

    in_filename_v = "test.h264";                 // input video file
    in_filename_a = "test.aac";                  // input audio file
    out_filename_v = "rtp://192.168.1.121:6666"; // video port
    out_filename_a = "rtp://192.168.1.121:6668"; // audio port

    av_register_all();
    // Network
    avformat_network_init();

    // Set up the video input
    if ((ret = avformat_open_input(&ifmt_ctx_v, in_filename_v, 0, 0)) < 0) {
        printf("Could not open input file.");
        goto end;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx_v, 0)) < 0) {
        printf("Failed to retrieve input stream information");
        goto end;
    }
    // Set up the audio input
    if ((ret = avformat_open_input(&ifmt_ctx_a, in_filename_a, 0, 0)) < 0) {
        printf("Could not open input file.");
        goto end;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx_a, 0)) < 0) {
        printf("Failed to retrieve input stream information");
        goto end;
    }
    // Print information about the input files
    av_dump_format(ifmt_ctx_v, 0, in_filename_v, 0);
    av_dump_format(ifmt_ctx_a, 1, in_filename_a, 0);

    // Set up the RTP output contexts
    avformat_alloc_output_context2(ofmt_ctx, NULL, "rtp_mpegts", NULL);
    avformat_alloc_output_context2(ofmt_ctx + 1, NULL, "rtp_mpegts", NULL);
    if (!ofmt_ctx[0] || !ofmt_ctx[1]) {
        printf("Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }
    // Video part
    for (i = 0; i < ifmt_ctx_v->nb_streams; i++) {
        // Create an output stream mirroring the input stream
        AVStream *in_stream = ifmt_ctx_v->streams[i];
        AVStream *out_stream = avformat_new_stream(ofmt_ctx[0], in_stream->codec->codec);
        if (!out_stream) {
            printf("Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        // Copy the AVCodecContext settings
        ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
        if (ret < 0) {
            printf("Failed to copy context from input to output stream codec context\n");
            goto end;
        }
        out_stream->codec->codec_tag = 0;
        if (ofmt_ctx[0]->oformat->flags & AVFMT_GLOBALHEADER)
            out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    // Audio part
    for (i = 0; i < ifmt_ctx_a->nb_streams; i++) {
        // Create an output stream mirroring the input stream
        AVStream *in_stream = ifmt_ctx_a->streams[i];
        AVStream *out_stream = avformat_new_stream(ofmt_ctx[1], in_stream->codec->codec);
        if (!out_stream) {
            printf("Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        // Copy the AVCodecContext settings
        ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
        if (ret < 0) {
            printf("Failed to copy context from input to output stream codec context\n");
            goto end;
        }
        out_stream->codec->codec_tag = 0;
        if (ofmt_ctx[1]->oformat->flags & AVFMT_GLOBALHEADER)
            out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    // Print the output format information
    av_dump_format(ofmt_ctx[0], 0, out_filename_v, 1);
    av_dump_format(ofmt_ctx[1], 1, out_filename_a, 1);
    // Open the output URLs
    if (!(ofmt_ctx[0]->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx[0]->pb, out_filename_v, AVIO_FLAG_WRITE);
        if (ret < 0) {
            printf("Could not open output URL '%s'", out_filename_v);
            goto end;
        }
    }
    if (!(ofmt_ctx[1]->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx[1]->pb, out_filename_a, AVIO_FLAG_WRITE);
        if (ret < 0) {
            printf("Could not open output URL '%s'", out_filename_a);
            goto end;
        }
    }
    // Write the file headers
    ret = avformat_write_header(ofmt_ctx[0], NULL);
    ret = avformat_write_header(ofmt_ctx[1], NULL);
    if (ret < 0) {
        printf("Error occurred when opening output URL\n");
        goto end;
    }
    start_time = av_gettime();
    while (1) {
        AVStream *in_stream, *out_stream;
        videoindex = 0;
        audioindex = 0;
        // Get an AVPacket: read from whichever stream has the smaller PTS
        if (av_compare_ts(cur_pts_v, ifmt_ctx_v->streams[0]->time_base,
                          cur_pts_a, ifmt_ctx_a->streams[0]->time_base) <= 0) {
            ifmt_ctx = ifmt_ctx_v;
            index = 0;
            if (av_read_frame(ifmt_ctx, &pkt) >= 0) {
                do {
                    in_stream  = ifmt_ctx->streams[0];
                    out_stream = ofmt_ctx[index]->streams[0];
                    if (pkt.stream_index == index) {
                        AVRational time_base = in_stream->time_base; // { 1, 1000 }
                        AVRational r_framerate1 = in_stream->r_frame_rate;
                        AVRational time_base_q = { 1, AV_TIME_BASE };
                        // Duration between 2 frames (us), in the internal time base
                        int64_t calc_duration = (double)(AV_TIME_BASE) / av_q2d(r_framerate1);
                        pkt.pts = av_rescale_q(vframe_index * calc_duration, time_base_q, time_base);
                        pkt.dts = pkt.pts;
                        pkt.duration = av_rescale_q(calc_duration, time_base_q, time_base);
                        pkt.pos = -1;
                        vframe_index++;
                        cur_pts_v = pkt.pts;
                        // Important: delay sending so the stream goes out in real time
                        {
                            AVRational time_base = ifmt_ctx->streams[index]->time_base;
                            AVRational time_base_q = { 1, AV_TIME_BASE };
                            int64_t pts_time = av_rescale_q(pkt.dts, time_base, time_base_q);
                            int64_t now_time = av_gettime() - start_time;
                            if (pts_time > now_time)
                                av_usleep(pts_time - now_time);
                        }
                        break;
                    }
                } while (av_read_frame(ifmt_ctx, &pkt) >= 0);
            } else {
                break;
            }
        } else {
            ifmt_ctx = ifmt_ctx_a;
            index = 1;
            if (av_read_frame(ifmt_ctx, &pkt) >= 0) {
                do {
                    in_stream  = ifmt_ctx->streams[0];
                    out_stream = ofmt_ctx[index]->streams[0];
                    if (pkt.stream_index == 0) {
                        // FIX: the raw stream carries no PTS, so write a simple one
                        AVRational time_base1 = in_stream->time_base;
                        AVRational time_base_q = { 1, AV_TIME_BASE };
                        double frame_size = (double)in_stream->codec->frame_size;
                        double sample_rate = (double)in_stream->codec->sample_rate;
                        // Duration between 2 frames (us)
                        int64_t calc_duration = (double)(AV_TIME_BASE) * (frame_size / sample_rate);
                        pkt.pts = av_rescale_q(aframe_index * calc_duration, time_base_q, time_base1);
                        pkt.dts = pkt.pts;
                        pkt.duration = av_rescale_q(calc_duration, time_base_q, time_base1);
                        pkt.pos = -1;
                        aframe_index++;
                        cur_pts_a = pkt.pts;
                        break;
                    }
                } while (av_read_frame(ifmt_ctx, &pkt) >= 0);
            } else {
                break;
            }
        }
        // Convert PTS/DTS into the output time base
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base,
                                   (enum AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base,
                                   (enum AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        if (index == 0)
            printf("Write video Packet. size:%5d\tpts:%lld\n", pkt.size, pkt.pts);
        if (index == 1)
            printf("Write audio Packet. size:%5d\tpts:%lld\n", pkt.size, pkt.pts);
        if (av_interleaved_write_frame(ofmt_ctx[index], &pkt) < 0) {
            printf("Error muxing packet\n");
            break;
        }
        av_free_packet(&pkt);
    }
    // Write the file trailers
    av_write_trailer(ofmt_ctx[0]);
    av_write_trailer(ofmt_ctx[1]);
end:
    avformat_close_input(&ifmt_ctx_v);
    avformat_close_input(&ifmt_ctx_a);
    // Close the outputs
    if (ofmt_ctx[0] && !(ofmt_ctx[0]->oformat->flags & AVFMT_NOFILE))
        avio_close(ofmt_ctx[0]->pb);
    avformat_free_context(ofmt_ctx[0]);
    if (ofmt_ctx[1] && !(ofmt_ctx[1]->oformat->flags & AVFMT_NOFILE))
        avio_close(ofmt_ctx[1]->pb);
    avformat_free_context(ofmt_ctx[1]);
    if (ret < 0 && ret != AVERROR_EOF) {
        printf("Error occurred.\n");
        return -1;
    }
    return 0;
}
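To build on Linux, something along these lines should work (library flags inferred from the headers used; the file name rtp_push.c is just a placeholder, and paths may need adjusting for your FFmpeg install): gcc rtp_push.c -o rtp_push -lavformat -lavcodec -lavutil -lm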