An example of decoding HEVC/H.265 video with ffmpeg 2.1


The new ffmpeg 2.1 release adds support for the latest video coding standard, H.265/HEVC, so videos in that format can now be decoded. This should give the video industry a significant push forward. The demo in this article is a simple playback program for H.265 video; it is based on the article at http://blog.csdn.net/mu399/article/details/5814859, with some modifications for the new ffmpeg 2.1. Like the reference article, this demo adds no playback delay, so the displayed frame rate is not correct; that issue will be addressed step by step as the study goes deeper. The project can be downloaded from http://download.csdn.net/detail/shaqoneal/6571657.
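As a rough illustration of where such a delay could eventually go (this is only a sketch, not part of the demo; the variable names refer to those in the code further down, and it assumes the stream reports a usable avg_frame_rate):

    // Sketch only: derive a per-frame delay from the stream's average frame rate.
    AVRational fr = pFormatCtx->streams[videoStream]->avg_frame_rate;
    double delay_ms = (fr.num > 0 && fr.den > 0) ? 1000.0 * fr.den / fr.num : 40.0; // fall back to 25 fps

    // ... then, inside the decode loop, right after SDL_DisplayYUVOverlay(bmp, &rect):
    SDL_Delay((Uint32)delay_ms); // crude pacing; proper A/V sync would use the packet timestamps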

The project depends on the ffmpeg and SDL libraries. There are plenty of tutorials on how to add these libraries to a project, so that is not repeated here; the project uploaded to the download link above is already configured.
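For reference, in a Visual Studio project one possible way to pull in the import libraries straight from source is a few #pragma comment(lib, ...) lines. The .lib names below are assumptions that depend on how ffmpeg and SDL were built, and the include/library directories still have to be set in the project options:

    // Assumed import library names; adjust to match your ffmpeg/SDL build.
    #pragma comment(lib, "avformat.lib")
    #pragma comment(lib, "avcodec.lib")
    #pragma comment(lib, "avutil.lib")
    #pragma comment(lib, "swscale.lib")
    #pragma comment(lib, "SDL.lib")
    #pragma comment(lib, "SDLmain.lib")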

The files are simple. The header file is as follows:

//header.h
#pragma once

#ifdef __cplusplus
extern "C" {
#endif

#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libswscale/swscale.h"

#ifdef __cplusplus
}
#endif

The source file:

//main.cpp
#include "header.h"
#include <stdio.h>
#include "SDL/include/SDL.h"
#include <stdlib.h>
#include <string.h>
#include <math.h>

static int sws_flags = SWS_BICUBIC;

int main(int argc, char *argv[])
{
    AVFormatContext *pFormatCtx = NULL;
    int i, videoStream = -1;
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec;
    AVFrame *pFrame;
    AVPacket packet;
    int frameFinished;
    float aspect_ratio;        // unused here; leftover from the reference tutorial
    AVCodecContext *aCodecCtx; // unused here; leftover from the reference tutorial
    SDL_Overlay *bmp;
    SDL_Surface *screen;
    SDL_Rect rect;
    SDL_Event event;

    if(argc < 2)
    {
        fprintf(stderr, "Usage: test <file>\n");
        exit(1);
    }

    av_register_all();

    if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL))
        return -1; // Couldn't open file
    if(av_find_stream_info(pFormatCtx) < 0)   // deprecated name; avformat_find_stream_info() is the current one
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    // dump_format(pFormatCtx, 0, argv[1], 0);

    // Find the first video stream
    for(i = 0; i < pFormatCtx->nb_streams; i++)
    {
        if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && videoStream < 0)
        {
            videoStream = i;
        }
    }
    if(videoStream == -1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec == NULL)
    {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }
    // Open codec
    if(avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame = avcodec_alloc_frame();

    uint8_t *buffer;
    int numBytes;
    // Determine required buffer size and allocate buffer
    numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
    buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));

    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER))
    {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

#ifndef __DARWIN__
    screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
#else
    screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0);
#endif
    if(!screen)
    {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        exit(1);
    }

    bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height,
                               SDL_YV12_OVERLAY, screen);

    static struct SwsContext *img_convert_ctx;
    if(img_convert_ctx == NULL)
    {
        img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
                                         pCodecCtx->pix_fmt,
                                         pCodecCtx->width, pCodecCtx->height,
                                         PIX_FMT_YUV420P,
                                         sws_flags, NULL, NULL, NULL);
        if(img_convert_ctx == NULL)
        {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
    }

    i = 0;
    while(av_read_frame(pFormatCtx, &packet) >= 0)
    {
        // Is this a packet from the video stream?
        if(packet.stream_index == videoStream)
        {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

            // Did we get a video frame?
            if(frameFinished)
            {
                // Convert the image from its native format to RGB
                /*sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize,
                    0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);*/
                // Save the frame to disk
                /*if(++i <= 5)
                    SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);*/

                SDL_LockYUVOverlay(bmp);

                // The YV12 overlay stores its planes in Y, V, U order, so the
                // U and V pointers/pitches are swapped relative to the frame.
                AVPicture pict;
                pict.data[0] = bmp->pixels[0];
                pict.data[1] = bmp->pixels[2];
                pict.data[2] = bmp->pixels[1];

                pict.linesize[0] = bmp->pitches[0];
                pict.linesize[1] = bmp->pitches[2];
                pict.linesize[2] = bmp->pitches[1];

                // Convert the image into the YUV format that SDL uses
                /*img_convert(&pict, PIX_FMT_YUV420P,
                    (AVPicture *)pFrame, pCodecCtx->pix_fmt,
                    pCodecCtx->width, pCodecCtx->height);*/
                sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize,
                    0, pCodecCtx->height, pict.data, pict.linesize);

                SDL_UnlockYUVOverlay(bmp);

                rect.x = 0;
                rect.y = 0;
                rect.w = pCodecCtx->width;
                rect.h = pCodecCtx->height;
                SDL_DisplayYUVOverlay(bmp, &rect);
                //Sleep(60);
            }
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);

        SDL_PollEvent(&event);
        switch(event.type)
        {
        case SDL_QUIT:
            SDL_Quit();
            exit(0);
            break;
        default:
            break;
        }
    }

    // Free the RGB image buffer
    av_free(buffer);
    //av_free(pFrameRGB);
    // Free the YUV frame
    av_free(pFrame);
    // Close the codec
    avcodec_close(pCodecCtx);
    // Close the video file
    av_close_input_file(pFormatCtx);   // deprecated name; avformat_close_input(&pFormatCtx) is the current one
    return 0;
}
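Since the whole point of moving to ffmpeg 2.1 is the new HEVC support, it can be worth verifying that the ffmpeg build being linked against actually contains the H.265 decoder. A minimal standalone check, independent of the demo, could look like this:

    // Check whether the linked libavcodec provides an HEVC/H.265 decoder.
    #include <stdio.h>
    #ifdef __cplusplus
    extern "C" {
    #endif
    #include "libavcodec/avcodec.h"
    #ifdef __cplusplus
    }
    #endif

    int main()
    {
        avcodec_register_all();
        AVCodec *hevc = avcodec_find_decoder(AV_CODEC_ID_HEVC);
        printf("HEVC decoder: %s\n", hevc ? hevc->name : "not available");
        return 0;
    }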

Over the coming period I will try to explain, in as much detail as possible, the structure and purpose of every class and statement above. The ultimate goal is to thoroughly understand how ffmpeg processes video, so that all kinds of ffmpeg-based development become straightforward. Let's keep at it!
