ffmpeg学习---3.Outputting to the Screen
来源:互联网 发布:变频器模拟软件 编辑:程序博客网 时间:2024/05/17 07:36
1. 2output.c
2.Makefile
二. 更新版
1. 代码
现在ffmpeg发布了ffmpeg-2.7.2,下面基于这个版本更新一下,同时加上注解,下次看的时候理解就更深了
说明1: SDL_CreateYUVOverlay与sws_getContext中的参数是相对应的
SDL_CreateYUVOverlay SDL_YV12_OVERLAY
SDL_YUY2_OVERLAY
sws_getContext PIX_FMT_YUV420P
PIX_FMT_YUYV422
SDL_YV12_OVERLAY + PIX_FMT_YUYV422: 则显示不正常
SDL_YUY2_OVERLAY + PIX_FMT_YUV420P: 则sws_scale会报错bad dst image pointers
说明2:sws_scale之后就可以把yuv420p数据写到文件中
FILE *fp_yuv;
fp_yuv=fopen("output.yuv","wb+");
int y_size=pCodecCtx->width*pCodecCtx->height;
fwrite(pFrameYUV->data[0],1,y_size,fp_yuv); //Y
fwrite(pFrameYUV->data[1],1,y_size/4,fp_yuv); //U
fwrite(pFrameYUV->data[2],1,y_size/4,fp_yuv); //V
1.2 Makefile
2.运行
3. 代码打包
1view.rar (下载后改名为1view.tar.gz)
- // tutorial01.c
- // Code based on a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de)
- // Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1
- // A small sample program that shows how to use libavformat and libavcodec to
- // read video from a file.
- //
- // Use
- //
- // gcc -o tutorial01 tutorial01.c -lavformat -lavcodec -lz
- //
- // to build (assuming libavformat and libavcodec are correctly installed
- // your system).
- //
- // Run using
- //
- // tutorial01 myvideofile.mpg
- //
- // to write the first five frames from "myvideofile.mpg" to disk in PPM
- // format.
- #include <stdio.h>
- #include <libavformat/avformat.h>
- #include <libswscale/swscale.h>
- #include <SDL.h>
- int main(int argc, char *argv[]) {
- AVFormatContext *pFormatCtx;
- int i, videoStream;
- AVCodecContext *pCodecCtx;
- AVCodec *pCodec;
- AVFrame *pFrame;
- AVFrame *pFrameRGB;
- AVPacket packet;
- int frameFinished;
- int numBytes;
- uint8_t *buffer;
- if(argc < 2) {
- printf("Please provide a movie file\n");
- return -1;
- }
-
- if(SDL_Init(SDL_INIT_VIDEO|SDL_INIT_AUDIO|SDL_INIT_TIMER))
- {
- fprintf(stderr, "SD_init error \n");
- exit(1);
- }
- // Register all formats and codecs
- av_register_all();
- pFormatCtx = avformat_alloc_context();
- // Open video file
- //if(av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL)!=0)
- if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
- return -1; // Couldn't open file
- // Retrieve stream information
- if(avformat_find_stream_info(pFormatCtx,NULL)<0)
- return -1; // Couldn't find stream information
- // Dump information about file onto standard error
- av_dump_format(pFormatCtx, 0, argv[1], 0);
- // Find the first video stream
- videoStream=-1;
- for(i=0; i<pFormatCtx->nb_streams; i++)
- if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
- {
- videoStream=i;
- break;
- }
- if(videoStream==-1)
- return -1; // Didn't find a video stream
- // Get a pointer to the codec context for the video stream
- pCodecCtx=pFormatCtx->streams[videoStream]->codec;
- // Find the decoder for the video stream
- pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
- if(pCodec==NULL) {
- fprintf(stderr, "Unsupported codec!\n");
- return -1; // Codec not found
- }
- // Open codec
- if(avcodec_open2(pCodecCtx, pCodec, NULL)<0)
- return -1; // Could not open codec
- //Set up a screen
- SDL_Surface * screen;
- screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
- if(!screen)
- {
- fprintf(stderr, "SetVideoMode error\n");
- exit(1);
- }
- //create a YUV overlay
- SDL_Overlay *bmp;
- SDL_Rect rect;
- SDL_Event event;
- bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height,
- SDL_YV12_OVERLAY, screen);
- // Allocate video frame
- pFrame=avcodec_alloc_frame();
- // Allocate an AVFrame structure
- pFrameRGB=avcodec_alloc_frame();
- if(pFrameRGB==NULL)
- return -1;
- // Determine required buffer size and allocate buffer
- numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
- pCodecCtx->height);
- buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
- // Assign appropriate parts of buffer to image planes in pFrameRGB
- // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
- // of AVPicture
- avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
- pCodecCtx->width, pCodecCtx->height);
- // Read frames and display on screen
- while(av_read_frame(pFormatCtx, &packet)>=0) {
- // Is this a packet from the video stream?
- if(packet.stream_index==videoStream) {
- // Decode video frame
- avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
- // Did we get a video frame?
- if(frameFinished) {
- SDL_LockYUVOverlay(bmp);
- AVPicture pict;
- pict.data[0] = bmp->pixels[0];
- pict.data[1] = bmp->pixels[2];
- pict.data[2] = bmp->pixels[1];
- pict.linesize[0] = bmp->pitches[0];
- pict.linesize[1] = bmp->pitches[2];
- pict.linesize[2] = bmp->pitches[1];
- // img_convert(&pict, PIX_FMT_YUV420P, (AVPicture*)pFrame, pCodecCtx->pix_fmt,
- // pCodecCtx->width, pCodecCtx->height);
- struct SwsContext *img_convert_ctx;
- img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
- if(img_convert_ctx == NULL)
- {
- fprintf(stderr, "Cannot initialize the conversion context!\n");
- exit(1);
- }
- sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pict.data, pict.linesize);
- SDL_UnlockYUVOverlay(bmp);
- rect.x = 0;
- rect.y = 0;
- rect.w = pCodecCtx->width;
- rect.h = pCodecCtx->height;
- SDL_DisplayYUVOverlay(bmp, &rect);
- }
- }
- // Free the packet that was allocated by av_read_frame
- av_free_packet(&packet);
- SDL_PollEvent(&event);
- switch(event.type)
- {
- case SDL_QUIT:
- SDL_Quit();
- exit(0);
- break;
- default:
- break;
- }
- }
- // Free the RGB image
- av_free(buffer);
- av_free(pFrameRGB);
- // Free the YUV frame
- av_free(pFrame);
- // Close the codec
- avcodec_close(pCodecCtx);
- // Close the video file
- avformat_close_input(&pFormatCtx);
- return 0;
- }
# Build 2output (ffmpeg + SDL 1.2 tutorial player).
CC      = gcc
CFLAGS  = -g -I/home/sun/code/ffmpeg-1.0/install/include/ -I/home/sun/code/SDL-1.2.15/install/include/SDL/
# Link order matters for static archives: higher-level libs first.
# (The original listed -lavutil twice.)
LDFLAGS = -L/home/sun/code/ffmpeg-1.0/install/lib/ -lavformat -lavcodec -lswscale -lavutil -lm
LDFLAGS += -L/home/sun/code/SDL-1.2.15/install/lib/ -lSDLmain -lSDL

TARGETS = 2output

.PHONY: all clean

all: $(TARGETS)

2output.o: 2output.c
	$(CC) $(CFLAGS) -o $@ -c $^

2output: 2output.o
	$(CC) -o $@ $^ $(LDFLAGS)

clean:
	rm -rf *.o $(TARGETS)
二. 更新版
1. 代码
现在ffmpeg发布了ffmpeg-2.7.2,下面基于这个版本更新一下,同时加上注解,下次看的时候理解就更深了
- cong@msi:/work/ffmpeg/test/1view$ cat view.c
- #include "utils.h"
- #include <libavformat/avformat.h>
- #include <libswscale/swscale.h>
- #include <SDL/SDL.h>
- SDL_mutex *affmutex;
- SDL_Event sdlevent;
- int signal_quit = 1;
- static int eventThread(void* data)
- {
- while(signal_quit)
- {
- SDL_LockMutex(affmutex);
- while(SDL_PollEvent(&sdlevent))
- {
- switch(sdlevent.type)
- {
- case SDL_QUIT:
- {
- signal_quit = 0;
- }
- break;
- default:
- break;
- }
- }
- SDL_UnlockMutex(affmutex);
- }
- }
- int main(int argc, char **argv)
- {
- int i=0;
- int ret;
- int videoindex= -1;
- int frameFinished;
- AVFormatContext *pFormatCtx = NULL;
- AVCodecContext * pCodecCtx;
- AVCodec * pCodec;
- AVFrame * pFrame;
- AVFrame * pFrameYUV;
- AVPacket * packet;
- struct SwsContext *img_convert_ctx;
- SDL_Surface* psscreen;
- SDL_Overlay* overlay;
- SDL_Rect rect;
- SDL_Thread* sdl_thread;
- //a. ffmpeg的初始化(虽然名字是register就这么说吧)
- avcodec_register_all();
- avfilter_register_all();
- av_register_all();
-
- //b.打开视频文件
- pFormatCtx = avformat_alloc_context();
- if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
- return -1;
- //c.获取视频文件的流信息,(即查看有几个视频流几个音频流)
- if(avformat_find_stream_info(pFormatCtx, NULL)<0)
- return -1;
- av_dump_format(pFormatCtx,0, 0, 0);
- //d.获取了视频流的index,这样以后读取一帧之后,根据索引号才能判断这一帧是不是视频帧
- for(i=0; i<pFormatCtx->nb_streams; i++)
- {
- if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
- {
- videoindex= i;
- break;
- }
- }
- if(videoindex== -1)
- {
- dbmsg("no video stream found!");
- return -1;
- }
- dbmsg("videoindex=%d", videoindex);
- //e.为视频流寻找解码器:在c中不仅有视频流的index,还有视频流的编码codec_id
- //通过这个codec_id就可以寻到视频流的解码器
- pCodecCtx = pFormatCtx->streams[videoindex]->codec;
- pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
- if(pCodec == NULL)
- {
- dbmsg("Codec not found");
- return -1;
- }
- if(avcodec_open2(pCodecCtx, pCodec, NULL) < 0) //找到解码器之后打开解码器
- return -1;
- pFrame = av_frame_alloc(); //以前的avcodec_alloc_frame函数现在不用了
- pFrameYUV = av_frame_alloc();
- //显示的准备:SDL初始化,设置显示模式,创建画布
- SDL_Init(SDL_INIT_EVERYTHING);
- psscreen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, SDL_SWSURFACE);
- SDL_WM_SetCaption( "FFMPEG Window", NULL);
- //注意这儿的参数SDL_YU12_OVERLAY与SDL_YUY2_OVERLAY一定要与下面sws_scale中的参数配套
- //overlay = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height, SDL_YV12_OVERLAY, psscreen);
- overlay = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height, SDL_YUY2_OVERLAY, psscreen);
- //与SDL的退出相关
- affmutex = SDL_CreateMutex();
- sdl_thread = SDL_CreateThread(eventThread, NULL); //创建SDL线程监测退出信号
- rect.x = 0;
- rect.y = 0;
- rect.w = pCodecCtx->width;
- rect.h = pCodecCtx->height;
- packet = (AVPacket*)av_malloc(sizeof(AVPacket));
- //注意这儿的参数PIX_FMT_YUV420P或PIX_FMT_YUYU422一定要与上面SDL_CreateYUVOVerlay中的参数配套
- //img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
- img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUYV422, SWS_BICUBIC, NULL, NULL, NULL);
- if(img_convert_ctx == NULL)
- {
- dbmsg("img_convert error");
- return -1;
- }
- //f.循环读取视频文件所有的帧,包括音频帧与视频帧:读取帧到packet
- while( (av_read_frame(pFormatCtx, packet)>=0) && (signal_quit))
- {
- //g.判断是视频帧,则对视频帧解码
- if(packet->stream_index == videoindex)
- { //解码packet中的视频帧到pFrame中
- if((ret=avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, packet)) < 0)
- {
- dbmsg("decocode video error");
- return -1;
- }
- if(frameFinished)
- {
- SDL_LockYUVOverlay(overlay);
- pFrameYUV->data[0] = overlay->pixels[0]; //Y
- pFrameYUV->data[1] = overlay->pixels[2]; //U //对overlay中的数据组织不清楚,为什么这儿要把uv交换呢?有知道的告诉我一下,谢谢
- pFrameYUV->data[2] = overlay->pixels[1]; //V
- pFrameYUV->linesize[0] = overlay->pitches[0];
- pFrameYUV->linesize[1] = overlay->pitches[2];
- pFrameYUV->linesize[2] = overlay->pitches[1];
- sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0,
- pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize); //将解码后pFrame中的数据转为sws_getContext中设定的格式,这样就可以通过sdl进行显示了
- SDL_UnlockYUVOverlay(overlay);
- SDL_DisplayYUVOverlay(overlay, &rect);
- SDL_Delay(40);
- }
- }
- }
- SDL_WaitThread(sdl_thread, &ret);
- SDL_DestroyMutex(affmutex);
- return 0;
- }
SDL_CreateYUVOverlay SDL_YV12_OVERLAY
SDL_YUY2_OVERLAY
sws_getContext PIX_FMT_YUV420P
PIX_FMT_YUYV422
SDL_YV12_OVERLAY + PIX_FMT_YUYV422: 则显示不正常
SDL_YUY2_OVERLAY + PIX_FMT_YUV420P: 则sws_scale会报错bad dst image pointers
说明2:sws_scale之后就可以把yuv420p数据写到文件中
FILE *fp_yuv;
fp_yuv=fopen("output.yuv","wb+");
int y_size=pCodecCtx->width*pCodecCtx->height;
fwrite(pFrameYUV->data[0],1,y_size,fp_yuv); //Y
fwrite(pFrameYUV->data[1],1,y_size/4,fp_yuv); //U
fwrite(pFrameYUV->data[2],1,y_size/4,fp_yuv); //V
1.2 Makefile
- cong@msi:/work/ffmpeg/test/1view$ cat Makefile
EXE     = view
CC      = gcc
FFMPEG  = /work/ffmpeg/out
CFLAGS  = -g -O0 -I$(FFMPEG)/include
LDFLAGS = -L$(FFMPEG)/lib/ -lswscale -lswresample -lavformat -lavdevice -lavcodec -lavutil -lavfilter -lm -lSDL

SRC = $(wildcard *.c)
OBJ = $(patsubst %.c,%.o,$(SRC))
DEP = $(patsubst %.c,.%.d,$(SRC))

$(EXE): $(OBJ)
	$(CC) $(CFLAGS) $^ -o $@ $(LDFLAGS)

# Auto-generated dependency files (.foo.d for foo.c).
# NOTE: the original listing had the sed backslashes mangled into forward
# slashes ("/($*/)/.o" / "/1.o"), which breaks dependency generation; the
# standard GNU Make auto-dependency pattern is restored here.
$(DEP): .%.d: %.c
	@set -e; rm -f $@; \
	$(CC) -MM $< > $@.$$$$; \
	sed 's,\($*\)\.o[ :]*,\1.o $@ : ,g' < $@.$$$$ > $@; \
	rm -f $@.$$$$

-include $(DEP)

.PHONY: clean run

clean:
	@rm $(EXE) $(OBJ) $(DEP) -f

run:
	export LD_LIBRARY_PATH=$(FFMPEG)/lib/ \
	&& ./$(EXE) ../resource/bing.rmvb
2.运行
- cong@msi:/work/ffmpeg/test/1view$ make run
- export LD_LIBRARY_PATH=/work/ffmpeg/out/lib/ \ -->已经在Makefile中写好了视频文件的路径
- && ./view ../resource/bing.rmvb
1view.rar (下载后改名为1view.tar.gz)
0 0
- ffmpeg学习---3.Outputting to the Screen
- Tutorial 02: Outputting to the Screen
- Tutorial 02: Outputting to the Screen
- FFmpeg和SDL教程之二(Outputting to the Screen)
- Tutorial 02: Outputting to the Screen输出到屏幕
- wiki: How to grab the desktop (screen) with FFmpeg
- Use ImageMagick to Capture the screen
- Tap anywhere on the screen to continue
- print 'welcome to masm!' in the center of the screen.
- Umbraco(4)-Outputting the Document Type Properties
- 如何分屏幕(How to split the Screen)
- [转]如何分屏幕(How to split the Screen)
- How to use the Journal Import Correction Screen - from Metalink
- Four Ways to Deal With the Longer iPhone 5 screen
- Thransform the vertex from model space to screen space. opengl.
- 如何修改Screen背景?How to - Change the background color of a screen
- screen 学习
- select the appropriate stylesheet according to the user's screen resolution
- ffmpeg学习---2.Making Screencaps
- BP神经网络(一)用三张图,看懂BP神经网络
- latex算法流程图
- python os.path模块常用方法详解
- ANSI转UTF-8
- ffmpeg学习---3.Outputting to the Screen
- RecyclerView添加HeaderView和FooterView
- HDU 2296
- hibernate中使用sql
- Android 解决滑动冲突 疑问和解答
- javascript 预处理
- strtok、strtok_s、strtok_r 字符串分割函数
- ffmpeg学习---4.Playing Sound
- 建议29:区别LINQ查询中的IEnumerable<T>和IQueryable<T>