Capturing YUV data with V4L2 and compressing it into an H.264 file with x264

1. Capturing YUYV video data with V4L2

a) Open the V4L2 device and create the file that will receive the YUYV data

open_v4l2_device(const char *const devname)

video_obj.v4l2_fd=open(devname,O_RDWR)//open the V4L2 device

fopen(name,"wb+")//create the YUYV output file

b) Set the video format and resolution

set_v4l2_fmt(unsigned int format,unsigned int width,unsigned int height)

video_obj.fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

video_obj.fmt.fmt.pix.pixelformat = format;

video_obj.fmt.fmt.pix.width = width;

video_obj.fmt.fmt.pix.height = height;

ioctl(video_obj.v4l2_fd, VIDIOC_S_FMT, &video_obj.fmt)

c) Read back the current format and resolution to verify that the settings took effect

struct v4l2_format fmt;

fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

ioctl(video_obj.v4l2_fd, VIDIOC_G_FMT, &fmt)
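
Note that VIDIOC_S_FMT is allowed to silently adjust the requested width, height and pixel format to the nearest configuration the driver supports, so it is worth comparing the values returned by VIDIOC_G_FMT against what was asked for. A minimal sketch (the error message and the decision of what to do next are illustrative, not part of the original code):

if (fmt.fmt.pix.pixelformat != V4L2_PIX_FMT_YUYV ||
    fmt.fmt.pix.width != 640 || fmt.fmt.pix.height != 480)
    fprintf(stderr, "driver adjusted the requested format\n"); /* decide whether to continue */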

d) Set the frame rate (timeperframe is the frame interval, so numerator = 1, denominator = 30 requests 30 fps)

set_v4l2_param(unsigned int num,unsigned int deno)

struct v4l2_streamparm param;

memset(&param, 0, sizeof(struct v4l2_streamparm));

param.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

param.parm.capture.timeperframe.numerator = num;

param.parm.capture.timeperframe.denominator =deno;

ioctl(video_obj.v4l2_fd, VIDIOC_S_PARM,&param)

e) Read back the frame rate to verify that the setting took effect

get_v4l2_param(void)

struct v4l2_streamparm param;

memset(&param, 0, sizeof(struct v4l2_streamparm));

param.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

ioctl(video_obj.v4l2_fd, VIDIOC_G_PARM,&param)

f) Request V4L2 frame buffers

request_v4l2_buffer(unsigned int count)

video_obj.buffers = calloc(count, sizeof(VideoBuffer));

memset(&video_obj.req, 0, sizeof(video_obj.req));

video_obj.req.count = count;

video_obj.req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

video_obj.req.memory = V4L2_MEMORY_MMAP;

ioctl(video_obj.v4l2_fd, VIDIOC_REQBUFS, &video_obj.req)

g) Memory-map the camera buffers

for (numBufs = 0; numBufs < video_obj.req.count; numBufs++)

{

memset(&video_obj.buf, 0, sizeof(video_obj.buf));

//stream type, always V4L2_BUF_TYPE_VIDEO_CAPTURE for capture

video_obj.buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

//memory type: V4L2_MEMORY_MMAP (memory mapping) or V4L2_MEMORY_USERPTR (user pointer)

video_obj.buf.memory = V4L2_MEMORY_MMAP;

video_obj.buf.index = numBufs;

//query the buffer's length and offset

ioctl(video_obj.v4l2_fd, VIDIOC_QUERYBUF, &video_obj.buf)

video_obj.buffers[numBufs].length = video_obj.buf.length;

video_obj.buffers[numBufs].offset = (size_t)video_obj.buf.m.offset;

//use mmap to map the driver buffer into the process address space

video_obj.buffers[numBufs].start =

mmap(NULL,video_obj.buf.length,PROT_READ|PROT_WRITE,

MAP_SHARED,video_obj.v4l2_fd,video_obj.buf.m.offset);

//enqueue the buffer

ioctl(video_obj.v4l2_fd,VIDIOC_QBUF,&video_obj.buf)

}

h) Start capturing data

i. Dequeue one frame of buffered data

start_v4l2_capture(void)

type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

ioctl(video_obj.v4l2_fd, VIDIOC_STREAMON, &type)

pull_v4l2_frame_buffer(unsigned int index,unsigned char **start,unsigned int *len)

video_obj.buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

video_obj.buf.memory = V4L2_MEMORY_MMAP;

video_obj.buf.index = index;

ioctl(video_obj.v4l2_fd,VIDIOC_DQBUF,&video_obj.buf)

*start = video_obj.buffers[index].start;

*len = video_obj.buffers[index].length;

ii. Write the YUYV data to the file (and display the current frame via SDL2 at the same time)

fwrite(photo,1,len,fd);

fflush(fd);
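
For reference, one YUYV frame at 640x480 occupies 640*480*2 = 614,400 bytes (2 bytes per pixel); this is exactly the length the fwrite() check in the full main() below expects.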

iii. Re-queue the frame buffer into the buffer pool

push_v4l2_frame_buffer(unsigned int index)

video_obj.buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

video_obj.buf.memory = V4L2_MEMORY_MMAP;

video_obj.buf.index = index;

ioctl(video_obj.v4l2_fd, VIDIOC_QBUF, &video_obj.buf)

i) Release the dynamically allocated resources

release_v4l2_resource(void)

for (numBufs = 0; numBufs < video_obj.req.count; numBufs++)

munmap(video_obj.buffers[numBufs].start,video_obj.buffers[numBufs].length);

free(video_obj.buffers);

close(video_obj.v4l2_fd);

2. Displaying the captured raw YUYV data with SDL2

a) Initialize the SDL2 subsystems that will be used

sdl2_init(int w,int h)

SDL_Init(SDL_INIT_VIDEO|SDL_INIT_TIMER)

b) Create a new window

sdl2_init(int w,int h)

win = SDL_CreateWindow("Sam",0,0,w,h,SDL_WINDOW_SHOWN|SDL_WINDOW_RESIZABLE);

c) Create the renderer

sdl2_init(int w,int h)

renderer = SDL_CreateRenderer(win,-1,SDL_RENDERER_SOFTWARE);

d) Create the renderer's texture (SDL_PIXELFORMAT_YUY2 matches V4L2's YUYV byte layout, so captured frames can be uploaded directly)

sdl2_init(int w,int h)

texture=SDL_CreateTexture(renderer,SDL_PIXELFORMAT_YUY2,

SDL_TEXTUREACCESS_STREAMING,w,h);

e) Create the SDL2 event-handling thread

pthread_create(&pid, NULL,event_loop,NULL)

SDL_PollEvent(&event)

f) Render loop step 1: update the renderer's texture

sdl2_refresh(void *pixels,int pitch)

SDL_UpdateTexture(texture,NULL,pixels,pitch)

g) Render loop step 2: clear the renderer

sdl2_refresh(void *pixels,int pitch)

SDL_RenderClear(renderer)

h) Render loop step 3: copy the texture to the renderer

sdl2_refresh(void *pixels,int pitch)

SDL_RenderCopy(renderer,texture,NULL,NULL)

i) Render loop step 4: present the video

sdl2_refresh(void *pixels,int pitch)

SDL_RenderPresent(renderer);
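
Put together, one refresh pass is just these four calls in order; a condensed sketch of the sdl2_refresh() found in the full source below:

SDL_UpdateTexture(texture, NULL, pixels, pitch); /* upload the YUYV frame */
SDL_RenderClear(renderer);                       /* clear the back buffer */
SDL_RenderCopy(renderer, texture, NULL, NULL);   /* stretch the texture over the window */
SDL_RenderPresent(renderer);                     /* flip to the screen */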

3. Compressing the video data to H.264 with libx264

a) Create the relevant structures, open the YUYV input file and the file that will hold the H.264 output

x264_nal_t* pNals = NULL;

x264_t* pHandle   = NULL;

x264_picture_t* pPic_in = (x264_picture_t*)malloc(sizeof(x264_picture_t));

x264_picture_t* pPic_out = (x264_picture_t*)malloc(sizeof(x264_picture_t));

x264_param_t* pParam = (x264_param_t*)malloc(sizeof(x264_param_t));

FILE* fp_src  = fopen("x.yuv", "rb");

FILE* fp_dst = fopen("x.h264", "wb");

b) Fill x264_param_t with default parameters, then override the width, height and colorspace (the capture used V4L2_PIX_FMT_YUYV at 640x480, i.e. 4:2:2 sampling)

x264_param_default(pParam);

pParam->i_width   = width;

pParam->i_height  = height;

pParam->i_csp = csp;

c) Set the profile (in current x264 releases, x264_profile_names[4] is "high422", which matches the 4:2:2 input)

x264_param_apply_profile(pParam, x264_profile_names[4]);

d) Open the encoder

pHandle = x264_encoder_open(pParam);

e) Initialize the input and output picture structures

x264_picture_init(pPic_out);

x264_picture_alloc(pPic_in, csp, pParam->i_width, pParam->i_height);

f) Compute the number of frames from the size of the input file

fseek(fp_src,0,SEEK_END);

switch(csp){

case X264_CSP_I444:

frame_num=ftell(fp_src)/(y_size*3);

break;

case X264_CSP_I420:

frame_num=ftell(fp_src)/(y_size*3/2);

break;

case X264_CSP_I422:

frame_num=ftell(fp_src)/(y_size*2);

break;

}

fseek(fp_src,0,SEEK_SET);
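
As a worked example: at 640x480, y_size = 640*480 = 307200, so one I422 frame occupies y_size*2 = 614400 bytes, and a 61,440,000-byte capture file yields frame_num = 100.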

g) Encode loop step 1: split out the YUV components

h) Encode loop step 2: encode one frame

i) Encode loop step 3: write the encoded data to the .h264 file

The three steps above consist of the following:

for( i=0;i<frame_num;i++){

switch(csp){

case X264_CSP_I444:{

fread(pPic_in->img.plane[0],y_size,1,fp_src);//Y

fread(pPic_in->img.plane[1],y_size,1,fp_src);//U

fread(pPic_in->img.plane[2],y_size,1,fp_src);//V

break;}

case X264_CSP_I420:{

fread(pPic_in->img.plane[0],y_size,1,fp_src);//Y

fread(pPic_in->img.plane[1],y_size/4,1,fp_src);//U

fread(pPic_in->img.plane[2],y_size/4,1,fp_src);//V

break;}

case X264_CSP_I422:{

/*

YUYV data is laid out as follows (a 4x4-pixel block):

Y U Y V Y U Y V

Y U Y V Y U Y V

Y U Y V Y U Y V

Y U Y V Y U Y V

Each pixel carries exactly one Y sample, so the number of Y samples

equals the number of pixels and one frame occupies width*height*2 bytes.

*/

int index = 0;

int y_i  = 0 , u_i  = 0 , v_i = 0;

for(index = 0 ; index < y_size*2 ;){

fread(&pPic_in->img.plane[0][y_i++],1,1,fp_src);//Y

index++;

fread(&pPic_in->img.plane[1][u_i++],1,1,fp_src);//U

index++;

fread(&pPic_in->img.plane[0][y_i++],1,1,fp_src);//Y

index++;

fread(&pPic_in->img.plane[2][v_i++],1,1,fp_src);//V

index++;

}break;

}

}

pPic_in->i_pts = i;

x264_encoder_encode(pHandle, &pNals, &iNal, pPic_in, pPic_out);

for ( j = 0; j < iNal; ++j)

fwrite(pNals[j].p_payload, 1, pNals[j].i_payload, fp_dst);

}
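
Note that the I422 branch above pulls the file through fread() one byte at a time, which is slow. A sketch of an equivalent but faster split that reads one packed YUYV frame into a temporary buffer first (the yuyv buffer is an assumption, not part of the original code):

unsigned char *yuyv = malloc(y_size*2);
if(fread(yuyv, 1, y_size*2, fp_src) == (size_t)(y_size*2))
{
    int p;
    /* one 4-byte YUYV group carries two pixels */
    for(p = 0; p < y_size/2; p++)
    {
        pPic_in->img.plane[0][2*p]   = yuyv[4*p];   /* Y0 */
        pPic_in->img.plane[1][p]     = yuyv[4*p+1]; /* U  */
        pPic_in->img.plane[0][2*p+1] = yuyv[4*p+2]; /* Y1 */
        pPic_in->img.plane[2][p]     = yuyv[4*p+3]; /* V  */
    }
}
free(yuyv);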

j) Encode step 4: flush the data still buffered inside the encoder and write it to the file

while(1){

ret = x264_encoder_encode(pHandle, &pNals, &iNal, NULL, pPic_out);  

if(ret == 0)

break;

for(j = 0;j < iNal; ++j)

fwrite(pNals[j].p_payload, 1, pNals[j].i_payload, fp_dst);

}
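
The loop above keeps feeding x264_encoder_encode() a NULL input picture until it returns 0, meaning no encoded bytes remain in the encoder; equivalently, the loop condition could test x264_encoder_delayed_frames(pHandle).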

k) Release the dynamically allocated resources

x264_picture_clean(pPic_in);

x264_encoder_close(pHandle);

pHandle = NULL;

free(pPic_in);

free(pPic_out);

free(pParam);

fclose(fp_src);

fclose(fp_dst);

 

The full source code of the two projects follows.

Notes:

1. The V4L2 capture and SDL2 display code form one project

2. Compressing the YUV data file into an H.264 file with libx264 is a separate project

3. The test environment is Ubuntu running in VMware; SDL 1.2 was installed by default, so it was removed and SDL 2.0 was compiled and installed from source

4. libx264 must be installed

5. Most of the source code is adapted from blog posts on various forums:

http://blog.csdn.net/leixiaohua1020/article/details/42078645

http://blog.csdn.net/yuanhubilie/article/details/37930429

File v4l2lib.c (the capture and display project)

//build command: gcc v4l2lib.c -L/usr/local/SDL/lib/ -I/usr/local/SDL/include/SDL2 -lSDL2 -lpthread -o v4l2

//SDL is installed under /usr/local/SDL; adjust the paths to your own installation

#include <linux/videodev2.h>

#include <fcntl.h>

#include <unistd.h>

#include <stdlib.h>

#include <stdio.h>

#include <string.h>

#include <errno.h>

#include <sys/types.h>

#include <sys/stat.h>

#include <sys/mman.h>

#include <sys/select.h>

#include <sys/time.h>

#include <pthread.h>

#include "SDL.h"

#include <time.h>

//--------macro definition------

#define MAX_DEV_NAME 32

#define MAX_BUF 5

#define VIDEO_REC

#define WIDTH_PIX 640

#define HEIGHT_PIX 480

//--------structures defined here-------

typedef struct VideoBuffer

{

unsigned char *start;

size_t offset;

size_t length;

}VideoBuffer;

 

typedef struct v4l2_param{

char v4l2_devname[MAX_DEV_NAME];//device name

int v4l2_fd;//file descriptor

VideoBuffer *buffers;

struct v4l2_requestbuffers req;

struct v4l2_capability cap;

struct v4l2_input input;

struct v4l2_format fmt;

struct v4l2_buffer buf;

}VIDEO_T;

//--------variables defined here-------

static VIDEO_T video_obj;

static pthread_t pid;

static unsigned char state = 0;

//--------SDL2----------

static unsigned char inited = 0;

static SDL_Window * win = NULL;

static SDL_Renderer * renderer = NULL;

static SDL_Texture * texture = NULL;

static SDL_CommonEvent comm;

static SDL_Event event;

 

static int open_v4l2_device(const char *const devname)

{

//open the device

if(strlen(devname) >= MAX_DEV_NAME)

{

printf("device name fail:%s\n",devname);

return -1;

}

else

memset(&video_obj,0,sizeof(video_obj));

video_obj.v4l2_fd = open(devname,O_RDWR);

if(video_obj.v4l2_fd < 0)

{

perror("open fail");

return -1;

}

else

printf("%s success\n",__func__);

memcpy(video_obj.v4l2_devname,devname,strlen(devname));

return 0;

}

 

static int set_v4l2_param(unsigned int num,unsigned int deno)

{

struct v4l2_streamparm param;

memset(&param, 0, sizeof(struct v4l2_streamparm));

param.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

param.parm.capture.timeperframe.numerator = num;

param.parm.capture.timeperframe.denominator =deno;

if(ioctl(video_obj.v4l2_fd, VIDIOC_S_PARM, &param) < 0)

{

printf("%s fail\n",__func__);

return -1;

}

else

{

printf("%s ok\n",__func__);

return 0;

}

}

 

static int get_v4l2_param(void)

{

struct v4l2_streamparm param;

memset(&param, 0, sizeof(struct v4l2_streamparm));

param.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

if(ioctl(video_obj.v4l2_fd, VIDIOC_G_PARM, &param) < 0)

{

perror("get param failed");

return -1;

}

else

{

printf("%s:%d/%d\n",__func__,

param.parm.capture.timeperframe.numerator,param.parm.capture.timeperframe.denominator);

return 0;

}

}

 

static int set_v4l2_fmt(unsigned int format,

unsigned int width,unsigned int height)

{

//set the video format

memset(&video_obj.fmt,0,sizeof(video_obj.fmt));

//stream type, always V4L2_BUF_TYPE_VIDEO_CAPTURE for capture

video_obj.fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

//pixel format of the source, e.g. V4L2_PIX_FMT_YUYV (YUV 4:2:2) or V4L2_PIX_FMT_RGB565

video_obj.fmt.fmt.pix.pixelformat = format;

//video_obj.fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;

//set the frame width

video_obj.fmt.fmt.pix.width = width;

//set the frame height

video_obj.fmt.fmt.pix.height = height;

if (ioctl(video_obj.v4l2_fd, VIDIOC_S_FMT, &video_obj.fmt) < 0)//apply the settings

{

perror("set format failed");

return -1;

}

else

printf("%s success[format:%X w:%d h:%d]\n",__func__,format,width,height);

 

struct v4l2_format fmt;

memset(&fmt, 0, sizeof(fmt));

fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

if (ioctl(video_obj.v4l2_fd, VIDIOC_G_FMT, &fmt) < 0)

{

perror("set format failed");

return -1;

}

else

printf("%s get success[format:%X w:%d h:%d]\n",

__func__,fmt.fmt.pix.pixelformat,fmt.fmt.pix.width,fmt.fmt.pix.height);

return 0;

}

 

static int request_v4l2_buffer(unsigned int count)

{

//request frame buffers

video_obj.buffers = calloc(count, sizeof(VideoBuffer));

memset(&video_obj.req, 0, sizeof(video_obj.req));

//number of buffers, i.e. how many frames can be held

video_obj.req.count = count;

//stream type, always V4L2_BUF_TYPE_VIDEO_CAPTURE for capture

video_obj.req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

//memory type: V4L2_MEMORY_MMAP or V4L2_MEMORY_USERPTR

video_obj.req.memory = V4L2_MEMORY_MMAP;

//apply the request

if (ioctl(video_obj.v4l2_fd, VIDIOC_REQBUFS, &video_obj.req) == -1)

{

perror("request buffer error \n");

return -1;

}

else

printf("%s success[request %d buffers]\n",__func__,count);

return 0;

}

 

static int mmap_v4l2_buffer(void)

{

//map the buffers obtained via VIDIOC_REQBUFS into user space

int numBufs;

for (numBufs = 0; numBufs < video_obj.req.count; numBufs++)

{

memset(&video_obj.buf, 0, sizeof(video_obj.buf));

//stream type, always V4L2_BUF_TYPE_VIDEO_CAPTURE for capture

video_obj.buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

//memory type: V4L2_MEMORY_MMAP (memory mapping) or V4L2_MEMORY_USERPTR (user pointer)

video_obj.buf.memory = V4L2_MEMORY_MMAP;

video_obj.buf.index = numBufs;

//query the buffer's length and offset

if (ioctl(video_obj.v4l2_fd, VIDIOC_QUERYBUF, &video_obj.buf) < 0)

{

perror("VIDIOC_QUERYBUF");

return -1;

}

//printf("request buf %d success\n",numBufs);

video_obj.buffers[numBufs].length = video_obj.buf.length;

video_obj.buffers[numBufs].offset = (size_t)video_obj.buf.m.offset;

//use mmap to map the driver buffer into the process address space

video_obj.buffers[numBufs].start = mmap(NULL,video_obj.buf.length,

PROT_READ|PROT_WRITE,MAP_SHARED,video_obj.v4l2_fd,video_obj.buf.m.offset);

if (video_obj.buffers[numBufs].start == MAP_FAILED)

{

perror("buffers error");

return -1;

}

//printf("mmap buf 0x%p lenght:%d success\n",video_obj.buffers[numBufs].start,video_obj.buf.length);

//enqueue the buffer

if (ioctl(video_obj.v4l2_fd,VIDIOC_QBUF,&video_obj.buf) < 0)

{

printf("VIDIOC_QBUF");

return -1;

}

}

printf("%s success\n",__func__);

return 0;

}

 

static int start_v4l2_capture(void)

{

//start capturing

enum v4l2_buf_type type;

//stream type, always V4L2_BUF_TYPE_VIDEO_CAPTURE for capture

type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

if (ioctl(video_obj.v4l2_fd, VIDIOC_STREAMON, &type) < 0)

{

perror("VIDIOC_STREAMON");

return -1;

}

printf("%s stream on success\n",__func__);

return 0;

}

 

static int pull_v4l2_frame_buffer(unsigned int index , unsigned char **start , unsigned int *len)

{

video_obj.buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; //dequeue a buffer of raw captured data

video_obj.buf.memory = V4L2_MEMORY_MMAP; //memory type: V4L2_MEMORY_MMAP (memory mapping) or V4L2_MEMORY_USERPTR (user pointer)

if(video_obj.req.count <= index)

return -1;

video_obj.buf.index = index; //index of the buffer we expect; VIDIOC_DQBUF overwrites buf.index with whichever buffer the driver actually returns, so this code assumes buffers come back in round-robin order

if (ioctl(video_obj.v4l2_fd,VIDIOC_DQBUF,&video_obj.buf) < 0)

{

perror("VIDIOC_DQBUF");

return -1;

}

*start = video_obj.buffers[index].start;

*len = video_obj.buffers[index].length;

return 0;

}

 

static int push_v4l2_frame_buffer(unsigned int index)

{

video_obj.buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; //re-queue a buffer after its data has been consumed

video_obj.buf.memory = V4L2_MEMORY_MMAP; //memory type: V4L2_MEMORY_MMAP (memory mapping) or V4L2_MEMORY_USERPTR (user pointer)

if(video_obj.req.count <= index)

return -1;

video_obj.buf.index = index; //index of the buffer to re-queue

//hand the buffer back to the driver so it can be filled again

if (ioctl(video_obj.v4l2_fd, VIDIOC_QBUF, &video_obj.buf) < 0)

{

perror("VIDIOC_QBUF");

return -1;

}

return 0;

}

 

static void release_v4l2_resource(void)

{

int numBufs;

for (numBufs = 0; numBufs < video_obj.req.count; numBufs++)

munmap(video_obj.buffers[numBufs].start,video_obj.buffers[numBufs].length);

free(video_obj.buffers);

close(video_obj.v4l2_fd);

printf("%s\n",__func__);

}

 

static int sdl2_init(int w,int h)

{

if(SDL_Init(SDL_INIT_VIDEO|SDL_INIT_TIMER) == -1)

{

printf("SDL_Init fail!");

return -1;

}

else

printf("SDL_Init success\n");

/*

title : window title

x : window x position; can also be SDL_WINDOWPOS_CENTERED or SDL_WINDOWPOS_UNDEFINED

y : window y position; same as above

w : window width

h : window height

flags : supports the following flags, covering whether the window is maximized, minimized, resizable and so on:

   ::SDL_WINDOW_FULLSCREEN,    ::SDL_WINDOW_OPENGL,

   ::SDL_WINDOW_HIDDEN,    ::SDL_WINDOW_BORDERLESS,

   ::SDL_WINDOW_RESIZABLE,    ::SDL_WINDOW_MAXIMIZED,

   ::SDL_WINDOW_MINIMIZED,    ::SDL_WINDOW_INPUT_GRABBED,

   ::SDL_WINDOW_ALLOW_HIGHDPI.

*/

win = SDL_CreateWindow("Sam",0,0,w,h,SDL_WINDOW_SHOWN|SDL_WINDOW_RESIZABLE);

if(win == NULL)

{

printf("SDL_CreateWindow fail\n");

return -1;

}

else

{

printf("SDL_CreateWindow success\n");

}

//create the renderer

/*

window : the window where rendering is displayed.

index : index of the rendering driver to initialize; -1 initializes the first driver supporting the requested flags.

flags : supports the following values (defined in SDL_RendererFlags):

SDL_RENDERER_SOFTWARE : software rendering

SDL_RENDERER_ACCELERATED : hardware acceleration

SDL_RENDERER_PRESENTVSYNC : present is synchronized with the display refresh rate

SDL_RENDERER_TARGETTEXTURE : the renderer supports rendering to a texture

*/

renderer = SDL_CreateRenderer(win,-1,SDL_RENDERER_SOFTWARE);

if(renderer == NULL)

{

printf("SDL_CreateRenderer fail\n");

return -1;

}

else

{

printf("SDL_CreateRenderer success\n");

}

/*

The parameters are:

renderer : the target renderer.

format : the pixel format of the texture.

access : one of the following (defined in SDL_TextureAccess):

SDL_TEXTUREACCESS_STATIC : changes rarely

SDL_TEXTUREACCESS_STREAMING : changes frequently, lockable

SDL_TEXTUREACCESS_TARGET : can be used as a render target

w : texture width

h : texture height

*/

texture = SDL_CreateTexture(renderer,SDL_PIXELFORMAT_YUY2,SDL_TEXTUREACCESS_STREAMING,w,h);

if(texture == NULL)

{

printf("SDL_CreateTexture fail\n");

return -1;

}

else

{

printf("SDL_CreateTexture success\n");

}

return 0;

}

 

static void *event_loop(void *param)

{

printf("%s begin time:%d\n",__func__,SDL_GetTicks());

while(1)

{

if(SDL_PollEvent(&event) > 0 &&

(comm.type != event.common.type || comm.timestamp != event.common.timestamp))

{

comm.type = event.common.type;

comm.timestamp = event.common.timestamp;

switch(event.type)

{  

case SDL_QUIT:

printf("SDL_WINDOWEVENT\n");state = 1;return NULL;

case SDL_WINDOWEVENT:

printf("SDL_WINDOWEVENT\n");break;

case SDL_SYSWMEVENT:

printf("SDL_SYSWMEVENT\n");break;

case SDL_KEYDOWN:

printf("SDL_KEYDOWN\n");break;

case SDL_KEYUP:

printf("SDL_KEYUP\n");break;

case SDL_TEXTEDITING:

printf("SDL_TEXTEDITING\n");break;

case SDL_TEXTINPUT:

printf("SDL_TEXTINPUT\n");break;

case SDL_KEYMAPCHANGED:

printf("SDL_KEYMAPCHANGED\n");break;

case SDL_MOUSEMOTION:

printf("SDL_MOUSEMOTION\n");break;

case SDL_MOUSEBUTTONDOWN:

printf("SDL_MOUSEBUTTONDOWN\n");break;

case SDL_MOUSEBUTTONUP:

printf("SDL_MOUSEBUTTONUP\n");break;

case SDL_MOUSEWHEEL:

printf("SDL_MOUSEWHEEL\n");break;

default:

printf("%X\n",event.type);

break;

}

}

}

printf("%s end time:%d\n",__func__,SDL_GetTicks());

return NULL;

}

 

static int sdl2_refresh(void *pixels,int pitch)

{

if(inited == 0)

{

if(sdl2_init(WIDTH_PIX,HEIGHT_PIX))

return -1;

inited = 1;

if(pthread_create(&pid, NULL,event_loop,NULL) != 0)

{

printf("pthread_create fail\n");

return -1;

}

}

/*

The parameters are:

texture : the target texture.

rect : the rectangular region to update; NULL updates the whole texture.

pixels : the pixel data.

pitch : the number of bytes in one row of pixel data.

*/

if(SDL_UpdateTexture(texture,NULL,pixels,pitch) != 0)

{

printf("SDL_UpdateTexture fail\n");

return -1;

}

//clear the renderer

    SDL_RenderClear(renderer);

/*

The parameters are:

renderer : the rendering target.

texture : the source texture.

srcrect : the portion of the texture to copy; NULL takes the whole texture as input.

dstrect : the portion of the rendering target to draw into; NULL stretches over the whole target.

*/

if(SDL_RenderCopy(renderer,texture,NULL,NULL) != 0)

{

printf("SDL_RenderCopy fail\n");

return -1;

}

SDL_RenderPresent(renderer);

return 0;

}

 

static void sdl2_uninit(void)

{

SDL_DestroyTexture(texture);

SDL_DestroyRenderer(renderer);

SDL_DestroyWindow(win);

SDL_Quit();

printf("SDL uninit\n");

}

 

static int generate_yuv_name(char *name,int maxlen)

{

//get the current time

time_t now;

time(&now);

//the timestamp is currently unused; the output name is hard-coded to "2.yuv"

snprintf(name,maxlen,"%d.yuv",2);

printf("%s:%s\n",__func__,name);

return 0;

}

 

int main(int argc, char *argv[])

{

int index = 0;

int cnt = 0;

unsigned char *photo = NULL;

unsigned int len = 0;

char name[64] = {0};

if(argc < 2 || open_v4l2_device(argv[1]) != 0)

return -1;

#ifdef VIDEO_REC

generate_yuv_name(name,sizeof(name));

FILE * fd = fopen(name,"wb+");

if(fd == NULL)

{

printf("open fail:%s\n",name);

return -1;

}

#endif

if(set_v4l2_fmt(V4L2_PIX_FMT_YUYV,WIDTH_PIX,HEIGHT_PIX))

return -1;

if(set_v4l2_param(1,30))

return -1;

if(get_v4l2_param())

return -1;

if(request_v4l2_buffer(MAX_BUF))

return -1;

if(mmap_v4l2_buffer())

return -1;

if(start_v4l2_capture())

return -1;

while(1)

{

//printf("cnt:%d\n",cnt++);

if(pull_v4l2_frame_buffer(index,&photo,&len))

break;

#ifdef VIDEO_REC

{

int l = fwrite(photo,1,len,fd);

if(l != (HEIGHT_PIX*WIDTH_PIX*2))

{

printf("write fail:%s [%d]\n",name,l);

fclose(fd);

break;

}

fflush(fd);

}

#endif

if(sdl2_refresh(photo,WIDTH_PIX*2))

break;

push_v4l2_frame_buffer(index);

index++;

if(index == MAX_BUF)

index = 0;

if(state == 1)

break;

usleep(1000*30);

}

sdl2_uninit();

release_v4l2_resource();

return 0;

}
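
To run the capture/display program, pass the capture device on the command line, e.g. ./v4l2 /dev/video0 (use whatever node your camera registers as). It records raw frames to 2.yuv while showing the live picture; closing the window (SDL_QUIT) ends the loop.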

 

 

File x264.c (the video compression project)

//build command: gcc x264.c -L/usr/local/x264/lib/ -I/usr/local/x264/include/ -lx264 -o x264

//libx264 is installed under /usr/local/x264/; adjust the command to your own installation path

#include <stdio.h>

#include <stdlib.h>

#include "stdint.h"

#if defined ( __cplusplus)

extern "C"

{

#include "x264.h"

};

#else

#include "x264.h"

#endif

int main(int argc, char** argv)  

{  

int ret;

int y_size;

int i,j;

//number of frames to encode

//if set to 0, all frames in the input file are encoded

int frame_num = 0;

const int csp = X264_CSP_I422;

int width = 640;

int height = 480;

int iNal = 0;

x264_nal_t* pNals = NULL;

x264_t* pHandle   = NULL;

x264_picture_t* pPic_in = (x264_picture_t*)malloc(sizeof(x264_picture_t));

x264_picture_t* pPic_out = (x264_picture_t*)malloc(sizeof(x264_picture_t));

x264_param_t* pParam = (x264_param_t*)malloc(sizeof(x264_param_t));

FILE* fp_src  = fopen("2.yuv", "rb");

FILE* fp_dst = fopen("x.h264", "wb");

//Check

if(fp_src==NULL||fp_dst==NULL)

{

printf("Error open files.\n");

return -1;

}

x264_param_default(pParam);

pParam->i_width   = width;

pParam->i_height  = height;

//Param

/*

pParam->i_log_level  = X264_LOG_DEBUG;

pParam->i_threads = X264_SYNC_LOOKAHEAD_AUTO;

pParam->i_frame_total = 0;

pParam->i_keyint_max = 10;

pParam->i_bframe  = 0;

pParam->b_open_gop  = 0;

pParam->i_bframe_pyramid = 0;

pParam->rc.i_qp_constant=0;

pParam->rc.i_qp_max=0;

pParam->rc.i_qp_min=0;

pParam->i_bframe_adaptive = X264_B_ADAPT_TRELLIS;

pParam->i_fps_den = 1;

pParam->i_fps_num = 25;

pParam->i_timebase_den = pParam->i_fps_num;

pParam->i_timebase_num = pParam->i_fps_den;

*/

pParam->i_csp = csp;

x264_param_apply_profile(pParam, x264_profile_names[4]);

pHandle = x264_encoder_open(pParam);

x264_picture_init(pPic_out);

x264_picture_alloc(pPic_in, csp, pParam->i_width, pParam->i_height);

y_size = pParam->i_width * pParam->i_height;

printf("w:%d h:%d\r\n",pParam->i_width,pParam->i_height);

//detect frame number

if(frame_num==0)

{

fseek(fp_src,0,SEEK_END);

switch(csp)

{

case X264_CSP_I444:

frame_num=ftell(fp_src)/(y_size*3);

break;

case X264_CSP_I420:

frame_num=ftell(fp_src)/(y_size*3/2);

break;

case X264_CSP_I422:

frame_num=ftell(fp_src)/(y_size*2);

break;

default:

printf("Colorspace Not Support.\n");

return -1;

}

fseek(fp_src,0,SEEK_SET);

}

printf("frame_num:%d y_size:%d\r\n",frame_num,y_size);

//Loop to Encode  

for( i=0;i<frame_num;i++)

{

switch(csp)

{

case X264_CSP_I444:

{

fread(pPic_in->img.plane[0],y_size,1,fp_src);   //Y

fread(pPic_in->img.plane[1],y_size,1,fp_src);   //U

fread(pPic_in->img.plane[2],y_size,1,fp_src);   //V

break;

}

case X264_CSP_I420:

{

fread(pPic_in->img.plane[0],y_size,1,fp_src); //Y

fread(pPic_in->img.plane[1],y_size/4,1,fp_src); //U

fread(pPic_in->img.plane[2],y_size/4,1,fp_src); //V

break;

}

case X264_CSP_I422:

{

int index = 0;

int y_i  = 0 , u_i  = 0 , v_i = 0;

for(index = 0 ; index < y_size*2 ;)

{

fread(&pPic_in->img.plane[0][y_i++],1,1,fp_src);//Y

index++;

fread(&pPic_in->img.plane[1][u_i++],1,1,fp_src);//U

index++;

fread(&pPic_in->img.plane[0][y_i++],1,1,fp_src);//Y

index++;

fread(&pPic_in->img.plane[2][v_i++],1,1,fp_src);//V

index++;

}

break;

}

default:

{

printf("Colorspace Not Support.\n");

return -1;

}

}

pPic_in->i_pts = i;

ret = x264_encoder_encode(pHandle, &pNals, &iNal, pPic_in, pPic_out);

if (ret< 0)

{

printf("Error.\n");

return -1;

}

for ( j = 0; j < iNal; ++j)

fwrite(pNals[j].p_payload, 1, pNals[j].i_payload, fp_dst);

}

//flush encoder

while(1)

{

ret = x264_encoder_encode(pHandle, &pNals, &iNal, NULL, pPic_out);  

if(ret==0)

break;

//printf("Flush 1 frame.\n");

for(j = 0;j < iNal; ++j)

fwrite(pNals[j].p_payload, 1, pNals[j].i_payload, fp_dst);

}

x264_picture_clean(pPic_in);

x264_encoder_close(pHandle);

pHandle = NULL;

free(pPic_in);

free(pPic_out);

free(pParam);

fclose(fp_src);

fclose(fp_dst);

return 0;

}
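
The encoder reads 2.yuv from the current directory and writes x.h264, a raw Annex B H.264 elementary stream; a player that accepts elementary streams (ffplay, for example) can be used to check the result.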