3.6 Analysis of the mxc_v4l_ioctl function


The following analyzes the mxc_v4l_ioctl function:

static long mxc_v4l_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	pr_debug("In MVC:mxc_v4l_ioctl\n");
	return video_usercopy(file, cmd, arg, mxc_v4l_do_ioctl);
}

It calls video_usercopy, which eventually invokes mxc_v4l_do_ioctl. Inside mxc_v4l_do_ioctl there is a switch statement that selects what to execute according to the ioctl command passed in. The commands are analyzed one by one below.


We go through them in the order in which a typical camera application issues them.

1.VIDIOC_QUERYCAP

case VIDIOC_QUERYCAP: {
	struct v4l2_capability *cap = arg;
	pr_debug("   case VIDIOC_QUERYCAP\n");
	strcpy(cap->driver, "mxc_v4l2");
	cap->version = KERNEL_VERSION(0, 1, 11);
	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
			    V4L2_CAP_VIDEO_OVERLAY |
			    V4L2_CAP_STREAMING |
			    V4L2_CAP_READWRITE;
	cap->card[0] = '\0';
	cap->bus_info[0] = '\0';
	break;
}

Its purpose is simple: the application is basically asking "who are you, and what can you do?". The driver fills in the v4l2_capability structure cap passed in by the application, and the application can then use those values. The driver name is set to "mxc_v4l2", and the capabilities field advertises V4L2_CAP_VIDEO_CAPTURE, V4L2_CAP_VIDEO_OVERLAY, V4L2_CAP_STREAMING and V4L2_CAP_READWRITE.
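For reference, this is roughly how an application exercises this case (a sketch under my own assumptions: the helper name query_capabilities and the error handling are mine, not part of the driver):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Query the identity and capabilities filled in by VIDIOC_QUERYCAP. */
int query_capabilities(int fd)
{
	struct v4l2_capability cap;

	memset(&cap, 0, sizeof(cap));
	if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0) {
		perror("VIDIOC_QUERYCAP");
		return -1;
	}

	printf("driver: %s\n", cap.driver);	/* "mxc_v4l2" for this driver */
	if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) ||
	    !(cap.capabilities & V4L2_CAP_STREAMING)) {
		fprintf(stderr, "capture/streaming not supported\n");
		return -1;
	}
	return 0;
}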


2.VIDIOC_S_INPUT

case VIDIOC_S_INPUT: {
	int *index = arg;
	pr_debug("   case VIDIOC_S_INPUT\n");
	if (*index >= MXC_V4L2_CAPTURE_NUM_INPUTS) {
		retval = -EINVAL;
		break;
	}

/* Validate the arg value passed in from the application; MXC_V4L2_CAPTURE_NUM_INPUTS == 2. */

	if (*index == cam->current_input)
		break;

/* cam->current_input was initialized to 0 in init_camera_struct. If the requested input is the same as the current one, just return. */

	if ((mxc_capture_inputs[cam->current_input].status &
	    V4L2_IN_ST_NO_POWER) == 0) {
		retval = mxc_streamoff(cam);
		if (retval)
			break;
		mxc_capture_inputs[cam->current_input].status |=
			V4L2_IN_ST_NO_POWER;
	}

/* The mxc_capture_inputs[] array is defined as follows:

static struct v4l2_input mxc_capture_inputs[MXC_V4L2_CAPTURE_NUM_INPUTS] = {
	{
		.index = 0,
		.name = "CSI IC MEM",
		.type = V4L2_INPUT_TYPE_CAMERA,
		.audioset = 0,
		.tuner = 0,
		.std = V4L2_STD_UNKNOWN,
		.status = 0,
	},
	{
		.index = 1,
		.name = "CSI MEM",
		.type = V4L2_INPUT_TYPE_CAMERA,
		.audioset = 0,
		.tuner = 0,
		.std = V4L2_STD_UNKNOWN,
		.status = V4L2_IN_ST_NO_POWER,
	},
};

Since mxc_capture_inputs[0].status == 0 (and current_input defaults to 0), the body of the if statement above is not executed on the first call.
*/

	if (strcmp(mxc_capture_inputs[*index].name, "CSI MEM") == 0) {
#if defined(CONFIG_MXC_IPU_CSI_ENC) || defined(CONFIG_MXC_IPU_CSI_ENC_MODULE)
		retval = csi_enc_select(cam);
		if (retval)
			break;
#endif
	} else if (strcmp(mxc_capture_inputs[*index].name,
		   "CSI IC MEM") == 0) {
#if defined(CONFIG_MXC_IPU_PRP_ENC) || defined(CONFIG_MXC_IPU_PRP_ENC_MODULE)
		retval = prp_enc_select(cam);
		if (retval)
			break;
#endif
	}

/* Based on mxc_capture_inputs[*index].name one of these two functions is chosen. As analyzed earlier, both of them fill in several function pointers in the cam_data structure. */

	mxc_capture_inputs[*index].status &= ~V4L2_IN_ST_NO_POWER;
	cam->current_input = *index;

/* Clear the V4L2_IN_ST_NO_POWER bit of mxc_capture_inputs[*index].status, then make cam->current_input point to this mxc_capture_inputs[*index] entry. */

	break;
}
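From the application side, selecting the "CSI IC MEM" input (index 0 in mxc_capture_inputs[] above) would look roughly like this (a sketch; the helper name select_input and the choice of index 0 are mine, chosen only to mirror the array above):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Select capture input 0 ("CSI IC MEM"); the driver then calls
 * prp_enc_select() and clears V4L2_IN_ST_NO_POWER for that input. */
int select_input(int fd)
{
	int index = 0;

	if (ioctl(fd, VIDIOC_S_INPUT, &index) < 0) {
		perror("VIDIOC_S_INPUT");
		return -1;
	}
	return 0;
}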

3. VIDIOC_CROPCAP

case VIDIOC_CROPCAP: {
	struct v4l2_cropcap *cap = arg;
	pr_debug("   case VIDIOC_CROPCAP\n");
	if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
	    cap->type != V4L2_BUF_TYPE_VIDEO_OVERLAY) {
		retval = -EINVAL;
		break;
	}

/* Check that the type of the v4l2_cropcap *cap passed in is either V4L2_BUF_TYPE_VIDEO_CAPTURE or V4L2_BUF_TYPE_VIDEO_OVERLAY; the application sets it to V4L2_BUF_TYPE_VIDEO_CAPTURE. */

	cap->bounds = cam->crop_bounds;
	cap->defrect = cam->crop_defrect;

/* Copy cam->crop_bounds and cam->crop_defrect into the bounds and defrect fields of the v4l2_cropcap *cap passed in; both values were set up in mxc_v4l_open. */

	break;
}
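A matching user-space call simply reads back the bounds and default rectangle that mxc_v4l_open stored in cam (a sketch; query_cropcap and the printing are my own illustration):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Read the cropping capabilities (bounds/defrect set up in mxc_v4l_open). */
int query_cropcap(int fd)
{
	struct v4l2_cropcap cropcap;

	memset(&cropcap, 0, sizeof(cropcap));
	cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	/* required by the driver */
	if (ioctl(fd, VIDIOC_CROPCAP, &cropcap) < 0) {
		perror("VIDIOC_CROPCAP");
		return -1;
	}

	printf("bounds:  %dx%d\n", cropcap.bounds.width, cropcap.bounds.height);
	printf("defrect: %dx%d\n", cropcap.defrect.width, cropcap.defrect.height);
	return 0;
}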


4. VIDIOC_S_PARM

case VIDIOC_S_PARM: {
	struct v4l2_streamparm *parm = arg;
	pr_debug("   case VIDIOC_S_PARM\n");
	if (cam->sensor)
		retval = mxc_v4l2_s_param(cam, parm);
	else {
		pr_err("ERROR: v4l2 capture: slave not found!\n");
		retval = -ENODEV;
	}
	break;
}

This case simply dispatches to mxc_v4l2_s_param:

static int mxc_v4l2_s_param(cam_data *cam, struct v4l2_streamparm *parm)
{
	struct v4l2_ifparm ifparm;
	struct v4l2_format cam_fmt;
	struct v4l2_streamparm currentparm;
	ipu_csi_signal_cfg_t csi_param;
	u32 current_fps, parm_fps;
	int err = 0;

	pr_debug("In mxc_v4l2_s_param\n");

	if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		pr_err(KERN_ERR "mxc_v4l2_s_param invalid type\n");
		return -EINVAL;
	}

/* Check that parm->type is V4L2_BUF_TYPE_VIDEO_CAPTURE; the application therefore has to set this field. */

	/* Stop the viewfinder */
	if (cam->overlay_on == true)
		stop_preview(cam);

/* overlay_on was initialized to false in init_camera_struct. */

	currentparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

	/* First check that this device can support the changes requested. */
	err = vidioc_int_g_parm(cam->sensor, &currentparm);
	if (err) {
		pr_err("%s: vidioc_int_g_parm returned an error %d\n",
		       __func__, err);
		goto exit;
	}

/* The vidioc_int_g_parm call, taking ov5640.c as an example, eventually ends up in its ioctl_g_parm function:

static int ioctl_g_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a)
{
	struct sensor_data *sensor = s->priv;
	struct v4l2_captureparm *cparm = &a->parm.capture;
	int ret = 0;

	switch (a->type) {
	/* This is the only case currently handled. */
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		memset(a, 0, sizeof(*a));
		a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		cparm->capability = sensor->streamcap.capability;
		cparm->timeperframe = sensor->streamcap.timeperframe;
		cparm->capturemode = sensor->streamcap.capturemode;
		ret = 0;
		break;

	/* These are all the possible cases. */
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
	case V4L2_BUF_TYPE_VBI_CAPTURE:
	case V4L2_BUF_TYPE_VBI_OUTPUT:
	case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
	case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
		ret = -EINVAL;
		break;

	default:
		pr_debug("   type is unknown - %d\n", a->type);
		ret = -EINVAL;
		break;
	}

	return ret;
}

The purpose of this function is to fill currentparm.parm.capture from the values stored in cam->sensor->priv; in other words, currentparm ends up holding the sensor's current streaming parameters.
*/

	current_fps = currentparm.parm.capture.timeperframe.denominator
		      / currentparm.parm.capture.timeperframe.numerator;
	parm_fps = parm->parm.capture.timeperframe.denominator
		   / parm->parm.capture.timeperframe.numerator;

/* Compute current_fps and parm_fps: current_fps is the sensor's current frame rate, parm_fps is the frame rate requested in the new parameters. */

pr_debug("   Current capabilities are %x\n", currentparm.parm.capture.capability); pr_debug("   Current capturemode is %d  change to %d\n", currentparm.parm.capture.capturemode, parm->parm.capture.capturemode); pr_debug("   Current framerate is %d  change to %d\n", current_fps, parm_fps); /* This will change any camera settings needed. */ err = vidioc_int_s_parm(cam->sensor, parm); if (err) { pr_err("%s: vidioc_int_s_parm returned an error %d\n", __func__, err); goto exit; } 

/* The vidioc_int_s_parm call eventually lands in ioctl_s_parm in ov5640.c, shown below:

static int ioctl_s_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a)
{
	struct sensor_data *sensor = s->priv;
	struct v4l2_fract *timeperframe = &a->parm.capture.timeperframe;
	u32 tgt_fps;	/* target frames per secound */
	enum ov5640_frame_rate frame_rate;
	int ret = 0;

	/* Make sure power on */
	ov5640_power_down(0);

	switch (a->type) {
	/* This is the only case currently handled. */
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		/* Check that the new frame rate is allowed. */
		if ((timeperframe->numerator == 0) ||
		    (timeperframe->denominator == 0)) {
			timeperframe->denominator = DEFAULT_FPS;
			timeperframe->numerator = 1;
		}

		tgt_fps = timeperframe->denominator /
			  timeperframe->numerator;

		if (tgt_fps > MAX_FPS) {
			timeperframe->denominator = MAX_FPS;
			timeperframe->numerator = 1;
		} else if (tgt_fps < MIN_FPS) {
			timeperframe->denominator = MIN_FPS;
			timeperframe->numerator = 1;
		}

		/* Actual frame rate we use */
		tgt_fps = timeperframe->denominator /
			  timeperframe->numerator;

		if (tgt_fps == 15)
			frame_rate = ov5640_15_fps;
		else if (tgt_fps == 30)
			frame_rate = ov5640_30_fps;
		else {
			pr_err(" The camera frame rate is not supported!\n");
			return -EINVAL;
		}

		ret = ov5640_change_mode(frame_rate,
					 a->parm.capture.capturemode);
		if (ret < 0)
			return ret;

		sensor->streamcap.timeperframe = *timeperframe;
		sensor->streamcap.capturemode = a->parm.capture.capturemode;
		break;

	/* These are all the possible cases. */
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
	case V4L2_BUF_TYPE_VBI_CAPTURE:
	case V4L2_BUF_TYPE_VBI_OUTPUT:
	case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
	case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
		pr_debug("   type is not " \
			"V4L2_BUF_TYPE_VIDEO_CAPTURE but %d\n",
			a->type);
		ret = -EINVAL;
		break;

	default:
		pr_debug("   type is unknown - %d\n", a->type);
		ret = -EINVAL;
		break;
	}

	return ret;
}

This function first validates the timeperframe->numerator and timeperframe->denominator values passed in from the application, computes tgt_fps from them, and finally calls ov5640_change_mode to program the OV5640's registers. The frame rate and related settings requested by the application are pushed down through the V4L2 framework into the sensor itself, because the sensor is what actually captures the data.
*/

/* To sum up, mxc_v4l2_s_param uses vidioc_int_g_parm and vidioc_int_s_parm to read the sensor's current parm settings and then modify them. */

	/* If resolution changed, need to re-program the CSI */
	/* Get new values. */
	vidioc_int_g_ifparm(cam->sensor, &ifparm);

/* The calls above have already changed the settings inside the sensor. If the resolution changed, the CSI interface has to be reprogrammed as well, so vidioc_int_g_ifparm is called here to read back the sensor's ifparm (interface) parameters, which are used below. */

	csi_param.data_width = 0;
	csi_param.clk_mode = 0;
	csi_param.ext_vsync = 0;
	csi_param.Vsync_pol = 0;
	csi_param.Hsync_pol = 0;
	csi_param.pixclk_pol = 0;
	csi_param.data_pol = 0;
	csi_param.sens_clksrc = 0;
	csi_param.pack_tight = 0;
	csi_param.force_eof = 0;
	csi_param.data_en_pol = 0;
	csi_param.data_fmt = 0;
	csi_param.csi = cam->csi;
	csi_param.mclk = 0;

	pr_debug("   clock_curr=mclk=%d\n", ifparm.u.bt656.clock_curr);
	if (ifparm.u.bt656.clock_curr == 0)
		csi_param.clk_mode = IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
	else
		csi_param.clk_mode = IPU_CSI_CLK_MODE_GATED_CLK;

	csi_param.pixclk_pol = ifparm.u.bt656.latch_clk_inv;

	if (ifparm.u.bt656.mode == V4L2_IF_TYPE_BT656_MODE_NOBT_8BIT) {
		csi_param.data_width = IPU_CSI_DATA_WIDTH_8;
	} else if (ifparm.u.bt656.mode
				== V4L2_IF_TYPE_BT656_MODE_NOBT_10BIT) {
		csi_param.data_width = IPU_CSI_DATA_WIDTH_10;
	} else {
		csi_param.data_width = IPU_CSI_DATA_WIDTH_8;
	}

	csi_param.Vsync_pol = ifparm.u.bt656.nobt_vs_inv;
	csi_param.Hsync_pol = ifparm.u.bt656.nobt_hs_inv;
	csi_param.ext_vsync = ifparm.u.bt656.bt_sync_correct;
	/* Fill in the relevant fields of csi_param from ifparm. */

	/* if the capturemode changed, the size bounds will have changed. */
	cam_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	vidioc_int_g_fmt_cap(cam->sensor, &cam_fmt);
	pr_debug("   g_fmt_cap returns widthxheight of input as %d x %d\n",
		 cam_fmt.fmt.pix.width, cam_fmt.fmt.pix.height);
	/* vidioc_int_g_fmt_cap ends up in ioctl_g_fmt_cap in ov5640.c:
	 *
	 * static int ioctl_g_fmt_cap(struct v4l2_int_device *s,
	 *			      struct v4l2_format *f)
	 * {
	 *	struct sensor_data *sensor = s->priv;
	 *
	 *	f->fmt.pix = sensor->pix;
	 *	return 0;
	 * }
	 *
	 * It reads the current format from cam->sensor->priv and returns it
	 * in cam_fmt.fmt.pix.
	 */

	csi_param.data_fmt = cam_fmt.fmt.pix.pixelformat;
	cam->crop_bounds.top = cam->crop_bounds.left = 0;
	cam->crop_bounds.width = cam_fmt.fmt.pix.width;
	cam->crop_bounds.height = cam_fmt.fmt.pix.height;
	/* Use the returned cam_fmt to finish setting up csi_param and
	 * cam->crop_bounds. */

	/*
	 * Set the default current cropped resolution to be the same with
	 * the cropping boundary(except for tvin module).
	 */
	if (cam->device_type != 1) {
		cam->crop_current.width = cam->crop_bounds.width;
		cam->crop_current.height = cam->crop_bounds.height;
	}

	/* This essentially loses the data at the left and bottom of the image
	 * giving a digital zoom image, if crop_current is less than the full
	 * size of the image. */
	ipu_csi_set_window_size(cam->ipu, cam->crop_current.width,
				cam->crop_current.height, cam->csi);
	ipu_csi_set_window_pos(cam->ipu, cam->crop_current.left,
			       cam->crop_current.top,
			       cam->csi);
	ipu_csi_init_interface(cam->ipu, cam->crop_bounds.width,
			       cam->crop_bounds.height,
			       cam_fmt.fmt.pix.pixelformat, csi_param);

exit:
	if (cam->overlay_on == true)
		start_preview(cam);

	return err;
}

/* Finally, ipu_csi_set_window_size and ipu_csi_set_window_pos reprogram the capture window size and position; both functions were already analyzed together with mxc_v4l_open, so they are not repeated here. ipu_csi_init_interface is then called to program the low-level CSI registers. */
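Putting the application side together, a VIDIOC_S_PARM request asking the OV5640 for 30 fps could look like the sketch below (set_frame_rate is my own helper name; 30 fps and capturemode 0 are example values, chosen only because ioctl_s_parm above accepts 15 or 30 fps):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Request 30 fps; the driver forwards this to ioctl_s_parm in ov5640.c,
 * which programs the sensor via ov5640_change_mode(). */
int set_frame_rate(int fd)
{
	struct v4l2_streamparm parm;

	memset(&parm, 0, sizeof(parm));
	parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	/* checked by mxc_v4l2_s_param */
	parm.parm.capture.timeperframe.numerator = 1;
	parm.parm.capture.timeperframe.denominator = 30;	/* 30 fps */
	parm.parm.capture.capturemode = 0;	/* sensor mode index (example value) */

	if (ioctl(fd, VIDIOC_S_PARM, &parm) < 0) {
		perror("VIDIOC_S_PARM");
		return -1;
	}
	return 0;
}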


5.VIDIOC_S_FMT

case VIDIOC_S_FMT: {
	struct v4l2_format *sf = arg;
	pr_debug("   case VIDIOC_S_FMT\n");
	retval = mxc_v4l2_s_fmt(cam, sf);
	break;
}

It dispatches to mxc_v4l2_s_fmt:

static int mxc_v4l2_s_fmt(cam_data *cam, struct v4l2_format *f)
{
	int retval = 0;
	int size = 0;
	int bytesperline = 0;
	int *width, *height;

	pr_debug("In MVC: mxc_v4l2_s_fmt\n");

	switch (f->type) {
	/* The application passes in V4L2_BUF_TYPE_VIDEO_CAPTURE. */
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		pr_debug("   type=V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
		if (!valid_mode(f->fmt.pix.pixelformat)) {
			pr_err("ERROR: v4l2 capture: mxc_v4l2_s_fmt: format "
			       "not supported\n");
			return -EINVAL;
		}

		/*
		 * Force the capture window resolution to be crop bounds
		 * for CSI MEM input mode.
		 */
		if (strcmp(mxc_capture_inputs[cam->current_input].name,
			   "CSI MEM") == 0) {
			f->fmt.pix.width = cam->crop_current.width;
			f->fmt.pix.height = cam->crop_current.height;
		}
		/* Set fmt.pix.width and fmt.pix.height; cam->crop_current
		 * was filled in by the previous ioctl (VIDIOC_S_PARM). */

		if (cam->rotation >= IPU_ROTATE_90_RIGHT) {
			height = &f->fmt.pix.width;
			width = &f->fmt.pix.height;
		} else {
			width = &f->fmt.pix.width;
			height = &f->fmt.pix.height;
		}
		/* Depending on cam->rotation, width and height may be
		 * swapped. */

		/* stride line limitation */
		*width -= *width % 8;
		*height -= *height % 8;
		/* Round width and height down to multiples of 8. */

		if (*width == 0 || *height == 0) {
			pr_err("ERROR: v4l2 capture: width or height"
			       " too small.\n");
			return -EINVAL;
		}

		if ((cam->crop_current.width / *width > 8) ||
		    ((cam->crop_current.width / *width == 8) &&
		     (cam->crop_current.width % *width))) {
			*width = cam->crop_current.width / 8;
			if (*width % 8)
				*width += 8 - *width % 8;
			pr_err("ERROR: v4l2 capture: width exceeds limit "
			       "resize to %d.\n", *width);
		}

		if ((cam->crop_current.height / *height > 8) ||
		    ((cam->crop_current.height / *height == 8) &&
		     (cam->crop_current.height % *height))) {
			*height = cam->crop_current.height / 8;
			if (*height % 8)
				*height += 8 - *height % 8;
			pr_err("ERROR: v4l2 capture: height exceeds limit "
			       "resize to %d.\n", *height);
		}
		/* The two checks above make sure the requested size is not
		 * more than 8 times smaller than the current crop rectangle;
		 * if it is, the size is clamped to crop_current / 8, rounded
		 * up to a multiple of 8. */

		switch (f->fmt.pix.pixelformat) {
		case V4L2_PIX_FMT_RGB565:
			size = f->fmt.pix.width * f->fmt.pix.height * 2;
			bytesperline = f->fmt.pix.width * 2;
			break;
		case V4L2_PIX_FMT_BGR24:
			size = f->fmt.pix.width * f->fmt.pix.height * 3;
			bytesperline = f->fmt.pix.width * 3;
			break;
		case V4L2_PIX_FMT_RGB24:
			size = f->fmt.pix.width * f->fmt.pix.height * 3;
			bytesperline = f->fmt.pix.width * 3;
			break;
		case V4L2_PIX_FMT_BGR32:
			size = f->fmt.pix.width * f->fmt.pix.height * 4;
			bytesperline = f->fmt.pix.width * 4;
			break;
		case V4L2_PIX_FMT_RGB32:
			size = f->fmt.pix.width * f->fmt.pix.height * 4;
			bytesperline = f->fmt.pix.width * 4;
			break;
		case V4L2_PIX_FMT_YUV422P:
			size = f->fmt.pix.width * f->fmt.pix.height * 2;
			bytesperline = f->fmt.pix.width;
			break;
		case V4L2_PIX_FMT_UYVY:
		case V4L2_PIX_FMT_YUYV:
			size = f->fmt.pix.width * f->fmt.pix.height * 2;
			bytesperline = f->fmt.pix.width * 2;
			break;
		case V4L2_PIX_FMT_YUV420:
		case V4L2_PIX_FMT_YVU420:
			size = f->fmt.pix.width * f->fmt.pix.height * 3 / 2;
			bytesperline = f->fmt.pix.width;
			break;
		case V4L2_PIX_FMT_NV12:
			size = f->fmt.pix.width * f->fmt.pix.height * 3 / 2;
			bytesperline = f->fmt.pix.width;
			break;
		default:
			break;
		}
		/* Compute size and bytesperline from fmt.pix.pixelformat:
		 * size is the total image size in bytes, bytesperline is the
		 * number of bytes occupied by one line. */

		if (f->fmt.pix.bytesperline < bytesperline)
			f->fmt.pix.bytesperline = bytesperline;
		else
			bytesperline = f->fmt.pix.bytesperline;

		if (f->fmt.pix.sizeimage < size)
			f->fmt.pix.sizeimage = size;
		else
			size = f->fmt.pix.sizeimage;

		cam->v2f.fmt.pix = f->fmt.pix;

		if (cam->v2f.fmt.pix.priv != 0) {
			if (copy_from_user(&cam->offset,
					   (void *)cam->v2f.fmt.pix.priv,
					   sizeof(cam->offset))) {
				retval = -EFAULT;
				break;
			}
		}
		break;
	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
		pr_debug("   type=V4L2_BUF_TYPE_VIDEO_OVERLAY\n");
		retval = verify_preview(cam, &f->fmt.win);
		cam->win = f->fmt.win;
		break;
	default:
		retval = -EINVAL;
	}

	pr_debug("End of %s: v2f pix widthxheight %d x %d\n",
		 __func__,
		 cam->v2f.fmt.pix.width, cam->v2f.fmt.pix.height);
	pr_debug("End of %s: crop_bounds widthxheight %d x %d\n",
		 __func__,
		 cam->crop_bounds.width, cam->crop_bounds.height);
	pr_debug("End of %s: crop_defrect widthxheight %d x %d\n",
		 __func__,
		 cam->crop_defrect.width, cam->crop_defrect.height);
	pr_debug("End of %s: crop_current widthxheight %d x %d\n",
		 __func__,
		 cam->crop_current.width, cam->crop_current.height);

	return retval;
}

Note the assignment cam->v2f.fmt.pix = f->fmt.pix;. By this point, every parameter passed in (or adjusted) for the application has been finalized, and this function stores the result in cam->v2f. The fields of cam->v2f.fmt.pix (width, height, pixelformat, bytesperline, sizeimage and so on) are used again later, for example when the buffer memory is allocated.
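A matching user-space call, requesting for example 640x480 UYVY (set_format and the example values are my own illustration), would be:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Request width x height in UYVY. For UYVY the driver computes
 * bytesperline = width * 2 and sizeimage = width * height * 2, and stores
 * the final values in cam->v2f. */
int set_format(int fd, unsigned int width, unsigned int height)
{
	struct v4l2_format fmt;

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	fmt.fmt.pix.width = width;		/* e.g. 640 */
	fmt.fmt.pix.height = height;		/* e.g. 480 */
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY;

	if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0) {
		perror("VIDIOC_S_FMT");
		return -1;
	}

	/* The driver writes the adjusted values back into fmt. */
	printf("got %ux%u, bytesperline=%u, sizeimage=%u\n",
	       fmt.fmt.pix.width, fmt.fmt.pix.height,
	       fmt.fmt.pix.bytesperline, fmt.fmt.pix.sizeimage);
	return 0;
}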


6.VIDIOC_G_FMT

case VIDIOC_G_FMT: {
	struct v4l2_format *gf = arg;
	pr_debug("   case VIDIOC_G_FMT\n");
	retval = mxc_v4l2_g_fmt(cam, gf);
	break;
}

static int mxc_v4l2_g_fmt(cam_data *cam, struct v4l2_format *f)
{
	int retval = 0;

	pr_debug("In MVC: mxc_v4l2_g_fmt type=%d\n", f->type);

	switch (f->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		pr_debug("   type is V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
		f->fmt.pix = cam->v2f.fmt.pix;
		break;
	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
		pr_debug("   type is V4L2_BUF_TYPE_VIDEO_OVERLAY\n");
		f->fmt.win = cam->win;
		break;
	default:
		pr_debug("   type is invalid\n");
		retval = -EINVAL;
	}

	pr_debug("End of %s: v2f pix widthxheight %d x %d\n",
		 __func__,
		 cam->v2f.fmt.pix.width, cam->v2f.fmt.pix.height);
	pr_debug("End of %s: crop_bounds widthxheight %d x %d\n",
		 __func__,
		 cam->crop_bounds.width, cam->crop_bounds.height);
	pr_debug("End of %s: crop_defrect widthxheight %d x %d\n",
		 __func__,
		 cam->crop_defrect.width, cam->crop_defrect.height);
	pr_debug("End of %s: crop_current widthxheight %d x %d\n",
		 __func__,
		 cam->crop_current.width, cam->crop_current.height);

	return retval;
}

There is not much to this ioctl. The previous VIDIOC_S_FMT finalized the format settings; the application then calls VIDIOC_G_FMT to read the resulting v4l2_format (fmt) values back, since it needs them later.


7.VIDIOC_REQBUFS

case VIDIOC_REQBUFS: {
	struct v4l2_requestbuffers *req = arg;
	pr_debug("   case VIDIOC_REQBUFS\n");

	if (req->count > FRAME_NUM) {
		pr_err("ERROR: v4l2 capture: VIDIOC_REQBUFS: "
		       "not enough buffers\n");
		req->count = FRAME_NUM;
	}
	/* Check that the requested number of buffers does not exceed the
	 * maximum. */

	if ((req->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)) {
		pr_err("ERROR: v4l2 capture: VIDIOC_REQBUFS: "
		       "wrong buffer type\n");
		retval = -EINVAL;
		break;
	}

	mxc_streamoff(cam);

/* mxc_streamoff is also defined in this file:

static int mxc_streamoff(cam_data *cam)
{
	int err = 0;

	pr_debug("In MVC:mxc_streamoff\n");

	if (cam->capture_on == false)
		return 0;

	/* For both CSI--MEM and CSI--IC--MEM
	 * 1. wait for idmac eof
	 * 2. disable csi first
	 * 3. disable idmac
	 * 4. disable smfc (CSI--MEM channel)
	 */
	if (mxc_capture_inputs[cam->current_input].name != NULL) {
		if (cam->enc_disable_csi) {
			err = cam->enc_disable_csi(cam);
			if (err != 0)
				return err;
		}
		if (cam->enc_disable) {
			err = cam->enc_disable(cam);
			if (err != 0)
				return err;
		}
	}

	mxc_free_frames(cam);
	mxc_capture_inputs[cam->current_input].status |= V4L2_IN_ST_NO_POWER;
	cam->capture_on = false;
	return err;
}

It calls cam->enc_disable_csi(cam) and cam->enc_disable(cam) to shut down the CSI path (both functions live in ipu_csi_enc.c), then calls mxc_free_frames(cam) to reset the state of the frame buffers, mainly by setting

	cam->frame[i].buffer.flags = V4L2_BUF_FLAG_MAPPED;
*/

	if (req->memory & V4L2_MEMORY_MMAP) {
		/* Free any previously allocated buffers, mainly via
		 * dma_free_coherent(). */
		mxc_free_frame_buf(cam);
		retval = mxc_allocate_frame_buf(cam, req->count);
	}
	break;
}

/* Finally, mxc_allocate_frame_buf is called to allocate the buffers:

static int mxc_allocate_frame_buf(cam_data *cam, int count)
{
	int i;

	pr_debug("In MVC:mxc_allocate_frame_buf - size=%d\n",
		 cam->v2f.fmt.pix.sizeimage);

	for (i = 0; i < count; i++) {
		cam->frame[i].vaddress =
		    dma_alloc_coherent(0,
				       PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage),
				       &cam->frame[i].paddress,
				       GFP_DMA | GFP_KERNEL);
		if (cam->frame[i].vaddress == 0) {
			pr_err("ERROR: v4l2 capture: "
			       "mxc_allocate_frame_buf failed.\n");
			mxc_free_frame_buf(cam);
			return -ENOBUFS;
		}
		cam->frame[i].buffer.index = i;
		cam->frame[i].buffer.flags = V4L2_BUF_FLAG_MAPPED;
		cam->frame[i].buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		cam->frame[i].buffer.length =
		    PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage);
		cam->frame[i].buffer.memory = V4L2_MEMORY_MMAP;
		cam->frame[i].buffer.m.offset = cam->frame[i].paddress;
		cam->frame[i].index = i;
	}

	return 0;
}

This function is not hard to follow: for each of the buffers requested by the application it calls dma_alloc_coherent to allocate the memory and then fills in the buffer's bookkeeping fields. Note that at this point cam->frame[i].buffer.flags is V4L2_BUF_FLAG_MAPPED.
*/
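From user space, requesting the buffers is just the following (a sketch; the helper name request_buffers is mine, and a count of 4 is only an example, which the driver will cap at FRAME_NUM anyway):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Ask the driver to allocate DMA buffers; returns the count actually granted. */
int request_buffers(int fd, unsigned int count)
{
	struct v4l2_requestbuffers req;

	memset(&req, 0, sizeof(req));
	req.count = count;			/* e.g. 4 */
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;		/* triggers mxc_allocate_frame_buf */

	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0) {
		perror("VIDIOC_REQBUFS");
		return -1;
	}
	return req.count;
}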


8. The mmap function (mxc_mmap):

static int mxc_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct video_device *dev = video_devdata(file);
	unsigned long size;
	int res = 0;
	cam_data *cam = video_get_drvdata(dev);

	pr_debug("In MVC:mxc_mmap\n");
	pr_debug("   pgoff=0x%lx, start=0x%lx, end=0x%lx\n",
		 vma->vm_pgoff, vma->vm_start, vma->vm_end);

	/* make this _really_ smp-safe */
	if (down_interruptible(&cam->busy_lock))
		return -EINTR;

	size = vma->vm_end - vma->vm_start;
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	/* vma->vm_pgoff is the mmap offset >> PAGE_SHIFT. Since the
	 * application passes buf.m.offset (the buffer's physical address,
	 * paddress) as the offset, vm_pgoff is exactly the physical page
	 * frame number to map. */
	if (remap_pfn_range(vma, vma->vm_start,
			    vma->vm_pgoff, size, vma->vm_page_prot)) {
		pr_err("ERROR: v4l2 capture: mxc_mmap: "
		       "remap_pfn_range failed\n");
		res = -ENOBUFS;
		goto mxc_mmap_exit;
	}

	vma->vm_flags &= ~VM_IO;	/* using shared anonymous pages */

mxc_mmap_exit:
	up(&cam->busy_lock);
	return res;
}


9.VIDIOC_QUERYBUF

case VIDIOC_QUERYBUF: {
	struct v4l2_buffer *buf = arg;
	int index = buf->index;
	pr_debug("   case VIDIOC_QUERYBUF\n");

	if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		pr_err("ERROR: v4l2 capture: "
		       "VIDIOC_QUERYBUFS: "
		       "wrong buffer type\n");
		retval = -EINVAL;
		break;
	}

	if (buf->memory & V4L2_MEMORY_MMAP) {
		memset(buf, 0, sizeof(buf));
		buf->index = index;
	}

	down(&cam->param_lock);
	if (buf->memory & V4L2_MEMORY_USERPTR) {
		mxc_v4l2_release_bufs(cam);
		retval = mxc_v4l2_prepare_bufs(cam, buf);
	}

	if (buf->memory & V4L2_MEMORY_MMAP)
		retval = mxc_v4l2_buffer_status(cam, buf);
	up(&cam->param_lock);
	break;
}

After a few checks, for the V4L2_MEMORY_MMAP case this ends up calling mxc_v4l2_buffer_status:

static int mxc_v4l2_buffer_status(cam_data *cam, struct v4l2_buffer *buf)
{
	pr_debug("In MVC:mxc_v4l2_buffer_status\n");

	if (buf->index < 0 || buf->index >= FRAME_NUM) {
		pr_err("ERROR: v4l2 capture: mxc_v4l2_buffer_status buffers "
		       "not allocated\n");
		return -EINVAL;
	}

	memcpy(buf, &(cam->frame[buf->index].buffer), sizeof(*buf));
	return 0;
}

This function also just performs a range check and then memcpy's the v4l2_buffer descriptor that mxc_allocate_frame_buf filled in under VIDIOC_REQBUFS (index, length, flags, m.offset and so on) into buf, which is returned to the application. The m.offset field (the buffer's physical address) is what the application subsequently passes to mmap.
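The application typically queries each buffer and then maps it, feeding buf.m.offset straight into mmap, which lands in mxc_mmap above (a sketch; the helper name map_buffer is mine):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

/* Query buffer 'index' and map it into user space via mxc_mmap. */
void *map_buffer(int fd, unsigned int index, unsigned int *length)
{
	struct v4l2_buffer buf;
	void *start;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = index;
	if (ioctl(fd, VIDIOC_QUERYBUF, &buf) < 0) {
		perror("VIDIOC_QUERYBUF");
		return MAP_FAILED;
	}

	/* buf.m.offset is the buffer's physical address (paddress). */
	start = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
		     MAP_SHARED, fd, buf.m.offset);
	if (start == MAP_FAILED)
		perror("mmap");
	*length = buf.length;
	return start;
}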


10.VIDIOC_QBUF

case VIDIOC_QBUF: {
	struct v4l2_buffer *buf = arg;
	int index = buf->index;
	pr_debug("   case VIDIOC_QBUF\n");

	spin_lock_irqsave(&cam->queue_int_lock, lock_flags);
	if ((cam->frame[index].buffer.flags & 0x7) ==
	    V4L2_BUF_FLAG_MAPPED) {
		cam->frame[index].buffer.flags |=
		    V4L2_BUF_FLAG_QUEUED;
		list_add_tail(&cam->frame[index].queue,
			      &cam->ready_q);
	} else if (cam->frame[index].buffer.
		   flags & V4L2_BUF_FLAG_QUEUED) {
		pr_err("ERROR: v4l2 capture: VIDIOC_QBUF: "
		       "buffer already queued\n");
		retval = -EINVAL;
	} else if (cam->frame[index].buffer.
		   flags & V4L2_BUF_FLAG_DONE) {
		pr_err("ERROR: v4l2 capture: VIDIOC_QBUF: "
		       "overwrite done buffer.\n");
		cam->frame[index].buffer.flags &=
		    ~V4L2_BUF_FLAG_DONE;
		cam->frame[index].buffer.flags |=
		    V4L2_BUF_FLAG_QUEUED;
		retval = -EINVAL;
	}

	buf->flags = cam->frame[index].buffer.flags;
	spin_unlock_irqrestore(&cam->queue_int_lock, lock_flags);
	break;
}

This case branches on cam->frame[index].buffer.flags. In the normal flow the flags at this point are V4L2_BUF_FLAG_MAPPED, so VIDIOC_QBUF adds the V4L2_BUF_FLAG_QUEUED flag and appends the buffer to the cam->ready_q list.

If cam->frame[index].buffer.flags already contains V4L2_BUF_FLAG_QUEUED, it returns -EINVAL directly.

If the flags contain V4L2_BUF_FLAG_DONE, the V4L2_BUF_FLAG_DONE bit is cleared first, V4L2_BUF_FLAG_QUEUED is set again, and -EINVAL is returned.
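In the normal flow the application queues every mapped buffer once before starting the stream, which is what puts them on cam->ready_q with the MAPPED and QUEUED flags (a sketch; queue_all_buffers is my own helper name):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Queue all 'count' buffers onto the driver's ready_q list. */
int queue_all_buffers(int fd, unsigned int count)
{
	struct v4l2_buffer buf;
	unsigned int i;

	for (i = 0; i < count; i++) {
		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = i;
		if (ioctl(fd, VIDIOC_QBUF, &buf) < 0) {
			perror("VIDIOC_QBUF");
			return -1;
		}
	}
	return 0;
}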


11.VIDIOC_STREAMON

Once the buffers have been queued with QBUF, data transfer can start.

case VIDIOC_STREAMON: {
	pr_debug("   case VIDIOC_STREAMON\n");
	retval = mxc_streamon(cam);
	break;
}

static int mxc_streamon(cam_data *cam)
{
	struct mxc_v4l_frame *frame;
	unsigned long lock_flags;
	int err = 0;

	pr_debug("In MVC:mxc_streamon\n");

	if (NULL == cam) {
		pr_err("ERROR! cam parameter is NULL\n");
		return -1;
	}

	if (cam->capture_on) {
		pr_err("ERROR: v4l2 capture: Capture stream has been turned "
		       " on\n");
		return -1;
	}

	if (list_empty(&cam->ready_q)) {
		pr_err("ERROR: v4l2 capture: mxc_streamon buffer has not been "
		       "queued yet\n");
		return -EINVAL;
	}
	if (cam->enc_update_eba &&
	    cam->ready_q.prev == cam->ready_q.next) {
		pr_err("ERROR: v4l2 capture: mxc_streamon buffer need "
		       "ping pong at least two buffers\n");
		return -EINVAL;
	}
	/* At least two buffers must be queued before streaming can start. */

	cam->capture_pid = current->pid;

	if (cam->overlay_on == true)
		stop_preview(cam);
	/* If overlay is on, stop the preview first. */

	if (cam->enc_enable) {
		err = cam->enc_enable(cam);
		if (err != 0)
			return err;
	}
	/* cam->enc_enable enables the encoding task. These function pointers
	 * in cam_data were filled in during mxc_v4l_open by csi_enc_select
	 * or prp_enc_select, so the call ends up in ipu_prp_enc.c or
	 * ipu_csi_enc.c; those files are analyzed later. */

	spin_lock_irqsave(&cam->queue_int_lock, lock_flags);
	cam->ping_pong_csi = 0;
	cam->local_buf_num = 0;
	if (cam->enc_update_eba) {
		frame =
		    list_entry(cam->ready_q.next, struct mxc_v4l_frame, queue);
		list_del(cam->ready_q.next);
		list_add_tail(&frame->queue, &cam->working_q);
		frame->ipu_buf_num = cam->ping_pong_csi;
		err = cam->enc_update_eba(cam, frame->buffer.m.offset);

		frame =
		    list_entry(cam->ready_q.next, struct mxc_v4l_frame, queue);
		list_del(cam->ready_q.next);
		list_add_tail(&frame->queue, &cam->working_q);
		frame->ipu_buf_num = cam->ping_pong_csi;
		err |= cam->enc_update_eba(cam, frame->buffer.m.offset);
		spin_unlock_irqrestore(&cam->queue_int_lock, lock_flags);
	} else {
		spin_unlock_irqrestore(&cam->queue_int_lock, lock_flags);
		return -EINVAL;
	}
	/* The first two buffers are handled here: each time, the frame that
	 * contains cam->ready_q.next is looked up, removed from the ready_q
	 * list and appended to cam->working_q, and cam->enc_update_eba is
	 * called to program the buffer (EBA) address into the IPU. The
	 * ping_pong_csi and local_buf_num variables, and the buffer update
	 * process in general, are analyzed in detail in the later document
	 * on the buffer flow between the application and the driver; that
	 * buffer-address update flow is one of the key points. */

	if (cam->overlay_on == true)
		start_preview(cam);
	/* If overlay is on, restart the preview. */

	if (cam->enc_enable_csi) {
		err = cam->enc_enable_csi(cam);
		if (err != 0)
			return err;
	}
	/* cam->enc_enable_csi enables the CSI. */

	cam->capture_on = true;
	return err;
}

12.VIDIOC_DQBUF

Once streaming has started and a buffer has been filled with data, that buffer can be dequeued. The application does this with the VIDIOC_DQBUF ioctl:

case VIDIOC_DQBUF: {
	struct v4l2_buffer *buf = arg;
	pr_debug("   case VIDIOC_DQBUF\n");

	if ((cam->enc_counter == 0) && (file->f_flags & O_NONBLOCK)) {
		retval = -EAGAIN;
		break;
	}

	retval = mxc_v4l_dqueue(cam, buf);
	break;
}
static int mxc_v4l_dqueue(cam_data *cam, struct v4l2_buffer *buf)
{
	int retval = 0;
	struct mxc_v4l_frame *frame;
	unsigned long lock_flags;

	pr_debug("In MVC:mxc_v4l_dqueue\n");

	if (!wait_event_interruptible_timeout(cam->enc_queue,
					      cam->enc_counter != 0,
					      10 * HZ)) {
		pr_err("ERROR: v4l2 capture: mxc_v4l_dqueue timeout "
		       "enc_counter %x\n", cam->enc_counter);
		return -ETIME;
	} else if (signal_pending(current)) {
		pr_err("ERROR: v4l2 capture: mxc_v4l_dqueue() "
		       "interrupt received\n");
		return -ERESTARTSYS;
	}
	/* Sleep on cam->enc_queue. When a buffer has been filled, an
	 * interrupt is raised and the interrupt handler camera_callback
	 * wakes this queue up. */

	if (down_interruptible(&cam->busy_lock))
		return -EBUSY;

	spin_lock_irqsave(&cam->dqueue_int_lock, lock_flags);
	cam->enc_counter--;
	/* Each call decrements enc_counter by one: one fewer completed
	 * buffer is waiting to be dequeued. */

	frame = list_entry(cam->done_q.next, struct mxc_v4l_frame, queue);
	list_del(cam->done_q.next);
	/* Take the first frame off the cam->done_q list and remove it from
	 * the list. */

	if (frame->buffer.flags & V4L2_BUF_FLAG_DONE) {
		frame->buffer.flags &= ~V4L2_BUF_FLAG_DONE;
	} else if (frame->buffer.flags & V4L2_BUF_FLAG_QUEUED) {
		pr_err("ERROR: v4l2 capture: VIDIOC_DQBUF: "
		       "Buffer not filled.\n");
		frame->buffer.flags &= ~V4L2_BUF_FLAG_QUEUED;
		retval = -EINVAL;
	} else if ((frame->buffer.flags & 0x7) == V4L2_BUF_FLAG_MAPPED) {
		pr_err("ERROR: v4l2 capture: VIDIOC_DQBUF: "
		       "Buffer not queued.\n");
		retval = -EINVAL;
	}
	/* Branch on the buffer's flags. (Where does V4L2_BUF_FLAG_DONE get
	 * set? The IPU registers an interrupt; when a buffer has been filled
	 * the interrupt fires and the handler camera_callback marks the
	 * buffer done and moves it onto done_q. This is covered in detail in
	 * the later buffer-flow discussion; it is only noted here since we
	 * are still walking through the ioctls.) If the flags contain
	 * V4L2_BUF_FLAG_DONE, clear that bit; V4L2_BUF_FLAG_QUEUED means the
	 * buffer has not been filled yet; V4L2_BUF_FLAG_MAPPED means the
	 * buffer was never queued with QBUF. */

	cam->frame[frame->index].buffer.field = cam->device_type ?
						V4L2_FIELD_INTERLACED :
						V4L2_FIELD_NONE;

	buf->bytesused = cam->v2f.fmt.pix.sizeimage;
	buf->index = frame->index;
	buf->flags = frame->buffer.flags;
	buf->m = cam->frame[frame->index].buffer.m;
	buf->timestamp = cam->frame[frame->index].buffer.timestamp;
	buf->field = cam->frame[frame->index].buffer.field;
	spin_unlock_irqrestore(&cam->dqueue_int_lock, lock_flags);
	up(&cam->busy_lock);
	return retval;
}
/* bytesused, index and the other fields are copied into buf, which is
 * returned to the application. The important field is buf->m, which holds
 * the physical address (offset) of the buffer; that is where the
 * application goes to fetch the data captured by the camera. */
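On the application side this becomes the familiar capture loop: start the stream, dequeue a filled buffer, consume the data at the mapped address for buf.index, and queue the buffer again (a sketch; capture_loop is my own helper name and the printf stands in for real frame processing):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Capture 'nframes' frames; buffers[] are the pointers returned by mmap. */
int capture_loop(int fd, void *buffers[], unsigned int nframes)
{
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	struct v4l2_buffer buf;
	unsigned int n;

	if (ioctl(fd, VIDIOC_STREAMON, &type) < 0)	/* mxc_streamon */
		return -1;

	for (n = 0; n < nframes; n++) {
		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;

		/* Blocks in mxc_v4l_dqueue until camera_callback reports a
		 * filled buffer. */
		if (ioctl(fd, VIDIOC_DQBUF, &buf) < 0)
			return -1;

		/* Real code would process the frame data here. */
		printf("frame %u at %p, %u bytes\n",
		       buf.index, buffers[buf.index], buf.bytesused);

		if (ioctl(fd, VIDIOC_QBUF, &buf) < 0)	/* hand it back */
			return -1;
	}
	return 0;
}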

13.VIDIOC_STREAMOFF

case VIDIOC_STREAMOFF: {
	pr_debug("   case VIDIOC_STREAMOFF\n");
	retval = mxc_streamoff(cam);
	break;
}

static int mxc_streamoff(cam_data *cam)
{
	int err = 0;

	pr_debug("In MVC:mxc_streamoff\n");

	if (cam->capture_on == false)
		return 0;

	/* For both CSI--MEM and CSI--IC--MEM
	 * 1. wait for idmac eof
	 * 2. disable csi first
	 * 3. disable idmac
	 * 4. disable smfc (CSI--MEM channel)
	 */
	if (mxc_capture_inputs[cam->current_input].name != NULL) {
		if (cam->enc_disable_csi) {
			err = cam->enc_disable_csi(cam);
			if (err != 0)
				return err;
		}
		if (cam->enc_disable) {
			err = cam->enc_disable(cam);
			if (err != 0)
				return err;
		}
	}

	mxc_free_frames(cam);
	mxc_capture_inputs[cam->current_input].status |= V4L2_IN_ST_NO_POWER;
	cam->capture_on = false;
	return err;
}

The comment in the code spells it out: first wait for the IDMAC EOF, then disable the CSI, then the IDMAC, and finally the SMFC (the CSI--MEM channel). Disabling the CSI is done through enc_disable_csi; the other three steps are carried out by csi_enc_disabling_tasks, which is called from enc_disable. Finally, mxc_free_frames clears the frames' state flags.
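For completeness, the corresponding user-space teardown stops the stream (which runs the sequence above), unmaps the buffers, and closes the device (a sketch; stop_capture is my own helper name, and it assumes all buffers share the same mapped length, as allocated by mxc_allocate_frame_buf):

#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/videodev2.h>

/* Stop streaming (VIDIOC_STREAMOFF -> mxc_streamoff), then release the
 * mappings and the file descriptor. */
void stop_capture(int fd, void *buffers[], unsigned int count,
		  unsigned int length)
{
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	unsigned int i;

	if (ioctl(fd, VIDIOC_STREAMOFF, &type) < 0)
		perror("VIDIOC_STREAMOFF");

	for (i = 0; i < count; i++)
		munmap(buffers[i], length);

	close(fd);	/* the driver's release handler runs here */
}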

