OpenCV Learning Notes II (Edge Detection and Coordinate Transforms)


Example 1

// Play a video and create a trackbar that shows the current frame in real time; the trackbar can also be used to seek.
// IplImage* deal(IplImage* img)                                    - Gaussian smoothing
// IplImage* doPyrDown(IplImage* in, int filter = IPL_GAUSSIAN_5x5) - downsampling
// IplImage* doCanny(IplImage* in, double lowThresh, double highThresh, double aperture) - Canny edge detection
#include "stdafx.h"
#include "highgui.h"
#include "cv.h"
#include "stdlib.h"
using namespace std;

int g_slider_position = 0;
CvCapture* g_capture = NULL;
int n = 0;

void onTrackbarSlide(int pos)
{
    // Callback invoked when the trackbar is dragged:
    // seek the capture to the selected frame and remember the position.
    cvSetCaptureProperty(g_capture, CV_CAP_PROP_POS_FRAMES, pos);
    n = pos;
}

IplImage* deal(IplImage* img) // Gaussian smoothing
{
    IplImage* out = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3); // create the output image
    cvSmooth(img, out, CV_GAUSSIAN, 7, 7); // Gaussian smoothing with a 7x7 kernel
    return out;
}

IplImage* doPyrDown(IplImage* in, int filter = IPL_GAUSSIAN_5x5) // downsampling
{
    assert(in->width % 2 == 0 && in->height % 2 == 0);
    IplImage* out = cvCreateImage(cvSize(in->width / 2, in->height / 2), in->depth, in->nChannels);
    cvPyrDown(in, out);
    return out;
}

IplImage* doCanny(IplImage* in, double lowThresh, double highThresh, double aperture) // edge detection
{
    IplImage* out = cvCreateImage(cvGetSize(in), IPL_DEPTH_8U, 1); // output image
    if (in->nChannels != 1) // cvCanny only works on single-channel images
    {
        IplImage* out1 = cvCreateImage(cvGetSize(in), IPL_DEPTH_8U, 1); // single-channel temporary
        cvCvtColor(in, out1, CV_BGR2GRAY); // convert the color image to grayscale
        cvCanny(out1, out, lowThresh, highThresh, aperture); // input, output, two thresholds, Sobel aperture size
        cvReleaseImage(&out1); // release the temporary
    }
    else
        cvCanny(in, out, lowThresh, highThresh, aperture); // already single-channel, run Canny directly
    return out;
}

int _tmain(int argc, _TCHAR* argv[])
{
    cvNamedWindow("Example2", CV_WINDOW_AUTOSIZE);
    g_capture = cvCreateFileCapture("D:\\test.avi"); // open the video file
    int frames = (int)cvGetCaptureProperty(g_capture, CV_CAP_PROP_FRAME_COUNT); // total frame count
    if (frames != 0) // some codecs do not report a frame count
    {
        cvCreateTrackbar("position", "Example2", &g_slider_position, frames, onTrackbarSlide); // create the trackbar
    }
    IplImage* frame; // current frame
    while (1)
    {
        frame = cvQueryFrame(g_capture); // grab the next frame
        if (!frame)
            break;
        cvSetTrackbarPos("position", "Example2", n++); // advance the trackbar as playback proceeds
        IplImage* out = deal(frame); // Gaussian smoothing
        //IplImage* out = doPyrDown(frame); // downsampling
        //IplImage* out = doCanny(frame, 10, 100, 3); // edge detection
        cvShowImage("Example2", out);
        cvReleaseImage(&out);
        char c = cvWaitKey(33);
        if (c == 27)
            break;
    }
    cvReleaseCapture(&g_capture);
    cvDestroyWindow("Example2");
    system("pause");
    return 0;
}
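For reference, here is a minimal standalone sketch (not from the original post) that applies the same Canny step to a single still image with the thresholds and aperture used in the commented-out doCanny(frame, 10, 100, 3) call above; the input path "D:\\test.jpg" is only a placeholder.

#include "cv.h"
#include "highgui.h"

int main()
{
    // Placeholder path; load directly as a single-channel image so cvCanny can be called without conversion.
    IplImage* img = cvLoadImage("D:\\test.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    if (!img) return -1;
    IplImage* edges = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
    // Low threshold 10, high threshold 100, 3x3 Sobel aperture.
    cvCanny(img, edges, 10, 100, 3);
    cvNamedWindow("edges", CV_WINDOW_AUTOSIZE);
    cvShowImage("edges", edges);
    cvWaitKey(0);
    cvReleaseImage(&edges);
    cvReleaseImage(&img);
    cvDestroyWindow("edges");
    return 0;
}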


Example 2

// Read a video file, apply a log-polar transform to each frame, convert it to grayscale,
// and write the result to another video file.
#include "stdafx.h"
#include "cv.h"
#include "highgui.h"
using namespace std;

int _tmain(int argc, _TCHAR* argv[])
{
    CvCapture* capture = NULL;
    capture = cvCreateFileCapture("D:\\test.avi"); // open the video file
    if (!capture)
    {
        return -1;
    }
    IplImage* bgr_frame = cvQueryFrame(capture); // grab one frame to initialize the image pointer
    double fps = cvGetCaptureProperty(capture, CV_CAP_PROP_FPS); // frame rate: frames per second
    CvSize size = cvSize(
        (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH),  // frame width
        (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT)  // frame height
    );
    CvVideoWriter* writer = cvCreateVideoWriter( // create the video writer
        "test3.avi",                     // output file name
        //CV_FOURCC('M', 'J', 'P', 'G'), // codec; replaced with -1 below for writing grayscale frames
        -1,
        fps,                             // frame rate
        size,                            // frame size
        0                                // is_color = 0, since single-channel (grayscale) frames are written
    );
    IplImage* logpolar_frame = cvCreateImage( // destination image
        size,
        IPL_DEPTH_8U,
        3
    );
    while ((bgr_frame = cvQueryFrame(capture)) != NULL)
    {
        cvLogPolar( // remap the frame from Cartesian to log-polar coordinates
            bgr_frame,       // source image
            logpolar_frame,  // destination image
            cvPoint2D32f(bgr_frame->width / 2, bgr_frame->height / 2), // center of the transform
            20,              // magnitude scale parameter
            CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS // flags = interpolation method + fill flag
                             // (CV_WARP_INVERSE_MAP would map back from log-polar to Cartesian instead)
        );
        IplImage* out1 = cvCreateImage(cvGetSize(logpolar_frame), IPL_DEPTH_8U, 1); // single-channel temporary
        cvCvtColor(logpolar_frame, out1, CV_BGR2GRAY); // convert the color image to grayscale
        int nRet = cvWriteFrame(writer, out1); // write the frame to the output video
        cvReleaseImage(&out1); // release the temporary
    }
    cvReleaseVideoWriter(&writer);
    cvReleaseImage(&logpolar_frame);
    cvReleaseCapture(&capture);
    return 0;
}
/* Interpolation methods
   INTER_NEAREST  - nearest-neighbor interpolation
   INTER_LINEAR   - bilinear interpolation (default)
   INTER_AREA     - resampling using pixel area relation
   INTER_CUBIC    - bicubic interpolation over a 4x4 pixel neighborhood
   INTER_LANCZOS4 - Lanczos interpolation over an 8x8 pixel neighborhood
*/
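To make the role of the two warp flags concrete, here is a small sketch (not from the original post; the path "D:\\test.jpg" and the scale 20 are just placeholders) that performs the forward log-polar transform on a still image and then uses CV_WARP_INVERSE_MAP to map the result back to Cartesian coordinates.

#include "cv.h"
#include "highgui.h"

int main()
{
    IplImage* src = cvLoadImage("D:\\test.jpg"); // placeholder path
    if (!src) return -1;
    IplImage* logpolar  = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, src->nChannels);
    IplImage* recovered = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, src->nChannels);
    CvPoint2D32f center = cvPoint2D32f(src->width / 2.0f, src->height / 2.0f);
    // Forward transform: Cartesian -> log-polar
    cvLogPolar(src, logpolar, center, 20, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS);
    // Inverse transform: log-polar -> Cartesian (approximately recovers the original)
    cvLogPolar(logpolar, recovered, center, 20, CV_INTER_LINEAR + CV_WARP_INVERSE_MAP);
    cvNamedWindow("logpolar", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("recovered", CV_WINDOW_AUTOSIZE);
    cvShowImage("logpolar", logpolar);
    cvShowImage("recovered", recovered);
    cvWaitKey(0);
    cvReleaseImage(&recovered);
    cvReleaseImage(&logpolar);
    cvReleaseImage(&src);
    return 0;
}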

Reference: Learning OpenCV
