Using OpenCV in Java / Android


OpenCV documentation:
http://docs.opencv.org/

Image processing with OpenCV via the NDK (the Java side passes the pixels to native code, which converts them and runs Canny edge detection):

package com.testopencv.haveimgfun;

public class LibImgFun {
    static {
        System.loadLibrary("ImgFun");
    }

    /**
     * @param buf the pixel buffer of the current image
     * @param w   the image width
     * @param h   the image height
     */
    public static native int[] ImgFun(int[] buf, int w, int h);
}

// ClickEvent is an inner class of the HaveImgFun activity, so it can access its views.
class ClickEvent implements View.OnClickListener {
    public void onClick(View v) {
        if (v == btnNDK) {
            long current = System.currentTimeMillis();
            Bitmap img1 = ((BitmapDrawable) getResources().getDrawable(
                    R.drawable.lena)).getBitmap();
            int w = img1.getWidth(), h = img1.getHeight();
            int[] pix = new int[w * h];
            img1.getPixels(pix, 0, w, 0, 0, w, h);
            // Hand the pixel array to the native OpenCV code.
            int[] resultInt = LibImgFun.ImgFun(pix, w, h);
            Bitmap resultImg = Bitmap.createBitmap(w, h, Config.RGB_565);
            resultImg.setPixels(resultInt, 0, w, 0, 0, w, h);
            long performance = System.currentTimeMillis() - current;
            imgView.setImageBitmap(resultImg);
            HaveImgFun.this.setTitle("w:" + img1.getWidth() + ", h:" + img1.getHeight()
                    + ", NDK time: " + performance + " ms");
        } else if (v == btnRestore) {
            Bitmap img2 = ((BitmapDrawable) getResources().getDrawable(
                    R.drawable.lena)).getBitmap();
            imgView.setImageBitmap(img2);
            HaveImgFun.this.setTitle("Image processing with OpenCV");
        }
    }
}

LibImgFun native (JNI) code:

#include <jni.h>
#include <stdio.h>
#include <stdlib.h>
#include <opencv2/opencv.hpp>

using namespace cv;

IplImage* change4channelTo3InIplImage(IplImage* src);

extern "C" {

JNIEXPORT jintArray JNICALL Java_com_testopencv_haveimgfun_LibImgFun_ImgFun(
        JNIEnv* env, jobject obj, jintArray buf, int w, int h)
{
    jint* cbuf = env->GetIntArrayElements(buf, NULL);
    if (cbuf == NULL) {
        return 0;
    }
    // Wrap the ARGB pixel buffer from the Java side in a 4-channel Mat.
    Mat myimg(h, w, CV_8UC4, (unsigned char*) cbuf);
    IplImage image = IplImage(myimg);
    // Drop the alpha channel, then run Canny edge detection.
    IplImage* image3channel = change4channelTo3InIplImage(&image);
    IplImage* pCannyImage = cvCreateImage(cvGetSize(image3channel), IPL_DEPTH_8U, 1);
    cvCanny(image3channel, pCannyImage, 50, 150, 3);
    // Copy the single-channel result into an int array for the Java side.
    int* outImage = new int[w * h];
    for (int i = 0; i < w * h; i++) {
        outImage[i] = (int) pCannyImage->imageData[i];
    }
    int size = w * h;
    jintArray result = env->NewIntArray(size);
    env->SetIntArrayRegion(result, 0, size, outImage);
    env->ReleaseIntArrayElements(buf, cbuf, 0);
    // Free the temporaries.
    delete[] outImage;
    cvReleaseImage(&image3channel);
    cvReleaseImage(&pCannyImage);
    return result;
}

}  // extern "C"

// Copy a 4-channel image into a new 3-channel image (drop the alpha channel).
IplImage* change4channelTo3InIplImage(IplImage* src)
{
    if (src->nChannels != 4) {
        return NULL;
    }
    IplImage* destImg = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 3);
    for (int row = 0; row < src->height; row++) {
        for (int col = 0; col < src->width; col++) {
            CvScalar s = cvGet2D(src, row, col);
            cvSet2D(destImg, row, col, s);
        }
    }
    return destImg;
}

Source: http://blog.csdn.net/watkinsong/article/details/9849973
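For comparison, the same kind of conversion can also be done entirely in Java, without the NDK, through the OpenCV Android bindings. The sketch below is only a minimal illustration: it assumes the OpenCV native library has already been initialized (for example via OpenCVLoader), and the class and method names (GrayHelper, toGray) are placeholders.

import android.graphics.Bitmap;
import org.opencv.android.Utils;
import org.opencv.core.Mat;
import org.opencv.imgproc.Imgproc;

public class GrayHelper {
    // Convert a Bitmap to grayscale with the OpenCV Java API.
    // Assumes the OpenCV native library has already been loaded.
    public static Bitmap toGray(Bitmap src) {
        Mat rgba = new Mat();
        Utils.bitmapToMat(src, rgba);                       // Bitmap -> RGBA Mat
        Mat gray = new Mat();
        Imgproc.cvtColor(rgba, gray, Imgproc.COLOR_RGBA2GRAY);
        Bitmap out = Bitmap.createBitmap(src.getWidth(), src.getHeight(),
                Bitmap.Config.ARGB_8888);
        Utils.matToBitmap(gray, out);                       // gray Mat -> Bitmap
        return out;
    }
}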

Binarization (thresholding):

#include "opencv2/opencv.hpp"  using namespace cv;  void Java_com_test_MainActivity(JNIEnv* env jclass jthis){      Mat mat = imread("/test.jpg");      mat = mat >50;    //二值化     imwrite("out.jpg",mat);  }  

Face detection:

package com.tcl.uviewer.features.featuresImpl;

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfRect;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.highgui.Highgui;
import org.opencv.objdetect.CascadeClassifier;

public class Test {

    public static void detectFace(String imagePath) {
        System.out.println("\nRunning DetectFaceDemo");
        // Load the OpenCV native library.
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        // Create a face detector from the cascade file lbpcascade_frontalface.xml,
        // which ships with the OpenCV installation.
        CascadeClassifier faceDetector = new CascadeClassifier("C:/lbpcascade_frontalface.xml");
        Mat image = Highgui.imread(imagePath);
        // Detect faces in the image.
        MatOfRect faceDetections = new MatOfRect();
        faceDetector.detectMultiScale(image, faceDetections);
        System.out.println(String.format("Detected %s faces", faceDetections.toArray().length));
        // Draw a rectangle around each detected face.
        for (org.opencv.core.Rect rect : faceDetections.toArray()) {
            Core.rectangle(image, new Point(rect.x, rect.y),
                    new Point(rect.x + rect.width, rect.y + rect.height),
                    new Scalar(0, 255, 0));
        }
        // Save the result to a file.
        String filename = "faceDetection.png";
        System.out.println(String.format("Writing %s", filename));
        Highgui.imwrite(filename, image);
    }

    public static void main(String[] args) {
        String imagePath = "C:/005.jpg";
        Test.detectFace(imagePath);
    }
}
// A variant that loads the cascade file and test image from the classpath:
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
System.out.println("\nRunning FaceDetector");
CascadeClassifier faceDetector = new CascadeClassifier(
        FaceDetector.class.getResource("haarcascade_frontalface_alt.xml").getPath());
Mat image = Highgui.imread(FaceDetector.class.getResource("shekhar.JPG").getPath());
MatOfRect faceDetections = new MatOfRect();
faceDetector.detectMultiScale(image, faceDetections);
System.out.println(String.format("Detected %s faces", faceDetections.toArray().length));
for (Rect rect : faceDetections.toArray()) {
    Core.rectangle(image, new Point(rect.x, rect.y),
            new Point(rect.x + rect.width, rect.y + rect.height),
            new Scalar(0, 255, 0));
}
String filename = "output.png";
System.out.println(String.format("Writing %s", filename));
Highgui.imwrite(filename, image);
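Both snippets above use the desktop Java API and read the cascade XML from an absolute or classpath path. On Android the cascade is usually bundled as a raw resource and copied to app-private storage before constructing the CascadeClassifier. The sketch below assumes a res/raw/lbpcascade_frontalface.xml resource exists in the project and that the OpenCV library is already initialized; the class and method names (CascadeHelper, loadCascade) are placeholders.

import java.io.File;
import java.io.FileOutputStream;
import java.io.InputStream;
import android.content.Context;
import org.opencv.objdetect.CascadeClassifier;

public class CascadeHelper {
    // Copy the cascade from res/raw to app storage, then load it.
    // R.raw.lbpcascade_frontalface is assumed to exist in the project.
    public static CascadeClassifier loadCascade(Context context) throws Exception {
        InputStream is = context.getResources()
                .openRawResource(R.raw.lbpcascade_frontalface);
        File cascadeDir = context.getDir("cascade", Context.MODE_PRIVATE);
        File cascadeFile = new File(cascadeDir, "lbpcascade_frontalface.xml");
        FileOutputStream os = new FileOutputStream(cascadeFile);
        byte[] buffer = new byte[4096];
        int bytesRead;
        while ((bytesRead = is.read(buffer)) != -1) {
            os.write(buffer, 0, bytesRead);
        }
        is.close();
        os.close();
        CascadeClassifier detector =
                new CascadeClassifier(cascadeFile.getAbsolutePath());
        if (detector.empty()) {
            throw new IllegalStateException("Failed to load cascade classifier");
        }
        return detector;
    }
}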

Detection result: (result image omitted)
Sources:
http://yuanhuan.blog.51cto.com/3367116/1301368
http://yjdingkai.iteye.com/blog/1532450
http://www.cnblogs.com/endless-on/p/3491154.html

Face matching code (histogram comparison, JavaCV):

public double CmpPic(String path) {
    int l_bins = 20;
    int hist_size[] = { l_bins };
    float v_ranges[] = { 0, 100 };
    float ranges[][] = { v_ranges };
    // Load both images as grayscale: the previously saved face and the candidate image.
    IplImage Image1 = cvLoadImage(Environment.getExternalStorageDirectory()
            + "/FaceDetect/faceDone.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    IplImage Image2 = cvLoadImage(path, CV_LOAD_IMAGE_GRAYSCALE);
    IplImage imageArr1[] = { Image1 };
    IplImage imageArr2[] = { Image2 };
    // Build a 20-bin histogram for each image, normalize, then compare by correlation.
    CvHistogram Histogram1 = CvHistogram.create(1, hist_size, CV_HIST_ARRAY, ranges, 1);
    CvHistogram Histogram2 = CvHistogram.create(1, hist_size, CV_HIST_ARRAY, ranges, 1);
    cvCalcHist(imageArr1, Histogram1, 0, null);
    cvCalcHist(imageArr2, Histogram2, 0, null);
    cvNormalizeHist(Histogram1, 100.0);
    cvNormalizeHist(Histogram2, 100.0);
    // Correlation comparison: 1.0 means identical histograms.
    return cvCompareHist(Histogram1, Histogram2, CV_COMP_CORREL);
}
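The method above uses the JavaCV wrapper. A roughly equivalent version with the plain OpenCV Java bindings would use Imgproc.calcHist and Imgproc.compareHist. The sketch below assumes the 2.4-era API (Highgui for I/O), uses the full 0-255 grayscale range instead of the {0, 100} range above, and the class name (HistCompare) and file paths are placeholders.

import java.util.Arrays;
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfFloat;
import org.opencv.core.MatOfInt;
import org.opencv.highgui.Highgui;
import org.opencv.imgproc.Imgproc;

public class HistCompare {
    // Compare two grayscale images by correlation of their 20-bin histograms.
    // Returns a value in [-1, 1]; 1 means identical histograms.
    public static double compare(String pathA, String pathB) {
        Mat a = Highgui.imread(pathA, Highgui.CV_LOAD_IMAGE_GRAYSCALE);
        Mat b = Highgui.imread(pathB, Highgui.CV_LOAD_IMAGE_GRAYSCALE);
        MatOfInt histSize = new MatOfInt(20);          // 20 bins, as in the code above
        MatOfFloat ranges = new MatOfFloat(0f, 256f);  // full 8-bit range
        MatOfInt channels = new MatOfInt(0);
        Mat histA = new Mat();
        Mat histB = new Mat();
        Imgproc.calcHist(Arrays.asList(a), channels, new Mat(), histA, histSize, ranges);
        Imgproc.calcHist(Arrays.asList(b), channels, new Mat(), histB, histSize, ranges);
        Core.normalize(histA, histA, 0, 100, Core.NORM_MINMAX);
        Core.normalize(histB, histB, 0, 100, Core.NORM_MINMAX);
        return Imgproc.compareHist(histA, histB, Imgproc.CV_COMP_CORREL);
    }
}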

Reference: http://blog.csdn.net/sky286753213/article/details/11887913
Demo download: http://download.csdn.net/detail/sky286753213/6617075

Android Studio OpenCV demo:
https://github.com/quanhua92/OpenCV_Java_AndroidStudio

How to use OpenCV for Android in Android Studio:
http://www.quan404.com/2015/07/how-to-use-opencv-android-in-android.html

Image matching with OpenCV (feature matching and homography):

mMatches holds the matching results.

    /**
     * Process each scene frame: detect keypoints, compute descriptors,
     * match them against the reference descriptors, then find and draw
     * the target's corners.
     */
    @Override
    public void apply(final Mat src, final Mat dst) {
        Imgproc.cvtColor(src, mGraySrc, Imgproc.COLOR_RGBA2GRAY);
        mFeatureDetector.detect(mGraySrc, mSceneKeypoints);
        mDescriptorExtractor.compute(mGraySrc, mSceneKeypoints, mSceneDescriptors);
        mDescriptorMatcher.match(mSceneDescriptors, mReferenceDescriptors, mMatches);
        findSceneCorners();
        draw(src, dst);
    }

    private void findSceneCorners() {
        List<DMatch> matchesList = mMatches.toList();
        if (matchesList.size() < 4) {
            // There are too few matches to find the homography.
            return;
        }
        // Convert the MatOfKeyPoint structures to Lists for easier access.
        List<KeyPoint> referenceKeypointsList = mReferenceKeypoints.toList();
        List<KeyPoint> sceneKeypointsList = mSceneKeypoints.toList();
        // Calculate the max and min distances between matched keypoints.
        double maxDist = 0.0;
        double minDist = Double.MAX_VALUE;
        for (DMatch match : matchesList) {
            double dist = match.distance;
            if (dist < minDist) {
                minDist = dist;
            }
            if (dist > maxDist) {
                maxDist = dist;
            }
        }
        // The thresholds for minDist are chosen subjectively based on testing.
        // The unit is not related to pixel distances; it is related to the number
        // of failed tests for similarity between the matched descriptors.
        if (minDist > 50.0) {
            // The target is completely lost.
            // Discard any previously found corners.
            mSceneCorners.create(0, 0, mSceneCorners.type());
            return;
        } else if (minDist > 25.0) {
            // The target is lost but maybe it is still close.
            // Keep any previously found corners.
            return;
        }
        // Identify "good" keypoints based on match distance:
        // keep matches within 1.75x the minimum distance.
        ArrayList<Point> goodReferencePointsList = new ArrayList<Point>();
        ArrayList<Point> goodScenePointsList = new ArrayList<Point>();
        double maxGoodMatchDist = 1.75 * minDist;
        for (DMatch match : matchesList) {
            if (match.distance < maxGoodMatchDist) {
                goodReferencePointsList.add(referenceKeypointsList.get(match.trainIdx).pt);
                goodScenePointsList.add(sceneKeypointsList.get(match.queryIdx).pt);
            }
        }
        if (goodReferencePointsList.size() < 4 || goodScenePointsList.size() < 4) {
            // There are too few good points to find the homography.
            return;
        }
        // Convert the point lists to MatOfPoint2f, as required by findHomography:
        // one set of points from the reference image, one from the scene frame.
        MatOfPoint2f goodReferencePoints = new MatOfPoint2f();
        goodReferencePoints.fromList(goodReferencePointsList);
        MatOfPoint2f goodScenePoints = new MatOfPoint2f();
        goodScenePoints.fromList(goodScenePointsList);
        // Compute the homography between the reference points and the scene points.
        Mat homography = Calib3d.findHomography(goodReferencePoints, goodScenePoints);
        // Project the reference image's corners into the scene using the homography;
        // the result is stored in mCandidateSceneCorners.
        Core.perspectiveTransform(mReferenceCorners, mCandidateSceneCorners, homography);
        // Convert the candidate corners to integer coordinates.
        mCandidateSceneCorners.convertTo(mIntSceneCorners, CvType.CV_32S);
        // Accept the corners only if they form a convex quadrilateral.
        if (Imgproc.isContourConvex(mIntSceneCorners)) {
            mCandidateSceneCorners.copyTo(mSceneCorners);
        }
    }
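The snippet refers to mFeatureDetector, mDescriptorExtractor and mDescriptorMatcher without showing how they are created. With the 2.4/3.x Java bindings they would typically be set up roughly as in the sketch below; ORB is only one possible feature choice, and the class name (MatcherSetup) and the referenceGray parameter are placeholders.

import org.opencv.core.Mat;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.features2d.DescriptorExtractor;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.FeatureDetector;

public class MatcherSetup {
    FeatureDetector mFeatureDetector;
    DescriptorExtractor mDescriptorExtractor;
    DescriptorMatcher mDescriptorMatcher;
    MatOfKeyPoint mReferenceKeypoints = new MatOfKeyPoint();
    Mat mReferenceDescriptors = new Mat();

    // referenceGray is the reference (target) image, already converted to grayscale.
    void init(Mat referenceGray) {
        // ORB keypoints/descriptors with a brute-force Hamming matcher.
        mFeatureDetector = FeatureDetector.create(FeatureDetector.ORB);
        mDescriptorExtractor = DescriptorExtractor.create(DescriptorExtractor.ORB);
        mDescriptorMatcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);
        // Compute the reference keypoints and descriptors once, up front.
        mFeatureDetector.detect(referenceGray, mReferenceKeypoints);
        mDescriptorExtractor.compute(referenceGray, mReferenceKeypoints, mReferenceDescriptors);
    }
}

ORB pairs with a Hamming-distance matcher because its descriptors are binary; a float descriptor such as SIFT or SURF would need a different matcher (e.g. FLANN or brute-force L2).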

Result: (result image omitted)
Reference: http://arvrschool.com/read.php?tid=163&fid=52
