Extracting NARF features from a range image


Reprinted from: http://m.blog.csdn.net/article/details?id=51107194

This section shows how to extract NARF keypoints from a range image and then compute NARF descriptors at those keypoint positions.

Here is the code:

#include <iostream>
#include <boost/thread/thread.hpp>
#include <pcl/range_image/range_image.h>
#include <pcl/io/pcd_io.h>
#include <pcl/visualization/range_image_visualizer.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/features/range_image_border_extractor.h>
#include <pcl/keypoints/narf_keypoint.h>
#include <pcl/features/narf_descriptor.h>
#include <pcl/console/parse.h>

typedef pcl::PointXYZ PointType;

// --------------------
// -----Parameters-----
// --------------------
float angular_resolution = 0.5f;
float support_size = 0.2f;
pcl::RangeImage::CoordinateFrame coordinate_frame = pcl::RangeImage::CAMERA_FRAME;
bool setUnseenToMaxRange = false;
bool rotation_invariant = true;

// --------------
// -----Help-----
// --------------
void printUsage (const char* progName)
{
  std::cout << "\n\nUsage: "<<progName<<" [options] <scene.pcd>\n\n"
            << "Options:\n"
            << "-------------------------------------------\n"
            << "-r <float>   angular resolution in degrees (default "<<angular_resolution<<")\n"
            << "-c <int>     coordinate frame (default "<< (int)coordinate_frame<<")\n"
            << "-m           Treat all unseen points to max range\n"
            << "-s <float>   support size for the interest points (diameter of the used sphere - "
                                                                  "default "<<support_size<<")\n"
            << "-o <0/1>     switch rotational invariant version of the feature on/off"
            <<               " (default "<< (int)rotation_invariant<<")\n"
            << "-h           this help\n"
            << "\n\n";
}

void setViewerPose (pcl::visualization::PCLVisualizer& viewer, const Eigen::Affine3f& viewer_pose)
{
  Eigen::Vector3f pos_vector = viewer_pose * Eigen::Vector3f (0, 0, 0);
  Eigen::Vector3f look_at_vector = viewer_pose.rotation () * Eigen::Vector3f (0, 0, 1) + pos_vector;
  Eigen::Vector3f up_vector = viewer_pose.rotation () * Eigen::Vector3f (0, -1, 0);
  viewer.setCameraPosition (pos_vector[0], pos_vector[1], pos_vector[2],
                            look_at_vector[0], look_at_vector[1], look_at_vector[2],
                            up_vector[0], up_vector[1], up_vector[2]);
}

// --------------
// -----Main-----
// --------------
int main (int argc, char** argv)
{
  // --------------------------------------
  // -----Parse Command Line Arguments-----
  // --------------------------------------
  if (pcl::console::find_argument (argc, argv, "-h") >= 0)
  {
    printUsage (argv[0]);
    return 0;
  }
  if (pcl::console::find_argument (argc, argv, "-m") >= 0)
  {
    setUnseenToMaxRange = true;
    cout << "Setting unseen values in range image to maximum range readings.\n";
  }
  if (pcl::console::parse (argc, argv, "-o", rotation_invariant) >= 0)
    cout << "Switching rotation invariant feature version "<< (rotation_invariant ? "on" : "off")<<".\n";
  int tmp_coordinate_frame;
  if (pcl::console::parse (argc, argv, "-c", tmp_coordinate_frame) >= 0)
  {
    coordinate_frame = pcl::RangeImage::CoordinateFrame (tmp_coordinate_frame);
    cout << "Using coordinate frame "<< (int)coordinate_frame<<".\n";
  }
  if (pcl::console::parse (argc, argv, "-s", support_size) >= 0)
    cout << "Setting support size to "<<support_size<<".\n";
  if (pcl::console::parse (argc, argv, "-r", angular_resolution) >= 0)
    cout << "Setting angular resolution to "<<angular_resolution<<"deg.\n";
  angular_resolution = pcl::deg2rad (angular_resolution);

  // ------------------------------------------------------------------
  // -----Read pcd file or create example point cloud if not given-----
  // ------------------------------------------------------------------
  pcl::PointCloud<PointType>::Ptr point_cloud_ptr (new pcl::PointCloud<PointType>);
  pcl::PointCloud<PointType>& point_cloud = *point_cloud_ptr;
  pcl::PointCloud<pcl::PointWithViewpoint> far_ranges;
  Eigen::Affine3f scene_sensor_pose (Eigen::Affine3f::Identity ());
  std::vector<int> pcd_filename_indices = pcl::console::parse_file_extension_argument (argc, argv, "pcd");
  if (!pcd_filename_indices.empty ())
  {
    std::string filename = argv[pcd_filename_indices[0]];
    if (pcl::io::loadPCDFile (filename, point_cloud) == -1)
    {
      cerr << "Was not able to open file \""<<filename<<"\".\n";
      printUsage (argv[0]);
      return 0;
    }
    scene_sensor_pose = Eigen::Affine3f (Eigen::Translation3f (point_cloud.sensor_origin_[0],
                                                               point_cloud.sensor_origin_[1],
                                                               point_cloud.sensor_origin_[2])) *
                        Eigen::Affine3f (point_cloud.sensor_orientation_);
    std::string far_ranges_filename = pcl::getFilenameWithoutExtension (filename)+"_far_ranges.pcd";
    if (pcl::io::loadPCDFile (far_ranges_filename.c_str (), far_ranges) == -1)
      std::cout << "Far ranges file \""<<far_ranges_filename<<"\" does not exists.\n";
  }
  else
  {
    setUnseenToMaxRange = true;
    cout << "\nNo *.pcd file given => Genarating example point cloud.\n\n";
    for (float x=-0.5f; x<=0.5f; x+=0.01f)
    {
      for (float y=-0.5f; y<=0.5f; y+=0.01f)
      {
        PointType point;  point.x = x;  point.y = y;  point.z = 2.0f - y;
        point_cloud.points.push_back (point);
      }
    }
    point_cloud.width = (int) point_cloud.points.size ();  point_cloud.height = 1;
  }

  // -----------------------------------------------
  // -----Create RangeImage from the PointCloud-----
  // -----------------------------------------------
  float noise_level = 0.0;
  float min_range = 0.0f;
  int border_size = 1;
  boost::shared_ptr<pcl::RangeImage> range_image_ptr (new pcl::RangeImage);
  pcl::RangeImage& range_image = *range_image_ptr;
  range_image.createFromPointCloud (point_cloud, angular_resolution, pcl::deg2rad (360.0f), pcl::deg2rad (180.0f),
                                    scene_sensor_pose, coordinate_frame, noise_level, min_range, border_size);
  range_image.integrateFarRanges (far_ranges);
  if (setUnseenToMaxRange)
    range_image.setUnseenToMaxRange ();

  // --------------------------------------------
  // -----Open 3D viewer and add point cloud-----
  // --------------------------------------------
  pcl::visualization::PCLVisualizer viewer ("3D Viewer");
  viewer.setBackgroundColor (1, 1, 1);
  pcl::visualization::PointCloudColorHandlerCustom<pcl::PointWithRange> range_image_color_handler (range_image_ptr, 0, 0, 0);
  viewer.addPointCloud (range_image_ptr, range_image_color_handler, "range image");
  viewer.setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1, "range image");
  //viewer.addCoordinateSystem (1.0f, "global");
  //PointCloudColorHandlerCustom<PointType> point_cloud_color_handler (point_cloud_ptr, 150, 150, 150);
  //viewer.addPointCloud (point_cloud_ptr, point_cloud_color_handler, "original point cloud");
  viewer.initCameraParameters ();
  setViewerPose (viewer, range_image.getTransformationToWorldSystem ());

  // --------------------------
  // -----Show range image-----
  // --------------------------
  pcl::visualization::RangeImageVisualizer range_image_widget ("Range image");
  range_image_widget.showRangeImage (range_image);

  // --------------------------------
  // -----Extract NARF keypoints-----
  // --------------------------------
  pcl::RangeImageBorderExtractor range_image_border_extractor;
  pcl::NarfKeypoint narf_keypoint_detector;
  narf_keypoint_detector.setRangeImageBorderExtractor (&range_image_border_extractor);
  narf_keypoint_detector.setRangeImage (&range_image);
  narf_keypoint_detector.getParameters ().support_size = support_size;

  pcl::PointCloud<int> keypoint_indices;
  narf_keypoint_detector.compute (keypoint_indices);
  std::cout << "Found "<<keypoint_indices.points.size ()<<" key points.\n";

  // ----------------------------------------------
  // -----Show keypoints in range image widget-----
  // ----------------------------------------------
  //for (size_t i=0; i<keypoint_indices.points.size (); ++i)
    //range_image_widget.markPoint (keypoint_indices.points[i]%range_image.width,
                                  //keypoint_indices.points[i]/range_image.width);

  // -------------------------------------
  // -----Show keypoints in 3D viewer-----
  // -------------------------------------
  pcl::PointCloud<pcl::PointXYZ>::Ptr keypoints_ptr (new pcl::PointCloud<pcl::PointXYZ>);
  pcl::PointCloud<pcl::PointXYZ>& keypoints = *keypoints_ptr;
  keypoints.points.resize (keypoint_indices.points.size ());
  for (size_t i=0; i<keypoint_indices.points.size (); ++i)
    keypoints.points[i].getVector3fMap () = range_image.points[keypoint_indices.points[i]].getVector3fMap ();
  pcl::visualization::PointCloudColorHandlerCustom<pcl::PointXYZ> keypoints_color_handler (keypoints_ptr, 0, 255, 0);
  viewer.addPointCloud<pcl::PointXYZ> (keypoints_ptr, keypoints_color_handler, "keypoints");
  viewer.setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 7, "keypoints");

  // ------------------------------------------------------
  // -----Extract NARF descriptors for interest points-----
  // ------------------------------------------------------
  std::vector<int> keypoint_indices2;
  keypoint_indices2.resize (keypoint_indices.points.size ());
  for (unsigned int i=0; i<keypoint_indices.size (); ++i) // This step is necessary to get the right vector type
    keypoint_indices2[i]=keypoint_indices.points[i];
  pcl::NarfDescriptor narf_descriptor (&range_image, &keypoint_indices2);
  narf_descriptor.getParameters ().support_size = support_size;
  narf_descriptor.getParameters ().rotation_invariant = rotation_invariant;
  pcl::PointCloud<pcl::Narf36> narf_descriptors;
  narf_descriptor.compute (narf_descriptors);
  cout << "Extracted "<<narf_descriptors.size ()<<" descriptors for "
       <<keypoint_indices.points.size ()<< " keypoints.\n";

  //--------------------
  // -----Main loop-----
  //--------------------
  while (!viewer.wasStopped ())
  {
    range_image_widget.spinOnce ();  // process GUI events
    viewer.spinOnce ();
    pcl_sleep(0.01);
  }
}

At the beginning, the program parses the command line, reads a point cloud file from disk (or generates an example point cloud if none is given), creates a range image from it, and extracts the NARF keypoints.

The part we are interested in starts here:

std::vector<int> keypoint_indices2;
keypoint_indices2.resize(keypoint_indices.points.size());
for (unsigned int i=0; i<keypoint_indices.size(); ++i) // This step is necessary to get the right vector type
  keypoint_indices2[i]=keypoint_indices.points[i];

Here we copy the keypoint indices into a vector of the type that the descriptor expects as input:

pcl::NarfDescriptor narf_descriptor(&range_image, &keypoint_indices2);
narf_descriptor.getParameters().support_size = support_size;
narf_descriptor.getParameters().rotation_invariant = rotation_invariant;
pcl::PointCloud<pcl::Narf36> narf_descriptors;
narf_descriptor.compute(narf_descriptors);
cout << "Extracted "<<narf_descriptors.size()<<" descriptors for "
     <<keypoint_indices.points.size()<< " keypoints.\n";

This code performs the actual descriptor computation. It first creates the NarfDescriptor object and gives it the input data (the range image and the keypoint indices). Then two important parameters are set: the support size, which determines the size of the area the descriptor is computed from, and whether the rotation-invariant version of the NARF descriptor should be used. Next we create the output point cloud and perform the computation. Finally, we print the number of keypoints and the number of extracted descriptors. These numbers can differ: the computation may fail for some keypoints because there are not enough points around them in the range image, or there may be multiple descriptors at the same position but for different dominant orientations.

The resulting point cloud contains points of type Narf36. The rest of the code visualizes the keypoint positions in the range image widget and in the 3D viewer.
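If you want to use the extracted descriptors afterwards (for example, for matching), you can read the fields of pcl::Narf36 directly: each point stores the keypoint pose (x, y, z, roll, pitch, yaw) together with a 36-element float array holding the feature values. Below is a minimal sketch, assuming the narf_descriptors cloud computed in the listing above:

// Minimal sketch: print each computed NARF descriptor.
// Assumes the pcl::PointCloud<pcl::Narf36> narf_descriptors from the code above.
for (size_t i = 0; i < narf_descriptors.size (); ++i)
{
  const pcl::Narf36& d = narf_descriptors.points[i];
  std::cout << "Descriptor " << i << " at (" << d.x << ", " << d.y << ", " << d.z << "): ";
  for (int j = 0; j < 36; ++j)   // the actual feature values
    std::cout << d.descriptor[j] << " ";
  std::cout << "\n";
}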

Now we can run the program:

./narf_feature_extraction -m

This automatically generates a point cloud of a rectangle floating in space, and keypoints are detected at its corners. The -m parameter is necessary because the area around the rectangle is unseen, so the system would otherwise not detect the corners as borders. The -m option changes unseen areas to maximum-range readings, which enables the system to use those corners.

You can also run the program on a point cloud file:

./narf_feature_extraction <point_cloud.pcd>
