Dog Breed Recognition: baseline = 0.2 Code for the Baidu & Xi'an Jiaotong University Big Data Competition
Problem overview
The organizers provide images covering 100 dog breeds; contestants must classify each dog by breed.
Analysis
The provided images (not reproduced here) have cluttered backgrounds: besides the dog they often contain people and other objects, so the first step is to isolate the dog. Rather than spending time training a dedicated dog detector from scratch, a pretrained SSD object detector is used to localize the dog, and the resulting crop is then classified.
The overall pipeline is therefore detection followed by classification, sketched below.
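To make the two-stage flow concrete before the code, here is a minimal Python sketch of the intended structure. detect_dog_box and classify_crop are hypothetical placeholders for the SSD detector and the fine-tuned classifier described in the following sections, and falling back to the full image when no dog is detected is an assumption rather than something stated in the original write-up; the image is assumed to be an OpenCV/NumPy array.

def detect_dog_box(image):
    # Placeholder: would run SSD and return (xmin, ymin, xmax, ymax),
    # or None if no dog is found above the confidence threshold.
    return None

def classify_crop(crop):
    # Placeholder: would run the fine-tuned InceptionV3 classifier
    # and return one of the 100 breed labels.
    return 0

def predict(image):
    box = detect_dog_box(image)
    # Assumed fallback: use the whole image when the detector finds nothing.
    crop = image if box is None else image[box[1]:box[3], box[0]:box[2]]
    return classify_crop(crop)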
Core dog-detection code
The detection program is built as a Qt project. The project (.pro) configuration is as follows:
INCLUDEPATH += /home/young/deeplearning/SSD/caffe/include
INCLUDEPATH += /usr/include
INCLUDEPATH += /home/young/deeplearning/SSD/caffe/src
INCLUDEPATH += /home/young/software/cuda/include

LIBS += -L/home/young/deeplearning/SSD/caffe/build/lib -lcaffe
LIBS += -L/usr/lib/x86_64-linux-gnu -lopencv_core -lopencv_imgproc -lopencv_highgui
LIBS += -lglog -lgflags -lprotobuf -lboost_system -lboost_thread -latlas

SOURCES += \
    main.cpp

# CUDA
INCLUDEPATH += /usr/local/cuda/include
LIBS += -L/usr/local/cuda/lib64 -lcudart -lcublas -lcurand

# cuDNN
LIBS += -L/home/young/software/cuda/lib64 -lcudnn
SSD detection code (adapted from Caffe's ssd_detect example):
#include "caffe/caffe.hpp"#define USE_OPENCV#define CPU_ONLY#ifdef USE_OPENCV#include <opencv2/core/core.hpp>#include <opencv2/highgui/highgui.hpp>#include <opencv2/imgproc/imgproc.hpp>#endif // USE_OPENCV#include <algorithm>#include <iomanip>#include <iosfwd>#include <memory>#include <string>#include <utility>#include <vector>#include<fstream>#ifdef USE_OPENCVusing namespace caffe; // NOLINT(build/namespaces)std::vector<std::string> splitEx(const std::string& src, std::string separate_character){ std::vector<std::string> strs; int separate_characterLen = separate_character.size();//分割字符串的长度,这样就可以支持如“,,”多字符串的分隔符 int lastPosition = 0,index = -1; while (-1 != (index = src.find(separate_character,lastPosition))) { strs.push_back(src.substr(lastPosition,index - lastPosition)); lastPosition = index + separate_characterLen; } std::string lastString = src.substr(lastPosition);//截取最后一个分隔符后的内容 if (!lastString.empty()) strs.push_back(lastString);//如果最后一个分隔符后还有内容就入队 return strs;}class Detector { public: Detector(const string& model_file, const string& weights_file, const string& mean_file, const string& mean_value); std::vector<vector<float> > Detect(const cv::Mat& img); private: void SetMean(const string& mean_file, const string& mean_value); void WrapInputLayer(std::vector<cv::Mat>* input_channels); void Preprocess(const cv::Mat& img, std::vector<cv::Mat>* input_channels); private: shared_ptr<Net<float> > net_; cv::Size input_geometry_; int num_channels_; cv::Mat mean_;};Detector::Detector(const string& model_file, const string& weights_file, const string& mean_file, const string& mean_value) {#ifdef CPU_ONLY Caffe::set_mode(Caffe::CPU);#else Caffe::set_mode(Caffe::GPU);#endif /* Load the network. */ net_.reset(new Net<float>(model_file, TEST)); net_->CopyTrainedLayersFrom(weights_file); CHECK_EQ(net_->num_inputs(), 1) << "Network should have exactly one input."; CHECK_EQ(net_->num_outputs(), 1) << "Network should have exactly one output."; Blob<float>* input_layer = net_->input_blobs()[0]; num_channels_ = input_layer->channels(); CHECK(num_channels_ == 3 || num_channels_ == 1) << "Input layer should have 1 or 3 channels."; input_geometry_ = cv::Size(input_layer->width(), input_layer->height()); /* Load the binaryproto mean file. */ SetMean(mean_file, mean_value);}std::vector<vector<float> > Detector::Detect(const cv::Mat& img) { Blob<float>* input_layer = net_->input_blobs()[0]; input_layer->Reshape(1, num_channels_, input_geometry_.height, input_geometry_.width); /* Forward dimension change to all layers. */ net_->Reshape(); std::vector<cv::Mat> input_channels; WrapInputLayer(&input_channels); Preprocess(img, &input_channels); net_->Forward(); /* Copy the output layer to a std::vector */ Blob<float>* result_blob = net_->output_blobs()[0]; const float* result = result_blob->cpu_data(); const int num_det = result_blob->height(); vector<vector<float> > detections; for (int k = 0; k < num_det; ++k) { if (result[0] == -1) { // Skip invalid detection. result += 7; continue; } vector<float> detection(result, result + 7); detections.push_back(detection); result += 7; } return detections;}/* Load the mean file in binaryproto format. 
*/void Detector::SetMean(const string& mean_file, const string& mean_value) { cv::Scalar channel_mean; if (!mean_file.empty()) { CHECK(mean_value.empty()) << "Cannot specify mean_file and mean_value at the same time"; BlobProto blob_proto; ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto); /* Convert from BlobProto to Blob<float> */ Blob<float> mean_blob; mean_blob.FromProto(blob_proto); CHECK_EQ(mean_blob.channels(), num_channels_) << "Number of channels of mean file doesn't match input layer."; /* The format of the mean file is planar 32-bit float BGR or grayscale. */ std::vector<cv::Mat> channels; float* data = mean_blob.mutable_cpu_data(); for (int i = 0; i < num_channels_; ++i) { /* Extract an individual channel. */ cv::Mat channel(mean_blob.height(), mean_blob.width(), CV_32FC1, data); channels.push_back(channel); data += mean_blob.height() * mean_blob.width(); } /* Merge the separate channels into a single image. */ cv::Mat mean; cv::merge(channels, mean); /* Compute the global mean pixel value and create a mean image * filled with this value. */ channel_mean = cv::mean(mean); mean_ = cv::Mat(input_geometry_, mean.type(), channel_mean); } if (!mean_value.empty()) { CHECK(mean_file.empty()) << "Cannot specify mean_file and mean_value at the same time"; stringstream ss(mean_value); vector<float> values; string item; while (getline(ss, item, ',')) { float value = std::atof(item.c_str()); values.push_back(value); } CHECK(values.size() == 1 || values.size() == num_channels_) << "Specify either 1 mean_value or as many as channels: " << num_channels_; std::vector<cv::Mat> channels; for (int i = 0; i < num_channels_; ++i) { /* Extract an individual channel. */ cv::Mat channel(input_geometry_.height, input_geometry_.width, CV_32FC1, cv::Scalar(values[i])); channels.push_back(channel); } cv::merge(channels, mean_); }}/* Wrap the input layer of the network in separate cv::Mat objects * (one per channel). This way we save one memcpy operation and we * don't need to rely on cudaMemcpy2D. The last preprocessing * operation will write the separate channels directly to the input * layer. */void Detector::WrapInputLayer(std::vector<cv::Mat>* input_channels) { Blob<float>* input_layer = net_->input_blobs()[0]; int width = input_layer->width(); int height = input_layer->height(); float* input_data = input_layer->mutable_cpu_data(); for (int i = 0; i < input_layer->channels(); ++i) { cv::Mat channel(height, width, CV_32FC1, input_data); input_channels->push_back(channel); input_data += width * height; }}void Detector::Preprocess(const cv::Mat& img, std::vector<cv::Mat>* input_channels) { /* Convert the input image to the input image format of the network. 
*/ cv::Mat sample; if (img.channels() == 3 && num_channels_ == 1) cv::cvtColor(img, sample, cv::COLOR_BGR2GRAY); else if (img.channels() == 4 && num_channels_ == 1) cv::cvtColor(img, sample, cv::COLOR_BGRA2GRAY); else if (img.channels() == 4 && num_channels_ == 3) cv::cvtColor(img, sample, cv::COLOR_BGRA2BGR); else if (img.channels() == 1 && num_channels_ == 3) cv::cvtColor(img, sample, cv::COLOR_GRAY2BGR); else sample = img; cv::Mat sample_resized; if (sample.size() != input_geometry_) cv::resize(sample, sample_resized, input_geometry_); else sample_resized = sample; cv::Mat sample_float; if (num_channels_ == 3) sample_resized.convertTo(sample_float, CV_32FC3); else sample_resized.convertTo(sample_float, CV_32FC1); cv::Mat sample_normalized; cv::subtract(sample_float, mean_, sample_normalized); /* This operation will write the separate BGR planes directly to the * input layer of the network because it is wrapped by the cv::Mat * objects in input_channels. */ cv::split(sample_normalized, *input_channels); CHECK(reinterpret_cast<float*>(input_channels->at(0).data) == net_->input_blobs()[0]->cpu_data()) << "Input channels are not wrapping the input layer of the network.";}DEFINE_string(mean_file, "", "The mean file used to subtract from the input image.");DEFINE_string(mean_value, "104,117,123", "If specified, can be one value or can be same as image channels" " - would subtract from the corresponding channel). Separated by ','." "Either mean_file or mean_value should be provided, not both.");DEFINE_string(file_type, "image", "The file type in the list_file. Currently support image and video.");DEFINE_string(out_file, "", "If provided, store the detection results in the out_file.");DEFINE_double(confidence_threshold, 0.7, "Only store detections with score higher than the threshold.");int main(int argc, char** argv) { ::google::InitGoogleLogging(argv[0]); // Print output to stderr (while still logging) FLAGS_alsologtostderr = 1;#ifndef GFLAGS_GFLAGS_H_ namespace gflags = google;#endif gflags::SetUsageMessage("Do detection using SSD mode.\n" "Usage:\n" " ssd_detect [FLAGS] model_file weights_file list_file\n"); gflags::ParseCommandLineFlags(&argc, &argv, true); /* if (argc < 4) { gflags::ShowUsageWithFlagsRestrict(argv[0], "examples/ssd/ssd_detect"); return 1; }*/ const string& model_file = "./model/deploy.prototxt"; const string& weights_file = "./model/VGG_VOC0712Plus_SSD_300x300_iter_240000.caffemodel"; const string& mean_file = FLAGS_mean_file; const string& mean_value = FLAGS_mean_value; const string& file_type = FLAGS_file_type; const string& out_file = FLAGS_out_file; const float confidence_threshold = FLAGS_confidence_threshold; // Initialize the network. Detector detector(model_file, weights_file, mean_file, mean_value); // Set the output mode. std::streambuf* buf = std::cout.rdbuf(); std::ofstream outfile; if (!out_file.empty()) { outfile.open(out_file.c_str()); if (outfile.good()) { buf = outfile.rdbuf(); } } std::ostream out(buf); // Process image one by one. 
std::ifstream infile("./img/val.txt"); std::string file; std::ofstream outTrainfile; //outTrainfile.open("./train.txt"); while (std::getline(infile, file)) { outTrainfile.open("./val.txt", std::ios::app); std::vector<std::string> line = splitEx(file, " "); std::string imgPath = line[0]; std::string label = line[1].substr(0,line[1].find("\r")); std::string prefixImg = splitEx(imgPath, ".")[0]; std::string postfixImg = splitEx(imgPath, ".")[1]; if (file_type == "image") { cv::Mat img = cv::imread("./img/Img/" + imgPath, -1); CHECK(!img.empty()) << "Unable to decode image " << file; std::vector<vector<float> > detections = detector.Detect(img); /* Print the detection results. */ int index = 0; for (int i = 0; i < detections.size(); ++i) { const vector<float>& d = detections[i]; // Detection format: [image_id, label, score, xmin, ymin, xmax, ymax]. CHECK_EQ(d.size(), 7); const float score = d[2]; if (score >= confidence_threshold && static_cast<int>(d[1]) == 12) { out << imgPath << " "; out << label << " "; out << static_cast<int>(d[1]) << " "; out << score << " "; out << static_cast<int>(d[3] * img.cols) << " "; out << static_cast<int>(d[4] * img.rows) << " "; out << static_cast<int>(d[5] * img.cols) << " "; out << static_cast<int>(d[6] * img.rows) << std::endl; int xmin = static_cast<int>(d[3] * img.cols); int ymin = static_cast<int>(d[4] * img.rows); int w = static_cast<int>(d[5] * img.cols); int h = static_cast<int>(d[6] * img.rows); if(xmin < 0) xmin = 0; if(ymin < 0) ymin = 0; if(xmin + w > img.cols) w = img.cols - xmin; if(ymin + h > img.rows) h = img.rows - ymin; cv::Mat saveImg(img, cv::Rect(xmin, ymin, w, h)); std::ostringstream stream; stream<<index; std::string newImgPath = prefixImg + "_" + stream.str() + "."+postfixImg; //outTrainfile<< newImgPath << " " << label << "\n"; std::string savePath = "./val/" + label + "/" + newImgPath; outTrainfile << savePath << " " << label << "\n"; cv::imwrite(savePath, saveImg); index++; outTrainfile.close(); } } } else if (file_type == "video") { cv::VideoCapture cap(file); if (!cap.isOpened()) { LOG(FATAL) << "Failed to open video: " << file; } cv::Mat img; int frame_count = 0; while (true) { bool success = cap.read(img); if (!success) { LOG(INFO) << "Process " << frame_count << " frames from " << file; break; } CHECK(!img.empty()) << "Error when read frame"; std::vector<vector<float> > detections = detector.Detect(img); /* Print the detection results. */ for (int i = 0; i < detections.size(); ++i) { const vector<float>& d = detections[i]; // Detection format: [image_id, label, score, xmin, ymin, xmax, ymax]. CHECK_EQ(d.size(), 7); const float score = d[2]; if (score >= confidence_threshold) { /* out << file << "_"; out << std::setfill('0') << std::setw(6) << frame_count << " "; out << static_cast<int>(d[1]) << " "; out << score << " "; out << static_cast<int>(d[3] * img.cols) << " "; out << static_cast<int>(d[4] * img.rows) << " "; out << static_cast<int>(d[5] * img.cols) << " "; out << static_cast<int>(d[6] * img.rows) << std::endl;*/ } } ++frame_count; } if (cap.isOpened()) { cap.release(); } } else { LOG(FATAL) << "Unknown file_type: " << file_type; } } outTrainfile.close(); return 0;}#elseint main(int argc, char** argv) { LOG(FATAL) << "This example requires OpenCV; compile with USE_OPENCV.";}#endif // USE_OPENCV
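One practical detail: cv::imwrite does not create missing directories, and Keras's flow_from_directory (used in the training code below) expects one subdirectory per class, so the 100 class folders under ./train and ./val should exist before the detector writes its crops. A small helper to pre-create them, assuming the same ./train and ./val layout and the string labels '0' through '99' used by the training script:

import os

# Pre-create one subdirectory per class under ./train and ./val so the
# detector can write crops there and flow_from_directory can later read them.
for split_dir in ('./train', './val'):
    for label in range(100):
        class_dir = os.path.join(split_dir, str(label))
        if not os.path.isdir(class_dir):
            os.makedirs(class_dir)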
Classifier selection
Deep learning has produced a number of strong architectures: AlexNet, VGG, ResNet, and Google's Inception family (V1, V2, V3, V4). ImageNet results show that deeper networks tend to classify better, and pretrained weights for all of these are publicly available. Because the competition's training data is limited, transfer learning is used. I experimented with VGG, ResNet-18, and Inception V3; Inception V3 gave the best results.
The training code is as follows:
from keras.applications.inception_v3 import InceptionV3
import os
from keras.layers import Flatten, Dense, AveragePooling2D
from keras.models import Model
from keras.optimizers import RMSprop, SGD
from keras.callbacks import ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator

learning_rate = 0.0001
img_width = 299
img_height = 299
nbr_train_samples = 3019
nbr_validation_samples = 758
nbr_epochs = 25
batch_size = 1
nb_classes = 100
train_data_dir = './train'
val_data_dir = './val'

DogNames = []
for i in range(0, 100):
    DogNames.append(str(i))

print('Loading InceptionV3 Weights ...')
InceptionV3_notop = InceptionV3(include_top=False, weights='imagenet',
                                input_tensor=None, input_shape=(299, 299, 3))

print('Adding Average Pooling Layer and Softmax Output Layer ...')
output = InceptionV3_notop.get_layer(index=-1).output  # Shape: (8, 8, 2048)
output = AveragePooling2D((8, 8), strides=(8, 8), name='avg_pool')(output)
output = Flatten(name='flatten')(output)
output = Dense(nb_classes, activation='softmax', name='predictions')(output)

InceptionV3_model = Model(InceptionV3_notop.input, output)

optimizer = SGD(lr=learning_rate, momentum=0.9, decay=0.0, nesterov=True)
InceptionV3_model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])

best_model_file = "./weights.h5"
best_model = ModelCheckpoint(best_model_file, monitor='val_acc', verbose=1, save_best_only=True)

# Data augmentation
train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.1,
    zoom_range=0.1,
    rotation_range=10.,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=True)

# This is the augmentation configuration we will use for validation:
# only rescaling.
val_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    shuffle=True,
    classes=DogNames,
    class_mode='categorical')

print(train_generator.class_indices)

validation_generator = val_datagen.flow_from_directory(
    val_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    shuffle=True,
    classes=DogNames,
    class_mode='categorical')

InceptionV3_model.fit_generator(
    train_generator,
    samples_per_epoch=nbr_train_samples,
    nb_epoch=nbr_epochs,
    validation_data=validation_generator,
    nb_val_samples=nbr_validation_samples,
    callbacks=[best_model])
After training, testing the model directly gives an error rate of about 0.21. Applying test-time augmentation, i.e. generating several preprocessed copies of each test image and accumulating the predictions, improves this to 0.20.
# Test data generator for prediction (test-time augmentation).
import numpy as np
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator

# weights_path, nbr_augmentation, test_data_dir, nbr_test_samples, img_width,
# img_height and batch_size are assumed to be defined as in the training
# script (weights_path would be the './weights.h5' checkpoint saved above).

test_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.1,
    zoom_range=0.1,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=True)

print('Loading model and weights from training process ...')
InceptionV3_model = load_model(weights_path)

for idx in range(nbr_augmentation):
    print('{}th augmentation for testing ...'.format(idx))
    random_seed = np.random.random_integers(0, 100000)

    test_generator = test_datagen.flow_from_directory(
        test_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        shuffle=False,  # Important: keep file order fixed so predictions from each round align.
        seed=random_seed,
        classes=None,
        class_mode=None)

    test_image_list = test_generator.filenames
    # print('image_list: {}'.format(test_image_list[:10]))
    print('Begin to predict for testing data ...')
    if idx == 0:
        predictions = InceptionV3_model.predict_generator(test_generator, nbr_test_samples)
    else:
        predictions += InceptionV3_model.predict_generator(test_generator, nbr_test_samples)
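The loop above only accumulates class probabilities. To turn them into labels, the accumulated values are averaged over the augmentation rounds and the arg-max class is taken for each image. A minimal sketch of that final step, assuming predictions, test_image_list, nbr_augmentation and DogNames from the code above; the file name submit.txt and the "image label" line format are assumptions, not the competition's official submission format:

import numpy as np

# Average the probabilities accumulated over all augmentation rounds and
# pick the most likely class for each test image.
predictions /= float(nbr_augmentation)

with open('submit.txt', 'w') as f:  # hypothetical output file and format
    for i, image_name in enumerate(test_image_list):
        label_index = int(np.argmax(predictions[i]))
        f.write('{} {}\n'.format(image_name, DogNames[label_index]))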
Further improvements
- Apply metric learning (e.g. center loss or triplet loss) to shrink intra-class distances and enlarge inter-class distances. Center loss did not work well here: it needs a feature center per class at every update, and because dogs of the same breed appear in very different poses, those centers are unstable.
- Split the data into 10 folds, randomly pick 9 folds for training and 1 for validation, repeat, and ensemble the resulting models at test time (see the sketch after this list).
- Fuse the features extracted by different classifiers by concatenating them and training on the combined representation.
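As a rough illustration of the 10-fold idea from the second bullet, the splitting can be done with plain NumPy. train_one_model and predict_test are hypothetical stand-ins for the Keras training and prediction code shown earlier, so this is only a sketch of the ensembling loop under those assumptions, not a drop-in script.

import numpy as np

def kfold_indices(n_samples, n_folds=10, seed=0):
    # Shuffle the sample indices once, then cut them into n_folds chunks;
    # each chunk serves once as the validation fold.
    rng = np.random.RandomState(seed)
    order = rng.permutation(n_samples)
    folds = np.array_split(order, n_folds)
    splits = []
    for i in range(n_folds):
        val_idx = folds[i]
        train_idx = np.concatenate([folds[j] for j in range(n_folds) if j != i])
        splits.append((train_idx, val_idx))
    return splits

# Ensembling loop (commented out because train_one_model / predict_test are
# hypothetical helpers wrapping the Keras code above):
# ensemble_pred = None
# for train_idx, val_idx in kfold_indices(len(all_image_paths), n_folds=10):
#     model = train_one_model(train_idx, val_idx)
#     fold_pred = predict_test(model)
#     ensemble_pred = fold_pred if ensemble_pred is None else ensemble_pred + fold_pred
# ensemble_pred /= 10.0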