Adding a Layer to Caffe: Reproducing "Feature Learning based Deep Supervised Hashing with Pairwise Labels"
I was working on image hashing a while back. This paper from Nanjing University is quite good, but the released source code is written in MatConvNet, which I could not use in practice.
So I reproduced it in Caffe, where it can be called from C++ via the API. The measured mAP is 0.7459, slightly better than the result reported in the paper (0.713).
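For context, the layer implements the DPSH pairwise loss. As I read it from the paper and from the forward pass below (the code additionally normalizes both terms by the batch size):

\min_{B,\,U}\; J \;=\; -\sum_{s_{ij} \in S} \Big( s_{ij}\,\theta_{ij} \;-\; \log\big(1 + e^{\theta_{ij}}\big) \Big) \;+\; \eta \sum_{i} \lVert b_i - u_i \rVert_2^2,
\qquad \theta_{ij} = \tfrac{1}{2}\, u_i^{\top} u_j, \quad b_i = \operatorname{sgn}(u_i)

Here u_i is the network's real-valued output for image i, b_i its binarized code, s_ij = 1 when the two images share a label, and eta is the reg_factor read by the layer below.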
/*************************************************************************
    File Name:    deep_feature_hash_layer.cpp
    Author:       bin.wang
    Mail:         sa615168@mail.ustc.edu.cn
    Created Time: Fri 03 Mar 2017 11:23:12 AM CST
*************************************************************************/
#include <algorithm>
#include <vector>

#include "caffe/layers/deep_feature_hash_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void DeepFeatureHashLayer<Dtype>::LayerSetUp(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  LossLayer<Dtype>::LayerSetUp(bottom, top);
  CHECK_EQ(bottom[0]->height(), 1);
  CHECK_EQ(bottom[0]->width(), 1);
  CHECK_EQ(bottom[1]->height(), 1);
  CHECK_EQ(bottom[1]->width(), 1);

  const int batchSize = bottom[0]->num();
  const int channels  = bottom[0]->channels();  // channels == hash code length

  forward_data_.Reshape(1, channels, 1, 1);    // b_i, shape [1, code_length, 1, 1]
  diff_.Reshape(1, channels, 1, 1);            // b_i - u_i
  ALPHA_.Reshape(batchSize, batchSize, 1, 1);  // pairwise coefficients, batchSize x batchSize
  U_.Reshape(channels, batchSize, 1, 1);       // features transposed, channels x batchSize
  reg_Termfactor = this->layer_param().deep_hash_param().reg_factor();
}

template <typename Dtype>
void DeepFeatureHashLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  const int batchSize = bottom[0]->num();
  const int channels  = bottom[0]->channels();
  Dtype* bout = bottom[0]->mutable_cpu_diff();
  Dtype hash_loss(0.0);
  Dtype theta_ij(0.0);
  Dtype sim_term(0.0);
  Dtype regular_term(0.0);
  bool  similarity_ij = false;
  Dtype* alpha = ALPHA_.mutable_cpu_data();
  caffe_set(channels * batchSize, Dtype(0), bout);

  // Transpose the bottom data (features) into U_.
  Dtype* pu = U_.mutable_cpu_data();
  for (int i = 0; i < channels; ++i) {
    for (int j = 0; j < batchSize; ++j) {
      *(pu + i * batchSize + j) = *(bottom[0]->cpu_data() + j * channels + i);
    }
  }

  for (int i = 0; i < batchSize; ++i) {
    for (int j = 0; j < batchSize; ++j) {
      // 1. theta_ij = 0.5 * u_i^T u_j
      theta_ij = caffe_cpu_dot(channels,
                               bottom[0]->cpu_data() + (i * channels),
                               bottom[0]->cpu_data() + (j * channels));
      theta_ij *= 0.5;
      // std::cout << "theta_ij " << theta_ij << std::endl;

      // s_ij = 1 iff the two samples share a label.
      similarity_ij = (static_cast<int>(bottom[1]->cpu_data()[i]) ==
                       static_cast<int>(bottom[1]->cpu_data()[j]));

      // Negative log-likelihood of the pairwise similarity (first loss term).
      sim_term += similarity_ij * theta_ij - log(1 + exp(theta_ij));

      // ALPHA_[i][j]: per-pair gradient coefficient, reused below for backward.
      *((alpha + i * batchSize) + j) = (1 / (1 + exp(-theta_ij)) - similarity_ij);
    }

    // 2. b_i = sign(u_i)
    Dtype* tmp_data = forward_data_.mutable_cpu_data();
    for (int k = 0; k < channels; ++k) {
      *tmp_data = elemswise_sign(*(bottom[0]->cpu_data() + (i * channels) + k));
      ++tmp_data;
    }

    // 3. b_i - u_i, accumulated into the quantization (regularization) term.
    caffe_sub(channels,
              forward_data_.cpu_data(),                // b_i
              bottom[0]->cpu_data() + (i * channels),  // u_i (network output)
              diff_.mutable_cpu_data());               // b_i - u_i
    regular_term += caffe_cpu_dot(channels, diff_.cpu_data(), diff_.cpu_data());

    // Gradient of the regularization term w.r.t. u_i, written into bottom diff.
    caffe_cpu_axpby(channels,
                    (-2 * reg_Termfactor) / batchSize,
                    diff_.cpu_data(),        // b_i - u_i
                    Dtype(1.0),
                    bout + (i * channels));  // dJ/du_i
    // if (i % 40 == 0) { std::cout << sim_term << std::endl; }
  }

  // Gradient of the similarity term: row i of ALPHA_ dotted with each row of U_.
  for (int chi = 0; chi < batchSize; ++chi) {
    for (int chk = 0; chk < channels; ++chk) {
      *(bout + chi * channels + chk) +=
          caffe_cpu_dot(batchSize,
                        ALPHA_.cpu_data() + (chi * batchSize),
                        U_.cpu_data() + (chk * batchSize)) /
          (batchSize * (batchSize - 1));
    }
  }

  sim_term     /= batchSize * batchSize;
  regular_term /= batchSize;
  // std::cout << "sim_term " << sim_term << "  regular_term " << regular_term << std::endl;
  hash_loss = reg_Termfactor * regular_term - sim_term;
  top[0]->mutable_cpu_data()[0] = hash_loss;
}

template <typename Dtype>
void DeepFeatureHashLayer<Dtype>::Backward_cpu(
    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs.";
  }
  // Nothing else to do: the gradient w.r.t. bottom[0] was already written
  // into its diff during Forward_cpu.
}

#ifdef CPU_ONLY
STUB_GPU(DeepFeatureHashLayer);
#endif

INSTANTIATE_CLASS(DeepFeatureHashLayer);
REGISTER_LAYER_CLASS(DeepFeatureHash);

}  // namespace caffe
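The post does not include deep_feature_hash_layer.hpp. Below is a minimal sketch of what the .cpp above implies it declares; the body of elemswise_sign is my assumption, everything else follows from how the members are used:

#ifndef CAFFE_DEEP_FEATURE_HASH_LAYER_HPP_
#define CAFFE_DEEP_FEATURE_HASH_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/layers/loss_layer.hpp"

namespace caffe {

template <typename Dtype>
class DeepFeatureHashLayer : public LossLayer<Dtype> {
 public:
  explicit DeepFeatureHashLayer(const LayerParameter& param)
      : LossLayer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "DeepFeatureHash"; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  // Element-wise sign, used to binarize u_i into b_i (assumed definition).
  inline Dtype elemswise_sign(Dtype x) { return x >= 0 ? Dtype(1) : Dtype(-1); }

  Blob<Dtype> forward_data_;  // b_i = sign(u_i)
  Blob<Dtype> diff_;          // b_i - u_i
  Blob<Dtype> ALPHA_;         // pairwise gradient coefficients
  Blob<Dtype> U_;             // transposed feature matrix
  Dtype reg_Termfactor;       // eta, weight of the quantization term
};

}  // namespace caffe

#endif  // CAFFE_DEEP_FEATURE_HASH_LAYER_HPP_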
A few commented-out debug statements were added along the way and are left in the code.
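For completeness: the deep_hash_param().reg_factor() call implies a matching message in caffe.proto, and the layer is then used from the net definition. A minimal sketch under assumptions; the message layout, the field number 147, the default value, and the blob name fc_hash are mine, not from the original post:

// Added to src/caffe/proto/caffe.proto; the field number must be unused.
message DeepHashParameter {
  optional float reg_factor = 1 [default = 10];  // eta, quantization weight
}

message LayerParameter {
  // ... existing fields ...
  optional DeepHashParameter deep_hash_param = 147;  // hypothetical unused ID
}

And in the train/test prototxt:

layer {
  name: "hash_loss"
  type: "DeepFeatureHash"   # matches REGISTER_LAYER_CLASS(DeepFeatureHash)
  bottom: "fc_hash"         # hash features u_i (hypothetical blob name)
  bottom: "label"
  top: "loss"
  deep_hash_param { reg_factor: 10 }
}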