LeetCode OJ: Word Ladder II, a Detailed Walkthrough of My Solution Process


Word Ladder II


Given two words (start and end), and a dictionary, find all shortest transformation sequence(s) from start to end, such that:

  1. Only one letter can be changed at a time
  2. Each intermediate word must exist in the dictionary

For example,

Given:
start = "hit"
end = "cog"
dict = ["hot","dot","dog","lot","log"]

Return

  [    ["hit","hot","dot","dog","cog"],    ["hit","hot","lot","log","cog"]  ]

Note:

  • All words have the same length.
  • All words contain only lowercase alphabetic characters.
1. At first glance the problem looked easy, so I quickly wrote up the code, and of course: Wrong Answer.

The initial idea was simple: since we need the shortest paths, and all of them, I defined a node struct that carries the whole path traversed so far, and submitted the code below. Wrong Answer.

I then ran it in VS and found it returned only a single sequence each time instead of the full set. Looking at the code more carefully, the error is in the global visited check (the flag set): the same word may legitimately appear in more than one result sequence, so marking every visited word globally is wrong.

struct node {
    string str;
    vector<string> path;          // the whole ladder taken to reach str
    node(string x) : str(x) {}
};

class Solution {
public:
    void bfs(string start, string end, unordered_set<string> dict, vector<vector<string>> &store) {
        queue<node> que;
        set<string> flag;         // globally marks every visited word
        bool found = false;
        int min_ladder = dict.size() + 3;
        node t(start);
        t.path.push_back(start);
        que.push(t);
        flag.insert(start);
        while (!que.empty()) {
            node cur = que.front();
            que.pop();
            for (int i = 0; i < cur.str.size(); i++) {
                node next = cur;
                for (int j = 0; j < 26; j++) {
                    if (cur.str[i] != j + 'a') {
                        next.str[i] = j + 'a';
                        if (next.str == end) {
                            found = true;
                            next.path.push_back(next.str);
                            if (min_ladder >= next.path.size()) {   // only keep sequences of minimum length
                                min_ladder = next.path.size();
                                store.push_back(next.path);
                            }
                        }
                        // the global flag check below is where it goes wrong:
                        // a word may appear in more than one shortest sequence
                        if (dict.find(next.str) != dict.end() && flag.find(next.str) == flag.end()) {
                            next.path.push_back(next.str);
                            flag.insert(next.str);
                            que.push(next);
                        }
                    }
                }
            } // for
        } // while
    }

    vector<vector<string>> findLadders(string start, string end, unordered_set<string> &dict) {
        vector<vector<string>> store;
        bfs(start, end, dict, store);
        return store;
    }
};
2. So I modified the code as follows.

Idea: since words may repeat across different result sequences, we cannot simply mark every visited word with a global flag set. Instead, each path checks its own candidates: because every node already stores the path it has taken, we only need to check whether the newly generated word is already on that particular path; if it is, skip it, otherwise append it. The find() call over next.path is the main change.

struct node {
    string str;
    vector<string> path;
    node(string x) : str(x) {}
};

class Solution {
public:
    void bfs(string start, string end, unordered_set<string> dict, vector<vector<string>> &store) {
        queue<node> que;
        unordered_set<string>::iterator it;
        bool found = false;
        int minS = 0x7fffffff;
        node t(start);
        t.path.push_back(start);
        que.push(t);
        while (!que.empty()) {
            node cur = que.front();
            que.pop();
            for (int i = 0; i < cur.str.size(); i++) {
                for (int j = 0; j < 26; j++) {
                    node next = cur;
                    if (cur.str[i] != j + 'a') {
                        next.str[i] = j + 'a';
                        if (next.str == end) {
                            found = true;
                            next.path.push_back(end);
                            if (minS >= next.path.size()) {
                                minS = next.path.size();
                                store.push_back(next.path);
                            }
                        }
                        it = dict.find(next.str);
                        // main change: only skip a word if it already appears on THIS path,
                        // instead of marking it globally
                        if (!found && it != dict.end() &&
                            find(next.path.begin(), next.path.end(), next.str) == next.path.end()) {
                            next.path.push_back(next.str);
                            que.push(next);
                        }
                    }
                }
            }
        }
    }

    vector<vector<string>> findLadders(string start, string end, unordered_set<string> &dict) {
        vector<vector<string>> store;
        bfs(start, end, dict, store);
        return store;
    }
};
Then I submitted it: TLE (Time Limit Exceeded).

测试用例是{"si","go","se","cm","so","ph","mt","db","mb","sb","kr","ln","tm","le","av","sm","ar","ci","ca","br","ti","ba","to","ra","fa","yo","ow","sn","ya","cr","po","fe","ho","ma","re","or","rn","au","ur",

"rh","sr","tc","lt","lo","as","fr","nb","yb","if","pb","ge","th","pm","rb","sh","co","ga","li","ha","hz","no","bi","di","hi","qa","pi","os","uh","wm","an","me","mo","na","la","st","er","sc","ne",

"mn","mi","am","ex","pt","io","be","fm","ta","tb","ni","mr","pa","he","lr","sq","ye"}                    共95个数据

Running it in Debug mode, the measured time was 10.3027 s.

I figured the find() call was probably the bottleneck: std::find scans linearly from the first iterator up to the second looking for the third argument, so every membership check costs time proportional to the path length.

Even though it costs more memory, I added an unordered_set to the node struct, just to speed up the membership lookup:

struct node {
    string str;
    vector<string> path;
    unordered_set<string> save;   // mirrors path, for fast membership checks
    node(string x) : str(x) {}
};
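For reference, the membership check in the BFS inner loop would then change roughly as below. This is only a sketch of the intended change (the post does not show this version of the loop); the names follow the step-2 code above, and the save set has to be seeded with start and kept in sync with path.

// Sketch only: replacing the linear find() over next.path with a hash lookup.
// Assumes t.save.insert(start) was done at initialization, mirroring t.path.
it = dict.find(next.str);
if (!found && it != dict.end() && next.save.find(next.str) == next.save.end()) {
    next.path.push_back(next.str);
    next.save.insert(next.str);   // keep the set in sync with the path
    que.push(next);
}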

Then Ctrl+F5... damn: 42.013 s, even worse. Maybe set would do better? I swapped unordered_set for set: 24.2155 s, still frighteningly slow, and this is only 95 words. Presumably the whole per-node container gets copied every time a node is copied and enqueued, which more than cancels out the faster lookups.

This approach is a dead end; time to look for another way.

3. Storing every full path inside a node is both slow and memory-hungry, so that is the first thing to change. If paths cannot be stored this way, how should they be stored?

The idea: build all visited words into a tree, which is really just an adjacency list; each entry stores the predecessor words it was reached from, defined as map<string,vector<string>>. The whole purpose of the BFS is to construct this table.

Once the adjacency list is built, we only need to start from end, follow the parent pointers back until we reach start, and add every sequence whose length equals the shortest length to the result set, reversed. Using a list and inserting each new word at the front (push_front) avoids the explicit reversal.
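To make the idea concrete, here is roughly what the parent table looks like for the sample input above (conceptual only; the exact contents depend on the code below):

// start = "hit", end = "cog", dict = ["hot","dot","dog","lot","log"]
// word_map: each word -> the words it was reached from on the previous level
//   "hot" -> { "hit" }
//   "dot" -> { "hot" }     "lot" -> { "hot" }
//   "dog" -> { "dot" }     "log" -> { "lot" }
//   "cog" -> { "dog", "log" }
// Walking back from "cog" and push_front-ing each parent into a list yields
// ["hit","hot","dot","dog","cog"] and ["hit","hot","lot","log","cog"] directly.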

The code is as follows.

The dfs is the walk over the adjacency list, and the map holds the adjacency list itself. Since we need to know which words have already been visited and which words the current level can still reach, two sets are defined: visitCur holds the words visited on the current level, while visitAll tracks overall visiting state. We also introduce minLen (the shortest ladder length), curLev (the number of nodes on the current level) and nextLev (the number of nodes on the next level) to control the queue. Because the search can only move through dict, visitAll is initialized with every word in dict and each visited word is simply erased from it afterwards, so it effectively holds the words that are still available; this also removes the separate "does this word exist in dict?" lookup later.

class Solution {
public:
    // Walk the parent table backwards from cur towards start, prepending words
    // so each sequence comes out in the right order without a final reversal.
    void dfs(string start, string cur, list<string> p, vector<vector<string>> &store,
             map<string, vector<string>> map, int len) {
        if (cur == start) {
            vector<string> q(p.begin(), p.end());
            store.push_back(q);
            return;
        }
        if (len > 0) {
            vector<string> t = map[cur];
            for (string temp : t) {
                p.push_front(temp);
                dfs(start, temp, p, store, map, len - 1);
                p.pop_front();
            }
        }
    }

    void bfs(string start, string end, unordered_set<string> dict, vector<vector<string>> &store) {
        queue<string> que;
        map<string, vector<string>> word_map;    // word -> words it was reached from
        bool found = false;
        unordered_set<string> visitCur;          // words visited on the current level
        unordered_set<string> visitAll(dict);    // starts as the whole dict; visited words are erased
        int minLen = 0;
        int curLev = 1;                          // nodes left on the current level
        int nextLev = 0;                         // nodes on the next level
        word_map[start] = vector<string>();
        word_map[end] = vector<string>();
        for (string temp : dict) {
            word_map[temp] = vector<string>();
        }
        que.push(start);
        visitAll.erase(start);
        visitCur.insert(start);
        while (!que.empty()) {
            string cur = que.front();
            que.pop();
            for (int i = 0; i < cur.size(); i++) {
                for (int j = 0; j < 26; j++) {
                    string next = cur;
                    next[i] = j + 'a';
                    if (next != cur && visitAll.find(next) != visitAll.end()) {
                        if (visitCur.find(next) == visitCur.end()) {
                            visitCur.insert(next);
                            nextLev++;
                            que.push(next);
                        }
                        word_map[next].push_back(cur);
                        if (next == end && !found) {
                            found = true;
                            minLen += 2;
                        }
                    }
                }
            }
            if (--curLev == 0) {                 // finished a level
                if (found) break;
                for (string temp : visitCur) visitAll.erase(temp);
                curLev = nextLev;
                nextLev = 0;
                minLen++;
            }
        }
        if (found) {
            list<string> p;
            p.push_front(end);
            dfs(start, end, p, store, word_map, minLen);  // generateStore(start,end,word_map,store);
        }
    }

    vector<vector<string>> findLadders(string start, string end, unordered_set<string> &dict) {
        vector<vector<string>> store;
        bfs(start, end, dict, store);
        return store;
    }
};


Running this test case takes 0.565607 s, but after submitting, the last test case still times out.

That test case is:

{"dose","ends","dine","jars","prow","soap","guns","hops","cray","hove","ella","hour","lens","jive","wiry","earl","mara","part","flue","putt","rory","bull","york","ruts","lily","vamp",

"bask","peer","boat","dens","lyre","jets","wide","rile","boos","down","path","onyx","mows","toke","soto","dork","nape","mans","loin","jots","male","sits","minn","sale","pets",

"hugo","woke","suds","rugs","vole","warp","mite","pews","lips","pals","nigh","sulk","vice","clod","iowa","gibe","shad","carl","huns","coot","sera","mils","rose","orly","ford",

"void","time","eloy","risk","veep","reps","dolt","hens","tray","melt","rung","rich","saga","lust","yews","rode","many","cods","rape","last","tile","nosy","take","nope","toni","bank",

"jock","jody","diss","nips","bake","lima","wore","kins","cult","hart","wuss","tale","sing","lake","bogy","wigs","kari","magi","bass","pent","tost","fops","bags","duns","will","tart",

"drug","gale","mold","disk","spay","hows","naps","puss","gina","kara","zorn","boll","cams","boas","rave","sets","lego","hays","judy","chap","live","bahs","ohio","nibs","cuts",

"pups","data","kate","rump","hews","mary","stow","fang","bolt","rues","mesh","mice","rise","rant","dune","jell","laws","jove","bode","sung","nils","vila","mode","hued","cell",

"fies","swat","wags","nate","wist","honk","goth","told","oise","wail","tels","sore","hunk","mate","luke","tore","bond","bast","vows","ripe","fond","benz","firs","zeds","wary","baas",

"wins","pair","tags","cost","woes","buns","lend","bops","code","eddy","siva","oops","toed","bale","hutu","jolt","rife","darn","tape","bold","cope","cake","wisp","vats","wave",

"hems","bill","cord","pert","type","kroc","ucla","albs","yoko","silt","pock","drub","puny","fads","mull","pray","mole","talc","east","slay","jamb","mill","dung","jack","lynx","nome",

"leos","lade","sana","tike","cali","toge","pled","mile","mass","leon","sloe","lube","kans","cory","burs","race","toss","mild","tops","maze","city","sadr","bays","poet","volt","laze",

"gold","zuni","shea","gags","fist","ping","pope","cora","yaks","cosy","foci","plan","colo","hume","yowl","craw","pied","toga","lobs","love","lode","duds","bled","juts","gabs","fink",

"rock","pant","wipe","pele","suez","nina","ring","okra","warm","lyle","gape","bead","lead","jane","oink","ware","zibo","inns","mope","hang","made","fobs","gamy","fort","peak",

"gill","dino","dina","tier"}    共336条数据

Time: 0.30337 s, probably because this case has fewer sequences in the result set. Even though it runs faster than the previous case, it still exceeds the time limit.

4. So to improve efficiency, the dfs looks like the smarter place to attack. The recursion is simply too slow (in particular, each recursive call copies the whole adjacency map and the partial path by value), so I rewrote it iteratively: a new method builds the result set with an explicit stack.

void generateStore(string start, string end, map<string, vector<string>> map,
                   vector<vector<string>> &store) {
    vector<string> stack;      // explicit DFS stack over the parent table, seeded with end
    vector<string> seq;        // the current path, stored from end backwards
    stack.push_back(end);
    while (!stack.empty()) {
        string top = stack.back();
        stack.pop_back();
        seq.push_back(top);
        vector<string> &sons = map[top];
        for (string t : sons) {
            stack.push_back(t);
        }
        if (!sons.size()) {
            // no parent recorded: we have walked all the way back to start,
            // so emit seq in reverse as one complete sequence
            int index = store.size();
            store.push_back(vector<string>());
            for (int i = seq.size() - 1; i >= 0; --i) {
                store[index].push_back(seq[i]);
            }
            // backtrack: if top was fa's first-pushed (hence last-popped) parent,
            // fa has no unprocessed parents left and can be popped as well
            top = seq.back();
            seq.pop_back();
            while (!seq.empty()) {
                string fa = seq.back();
                vector<string> bro = map[fa];
                if (top != bro[0]) break;
                seq.pop_back();
                top = fa;
            }
        }
    }
}


Run time: 0.142847 s. Submitted: AC (Accepted).

5. Now try optimizing the BFS.

Change the data structure a bit and prune appropriately: each word now maps to a pair of (its BFS level, the list of words it was reached from), so a predecessor is only recorded when it comes from the immediately preceding level, and the search stops once the current level reaches the level at which end was found.

vector<vector<string>> findLadders(string start, string end, unordered_set<string> &dict) {
    queue<string> que;
    // each word maps to (its BFS level, the list of words it was reached from)
    unordered_map<string, pair<int, vector<string>>> word_map;
    unordered_map<string, pair<int, vector<string>>>::iterator it;
    int ladder = 1;
    int min_ladder = dict.size() + 3;
    int length = start.length();
    que.push(start);
    word_map[start] = pair<int, vector<string>>(1, vector<string>());
    while (!que.empty() && min_ladder > ladder) {   // stop once the level reaches the answer length
        string cur = que.front();
        que.pop();
        ladder = word_map[cur].first;
        for (int i = 0; i < length; i++) {
            string next = cur;
            for (char j = 'a'; j <= 'z'; j++) {
                if (next[i] == j) continue;
                next[i] = j;
                if (next == end) {
                    min_ladder = ladder + 1;
                }
                if (dict.find(next) != dict.end()) {
                    it = word_map.find(next);
                    if (it == word_map.end()) {
                        // first time this word is seen: record its level and predecessor
                        pair<int, vector<string>> t(ladder + 1, vector<string>());
                        t.second.push_back(cur);
                        word_map[next] = t;
                        que.push(next);
                    } else if (it->second.first == ladder + 1) {
                        // already seen on the same level: just record another predecessor
                        it->second.second.push_back(cur);
                    }
                }
            }
        }
    }
    vector<vector<string>> store;
    if (min_ladder == dict.size() + 3) return store;
    list<string> p;
    p.push_front(end);
    // dfs here must accept the new word_map type; see the adapted sketch below
    dfs(start, end, p, store, word_map, min_ladder);
    return store;
}
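The dfs called at the end now receives a word_map of a different type than the step-3 version, so it needs a small adaptation that the post does not show. A minimal sketch, assuming the pair's first member is the level and the second member is the predecessor list:

// Sketch only: dfs adapted to unordered_map<string, pair<int, vector<string>>>.
// Parameter names follow the step-3 version; this is an assumed reconstruction.
void dfs(string start, string cur, list<string> p, vector<vector<string>> &store,
         unordered_map<string, pair<int, vector<string>>> &word_map, int len) {
    if (cur == start) {                              // reached the start word: record one sequence
        vector<string> q(p.begin(), p.end());
        store.push_back(q);
        return;
    }
    if (len > 0) {
        for (string temp : word_map[cur].second) {   // walk each recorded predecessor
            p.push_front(temp);                      // prepend, so no reversal is needed
            dfs(start, temp, p, store, word_map, len - 1);
            p.pop_front();
        }
    }
}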
Run time: 0.279754 s. Submitted: AC.

6. Combine the two optimizations (the pruned BFS from step 5 and the iterative generateStore from step 4).

Final run time: 0.0874874 s, AC.
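The combined code is not shown in the post. A minimal sketch of what the combination might look like, assuming it simply plugs the iterative generateStore from step 4 (adapted to the pair-based word_map) into the pruned BFS of step 5:

// Sketch only: the step-4 iterative reconstruction adapted to the
// pair-based word_map of step 5. This is an assumed reconstruction,
// not the author's posted code.
void generateStore(string end,
                   unordered_map<string, pair<int, vector<string>>> &word_map,
                   vector<vector<string>> &store) {
    vector<string> stack;   // explicit DFS stack over the predecessor table
    vector<string> seq;     // current path from end backwards
    stack.push_back(end);
    while (!stack.empty()) {
        string top = stack.back();
        stack.pop_back();
        seq.push_back(top);
        vector<string> &sons = word_map[top].second;   // predecessors of top
        for (string t : sons) stack.push_back(t);
        if (sons.empty()) {                 // no predecessor recorded: this is start
            store.push_back(vector<string>(seq.rbegin(), seq.rend()));
            top = seq.back();
            seq.pop_back();
            // pop every ancestor whose predecessors have all been processed
            // (top was its first-pushed, hence last-popped, child)
            while (!seq.empty()) {
                string fa = seq.back();
                if (top != word_map[fa].second[0]) break;
                seq.pop_back();
                top = fa;
            }
        }
    }
}
// In findLadders from step 5, the final block would then become:
//     vector<vector<string>> store;
//     if (min_ladder == dict.size() + 3) return store;
//     generateStore(end, word_map, store);
//     return store;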
