Given two words (beginWord and endWord), and a dictionary's word list, find all shortest transformation sequence(s) from beginWord to endWord, such that:
- Only one letter can be changed at a time
- Each transformed word must exist in the word list. Note that beginWord is not a transformed word.
For example,
Given:
beginWord ="hit"
endWord ="cog"
wordList =["hot","dot","dog","lot","log","cog"]
Return
[ ["hit","hot","dot","dog","cog"], ["hit","hot","lot","log","cog"] ]
Note:
- Return an empty list if there is no such transformation sequence.
- All words have the same length.
- All words contain only lowercase alphabetic characters.
- You may assume no duplicates in the word list.
- You may assume beginWord and endWord are non-empty and are not the same.
This problem is a follow-up to Word Ladder, and the difficulty goes up considerably.
At first glance it looks like a DFS problem, but since it asks for the actual transformation sequences, and the shortest ones at that,
BFS is still the right tool, with a few modifications.
Let's start with the first solution, which adapts the most basic BFS.
The main ideas are as follows:
- The queue stores vector<string> instead of single words, so each entry records an entire transformation path.
- Because this is BFS, the first answer found is guaranteed to have the shortest length. Once the dequeued paths reach that length without ending at endWord, there is no need to search further.
- Before the answer is found, whenever the dequeued path is longer than the previous one, the search has advanced one level (one more transformation). At that point every word used in the previous level can be erased from the set so it is never reused.
- Everything else follows the standard BFS approach.
class Solution {
public:
    vector<vector<string>> findLadders(string beginWord, string endWord, vector<string>& wordList) {
        //>> put wordList into a hash set for O(1) lookup
        unordered_set<string> wordSet;
        for(const auto &w : wordList) {
            wordSet.insert(w);
        }
        vector<vector<string>> ans;
        //>> check whether endWord is valid
        if(wordSet.find(endWord) == wordSet.end()) {
            return ans;
        }
        queue<vector<string>> wordQue;
        wordQue.push({beginWord});
        vector<string> usedWords;
        int ansLen = INT_MAX;
        int curLen = 1;
        const int wordLen = beginWord.length();
        while(!wordQue.empty()) {
            vector<string> tmpWordVec = wordQue.front();
            //>> no need to check further
            if(tmpWordVec.size() >= ansLen) {
                break;
            }
            //>> remove all words that were used,
            //>> once one more word has been added to the path
            if(tmpWordVec.size() > curLen) {
                for(const auto &w : usedWords) {
                    wordSet.erase(w);
                }
                usedWords.clear();
                curLen = tmpWordVec.size();
            }
            bool refresh = false;
            const string word = tmpWordVec.back();
            for(int i = 0; i < wordLen; i++) {
                string newWord = word;
                for(char ch = 'a'; ch <= 'z'; ch++) {
                    newWord[i] = ch;
                    if(wordSet.find(newWord) != wordSet.end()) {
                        usedWords.push_back(newWord);
                        vector<string> vec = tmpWordVec;
                        vec.push_back(newWord);
                        if(endWord.compare(newWord) == 0) {
                            //>> found an ans
                            ans.push_back(vec);
                            //>> record the min length the first time an ans is found
                            if(ansLen == INT_MAX) {
                                ansLen = vec.size();
                            }
                            refresh = true;
                            break;
                        }
                        else {
                            //>> not the ans yet
                            wordQue.push(vec);
                        }
                    }
                }
                if(refresh) {
                    break;
                }
            }
            wordQue.pop();
        }
        return ans;
    }
};
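For reference, here is a minimal driver (my own addition, not from the original post) that feeds the example input to findLadders; the includes and the using-directive are assumptions needed to compile the LeetCode-style class on its own:

#include <climits>
#include <iostream>
#include <queue>
#include <string>
#include <unordered_set>
#include <vector>
using namespace std;

//>> ... class Solution as above ...

int main() {
    vector<string> wordList = {"hot","dot","dog","lot","log","cog"};
    Solution sol;
    for(const auto &path : sol.findLadders("hit", "cog", wordList)) {
        for(const auto &w : path) {
            cout << w << ' ';
        }
        cout << '\n';
    }
    //>> expected output (the order of the two paths may vary):
    //>> hit hot dot dog cog
    //>> hit hot lot log cog
    return 0;
}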
The second solution switches to bidirectional BFS, searching from beginWord and endWord at the same time. The idea is as follows:
- Prepare two sets, headSet and tailSet, for the search starting from the begin side and the search starting from the end side.
- Also prepare two unordered_multimap<string, vector<string>>, which record the path(s) by which each word was reached.
- Each round always expands whichever set is currently smaller, so the set alone does not tell us whether we are moving from the head side or the tail side. An extra flag records the direction, so that when a new word is added to a path we know whether to append or prepend it.
- Since a multimap allows one key to hold several values, equal_range is used to collect all paths associated with a word (see the short sketch right after this list).
- After each pass over the smaller set, before swapping it for nextReachableSet, the used words and the map entries that are no longer needed are erased to speed up later rounds.
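Before the code, a tiny standalone sketch of the multimap mechanics described above (my own illustration; "hog" is a made-up word, not from the problem's word list):

#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>
using namespace std;

int main() {
    //>> one key ("dog") can hold several paths, just like headMap in the solution
    unordered_multimap<string, vector<string>> pathMap;
    pathMap.insert({"dog", {"hit","hot","dot","dog"}});
    pathMap.insert({"dog", {"hit","hot","hog","dog"}}); //>> hypothetical second path
    //>> equal_range returns [first, second) covering every value stored under the key
    auto range = pathMap.equal_range("dog");
    for(auto it = range.first; it != range.second; it++) {
        for(const auto &w : it->second) {
            cout << w << ' ';
        }
        cout << '\n';
    }
    return 0;
}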
class Solution {
public:
    vector<vector<string>> findLadders(string beginWord, string endWord, vector<string>& wordList) {
        //>> put wordList into a hash set for O(1) lookup
        unordered_set<string> wordSet;
        for(const auto &w : wordList) {
            wordSet.insert(w);
        }
        vector<vector<string>> ans;
        if(wordSet.find(endWord) == wordSet.end()) {
            //>> endWord itself is not valid
            return ans;
        }
        else {
            //>> remove beginWord and endWord from the hash set
            wordSet.erase(beginWord);
            wordSet.erase(endWord);
        }
        //>> prepare a set and a map for both ends
        unordered_set<string> headSet, tailSet, *smallerSetPtr, *biggerSetPtr;
        unordered_multimap<string, vector<string>> headMap, tailMap, *smallerMapPtr, *biggerMapPtr;
        headSet.insert(beginWord);
        headMap.insert({beginWord, {beginWord}});
        tailSet.insert(endWord);
        tailMap.insert({endWord, {endWord}});
        bool isOver = false;
        bool isHeadSmaller;
        const int wordLen = beginWord.length();
        //>> use BFS from both ends
        while(!headSet.empty() && !tailSet.empty()) {
            //>> pick the smaller set to iterate over
            if(headSet.size() < tailSet.size()) {
                smallerSetPtr = &headSet;
                smallerMapPtr = &headMap;
                biggerSetPtr = &tailSet;
                biggerMapPtr = &tailMap;
                isHeadSmaller = true;
            }
            else {
                smallerSetPtr = &tailSet;
                smallerMapPtr = &tailMap;
                biggerSetPtr = &headSet;
                biggerMapPtr = &headMap;
                isHeadSmaller = false;
            }
            unordered_set<string> nextReachableSet;
            for(const auto &w : *smallerSetPtr) {
                vector<vector<string>> prvSourceVec;
                for(int i = 0; i < wordLen; i++) {
                    string newWord = w;
                    for(char ch = 'a'; ch <= 'z'; ch++) {
                        newWord[i] = ch; //>> transform one letter each time
                        //>> the two searches meet: found an ans
                        if(biggerSetPtr->find(newWord) != biggerSetPtr->end()) {
                            //>> get all paths to w
                            auto smallerMapRange = smallerMapPtr->equal_range(w);
                            vector<vector<string>> smallerMapVec;
                            for(auto it = smallerMapRange.first; it != smallerMapRange.second; it++) {
                                smallerMapVec.push_back(it->second);
                            }
                            //>> get all paths to newWord
                            auto biggerMapRange = biggerMapPtr->equal_range(newWord);
                            //>> combine them to get the ans
                            for(const auto &v : smallerMapVec) {
                                for(auto it = biggerMapRange.first; it != biggerMapRange.second; it++) {
                                    vector<string> tmpVec = (isHeadSmaller) ? v : it->second;
                                    const auto startIt = (isHeadSmaller) ? it->second.begin() : v.begin();
                                    const auto endIt = (isHeadSmaller) ? it->second.end() : v.end();
                                    for(auto tmpIt = startIt; tmpIt != endIt; tmpIt++) {
                                        tmpVec.push_back(*tmpIt);
                                    }
                                    ans.push_back(tmpVec);
                                }
                            }
                            if(!isOver) {
                                isOver = true;
                            }
                        }
                        else if(wordSet.find(newWord) != wordSet.end() && !isOver) {
                            //>> newWord is valid but not the ans
                            nextReachableSet.insert(newWord);
                            //>> get all previous source paths
                            if(prvSourceVec.empty()) {
                                auto sourceRange = smallerMapPtr->equal_range(w);
                                for(auto it = sourceRange.first; it != sourceRange.second; it++) {
                                    prvSourceVec.push_back(it->second);
                                }
                            }
                            //>> put them into the hash map
                            for(const auto &v : prvSourceVec) {
                                vector<string> nextVec = v;
                                if(isHeadSmaller) {
                                    nextVec.push_back(newWord);
                                }
                                else {
                                    nextVec.insert(nextVec.begin(), newWord);
                                }
                                smallerMapPtr->insert({newWord, nextVec});
                            }
                        }
                    }
                }
            }
            if(isOver) {
                break;
            }
            //>> remove used words
            for(const auto &w : nextReachableSet) {
                wordSet.erase(w);
            }
            for(const auto &w : *smallerSetPtr) {
                smallerMapPtr->erase(w);
            }
            //>> change smallerSet to nextReachableSet
            *smallerSetPtr = nextReachableSet;
        }
        return ans;
    }
};
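The driver shown after the first solution works unchanged with this class as well, and for the example input both versions return the same two shortest paths; the bidirectional search simply tends to visit far fewer words on large word lists.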