Implemented US-English G2P using sound change rules
This commit is contained in:
parent
7a763e8755
commit
4ed5908627
|
@ -191,6 +191,7 @@ set(SOURCE_FILES
|
||||||
src/pairs.h
|
src/pairs.h
|
||||||
src/Exporter.cpp src/Exporter.h
|
src/Exporter.cpp src/Exporter.h
|
||||||
src/tokenization.cpp src/tokenization.h
|
src/tokenization.cpp src/tokenization.h
|
||||||
|
src/g2p.cpp src/g2p.h
|
||||||
)
|
)
|
||||||
add_executable(rhubarb ${SOURCE_FILES})
|
add_executable(rhubarb ${SOURCE_FILES})
|
||||||
target_link_libraries(rhubarb ${Boost_LIBRARIES} cppFormat sphinxbase pocketSphinx flite)
|
target_link_libraries(rhubarb ${Boost_LIBRARIES} cppFormat sphinxbase pocketSphinx flite)
|
||||||
|
@ -205,15 +206,18 @@ set(TEST_FILES
|
||||||
tests/ContinuousTimelineTests.cpp
|
tests/ContinuousTimelineTests.cpp
|
||||||
tests/pairsTests.cpp
|
tests/pairsTests.cpp
|
||||||
tests/tokenizationTests.cpp
|
tests/tokenizationTests.cpp
|
||||||
|
tests/g2pTests.cpp
|
||||||
src/stringTools.cpp src/stringTools.h
|
src/stringTools.cpp src/stringTools.h
|
||||||
src/Timeline.h
|
src/Timeline.h
|
||||||
src/TimeRange.cpp src/TimeRange.h
|
src/TimeRange.cpp src/TimeRange.h
|
||||||
src/centiseconds.cpp src/centiseconds.h
|
src/centiseconds.cpp src/centiseconds.h
|
||||||
src/pairs.h
|
src/pairs.h
|
||||||
|
src/Phone.cpp src/Phone.h
|
||||||
src/tokenization.cpp src/tokenization.h
|
src/tokenization.cpp src/tokenization.h
|
||||||
|
src/g2p.cpp src/g2p.h
|
||||||
)
|
)
|
||||||
add_executable(runTests ${TEST_FILES})
|
add_executable(runTests ${TEST_FILES})
|
||||||
target_link_libraries(runTests gtest gmock gmock_main flite)
|
target_link_libraries(runTests gtest gmock gmock_main flite cppFormat)
|
||||||
|
|
||||||
set(CPACK_PACKAGE_NAME ${appName})
|
set(CPACK_PACKAGE_NAME ${appName})
|
||||||
string(REPLACE " " "-" CPACK_PACKAGE_NAME "${CPACK_PACKAGE_NAME}")
|
string(REPLACE " " "-" CPACK_PACKAGE_NAME "${CPACK_PACKAGE_NAME}")
|
||||||
|
|
16
LICENSE.md
16
LICENSE.md
|
@ -146,4 +146,18 @@ The [CMU Flite](http://www.festvox.org/flite/) engine is released under a **BSD*
|
||||||
> 2. Any modifications must be clearly marked as such.
|
> 2. Any modifications must be clearly marked as such.
|
||||||
> 3. Original authors' names are not deleted.
|
> 3. Original authors' names are not deleted.
|
||||||
> 4. The authors' names are not used to endorse or promote products derived from this software without specific prior written permission.
|
> 4. The authors' names are not used to endorse or promote products derived from this software without specific prior written permission.
|
||||||
> CARNEGIE MELLON UNIVERSITY AND THE CONTRIBUTORS TO THIS WORK DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY NOR THE CONTRIBUTORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
> CARNEGIE MELLON UNIVERSITY AND THE CONTRIBUTORS TO THIS WORK DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY NOR THE CONTRIBUTORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
|
|
||||||
|
### Sound Change Applier
|
||||||
|
|
||||||
|
The [Sound Change Applier](http://www.zompist.com/sounds.htm) and its [rule set for American English](http://www.zompist.com/spell.html) are released under the **MIT License (MIT)**.
|
||||||
|
|
||||||
|
> **The MIT License (MIT)**
|
||||||
|
|
||||||
|
> Copyright (c) 2000 Mark Rosenfelder
|
||||||
|
|
||||||
|
> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
> The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
@ -0,0 +1,95 @@
|
||||||
|
// Use quotes, not angle brackets, for the project's own header --
// consistent with the rest of the project (e.g. g2pTests.cpp).
#include "g2p.h"

#include <regex>

#include "stringTools.h"

// NOTE(review): wordToPhones uses fmt::format; the fmt header appears to be
// pulled in transitively -- confirm and consider including it explicitly.

using std::vector;
using std::wstring;
using std::regex;
using std::wregex;
using std::invalid_argument;
using std::pair;
|
||||||
|
|
||||||
|
// Returns the ordered list of regex find/replace rules that rewrite an
// English word's spelling into the intermediate phonetic notation consumed
// by charToPhone. Order matters: rules are meant to be applied first to last.
const vector<pair<wregex, wstring>>& getReplacementRules() {
	static vector<pair<wregex, wstring>> rules {
		// The bulk of the rules is machine-generated from the Sound Change
		// Applier rule set; see g2pRules.rb.
		#include "g2pRules.cpp"

		// Turn bigrams into unigrams for easier conversion
		{ wregex(L"ôw"), L"Ω" },
		{ wregex(L"öy"), L"ω" },
		{ wregex(L"@r"), L"ɝ" }
	};
	return rules;
}
|
||||||
|
|
||||||
|
// Maps a single character of the intermediate phonetic notation (the output
// of the replacement rules) to a phone.
// Returns Phone::Unknown for any character without a mapping.
Phone charToPhone(wchar_t c) {
	// For reference, see http://www.zompist.com/spell.html
	switch (c) {
		// Vowels and semivowels
		case L'ä': return Phone::EY;
		case L'â': return Phone::AE;
		case L'ë': return Phone::IY;
		case L'ê': return Phone::EH;
		case L'ï': return Phone::AY;
		case L'î': return Phone::IH;
		case L'ö': return Phone::OW;
		case L'ô': return Phone::AA; // could also be AO/AH
		case L'ü': return Phone::UW; // really Y+UW
		case L'û': return Phone::AH; // [ʌ] as in b[u]t
		case L'u': return Phone::UW;
		case L'ò': return Phone::AO;
		case L'ù': return Phone::UH;
		case L'@': return Phone::AH; // [ə] as in [a]lone
		case L'Ω': return Phone::AW;
		case L'ω': return Phone::OY;
		case L'y': return Phone::Y;
		case L'w': return Phone::W;
		case L'ɝ': return Phone::ER;
		// Consonants
		case L'p': return Phone::P;
		case L'b': return Phone::B;
		case L't': return Phone::T;
		case L'd': return Phone::D;
		case L'g': return Phone::G;
		case L'k': return Phone::K;
		case L'm': return Phone::M;
		case L'n': return Phone::N;
		case L'ñ': return Phone::NG;
		case L'f': return Phone::F;
		case L'v': return Phone::V;
		case L'+': return Phone::TH; // also covers DH
		case L's': return Phone::S;
		case L'z': return Phone::Z;
		case L'$': return Phone::SH; // also covers ZH
		case L'ç': return Phone::CH;
		case L'j': return Phone::JH;
		case L'r': return Phone::R;
		case L'l': return Phone::L;
		case L'h': return Phone::HH;
	}
	return Phone::Unknown;
}
|
||||||
|
|
||||||
|
// Converts an English word to a phone sequence by repeatedly applying
// spelling-to-sound replacement rules, then mapping each character of the
// resulting intermediate notation to a phone.
// The word may only contain the lowercase letters a-z and apostrophes;
// any other character triggers std::invalid_argument.
vector<Phone> wordToPhones(const std::string& word) {
	static regex validWord("^[a-z']*$");
	if (!regex_match(word, validWord)) {
		throw invalid_argument(fmt::format("Word '{}' contains illegal characters.", word));
	}

	wstring wideWord = latin1ToWide(word);
	for (const auto& rule : getReplacementRules()) {
		const wregex& regex = rule.first;
		const wstring& replacement = rule.second;

		// Repeatedly apply rule until there is no more change
		bool changed;
		do {
			wstring tmp = regex_replace(wideWord, regex, replacement);
			changed = tmp != wideWord;
			// Swap instead of copy-assigning to avoid an extra string
			// allocation on every iteration
			wideWord.swap(tmp);
		} while (changed);
	}

	vector<Phone> result;
	result.reserve(wideWord.size());
	for (wchar_t c : wideWord) {
		result.push_back(charToPhone(c));
	}
	return result;
}
|
|
@ -0,0 +1,6 @@
|
||||||
|
#pragma once

// Include what we use: the declaration below needs std::string as well as
// std::vector (previously std::string was only available transitively).
#include <string>
#include <vector>

#include "Phone.h"

// Converts a single English word to a phone sequence using sound change
// rules (grapheme-to-phoneme conversion).
// The word may only contain lowercase letters a-z and apostrophes;
// otherwise, std::invalid_argument is thrown.
std::vector<Phone> wordToPhones(const std::string& word);
|
|
@ -0,0 +1,208 @@
|
||||||
|
// Generated by g2pRules.rb; don't modify by hand!
|
||||||
|
|
||||||
|
//
|
||||||
|
// Rules
|
||||||
|
//
|
||||||
|
// get rid of some digraphs
|
||||||
|
{ wregex(L"ch"), L"ç" },
|
||||||
|
{ wregex(L"sh"), L"$" },
|
||||||
|
{ wregex(L"ph"), L"f" },
|
||||||
|
{ wregex(L"th"), L"+" },
|
||||||
|
{ wregex(L"qu"), L"kw" },
|
||||||
|
// and other spelling-level changes
|
||||||
|
{ wregex(L"w(r)"), L"$1" },
|
||||||
|
{ wregex(L"w(ho)"), L"$1" },
|
||||||
|
{ wregex(L"(w)h"), L"$1" },
|
||||||
|
{ wregex(L"(^r)h"), L"$1" },
|
||||||
|
{ wregex(L"(x)h"), L"$1" },
|
||||||
|
{ wregex(L"([aeiouäëïöüâêîôûùò@])h($)"), L"$1$2" },
|
||||||
|
{ wregex(L"(^e)x([aeiouäëïöüâêîôûùò@])"), L"$1gz$2" },
|
||||||
|
{ wregex(L"x"), L"ks" },
|
||||||
|
{ wregex(L"'"), L"" },
|
||||||
|
// gh is particularly variable
|
||||||
|
{ wregex(L"gh([aeiouäëïöüâêîôûùò@])"), L"g$1" },
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])a(gh)"), L"$1ä$2" }, { wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])e(gh)"), L"$1ë$2" }, { wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])i(gh)"), L"$1ï$2" }, { wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])o(gh)"), L"$1ö$2" }, { wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])u(gh)"), L"$1ü$2" }, { wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])â(gh)"), L"$1ä$2" }, { wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])ê(gh)"), L"$1ë$2" }, { wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])î(gh)"), L"$1ï$2" }, { wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])ô(gh)"), L"$1ö$2" }, { wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])û(gh)"), L"$1ü$2" },
|
||||||
|
{ wregex(L"ough(t)"), L"ò$1" },
|
||||||
|
{ wregex(L"augh(t)"), L"ò$1" },
|
||||||
|
{ wregex(L"ough"), L"ö" },
|
||||||
|
{ wregex(L"gh"), L"" },
|
||||||
|
// unpronounceable combinations
|
||||||
|
{ wregex(L"(^)g(n)"), L"$1$2" },
|
||||||
|
{ wregex(L"(^)k(n)"), L"$1$2" },
|
||||||
|
{ wregex(L"(^)m(n)"), L"$1$2" },
|
||||||
|
{ wregex(L"(^)p(t)"), L"$1$2" },
|
||||||
|
{ wregex(L"(^)p(s)"), L"$1$2" },
|
||||||
|
{ wregex(L"(^)t(m)"), L"$1$2" },
|
||||||
|
// medial y = i
|
||||||
|
{ wregex(L"(^[bcdfghjklmnpqrstvwxyzç+$ñ])y($)"), L"$1ï$2" },
|
||||||
|
{ wregex(L"(^[bcdfghjklmnpqrstvwxyzç+$ñ]{2})y($)"), L"$1ï$2" },
|
||||||
|
{ wregex(L"(^[bcdfghjklmnpqrstvwxyzç+$ñ]{3})y($)"), L"$1ï$2" },
|
||||||
|
{ wregex(L"ey"), L"ë" },
|
||||||
|
{ wregex(L"ay"), L"ä" },
|
||||||
|
{ wregex(L"oy"), L"öy" },
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])y([bcdfghjklmnpqrstvwxyzç+$ñ])"), L"$1i$2" },
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])y($)"), L"$1i$2" },
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])y(e$)"), L"$1i$2" },
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ]{2})ie($)"), L"$1ï$2" },
|
||||||
|
{ wregex(L"(^[bcdfghjklmnpqrstvwxyzç+$ñ])ie($)"), L"$1ï$2" },
|
||||||
|
// sSl can simplify
|
||||||
|
{ wregex(L"(s)t(l[aeiouäëïöüâêîôûùò@]$)"), L"$1$2" },
|
||||||
|
// affrication of t + front vowel
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñaeiouäëïöüâêîôûùò@])ci([aeiouäëïöüâêîôûùò@])"), L"$1$$2" },
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñaeiouäëïöüâêîôûùò@])ti([aeiouäëïöüâêîôûùò@])"), L"$1$$2" },
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñaeiouäëïöüâêîôûùò@])tu([aeiouäëïöüâêîôûùò@])"), L"$1çu$2" },
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñaeiouäëïöüâêîôûùò@])tu([rl][aeiouäëïöüâêîôûùò@])"), L"$1çu$2" },
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])si(o)"), L"$1$$2" },
|
||||||
|
{ wregex(L"([aeiouäëïöüâêîôûùò@])si(o)"), L"$1j$2" },
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])s(ur)"), L"$1$$2" },
|
||||||
|
{ wregex(L"([aeiouäëïöüâêîôûùò@])s(ur)"), L"$1j$2" },
|
||||||
|
{ wregex(L"(k)s(u[aeiouäëïöüâêîôûùò@])"), L"$1$$2" },
|
||||||
|
{ wregex(L"(k)s(u[rl])"), L"$1$$2" },
|
||||||
|
// intervocalic s
|
||||||
|
{ wregex(L"([eiou])s([aeiouäëïöüâêîôûùò@])"), L"$1z$2" },
|
||||||
|
// al to ol (do this before respelling)
|
||||||
|
{ wregex(L"a(ls)"), L"ò$1" },
|
||||||
|
{ wregex(L"a(lr)"), L"ò$1" },
|
||||||
|
{ wregex(L"a(l{2}$)"), L"ò$1" },
|
||||||
|
{ wregex(L"a(lm(?:[aeiouäëïöüâêîôûùò@])?$)"), L"ò$1" },
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])a(l[td+])"), L"$1ò$2" },
|
||||||
|
{ wregex(L"(^)a(l[td+])"), L"$1ò$2" },
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñaeiouäëïöüâêîôûùò@])al(k)"), L"$1ò$2" },
|
||||||
|
// soft c and g
|
||||||
|
{ wregex(L"c([eiêîy])"), L"s$1" },
|
||||||
|
{ wregex(L"c"), L"k" },
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñaeiouäëïöüâêîôûùò@])ge(a)"), L"$1j$2" },
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñaeiouäëïöüâêîôûùò@])ge(o)"), L"$1j$2" },
|
||||||
|
{ wregex(L"g([eiêîy])"), L"j$1" },
|
||||||
|
// init/final guF was there just to harden the g
|
||||||
|
{ wregex(L"(^)gu([eiêîy])"), L"$1g$2" },
|
||||||
|
{ wregex(L"gu(e$)"), L"g$1" },
|
||||||
|
// untangle reverse-written final liquids
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])re($)"), L"$1@r$2" },
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])le($)"), L"$1@l$2" },
|
||||||
|
// vowels are long medially
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])a([bcdfghjklmnpqrstvwxyzç+$ñ][aeiouäëïöüâêîôûùò@])"), L"$1ä$2" }, { wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])e([bcdfghjklmnpqrstvwxyzç+$ñ][aeiouäëïöüâêîôûùò@])"), L"$1ë$2" }, { wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])i([bcdfghjklmnpqrstvwxyzç+$ñ][aeiouäëïöüâêîôûùò@])"), L"$1ï$2" }, { wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])o([bcdfghjklmnpqrstvwxyzç+$ñ][aeiouäëïöüâêîôûùò@])"), L"$1ö$2" }, { wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])u([bcdfghjklmnpqrstvwxyzç+$ñ][aeiouäëïöüâêîôûùò@])"), L"$1ü$2" },
|
||||||
|
{ wregex(L"(^)a([bcdfghjklmnpqrstvwxyzç+$ñ][aeiouäëïöüâêîôûùò@])"), L"$1ä$2" }, { wregex(L"(^)e([bcdfghjklmnpqrstvwxyzç+$ñ][aeiouäëïöüâêîôûùò@])"), L"$1ë$2" }, { wregex(L"(^)i([bcdfghjklmnpqrstvwxyzç+$ñ][aeiouäëïöüâêîôûùò@])"), L"$1ï$2" }, { wregex(L"(^)o([bcdfghjklmnpqrstvwxyzç+$ñ][aeiouäëïöüâêîôûùò@])"), L"$1ö$2" }, { wregex(L"(^)u([bcdfghjklmnpqrstvwxyzç+$ñ][aeiouäëïöüâêîôûùò@])"), L"$1ü$2" },
|
||||||
|
// and short before 2 consonants or a final one
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])a([bcdfghjklmnpqrstvwxyzç+$ñ]{2})"), L"$1â$2" }, { wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])e([bcdfghjklmnpqrstvwxyzç+$ñ]{2})"), L"$1ê$2" }, { wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])i([bcdfghjklmnpqrstvwxyzç+$ñ]{2})"), L"$1î$2" }, { wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])o([bcdfghjklmnpqrstvwxyzç+$ñ]{2})"), L"$1ô$2" }, { wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])u([bcdfghjklmnpqrstvwxyzç+$ñ]{2})"), L"$1û$2" },
|
||||||
|
{ wregex(L"(^)a([bcdfghjklmnpqrstvwxyzç+$ñ]{2})"), L"$1â$2" }, { wregex(L"(^)e([bcdfghjklmnpqrstvwxyzç+$ñ]{2})"), L"$1ê$2" }, { wregex(L"(^)i([bcdfghjklmnpqrstvwxyzç+$ñ]{2})"), L"$1î$2" }, { wregex(L"(^)o([bcdfghjklmnpqrstvwxyzç+$ñ]{2})"), L"$1ô$2" }, { wregex(L"(^)u([bcdfghjklmnpqrstvwxyzç+$ñ]{2})"), L"$1û$2" },
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])a([bcdfghjklmnpqrstvwxyzç+$ñ]$)"), L"$1â$2" }, { wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])e([bcdfghjklmnpqrstvwxyzç+$ñ]$)"), L"$1ê$2" }, { wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])i([bcdfghjklmnpqrstvwxyzç+$ñ]$)"), L"$1î$2" }, { wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])o([bcdfghjklmnpqrstvwxyzç+$ñ]$)"), L"$1ô$2" }, { wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñ])u([bcdfghjklmnpqrstvwxyzç+$ñ]$)"), L"$1û$2" },
|
||||||
|
{ wregex(L"(^)a([bcdfghjklmnpqrstvwxyzç+$ñ]$)"), L"$1â$2" }, { wregex(L"(^)e([bcdfghjklmnpqrstvwxyzç+$ñ]$)"), L"$1ê$2" }, { wregex(L"(^)i([bcdfghjklmnpqrstvwxyzç+$ñ]$)"), L"$1î$2" }, { wregex(L"(^)o([bcdfghjklmnpqrstvwxyzç+$ñ]$)"), L"$1ô$2" }, { wregex(L"(^)u([bcdfghjklmnpqrstvwxyzç+$ñ]$)"), L"$1û$2" },
|
||||||
|
// special but general rules
|
||||||
|
{ wregex(L"î(nd$)"), L"ï$1" },
|
||||||
|
{ wregex(L"ô(s{2}$)"), L"ò$1" },
|
||||||
|
{ wregex(L"ô(g$)"), L"ò$1" },
|
||||||
|
{ wregex(L"ô(f[bcdfghjklmnpqrstvwxyzç+$ñ])"), L"ò$1" },
|
||||||
|
{ wregex(L"ô(l[td+])"), L"ö$1" },
|
||||||
|
{ wregex(L"(w)â(\\$)"), L"$1ò$2" },
|
||||||
|
{ wregex(L"(w)â((?:t)?ç)"), L"$1ò$2" },
|
||||||
|
{ wregex(L"(w)â([tdns+])"), L"$1ô$2" },
|
||||||
|
// soft gn
|
||||||
|
{ wregex(L"îg([mnñ]$)"), L"ï$1" },
|
||||||
|
{ wregex(L"îg([mnñ][bcdfghjklmnpqrstvwxyzç+$ñ])"), L"ï$1" },
|
||||||
|
{ wregex(L"(ei)g(n)"), L"$1$2" },
|
||||||
|
// handle ous before removing -e
|
||||||
|
{ wregex(L"ou(s$)"), L"@$1" },
|
||||||
|
{ wregex(L"ou(s[bcdfghjklmnpqrstvwxyzç+$ñ])"), L"@$1" },
|
||||||
|
// remove silent -e
|
||||||
|
{ wregex(L"([aeiouäëïöüâêîôûùò@][bcdfghjklmnpqrstvwxyzç+$ñ](?:[bcdfghjklmnpqrstvwxyzç+$ñ])?(?:[bcdfghjklmnpqrstvwxyzç+$ñ])?)e($)"), L"$1$2" },
|
||||||
|
// common suffixes that hide a silent e
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñaeiouäëïöüâêîôûùò@]{3})ë(mênt$)"), L"$1$2" },
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñaeiouäëïöüâêîôûùò@]{3})ë(nês{2}$)"), L"$1$2" },
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñaeiouäëïöüâêîôûùò@]{3})ë(li$)"), L"$1$2" },
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñaeiouäëïöüâêîôûùò@]{3})ë(fûl$)"), L"$1$2" },
|
||||||
|
// another common suffix
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñaeiouäëïöüâêîôûùò@]{3})ï(nês{2}$)"), L"$1ë$2" },
|
||||||
|
// shorten (1-char) weak penults after a long
|
||||||
|
// note: this error breaks almost as many words as it fixes...
|
||||||
|
{ wregex(L"([äëïöüäëïöüäëïöüùò@][bcdfghjklmnpqrstvwxyzç+$ñ](?:[bcdfghjklmnpqrstvwxyzç+$ñ])?(?:[bcdfghjklmnpqrstvwxyzç+$ñ])?)ä([bcdfghjklmnpqrstvwxyzç+$ñ][aeiouäëïöüâêîôûùò@]$)"), L"$1â$2" }, { wregex(L"([äëïöüäëïöüäëïöüùò@][bcdfghjklmnpqrstvwxyzç+$ñ](?:[bcdfghjklmnpqrstvwxyzç+$ñ])?(?:[bcdfghjklmnpqrstvwxyzç+$ñ])?)ë([bcdfghjklmnpqrstvwxyzç+$ñ][aeiouäëïöüâêîôûùò@]$)"), L"$1ê$2" }, { wregex(L"([äëïöüäëïöüäëïöüùò@][bcdfghjklmnpqrstvwxyzç+$ñ](?:[bcdfghjklmnpqrstvwxyzç+$ñ])?(?:[bcdfghjklmnpqrstvwxyzç+$ñ])?)ï([bcdfghjklmnpqrstvwxyzç+$ñ][aeiouäëïöüâêîôûùò@]$)"), L"$1î$2" }, { wregex(L"([äëïöüäëïöüäëïöüùò@][bcdfghjklmnpqrstvwxyzç+$ñ](?:[bcdfghjklmnpqrstvwxyzç+$ñ])?(?:[bcdfghjklmnpqrstvwxyzç+$ñ])?)ö([bcdfghjklmnpqrstvwxyzç+$ñ][aeiouäëïöüâêîôûùò@]$)"), L"$1ô$2" }, { wregex(L"([äëïöüäëïöüäëïöüùò@][bcdfghjklmnpqrstvwxyzç+$ñ](?:[bcdfghjklmnpqrstvwxyzç+$ñ])?(?:[bcdfghjklmnpqrstvwxyzç+$ñ])?)ü([bcdfghjklmnpqrstvwxyzç+$ñ][aeiouäëïöüâêîôûùò@]$)"), L"$1û$2" }, { wregex(L"([äëïöüäëïöüäëïöüùò@][bcdfghjklmnpqrstvwxyzç+$ñ](?:[bcdfghjklmnpqrstvwxyzç+$ñ])?(?:[bcdfghjklmnpqrstvwxyzç+$ñ])?)ä([bcdfghjklmnpqrstvwxyzç+$ñ][aeiouäëïöüâêîôûùò@]$)"), L"$1â$2" }, { wregex(L"([äëïöüäëïöüäëïöüùò@][bcdfghjklmnpqrstvwxyzç+$ñ](?:[bcdfghjklmnpqrstvwxyzç+$ñ])?(?:[bcdfghjklmnpqrstvwxyzç+$ñ])?)ë([bcdfghjklmnpqrstvwxyzç+$ñ][aeiouäëïöüâêîôûùò@]$)"), L"$1ê$2" }, { wregex(L"([äëïöüäëïöüäëïöüùò@][bcdfghjklmnpqrstvwxyzç+$ñ](?:[bcdfghjklmnpqrstvwxyzç+$ñ])?(?:[bcdfghjklmnpqrstvwxyzç+$ñ])?)ï([bcdfghjklmnpqrstvwxyzç+$ñ][aeiouäëïöüâêîôûùò@]$)"), L"$1î$2" }, { wregex(L"([äëïöüäëïöüäëïöüùò@][bcdfghjklmnpqrstvwxyzç+$ñ](?:[bcdfghjklmnpqrstvwxyzç+$ñ])?(?:[bcdfghjklmnpqrstvwxyzç+$ñ])?)ö([bcdfghjklmnpqrstvwxyzç+$ñ][aeiouäëïöüâêîôûùò@]$)"), L"$1ô$2" }, { wregex(L"([äëïöüäëïöüäëïöüùò@][bcdfghjklmnpqrstvwxyzç+$ñ](?:[bcdfghjklmnpqrstvwxyzç+$ñ])?(?:[bcdfghjklmnpqrstvwxyzç+$ñ])?)ü([bcdfghjklmnpqrstvwxyzç+$ñ][aeiouäëïöüâêîôûùò@]$)"), L"$1û$2" }, { 
wregex(L"([äëïöüäëïöüäëïöüùò@][bcdfghjklmnpqrstvwxyzç+$ñ](?:[bcdfghjklmnpqrstvwxyzç+$ñ])?(?:[bcdfghjklmnpqrstvwxyzç+$ñ])?)ä([bcdfghjklmnpqrstvwxyzç+$ñ][aeiouäëïöüâêîôûùò@]$)"), L"$1â$2" }, { wregex(L"([äëïöüäëïöüäëïöüùò@][bcdfghjklmnpqrstvwxyzç+$ñ](?:[bcdfghjklmnpqrstvwxyzç+$ñ])?(?:[bcdfghjklmnpqrstvwxyzç+$ñ])?)ë([bcdfghjklmnpqrstvwxyzç+$ñ][aeiouäëïöüâêîôûùò@]$)"), L"$1ê$2" }, { wregex(L"([äëïöüäëïöüäëïöüùò@][bcdfghjklmnpqrstvwxyzç+$ñ](?:[bcdfghjklmnpqrstvwxyzç+$ñ])?(?:[bcdfghjklmnpqrstvwxyzç+$ñ])?)ï([bcdfghjklmnpqrstvwxyzç+$ñ][aeiouäëïöüâêîôûùò@]$)"), L"$1î$2" }, { wregex(L"([äëïöüäëïöüäëïöüùò@][bcdfghjklmnpqrstvwxyzç+$ñ](?:[bcdfghjklmnpqrstvwxyzç+$ñ])?(?:[bcdfghjklmnpqrstvwxyzç+$ñ])?)ö([bcdfghjklmnpqrstvwxyzç+$ñ][aeiouäëïöüâêîôûùò@]$)"), L"$1ô$2" }, { wregex(L"([äëïöüäëïöüäëïöüùò@][bcdfghjklmnpqrstvwxyzç+$ñ](?:[bcdfghjklmnpqrstvwxyzç+$ñ])?(?:[bcdfghjklmnpqrstvwxyzç+$ñ])?)ü([bcdfghjklmnpqrstvwxyzç+$ñ][aeiouäëïöüâêîôûùò@]$)"), L"$1û$2" },
|
||||||
|
// double vowels
|
||||||
|
{ wregex(L"eau"), L"ö" },
|
||||||
|
{ wregex(L"ai"), L"ä" },
|
||||||
|
{ wregex(L"au"), L"ò" },
|
||||||
|
{ wregex(L"âw"), L"ò" },
|
||||||
|
{ wregex(L"e{2}"), L"ë" },
|
||||||
|
{ wregex(L"ea"), L"ë" },
|
||||||
|
{ wregex(L"(s)ei"), L"$1ë" },
|
||||||
|
{ wregex(L"ei"), L"ä" },
|
||||||
|
{ wregex(L"eo"), L"ë@" },
|
||||||
|
{ wregex(L"êw"), L"ü" },
|
||||||
|
{ wregex(L"eu"), L"ü" },
|
||||||
|
{ wregex(L"ie"), L"ë" },
|
||||||
|
{ wregex(L"(i)[aeiouäëïöüâêîôûùò@]"), L"$1@" },
|
||||||
|
{ wregex(L"(^[bcdfghjklmnpqrstvwxyzç+$ñ](?:[bcdfghjklmnpqrstvwxyzç+$ñ])?)i"), L"$1ï" },
|
||||||
|
{ wregex(L"i(@)"), L"ë$1" },
|
||||||
|
{ wregex(L"oa"), L"ö" },
|
||||||
|
{ wregex(L"oe($)"), L"ö$1" },
|
||||||
|
{ wregex(L"o{2}(k)"), L"ù$1" },
|
||||||
|
{ wregex(L"o{2}"), L"u" },
|
||||||
|
{ wregex(L"oul(d$)"), L"ù$1" },
|
||||||
|
{ wregex(L"ou"), L"ôw" },
|
||||||
|
{ wregex(L"oi"), L"öy" },
|
||||||
|
{ wregex(L"ua"), L"ü@" },
|
||||||
|
{ wregex(L"ue"), L"u" },
|
||||||
|
{ wregex(L"ui"), L"u" },
|
||||||
|
{ wregex(L"ôw($)"), L"ö$1" },
|
||||||
|
// those pesky final syllabics
|
||||||
|
{ wregex(L"([aeiouäëïöüâêîôûùò@][bcdfghjklmnpqrstvwxyzç+$ñ](?:[aeiouäëïöüâêîôûùò@])?)[aeiouäëïöüâêîôûùò@](l$)"), L"$1@$2" },
|
||||||
|
{ wregex(L"([aeiouäëïöüâêîôûùò@][bcdfghjklmnpqrstvwxyzç+$ñ](?:[bcdfghjklmnpqrstvwxyzç+$ñ])?)ê(n$)"), L"$1@$2" },
|
||||||
|
{ wregex(L"([aeiouäëïöüâêîôûùò@][bcdfghjklmnpqrstvwxyzç+$ñ](?:[bcdfghjklmnpqrstvwxyzç+$ñ])?)î(n$)"), L"$1@$2" },
|
||||||
|
{ wregex(L"([aeiouäëïöüâêîôûùò@][bcdfghjklmnpqrstvwxyzç+$ñ](?:[bcdfghjklmnpqrstvwxyzç+$ñ])?)â(n$)"), L"$1@$2" },
|
||||||
|
{ wregex(L"([aeiouäëïöüâêîôûùò@][bcdfghjklmnpqrstvwxyzç+$ñ](?:[bcdfghjklmnpqrstvwxyzç+$ñ])?)ô(n$)"), L"$1@$2" },
|
||||||
|
// suffix simplifications
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñaeiouäëïöüâêîôûùò@]{3})[aâä](b@l$)"), L"$1@$2" },
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñaeiouäëïöüâêîôûùò@]l)ë(@n$)"), L"$1y$2" },
|
||||||
|
{ wregex(L"([bcdfghjklmnpqrstvwxyzç+$ñaeiouäëïöüâêîôûùò@]n)ë(@n$)"), L"$1y$2" },
|
||||||
|
// unpronounceable finals
|
||||||
|
{ wregex(L"(m)b($)"), L"$1$2" },
|
||||||
|
{ wregex(L"(m)n($)"), L"$1$2" },
|
||||||
|
// color the final vowels
|
||||||
|
{ wregex(L"a($)"), L"@$1" },
|
||||||
|
{ wregex(L"e($)"), L"ë$1" },
|
||||||
|
{ wregex(L"i($)"), L"ë$1" },
|
||||||
|
{ wregex(L"o($)"), L"ö$1" },
|
||||||
|
// vowels before r V=aeiouäëïöüâêîôûùò@
|
||||||
|
{ wregex(L"ôw(r[bcdfghjklmnpqrstvwxyzç+$ñaeiouäëïöüâêîôûùò@])"), L"ö$1" },
|
||||||
|
{ wregex(L"ô(r)"), L"ö$1" },
|
||||||
|
{ wregex(L"ò(r)"), L"ö$1" },
|
||||||
|
{ wregex(L"(w)â(r[bcdfghjklmnpqrstvwxyzç+$ñ])"), L"$1ö$2" },
|
||||||
|
{ wregex(L"(w)â(r$)"), L"$1ö$2" },
|
||||||
|
{ wregex(L"ê(r{2})"), L"ä$1" },
|
||||||
|
{ wregex(L"ë(r[iîï][bcdfghjklmnpqrstvwxyzç+$ñ])"), L"ä$1" },
|
||||||
|
{ wregex(L"â(r{2})"), L"ä$1" },
|
||||||
|
{ wregex(L"â(r[bcdfghjklmnpqrstvwxyzç+$ñ])"), L"ô$1" },
|
||||||
|
{ wregex(L"â(r$)"), L"ô$1" },
|
||||||
|
{ wregex(L"â(r)"), L"ä$1" },
|
||||||
|
{ wregex(L"ê(r)"), L"@$1" },
|
||||||
|
{ wregex(L"î(r)"), L"@$1" },
|
||||||
|
{ wregex(L"û(r)"), L"@$1" },
|
||||||
|
{ wregex(L"ù(r)"), L"@$1" },
|
||||||
|
// handle ng
|
||||||
|
{ wregex(L"ng([fs$+])"), L"ñ$1" },
|
||||||
|
{ wregex(L"ng([bdg])"), L"ñ$1" },
|
||||||
|
{ wregex(L"ng([ptk])"), L"ñ$1" },
|
||||||
|
{ wregex(L"ng($)"), L"ñ$1" },
|
||||||
|
{ wregex(L"n(g)"), L"ñ$1" },
|
||||||
|
{ wregex(L"n(k)"), L"ñ$1" },
|
||||||
|
{ wregex(L"ô(ñ)"), L"ò$1" },
|
||||||
|
{ wregex(L"â(ñ)"), L"ä$1" },
|
||||||
|
// really a morphophonological rule, but it's cute
|
||||||
|
{ wregex(L"([bdg])s($)"), L"$1z$2" },
|
||||||
|
{ wregex(L"s(m$)"), L"z$1" },
|
||||||
|
// double consonants
|
||||||
|
{ wregex(L"s(s)"), L"$1" },
|
||||||
|
{ wregex(L"s(\\$)"), L"$1" },
|
||||||
|
{ wregex(L"t(t)"), L"$1" },
|
||||||
|
{ wregex(L"t(ç)"), L"$1" },
|
||||||
|
{ wregex(L"p(p)"), L"$1" },
|
||||||
|
{ wregex(L"k(k)"), L"$1" },
|
||||||
|
{ wregex(L"b(b)"), L"$1" },
|
||||||
|
{ wregex(L"d(d)"), L"$1" },
|
||||||
|
{ wregex(L"d(j)"), L"$1" },
|
||||||
|
{ wregex(L"g(g)"), L"$1" },
|
||||||
|
{ wregex(L"n(n)"), L"$1" },
|
||||||
|
{ wregex(L"m(m)"), L"$1" },
|
||||||
|
{ wregex(L"r(r)"), L"$1" },
|
||||||
|
{ wregex(L"l(l)"), L"$1" },
|
||||||
|
{ wregex(L"f(f)"), L"$1" },
|
||||||
|
{ wregex(L"z(z)"), L"$1" },
|
|
@ -0,0 +1,115 @@
|
||||||
|
# This script reads transformation rules for English G2P from a text file
# and generates C++ code.
# The rule format is described here: http://www.zompist.com/sounds.htm

# Maps one-character class names (e.g. 'V') to the string of characters they
# stand for, as defined at the top of the rules file.
$characterClasses = {}
|
||||||
|
|
||||||
|
# Converts one sound change rule (searchValue -> replaceValue in the context
# contextBegin _ contextEnd) into a C++ initializer for a { wregex, wstring }
# pair, e.g. '{ wregex(L"ch"), L"ç" },'.
# Returns nil if the rule is a no-op; when search and replace values are both
# character classes, returns multiple space-separated initializers.
def formatRule(searchValue, replaceValue, contextBegin, contextEnd)
	return nil if replaceValue == searchValue

	# Special case: search and replace values are character classes.
	# Expand into one subrule per corresponding character pair.
	if $characterClasses.has_key?(searchValue) && $characterClasses.has_key?(replaceValue)
		searchCharacters = $characterClasses[searchValue]
		replaceCharacters = $characterClasses[replaceValue]
		count = [searchCharacters.length, replaceCharacters.length].min
		result = ''
		0.upto(count - 1) do |i|
			subrule = formatRule(searchCharacters[i], replaceCharacters[i], contextBegin, contextEnd)
			if subrule
				result << ' ' if !result.empty?
				result << subrule
			end
		end
		return result
	end

	# Escape regex special characters
	searchValue = Regexp.escape(searchValue)
	contextBegin = Regexp.escape(contextBegin)
	contextEnd = Regexp.escape(contextEnd)

	# Anchors: '#' in the rule format means beginning/end of word
	contextBegin.sub!(/^\\\#/, '^')
	contextEnd.sub!(/\\\#$/, '$')

	hasContextBegin = contextBegin != ''
	hasContextEnd = contextEnd != ''
	regexString = searchValue
	if hasContextBegin
		regexString = "(#{contextBegin})" + regexString
		replaceValue = "$1" + replaceValue
	end
	if hasContextEnd
		regexString = regexString + "(#{contextEnd})"
		replaceValue = replaceValue + (hasContextBegin ? "$2" : "$1")
	end

	# Optional parts: (x) becomes a non-capturing optional group
	regexString.gsub!(/\\\((.*?)\\\)/, '(?:\\1)?')

	# Fold repeated characters/classes into counted repetitions.
	# Bug fix: the 5-fold pattern was /([w])\1\1\1\1/, which matched only the
	# literal letter 'w' five times; like its siblings it must use \w.
	regexString.gsub!(/(\w)\1\1\1\1/, '\\1{5}')
	regexString.gsub!(/(\w)\1\1\1/, '\\1{4}')
	regexString.gsub!(/(\w)\1\1/, '\\1{3}')
	regexString.gsub!(/(\w)\1/, '\\1{2}')

	# Expand character class names into regex character classes
	regexString.gsub!(/./) do |ch|
		$characterClasses.has_key?(ch) ? "[#{$characterClasses[ch]}]" : ch
	end

	# C++ string escaping.
	# NOTE(review): this turns '"' as well as '\' into a double backslash,
	# which would mangle a rule containing a quote -- presumably no rule
	# does; confirm against english.sc.
	regexString.gsub!(/[\\"]/, '\\\\\\\\')
	replaceValue.gsub!(/[\\"]/, '\\\\\\\\')

	return "{ wregex(L\"#{regexString}\"), L\"#{replaceValue}\" },"
end
|
||||||
|
|
||||||
|
# Read rules
|
||||||
|
# Read rules (the original rule file is encoded in ISO-8859-1)
lines = File.read('../lib/soundchange/english.sc', :encoding => 'iso-8859-1').split(/\r?\n/)

# Parse character class definitions.
# Definitions of the form "X=abc" (with comment lines allowed in between)
# must form a contiguous block at the top of the file; the first line that is
# neither a comment nor a definition ends the block.
characterClassLineCount = 0
lines.each_with_index do |line, index|
	# Skip comments
	next if line.start_with? '*'

	match = /^(.)=(.+)$/.match(line)
	if match
		# Remember how many lines to skip when parsing the rules below
		characterClassLineCount = index + 1
	else
		break
	end

	name = match[1]
	value = match[2]
	$characterClasses[name] = value
end

# Parse rules and convert them to C++
File.open('g2pRules.cpp', 'w:UTF-8') do |file|
	file.print "// Generated by #{__FILE__}; don't modify by hand!\n\n"
	lines.drop(characterClassLineCount).each do |line|
		# Handle comments: pass them through as C++ line comments
		comment = /^\*(.*)$/.match(line)
		if comment
			file.puts "//#{comment[1]}"
			next
		end

		# Handle rules of the form "search/replace/contextBegin_contextEnd"
		rule = /^(.+)\/(.*)\/(.*)_(.*)$/.match(line)
		if rule
			searchValue = rule[1]
			replaceValue = rule[2]
			contextBegin = rule[3]
			contextEnd = rule[4]

			file.puts formatRule(searchValue, replaceValue, contextBegin, contextEnd)
			next
		end

		# NOTE(review): a blank line in the rules file would land here and
		# abort the script -- presumably the file contains none; confirm.
		raise "Invalid rule: #{line}"
	end
end
|
||||||
|
|
|
@ -0,0 +1,31 @@
|
||||||
|
#include <gmock/gmock.h>
|
||||||
|
#include "g2p.h"
|
||||||
|
|
||||||
|
using namespace testing;
|
||||||
|
using std::vector;
|
||||||
|
using std::pair;
|
||||||
|
using std::string;
|
||||||
|
|
||||||
|
// Smoke test for wordToPhones: empty input, invalid input, and a list of
// known words with their expected phone sequences.
TEST(wordToPhones, basic) {
	// An empty word yields no phones
	EXPECT_THAT(wordToPhones(""), IsEmpty());

	// Uppercase letters are rejected (only a-z and apostrophes are valid)
	EXPECT_ANY_THROW(wordToPhones("Invalid"));

	// The following phones are based on actual output, *not* ideal output.
	vector<pair<string, vector<Phone>>> words {
		{ "once", { Phone::AA, Phone::N, Phone::S }},
		{ "upon", { Phone::UW, Phone::P, Phone::AH, Phone::N }},
		{ "a", { Phone::AH }},
		{ "midnight", { Phone::M, Phone::IH, Phone::D, Phone::N, Phone::AY, Phone::T }},
		{ "dreary", { Phone::D, Phone::R, Phone::IY, Phone::R, Phone::IY }},
		{ "while", { Phone::W, Phone::AY, Phone::L }},
		{ "i", { Phone::IY }},
		{ "pondered", { Phone::P, Phone::AA, Phone::N, Phone::D, Phone::IY, Phone::R, Phone::EH, Phone::D }},
		{ "weak", { Phone::W, Phone::IY, Phone::K }},
		{ "and", { Phone::AE, Phone::N, Phone::D }},
		{ "weary", { Phone::W, Phone::IY, Phone::R, Phone::IY }}
	};
	for (const auto& word : words) {
		EXPECT_THAT(wordToPhones(word.first), ElementsAreArray(word.second)) << "Original word: '" << word.first << "'";
	}
}
|
Loading…
Reference in New Issue