
wikispeech server self tests updated to match updates in pronlex as of 2017-08-24
HannaLindgren committed Aug 24, 2017
1 parent f97b1f3 commit 2376903
Showing 7 changed files with 22 additions and 22 deletions.
16 changes: 8 additions & 8 deletions wikispeech_server/adapters/test_lexicon_client.py
@@ -16,12 +16,12 @@
class TestLexicon(unittest.TestCase):

def testNewLexicon(self):
lexicon_name = "pronlex:sv-se.nst"
lexicon_name = "sv_se_nst_lex:sv-se.nst"
lexicon = Lexicon(lexicon_name)
self.assertEqual(str(type(lexicon)), "<class 'wikispeech_server.adapters.lexicon_client.Lexicon'>")

def testLookup(self):
lexicon_name = "pronlex:sv-se.nst"
lexicon_name = "sv_se_nst_lex:sv-se.nst"
lexicon = Lexicon(lexicon_name)

orth = "apa"
@@ -30,7 +30,7 @@ def testLookup(self):

#expected = [{'entryValidations': [], 'partOfSpeech': 'NN', 'language': 'sv-se', 'transcriptions': [{'id': 79410, 'entryId': 74074, 'sources': [], 'language': 'sv-se', 'strn': '"" A: . p a'}], 'id': 74074, 'preferred': False, 'morphology': 'SIN|IND|NOM|UTR', 'lemma': {'id': 8764, 'paradigm': 's1a-flicka', 'reading': '', 'strn': 'apa'}, 'wordParts': 'apa', 'strn': 'apa', 'lexiconId': 1, 'status': {'name': 'imported', 'id': 74074, 'current': True, 'source': 'nst', 'timestamp': '2017-05-12T10:55:49Z'}}, {'entryValidations': [], 'partOfSpeech': 'VB', 'language': 'sv-se', 'transcriptions': [{'id': 79411, 'entryId': 74075, 'sources': [], 'language': 'sv-se', 'strn': '"" A: . p a'}], 'id': 74075, 'preferred': False, 'morphology': '', 'lemma': {'id': 8764, 'paradigm': 's1a-flicka', 'reading': '', 'strn': 'apa'}, 'wordParts': 'apa', 'strn': 'apa', 'lexiconId': 1, 'status': {'name': 'imported', 'id': 74075, 'current': True, 'source': 'nst', 'timestamp': '2017-05-12T10:55:49Z'}}, {'entryValidations': [], 'partOfSpeech': 'VB', 'language': 'sv-se', 'transcriptions': [{'id': 79412, 'entryId': 74076, 'sources': [], 'language': 'sv-se', 'strn': '"" A: . p a'}], 'id': 74076, 'preferred': False, 'morphology': 'AKT|INF-IMP', 'lemma': {'id': 8764, 'paradigm': 's1a-flicka', 'reading': '', 'strn': 'apa'}, 'wordParts': 'apa', 'strn': 'apa', 'lexiconId': 1, 'status': {'name': 'imported', 'id': 74076, 'current': True, 'source': 'nst', 'timestamp': '2017-05-12T10:55:49Z'}}]

- expected = [{'lexRef': {'DBRef': 'pronlex', 'LexName': 'sv-se.nst'}, 'lemma': {'strn': 'apa', 'paradigm': 's1a-flicka', 'reading': '', 'id': 8764}, 'strn': 'apa', 'transcriptions': [{'entryId': 74074, 'strn': '"" A: . p a', 'id': 79410, 'language': 'sv-se', 'sources': []}], 'preferred': False, 'partOfSpeech': 'NN', 'wordParts': 'apa', 'id': 74074, 'morphology': 'SIN|IND|NOM|UTR', 'status': {'source': 'nst', 'id': 74074, 'timestamp': '2017-08-17T11:57:08Z', 'name': 'imported', 'current': True}, 'language': 'sv-se', 'entryValidations': []}, {'lexRef': {'DBRef': 'pronlex', 'LexName': 'sv-se.nst'}, 'lemma': {'strn': 'apa', 'paradigm': 's1a-flicka', 'reading': '', 'id': 8764}, 'strn': 'apa', 'transcriptions': [{'entryId': 74075, 'strn': '"" A: . p a', 'id': 79411, 'language': 'sv-se', 'sources': []}], 'preferred': False, 'partOfSpeech': 'VB', 'wordParts': 'apa', 'id': 74075, 'morphology': '', 'status': {'source': 'nst', 'id': 74075, 'timestamp': '2017-08-17T11:57:08Z', 'name': 'imported', 'current': True}, 'language': 'sv-se', 'entryValidations': []}, {'lexRef': {'DBRef': 'pronlex', 'LexName': 'sv-se.nst'}, 'lemma': {'strn': 'apa', 'paradigm': 's1a-flicka', 'reading': '', 'id': 8764}, 'strn': 'apa', 'transcriptions': [{'entryId': 74076, 'strn': '"" A: . p a', 'id': 79412, 'language': 'sv-se', 'sources': []}], 'preferred': False, 'partOfSpeech': 'VB', 'wordParts': 'apa', 'id': 74076, 'morphology': 'AKT|INF-IMP', 'status': {'source': 'nst', 'id': 74076, 'timestamp': '2017-08-17T11:57:08Z', 'name': 'imported', 'current': True}, 'language': 'sv-se', 'entryValidations': []}]
+ expected = [{'lexRef': {'DBRef': 'sv_se_nst_lex', 'LexName': 'sv-se.nst'}, 'lemma': {'strn': 'apa', 'paradigm': 's1a-flicka', 'reading': '', 'id': 8764}, 'strn': 'apa', 'transcriptions': [{'entryId': 74074, 'strn': '"" A: . p a', 'id': 79410, 'language': 'sv-se', 'sources': []}], 'preferred': False, 'partOfSpeech': 'NN', 'wordParts': 'apa', 'id': 74074, 'morphology': 'SIN|IND|NOM|UTR', 'status': {'source': 'nst', 'id': 74074, 'timestamp': '2017-08-17T11:57:08Z', 'name': 'imported', 'current': True}, 'language': 'sv-se', 'entryValidations': []}, {'lexRef': {'DBRef': 'sv_se_nst_lex', 'LexName': 'sv-se.nst'}, 'lemma': {'strn': 'apa', 'paradigm': 's1a-flicka', 'reading': '', 'id': 8764}, 'strn': 'apa', 'transcriptions': [{'entryId': 74075, 'strn': '"" A: . p a', 'id': 79411, 'language': 'sv-se', 'sources': []}], 'preferred': False, 'partOfSpeech': 'VB', 'wordParts': 'apa', 'id': 74075, 'morphology': '', 'status': {'source': 'nst', 'id': 74075, 'timestamp': '2017-08-17T11:57:08Z', 'name': 'imported', 'current': True}, 'language': 'sv-se', 'entryValidations': []}, {'lexRef': {'DBRef': 'sv_se_nst_lex', 'LexName': 'sv-se.nst'}, 'lemma': {'strn': 'apa', 'paradigm': 's1a-flicka', 'reading': '', 'id': 8764}, 'strn': 'apa', 'transcriptions': [{'entryId': 74076, 'strn': '"" A: . p a', 'id': 79412, 'language': 'sv-se', 'sources': []}], 'preferred': False, 'partOfSpeech': 'VB', 'wordParts': 'apa', 'id': 74076, 'morphology': 'AKT|INF-IMP', 'status': {'source': 'nst', 'id': 74076, 'timestamp': '2017-08-17T11:57:08Z', 'name': 'imported', 'current': True}, 'language': 'sv-se', 'entryValidations': []}]

result = lexicon.lookup(orth)
log.info("RESULT: %s" % result)
@@ -41,17 +41,17 @@ def testLookup(self):
def testLexiconException1(self):
default_log_level = log.log_level
log.log_level = "fatal"
lexicon_name = "pronlex:sv-se.nst_THIS_LEXICON_SHOULD_NOT_EXIST"
lexicon_name = "sv_se_nst_lex:sv-se.nst_THIS_LEXICON_SHOULD_NOT_EXIST"
with self.assertRaises(LexiconException):
lexicon = Lexicon(lexicon_name)
log.log_level = default_log_level

def testLexiconException2(self):
default_log_level = log.log_level
log.log_level = "fatal"
lexicon_name = "pronlex:sv-se.nst_THIS_LEXICON_SHOULD_NOT_EXIST"
lexicon_name = "sv_se_nst_lex:sv-se.nst_THIS_LEXICON_SHOULD_NOT_EXIST"
with self.assertRaises(LexiconException):
lexicon = Lexicon("pronlex:sv-se.nst")
lexicon = Lexicon("sv_se_nst_lex:sv-se.nst")
lexicon.lexicon_name = lexicon_name
lexicon.lookup("apa")
log.log_level = default_log_level
Expand All @@ -61,7 +61,7 @@ def test_lexLookup(self):
lex_config = {
"module":"adapters.lexicon_client",
"call":"lexLookup",
"lexicon":"pronlex:sv-se.nst"
"lexicon":"sv_se_nst_lex:sv-se.nst"
}
utt = {
"lang": "sv",
@@ -99,7 +99,7 @@ def test_lexLookup_Exception(self):
lex_config = {
"module":"adapters.lexicon_client",
"call":"lexLookup",
"lexicon":"pronlex:sv-se.nst_DOES_NOT_EXIST"
"lexicon":"sv_se_nst_lex:sv-se.nst_DOES_NOT_EXIST"
}
utt = {
"lang": "sv",
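The renamed references follow the pattern <lexicon database>:<lexicon name>, which is what the lexRef objects (DBRef/LexName) in the expected lookup results above reflect. A minimal Python sketch of that split, for illustration only (the helper name is hypothetical and not part of lexicon_client):

    # Hypothetical helper, for illustration only: split a reference such as
    # "sv_se_nst_lex:sv-se.nst" into the DBRef/LexName pair seen in lookup results.
    def split_lex_ref(lex_ref):
        db_ref, lex_name = lex_ref.split(":", 1)
        return {"DBRef": db_ref, "LexName": lex_name}

    assert split_lex_ref("sv_se_nst_lex:sv-se.nst") == {"DBRef": "sv_se_nst_lex",
                                                        "LexName": "sv-se.nst"}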
4 changes: 2 additions & 2 deletions wikispeech_server/test_textprocessor.py
@@ -30,7 +30,7 @@ def testNewTextprocessor(self):
{
"module":"adapters.lexicon_client",
"call":"lexLookup",
"lexicon":"pronlex:sv-se.nst"
"lexicon":"sv_se_nst_lex:sv-se.nst"
}
]
}
@@ -57,7 +57,7 @@ def testBrokenTextprocessor(self):
{
"module":"adapters.lexicon_client",
"call":"lexLookup",
"lexicon":"pronlex:sv-se.nst"
"lexicon":"sv_se_nst_lex:sv-se.nst"
}
]
}
2 changes: 1 addition & 1 deletion wikispeech_server/textprocessor.py
@@ -101,7 +101,7 @@ def __init__(self, cconfig):
{
"module":"adapters.lexicon_client",
"call":"lexLookup",
"lexicon":"pronlex:sv-se.nst"
"lexicon":"sv_se_nst_lex:sv-se.nst"
}
]
}
8 changes: 4 additions & 4 deletions wikispeech_server/voice_config.py
@@ -14,7 +14,7 @@
{
"module":"adapters.lexicon_client",
"call":"lexLookup",
"lexicon":"pronlex:sv-se.nst"
"lexicon":"sv_se_nst_lex:sv-se.nst"
}
]
}
@@ -49,7 +49,7 @@
{
"module":"adapters.lexicon_client",
"call":"lexLookup",
"lexicon":"pronlex:en-us.cmu"
"lexicon":"en_am_cmu_lex:en-us.cmu"
}
]
}
@@ -66,7 +66,7 @@
{
"module":"adapters.lexicon_client",
"call":"lexLookup",
"lexicon":"pronlex:en-us.cmu"
"lexicon":"en_am_cmu_lex:en-us.cmu"
}
]
}
@@ -97,7 +97,7 @@
{
"module":"adapters.lexicon_client",
"call":"lexLookup",
"lexicon":"pronlex:ar-test"
"lexicon":"ar_ar_tst_lex:ar-test"
}
]
}
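All of the config entries change the same way: only the value of the "lexicon" key moves from the old "pronlex:<name>" form to a per-database name. A hypothetical migration helper, for illustration only (not part of this repository), covering just the three lexicons touched by this commit:

    # Hypothetical helper, not part of the repository: rewrite old-style
    # "pronlex:<lexicon>" references to the per-database names used in this commit.
    NEW_DB_NAMES = {
        "sv-se.nst": "sv_se_nst_lex",
        "en-us.cmu": "en_am_cmu_lex",
        "ar-test": "ar_ar_tst_lex",
    }

    def migrate_lexicon_ref(old_ref):
        _old_db, lex_name = old_ref.split(":", 1)
        return "%s:%s" % (NEW_DB_NAMES[lex_name], lex_name)

    assert migrate_lexicon_ref("pronlex:en-us.cmu") == "en_am_cmu_lex:en-us.cmu"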
2 changes: 1 addition & 1 deletion wikispeech_server/wikispeech.py
@@ -753,7 +753,7 @@ def getParam(param,default=None):


def test_lexicon_client():
lexicon = "pronlex:sv-se.nst"
lexicon = "sv_se_nst_lex:sv-se.nst"
sent = "apa hund färöarna"
trans = {}
trans["apa"] = '"" A: . p a'
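The self test in wikispeech.py builds a small table of expected transcriptions and runs lookups against the renamed lexicon. A rough sketch of such a check, assuming a running lexicon (pronlex) server and using only the classes shown in the diffs above; the exact assertions in test_lexicon_client() are not reproduced here:

    # Rough sketch, not the actual test_lexicon_client() body; assumes a
    # running pronlex lexicon server reachable by the adapter.
    from wikispeech_server.adapters.lexicon_client import Lexicon

    lexicon = Lexicon("sv_se_nst_lex:sv-se.nst")
    trans = {"apa": '"" A: . p a'}  # expected transcriptions, as in the snippet above
    for word, expected in trans.items():
        entries = lexicon.lookup(word)
        # each entry carries its transcriptions under the "transcriptions" key
        found = [t["strn"] for e in entries for t in e["transcriptions"]]
        assert expected in found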
2 changes: 1 addition & 1 deletion workflow_demo/ko_test.html
@@ -71,7 +71,7 @@

self.entries([]);
var params = {
"lexicons": "pronlex:sv-se.nst",
"lexicons": "sv_se_nst_lex:sv-se.nst",
"words": search_term
}

10 changes: 5 additions & 5 deletions workflow_demo/test_workflow.js
@@ -884,10 +884,10 @@ function searchLexicon(search_term, lang) {


if ( lang == "sv" ) {
var lexicons = "pronlex:sv-se.nst";
var lexicons = "sv_se_nst_lex:sv-se.nst";
}
else if ( lang == "en" ) {
var lexicons = "pronlex:en-us.cmu";
var lexicons = "en_am_cmu_lex:en-us.cmu";
}
else {
console.log("WARNING: no lexicon defined for lang "+lang);
@@ -917,7 +917,7 @@ function wordInLex(word, div, trans) {
var unknown_words_container = document.getElementById("unknown_words_container");

var params = {
"lexicons": "pronlex:sv-se.nst",
"lexicons": "sv_se_nst_lex:sv-se.nst",
"words": word
}

@@ -955,10 +955,10 @@ function wordsInLex(words, lang) {

//TODO hardcoded lexicon
if ( lang == "sv" ) {
var lexicons = "pronlex:sv-se.nst";
var lexicons = "sv_se_nst_lex:sv-se.nst";
}
else if ( lang == "en" ) {
var lexicons = "pronlex:en-us.cmu";
var lexicons = "en_am_cmu_lex:en-us.cmu";
}
else {
console.log("WARNING: no lexicon defined for lang "+lang);
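The JavaScript demo hardcodes the same language-to-lexicon mapping in searchLexicon() and wordsInLex() (noted as a TODO in the source). For reference, that selection logic in Python form; a sketch only, the demo itself stays in JavaScript:

    # Sketch of the hardcoded language-to-lexicon selection used by the demo.
    LEXICONS_BY_LANG = {
        "sv": "sv_se_nst_lex:sv-se.nst",
        "en": "en_am_cmu_lex:en-us.cmu",
    }

    def lexicons_for(lang):
        if lang not in LEXICONS_BY_LANG:
            print("WARNING: no lexicon defined for lang " + lang)
            return None
        return LEXICONS_BY_LANG[lang]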
