From d5ba6605eedf17f589c8eac1794a6d3a116c1564 Mon Sep 17 00:00:00 2001 From: Dustin Popp Date: Mon, 29 Oct 2018 15:16:50 -0500 Subject: [PATCH 1/3] feat(discovery): add new methods: `createTokenizationDictionary`, `deleteTokenizationDictionary`, and `getTokenizationDictionaryStatus` --- assistant/v1.ts | 213 ++++++ assistant/v2.ts | 17 +- discovery/v1-generated.ts | 439 +++++++++++- language-translator/v3.ts | 25 + natural-language-classifier/v1-generated.ts | 24 + .../v1-generated.ts | 95 +-- personality-insights/v3-generated.ts | 8 + speech-to-text/v1-generated.ts | 653 ++++++++++++------ text-to-speech/v1-generated.ts | 108 ++- tone-analyzer/v3-generated.ts | 10 +- 10 files changed, 1314 insertions(+), 278 deletions(-) diff --git a/assistant/v1.ts b/assistant/v1.ts index f0a6d3a1de..24c937a5cf 100644 --- a/assistant/v1.ts +++ b/assistant/v1.ts @@ -90,10 +90,12 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['workspace_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'input': _params.input, 'alternate_intents': _params.alternate_intents, @@ -102,12 +104,15 @@ class AssistantV1 extends BaseService { 'intents': _params.intents, 'output': _params.output }; + const query = { 'nodes_visited_details': _params.nodes_visited_details }; + const path = { 'workspace_id': _params.workspace_id }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/message', @@ -124,6 +129,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -161,6 +167,7 @@ class AssistantV1 extends BaseService { public createWorkspace(params?: AssistantV1.CreateWorkspaceParams, callback?: AssistantV1.Callback): NodeJS.ReadableStream | void { const _params = (typeof params === 'function' && !callback) ? 
{} : extend({}, params); const _callback = (typeof params === 'function' && !callback) ? params : (callback) ? callback : () => {/* noop */}; + const body = { 'name': _params.name, 'description': _params.description, @@ -173,6 +180,7 @@ class AssistantV1 extends BaseService { 'learning_opt_out': _params.learning_opt_out, 'system_settings': _params.system_settings }; + const parameters = { options: { url: '/v1/workspaces', @@ -187,6 +195,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -207,13 +216,16 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['workspace_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'workspace_id': _params.workspace_id }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}', @@ -226,6 +238,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -252,17 +265,21 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['workspace_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'export': _params.export, 'include_audit': _params.include_audit }; + const path = { 'workspace_id': _params.workspace_id }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}', @@ -276,6 +293,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -301,6 +319,7 @@ class AssistantV1 extends BaseService { public listWorkspaces(params?: AssistantV1.ListWorkspacesParams, callback?: AssistantV1.Callback): NodeJS.ReadableStream | void { const _params = (typeof params === 'function' && !callback) ? {} : extend({}, params); const _callback = (typeof params === 'function' && !callback) ? params : (callback) ? callback : () => {/* noop */}; + const query = { 'page_limit': _params.page_limit, 'include_count': _params.include_count, @@ -308,6 +327,7 @@ class AssistantV1 extends BaseService { 'cursor': _params.cursor, 'include_audit': _params.include_audit }; + const parameters = { options: { url: '/v1/workspaces', @@ -320,6 +340,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -362,10 +383,12 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['workspace_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'name': _params.name, 'description': _params.description, @@ -378,12 +401,15 @@ class AssistantV1 extends BaseService { 'learning_opt_out': _params.learning_opt_out, 'system_settings': _params.system_settings }; + const query = { 'append': _params.append }; + const path = { 'workspace_id': _params.workspace_id }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}', @@ -400,6 +426,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -431,18 +458,22 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'intent']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'intent': _params.intent, 'description': _params.description, 'examples': _params.examples }; + const path = { 'workspace_id': _params.workspace_id }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/intents', @@ -458,6 +489,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -479,14 +511,17 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'intent']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'workspace_id': _params.workspace_id, 'intent': _params.intent }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/intents/{intent}', @@ -499,6 +534,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -526,18 +562,22 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'intent']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'export': _params.export, 'include_audit': _params.include_audit }; + const path = { 'workspace_id': _params.workspace_id, 'intent': _params.intent }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/intents/{intent}', @@ -551,6 +591,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -582,10 +623,12 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['workspace_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'export': _params.export, 'page_limit': _params.page_limit, @@ -594,9 +637,11 @@ class AssistantV1 extends BaseService { 'cursor': _params.cursor, 'include_audit': _params.include_audit }; + const path = { 'workspace_id': _params.workspace_id }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/intents', @@ -610,6 +655,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -639,19 +685,23 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'intent']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'intent': _params.new_intent, 'description': _params.new_description, 'examples': _params.new_examples }; + const path = { 'workspace_id': _params.workspace_id, 'intent': _params.intent }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/intents/{intent}', @@ -667,6 +717,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -698,18 +749,22 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'intent', 'text']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'text': _params.text, 'mentions': _params.mentions }; + const path = { 'workspace_id': _params.workspace_id, 'intent': _params.intent }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/intents/{intent}/examples', @@ -725,6 +780,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -747,15 +803,18 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'intent', 'text']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'workspace_id': _params.workspace_id, 'intent': _params.intent, 'text': _params.text }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/intents/{intent}/examples/{text}', @@ -768,6 +827,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -792,18 +852,22 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'intent', 'text']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'include_audit': _params.include_audit }; + const path = { 'workspace_id': _params.workspace_id, 'intent': _params.intent, 'text': _params.text }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/intents/{intent}/examples/{text}', @@ -817,6 +881,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -845,10 +910,12 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'intent']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'page_limit': _params.page_limit, 'include_count': _params.include_count, @@ -856,10 +923,12 @@ class AssistantV1 extends BaseService { 'cursor': _params.cursor, 'include_audit': _params.include_audit }; + const path = { 'workspace_id': _params.workspace_id, 'intent': _params.intent }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/intents/{intent}/examples', @@ -873,6 +942,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -901,19 +971,23 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'intent', 'text']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'text': _params.new_text, 'mentions': _params.new_mentions }; + const path = { 'workspace_id': _params.workspace_id, 'intent': _params.intent, 'text': _params.text }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/intents/{intent}/examples/{text}', @@ -929,6 +1003,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -958,16 +1033,20 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'text']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'text': _params.text }; + const path = { 'workspace_id': _params.workspace_id }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/counterexamples', @@ -983,6 +1062,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1004,14 +1084,17 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'text']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'workspace_id': _params.workspace_id, 'text': _params.text }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/counterexamples/{text}', @@ -1024,6 +1107,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1047,17 +1131,21 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'text']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'include_audit': _params.include_audit }; + const path = { 'workspace_id': _params.workspace_id, 'text': _params.text }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/counterexamples/{text}', @@ -1071,6 +1159,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1098,10 +1187,12 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['workspace_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'page_limit': _params.page_limit, 'include_count': _params.include_count, @@ -1109,9 +1200,11 @@ class AssistantV1 extends BaseService { 'cursor': _params.cursor, 'include_audit': _params.include_audit }; + const path = { 'workspace_id': _params.workspace_id }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/counterexamples', @@ -1125,6 +1218,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1147,17 +1241,21 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'text']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'text': _params.new_text }; + const path = { 'workspace_id': _params.workspace_id, 'text': _params.text }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/counterexamples/{text}', @@ -1173,6 +1271,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1206,10 +1305,12 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'entity']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'entity': _params.entity, 'description': _params.description, @@ -1217,9 +1318,11 @@ class AssistantV1 extends BaseService { 'values': _params.values, 'fuzzy_match': _params.fuzzy_match }; + const path = { 'workspace_id': _params.workspace_id }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/entities', @@ -1235,6 +1338,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1256,14 +1360,17 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'entity']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'workspace_id': _params.workspace_id, 'entity': _params.entity }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/entities/{entity}', @@ -1276,6 +1383,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1303,18 +1411,22 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'entity']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'export': _params.export, 'include_audit': _params.include_audit }; + const path = { 'workspace_id': _params.workspace_id, 'entity': _params.entity }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/entities/{entity}', @@ -1328,6 +1440,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1359,10 +1472,12 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['workspace_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'export': _params.export, 'page_limit': _params.page_limit, @@ -1371,9 +1486,11 @@ class AssistantV1 extends BaseService { 'cursor': _params.cursor, 'include_audit': _params.include_audit }; + const path = { 'workspace_id': _params.workspace_id }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/entities', @@ -1387,6 +1504,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1419,10 +1537,12 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'entity']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'entity': _params.new_entity, 'description': _params.new_description, @@ -1430,10 +1550,12 @@ class AssistantV1 extends BaseService { 'fuzzy_match': _params.new_fuzzy_match, 'values': _params.new_values }; + const path = { 'workspace_id': _params.workspace_id, 'entity': _params.entity }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/entities/{entity}', @@ -1449,6 +1571,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1480,18 +1603,22 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'entity']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'export': _params.export, 'include_audit': _params.include_audit }; + const path = { 'workspace_id': _params.workspace_id, 'entity': _params.entity }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/entities/{entity}/mentions', @@ -1505,6 +1632,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1547,10 +1675,12 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'entity', 'value']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'value': _params.value, 'metadata': _params.metadata, @@ -1558,10 +1688,12 @@ class AssistantV1 extends BaseService { 'patterns': _params.patterns, 'type': _params.value_type }; + const path = { 'workspace_id': _params.workspace_id, 'entity': _params.entity }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/entities/{entity}/values', @@ -1577,6 +1709,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1599,15 +1732,18 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'entity', 'value']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'workspace_id': _params.workspace_id, 'entity': _params.entity, 'value': _params.value }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}', @@ -1620,6 +1756,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1647,19 +1784,23 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'entity', 'value']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'export': _params.export, 'include_audit': _params.include_audit }; + const path = { 'workspace_id': _params.workspace_id, 'entity': _params.entity, 'value': _params.value }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}', @@ -1673,6 +1814,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1704,10 +1846,12 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'entity']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'export': _params.export, 'page_limit': _params.page_limit, @@ -1716,10 +1860,12 @@ class AssistantV1 extends BaseService { 'cursor': _params.cursor, 'include_audit': _params.include_audit }; + const path = { 'workspace_id': _params.workspace_id, 'entity': _params.entity }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/entities/{entity}/values', @@ -1733,6 +1879,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1773,10 +1920,12 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'entity', 'value']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'value': _params.new_value, 'metadata': _params.new_metadata, @@ -1784,11 +1933,13 @@ class AssistantV1 extends BaseService { 'synonyms': _params.new_synonyms, 'patterns': _params.new_patterns }; + const path = { 'workspace_id': _params.workspace_id, 'entity': _params.entity, 'value': _params.value }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}', @@ -1804,6 +1955,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1834,18 +1986,22 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'entity', 'value', 'synonym']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'synonym': _params.synonym }; + const path = { 'workspace_id': _params.workspace_id, 'entity': _params.entity, 'value': _params.value }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}/synonyms', @@ -1861,6 +2017,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1884,16 +2041,19 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'entity', 'value', 'synonym']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'workspace_id': _params.workspace_id, 'entity': _params.entity, 'value': _params.value, 'synonym': _params.synonym }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}/synonyms/{synonym}', @@ -1906,6 +2066,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1931,19 +2092,23 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'entity', 'value', 'synonym']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'include_audit': _params.include_audit }; + const path = { 'workspace_id': _params.workspace_id, 'entity': _params.entity, 'value': _params.value, 'synonym': _params.synonym }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}/synonyms/{synonym}', @@ -1957,6 +2122,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1986,10 +2152,12 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'entity', 'value']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'page_limit': _params.page_limit, 'include_count': _params.include_count, @@ -1997,11 +2165,13 @@ class AssistantV1 extends BaseService { 'cursor': _params.cursor, 'include_audit': _params.include_audit }; + const path = { 'workspace_id': _params.workspace_id, 'entity': _params.entity, 'value': _params.value }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}/synonyms', @@ -2015,6 +2185,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2043,19 +2214,23 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'entity', 'value', 'synonym']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'synonym': _params.new_synonym }; + const path = { 'workspace_id': _params.workspace_id, 'entity': _params.entity, 'value': _params.value, 'synonym': _params.synonym }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}/synonyms/{synonym}', @@ -2071,6 +2246,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2125,10 +2301,12 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'dialog_node']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'dialog_node': _params.dialog_node, 'description': _params.description, @@ -2149,9 +2327,11 @@ class AssistantV1 extends BaseService { 'digress_out_slots': _params.digress_out_slots, 'user_label': _params.user_label }; + const path = { 'workspace_id': _params.workspace_id }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/dialog_nodes', @@ -2167,6 +2347,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2188,14 +2369,17 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'dialog_node']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'workspace_id': _params.workspace_id, 'dialog_node': _params.dialog_node }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/dialog_nodes/{dialog_node}', @@ -2208,6 +2392,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2231,17 +2416,21 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'dialog_node']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'include_audit': _params.include_audit }; + const path = { 'workspace_id': _params.workspace_id, 'dialog_node': _params.dialog_node }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/dialog_nodes/{dialog_node}', @@ -2255,6 +2444,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2282,10 +2472,12 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['workspace_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'page_limit': _params.page_limit, 'include_count': _params.include_count, @@ -2293,9 +2485,11 @@ class AssistantV1 extends BaseService { 'cursor': _params.cursor, 'include_audit': _params.include_audit }; + const path = { 'workspace_id': _params.workspace_id }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/dialog_nodes', @@ -2309,6 +2503,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2361,10 +2556,12 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['workspace_id', 'dialog_node']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'dialog_node': _params.new_dialog_node, 'description': _params.new_description, @@ -2385,10 +2582,12 @@ class AssistantV1 extends BaseService { 'digress_out_slots': _params.new_digress_out_slots, 'user_label': _params.new_user_label }; + const path = { 'workspace_id': _params.workspace_id, 'dialog_node': _params.dialog_node }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/dialog_nodes/{dialog_node}', @@ -2404,6 +2603,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2436,16 +2636,19 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['filter']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'filter': _params.filter, 'sort': _params.sort, 'page_limit': _params.page_limit, 'cursor': _params.cursor }; + const parameters = { options: { url: '/v1/logs', @@ -2458,6 +2661,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2486,19 +2690,23 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['workspace_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'sort': _params.sort, 'filter': _params.filter, 'page_limit': _params.page_limit, 'cursor': _params.cursor }; + const path = { 'workspace_id': _params.workspace_id }; + const parameters = { options: { url: '/v1/workspaces/{workspace_id}/logs', @@ -2512,6 +2720,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2539,13 +2748,16 @@ class AssistantV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['customer_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'customer_id': _params.customer_id }; + const parameters = { options: { url: '/v1/user_data', @@ -2558,6 +2770,7 @@ class AssistantV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; diff --git a/assistant/v2.ts b/assistant/v2.ts index 447c815319..8565667d0f 100644 --- a/assistant/v2.ts +++ b/assistant/v2.ts @@ -80,13 +80,16 @@ class AssistantV2 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['assistant_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'assistant_id': _params.assistant_id }; + const parameters = { options: { url: '/v2/assistants/{assistant_id}/sessions', @@ -100,6 +103,7 @@ class AssistantV2 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -123,14 +127,17 @@ class AssistantV2 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['assistant_id', 'session_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'assistant_id': _params.assistant_id, 'session_id': _params.session_id }; + const parameters = { options: { url: '/v2/assistants/{assistant_id}/sessions/{session_id}', @@ -143,6 +150,7 @@ class AssistantV2 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -174,18 +182,22 @@ class AssistantV2 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['assistant_id', 'session_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'input': _params.input, 'context': _params.context }; + const path = { 'assistant_id': _params.assistant_id, 'session_id': _params.session_id }; + const parameters = { options: { url: '/v2/assistants/{assistant_id}/sessions/{session_id}/message', @@ -201,6 +213,7 @@ class AssistantV2 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -387,9 +400,9 @@ namespace AssistantV2 { export interface MessageContextGlobalSystem { /** The user time zone. The assistant uses the time zone to correctly resolve relative time references. */ timezone?: string; - /** String value provided by the API client that should be unique per each distinct end user of the service powered by Assistant. */ + /** A string value that identifies the user who is interacting with the assistant. The client must provide a unique identifier for each individual end user who accesses the application. This user ID may be used for billing and other purposes. */ user_id?: string; - /** This property is normally set by the Assistant which sets this to 1 during the first conversation turn and then increments it by 1 with every subsequent turn. A turn count equal to 0 (or > 0) informs the Assistant that this is (or is not) the first turn in a conversation which influences the behavior of some skills. The Conversation skill uses this to evaluate its `welcome` and `conversation_start` conditions. */ + /** A counter that is automatically incremented with each turn of the conversation. A value of 1 indicates that this is the first turn of a new conversation, which can affect the behavior of some skills. 
*/ turn_count?: number; } diff --git a/discovery/v1-generated.ts b/discovery/v1-generated.ts index 692194045b..f48a1bc6c9 100644 --- a/discovery/v1-generated.ts +++ b/discovery/v1-generated.ts @@ -82,15 +82,18 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['name']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'name': _params.name, 'description': _params.description, 'size': _params.size }; + const parameters = { options: { url: '/v1/environments', @@ -105,6 +108,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -121,13 +125,16 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['environment_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'environment_id': _params.environment_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}', @@ -141,6 +148,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -157,13 +165,16 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['environment_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'environment_id': _params.environment_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}', @@ -177,6 +188,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -194,9 +206,11 @@ class DiscoveryV1 extends BaseService { public listEnvironments(params?: DiscoveryV1.ListEnvironmentsParams, callback?: DiscoveryV1.Callback): NodeJS.ReadableStream | void { const _params = (typeof params === 'function' && !callback) ? {} : extend({}, params); const _callback = (typeof params === 'function' && !callback) ? params : (callback) ? callback : () => {/* noop */}; + const query = { 'name': _params.name }; + const parameters = { options: { url: '/v1/environments', @@ -210,6 +224,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -229,16 +244,20 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'collection_ids']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'collection_ids': _params.collection_ids }; + const path = { 'environment_id': _params.environment_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/fields', @@ -253,6 +272,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -276,18 +296,22 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['environment_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'name': _params.name, 'description': _params.description, 'size': _params.size }; + const path = { 'environment_id': _params.environment_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}', @@ -303,6 +327,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -340,10 +365,12 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'name']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'name': _params.name, 'description': _params.description, @@ -352,9 +379,11 @@ class DiscoveryV1 extends BaseService { 'normalizations': _params.normalizations, 'source': _params.source }; + const path = { 'environment_id': _params.environment_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/configurations', @@ -370,6 +399,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -392,14 +422,17 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'configuration_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'environment_id': _params.environment_id, 'configuration_id': _params.configuration_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/configurations/{configuration_id}', @@ -413,6 +446,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -430,14 +464,17 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'configuration_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'environment_id': _params.environment_id, 'configuration_id': _params.configuration_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/configurations/{configuration_id}', @@ -451,6 +488,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -470,16 +508,20 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['environment_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'name': _params.name }; + const path = { 'environment_id': _params.environment_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/configurations', @@ -494,6 +536,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -526,10 +569,12 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'configuration_id', 'name']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'name': _params.name, 'description': _params.description, @@ -538,10 +583,12 @@ class DiscoveryV1 extends BaseService { 'normalizations': _params.normalizations, 'source': _params.source }; + const path = { 'environment_id': _params.environment_id, 'configuration_id': _params.configuration_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/configurations/{configuration_id}', @@ -557,6 +604,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -601,6 +649,7 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['environment_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); @@ -610,6 +659,7 @@ class DiscoveryV1 extends BaseService { 'WARNING: `filename` should be provided if `file` is not null. This will be REQUIRED in the next major release.' 
); } + const formData = { 'configuration': _params.configuration, 'file': { @@ -619,13 +669,16 @@ class DiscoveryV1 extends BaseService { }, 'metadata': _params.metadata }; + const query = { 'step': _params.step, 'configuration_id': _params.configuration_id }; + const path = { 'environment_id': _params.environment_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/preview', @@ -641,6 +694,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -666,19 +720,23 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'name']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'name': _params.name, 'description': _params.description, 'configuration_id': _params.configuration_id, 'language': _params.language }; + const path = { 'environment_id': _params.environment_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/collections', @@ -694,6 +752,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -711,14 +770,17 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'collection_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'environment_id': _params.environment_id, 'collection_id': _params.collection_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/collections/{collection_id}', @@ -732,6 +794,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -749,14 +812,17 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'collection_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'environment_id': _params.environment_id, 'collection_id': _params.collection_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/collections/{collection_id}', @@ -770,6 +836,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -789,14 +856,17 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'collection_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'environment_id': _params.environment_id, 'collection_id': _params.collection_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/collections/{collection_id}/fields', @@ -810,6 +880,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -829,16 +900,20 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['environment_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'name': _params.name }; + const path = { 'environment_id': _params.environment_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/collections', @@ -853,6 +928,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -873,19 +949,23 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'collection_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'name': _params.name, 'description': _params.description, 'configuration_id': _params.configuration_id }; + const path = { 'environment_id': _params.environment_id, 'collection_id': _params.collection_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/collections/{collection_id}', @@ -901,6 +981,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -939,17 +1020,21 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'collection_id', 'expansions']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'expansions': _params.expansions }; + const path = { 'environment_id': _params.environment_id, 'collection_id': _params.collection_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/collections/{collection_id}/expansions', @@ -965,6 +1050,60 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + + return this.createRequest(parameters, _callback); + }; + + /** + * Create tokenization dictionary. + * + * Upload a custom tokenization dictionary to use with the specified collection. + * + * @param {Object} params - The parameters to send to the service. + * @param {string} params.environment_id - The ID of the environment. + * @param {string} params.collection_id - The ID of the collection. + * @param {TokenDictRule[]} [params.tokenization_rules] - An array of tokenization rules. 
Each rule contains the + * original `text` string, component `tokens`, any alternate character set `readings`, and which `part_of_speech` the + * text is from. + * @param {Object} [params.headers] - Custom request headers + * @param {Function} [callback] - The callback that handles the response. + * @returns {NodeJS.ReadableStream|void} + */ + public createTokenizationDictionary(params: DiscoveryV1.CreateTokenizationDictionaryParams, callback?: DiscoveryV1.Callback): NodeJS.ReadableStream | void { + const _params = extend({}, params); + const _callback = (callback) ? callback : () => { /* noop */ }; + const requiredParams = ['environment_id', 'collection_id']; + + const missingParams = getMissingParams(_params, requiredParams); + if (missingParams) { + return _callback(missingParams); + } + + const body = { + 'tokenization_rules': _params.tokenization_rules + }; + + const path = { + 'environment_id': _params.environment_id, + 'collection_id': _params.collection_id + }; + + const parameters = { + options: { + url: '/v1/environments/{environment_id}/collections/{collection_id}/word_lists/tokenization_dictionary', + method: 'POST', + json: true, + body, + path, + }, + defaultOptions: extend(true, {}, this._options, { + headers: extend(true, { + 'Accept': 'application/json', + 'Content-Type': 'application/json', + }, _params.headers), + }), + }; + return this.createRequest(parameters, _callback); + }; @@ -985,14 +1124,17 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'collection_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'environment_id': _params.environment_id, 'collection_id': _params.collection_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/collections/{collection_id}/expansions', @@ -1006,6 +1148,95 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + + return this.createRequest(parameters, _callback); + }; + + /** + * Delete tokenization dictionary. + * + * Delete the tokenization dictionary from the collection. + * + * @param {Object} params - The parameters to send to the service. + * @param {string} params.environment_id - The ID of the environment. + * @param {string} params.collection_id - The ID of the collection. + * @param {Object} [params.headers] - Custom request headers + * @param {Function} [callback] - The callback that handles the response. + * @returns {NodeJS.ReadableStream|void} + */ + public deleteTokenizationDictionary(params: DiscoveryV1.DeleteTokenizationDictionaryParams, callback?: DiscoveryV1.Callback): NodeJS.ReadableStream | void { + const _params = extend({}, params); + const _callback = (callback) ? 
callback : () => { /* noop */ }; + const requiredParams = ['environment_id', 'collection_id']; + + const missingParams = getMissingParams(_params, requiredParams); + if (missingParams) { + return _callback(missingParams); + } + + const path = { + 'environment_id': _params.environment_id, + 'collection_id': _params.collection_id + }; + + const parameters = { + options: { + url: '/v1/environments/{environment_id}/collections/{collection_id}/word_lists/tokenization_dictionary', + method: 'DELETE', + path, + }, + defaultOptions: extend(true, {}, this._options, { + headers: extend(true, { + 'Accept': 'application/json', + 'Content-Type': 'application/json', + }, _params.headers), + }), + }; + + return this.createRequest(parameters, _callback); + }; + + /** + * Get tokenization dictionary status. + * + * Returns the current status of the tokenization dictionary for the specified collection. + * + * @param {Object} params - The parameters to send to the service. + * @param {string} params.environment_id - The ID of the environment. + * @param {string} params.collection_id - The ID of the collection. + * @param {Object} [params.headers] - Custom request headers + * @param {Function} [callback] - The callback that handles the response. + * @returns {NodeJS.ReadableStream|void} + */ + public getTokenizationDictionaryStatus(params: DiscoveryV1.GetTokenizationDictionaryStatusParams, callback?: DiscoveryV1.Callback): NodeJS.ReadableStream | void { + const _params = extend({}, params); + const _callback = (callback) ? 
callback : () => { /* noop */ }; + const requiredParams = ['environment_id', 'collection_id']; + + const missingParams = getMissingParams(_params, requiredParams); + if (missingParams) { + return _callback(missingParams); + } + + const path = { + 'environment_id': _params.environment_id, + 'collection_id': _params.collection_id + }; + + const parameters = { + options: { + url: '/v1/environments/{environment_id}/collections/{collection_id}/word_lists/tokenization_dictionary', + method: 'GET', + path, + }, + defaultOptions: extend(true, {}, this._options, { + headers: extend(true, { + 'Accept': 'application/json', + 'Content-Type': 'application/json', + }, _params.headers), + }), + }; + return this.createRequest(parameters, _callback); }; @@ -1026,14 +1257,17 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'collection_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'environment_id': _params.environment_id, 'collection_id': _params.collection_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/collections/{collection_id}/expansions', @@ -1047,6 +1281,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1099,6 +1334,7 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'collection_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); @@ -1108,6 +1344,7 @@ class DiscoveryV1 extends BaseService { 'WARNING: `filename` should be provided if `file` is not null. This will be REQUIRED in the next major release.' 
); } + const formData = { 'file': { data: _params.file, @@ -1116,10 +1353,12 @@ class DiscoveryV1 extends BaseService { }, 'metadata': _params.metadata }; + const path = { 'environment_id': _params.environment_id, 'collection_id': _params.collection_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/collections/{collection_id}/documents', @@ -1134,6 +1373,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1155,15 +1395,18 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'collection_id', 'document_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'environment_id': _params.environment_id, 'collection_id': _params.collection_id, 'document_id': _params.document_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/collections/{collection_id}/documents/{document_id}', @@ -1177,6 +1420,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1199,15 +1443,18 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'collection_id', 'document_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'environment_id': _params.environment_id, 'collection_id': _params.collection_id, 'document_id': _params.document_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/collections/{collection_id}/documents/{document_id}', @@ -1221,6 +1468,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1252,6 +1500,7 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'collection_id', 'document_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); @@ -1261,6 +1510,7 @@ class DiscoveryV1 extends BaseService { 'WARNING: `filename` should be provided if `file` is not null. This will be REQUIRED in the next major release.' ); } + const formData = { 'file': { data: _params.file, @@ -1269,11 +1519,13 @@ class DiscoveryV1 extends BaseService { }, 'metadata': _params.metadata }; + const path = { 'environment_id': _params.environment_id, 'collection_id': _params.collection_id, 'document_id': _params.document_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/collections/{collection_id}/documents/{document_id}', @@ -1288,6 +1540,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1317,8 +1570,7 @@ class DiscoveryV1 extends BaseService { * with filters. Useful for applications to build lists, tables, and time series. For a full list of possible * aggregations, see the Query reference. 
* @param {number} [params.count] - Number of results to return. - * @param {string[]} [params.return_fields] - A comma-separated list of the portion of the document hierarchy to - * return. + * @param {string[]} [params.return_fields] - A comma-separated list of the portion of the document hierarchy to return. * @param {number} [params.offset] - The number of query results to skip at the beginning. For example, if the total * number of results that are returned is 10 and the offset is 8, it returns the last two results. * @param {string[]} [params.sort] - A comma-separated list of fields in the document to sort on. You can optionally @@ -1338,8 +1590,8 @@ class DiscoveryV1 extends BaseService { * @param {string} [params.deduplicate_field] - When specified, duplicate results based on the field specified are * removed from the returned results. Duplicate comparison is limited to the current query only, **offset** is not * considered. This parameter is currently Beta functionality. - * @param {string[]} [params.collection_ids] - A comma-separated list of collection IDs to be queried against. - * Required when querying multiple collections, invalid when performing a single collection query. + * @param {string[]} [params.collection_ids] - A comma-separated list of collection IDs to be queried against. Required + * when querying multiple collections, invalid when performing a single collection query. * @param {boolean} [params.similar] - When `true`, results are returned based on their similarity to the document IDs * specified in the **similar.document_ids** parameter. * @param {string[]} [params.similar_document_ids] - A comma-separated list of document IDs to find similar documents. @@ -1362,10 +1614,12 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['environment_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + // these params were arrays but now need to be strings, the following code is for compatibility const nonArrayParams = ['return_fields', 'sort', 'passages_fields', 'collection_ids', 'similar_document_ids', 'similar_fields']; nonArrayParams.forEach(paramName => { @@ -1373,6 +1627,7 @@ class DiscoveryV1 extends BaseService { _params[paramName] = _params[paramName].join(','); } }); + const body = { 'filter': _params.filter, 'query': _params.query, @@ -1395,9 +1650,11 @@ class DiscoveryV1 extends BaseService { 'similar.fields': _params.similar_fields, 'bias': _params.bias }; + const path = { 'environment_id': _params.environment_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/query', @@ -1414,6 +1671,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1469,10 +1727,12 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'collection_ids']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'collection_ids': _params.collection_ids, 'filter': _params.filter, @@ -1489,9 +1749,11 @@ class DiscoveryV1 extends BaseService { 'similar.document_ids': _params.similar_document_ids, 'similar.fields': _params.similar_fields }; + const path = { 'environment_id': _params.environment_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/notices', @@ -1506,6 +1768,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1532,8 +1795,7 @@ class DiscoveryV1 extends BaseService { * with filters. Useful for applications to build lists, tables, and time series. For a full list of possible * aggregations, see the Query reference. * @param {number} [params.count] - Number of results to return. - * @param {string[]} [params.return_fields] - A comma-separated list of the portion of the document hierarchy to - * return. + * @param {string[]} [params.return_fields] - A comma-separated list of the portion of the document hierarchy to return. * @param {number} [params.offset] - The number of query results to skip at the beginning. For example, if the total * number of results that are returned is 10 and the offset is 8, it returns the last two results. * @param {string[]} [params.sort] - A comma-separated list of fields in the document to sort on. You can optionally @@ -1553,8 +1815,8 @@ class DiscoveryV1 extends BaseService { * @param {string} [params.deduplicate_field] - When specified, duplicate results based on the field specified are * removed from the returned results. Duplicate comparison is limited to the current query only, **offset** is not * considered. This parameter is currently Beta functionality. 
- * @param {string[]} [params.collection_ids] - A comma-separated list of collection IDs to be queried against. - * Required when querying multiple collections, invalid when performing a single collection query. + * @param {string[]} [params.collection_ids] - A comma-separated list of collection IDs to be queried against. Required + * when querying multiple collections, invalid when performing a single collection query. * @param {boolean} [params.similar] - When `true`, results are returned based on their similarity to the document IDs * specified in the **similar.document_ids** parameter. * @param {string[]} [params.similar_document_ids] - A comma-separated list of document IDs to find similar documents. @@ -1577,10 +1839,12 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'collection_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + // these params were arrays but now need to be strings, the following code is for compatibility const nonArrayParams = ['return_fields', 'sort', 'passages_fields', 'collection_ids', 'similar_document_ids']; nonArrayParams.forEach(paramName => { @@ -1588,6 +1852,7 @@ class DiscoveryV1 extends BaseService { _params[paramName] = _params[paramName].join(','); } }); + const body = { 'filter': _params.filter, 'query': _params.query, @@ -1610,10 +1875,12 @@ class DiscoveryV1 extends BaseService { 'similar.fields': _params.similar_fields, 'bias': _params.bias }; + const path = { 'environment_id': _params.environment_id, 'collection_id': _params.collection_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/collections/{collection_id}/query', @@ -1630,6 +1897,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1659,10 
+1927,12 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'collection_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'feature': _params.feature, 'entity': _params.entity, @@ -1670,10 +1940,12 @@ class DiscoveryV1 extends BaseService { 'count': _params.count, 'evidence_count': _params.evidence_count }; + const path = { 'environment_id': _params.environment_id, 'collection_id': _params.collection_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/collections/{collection_id}/query_entities', @@ -1689,6 +1961,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1750,10 +2023,12 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'collection_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'filter': _params.filter, 'query': _params.query, @@ -1773,10 +2048,12 @@ class DiscoveryV1 extends BaseService { 'similar.document_ids': _params.similar_document_ids, 'similar.fields': _params.similar_fields }; + const path = { 'environment_id': _params.environment_id, 'collection_id': _params.collection_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/collections/{collection_id}/notices', @@ -1791,6 +2068,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1822,10 +2100,12 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'collection_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'entities': _params.entities, 'context': _params.context, @@ -1834,10 +2114,12 @@ class DiscoveryV1 extends BaseService { 'count': _params.count, 'evidence_count': _params.evidence_count }; + const path = { 'environment_id': _params.environment_id, 'collection_id': _params.collection_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/collections/{collection_id}/query_relations', @@ -1853,6 +2135,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1879,19 +2162,23 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'collection_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'natural_language_query': _params.natural_language_query, 'filter': _params.filter, 'examples': _params.examples }; + const path = { 'environment_id': _params.environment_id, 'collection_id': _params.collection_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/collections/{collection_id}/training_data', @@ -1907,6 +2194,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1930,20 +2218,24 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'collection_id', 'query_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'document_id': _params.document_id, 'cross_reference': _params.cross_reference, 'relevance': _params.relevance }; + const path = { 'environment_id': _params.environment_id, 'collection_id': _params.collection_id, 'query_id': _params.query_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/collections/{collection_id}/training_data/{query_id}/examples', @@ -1959,6 +2251,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1978,14 +2271,17 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'collection_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'environment_id': _params.environment_id, 'collection_id': _params.collection_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/collections/{collection_id}/training_data', @@ -1999,6 +2295,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2019,15 +2316,18 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'collection_id', 'query_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'environment_id': _params.environment_id, 'collection_id': _params.collection_id, 'query_id': _params.query_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/collections/{collection_id}/training_data/{query_id}', @@ -2041,6 +2341,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2062,16 +2363,19 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'collection_id', 'query_id', 'example_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'environment_id': _params.environment_id, 'collection_id': _params.collection_id, 'query_id': _params.query_id, 'example_id': _params.example_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/collections/{collection_id}/training_data/{query_id}/examples/{example_id}', @@ -2085,6 +2389,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2105,15 +2410,18 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'collection_id', 'query_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'environment_id': _params.environment_id, 'collection_id': _params.collection_id, 'query_id': _params.query_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/collections/{collection_id}/training_data/{query_id}', @@ -2127,6 +2435,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2148,16 +2457,19 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'collection_id', 'query_id', 'example_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'environment_id': _params.environment_id, 'collection_id': _params.collection_id, 'query_id': _params.query_id, 'example_id': _params.example_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/collections/{collection_id}/training_data/{query_id}/examples/{example_id}', @@ -2171,6 +2483,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2190,14 +2503,17 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'collection_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'environment_id': _params.environment_id, 'collection_id': _params.collection_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/collections/{collection_id}/training_data', @@ -2211,6 +2527,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2231,15 +2548,18 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'collection_id', 'query_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'environment_id': _params.environment_id, 'collection_id': _params.collection_id, 'query_id': _params.query_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/collections/{collection_id}/training_data/{query_id}/examples', @@ -2253,6 +2573,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2276,20 +2597,24 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'collection_id', 'query_id', 'example_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'cross_reference': _params.cross_reference, 'relevance': _params.relevance }; + const path = { 'environment_id': _params.environment_id, 'collection_id': _params.collection_id, 'query_id': _params.query_id, 'example_id': _params.example_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/collections/{collection_id}/training_data/{query_id}/examples/{example_id}', @@ -2305,6 +2630,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2332,13 +2658,16 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['customer_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'customer_id': _params.customer_id }; + const parameters = { options: { url: '/v1/user_data', @@ -2352,6 +2681,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2376,14 +2706,17 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['type', 'data']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'type': _params.type, 'data': _params.data }; + const parameters = { options: { url: '/v1/events', @@ -2398,6 +2731,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2421,11 +2755,13 @@ class DiscoveryV1 extends BaseService { public getMetricsEventRate(params?: DiscoveryV1.GetMetricsEventRateParams, callback?: DiscoveryV1.Callback): NodeJS.ReadableStream | void { const _params = (typeof params === 'function' && !callback) ? {} : extend({}, params); const _callback = (typeof params === 'function' && !callback) ? params : (callback) ? callback : () => {/* noop */}; + const query = { 'start_time': _params.start_time, 'end_time': _params.end_time, 'result_type': _params.result_type }; + const parameters = { options: { url: '/v1/metrics/event_rate', @@ -2439,6 +2775,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2460,11 +2797,13 @@ class DiscoveryV1 extends BaseService { public getMetricsQuery(params?: DiscoveryV1.GetMetricsQueryParams, callback?: DiscoveryV1.Callback): NodeJS.ReadableStream | void { const _params = (typeof params === 'function' && !callback) ? {} : extend({}, params); const _callback = (typeof params === 'function' && !callback) ? params : (callback) ? 
callback : () => {/* noop */}; + const query = { 'start_time': _params.start_time, 'end_time': _params.end_time, 'result_type': _params.result_type }; + const parameters = { options: { url: '/v1/metrics/number_of_queries', @@ -2478,6 +2817,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2501,11 +2841,13 @@ class DiscoveryV1 extends BaseService { public getMetricsQueryEvent(params?: DiscoveryV1.GetMetricsQueryEventParams, callback?: DiscoveryV1.Callback): NodeJS.ReadableStream | void { const _params = (typeof params === 'function' && !callback) ? {} : extend({}, params); const _callback = (typeof params === 'function' && !callback) ? params : (callback) ? callback : () => {/* noop */}; + const query = { 'start_time': _params.start_time, 'end_time': _params.end_time, 'result_type': _params.result_type }; + const parameters = { options: { url: '/v1/metrics/number_of_queries_with_event', @@ -2519,6 +2861,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2541,11 +2884,13 @@ class DiscoveryV1 extends BaseService { public getMetricsQueryNoResults(params?: DiscoveryV1.GetMetricsQueryNoResultsParams, callback?: DiscoveryV1.Callback): NodeJS.ReadableStream | void { const _params = (typeof params === 'function' && !callback) ? {} : extend({}, params); const _callback = (typeof params === 'function' && !callback) ? params : (callback) ? 
callback : () => {/* noop */}; + const query = { 'start_time': _params.start_time, 'end_time': _params.end_time, 'result_type': _params.result_type }; + const parameters = { options: { url: '/v1/metrics/number_of_queries_with_no_search_results', @@ -2559,6 +2904,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2578,9 +2924,11 @@ class DiscoveryV1 extends BaseService { public getMetricsQueryTokenEvent(params?: DiscoveryV1.GetMetricsQueryTokenEventParams, callback?: DiscoveryV1.Callback): NodeJS.ReadableStream | void { const _params = (typeof params === 'function' && !callback) ? {} : extend({}, params); const _callback = (typeof params === 'function' && !callback) ? params : (callback) ? callback : () => {/* noop */}; + const query = { 'count': _params.count }; + const parameters = { options: { url: '/v1/metrics/top_query_tokens_with_event_rate', @@ -2594,6 +2942,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2622,6 +2971,7 @@ class DiscoveryV1 extends BaseService { public queryLog(params?: DiscoveryV1.QueryLogParams, callback?: DiscoveryV1.Callback): NodeJS.ReadableStream | void { const _params = (typeof params === 'function' && !callback) ? {} : extend({}, params); const _callback = (typeof params === 'function' && !callback) ? params : (callback) ? callback : () => {/* noop */}; + const query = { 'filter': _params.filter, 'query': _params.query, @@ -2629,6 +2979,7 @@ class DiscoveryV1 extends BaseService { 'offset': _params.offset, 'sort': _params.sort }; + const parameters = { options: { url: '/v1/logs', @@ -2642,6 +2993,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2674,17 +3026,21 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['environment_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'source_type': _params.source_type, 'credential_details': _params.credential_details }; + const path = { 'environment_id': _params.environment_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/credentials', @@ -2700,6 +3056,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2719,14 +3076,17 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'credential_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'environment_id': _params.environment_id, 'credential_id': _params.credential_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/credentials/{credential_id}', @@ -2740,6 +3100,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2762,14 +3123,17 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'credential_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'environment_id': _params.environment_id, 'credential_id': _params.credential_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/credentials/{credential_id}', @@ -2783,6 +3147,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2803,13 +3168,16 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['environment_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'environment_id': _params.environment_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/credentials', @@ -2823,6 +3191,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2851,18 +3220,22 @@ class DiscoveryV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['environment_id', 'credential_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'source_type': _params.source_type, 'credential_details': _params.credential_details }; + const path = { 'environment_id': _params.environment_id, 'credential_id': _params.credential_id }; + const parameters = { options: { url: '/v1/environments/{environment_id}/credentials/{credential_id}', @@ -2878,6 +3251,7 @@ class DiscoveryV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -3205,6 +3579,17 @@ namespace DiscoveryV1 { headers?: Object; } + /** Parameters for the `createTokenizationDictionary` operation. */ + export interface CreateTokenizationDictionaryParams { + /** The ID of the environment. */ + environment_id: string; + /** The ID of the collection. */ + collection_id: string; + /** An array of tokenization rules. Each rule contains the original `text` string, component `tokens`, any alternate character set `readings`, and which `part_of_speech` the text is from. */ + tokenization_rules?: TokenDictRule[]; + headers?: Object; + } + /** Parameters for the `deleteExpansions` operation. */ export interface DeleteExpansionsParams { /** The ID of the environment. */ @@ -3214,6 +3599,24 @@ namespace DiscoveryV1 { headers?: Object; } + /** Parameters for the `deleteTokenizationDictionary` operation. */ + export interface DeleteTokenizationDictionaryParams { + /** The ID of the environment. */ + environment_id: string; + /** The ID of the collection. */ + collection_id: string; + headers?: Object; + } + + /** Parameters for the `getTokenizationDictionaryStatus` operation. */ + export interface GetTokenizationDictionaryStatusParams { + /** The ID of the environment. */ + environment_id: string; + /** The ID of the collection. 
*/ + collection_id: string; + headers?: Object; + } + /** Parameters for the `listExpansions` operation. */ export interface ListExpansionsParams { /** The ID of the environment. */ @@ -4801,6 +5204,26 @@ namespace DiscoveryV1 { notices?: Notice[]; } + /** An object defining a single tokenization rule. */ + export interface TokenDictRule { + /** The string to tokenize. */ + text?: string; + /** Array of tokens that the `text` field is split into when found. */ + tokens?: string[]; + /** Array of tokens that represent the content of the `text` field in an alternate character set. */ + readings?: string[]; + /** The part of speech that the `text` string belongs to. For example `noun`. Custom parts of speech can be specified. */ + part_of_speech?: string; + } + + /** Object describing the current status of the tokenization dictionary. */ + export interface TokenDictStatusResponse { + /** Current tokenization dictionary status for the specified collection. */ + status?: string; + /** The type for this dictionary. Always returns `tokenization_dictionary`. */ + type?: string; + } + + /** TopHitsResults. */ export interface TopHitsResults { /** Number of matching results. */ diff --git a/language-translator/v3.ts b/language-translator/v3.ts index fd33d5c036..a99a43e959 100644 --- a/language-translator/v3.ts +++ b/language-translator/v3.ts @@ -85,16 +85,19 @@ class LanguageTranslatorV3 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['text']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'text': _params.text, 'model_id': _params.model_id, 'source': _params.source, 'target': _params.target }; + const parameters = { options: { url: '/v3/translate', @@ -109,6 +112,7 @@ class LanguageTranslatorV3 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -131,11 +135,13 @@ class LanguageTranslatorV3 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['text']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } const body = _params.text; + const parameters = { options: { url: '/v3/identify', @@ -150,6 +156,7 @@ class LanguageTranslatorV3 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -167,6 +174,7 @@ class LanguageTranslatorV3 extends BaseService { public listIdentifiableLanguages(params?: LanguageTranslatorV3.ListIdentifiableLanguagesParams, callback?: LanguageTranslatorV3.Callback): NodeJS.ReadableStream | void { const _params = (typeof params === 'function' && !callback) ? {} : extend({}, params); const _callback = (typeof params === 'function' && !callback) ? params : (callback) ? callback : () => {/* noop */}; + const parameters = { options: { url: '/v3/identifiable_languages', @@ -178,6 +186,7 @@ class LanguageTranslatorV3 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -226,10 +235,12 @@ class LanguageTranslatorV3 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['base_model_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const formData = { 'forced_glossary': { data: _params.forced_glossary, @@ -242,10 +253,12 @@ class LanguageTranslatorV3 extends BaseService { contentType: 'application/octet-stream' } }; + const query = { 'base_model_id': _params.base_model_id, 'name': _params.name }; + const parameters = { options: { url: '/v3/models', @@ -260,6 +273,7 @@ class LanguageTranslatorV3 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -278,13 +292,16 @@ class LanguageTranslatorV3 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['model_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'model_id': _params.model_id }; + const parameters = { options: { url: '/v3/models/{model_id}', @@ -297,6 +314,7 @@ class LanguageTranslatorV3 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -316,13 +334,16 @@ class LanguageTranslatorV3 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['model_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'model_id': _params.model_id }; + const parameters = { options: { url: '/v3/models/{model_id}', @@ -335,6 +356,7 @@ class LanguageTranslatorV3 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -357,11 +379,13 @@ class LanguageTranslatorV3 extends BaseService { public listModels(params?: LanguageTranslatorV3.ListModelsParams, callback?: LanguageTranslatorV3.Callback): NodeJS.ReadableStream | void { const _params = (typeof params === 'function' && !callback) ? {} : extend({}, params); const _callback = (typeof params === 'function' && !callback) ? params : (callback) ? callback : () => {/* noop */}; + const query = { 'source': _params.source, 'target': _params.target, 'default': _params.default_models }; + const parameters = { options: { url: '/v3/models', @@ -374,6 +398,7 @@ class LanguageTranslatorV3 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; diff --git a/natural-language-classifier/v1-generated.ts b/natural-language-classifier/v1-generated.ts index 351e035198..965e6c0474 100644 --- a/natural-language-classifier/v1-generated.ts +++ b/natural-language-classifier/v1-generated.ts @@ -71,16 +71,20 @@ class NaturalLanguageClassifierV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['classifier_id', 'text']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'text': _params.text }; + const path = { 'classifier_id': _params.classifier_id }; + const parameters = { options: { url: '/v1/classifiers/{classifier_id}/classify', @@ -96,6 +100,7 @@ class NaturalLanguageClassifierV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -118,16 +123,20 @@ class NaturalLanguageClassifierV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['classifier_id', 'collection']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'collection': _params.collection }; + const path = { 'classifier_id': _params.classifier_id }; + const parameters = { options: { url: '/v1/classifiers/{classifier_id}/classify_collection', @@ -143,6 +152,7 @@ class NaturalLanguageClassifierV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -175,10 +185,12 @@ class NaturalLanguageClassifierV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['metadata', 'training_data']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const formData = { 'training_metadata': { data: _params.metadata, @@ -191,6 +203,7 @@ class NaturalLanguageClassifierV1 extends BaseService { contentType: 'text/csv' } }; + const parameters = { options: { url: '/v1/classifiers', @@ -204,6 +217,7 @@ class NaturalLanguageClassifierV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -220,13 +234,16 @@ class NaturalLanguageClassifierV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['classifier_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'classifier_id': _params.classifier_id }; + const parameters = { options: { url: '/v1/classifiers/{classifier_id}', @@ -240,6 +257,7 @@ class NaturalLanguageClassifierV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -258,13 +276,16 @@ class NaturalLanguageClassifierV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['classifier_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'classifier_id': _params.classifier_id }; + const parameters = { options: { url: '/v1/classifiers/{classifier_id}', @@ -278,6 +299,7 @@ class NaturalLanguageClassifierV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -294,6 +316,7 @@ class NaturalLanguageClassifierV1 extends BaseService { public listClassifiers(params?: NaturalLanguageClassifierV1.ListClassifiersParams, callback?: NaturalLanguageClassifierV1.Callback): NodeJS.ReadableStream | void { const _params = (typeof params === 'function' && !callback) ? {} : extend({}, params); const _callback = (typeof params === 'function' && !callback) ? params : (callback) ? callback : () => {/* noop */}; + const parameters = { options: { url: '/v1/classifiers', @@ -306,6 +329,7 @@ class NaturalLanguageClassifierV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; diff --git a/natural-language-understanding/v1-generated.ts b/natural-language-understanding/v1-generated.ts index 7ec467197d..f7ea81e089 100644 --- a/natural-language-understanding/v1-generated.ts +++ b/natural-language-understanding/v1-generated.ts @@ -20,7 +20,7 @@ import { BaseService } from '../lib/base_service'; import { getMissingParams } from '../lib/helper'; /** - * Analyze various features of text content at scale. Provide text, raw HTML, or a public URL, and IBM Watson Natural Language Understanding will give you results for the features you request. The service cleans HTML content before analysis by default, so the results can ignore most advertisements and other unwanted content. 
You can create custom models with Watson Knowledge Studio that can be used to detect custom entities and relations in Natural Language Understanding. + * Analyze various features of text content at scale. Provide text, raw HTML, or a public URL and IBM Watson Natural Language Understanding will give you results for the features you request. The service cleans HTML content before analysis by default, so the results can ignore most advertisements and other unwanted content. You can create [custom models](/docs/services/natural-language-understanding/customizing.html) with Watson Knowledge Studio to detect custom entities and relations in Natural Language Understanding. */ class NaturalLanguageUnderstandingV1 extends BaseService { @@ -63,44 +63,8 @@ class NaturalLanguageUnderstandingV1 extends BaseService { /** * Analyze text, HTML, or a public webpage. * - * Analyzes text, HTML, or a public webpage with one or more text analysis features. - * - * ### Concepts - * Identify general concepts that are referenced or alluded to in your content. Concepts that are detected typically - * have an associated link to a DBpedia resource. - * - * ### Emotion - * Detect anger, disgust, fear, joy, or sadness that is conveyed by your content. Emotion information can be returned - * for detected entities, keywords, or user-specified target phrases found in the text. - * - * ### Entities - * Detect important people, places, geopolitical entities and other types of entities in your content. Entity - * detection recognizes consecutive coreferences of each entity. For example, analysis of the following text would - * count \"Barack Obama\" and \"He\" as the same entity: - * - * \"Barack Obama was the 44th President of the United States. He took office in January 2009.\" - * - * ### Keywords - * Determine the most important keywords in your content. Keyword phrases are organized by relevance in the results. 
- * - * ### Metadata - * Get author information, publication date, and the title of your text/HTML content. - * - * ### Relations - * Recognize when two entities are related, and identify the type of relation. For example, you can identify an - * \"awardedTo\" relation between an award and its recipient. - * - * ### Semantic Roles - * Parse sentences into subject-action-object form, and identify entities and keywords that are subjects or objects of - * an action. - * - * ### Sentiment - * Determine whether your content conveys postive or negative sentiment. Sentiment information can be returned for - * detected entities, keywords, or user-specified target phrases found in the text. - * - * ### Categories - * Categorize your content into a hierarchical 5-level taxonomy. For example, \"Leonardo DiCaprio won an Oscar\" - * returns \"/art and entertainment/movies and tv/movies\" as the most confident classification. + * Analyzes text, HTML, or a public webpage with one or more text analysis features, including categories, concepts, + * emotion, entities, keywords, metadata, relations, semantic roles, and sentiment. * * @param {Object} params - The parameters to send to the service. * @param {Features} params.features - Specific features to analyze the document for. @@ -129,10 +93,12 @@ class NaturalLanguageUnderstandingV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['features']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'features': _params.features, 'text': _params.text, @@ -145,6 +111,7 @@ class NaturalLanguageUnderstandingV1 extends BaseService { 'language': _params.language, 'limit_text_characters': _params.limit_text_characters }; + const parameters = { options: { url: '/v1/analyze', @@ -159,6 +126,7 @@ class NaturalLanguageUnderstandingV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -181,13 +149,16 @@ class NaturalLanguageUnderstandingV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['model_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'model_id': _params.model_id }; + const parameters = { options: { url: '/v1/models/{model_id}', @@ -201,6 +172,7 @@ class NaturalLanguageUnderstandingV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -218,6 +190,7 @@ class NaturalLanguageUnderstandingV1 extends BaseService { public listModels(params?: NaturalLanguageUnderstandingV1.ListModelsParams, callback?: NaturalLanguageUnderstandingV1.Callback): NodeJS.ReadableStream | void { const _params = (typeof params === 'function' && !callback) ? {} : extend({}, params); const _callback = (typeof params === 'function' && !callback) ? params : (callback) ? 
callback : () => {/* noop */}; + const parameters = { options: { url: '/v1/models', @@ -230,6 +203,7 @@ class NaturalLanguageUnderstandingV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -344,7 +318,7 @@ namespace NaturalLanguageUnderstandingV1 { name?: string; } - /** The hierarchical 5-level taxonomy the content is categorized into. */ + /** Returns a five-level taxonomy of the content. The top three categories are returned. Supported languages: Arabic, English, French, German, Italian, Japanese, Korean, Portuguese, Spanish. */ export interface CategoriesOptions { /** CategoriesOptions accepts additional properties. */ [propName: string]: any; @@ -358,7 +332,7 @@ namespace NaturalLanguageUnderstandingV1 { score?: number; } - /** Whether or not to analyze content for general concepts that are referenced or alluded to. */ + /** Returns high-level concepts in the content. For example, a research paper about deep learning might return the concept, "Artificial Intelligence" although the term is not mentioned. Supported languages: English, French, German, Japanese, Korean, Portuguese, Spanish. */ export interface ConceptsOptions { /** Maximum number of concepts to return. */ limit?: number; @@ -404,7 +378,7 @@ namespace NaturalLanguageUnderstandingV1 { score?: number; } - /** Whether or not to return emotion analysis of the content. */ + /** Detects anger, disgust, fear, joy, or sadness that is conveyed in the content or by the context around target phrases specified in the targets parameter. You can analyze emotion for detected entities with `entities.emotion` and for keywords with `keywords.emotion`. Supported languages: English. */ export interface EmotionOptions { /** Set this to `false` to hide document-level emotion results. 
*/ document?: boolean; @@ -434,7 +408,7 @@ namespace NaturalLanguageUnderstandingV1 { sadness?: number; } - /** Whether or not to return important people, places, geopolitical, and other entities detected in the analyzed content. */ + /** Identifies people, cities, organizations, and other entities in the content. See [Entity types and subtypes](/docs/services/natural-language-understanding/entity-types.html). Supported languages: English, French, German, Italian, Japanese, Korean, Portuguese, Russian, Spanish, Swedish. Arabic, Chinese, and Dutch custom models are also supported. */ export interface EntitiesOptions { /** Maximum number of entities to return. */ limit?: number; @@ -484,23 +458,23 @@ namespace NaturalLanguageUnderstandingV1 { /** Analysis features and options. */ export interface Features { - /** Whether or not to return the concepts that are mentioned in the analyzed text. */ + /** Returns high-level concepts in the content. For example, a research paper about deep learning might return the concept, "Artificial Intelligence" although the term is not mentioned. Supported languages: English, French, German, Japanese, Korean, Portuguese, Spanish. */ concepts?: ConceptsOptions; - /** Whether or not to extract the emotions implied in the analyzed text. */ + /** Detects anger, disgust, fear, joy, or sadness that is conveyed in the content or by the context around target phrases specified in the targets parameter. You can analyze emotion for detected entities with `entities.emotion` and for keywords with `keywords.emotion`. Supported languages: English */ emotion?: EmotionOptions; - /** Whether or not to extract detected entity objects from the analyzed text. */ + /** Identifies people, cities, organizations, and other entities in the content. See [Entity types and subtypes](/docs/services/natural-language-understanding/entity-types.html). Supported languages: English, French, German, Italian, Japanese, Korean, Portuguese, Russian, Spanish, Swedish. 
Arabic, Chinese, and Dutch custom models are also supported. */ entities?: EntitiesOptions; - /** Whether or not to return the keywords in the analyzed text. */ + /** Returns important keywords in the content. Supported languages: English, French, German, Italian, Japanese, Korean, Portuguese, Russian, Spanish, Swedish. */ keywords?: KeywordsOptions; - /** Whether or not the author, publication date, and title of the analyzed text should be returned. This parameter is only available for URL and HTML input. */ + /** Returns information from the document, including author name, title, RSS/ATOM feeds, prominent page image, and publication date. Supports URL and HTML input types only. */ metadata?: MetadataOptions; - /** Whether or not to return the relationships between detected entities in the analyzed text. */ + /** Recognizes when two entities are related and identifies the type of relation. For example, an `awardedTo` relation might connect the entities "Nobel Prize" and "Albert Einstein". See [Relation types](/docs/services/natural-language-understanding/relations.html). Supported languages: Arabic, English, German, Japanese, Korean, Spanish. Chinese, Dutch, French, Italian, and Portuguese custom models are also supported. */ relations?: RelationsOptions; - /** Whether or not to return the subject-action-object relations from the analyzed text. */ + /** Parses sentences into subject, action, and object form. Supported languages: English, German, Japanese, Korean, Spanish. */ semantic_roles?: SemanticRolesOptions; - /** Whether or not to return the overall sentiment of the analyzed text. */ + /** Analyzes the general sentiment of your content or the sentiment toward specific target phrases. You can analyze sentiment for detected entities with `entities.sentiment` and for keywords with `keywords.sentiment`. 
Supported languages: Arabic, English, French, German, Italian, Japanese, Korean, Portuguese, Russian, Spanish */ sentiment?: SentimentOptions; - /** Whether or not to return the high level category the content is categorized as (i.e. news, art). */ + /** Returns a five-level taxonomy of the content. The top three categories are returned. Supported languages: Arabic, English, French, German, Italian, Japanese, Korean, Portuguese, Spanish. */ categories?: CategoriesOptions; } @@ -510,7 +484,7 @@ namespace NaturalLanguageUnderstandingV1 { link?: string; } - /** An option indicating whether or not important keywords from the analyzed content should be returned. */ + /** Returns important keywords in the content. Supported languages: English, French, German, Italian, Japanese, Korean, Portuguese, Russian, Spanish, Swedish. */ export interface KeywordsOptions { /** Maximum number of keywords to return. */ limit?: number; @@ -534,10 +508,11 @@ namespace NaturalLanguageUnderstandingV1 { /** Models available for Relations and Entities features. */ export interface ListModelsResults { + /** An array of available models. */ models?: Model[]; } - /** The Authors, Publication Date, and Title of the document. Supports URL and HTML input types. */ + /** Returns information from the document, including author name, title, RSS/ATOM feeds, prominent page image, and publication date. Supports URL and HTML input types only. */ export interface MetadataOptions { /** MetadataOptions accepts additional properties. */ [propName: string]: any; @@ -571,6 +546,7 @@ namespace NaturalLanguageUnderstandingV1 { /** RelationArgument. */ export interface RelationArgument { + /** An array of extracted entities. */ entities?: RelationEntity[]; /** Character offsets indicating the beginning and end of the mention in the analyzed text. 
*/ location?: number[]; @@ -586,9 +562,9 @@ namespace NaturalLanguageUnderstandingV1 { type?: string; } - /** An option specifying if the relationships found between entities in the analyzed content should be returned. */ + /** Recognizes when two entities are related and identifies the type of relation. For example, an `awardedTo` relation might connect the entities "Nobel Prize" and "Albert Einstein". See [Relation types](/docs/services/natural-language-understanding/relations.html). Supported languages: Arabic, English, German, Japanese, Korean, Spanish. Chinese, Dutch, French, Italian, and Portuguese custom models are also supported. */ export interface RelationsOptions { - /** Enter a [custom model](https://www.bluemix.net/docs/services/natural-language-understanding/customizing.html) ID to override the default model. */ + /** Enter a [custom model](/docs/services/natural-language-understanding/customizing.html) ID to override the default model. */ model?: string; } @@ -631,10 +607,11 @@ namespace NaturalLanguageUnderstandingV1 { export interface SemanticRolesObject { /** Object text. */ text?: string; + /** An array of extracted keywords. */ keywords?: SemanticRolesKeyword[]; } - /** An option specifying whether or not to identify the subjects, actions, and verbs in the analyzed content. */ + /** Parses sentences into subject, action, and object form. Supported languages: English, German, Japanese, Korean, Spanish. */ export interface SemanticRolesOptions { /** Maximum number of semantic_roles results to return. */ limit?: number; @@ -660,7 +637,9 @@ namespace NaturalLanguageUnderstandingV1 { export interface SemanticRolesSubject { /** Text that corresponds to the subject role. */ text?: string; + /** An array of extracted entities. */ entities?: SemanticRolesEntity[]; + /** An array of extracted keywords. 
*/ keywords?: SemanticRolesKeyword[]; } @@ -672,7 +651,7 @@ namespace NaturalLanguageUnderstandingV1 { tense?: string; } - /** An option specifying if sentiment of detected entities, keywords, or phrases should be returned. */ + /** Analyzes the general sentiment of your content or the sentiment toward specific target phrases. You can analyze sentiment for detected entities with `entities.sentiment` and for keywords with `keywords.sentiment`. Supported languages: Arabic, English, French, German, Italian, Japanese, Korean, Portuguese, Russian, Spanish. */ export interface SentimentOptions { /** Set this to `false` to hide document-level sentiment results. */ document?: boolean; diff --git a/personality-insights/v3-generated.ts b/personality-insights/v3-generated.ts index d9de10c9cc..204876b8f8 100644 --- a/personality-insights/v3-generated.ts +++ b/personality-insights/v3-generated.ts @@ -117,16 +117,19 @@ class PersonalityInsightsV3 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['content', 'content_type']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } const body = _params.content; + const query = { 'raw_scores': _params.raw_scores, 'csv_headers': _params.csv_headers, 'consumption_preferences': _params.consumption_preferences }; + const parameters = { options: { url: '/v3/profile', @@ -144,6 +147,7 @@ class PersonalityInsightsV3 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -203,16 +207,19 @@ class PersonalityInsightsV3 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['content', 'content_type']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } const body = _params.content; + const query = { 'raw_scores': _params.raw_scores, 'csv_headers': _params.csv_headers, 'consumption_preferences': _params.consumption_preferences }; + const parameters = { options: { url: '/v3/profile', @@ -230,6 +237,7 @@ class PersonalityInsightsV3 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; diff --git a/speech-to-text/v1-generated.ts b/speech-to-text/v1-generated.ts index 5a050ad5d5..a5539f8b80 100644 --- a/speech-to-text/v1-generated.ts +++ b/speech-to-text/v1-generated.ts @@ -21,7 +21,7 @@ import { getMissingParams } from '../lib/helper'; import { FileObject } from '../lib/helper'; /** - * The IBM® Speech to Text service provides an API that uses IBM's speech-recognition capabilities to produce transcripts of spoken audio. The service can transcribe speech from various languages and audio formats. It addition to basic transcription, the service can produce detailed information about many different aspects of the audio. For most languages, the service supports two sampling rates, broadband and narrowband. It returns all JSON response content in the UTF-8 character set. For more information about the service, see the [IBM® Cloud documentation](https://console.bluemix.net/docs/services/speech-to-text/index.html). ### API usage guidelines * **Audio formats:** The service accepts audio in many formats (MIME types). See [Audio formats](https://console.bluemix.net/docs/services/speech-to-text/audio-formats.html). * **HTTP interfaces:** The service provides two HTTP Representational State Transfer (REST) interfaces for speech recognition. The basic interface includes a single synchronous method. 
The asynchronous interface provides multiple methods that use registered callbacks and polling for non-blocking recognition. See [The HTTP interface](https://console.bluemix.net/docs/services/speech-to-text/http.html) and [The asynchronous HTTP interface](https://console.bluemix.net/docs/services/speech-to-text/async.html). * **WebSocket interface:** The service also offers a WebSocket interface for speech recognition. The WebSocket interface provides a full-duplex, low-latency communication channel. Clients send requests and audio to the service and receive results over a single connection in an asynchronous fashion. See [The WebSocket interface](https://console.bluemix.net/docs/services/speech-to-text/websockets.html). * **Customization:** The service offers two customization interfaces. Use language model customization to expand the vocabulary of a base model with domain-specific terminology. Use acoustic model customization to adapt a base model for the acoustic characteristics of your audio. Language model customization is generally available for production use by most supported languages; acoustic model customization is beta functionality that is available for all supported languages. See [The customization interface](https://console.bluemix.net/docs/services/speech-to-text/custom.html). * **Customization IDs:** Many methods accept a customization ID to identify a custom language or custom acoustic model. Customization IDs are Globally Unique Identifiers (GUIDs). They are hexadecimal strings that have the format `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`. * **`X-Watson-Learning-Opt-Out`:** By default, all Watson services log requests and their results. Logging is done only to improve the services for future users. The logged data is not shared or made public. To prevent IBM from accessing your data for general service improvements, set the `X-Watson-Learning-Opt-Out` request header to `true` for all requests. 
You must set the header on each request that you do not want IBM to access for general service improvements. Methods of the customization interface do not log corpora, words, and audio resources that you use to build custom models. Your training data is never used to improve the service's base models. However, the service does log such data when a custom model is used with a recognition request. You must set the `X-Watson-Learning-Opt-Out` request header to `true` to prevent IBM from accessing the data to improve the service. * **`X-Watson-Metadata`**: This header allows you to associate a customer ID with data that is passed with a request. If necessary, you can use the **Delete labeled data** method to delete the data for a customer ID. See [Information security](https://console.bluemix.net/docs/services/speech-to-text/information-security.html). + * The IBM® Speech to Text service provides APIs that use IBM's speech-recognition capabilities to produce transcripts of spoken audio. The service can transcribe speech from various languages and audio formats. In addition to basic transcription, the service can produce detailed information about many different aspects of the audio. For most languages, the service supports two sampling rates, broadband and narrowband. It returns all JSON response content in the UTF-8 character set. For speech recognition, the service supports synchronous and asynchronous HTTP Representational State Transfer (REST) interfaces. It also supports a WebSocket interface that provides a full-duplex, low-latency communication channel: Clients send requests and audio to the service and receive results over a single connection asynchronously. The service also offers two customization interfaces. Use language model customization to expand the vocabulary of a base model with domain-specific terminology. Use acoustic model customization to adapt a base model for the acoustic characteristics of your audio. 
Language model customization is generally available for production use with most supported languages; acoustic model customization is beta functionality that is available for all supported languages. */ class SpeechToTextV1 extends BaseService { @@ -73,13 +73,16 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['model_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'model_id': _params.model_id }; + const parameters = { options: { url: '/v1/models/{model_id}', @@ -93,6 +96,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -112,6 +116,7 @@ class SpeechToTextV1 extends BaseService { public listModels(params?: SpeechToTextV1.ListModelsParams, callback?: SpeechToTextV1.Callback): NodeJS.ReadableStream | void { const _params = (typeof params === 'function' && !callback) ? {} : extend({}, params); const _callback = (typeof params === 'function' && !callback) ? params : (callback) ? callback : () => {/* noop */}; + const parameters = { options: { url: '/v1/models', @@ -124,6 +129,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -137,7 +143,7 @@ class SpeechToTextV1 extends BaseService { * Sends audio and returns transcription results for a recognition request. Returns only the final results; to enable * interim results, use the WebSocket API. The service imposes a data size limit of 100 MB. It automatically detects * the endianness of the incoming audio and, for audio that includes multiple channels, downmixes the audio to - * one-channel mono during transcoding. (For the `audio/l16` format, you can specify the endianness.) + * one-channel mono during transcoding. 
* * **See also:** [Making a basic HTTP * request](https://console.bluemix.net/docs/services/speech-to-text/http.html#HTTP-basic). @@ -152,19 +158,26 @@ class SpeechToTextV1 extends BaseService { * * **See also:** * * [Audio transmission](https://console.bluemix.net/docs/services/speech-to-text/input.html#transmission) - * * [Timeouts](https://console.bluemix.net/docs/services/speech-to-text/input.html#timeouts) + * * [Timeouts](https://console.bluemix.net/docs/services/speech-to-text/input.html#timeouts). * * ### Audio formats (content types) * - * Use the `Content-Type` header to specify the audio format (MIME type) of the audio. The service accepts the - * following formats, including specifying the sampling rate, channels, and endianness where indicated. - * * `audio/basic` (Use only with narrowband models.) + * The service accepts audio in the following formats (MIME types). + * * For formats that are labeled **Required**, you must use the `Content-Type` header with the request to specify the + * format of the audio. + * * For all other formats, you can omit the `Content-Type` header or specify `application/octet-stream` with the + * header to have the service automatically detect the format of the audio. (With the `curl` command, you can specify + * either `\"Content-Type:\"` or `\"Content-Type: application/octet-stream\"`.) + * + * Where indicated, the format that you specify must include the sampling rate and can optionally include the number + * of channels and the endianness of the audio. + * * `audio/basic` (**Required.** Use only with narrowband models.) * * `audio/flac` - * * `audio/l16` (Specify the sampling rate (`rate`) and optionally the number of channels (`channels`) and endianness - * (`endianness`) of the audio.) + * * `audio/l16` (**Required.** Specify the sampling rate (`rate`) and optionally the number of channels (`channels`) + * and endianness (`endianness`) of the audio.) 
* * `audio/mp3` * * `audio/mpeg` - * * `audio/mulaw` (Specify the sampling rate (`rate`) of the audio.) + * * `audio/mulaw` (**Required.** Specify the sampling rate (`rate`) of the audio.) * * `audio/ogg` (The service automatically detects the codec of the input audio.) * * `audio/ogg;codecs=opus` * * `audio/ogg;codecs=vorbis` @@ -175,6 +188,9 @@ class SpeechToTextV1 extends BaseService { * * **See also:** [Audio formats](https://console.bluemix.net/docs/services/speech-to-text/audio-formats.html). * + * **Note:** You must pass a content type when using any of the Watson SDKs. The SDKs require the content-type + * parameter for all audio formats. + * * ### Multipart speech recognition * * The method also supports multipart recognition requests. With multipart requests, you pass all audio data as @@ -189,23 +205,26 @@ class SpeechToTextV1 extends BaseService { * request](https://console.bluemix.net/docs/services/speech-to-text/http.html#HTTP-multi). * * @param {Object} params - The parameters to send to the service. - * @param {NodeJS.ReadableStream|FileObject|Buffer} params.audio - The audio to transcribe in the format specified by - * the `Content-Type` header. + * @param {NodeJS.ReadableStream|FileObject|Buffer} params.audio - The audio to transcribe. * @param {string} params.content_type - The type of the input. * @param {string} [params.model] - The identifier of the model that is to be used for the recognition request. - * @param {string} [params.customization_id] - The customization ID (GUID) of a custom language model that is to be - * used with the recognition request. The base model of the specified custom language model must match the model + * @param {string} [params.language_customization_id] - The customization ID (GUID) of a custom language model that is + * to be used with the recognition request. The base model of the specified custom language model must match the model * specified with the `model` parameter. 
You must make the request with service credentials created for the instance - * of the service that owns the custom model. By default, no custom language model is used. + * of the service that owns the custom model. By default, no custom language model is used. See [Custom + * models](https://console.bluemix.net/docs/services/speech-to-text/input.html#custom). + * + * **Note:** Use this parameter instead of the deprecated `customization_id` parameter. * @param {string} [params.acoustic_customization_id] - The customization ID (GUID) of a custom acoustic model that is * to be used with the recognition request. The base model of the specified custom acoustic model must match the model * specified with the `model` parameter. You must make the request with service credentials created for the instance - * of the service that owns the custom model. By default, no custom acoustic model is used. + * of the service that owns the custom model. By default, no custom acoustic model is used. See [Custom + * models](https://console.bluemix.net/docs/services/speech-to-text/input.html#custom). * @param {string} [params.base_model_version] - The version of the specified base model that is to be used with * recognition request. Multiple versions of a base model can exist when a model is updated for internal improvements. * The parameter is intended primarily for use with custom models that have been upgraded for a new base model. The - * default value depends on whether the parameter is used with or without a custom model. For more information, see - * [Base model version](https://console.bluemix.net/docs/services/speech-to-text/input.html#version). + * default value depends on whether the parameter is used with or without a custom model. See [Base model + * version](https://console.bluemix.net/docs/services/speech-to-text/input.html#version). 
* @param {number} [params.customization_weight] - If you specify the customization ID (GUID) of a custom language * model with the recognition request, the customization weight tells the service how much weight to give to words * from the custom language model compared to those from the base model for the current request. @@ -217,43 +236,55 @@ class SpeechToTextV1 extends BaseService { * The default value yields the best performance in general. Assign a higher value if your audio makes frequent use of * OOV words from the custom model. Use caution when setting the weight: a higher value can improve the accuracy of * phrases from the custom model's domain, but it can negatively affect performance on non-domain phrases. + * + * See [Custom models](https://console.bluemix.net/docs/services/speech-to-text/input.html#custom). * @param {number} [params.inactivity_timeout] - The time in seconds after which, if only silence (no speech) is * detected in submitted audio, the connection is closed with a 400 error. The parameter is useful for stopping audio - * submission from a live microphone when a user simply walks away. Use `-1` for infinity. + * submission from a live microphone when a user simply walks away. Use `-1` for infinity. See + * [Timeouts](https://console.bluemix.net/docs/services/speech-to-text/input.html#timeouts). * @param {string[]} [params.keywords] - An array of keyword strings to spot in the audio. Each keyword string can * include one or more string tokens. Keywords are spotted only in the final results, not in interim hypotheses. If * you specify any keywords, you must also specify a keywords threshold. You can spot a maximum of 1000 keywords. Omit - * the parameter or specify an empty array if you do not need to spot keywords. + * the parameter or specify an empty array if you do not need to spot keywords. See [Keyword + * spotting](https://console.bluemix.net/docs/services/speech-to-text/output.html#keyword_spotting). 
* @param {number} [params.keywords_threshold] - A confidence value that is the lower bound for spotting a keyword. A * word is considered to match a keyword if its confidence is greater than or equal to the threshold. Specify a * probability between 0.0 and 1.0. No keyword spotting is performed if you omit the parameter. If you specify a - * threshold, you must also specify one or more keywords. + * threshold, you must also specify one or more keywords. See [Keyword + * spotting](https://console.bluemix.net/docs/services/speech-to-text/output.html#keyword_spotting). * @param {number} [params.max_alternatives] - The maximum number of alternative transcripts that the service is to - * return. By default, a single transcription is returned. + * return. By default, a single transcription is returned. See [Maximum + * alternatives](https://console.bluemix.net/docs/services/speech-to-text/output.html#max_alternatives). * @param {number} [params.word_alternatives_threshold] - A confidence value that is the lower bound for identifying a * hypothesis as a possible word alternative (also known as "Confusion Networks"). An alternative word is considered * if its confidence is greater than or equal to the threshold. Specify a probability between 0.0 and 1.0. No - * alternative words are computed if you omit the parameter. + * alternative words are computed if you omit the parameter. See [Word + * alternatives](https://console.bluemix.net/docs/services/speech-to-text/output.html#word_alternatives). * @param {boolean} [params.word_confidence] - If `true`, the service returns a confidence measure in the range of 0.0 - * to 1.0 for each word. By default, no word confidence measures are returned. + * to 1.0 for each word. By default, no word confidence measures are returned. See [Word + * confidence](https://console.bluemix.net/docs/services/speech-to-text/output.html#word_confidence). 
* @param {boolean} [params.timestamps] - If `true`, the service returns time alignment for each word. By default, no - * timestamps are returned. + * timestamps are returned. See [Word + * timestamps](https://console.bluemix.net/docs/services/speech-to-text/output.html#word_timestamps). * @param {boolean} [params.profanity_filter] - If `true`, the service filters profanity from all output except for * keyword results by replacing inappropriate words with a series of asterisks. Set the parameter to `false` to return - * results with no censoring. Applies to US English transcription only. + * results with no censoring. Applies to US English transcription only. See [Profanity + * filtering](https://console.bluemix.net/docs/services/speech-to-text/output.html#profanity_filter). * @param {boolean} [params.smart_formatting] - If `true`, the service converts dates, times, series of digits and * numbers, phone numbers, currency values, and internet addresses into more readable, conventional representations in * the final transcript of a recognition request. For US English, the service also converts certain keyword strings to * punctuation symbols. By default, no smart formatting is performed. Applies to US English and Spanish transcription - * only. + * only. See [Smart + * formatting](https://console.bluemix.net/docs/services/speech-to-text/output.html#smart_formatting). * @param {boolean} [params.speaker_labels] - If `true`, the response includes labels that identify which words were * spoken by which participants in a multi-person exchange. By default, no speaker labels are returned. Setting * `speaker_labels` to `true` forces the `timestamps` parameter to be `true`, regardless of whether you specify - * `false` for the parameter. - * - * To determine whether a language model supports speaker labels, use the **Get models** method and check that the - * attribute `speaker_labels` is set to `true`. You can also refer to [Speaker + * `false` for the parameter. 
To determine whether a language model supports speaker labels, use the **Get models** + * method and check that the attribute `speaker_labels` is set to `true`. See [Speaker * labels](https://console.bluemix.net/docs/services/speech-to-text/output.html#speaker_labels). + * @param {string} [params.customization_id] - **Deprecated.** Use the `language_customization_id` parameter to + * specify the customization ID (GUID) of a custom language model that is to be used with the recognition request. Do + * not specify both parameters with a request. * @param {Object} [params.headers] - Custom request headers * @param {Function} [callback] - The callback that handles the response. * @returns {NodeJS.ReadableStream|void} @@ -262,14 +293,16 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['audio', 'content_type']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } const body = _params.audio; + const query = { 'model': _params.model, - 'customization_id': _params.customization_id, + 'language_customization_id': _params.language_customization_id, 'acoustic_customization_id': _params.acoustic_customization_id, 'base_model_version': _params.base_model_version, 'customization_weight': _params.customization_weight, @@ -282,8 +315,10 @@ class SpeechToTextV1 extends BaseService { 'timestamps': _params.timestamps, 'profanity_filter': _params.profanity_filter, 'smart_formatting': _params.smart_formatting, - 'speaker_labels': _params.speaker_labels + 'speaker_labels': _params.speaker_labels, + 'customization_id': _params.customization_id }; + const parameters = { options: { url: '/v1/recognize', @@ -299,6 +334,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -316,13 +352,13 @@ class SpeechToTextV1 extends 
BaseService { * You can use the method to retrieve the results of any job, regardless of whether it was submitted with a callback * URL and the `recognitions.completed_with_results` event, and you can retrieve the results multiple times for as * long as they remain available. Use the **Check jobs** method to request information about the most recent jobs - * associated with the calling user. + * associated with the caller. * * **See also:** [Checking the status and retrieving the results of a * job](https://console.bluemix.net/docs/services/speech-to-text/async.html#job). * * @param {Object} params - The parameters to send to the service. - * @param {string} params.id - The ID of the asynchronous job. + * @param {string} params.id - The identifier of the asynchronous job that is to be used for the request. * @param {Object} [params.headers] - Custom request headers * @param {Function} [callback] - The callback that handles the response. * @returns {NodeJS.ReadableStream|void} @@ -331,13 +367,16 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'id': _params.id }; + const parameters = { options: { url: '/v1/recognitions/{id}', @@ -351,6 +390,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -375,6 +415,7 @@ class SpeechToTextV1 extends BaseService { public checkJobs(params?: SpeechToTextV1.CheckJobsParams, callback?: SpeechToTextV1.Callback): NodeJS.ReadableStream | void { const _params = (typeof params === 'function' && !callback) ? {} : extend({}, params); const _callback = (typeof params === 'function' && !callback) ? params : (callback) ? 
callback : () => {/* noop */}; + const parameters = { options: { url: '/v1/recognitions', @@ -387,6 +428,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -422,17 +464,36 @@ class SpeechToTextV1 extends BaseService { * * **See also:** [Creating a job](https://console.bluemix.net/docs/services/speech-to-text/async.html#create). * + * ### Streaming mode + * + * For requests to transcribe live audio as it becomes available, you must set the `Transfer-Encoding` header to + * `chunked` to use streaming mode. In streaming mode, the server closes the connection (status code 408) if the + * service receives no data chunk for 30 seconds and it has no audio to transcribe for 30 seconds. The server also + * closes the connection (status code 400) if no speech is detected for `inactivity_timeout` seconds of audio (not + * processing time); use the `inactivity_timeout` parameter to change the default of 30 seconds. + * + * **See also:** + * * [Audio transmission](https://console.bluemix.net/docs/services/speech-to-text/input.html#transmission) + * * [Timeouts](https://console.bluemix.net/docs/services/speech-to-text/input.html#timeouts) + * * ### Audio formats (content types) * - * Use the `Content-Type` header to specify the audio format (MIME type) of the audio. The service accepts the - * following formats, including specifying the sampling rate, channels, and endianness where indicated. - * * `audio/basic` (Use only with narrowband models.) + * The service accepts audio in the following formats (MIME types). + * * For formats that are labeled **Required**, you must use the `Content-Type` header with the request to specify the + * format of the audio. + * * For all other formats, you can omit the `Content-Type` header or specify `application/octet-stream` with the + * header to have the service automatically detect the format of the audio. 
(With the `curl` command, you can specify + * either `\"Content-Type:\"` or `\"Content-Type: application/octet-stream\"`.) + * + * Where indicated, the format that you specify must include the sampling rate and can optionally include the number + * of channels and the endianness of the audio. + * * `audio/basic` (**Required.** Use only with narrowband models.) * * `audio/flac` - * * `audio/l16` (Specify the sampling rate (`rate`) and optionally the number of channels (`channels`) and endianness - * (`endianness`) of the audio.) + * * `audio/l16` (**Required.** Specify the sampling rate (`rate`) and optionally the number of channels (`channels`) + * and endianness (`endianness`) of the audio.) * * `audio/mp3` * * `audio/mpeg` - * * `audio/mulaw` (Specify the sampling rate (`rate`) of the audio.) + * * `audio/mulaw` (**Required.** Specify the sampling rate (`rate`) of the audio.) * * `audio/ogg` (The service automatically detects the codec of the input audio.) * * `audio/ogg;codecs=opus` * * `audio/ogg;codecs=vorbis` @@ -443,9 +504,11 @@ class SpeechToTextV1 extends BaseService { * * **See also:** [Audio formats](https://console.bluemix.net/docs/services/speech-to-text/audio-formats.html). * + * **Note:** You must pass a content type when using any of the Watson SDKs. The SDKs require the content-type + * parameter for all audio formats. + * * @param {Object} params - The parameters to send to the service. - * @param {NodeJS.ReadableStream|FileObject|Buffer} params.audio - The audio to transcribe in the format specified by - * the `Content-Type` header. + * @param {NodeJS.ReadableStream|FileObject|Buffer} params.audio - The audio to transcribe. * @param {string} params.content_type - The type of the input. * @param {string} [params.model] - The identifier of the model that is to be used for the recognition request. * @param {string} [params.callback_url] - A URL to which callback notifications are to be sent. 
The URL must already @@ -476,19 +539,23 @@ class SpeechToTextV1 extends BaseService { * @param {number} [params.results_ttl] - The number of minutes for which the results are to be available after the * job has finished. If not delivered via a callback, the results must be retrieved within this time. Omit the * parameter to use a time to live of one week. The parameter is valid with or without a callback URL. - * @param {string} [params.customization_id] - The customization ID (GUID) of a custom language model that is to be - * used with the recognition request. The base model of the specified custom language model must match the model + * @param {string} [params.language_customization_id] - The customization ID (GUID) of a custom language model that is + * to be used with the recognition request. The base model of the specified custom language model must match the model * specified with the `model` parameter. You must make the request with service credentials created for the instance - * of the service that owns the custom model. By default, no custom language model is used. + * of the service that owns the custom model. By default, no custom language model is used. See [Custom + * models](https://console.bluemix.net/docs/services/speech-to-text/input.html#custom). + * + * **Note:** Use this parameter instead of the deprecated `customization_id` parameter. * @param {string} [params.acoustic_customization_id] - The customization ID (GUID) of a custom acoustic model that is * to be used with the recognition request. The base model of the specified custom acoustic model must match the model * specified with the `model` parameter. You must make the request with service credentials created for the instance - * of the service that owns the custom model. By default, no custom acoustic model is used. + * of the service that owns the custom model. By default, no custom acoustic model is used. 
See [Custom + * models](https://console.bluemix.net/docs/services/speech-to-text/input.html#custom). * @param {string} [params.base_model_version] - The version of the specified base model that is to be used with * recognition request. Multiple versions of a base model can exist when a model is updated for internal improvements. * The parameter is intended primarily for use with custom models that have been upgraded for a new base model. The - * default value depends on whether the parameter is used with or without a custom model. For more information, see - * [Base model version](https://console.bluemix.net/docs/services/speech-to-text/input.html#version). + * default value depends on whether the parameter is used with or without a custom model. See [Base model + * version](https://console.bluemix.net/docs/services/speech-to-text/input.html#version). * @param {number} [params.customization_weight] - If you specify the customization ID (GUID) of a custom language * model with the recognition request, the customization weight tells the service how much weight to give to words * from the custom language model compared to those from the base model for the current request. @@ -500,43 +567,55 @@ class SpeechToTextV1 extends BaseService { * The default value yields the best performance in general. Assign a higher value if your audio makes frequent use of * OOV words from the custom model. Use caution when setting the weight: a higher value can improve the accuracy of * phrases from the custom model's domain, but it can negatively affect performance on non-domain phrases. + * + * See [Custom models](https://console.bluemix.net/docs/services/speech-to-text/input.html#custom). * @param {number} [params.inactivity_timeout] - The time in seconds after which, if only silence (no speech) is * detected in submitted audio, the connection is closed with a 400 error. The parameter is useful for stopping audio - * submission from a live microphone when a user simply walks away. 
Use `-1` for infinity. + * submission from a live microphone when a user simply walks away. Use `-1` for infinity. See + * [Timeouts](https://console.bluemix.net/docs/services/speech-to-text/input.html#timeouts). * @param {string[]} [params.keywords] - An array of keyword strings to spot in the audio. Each keyword string can * include one or more string tokens. Keywords are spotted only in the final results, not in interim hypotheses. If * you specify any keywords, you must also specify a keywords threshold. You can spot a maximum of 1000 keywords. Omit - * the parameter or specify an empty array if you do not need to spot keywords. + * the parameter or specify an empty array if you do not need to spot keywords. See [Keyword + * spotting](https://console.bluemix.net/docs/services/speech-to-text/output.html#keyword_spotting). * @param {number} [params.keywords_threshold] - A confidence value that is the lower bound for spotting a keyword. A * word is considered to match a keyword if its confidence is greater than or equal to the threshold. Specify a * probability between 0.0 and 1.0. No keyword spotting is performed if you omit the parameter. If you specify a - * threshold, you must also specify one or more keywords. + * threshold, you must also specify one or more keywords. See [Keyword + * spotting](https://console.bluemix.net/docs/services/speech-to-text/output.html#keyword_spotting). * @param {number} [params.max_alternatives] - The maximum number of alternative transcripts that the service is to - * return. By default, a single transcription is returned. + * return. By default, a single transcription is returned. See [Maximum + * alternatives](https://console.bluemix.net/docs/services/speech-to-text/output.html#max_alternatives). * @param {number} [params.word_alternatives_threshold] - A confidence value that is the lower bound for identifying a * hypothesis as a possible word alternative (also known as "Confusion Networks"). 
An alternative word is considered * if its confidence is greater than or equal to the threshold. Specify a probability between 0.0 and 1.0. No - * alternative words are computed if you omit the parameter. + * alternative words are computed if you omit the parameter. See [Word + * alternatives](https://console.bluemix.net/docs/services/speech-to-text/output.html#word_alternatives). * @param {boolean} [params.word_confidence] - If `true`, the service returns a confidence measure in the range of 0.0 - * to 1.0 for each word. By default, no word confidence measures are returned. + * to 1.0 for each word. By default, no word confidence measures are returned. See [Word + * confidence](https://console.bluemix.net/docs/services/speech-to-text/output.html#word_confidence). * @param {boolean} [params.timestamps] - If `true`, the service returns time alignment for each word. By default, no - * timestamps are returned. + * timestamps are returned. See [Word + * timestamps](https://console.bluemix.net/docs/services/speech-to-text/output.html#word_timestamps). * @param {boolean} [params.profanity_filter] - If `true`, the service filters profanity from all output except for * keyword results by replacing inappropriate words with a series of asterisks. Set the parameter to `false` to return - * results with no censoring. Applies to US English transcription only. + * results with no censoring. Applies to US English transcription only. See [Profanity + * filtering](https://console.bluemix.net/docs/services/speech-to-text/output.html#profanity_filter). * @param {boolean} [params.smart_formatting] - If `true`, the service converts dates, times, series of digits and * numbers, phone numbers, currency values, and internet addresses into more readable, conventional representations in * the final transcript of a recognition request. For US English, the service also converts certain keyword strings to * punctuation symbols. By default, no smart formatting is performed. 
Applies to US English and Spanish transcription - * only. + * only. See [Smart + * formatting](https://console.bluemix.net/docs/services/speech-to-text/output.html#smart_formatting). * @param {boolean} [params.speaker_labels] - If `true`, the response includes labels that identify which words were * spoken by which participants in a multi-person exchange. By default, no speaker labels are returned. Setting * `speaker_labels` to `true` forces the `timestamps` parameter to be `true`, regardless of whether you specify - * `false` for the parameter. - * - * To determine whether a language model supports speaker labels, use the **Get models** method and check that the - * attribute `speaker_labels` is set to `true`. You can also refer to [Speaker + * `false` for the parameter. To determine whether a language model supports speaker labels, use the **Get models** + * method and check that the attribute `speaker_labels` is set to `true`. See [Speaker * labels](https://console.bluemix.net/docs/services/speech-to-text/output.html#speaker_labels). + * @param {string} [params.customization_id] - **Deprecated.** Use the `language_customization_id` parameter to + * specify the customization ID (GUID) of a custom language model that is to be used with the recognition request. Do + * not specify both parameters with a request. * @param {Object} [params.headers] - Custom request headers * @param {Function} [callback] - The callback that handles the response. * @returns {NodeJS.ReadableStream|void} @@ -545,18 +624,20 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['audio', 'content_type']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } const body = _params.audio; + const query = { 'model': _params.model, 'callback_url': _params.callback_url, 'events': _params.events, 'user_token': _params.user_token, 'results_ttl': _params.results_ttl, - 'customization_id': _params.customization_id, + 'language_customization_id': _params.language_customization_id, 'acoustic_customization_id': _params.acoustic_customization_id, 'base_model_version': _params.base_model_version, 'customization_weight': _params.customization_weight, @@ -569,8 +650,10 @@ class SpeechToTextV1 extends BaseService { 'timestamps': _params.timestamps, 'profanity_filter': _params.profanity_filter, 'smart_formatting': _params.smart_formatting, - 'speaker_labels': _params.speaker_labels + 'speaker_labels': _params.speaker_labels, + 'customization_id': _params.customization_id }; + const parameters = { options: { url: '/v1/recognitions', @@ -586,6 +669,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -599,7 +683,7 @@ class SpeechToTextV1 extends BaseService { * **See also:** [Deleting a job](https://console.bluemix.net/docs/services/speech-to-text/async.html#delete). * * @param {Object} params - The parameters to send to the service. - * @param {string} params.id - The ID of the asynchronous job. + * @param {string} params.id - The identifier of the asynchronous job that is to be used for the request. * @param {Object} [params.headers] - Custom request headers * @param {Function} [callback] - The callback that handles the response. * @returns {NodeJS.ReadableStream|void} @@ -608,13 +692,16 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'id': _params.id }; + const parameters = { options: { url: '/v1/recognitions/{id}', @@ -628,6 +715,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -679,14 +767,17 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['callback_url']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'callback_url': _params.callback_url, 'user_secret': _params.user_secret }; + const parameters = { options: { url: '/v1/register_callback', @@ -700,6 +791,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -722,13 +814,16 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['callback_url']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'callback_url': _params.callback_url }; + const parameters = { options: { url: '/v1/unregister_callback', @@ -742,6 +837,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -787,16 +883,19 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['name', 'base_model_name']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'name': _params.name, 'base_model_name': _params.base_model_name, 'dialect': _params.dialect, 'description': _params.description }; + const parameters = { options: { url: '/v1/customizations', @@ -811,6 +910,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -825,8 +925,9 @@ class SpeechToTextV1 extends BaseService { * model](https://console.bluemix.net/docs/services/speech-to-text/language-models.html#deleteModel). * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - The customization ID (GUID) of the custom language model. You must make - * the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.customization_id - The customization ID (GUID) of the custom language model that is to be + * used for the request. You must make the request with service credentials created for the instance of the service + * that owns the custom model. * @param {Object} [params.headers] - Custom request headers * @param {Function} [callback] - The callback that handles the response. * @returns {NodeJS.ReadableStream|void} @@ -835,13 +936,16 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['customization_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'customization_id': _params.customization_id }; + const parameters = { options: { url: '/v1/customizations/{customization_id}', @@ -855,6 +959,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -868,8 +973,9 @@ class SpeechToTextV1 extends BaseService { * models](https://console.bluemix.net/docs/services/speech-to-text/language-models.html#listModels). * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - The customization ID (GUID) of the custom language model. You must make - * the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.customization_id - The customization ID (GUID) of the custom language model that is to be + * used for the request. You must make the request with service credentials created for the instance of the service + * that owns the custom model. * @param {Object} [params.headers] - Custom request headers * @param {Function} [callback] - The callback that handles the response. * @returns {NodeJS.ReadableStream|void} @@ -878,13 +984,16 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['customization_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'customization_id': _params.customization_id }; + const parameters = { options: { url: '/v1/customizations/{customization_id}', @@ -898,6 +1007,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -923,9 +1033,11 @@ class SpeechToTextV1 extends BaseService { public listLanguageModels(params?: SpeechToTextV1.ListLanguageModelsParams, callback?: SpeechToTextV1.Callback): NodeJS.ReadableStream | void { const _params = (typeof params === 'function' && !callback) ? {} : extend({}, params); const _callback = (typeof params === 'function' && !callback) ? params : (callback) ? callback : () => {/* noop */}; + const query = { 'language': _params.language }; + const parameters = { options: { url: '/v1/customizations', @@ -939,6 +1051,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -954,8 +1067,9 @@ class SpeechToTextV1 extends BaseService { * model](https://console.bluemix.net/docs/services/speech-to-text/language-models.html#resetModel). * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - The customization ID (GUID) of the custom language model. You must make - * the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.customization_id - The customization ID (GUID) of the custom language model that is to be + * used for the request. You must make the request with service credentials created for the instance of the service + * that owns the custom model. 
* @param {Object} [params.headers] - Custom request headers * @param {Function} [callback] - The callback that handles the response. * @returns {NodeJS.ReadableStream|void} @@ -964,13 +1078,16 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['customization_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'customization_id': _params.customization_id }; + const parameters = { options: { url: '/v1/customizations/{customization_id}/reset', @@ -984,6 +1101,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1016,8 +1134,9 @@ class SpeechToTextV1 extends BaseService { * model](https://console.bluemix.net/docs/services/speech-to-text/language-create.html#trainModel). * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - The customization ID (GUID) of the custom language model. You must make - * the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.customization_id - The customization ID (GUID) of the custom language model that is to be + * used for the request. You must make the request with service credentials created for the instance of the service + * that owns the custom model. * @param {string} [params.word_type_to_add] - The type of words from the custom language model's words resource on * which to train the model: * * `all` (the default) trains the model on all new words, regardless of whether they were extracted from corpora or @@ -1042,17 +1161,21 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['customization_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'word_type_to_add': _params.word_type_to_add, 'customization_weight': _params.customization_weight }; + const path = { 'customization_id': _params.customization_id }; + const parameters = { options: { url: '/v1/customizations/{customization_id}/train', @@ -1067,6 +1190,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1089,8 +1213,9 @@ class SpeechToTextV1 extends BaseService { * model](https://console.bluemix.net/docs/services/speech-to-text/custom-upgrade.html#upgradeLanguage). * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - The customization ID (GUID) of the custom language model. You must make - * the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.customization_id - The customization ID (GUID) of the custom language model that is to be + * used for the request. You must make the request with service credentials created for the instance of the service + * that owns the custom model. * @param {Object} [params.headers] - Custom request headers * @param {Function} [callback] - The callback that handles the response. * @returns {NodeJS.ReadableStream|void} @@ -1099,13 +1224,16 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['customization_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'customization_id': _params.customization_id }; + const parameters = { options: { url: '/v1/customizations/{customization_id}/upgrade_model', @@ -1119,6 +1247,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1166,8 +1295,9 @@ class SpeechToTextV1 extends BaseService { * model](https://console.bluemix.net/docs/services/speech-to-text/language-create.html#addCorpora). * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - The customization ID (GUID) of the custom language model. You must make - * the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.customization_id - The customization ID (GUID) of the custom language model that is to be + * used for the request. You must make the request with service credentials created for the instance of the service + * that owns the custom model. * @param {string} params.corpus_name - The name of the new corpus for the custom language model. Use a localized name * that matches the language of the custom model and reflects the contents of the corpus. * * Include a maximum of 128 characters in the name. @@ -1177,8 +1307,8 @@ class SpeechToTextV1 extends BaseService { * the user. * @param {NodeJS.ReadableStream|FileObject|Buffer} params.corpus_file - A plain text file that contains the training * data for the corpus. Encode the file in UTF-8 if it contains non-ASCII characters; the service assumes UTF-8 - * encoding if it encounters non-ASCII characters. With cURL, use the `--data-binary` option to upload the file for - * the request. + * encoding if it encounters non-ASCII characters. 
With the `curl` command, use the `--data-binary` option to upload + * the file for the request. * @param {boolean} [params.allow_overwrite] - If `true`, the specified corpus or audio resource overwrites an * existing corpus or audio resource with the same name. If `false`, the request fails if a corpus or audio resource * with the same name already exists. The parameter has no effect if a corpus or audio resource with the same name @@ -1192,10 +1322,12 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['customization_id', 'corpus_name', 'corpus_file']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const formData = { 'corpus_file': { data: _params.corpus_file, @@ -1203,13 +1335,16 @@ class SpeechToTextV1 extends BaseService { contentType: 'text/plain' } }; + const query = { 'allow_overwrite': _params.allow_overwrite }; + const path = { 'customization_id': _params.customization_id, 'corpus_name': _params.corpus_name }; + const parameters = { options: { url: '/v1/customizations/{customization_id}/corpora/{corpus_name}', @@ -1225,6 +1360,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1241,8 +1377,9 @@ class SpeechToTextV1 extends BaseService { * model](https://console.bluemix.net/docs/services/speech-to-text/language-corpora.html#deleteCorpus). * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - The customization ID (GUID) of the custom language model. You must make - * the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.customization_id - The customization ID (GUID) of the custom language model that is to be + * used for the request. 
You must make the request with service credentials created for the instance of the service + * that owns the custom model. * @param {string} params.corpus_name - The name of the corpus for the custom language model. * @param {Object} [params.headers] - Custom request headers * @param {Function} [callback] - The callback that handles the response. @@ -1252,14 +1389,17 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['customization_id', 'corpus_name']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'customization_id': _params.customization_id, 'corpus_name': _params.corpus_name }; + const parameters = { options: { url: '/v1/customizations/{customization_id}/corpora/{corpus_name}', @@ -1273,6 +1413,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1287,8 +1428,9 @@ class SpeechToTextV1 extends BaseService { * model](https://console.bluemix.net/docs/services/speech-to-text/language-corpora.html#listCorpora). * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - The customization ID (GUID) of the custom language model. You must make - * the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.customization_id - The customization ID (GUID) of the custom language model that is to be + * used for the request. You must make the request with service credentials created for the instance of the service + * that owns the custom model. * @param {string} params.corpus_name - The name of the corpus for the custom language model. 
* @param {Object} [params.headers] - Custom request headers * @param {Function} [callback] - The callback that handles the response. @@ -1298,14 +1440,17 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['customization_id', 'corpus_name']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'customization_id': _params.customization_id, 'corpus_name': _params.corpus_name }; + const parameters = { options: { url: '/v1/customizations/{customization_id}/corpora/{corpus_name}', @@ -1319,6 +1464,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1333,8 +1479,9 @@ class SpeechToTextV1 extends BaseService { * model](https://console.bluemix.net/docs/services/speech-to-text/language-corpora.html#listCorpora). * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - The customization ID (GUID) of the custom language model. You must make - * the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.customization_id - The customization ID (GUID) of the custom language model that is to be + * used for the request. You must make the request with service credentials created for the instance of the service + * that owns the custom model. * @param {Object} [params.headers] - Custom request headers * @param {Function} [callback] - The callback that handles the response. * @returns {NodeJS.ReadableStream|void} @@ -1343,13 +1490,16 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['customization_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'customization_id': _params.customization_id }; + const parameters = { options: { url: '/v1/customizations/{customization_id}/corpora', @@ -1363,6 +1513,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1404,8 +1555,9 @@ class SpeechToTextV1 extends BaseService { * model](https://console.bluemix.net/docs/services/speech-to-text/language-create.html#addWords). * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - The customization ID (GUID) of the custom language model. You must make - * the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.customization_id - The customization ID (GUID) of the custom language model that is to be + * used for the request. You must make the request with service credentials created for the instance of the service + * that owns the custom model. * @param {string} params.word_name - The custom word for the custom language model. When you add or update a custom * word with the **Add a custom word** method, do not include spaces in the word. Use a `-` (dash) or `_` (underscore) * to connect the tokens of compound words. @@ -1435,19 +1587,23 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['customization_id', 'word_name']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'word': _params.word, 'sounds_like': _params.sounds_like, 'display_as': _params.display_as }; + const path = { 'customization_id': _params.customization_id, 'word_name': _params.word_name }; + const parameters = { options: { url: '/v1/customizations/{customization_id}/words/{word_name}', @@ -1463,6 +1619,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1515,8 +1672,9 @@ class SpeechToTextV1 extends BaseService { * model](https://console.bluemix.net/docs/services/speech-to-text/language-create.html#addWords). * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - The customization ID (GUID) of the custom language model. You must make - * the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.customization_id - The customization ID (GUID) of the custom language model that is to be + * used for the request. You must make the request with service credentials created for the instance of the service + * that owns the custom model. * @param {CustomWord[]} params.words - An array of objects that provides information about each custom word that is * to be added to or updated in the custom language model. * @param {Object} [params.headers] - Custom request headers @@ -1527,16 +1685,20 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['customization_id', 'words']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'words': _params.words }; + const path = { 'customization_id': _params.customization_id }; + const parameters = { options: { url: '/v1/customizations/{customization_id}/words', @@ -1552,6 +1714,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1568,8 +1731,9 @@ class SpeechToTextV1 extends BaseService { * model](https://console.bluemix.net/docs/services/speech-to-text/language-words.html#deleteWord). * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - The customization ID (GUID) of the custom language model. You must make - * the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.customization_id - The customization ID (GUID) of the custom language model that is to be + * used for the request. You must make the request with service credentials created for the instance of the service + * that owns the custom model. * @param {string} params.word_name - The custom word for the custom language model. When you add or update a custom * word with the **Add a custom word** method, do not include spaces in the word. Use a `-` (dash) or `_` (underscore) * to connect the tokens of compound words. @@ -1581,14 +1745,17 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['customization_id', 'word_name']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'customization_id': _params.customization_id, 'word_name': _params.word_name }; + const parameters = { options: { url: '/v1/customizations/{customization_id}/words/{word_name}', @@ -1602,6 +1769,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1615,8 +1783,9 @@ class SpeechToTextV1 extends BaseService { * model](https://console.bluemix.net/docs/services/speech-to-text/language-words.html#listWords). * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - The customization ID (GUID) of the custom language model. You must make - * the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.customization_id - The customization ID (GUID) of the custom language model that is to be + * used for the request. You must make the request with service credentials created for the instance of the service + * that owns the custom model. * @param {string} params.word_name - The custom word for the custom language model. When you add or update a custom * word with the **Add a custom word** method, do not include spaces in the word. Use a `-` (dash) or `_` (underscore) * to connect the tokens of compound words. @@ -1628,14 +1797,17 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['customization_id', 'word_name']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'customization_id': _params.customization_id, 'word_name': _params.word_name }; + const parameters = { options: { url: '/v1/customizations/{customization_id}/words/{word_name}', @@ -1649,6 +1821,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1665,8 +1838,9 @@ class SpeechToTextV1 extends BaseService { * model](https://console.bluemix.net/docs/services/speech-to-text/language-words.html#listWords). * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - The customization ID (GUID) of the custom language model. You must make - * the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.customization_id - The customization ID (GUID) of the custom language model that is to be + * used for the request. You must make the request with service credentials created for the instance of the service + * that owns the custom model. * @param {string} [params.word_type] - The type of words to be listed from the custom language model's words * resource: * * `all` (the default) shows all words. @@ -1676,7 +1850,8 @@ class SpeechToTextV1 extends BaseService { * `count`. You can prepend an optional `+` or `-` to an argument to indicate whether the results are to be sorted in * ascending or descending order. By default, words are sorted in ascending alphabetical order. For alphabetical * ordering, the lexicographical precedence is numeric values, uppercase letters, and lowercase letters. For count - * ordering, values with the same count are ordered alphabetically. With cURL, URL encode the `+` symbol as `%2B`. 
+ * ordering, values with the same count are ordered alphabetically. With the `curl` command, URL encode the `+` symbol + * as `%2B`. * @param {Object} [params.headers] - Custom request headers * @param {Function} [callback] - The callback that handles the response. * @returns {NodeJS.ReadableStream|void} @@ -1685,17 +1860,21 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['customization_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'word_type': _params.word_type, 'sort': _params.sort }; + const path = { 'customization_id': _params.customization_id }; + const parameters = { options: { url: '/v1/customizations/{customization_id}/words', @@ -1710,6 +1889,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1746,15 +1926,18 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['name', 'base_model_name']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'name': _params.name, 'base_model_name': _params.base_model_name, 'description': _params.description }; + const parameters = { options: { url: '/v1/acoustic_customizations', @@ -1769,6 +1952,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1783,8 +1967,9 @@ class SpeechToTextV1 extends BaseService { * model](https://console.bluemix.net/docs/services/speech-to-text/acoustic-models.html#deleteModel). * * @param {Object} params - The parameters to send to the service. 
- * @param {string} params.customization_id - The customization ID (GUID) of the custom acoustic model. You must make - * the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.customization_id - The customization ID (GUID) of the custom acoustic model that is to be + * used for the request. You must make the request with service credentials created for the instance of the service + * that owns the custom model. * @param {Object} [params.headers] - Custom request headers * @param {Function} [callback] - The callback that handles the response. * @returns {NodeJS.ReadableStream|void} @@ -1793,13 +1978,16 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['customization_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'customization_id': _params.customization_id }; + const parameters = { options: { url: '/v1/acoustic_customizations/{customization_id}', @@ -1813,6 +2001,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1826,8 +2015,9 @@ class SpeechToTextV1 extends BaseService { * models](https://console.bluemix.net/docs/services/speech-to-text/acoustic-models.html#listModels). * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - The customization ID (GUID) of the custom acoustic model. You must make - * the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.customization_id - The customization ID (GUID) of the custom acoustic model that is to be + * used for the request. 
You must make the request with service credentials created for the instance of the service + * that owns the custom model. * @param {Object} [params.headers] - Custom request headers * @param {Function} [callback] - The callback that handles the response. * @returns {NodeJS.ReadableStream|void} @@ -1836,13 +2026,16 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['customization_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'customization_id': _params.customization_id }; + const parameters = { options: { url: '/v1/acoustic_customizations/{customization_id}', @@ -1856,6 +2049,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1881,9 +2075,11 @@ class SpeechToTextV1 extends BaseService { public listAcousticModels(params?: SpeechToTextV1.ListAcousticModelsParams, callback?: SpeechToTextV1.Callback): NodeJS.ReadableStream | void { const _params = (typeof params === 'function' && !callback) ? {} : extend({}, params); const _callback = (typeof params === 'function' && !callback) ? params : (callback) ? callback : () => {/* noop */}; + const query = { 'language': _params.language }; + const parameters = { options: { url: '/v1/acoustic_customizations', @@ -1897,6 +2093,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1912,8 +2109,9 @@ class SpeechToTextV1 extends BaseService { * model](https://console.bluemix.net/docs/services/speech-to-text/acoustic-models.html#resetModel). * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - The customization ID (GUID) of the custom acoustic model. 
You must make - * the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.customization_id - The customization ID (GUID) of the custom acoustic model that is to be + * used for the request. You must make the request with service credentials created for the instance of the service + * that owns the custom model. * @param {Object} [params.headers] - Custom request headers * @param {Function} [callback] - The callback that handles the response. * @returns {NodeJS.ReadableStream|void} @@ -1922,13 +2120,16 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['customization_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'customization_id': _params.customization_id }; + const parameters = { options: { url: '/v1/acoustic_customizations/{customization_id}/reset', @@ -1942,6 +2143,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -1981,8 +2183,9 @@ class SpeechToTextV1 extends BaseService { * model](https://console.bluemix.net/docs/services/speech-to-text/acoustic-create.html#trainModel). * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - The customization ID (GUID) of the custom acoustic model. You must make - * the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.customization_id - The customization ID (GUID) of the custom acoustic model that is to be + * used for the request. You must make the request with service credentials created for the instance of the service + * that owns the custom model. 
* @param {string} [params.custom_language_model_id] - The customization ID (GUID) of a custom language model that is * to be used during training of the custom acoustic model. Specify a custom language model that has been trained with * verbatim transcriptions of the audio resources or that contains words that are relevant to the contents of the @@ -1995,16 +2198,20 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['customization_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'custom_language_model_id': _params.custom_language_model_id }; + const path = { 'customization_id': _params.customization_id }; + const parameters = { options: { url: '/v1/acoustic_customizations/{customization_id}/train', @@ -2019,6 +2226,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2047,8 +2255,9 @@ class SpeechToTextV1 extends BaseService { * model](https://console.bluemix.net/docs/services/speech-to-text/custom-upgrade.html#upgradeAcoustic). * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - The customization ID (GUID) of the custom acoustic model. You must make - * the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.customization_id - The customization ID (GUID) of the custom acoustic model that is to be + * used for the request. You must make the request with service credentials created for the instance of the service + * that owns the custom model. * @param {string} [params.custom_language_model_id] - If the custom acoustic model was trained with a custom language * model, the customization ID (GUID) of that custom language model. 
The custom language model must be upgraded before * the custom acoustic model can be upgraded. @@ -2060,16 +2269,20 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['customization_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'custom_language_model_id': _params.custom_language_model_id }; + const path = { 'customization_id': _params.customization_id }; + const parameters = { options: { url: '/v1/acoustic_customizations/{customization_id}/upgrade_model', @@ -2084,6 +2297,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2174,8 +2388,9 @@ class SpeechToTextV1 extends BaseService { * resource. * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - The customization ID (GUID) of the custom acoustic model. You must make - * the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.customization_id - The customization ID (GUID) of the custom acoustic model that is to be + * used for the request. You must make the request with service credentials created for the instance of the service + * that owns the custom model. * @param {string} params.audio_name - The name of the new audio resource for the custom acoustic model. Use a * localized name that matches the language of the custom model and reflects the contents of the resource. * * Include a maximum of 128 characters in the name. @@ -2200,18 +2415,22 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['customization_id', 'audio_name', 'audio_resource', 'content_type']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } const body = _params.audio_resource; + const query = { 'allow_overwrite': _params.allow_overwrite }; + const path = { 'customization_id': _params.customization_id, 'audio_name': _params.audio_name }; + const parameters = { options: { url: '/v1/acoustic_customizations/{customization_id}/audio/{audio_name}', @@ -2229,6 +2448,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2245,8 +2465,9 @@ class SpeechToTextV1 extends BaseService { * model](https://console.bluemix.net/docs/services/speech-to-text/acoustic-audio.html#deleteAudio). * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - The customization ID (GUID) of the custom acoustic model. You must make - * the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.customization_id - The customization ID (GUID) of the custom acoustic model that is to be + * used for the request. You must make the request with service credentials created for the instance of the service + * that owns the custom model. * @param {string} params.audio_name - The name of the audio resource for the custom acoustic model. * @param {Object} [params.headers] - Custom request headers * @param {Function} [callback] - The callback that handles the response. @@ -2256,14 +2477,17 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['customization_id', 'audio_name']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'customization_id': _params.customization_id, 'audio_name': _params.audio_name }; + const parameters = { options: { url: '/v1/acoustic_customizations/{customization_id}/audio/{audio_name}', @@ -2277,6 +2501,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2303,8 +2528,9 @@ class SpeechToTextV1 extends BaseService { * model](https://console.bluemix.net/docs/services/speech-to-text/acoustic-audio.html#listAudio). * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - The customization ID (GUID) of the custom acoustic model. You must make - * the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.customization_id - The customization ID (GUID) of the custom acoustic model that is to be + * used for the request. You must make the request with service credentials created for the instance of the service + * that owns the custom model. * @param {string} params.audio_name - The name of the audio resource for the custom acoustic model. * @param {Object} [params.headers] - Custom request headers * @param {Function} [callback] - The callback that handles the response. @@ -2314,14 +2540,17 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['customization_id', 'audio_name']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'customization_id': _params.customization_id, 'audio_name': _params.audio_name }; + const parameters = { options: { url: '/v1/acoustic_customizations/{customization_id}/audio/{audio_name}', @@ -2335,6 +2564,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2351,8 +2581,9 @@ class SpeechToTextV1 extends BaseService { * model](https://console.bluemix.net/docs/services/speech-to-text/acoustic-audio.html#listAudio). * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - The customization ID (GUID) of the custom acoustic model. You must make - * the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.customization_id - The customization ID (GUID) of the custom acoustic model that is to be + * used for the request. You must make the request with service credentials created for the instance of the service + * that owns the custom model. * @param {Object} [params.headers] - Custom request headers * @param {Function} [callback] - The callback that handles the response. * @returns {NodeJS.ReadableStream|void} @@ -2361,13 +2592,16 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['customization_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'customization_id': _params.customization_id }; + const parameters = { options: { url: '/v1/acoustic_customizations/{customization_id}/audio', @@ -2381,6 +2615,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2412,13 +2647,16 @@ class SpeechToTextV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['customer_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'customer_id': _params.customer_id }; + const parameters = { options: { url: '/v1/user_data', @@ -2432,6 +2670,7 @@ class SpeechToTextV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -2506,40 +2745,42 @@ namespace SpeechToTextV1 { /** Parameters for the `recognize` operation. */ export interface RecognizeParams { - /** The audio to transcribe in the format specified by the `Content-Type` header. */ + /** The audio to transcribe. */ audio: NodeJS.ReadableStream|FileObject|Buffer; /** The type of the input. */ content_type: RecognizeConstants.ContentType | string; /** The identifier of the model that is to be used for the recognition request. */ model?: RecognizeConstants.Model | string; - /** The customization ID (GUID) of a custom language model that is to be used with the recognition request. The base model of the specified custom language model must match the model specified with the `model` parameter. You must make the request with service credentials created for the instance of the service that owns the custom model. 
By default, no custom language model is used. */ - customization_id?: string; - /** The customization ID (GUID) of a custom acoustic model that is to be used with the recognition request. The base model of the specified custom acoustic model must match the model specified with the `model` parameter. You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom acoustic model is used. */ + /** The customization ID (GUID) of a custom language model that is to be used with the recognition request. The base model of the specified custom language model must match the model specified with the `model` parameter. You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom language model is used. See [Custom models](https://console.bluemix.net/docs/services/speech-to-text/input.html#custom). **Note:** Use this parameter instead of the deprecated `customization_id` parameter. */ + language_customization_id?: string; + /** The customization ID (GUID) of a custom acoustic model that is to be used with the recognition request. The base model of the specified custom acoustic model must match the model specified with the `model` parameter. You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom acoustic model is used. See [Custom models](https://console.bluemix.net/docs/services/speech-to-text/input.html#custom). */ acoustic_customization_id?: string; - /** The version of the specified base model that is to be used with recognition request. Multiple versions of a base model can exist when a model is updated for internal improvements. The parameter is intended primarily for use with custom models that have been upgraded for a new base model. The default value depends on whether the parameter is used with or without a custom model. 
For more information, see [Base model version](https://console.bluemix.net/docs/services/speech-to-text/input.html#version). */ + /** The version of the specified base model that is to be used with recognition request. Multiple versions of a base model can exist when a model is updated for internal improvements. The parameter is intended primarily for use with custom models that have been upgraded for a new base model. The default value depends on whether the parameter is used with or without a custom model. See [Base model version](https://console.bluemix.net/docs/services/speech-to-text/input.html#version). */ base_model_version?: string; - /** If you specify the customization ID (GUID) of a custom language model with the recognition request, the customization weight tells the service how much weight to give to words from the custom language model compared to those from the base model for the current request. Specify a value between 0.0 and 1.0. Unless a different customization weight was specified for the custom model when it was trained, the default value is 0.3. A customization weight that you specify overrides a weight that was specified when the custom model was trained. The default value yields the best performance in general. Assign a higher value if your audio makes frequent use of OOV words from the custom model. Use caution when setting the weight: a higher value can improve the accuracy of phrases from the custom model's domain, but it can negatively affect performance on non-domain phrases. */ + /** If you specify the customization ID (GUID) of a custom language model with the recognition request, the customization weight tells the service how much weight to give to words from the custom language model compared to those from the base model for the current request. Specify a value between 0.0 and 1.0. Unless a different customization weight was specified for the custom model when it was trained, the default value is 0.3. 
A customization weight that you specify overrides a weight that was specified when the custom model was trained. The default value yields the best performance in general. Assign a higher value if your audio makes frequent use of OOV words from the custom model. Use caution when setting the weight: a higher value can improve the accuracy of phrases from the custom model's domain, but it can negatively affect performance on non-domain phrases. See [Custom models](https://console.bluemix.net/docs/services/speech-to-text/input.html#custom). */ customization_weight?: number; - /** The time in seconds after which, if only silence (no speech) is detected in submitted audio, the connection is closed with a 400 error. The parameter is useful for stopping audio submission from a live microphone when a user simply walks away. Use `-1` for infinity. */ + /** The time in seconds after which, if only silence (no speech) is detected in submitted audio, the connection is closed with a 400 error. The parameter is useful for stopping audio submission from a live microphone when a user simply walks away. Use `-1` for infinity. See [Timeouts](https://console.bluemix.net/docs/services/speech-to-text/input.html#timeouts). */ inactivity_timeout?: number; - /** An array of keyword strings to spot in the audio. Each keyword string can include one or more string tokens. Keywords are spotted only in the final results, not in interim hypotheses. If you specify any keywords, you must also specify a keywords threshold. You can spot a maximum of 1000 keywords. Omit the parameter or specify an empty array if you do not need to spot keywords. */ + /** An array of keyword strings to spot in the audio. Each keyword string can include one or more string tokens. Keywords are spotted only in the final results, not in interim hypotheses. If you specify any keywords, you must also specify a keywords threshold. You can spot a maximum of 1000 keywords. 
Omit the parameter or specify an empty array if you do not need to spot keywords. See [Keyword spotting](https://console.bluemix.net/docs/services/speech-to-text/output.html#keyword_spotting). */ keywords?: string[]; - /** A confidence value that is the lower bound for spotting a keyword. A word is considered to match a keyword if its confidence is greater than or equal to the threshold. Specify a probability between 0.0 and 1.0. No keyword spotting is performed if you omit the parameter. If you specify a threshold, you must also specify one or more keywords. */ + /** A confidence value that is the lower bound for spotting a keyword. A word is considered to match a keyword if its confidence is greater than or equal to the threshold. Specify a probability between 0.0 and 1.0. No keyword spotting is performed if you omit the parameter. If you specify a threshold, you must also specify one or more keywords. See [Keyword spotting](https://console.bluemix.net/docs/services/speech-to-text/output.html#keyword_spotting). */ keywords_threshold?: number; - /** The maximum number of alternative transcripts that the service is to return. By default, a single transcription is returned. */ + /** The maximum number of alternative transcripts that the service is to return. By default, a single transcription is returned. See [Maximum alternatives](https://console.bluemix.net/docs/services/speech-to-text/output.html#max_alternatives). */ max_alternatives?: number; - /** A confidence value that is the lower bound for identifying a hypothesis as a possible word alternative (also known as "Confusion Networks"). An alternative word is considered if its confidence is greater than or equal to the threshold. Specify a probability between 0.0 and 1.0. No alternative words are computed if you omit the parameter. */ + /** A confidence value that is the lower bound for identifying a hypothesis as a possible word alternative (also known as "Confusion Networks"). 
An alternative word is considered if its confidence is greater than or equal to the threshold. Specify a probability between 0.0 and 1.0. No alternative words are computed if you omit the parameter. See [Word alternatives](https://console.bluemix.net/docs/services/speech-to-text/output.html#word_alternatives). */ word_alternatives_threshold?: number; - /** If `true`, the service returns a confidence measure in the range of 0.0 to 1.0 for each word. By default, no word confidence measures are returned. */ + /** If `true`, the service returns a confidence measure in the range of 0.0 to 1.0 for each word. By default, no word confidence measures are returned. See [Word confidence](https://console.bluemix.net/docs/services/speech-to-text/output.html#word_confidence). */ word_confidence?: boolean; - /** If `true`, the service returns time alignment for each word. By default, no timestamps are returned. */ + /** If `true`, the service returns time alignment for each word. By default, no timestamps are returned. See [Word timestamps](https://console.bluemix.net/docs/services/speech-to-text/output.html#word_timestamps). */ timestamps?: boolean; - /** If `true`, the service filters profanity from all output except for keyword results by replacing inappropriate words with a series of asterisks. Set the parameter to `false` to return results with no censoring. Applies to US English transcription only. */ + /** If `true`, the service filters profanity from all output except for keyword results by replacing inappropriate words with a series of asterisks. Set the parameter to `false` to return results with no censoring. Applies to US English transcription only. See [Profanity filtering](https://console.bluemix.net/docs/services/speech-to-text/output.html#profanity_filter). 
*/ profanity_filter?: boolean; - /** If `true`, the service converts dates, times, series of digits and numbers, phone numbers, currency values, and internet addresses into more readable, conventional representations in the final transcript of a recognition request. For US English, the service also converts certain keyword strings to punctuation symbols. By default, no smart formatting is performed. Applies to US English and Spanish transcription only. */ + /** If `true`, the service converts dates, times, series of digits and numbers, phone numbers, currency values, and internet addresses into more readable, conventional representations in the final transcript of a recognition request. For US English, the service also converts certain keyword strings to punctuation symbols. By default, no smart formatting is performed. Applies to US English and Spanish transcription only. See [Smart formatting](https://console.bluemix.net/docs/services/speech-to-text/output.html#smart_formatting). */ smart_formatting?: boolean; - /** If `true`, the response includes labels that identify which words were spoken by which participants in a multi-person exchange. By default, no speaker labels are returned. Setting `speaker_labels` to `true` forces the `timestamps` parameter to be `true`, regardless of whether you specify `false` for the parameter. To determine whether a language model supports speaker labels, use the **Get models** method and check that the attribute `speaker_labels` is set to `true`. You can also refer to [Speaker labels](https://console.bluemix.net/docs/services/speech-to-text/output.html#speaker_labels). */ + /** If `true`, the response includes labels that identify which words were spoken by which participants in a multi-person exchange. By default, no speaker labels are returned. Setting `speaker_labels` to `true` forces the `timestamps` parameter to be `true`, regardless of whether you specify `false` for the parameter. 
To determine whether a language model supports speaker labels, use the **Get models** method and check that the attribute `speaker_labels` is set to `true`. See [Speaker labels](https://console.bluemix.net/docs/services/speech-to-text/output.html#speaker_labels). */ speaker_labels?: boolean; + /** **Deprecated.** Use the `language_customization_id` parameter to specify the customization ID (GUID) of a custom language model that is to be used with the recognition request. Do not specify both parameters with a request. */ + customization_id?: string; headers?: Object; } @@ -2547,19 +2788,20 @@ namespace SpeechToTextV1 { export namespace RecognizeConstants { /** The type of the input. */ export enum ContentType { - BASIC = 'audio/basic', - FLAC = 'audio/flac', - L16 = 'audio/l16', - MP3 = 'audio/mp3', - MPEG = 'audio/mpeg', - MULAW = 'audio/mulaw', - OGG = 'audio/ogg', - OGG_CODECS_OPUS = 'audio/ogg;codecs=opus', - OGG_CODECS_VORBIS = 'audio/ogg;codecs=vorbis', - WAV = 'audio/wav', - WEBM = 'audio/webm', - WEBM_CODECS_OPUS = 'audio/webm;codecs=opus', - WEBM_CODECS_VORBIS = 'audio/webm;codecs=vorbis', + APPLICATION_OCTET_STREAM = 'application/octet-stream', + AUDIO_BASIC = 'audio/basic', + AUDIO_FLAC = 'audio/flac', + AUDIO_L16 = 'audio/l16', + AUDIO_MP3 = 'audio/mp3', + AUDIO_MPEG = 'audio/mpeg', + AUDIO_MULAW = 'audio/mulaw', + AUDIO_OGG = 'audio/ogg', + AUDIO_OGG_CODECS_OPUS = 'audio/ogg;codecs=opus', + AUDIO_OGG_CODECS_VORBIS = 'audio/ogg;codecs=vorbis', + AUDIO_WAV = 'audio/wav', + AUDIO_WEBM = 'audio/webm', + AUDIO_WEBM_CODECS_OPUS = 'audio/webm;codecs=opus', + AUDIO_WEBM_CODECS_VORBIS = 'audio/webm;codecs=vorbis', } /** The identifier of the model that is to be used for the recognition request. */ export enum Model { @@ -2585,7 +2827,7 @@ namespace SpeechToTextV1 { /** Parameters for the `checkJob` operation. */ export interface CheckJobParams { - /** The ID of the asynchronous job. 
*/ + /** The identifier of the asynchronous job that is to be used for the request. */ id: string; headers?: Object; } @@ -2597,7 +2839,7 @@ namespace SpeechToTextV1 { /** Parameters for the `createJob` operation. */ export interface CreateJobParams { - /** The audio to transcribe in the format specified by the `Content-Type` header. */ + /** The audio to transcribe. */ audio: NodeJS.ReadableStream|FileObject|Buffer; /** The type of the input. */ content_type: CreateJobConstants.ContentType | string; @@ -2611,34 +2853,36 @@ namespace SpeechToTextV1 { user_token?: string; /** The number of minutes for which the results are to be available after the job has finished. If not delivered via a callback, the results must be retrieved within this time. Omit the parameter to use a time to live of one week. The parameter is valid with or without a callback URL. */ results_ttl?: number; - /** The customization ID (GUID) of a custom language model that is to be used with the recognition request. The base model of the specified custom language model must match the model specified with the `model` parameter. You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom language model is used. */ - customization_id?: string; - /** The customization ID (GUID) of a custom acoustic model that is to be used with the recognition request. The base model of the specified custom acoustic model must match the model specified with the `model` parameter. You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom acoustic model is used. */ + /** The customization ID (GUID) of a custom language model that is to be used with the recognition request. The base model of the specified custom language model must match the model specified with the `model` parameter. 
You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom language model is used. See [Custom models](https://console.bluemix.net/docs/services/speech-to-text/input.html#custom). **Note:** Use this parameter instead of the deprecated `customization_id` parameter. */ + language_customization_id?: string; + /** The customization ID (GUID) of a custom acoustic model that is to be used with the recognition request. The base model of the specified custom acoustic model must match the model specified with the `model` parameter. You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom acoustic model is used. See [Custom models](https://console.bluemix.net/docs/services/speech-to-text/input.html#custom). */ acoustic_customization_id?: string; - /** The version of the specified base model that is to be used with recognition request. Multiple versions of a base model can exist when a model is updated for internal improvements. The parameter is intended primarily for use with custom models that have been upgraded for a new base model. The default value depends on whether the parameter is used with or without a custom model. For more information, see [Base model version](https://console.bluemix.net/docs/services/speech-to-text/input.html#version). */ + /** The version of the specified base model that is to be used with recognition request. Multiple versions of a base model can exist when a model is updated for internal improvements. The parameter is intended primarily for use with custom models that have been upgraded for a new base model. The default value depends on whether the parameter is used with or without a custom model. See [Base model version](https://console.bluemix.net/docs/services/speech-to-text/input.html#version). 
*/ base_model_version?: string; - /** If you specify the customization ID (GUID) of a custom language model with the recognition request, the customization weight tells the service how much weight to give to words from the custom language model compared to those from the base model for the current request. Specify a value between 0.0 and 1.0. Unless a different customization weight was specified for the custom model when it was trained, the default value is 0.3. A customization weight that you specify overrides a weight that was specified when the custom model was trained. The default value yields the best performance in general. Assign a higher value if your audio makes frequent use of OOV words from the custom model. Use caution when setting the weight: a higher value can improve the accuracy of phrases from the custom model's domain, but it can negatively affect performance on non-domain phrases. */ + /** If you specify the customization ID (GUID) of a custom language model with the recognition request, the customization weight tells the service how much weight to give to words from the custom language model compared to those from the base model for the current request. Specify a value between 0.0 and 1.0. Unless a different customization weight was specified for the custom model when it was trained, the default value is 0.3. A customization weight that you specify overrides a weight that was specified when the custom model was trained. The default value yields the best performance in general. Assign a higher value if your audio makes frequent use of OOV words from the custom model. Use caution when setting the weight: a higher value can improve the accuracy of phrases from the custom model's domain, but it can negatively affect performance on non-domain phrases. See [Custom models](https://console.bluemix.net/docs/services/speech-to-text/input.html#custom). 
*/ customization_weight?: number; - /** The time in seconds after which, if only silence (no speech) is detected in submitted audio, the connection is closed with a 400 error. The parameter is useful for stopping audio submission from a live microphone when a user simply walks away. Use `-1` for infinity. */ + /** The time in seconds after which, if only silence (no speech) is detected in submitted audio, the connection is closed with a 400 error. The parameter is useful for stopping audio submission from a live microphone when a user simply walks away. Use `-1` for infinity. See [Timeouts](https://console.bluemix.net/docs/services/speech-to-text/input.html#timeouts). */ inactivity_timeout?: number; - /** An array of keyword strings to spot in the audio. Each keyword string can include one or more string tokens. Keywords are spotted only in the final results, not in interim hypotheses. If you specify any keywords, you must also specify a keywords threshold. You can spot a maximum of 1000 keywords. Omit the parameter or specify an empty array if you do not need to spot keywords. */ + /** An array of keyword strings to spot in the audio. Each keyword string can include one or more string tokens. Keywords are spotted only in the final results, not in interim hypotheses. If you specify any keywords, you must also specify a keywords threshold. You can spot a maximum of 1000 keywords. Omit the parameter or specify an empty array if you do not need to spot keywords. See [Keyword spotting](https://console.bluemix.net/docs/services/speech-to-text/output.html#keyword_spotting). */ keywords?: string[]; - /** A confidence value that is the lower bound for spotting a keyword. A word is considered to match a keyword if its confidence is greater than or equal to the threshold. Specify a probability between 0.0 and 1.0. No keyword spotting is performed if you omit the parameter. If you specify a threshold, you must also specify one or more keywords. 
*/ + /** A confidence value that is the lower bound for spotting a keyword. A word is considered to match a keyword if its confidence is greater than or equal to the threshold. Specify a probability between 0.0 and 1.0. No keyword spotting is performed if you omit the parameter. If you specify a threshold, you must also specify one or more keywords. See [Keyword spotting](https://console.bluemix.net/docs/services/speech-to-text/output.html#keyword_spotting). */ keywords_threshold?: number; - /** The maximum number of alternative transcripts that the service is to return. By default, a single transcription is returned. */ + /** The maximum number of alternative transcripts that the service is to return. By default, a single transcription is returned. See [Maximum alternatives](https://console.bluemix.net/docs/services/speech-to-text/output.html#max_alternatives). */ max_alternatives?: number; - /** A confidence value that is the lower bound for identifying a hypothesis as a possible word alternative (also known as "Confusion Networks"). An alternative word is considered if its confidence is greater than or equal to the threshold. Specify a probability between 0.0 and 1.0. No alternative words are computed if you omit the parameter. */ + /** A confidence value that is the lower bound for identifying a hypothesis as a possible word alternative (also known as "Confusion Networks"). An alternative word is considered if its confidence is greater than or equal to the threshold. Specify a probability between 0.0 and 1.0. No alternative words are computed if you omit the parameter. See [Word alternatives](https://console.bluemix.net/docs/services/speech-to-text/output.html#word_alternatives). */ word_alternatives_threshold?: number; - /** If `true`, the service returns a confidence measure in the range of 0.0 to 1.0 for each word. By default, no word confidence measures are returned. 
*/ + /** If `true`, the service returns a confidence measure in the range of 0.0 to 1.0 for each word. By default, no word confidence measures are returned. See [Word confidence](https://console.bluemix.net/docs/services/speech-to-text/output.html#word_confidence). */ word_confidence?: boolean; - /** If `true`, the service returns time alignment for each word. By default, no timestamps are returned. */ + /** If `true`, the service returns time alignment for each word. By default, no timestamps are returned. See [Word timestamps](https://console.bluemix.net/docs/services/speech-to-text/output.html#word_timestamps). */ timestamps?: boolean; - /** If `true`, the service filters profanity from all output except for keyword results by replacing inappropriate words with a series of asterisks. Set the parameter to `false` to return results with no censoring. Applies to US English transcription only. */ + /** If `true`, the service filters profanity from all output except for keyword results by replacing inappropriate words with a series of asterisks. Set the parameter to `false` to return results with no censoring. Applies to US English transcription only. See [Profanity filtering](https://console.bluemix.net/docs/services/speech-to-text/output.html#profanity_filter). */ profanity_filter?: boolean; - /** If `true`, the service converts dates, times, series of digits and numbers, phone numbers, currency values, and internet addresses into more readable, conventional representations in the final transcript of a recognition request. For US English, the service also converts certain keyword strings to punctuation symbols. By default, no smart formatting is performed. Applies to US English and Spanish transcription only. */ + /** If `true`, the service converts dates, times, series of digits and numbers, phone numbers, currency values, and internet addresses into more readable, conventional representations in the final transcript of a recognition request. 
For US English, the service also converts certain keyword strings to punctuation symbols. By default, no smart formatting is performed. Applies to US English and Spanish transcription only. See [Smart formatting](https://console.bluemix.net/docs/services/speech-to-text/output.html#smart_formatting). */ smart_formatting?: boolean; - /** If `true`, the response includes labels that identify which words were spoken by which participants in a multi-person exchange. By default, no speaker labels are returned. Setting `speaker_labels` to `true` forces the `timestamps` parameter to be `true`, regardless of whether you specify `false` for the parameter. To determine whether a language model supports speaker labels, use the **Get models** method and check that the attribute `speaker_labels` is set to `true`. You can also refer to [Speaker labels](https://console.bluemix.net/docs/services/speech-to-text/output.html#speaker_labels). */ + /** If `true`, the response includes labels that identify which words were spoken by which participants in a multi-person exchange. By default, no speaker labels are returned. Setting `speaker_labels` to `true` forces the `timestamps` parameter to be `true`, regardless of whether you specify `false` for the parameter. To determine whether a language model supports speaker labels, use the **Get models** method and check that the attribute `speaker_labels` is set to `true`. See [Speaker labels](https://console.bluemix.net/docs/services/speech-to-text/output.html#speaker_labels). */ speaker_labels?: boolean; + /** **Deprecated.** Use the `language_customization_id` parameter to specify the customization ID (GUID) of a custom language model that is to be used with the recognition request. Do not specify both parameters with a request. */ + customization_id?: string; headers?: Object; } @@ -2646,19 +2890,20 @@ namespace SpeechToTextV1 { export namespace CreateJobConstants { /** The type of the input. 
*/ export enum ContentType { - BASIC = 'audio/basic', - FLAC = 'audio/flac', - L16 = 'audio/l16', - MP3 = 'audio/mp3', - MPEG = 'audio/mpeg', - MULAW = 'audio/mulaw', - OGG = 'audio/ogg', - OGG_CODECS_OPUS = 'audio/ogg;codecs=opus', - OGG_CODECS_VORBIS = 'audio/ogg;codecs=vorbis', - WAV = 'audio/wav', - WEBM = 'audio/webm', - WEBM_CODECS_OPUS = 'audio/webm;codecs=opus', - WEBM_CODECS_VORBIS = 'audio/webm;codecs=vorbis', + APPLICATION_OCTET_STREAM = 'application/octet-stream', + AUDIO_BASIC = 'audio/basic', + AUDIO_FLAC = 'audio/flac', + AUDIO_L16 = 'audio/l16', + AUDIO_MP3 = 'audio/mp3', + AUDIO_MPEG = 'audio/mpeg', + AUDIO_MULAW = 'audio/mulaw', + AUDIO_OGG = 'audio/ogg', + AUDIO_OGG_CODECS_OPUS = 'audio/ogg;codecs=opus', + AUDIO_OGG_CODECS_VORBIS = 'audio/ogg;codecs=vorbis', + AUDIO_WAV = 'audio/wav', + AUDIO_WEBM = 'audio/webm', + AUDIO_WEBM_CODECS_OPUS = 'audio/webm;codecs=opus', + AUDIO_WEBM_CODECS_VORBIS = 'audio/webm;codecs=vorbis', } /** The identifier of the model that is to be used for the recognition request. */ export enum Model { @@ -2691,7 +2936,7 @@ namespace SpeechToTextV1 { /** Parameters for the `deleteJob` operation. */ export interface DeleteJobParams { - /** The ID of the asynchronous job. */ + /** The identifier of the asynchronous job that is to be used for the request. */ id: string; headers?: Object; } @@ -2748,14 +2993,14 @@ namespace SpeechToTextV1 { /** Parameters for the `deleteLanguageModel` operation. */ export interface DeleteLanguageModelParams { - /** The customization ID (GUID) of the custom language model. You must make the request with service credentials created for the instance of the service that owns the custom model. */ + /** The customization ID (GUID) of the custom language model that is to be used for the request. You must make the request with service credentials created for the instance of the service that owns the custom model. 
*/ customization_id: string; headers?: Object; } /** Parameters for the `getLanguageModel` operation. */ export interface GetLanguageModelParams { - /** The customization ID (GUID) of the custom language model. You must make the request with service credentials created for the instance of the service that owns the custom model. */ + /** The customization ID (GUID) of the custom language model that is to be used for the request. You must make the request with service credentials created for the instance of the service that owns the custom model. */ customization_id: string; headers?: Object; } @@ -2769,14 +3014,14 @@ namespace SpeechToTextV1 { /** Parameters for the `resetLanguageModel` operation. */ export interface ResetLanguageModelParams { - /** The customization ID (GUID) of the custom language model. You must make the request with service credentials created for the instance of the service that owns the custom model. */ + /** The customization ID (GUID) of the custom language model that is to be used for the request. You must make the request with service credentials created for the instance of the service that owns the custom model. */ customization_id: string; headers?: Object; } /** Parameters for the `trainLanguageModel` operation. */ export interface TrainLanguageModelParams { - /** The customization ID (GUID) of the custom language model. You must make the request with service credentials created for the instance of the service that owns the custom model. */ + /** The customization ID (GUID) of the custom language model that is to be used for the request. You must make the request with service credentials created for the instance of the service that owns the custom model. */ customization_id: string; /** The type of words from the custom language model's words resource on which to train the model: * `all` (the default) trains the model on all new words, regardless of whether they were extracted from corpora or were added or modified by the user. 
* `user` trains the model only on new words that were added or modified by the user; the model is not trained on new words extracted from corpora. */ word_type_to_add?: TrainLanguageModelConstants.WordTypeToAdd | string; @@ -2796,18 +3041,18 @@ namespace SpeechToTextV1 { /** Parameters for the `upgradeLanguageModel` operation. */ export interface UpgradeLanguageModelParams { - /** The customization ID (GUID) of the custom language model. You must make the request with service credentials created for the instance of the service that owns the custom model. */ + /** The customization ID (GUID) of the custom language model that is to be used for the request. You must make the request with service credentials created for the instance of the service that owns the custom model. */ customization_id: string; headers?: Object; } /** Parameters for the `addCorpus` operation. */ export interface AddCorpusParams { - /** The customization ID (GUID) of the custom language model. You must make the request with service credentials created for the instance of the service that owns the custom model. */ + /** The customization ID (GUID) of the custom language model that is to be used for the request. You must make the request with service credentials created for the instance of the service that owns the custom model. */ customization_id: string; /** The name of the new corpus for the custom language model. Use a localized name that matches the language of the custom model and reflects the contents of the corpus. * Include a maximum of 128 characters in the name. * Do not include spaces, slashes, or backslashes in the name. * Do not use the name of a corpus that has already been added to the custom model. * Do not use the name `user`, which is reserved by the service to denote custom words that are added or modified by the user. */ corpus_name: string; - /** A plain text file that contains the training data for the corpus. 
Encode the file in UTF-8 if it contains non-ASCII characters; the service assumes UTF-8 encoding if it encounters non-ASCII characters. With cURL, use the `--data-binary` option to upload the file for the request. */ + /** A plain text file that contains the training data for the corpus. Encode the file in UTF-8 if it contains non-ASCII characters; the service assumes UTF-8 encoding if it encounters non-ASCII characters. With the `curl` command, use the `--data-binary` option to upload the file for the request. */ corpus_file: NodeJS.ReadableStream|FileObject|Buffer; /** If `true`, the specified corpus or audio resource overwrites an existing corpus or audio resource with the same name. If `false`, the request fails if a corpus or audio resource with the same name already exists. The parameter has no effect if a corpus or audio resource with the same name does not already exist. */ allow_overwrite?: boolean; @@ -2818,7 +3063,7 @@ namespace SpeechToTextV1 { /** Parameters for the `deleteCorpus` operation. */ export interface DeleteCorpusParams { - /** The customization ID (GUID) of the custom language model. You must make the request with service credentials created for the instance of the service that owns the custom model. */ + /** The customization ID (GUID) of the custom language model that is to be used for the request. You must make the request with service credentials created for the instance of the service that owns the custom model. */ customization_id: string; /** The name of the corpus for the custom language model. */ corpus_name: string; @@ -2827,7 +3072,7 @@ namespace SpeechToTextV1 { /** Parameters for the `getCorpus` operation. */ export interface GetCorpusParams { - /** The customization ID (GUID) of the custom language model. You must make the request with service credentials created for the instance of the service that owns the custom model. */ + /** The customization ID (GUID) of the custom language model that is to be used for the request. 
You must make the request with service credentials created for the instance of the service that owns the custom model. */ customization_id: string; /** The name of the corpus for the custom language model. */ corpus_name: string; @@ -2836,14 +3081,14 @@ namespace SpeechToTextV1 { /** Parameters for the `listCorpora` operation. */ export interface ListCorporaParams { - /** The customization ID (GUID) of the custom language model. You must make the request with service credentials created for the instance of the service that owns the custom model. */ + /** The customization ID (GUID) of the custom language model that is to be used for the request. You must make the request with service credentials created for the instance of the service that owns the custom model. */ customization_id: string; headers?: Object; } /** Parameters for the `addWord` operation. */ export interface AddWordParams { - /** The customization ID (GUID) of the custom language model. You must make the request with service credentials created for the instance of the service that owns the custom model. */ + /** The customization ID (GUID) of the custom language model that is to be used for the request. You must make the request with service credentials created for the instance of the service that owns the custom model. */ customization_id: string; /** The custom word for the custom language model. When you add or update a custom word with the **Add a custom word** method, do not include spaces in the word. Use a `-` (dash) or `_` (underscore) to connect the tokens of compound words. */ word_name: string; @@ -2858,7 +3103,7 @@ namespace SpeechToTextV1 { /** Parameters for the `addWords` operation. */ export interface AddWordsParams { - /** The customization ID (GUID) of the custom language model. You must make the request with service credentials created for the instance of the service that owns the custom model. 
*/ + /** The customization ID (GUID) of the custom language model that is to be used for the request. You must make the request with service credentials created for the instance of the service that owns the custom model. */ customization_id: string; /** An array of objects that provides information about each custom word that is to be added to or updated in the custom language model. */ words: CustomWord[]; @@ -2867,7 +3112,7 @@ namespace SpeechToTextV1 { /** Parameters for the `deleteWord` operation. */ export interface DeleteWordParams { - /** The customization ID (GUID) of the custom language model. You must make the request with service credentials created for the instance of the service that owns the custom model. */ + /** The customization ID (GUID) of the custom language model that is to be used for the request. You must make the request with service credentials created for the instance of the service that owns the custom model. */ customization_id: string; /** The custom word for the custom language model. When you add or update a custom word with the **Add a custom word** method, do not include spaces in the word. Use a `-` (dash) or `_` (underscore) to connect the tokens of compound words. */ word_name: string; @@ -2876,7 +3121,7 @@ namespace SpeechToTextV1 { /** Parameters for the `getWord` operation. */ export interface GetWordParams { - /** The customization ID (GUID) of the custom language model. You must make the request with service credentials created for the instance of the service that owns the custom model. */ + /** The customization ID (GUID) of the custom language model that is to be used for the request. You must make the request with service credentials created for the instance of the service that owns the custom model. */ customization_id: string; /** The custom word for the custom language model. When you add or update a custom word with the **Add a custom word** method, do not include spaces in the word. 
Use a `-` (dash) or `_` (underscore) to connect the tokens of compound words. */ word_name: string; @@ -2885,11 +3130,11 @@ namespace SpeechToTextV1 { /** Parameters for the `listWords` operation. */ export interface ListWordsParams { - /** The customization ID (GUID) of the custom language model. You must make the request with service credentials created for the instance of the service that owns the custom model. */ + /** The customization ID (GUID) of the custom language model that is to be used for the request. You must make the request with service credentials created for the instance of the service that owns the custom model. */ customization_id: string; /** The type of words to be listed from the custom language model's words resource: * `all` (the default) shows all words. * `user` shows only custom words that were added or modified by the user. * `corpora` shows only OOV that were extracted from corpora. */ word_type?: ListWordsConstants.WordType | string; - /** Indicates the order in which the words are to be listed, `alphabetical` or by `count`. You can prepend an optional `+` or `-` to an argument to indicate whether the results are to be sorted in ascending or descending order. By default, words are sorted in ascending alphabetical order. For alphabetical ordering, the lexicographical precedence is numeric values, uppercase letters, and lowercase letters. For count ordering, values with the same count are ordered alphabetically. With cURL, URL encode the `+` symbol as `%2B`. */ + /** Indicates the order in which the words are to be listed, `alphabetical` or by `count`. You can prepend an optional `+` or `-` to an argument to indicate whether the results are to be sorted in ascending or descending order. By default, words are sorted in ascending alphabetical order. For alphabetical ordering, the lexicographical precedence is numeric values, uppercase letters, and lowercase letters. 
For count ordering, values with the same count are ordered alphabetically. With the `curl` command, URL encode the `+` symbol as `%2B`. */ sort?: ListWordsConstants.Sort | string; headers?: Object; } @@ -2902,7 +3147,7 @@ namespace SpeechToTextV1 { USER = 'user', CORPORA = 'corpora', } - /** Indicates the order in which the words are to be listed, `alphabetical` or by `count`. You can prepend an optional `+` or `-` to an argument to indicate whether the results are to be sorted in ascending or descending order. By default, words are sorted in ascending alphabetical order. For alphabetical ordering, the lexicographical precedence is numeric values, uppercase letters, and lowercase letters. For count ordering, values with the same count are ordered alphabetically. With cURL, URL encode the `+` symbol as `%2B`. */ + /** Indicates the order in which the words are to be listed, `alphabetical` or by `count`. You can prepend an optional `+` or `-` to an argument to indicate whether the results are to be sorted in ascending or descending order. By default, words are sorted in ascending alphabetical order. For alphabetical ordering, the lexicographical precedence is numeric values, uppercase letters, and lowercase letters. For count ordering, values with the same count are ordered alphabetically. With the `curl` command, URL encode the `+` symbol as `%2B`. */ export enum Sort { ALPHABETICAL = 'alphabetical', COUNT = 'count', @@ -2946,14 +3191,14 @@ namespace SpeechToTextV1 { /** Parameters for the `deleteAcousticModel` operation. */ export interface DeleteAcousticModelParams { - /** The customization ID (GUID) of the custom acoustic model. You must make the request with service credentials created for the instance of the service that owns the custom model. */ + /** The customization ID (GUID) of the custom acoustic model that is to be used for the request. You must make the request with service credentials created for the instance of the service that owns the custom model. 
*/ customization_id: string; headers?: Object; } /** Parameters for the `getAcousticModel` operation. */ export interface GetAcousticModelParams { - /** The customization ID (GUID) of the custom acoustic model. You must make the request with service credentials created for the instance of the service that owns the custom model. */ + /** The customization ID (GUID) of the custom acoustic model that is to be used for the request. You must make the request with service credentials created for the instance of the service that owns the custom model. */ customization_id: string; headers?: Object; } @@ -2967,14 +3212,14 @@ namespace SpeechToTextV1 { /** Parameters for the `resetAcousticModel` operation. */ export interface ResetAcousticModelParams { - /** The customization ID (GUID) of the custom acoustic model. You must make the request with service credentials created for the instance of the service that owns the custom model. */ + /** The customization ID (GUID) of the custom acoustic model that is to be used for the request. You must make the request with service credentials created for the instance of the service that owns the custom model. */ customization_id: string; headers?: Object; } /** Parameters for the `trainAcousticModel` operation. */ export interface TrainAcousticModelParams { - /** The customization ID (GUID) of the custom acoustic model. You must make the request with service credentials created for the instance of the service that owns the custom model. */ + /** The customization ID (GUID) of the custom acoustic model that is to be used for the request. You must make the request with service credentials created for the instance of the service that owns the custom model. */ customization_id: string; /** The customization ID (GUID) of a custom language model that is to be used during training of the custom acoustic model. 
Specify a custom language model that has been trained with verbatim transcriptions of the audio resources or that contains words that are relevant to the contents of the audio resources. */ custom_language_model_id?: string; @@ -2983,7 +3228,7 @@ namespace SpeechToTextV1 { /** Parameters for the `upgradeAcousticModel` operation. */ export interface UpgradeAcousticModelParams { - /** The customization ID (GUID) of the custom acoustic model. You must make the request with service credentials created for the instance of the service that owns the custom model. */ + /** The customization ID (GUID) of the custom acoustic model that is to be used for the request. You must make the request with service credentials created for the instance of the service that owns the custom model. */ customization_id: string; /** If the custom acoustic model was trained with a custom language model, the customization ID (GUID) of that custom language model. The custom language model must be upgraded before the custom acoustic model can be upgraded. */ custom_language_model_id?: string; @@ -2992,7 +3237,7 @@ namespace SpeechToTextV1 { /** Parameters for the `addAudio` operation. */ export interface AddAudioParams { - /** The customization ID (GUID) of the custom acoustic model. You must make the request with service credentials created for the instance of the service that owns the custom model. */ + /** The customization ID (GUID) of the custom acoustic model that is to be used for the request. You must make the request with service credentials created for the instance of the service that owns the custom model. */ customization_id: string; /** The name of the new audio resource for the custom acoustic model. Use a localized name that matches the language of the custom model and reflects the contents of the resource. * Include a maximum of 128 characters in the name. * Do not include spaces, slashes, or backslashes in the name. 
* Do not use the name of an audio resource that has already been added to the custom model. */ audio_name: string; @@ -3047,7 +3292,7 @@ namespace SpeechToTextV1 { /** Parameters for the `deleteAudio` operation. */ export interface DeleteAudioParams { - /** The customization ID (GUID) of the custom acoustic model. You must make the request with service credentials created for the instance of the service that owns the custom model. */ + /** The customization ID (GUID) of the custom acoustic model that is to be used for the request. You must make the request with service credentials created for the instance of the service that owns the custom model. */ customization_id: string; /** The name of the audio resource for the custom acoustic model. */ audio_name: string; @@ -3056,7 +3301,7 @@ namespace SpeechToTextV1 { /** Parameters for the `getAudio` operation. */ export interface GetAudioParams { - /** The customization ID (GUID) of the custom acoustic model. You must make the request with service credentials created for the instance of the service that owns the custom model. */ + /** The customization ID (GUID) of the custom acoustic model that is to be used for the request. You must make the request with service credentials created for the instance of the service that owns the custom model. */ customization_id: string; /** The name of the audio resource for the custom acoustic model. */ audio_name: string; @@ -3065,7 +3310,7 @@ namespace SpeechToTextV1 { /** Parameters for the `listAudio` operation. */ export interface ListAudioParams { - /** The customization ID (GUID) of the custom acoustic model. You must make the request with service credentials created for the instance of the service that owns the custom model. */ + /** The customization ID (GUID) of the custom acoustic model that is to be used for the request. You must make the request with service credentials created for the instance of the service that owns the custom model. 
*/ customization_id: string; headers?: Object; } @@ -3311,11 +3556,11 @@ namespace SpeechToTextV1 { export interface SpeechRecognitionAlternative { /** A transcription of the audio. */ transcript: string; - /** A score that indicates the service's confidence in the transcript in the range of 0.0 to 1.0. Returned only for the best alternative and only with results marked as final. */ + /** A score that indicates the service's confidence in the transcript in the range of 0.0 to 1.0. A confidence score is returned only for the best alternative and only with results marked as final. */ confidence?: number; - /** Time alignments for each word from the transcript as a list of lists. Each inner list consists of three elements: the word followed by its start and end time in seconds, for example: `[["hello",0.0,1.2],["world",1.2,2.5]]`. Returned only for the best alternative. */ + /** Time alignments for each word from the transcript as a list of lists. Each inner list consists of three elements: the word followed by its start and end time in seconds, for example: `[["hello",0.0,1.2],["world",1.2,2.5]]`. Timestamps are returned only for the best alternative. */ timestamps?: string[]; - /** A confidence score for each word of the transcript as a list of lists. Each inner list consists of two elements: the word and its confidence score in the range of 0.0 to 1.0, for example: `[["hello",0.95],["world",0.866]]`. Returned only for the best alternative and only with results marked as final. */ + /** A confidence score for each word of the transcript as a list of lists. Each inner list consists of two elements: the word and its confidence score in the range of 0.0 to 1.0, for example: `[["hello",0.95],["world",0.866]]`. Confidence scores are returned only for the best alternative and only with results marked as final. */ word_confidence?: string[]; } @@ -3333,11 +3578,11 @@ namespace SpeechToTextV1 { /** SpeechRecognitionResults. 
*/ export interface SpeechRecognitionResults { - /** An array that can include interim and final results (interim results are returned only if supported by the method). Final results are guaranteed not to change; interim results might be replaced by further interim results and final results. The service periodically sends updates to the results list; the `result_index` is set to the lowest index in the array that has changed; it is incremented for new results. */ + /** An array of `SpeechRecognitionResult` objects that can include interim and final results (interim results are returned only if supported by the method). Final results are guaranteed not to change; interim results might be replaced by further interim results and final results. The service periodically sends updates to the results list; the `result_index` is set to the lowest index in the array that has changed; it is incremented for new results. */ results?: SpeechRecognitionResult[]; /** An index that indicates a change point in the `results` array. The service increments the index only for additional results that it sends for new audio for the same request. */ result_index?: number; - /** An array that identifies which words were spoken by which speakers in a multi-person exchange. Returned in the response only if `speaker_labels` is `true`. When interim results are also requested for methods that support them, it is possible for a `SpeechRecognitionResults` object to include only the `speaker_labels` field. */ + /** An array of `SpeakerLabelsResult` objects that identifies which words were spoken by which speakers in a multi-person exchange. The array is returned only if the `speaker_labels` parameter is `true`. When interim results are also requested for methods that support them, it is possible for a `SpeechRecognitionResults` object to include only the `speaker_labels` field. 
*/ speaker_labels?: SpeakerLabelsResult[]; /** An array of warning messages associated with the request: * Warnings for invalid parameters or fields can include a descriptive message and a list of invalid argument strings, for example, `"Unknown arguments:"` or `"Unknown url query arguments:"` followed by a list of the form `"invalid_arg_1, invalid_arg_2."` * The following warning is returned if the request passes a custom model that is based on an older version of a base model for which an updated version is available: `"Using previous version of base model, because your custom model has been built with it. Please note that this version will be supported only for a limited time. Consider updating your custom model to the new base model. If you do not do that you will be automatically switched to base model when you used the non-updated custom model."` In both cases, the request succeeds despite the warnings. */ warnings?: string[]; diff --git a/text-to-speech/v1-generated.ts b/text-to-speech/v1-generated.ts index fba8d6c271..a9f3c097c2 100644 --- a/text-to-speech/v1-generated.ts +++ b/text-to-speech/v1-generated.ts @@ -21,7 +21,7 @@ import { getMissingParams } from '../lib/helper'; import { FileObject } from '../lib/helper'; /** - * ### Service Overview The IBM® Text to Speech service provides an API that uses IBM's speech-synthesis capabilities to synthesize text into natural-sounding speech in a variety of languages, dialects, and voices. The service supports at least one male or female voice, sometimes both, for each language. The audio is streamed back to the client with minimal delay. For more information about the service, see the [IBM® Cloud documentation](https://console.bluemix.net/docs/services/text-to-speech/index.html). ### API usage guidelines * **Audio formats:** The service can produce audio in many formats (MIME types). See [Specifying an audio format](https://console.bluemix.net/docs/services/text-to-speech/http.html#format). 
* **SSML:** Many methods refer to the Speech Synthesis Markup Language (SSML). SSML is an XML-based markup language that provides text annotation for speech-synthesis applications. See [Using SSML](https://console.bluemix.net/docs/services/text-to-speech/SSML.html) and [Using IBM SPR](https://console.bluemix.net/docs/services/text-to-speech/SPRs.html). * **Word translations:** Many customization methods accept sounds-like or phonetic translations for words. Phonetic translations are based on the SSML phoneme format for representing a word. You can specify them in standard International Phonetic Alphabet (IPA) representation <phoneme alphabet=\"ipa\" ph=\"təmˈɑto\"></phoneme> or in the proprietary IBM Symbolic Phonetic Representation (SPR) <phoneme alphabet=\"ibm\" ph=\"1gAstroEntxrYFXs\"></phoneme> See [Understanding customization](https://console.bluemix.net/docs/services/text-to-speech/custom-intro.html). * **WebSocket interface:** The service also offers a WebSocket interface for speech synthesis. The WebSocket interface supports both plain text and SSML input, including the SSML <mark> element and word timings. See [The WebSocket interface](https://console.bluemix.net/docs/services/text-to-speech/websockets.html). * **Customization IDs:** Many methods accept a customization ID, which is a Globally Unique Identifier (GUID). Customization IDs are hexadecimal strings that have the format `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`. * **`X-Watson-Learning-Opt-Out`:** By default, all Watson services log requests and their results. Logging is done only to improve the services for future users. The logged data is not shared or made public. To prevent IBM from accessing your data for general service improvements, set the `X-Watson-Learning-Opt-Out` request header to `true` for all requests. You must set the header on each request that you do not want IBM to access for general service improvements. 
Methods of the customization interface do not log words and translations that you use to build custom voice models. Your training data is never used to improve the service's base models. However, the service does log such data when a custom model is used with a synthesize request. You must set the `X-Watson-Learning-Opt-Out` request header to `true` to prevent IBM from accessing the data to improve the service. * **`X-Watson-Metadata`:** This header allows you to associate a customer ID with data that is passed with a request. If necessary, you can use the **Delete labeled data** method to delete the data for a customer ID. See [Information security](https://console.bluemix.net/docs/services/text-to-speech/information-security.html). + * ### Service Overview The IBM® Text to Speech service provides APIs that use IBM's speech-synthesis capabilities to synthesize text into natural-sounding speech in a variety of languages, dialects, and voices. The service supports at least one male or female voice, sometimes both, for each language. The audio is streamed back to the client with minimal delay. For speech synthesis, the service supports a synchronous HTTP Representational State Transfer (REST) interface. It also supports a WebSocket interface that provides both plain text and SSML input, including the SSML <mark> element and word timings. SSML is an XML-based markup language that provides text annotation for speech-synthesis applications. The service also offers a customization interface. You can use the interface to define sounds-like or phonetic translations for words. A sounds-like translation consists of one or more words that, when combined, sound like the word. A phonetic translation is based on the SSML phoneme format for representing a word. You can specify a phonetic translation in standard International Phonetic Alphabet (IPA) representation or in the proprietary IBM Symbolic Phonetic Representation (SPR). 
*/ class TextToSpeechV1 extends BaseService { @@ -77,16 +77,20 @@ class TextToSpeechV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['voice']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'customization_id': _params.customization_id }; + const path = { 'voice': _params.voice }; + const parameters = { options: { url: '/v1/voices/{voice}', @@ -100,6 +104,7 @@ class TextToSpeechV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -119,6 +124,7 @@ class TextToSpeechV1 extends BaseService { public listVoices(params?: TextToSpeechV1.ListVoicesParams, callback?: TextToSpeechV1.Callback): NodeJS.ReadableStream | void { const _params = (typeof params === 'function' && !callback) ? {} : extend({}, params); const _callback = (typeof params === 'function' && !callback) ? params : (callback) ? callback : () => {/* noop */}; + const parameters = { options: { url: '/v1/voices', @@ -130,6 +136,7 @@ class TextToSpeechV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -172,17 +179,21 @@ class TextToSpeechV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['text']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'text': _params.text }; + const query = { 'voice': _params.voice, 'customization_id': _params.customization_id }; + const parameters = { options: { url: '/v1/synthesize', @@ -199,6 +210,7 @@ class TextToSpeechV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -238,16 +250,19 @@ class TextToSpeechV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['text']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'text': _params.text, 'voice': _params.voice, 'format': _params.format, 'customization_id': _params.customization_id }; + const parameters = { options: { url: '/v1/pronunciation', @@ -260,6 +275,7 @@ class TextToSpeechV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -293,15 +309,18 @@ class TextToSpeechV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['name']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'name': _params.name, 'language': _params.language, 'description': _params.description }; + const parameters = { options: { url: '/v1/customizations', @@ -316,6 +335,7 @@ class TextToSpeechV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -341,13 +361,16 @@ class TextToSpeechV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['customization_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'customization_id': _params.customization_id }; + const parameters = { options: { url: '/v1/customizations/{customization_id}', @@ -359,6 +382,7 @@ class TextToSpeechV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -385,13 +409,16 @@ class TextToSpeechV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['customization_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'customization_id': _params.customization_id }; + const parameters = { options: { url: '/v1/customizations/{customization_id}', @@ -404,6 +431,7 @@ class TextToSpeechV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -431,9 +459,11 @@ class TextToSpeechV1 extends BaseService { public listVoiceModels(params?: TextToSpeechV1.ListVoiceModelsParams, callback?: TextToSpeechV1.Callback): NodeJS.ReadableStream | void { const _params = (typeof params === 'function' && !callback) ? {} : extend({}, params); const _callback = (typeof params === 'function' && !callback) ? params : (callback) ? callback : () => {/* noop */}; + const query = { 'language': _params.language }; + const parameters = { options: { url: '/v1/customizations', @@ -446,6 +476,7 @@ class TextToSpeechV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -458,10 +489,24 @@ class TextToSpeechV1 extends BaseService { * no more than 20,000 entries. You must use credentials for the instance of the service that owns a model to update * it. 
* + * You can define sounds-like or phonetic translations for words. A sounds-like translation consists of one or more + * words that, when combined, sound like the word. Phonetic translations are based on the SSML phoneme format for + * representing a word. You can specify them in standard International Phonetic Alphabet (IPA) representation + * + * <phoneme alphabet=\"ipa\" ph=\"təmˈɑto\"></phoneme> + * + * or in the proprietary IBM Symbolic Phonetic Representation (SPR) + * + * <phoneme alphabet=\"ibm\" ph=\"1gAstroEntxrYFXs\"></phoneme> + * * **Note:** This method is currently a beta release. * - * **See also:** [Updating a custom - * model](https://console.bluemix.net/docs/services/text-to-speech/custom-models.html#cuModelsUpdate). + * **See also:** + * * [Updating a custom + * model](https://console.bluemix.net/docs/services/text-to-speech/custom-models.html#cuModelsUpdate) + * * [Adding words to a Japanese custom + * model](https://console.bluemix.net/docs/services/text-to-speech/custom-entries.html#cuJapaneseAdd) + * * [Understanding customization](https://console.bluemix.net/docs/services/text-to-speech/custom-intro.html). * * @param {Object} params - The parameters to send to the service. * @param {string} params.customization_id - The customization ID (GUID) of the custom voice model. You must make the @@ -478,18 +523,22 @@ class TextToSpeechV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['customization_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'name': _params.name, 'description': _params.description, 'words': _params.words }; + const path = { 'customization_id': _params.customization_id }; + const parameters = { options: { url: '/v1/customizations/{customization_id}', @@ -505,6 +554,7 @@ class TextToSpeechV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -520,13 +570,24 @@ class TextToSpeechV1 extends BaseService { * more than 20,000 entries. You must use credentials for the instance of the service that owns a model to add a word * to it. * + * You can define sounds-like or phonetic translations for words. A sounds-like translation consists of one or more + * words that, when combined, sound like the word. Phonetic translations are based on the SSML phoneme format for + * representing a word. You can specify them in standard International Phonetic Alphabet (IPA) representation + * + * <phoneme alphabet=\"ipa\" ph=\"təmˈɑto\"></phoneme> + * + * or in the proprietary IBM Symbolic Phonetic Representation (SPR) + * + * <phoneme alphabet=\"ibm\" ph=\"1gAstroEntxrYFXs\"></phoneme> + * * **Note:** This method is currently a beta release. * * **See also:** * * [Adding a single word to a custom * model](https://console.bluemix.net/docs/services/text-to-speech/custom-entries.html#cuWordAdd) * * [Adding words to a Japanese custom - * model](https://console.bluemix.net/docs/services/text-to-speech/custom-entries.html#cuJapaneseAdd). + * model](https://console.bluemix.net/docs/services/text-to-speech/custom-entries.html#cuJapaneseAdd) + * * [Understanding customization](https://console.bluemix.net/docs/services/text-to-speech/custom-intro.html). * * @param {Object} params - The parameters to send to the service. 
* @param {string} params.customization_id - The customization ID (GUID) of the custom voice model. You must make the @@ -548,18 +609,22 @@ class TextToSpeechV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['customization_id', 'word', 'translation']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'translation': _params.translation, 'part_of_speech': _params.part_of_speech }; + const path = { 'customization_id': _params.customization_id, 'word': _params.word }; + const parameters = { options: { url: '/v1/customizations/{customization_id}/words/{word}', @@ -574,6 +639,7 @@ class TextToSpeechV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -585,13 +651,24 @@ class TextToSpeechV1 extends BaseService { * no more than 20,000 entries. You must use credentials for the instance of the service that owns a model to add * words to it. * + * You can define sounds-like or phonetic translations for words. A sounds-like translation consists of one or more + * words that, when combined, sound like the word. Phonetic translations are based on the SSML phoneme format for + * representing a word. You can specify them in standard International Phonetic Alphabet (IPA) representation + * + * <phoneme alphabet=\"ipa\" ph=\"təmˈɑto\"></phoneme> + * + * or in the proprietary IBM Symbolic Phonetic Representation (SPR) + * + * <phoneme alphabet=\"ibm\" ph=\"1gAstroEntxrYFXs\"></phoneme> + * * **Note:** This method is currently a beta release. * * **See also:** * * [Adding multiple words to a custom * model](https://console.bluemix.net/docs/services/text-to-speech/custom-entries.html#cuWordsAdd) * * [Adding words to a Japanese custom - * model](https://console.bluemix.net/docs/services/text-to-speech/custom-entries.html#cuJapaneseAdd). 
+ * model](https://console.bluemix.net/docs/services/text-to-speech/custom-entries.html#cuJapaneseAdd) + * * [Understanding customization](https://console.bluemix.net/docs/services/text-to-speech/custom-intro.html). * * @param {Object} params - The parameters to send to the service. * @param {string} params.customization_id - The customization ID (GUID) of the custom voice model. You must make the @@ -610,16 +687,20 @@ class TextToSpeechV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['customization_id', 'words']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'words': _params.words }; + const path = { 'customization_id': _params.customization_id }; + const parameters = { options: { url: '/v1/customizations/{customization_id}/words', @@ -635,6 +716,7 @@ class TextToSpeechV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -661,14 +743,17 @@ class TextToSpeechV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['customization_id', 'word']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'customization_id': _params.customization_id, 'word': _params.word }; + const parameters = { options: { url: '/v1/customizations/{customization_id}/words/{word}', @@ -680,6 +765,7 @@ class TextToSpeechV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -707,14 +793,17 @@ class TextToSpeechV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['customization_id', 'word']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'customization_id': _params.customization_id, 'word': _params.word }; + const parameters = { options: { url: '/v1/customizations/{customization_id}/words/{word}', @@ -727,6 +816,7 @@ class TextToSpeechV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -753,13 +843,16 @@ class TextToSpeechV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['customization_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const path = { 'customization_id': _params.customization_id }; + const parameters = { options: { url: '/v1/customizations/{customization_id}/words', @@ -772,6 +865,7 @@ class TextToSpeechV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -803,13 +897,16 @@ class TextToSpeechV1 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['customer_id']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const query = { 'customer_id': _params.customer_id }; + const parameters = { options: { url: '/v1/user_data', @@ -821,6 +918,7 @@ class TextToSpeechV1 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; diff --git a/tone-analyzer/v3-generated.ts b/tone-analyzer/v3-generated.ts index b732822848..cf37bf96d5 100644 --- a/tone-analyzer/v3-generated.ts +++ b/tone-analyzer/v3-generated.ts @@ -111,15 +111,18 @@ class ToneAnalyzerV3 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; const requiredParams = ['tone_input', 'content_type']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } const body = _params.tone_input; + const query = { 'sentences': _params.sentences, 'tones': _params.tones }; + const parameters = { options: { url: '/v3/tone', @@ -137,6 +140,7 @@ class ToneAnalyzerV3 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -176,13 +180,16 @@ class ToneAnalyzerV3 extends BaseService { const _params = extend({}, params); const _callback = (callback) ? 
callback : () => { /* noop */ }; const requiredParams = ['utterances']; + const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } + const body = { 'utterances': _params.utterances }; + const parameters = { options: { url: '/v3/tone_chat', @@ -199,6 +206,7 @@ class ToneAnalyzerV3 extends BaseService { }, _params.headers), }), }; + return this.createRequest(parameters, _callback); }; @@ -368,7 +376,7 @@ namespace ToneAnalyzerV3 { export interface ToneChatScore { /** The score for the tone in the range of 0.5 to 1. A score greater than 0.75 indicates a high likelihood that the tone is perceived in the utterance. */ score: number; - /** The unique, non-localized identifier of the tone for the results. The service can return results for the following tone IDs: `sad`, `frustrated`, `satisfied`, `excited`, `polite`, `impolite`, and `sympathetic`. The service returns results only for tones whose scores meet a minimum threshold of 0.5. */ + /** The unique, non-localized identifier of the tone for the results. The service returns results only for tones whose scores meet a minimum threshold of 0.5. */ tone_id: string; /** The user-visible, localized name of the tone. 
*/ tone_name: string; From 5ac33dbc4c487b47149dae490f67714890316031 Mon Sep 17 00:00:00 2001 From: Dustin Popp Date: Mon, 29 Oct 2018 16:30:07 -0500 Subject: [PATCH 2/3] test: add unit and integration tests for new discovery methods --- test/integration/test.discovery.js | 53 ++++++++++++++++++++++++++++++ test/unit/test.discovery.v1.js | 31 +++++++++++++++++ 2 files changed, 84 insertions(+) diff --git a/test/integration/test.discovery.js b/test/integration/test.discovery.js index dddddabf8c..c56dee2068 100644 --- a/test/integration/test.discovery.js +++ b/test/integration/test.discovery.js @@ -22,12 +22,14 @@ describe('discovery_integration', function() { let configuration_id; let collection_id; let collection_id2; + let japanese_collection_id; before(function() { environment_id = auth.discovery.environment_id; configuration_id = auth.discovery.configuration_id; collection_id = auth.discovery.collection_id; collection_id2 = auth.discovery.collection_id_2; + japanese_collection_id = auth.discovery.japanese_collection_id; nock.enableNetConnect(); discovery = new DiscoveryV1( @@ -481,4 +483,55 @@ describe('discovery_integration', function() { }); }); }); + + describe('tokenization dictionary tests @slow', function() { + it('should createTokenizationDictionary', function(done) { + const params = { + environment_id, + collection_id: japanese_collection_id, + tokenization_rules: [ + { + text: 'すしネコ', + tokens: ['すし', 'ネコ'], + readings: ['寿司', 'ネコ'], + part_of_speech: 'カスタム名詞', + }, + ], + }; + + discovery.createTokenizationDictionary(params, (err, res) => { + assert.ifError(err); + assert(res.status); + assert(res.type); + done(); + }); + }); + + it('should getTokenizationDictionaryStatus', function(done) { + const params = { + environment_id, + collection_id: japanese_collection_id, + }; + + discovery.getTokenizationDictionaryStatus(params, (err, res) => { + assert.ifError(err); + assert(res.status); + assert(res.type); + done(); + }); + }); + + it('should 
deleteTokenizationDictionary', function(done) { + const params = { + environment_id, + collection_id: japanese_collection_id, + }; + + discovery.deleteTokenizationDictionary(params, (err, res) => { + assert.ifError(err); + assert.equal(res, ''); + done(); + }); + }); + }); }); diff --git a/test/unit/test.discovery.v1.js b/test/unit/test.discovery.v1.js index f75c7dfab6..435047e778 100644 --- a/test/unit/test.discovery.v1.js +++ b/test/unit/test.discovery.v1.js @@ -76,6 +76,8 @@ describe('discovery-v1', function() { events: '/v1/events', metrics: '/v1/metrics', logs: '/v1/logs', + tokenization_dictionaries: + '/v1/environments/env-guid/collections/col-guid/word_lists/tokenization_dictionary', }; it('should generate version was not specified (negative test)', function() { @@ -1008,6 +1010,35 @@ describe('discovery-v1', function() { assert.equal(req.method, 'GET'); }); }); + + describe('tokenization dictionary tests', function() { + it('createTokenizationDictionary', function() { + const req = discovery.createTokenizationDictionary(queryPayload, noop); + assert.equal( + req.uri.href, + service.url + paths.tokenization_dictionaries + '?version=' + service.version + ); + assert.equal(req.method, 'POST'); + }); + + it('deleteTokenizationDictionary', function() { + const req = discovery.deleteTokenizationDictionary(queryPayload, noop); + assert.equal( + req.uri.href, + service.url + paths.tokenization_dictionaries + '?version=' + service.version + ); + assert.equal(req.method, 'DELETE'); + }); + + it('getTokenizationDictionaryStatus', function() { + const req = discovery.getTokenizationDictionaryStatus(queryPayload, noop); + assert.equal( + req.uri.href, + service.url + paths.tokenization_dictionaries + '?version=' + service.version + ); + assert.equal(req.method, 'GET'); + }); + }); }); }); }); From c33404a7a2d0174afa99b7d43993fdaf299d730c Mon Sep 17 00:00:00 2001 From: semantic-release-bot Date: Tue, 30 Oct 2018 19:19:37 +0000 Subject: [PATCH 3/3] chore(release): 3.13.0 
[skip ci] # [3.13.0](https://github.com/watson-developer-cloud/node-sdk/compare/v3.12.0...v3.13.0) (2018-10-30) ### Features * **discovery:** add new methods: `createTokenizationDictionary`, `deleteTokenizationDictionary`, and `getTokenizationDictionaryStatus` ([d5ba660](https://github.com/watson-developer-cloud/node-sdk/commit/d5ba660)) --- CHANGELOG.md | 7 +++++++ package-lock.json | 2 +- package.json | 2 +- 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8d3021c4c2..91d5d5b11b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,10 @@ +# [3.13.0](https://github.com/watson-developer-cloud/node-sdk/compare/v3.12.0...v3.13.0) (2018-10-30) + + +### Features + +* **discovery:** add new methods: `createTokenizationDictionary`, `deleteTokenizationDictionary`, and `getTokenizationDictionaryStatus` ([d5ba660](https://github.com/watson-developer-cloud/node-sdk/commit/d5ba660)) + # [3.12.0](https://github.com/watson-developer-cloud/node-sdk/compare/v3.11.1...v3.12.0) (2018-10-10) diff --git a/package-lock.json b/package-lock.json index 68a167ce6f..ec6de9991c 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,6 +1,6 @@ { "name": "watson-developer-cloud", - "version": "3.12.0", + "version": "3.13.0", "lockfileVersion": 1, "requires": true, "dependencies": { diff --git a/package.json b/package.json index 1be53bc2dd..6c6936b8e9 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "watson-developer-cloud", - "version": "3.12.0", + "version": "3.13.0", "description": "Client library to use the IBM Watson Services and AlchemyAPI", "main": "./index", "repository": {