Skip to content

Commit

Permalink
feat: address review comments
Browse files Browse the repository at this point in the history
- STT: Remove `strict` parameter from `trainLanguageModel` and `trainAcousticModel`
- STT: Remove `processing_metrics` and `processing_metrics_interval` parameters from POST `recognize`
- STT: Add `processing_metrics`, `processing_metrics_interval` and `audio_metrics` as query params in WebSocket `recognize`
- LT: Make `filename` a required parameter in `translateDocument`
  • Loading branch information
dpopp07 committed Jun 10, 2019
1 parent bca5f40 commit 5b09882
Show file tree
Hide file tree
Showing 7 changed files with 24 additions and 53 deletions.
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -159,7 +159,7 @@ You supply either an IAM service **API key** or an **access token**:

Like IAM, you can pass in credentials to let the SDK manage an access token for you or directly supply an access token to do it yourself.

If letting the SDK manage the token, you must set `authentication_type` to `icp4d`.
If you choose to let the SDK manage the token, `authentication_type` must be set to `icp4d`.

```js
const AssistantV1 = require('ibm-watson/assistant/v1');
Expand Down
6 changes: 5 additions & 1 deletion language-translator/v3.ts
Original file line number Diff line number Diff line change
Expand Up @@ -687,6 +687,7 @@ class LanguageTranslatorV3 extends BaseService {
* types](https://cloud.ibm.com/docs/services/language-translator?topic=language-translator-document-translator-tutorial#supported-file-formats)
*
* Maximum file size: **20 MB**.
 * @param {string} params.filename - The filename for the file.
* @param {string} [params.file_content_type] - The content type of file.
* @param {string} [params.model_id] - The model to use for translation. `model_id` or both `source` and `target` are
* required.
Expand All @@ -701,7 +702,7 @@ class LanguageTranslatorV3 extends BaseService {
public translateDocument(params: LanguageTranslatorV3.TranslateDocumentParams, callback?: LanguageTranslatorV3.Callback<LanguageTranslatorV3.DocumentStatus>): Promise<any> | void {
const _params = extend({}, params);
const _callback = callback;
const requiredParams = ['file'];
const requiredParams = ['file', 'filename'];

if (!_callback) {
return new Promise((resolve, reject) => {
Expand All @@ -718,6 +719,7 @@ class LanguageTranslatorV3 extends BaseService {
const formData = {
'file': {
data: _params.file,
filename: _params.filename,
contentType: _params.file_content_type
},
'model_id': _params.model_id,
Expand Down Expand Up @@ -934,6 +936,8 @@ namespace LanguageTranslatorV3 {
export interface TranslateDocumentParams {
/** The source file to translate. [Supported file types](https://cloud.ibm.com/docs/services/language-translator?topic=language-translator-document-translator-tutorial#supported-file-formats) Maximum file size: **20 MB**. */
file: NodeJS.ReadableStream|FileObject|Buffer;
    /** The filename for the file. */
filename: string;
/** The content type of file. */
file_content_type?: TranslateDocumentConstants.FileContentType | string;
/** The model to use for translation. `model_id` or both `source` and `target` are required. */
Expand Down
5 changes: 4 additions & 1 deletion lib/recognize-stream.ts
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,10 @@ const QUERY_PARAMS_ALLOWED = [
'language_customization_id',
'customization_id',
'acoustic_customization_id',
'access_token'
'access_token',
'processing_metrics',
'processing_metrics_interval',
'audio_metrics'
];

interface RecognizeStream extends Duplex {
Expand Down
35 changes: 2 additions & 33 deletions speech-to-text/v1-generated.ts
Original file line number Diff line number Diff line change
Expand Up @@ -349,20 +349,6 @@ class SpeechToTextV1 extends BaseService {
* **Note:** Applies to US English, Japanese, and Korean transcription only.
*
* See [Numeric redaction](https://cloud.ibm.com/docs/services/speech-to-text?topic=speech-to-text-output#redaction).
* @param {boolean} [params.processing_metrics] - If `true`, requests processing metrics about the service's
* transcription of the input audio. The service returns processing metrics at the interval specified by the
* `processing_metrics_interval` parameter. It also returns processing metrics for transcription events, for example,
* for final and interim results. By default, the service returns no processing metrics.
* @param {number} [params.processing_metrics_interval] - Specifies the interval in real wall-clock seconds at which
* the service is to return processing metrics. The parameter is ignored unless the `processing_metrics` parameter is
* set to `true`.
*
* The parameter accepts a minimum value of 0.1 seconds. The level of precision is not restricted, so you can specify
* values such as 0.25 and 0.125.
*
* The service does not impose a maximum value. If you want to receive processing metrics only for transcription
* events instead of at periodic intervals, set the value to a large number. If the value is larger than the duration
* of the audio, the service returns processing metrics only for transcription events.
* @param {boolean} [params.audio_metrics] - If `true`, requests detailed information about the signal characteristics
* of the input audio. The service returns audio metrics with the final transcription results. By default, the service
* returns no audio metrics.
Expand Down Expand Up @@ -410,8 +396,6 @@ class SpeechToTextV1 extends BaseService {
'customization_id': _params.customization_id,
'grammar_name': _params.grammar_name,
'redaction': _params.redaction,
'processing_metrics': _params.processing_metrics,
'processing_metrics_interval': _params.processing_metrics_interval,
'audio_metrics': _params.audio_metrics
};

Expand Down Expand Up @@ -1416,10 +1400,6 @@ class SpeechToTextV1 extends BaseService {
*
* The value that you assign is used for all recognition requests that use the model. You can override it for any
* recognition request by specifying a customization weight for that request.
* @param {boolean} [params.strict] - If `false`, allows training of the custom language model to proceed as long as
* the model contains at least one valid resource. The method returns an array of `TrainingWarning` objects that lists
* any invalid resources. By default (`true`), training of a custom language model fails (status code 400) if the
* model contains one or more invalid resources (corpus files, grammar files, or custom words).
* @param {OutgoingHttpHeaders} [params.headers] - Custom request headers
* @param {Function} [callback] - The callback that handles the response.
* @returns {Promise<any>|void}
Expand All @@ -1445,7 +1425,6 @@ class SpeechToTextV1 extends BaseService {
const query = {
'word_type_to_add': _params.word_type_to_add,
'customization_weight': _params.customization_weight,
'strict': _params.strict
};

const path = {
Expand Down Expand Up @@ -2900,9 +2879,8 @@ class SpeechToTextV1 extends BaseService {
* * The custom model contains less than 10 minutes or more than 200 hours of audio data.
* * You passed an incompatible custom language model with the `custom_language_model_id` query parameter. Both custom
* models must be based on the same version of the same base model.
* * The custom model contains one or more invalid audio resources. You can correct the invalid audio resources or set
* the `strict` parameter to `false` to exclude the invalid resources from the training. The model must contain at
* least one valid resource for training to succeed.
* * The custom model contains one or more invalid audio resources. You can correct the invalid audio resources.
* The model must contain at least one valid resource for training to succeed.
*
* @param {Object} params - The parameters to send to the service.
* @param {string} params.customization_id - The customization ID (GUID) of the custom acoustic model that is to be
Expand All @@ -2913,10 +2891,6 @@ class SpeechToTextV1 extends BaseService {
* verbatim transcriptions of the audio resources or that contains words that are relevant to the contents of the
* audio resources. The custom language model must be based on the same version of the same base model as the custom
* acoustic model. The credentials specified with the request must own both custom models.
* @param {boolean} [params.strict] - If `false`, allows training of the custom acoustic model to proceed as long as
* the model contains at least one valid audio resource. The method returns an array of `TrainingWarning` objects that
* lists any invalid resources. By default (`true`), training of a custom acoustic model fails (status code 400) if
* the model contains one or more invalid audio resources.
* @param {OutgoingHttpHeaders} [params.headers] - Custom request headers
* @param {Function} [callback] - The callback that handles the response.
* @returns {Promise<any>|void}
Expand All @@ -2941,7 +2915,6 @@ class SpeechToTextV1 extends BaseService {

const query = {
'custom_language_model_id': _params.custom_language_model_id,
'strict': _params.strict
};

const path = {
Expand Down Expand Up @@ -3916,8 +3889,6 @@ namespace SpeechToTextV1 {
word_type_to_add?: TrainLanguageModelConstants.WordTypeToAdd | string;
/** Specifies a customization weight for the custom language model. The customization weight tells the service how much weight to give to words from the custom language model compared to those from the base model for speech recognition. Specify a value between 0.0 and 1.0; the default is 0.3. The default value yields the best performance in general. Assign a higher value if your audio makes frequent use of OOV words from the custom model. Use caution when setting the weight: a higher value can improve the accuracy of phrases from the custom model's domain, but it can negatively affect performance on non-domain phrases. The value that you assign is used for all recognition requests that use the model. You can override it for any recognition request by specifying a customization weight for that request. */
customization_weight?: number;
/** If `false`, allows training of the custom language model to proceed as long as the model contains at least one valid resource. The method returns an array of `TrainingWarning` objects that lists any invalid resources. By default (`true`), training of a custom language model fails (status code 400) if the model contains one or more invalid resources (corpus files, grammar files, or custom words). */
strict?: boolean;
headers?: OutgoingHttpHeaders;
return_response?: boolean;
}
Expand Down Expand Up @@ -4185,8 +4156,6 @@ namespace SpeechToTextV1 {
customization_id: string;
/** The customization ID (GUID) of a custom language model that is to be used during training of the custom acoustic model. Specify a custom language model that has been trained with verbatim transcriptions of the audio resources or that contains words that are relevant to the contents of the audio resources. The custom language model must be based on the same version of the same base model as the custom acoustic model. The credentials specified with the request must own both custom models. */
custom_language_model_id?: string;
/** If `false`, allows training of the custom acoustic model to proceed as long as the model contains at least one valid audio resource. The method returns an array of `TrainingWarning` objects that lists any invalid resources. By default (`true`), training of a custom acoustic model fails (status code 400) if the model contains one or more invalid audio resources. */
strict?: boolean;
headers?: OutgoingHttpHeaders;
return_response?: boolean;
}
Expand Down
6 changes: 3 additions & 3 deletions test/integration/language_translator.v3.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -113,7 +113,7 @@ describe('language_translator_integration', function() {

it('should get translated document status', function(done) {
if (!document_id) {
// We cannot run this test when model creation failed.
// We cannot run this test when document upload failed.
return done();
}

Expand All @@ -130,7 +130,7 @@ describe('language_translator_integration', function() {

it('should get translated document', function(done) {
if (!document_id) {
// We cannot run this test when model creation failed.
// We cannot run this test when document upload failed.
return done();
}

Expand All @@ -147,7 +147,7 @@ describe('language_translator_integration', function() {

it('should delete document', function(done) {
if (!document_id) {
// We cannot run this test when model creation failed.
// We cannot run this test when document upload failed.
return done();
}

Expand Down
11 changes: 9 additions & 2 deletions test/unit/language-translator.v3.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -1072,13 +1072,15 @@ describe('translateDocument', () => {
test('should pass the right params to createRequest', () => {
// parameters
const file = 'fake_file';
const filename = 'fake_filename';
const file_content_type = 'fake_file_content_type';
const model_id = 'fake_model_id';
const source = 'fake_source';
const target = 'fake_target';
const document_id = 'fake_document_id';
const params = {
file,
filename,
file_content_type,
model_id,
source,
Expand All @@ -1100,6 +1102,7 @@ describe('translateDocument', () => {
const expectedContentType = 'multipart/form-data';
checkMediaHeaders(createRequestMock, expectedAccept, expectedContentType);
expect(options.formData['file'].data).toEqual(file);
expect(options.formData['file'].filename).toEqual(filename);
expect(options.formData['file'].contentType).toEqual(file_content_type);
expect(options.formData['model_id']).toEqual(model_id);
expect(options.formData['source']).toEqual(source);
Expand All @@ -1110,10 +1113,12 @@ describe('translateDocument', () => {
test('should prioritize user-given headers', () => {
// parameters
const file = 'fake_file';
const filename = 'fake_filename';
const accept = 'fake/header';
const contentType = 'fake/header';
const params = {
file,
filename,
headers: {
Accept: accept,
'Content-Type': contentType,
Expand All @@ -1127,8 +1132,10 @@ describe('translateDocument', () => {
test('should return a promise when no callback is given', () => {
// parameters
const file = 'fake_file';
const filename = 'fake_filename';
const params = {
file,
filename,
};

// invoke method
Expand All @@ -1154,7 +1161,7 @@ describe('translateDocument', () => {

test('should enforce required parameters', done => {
// required parameters for this method
const requiredParams = ['file'];
const requiredParams = ['file', 'filename'];

languageTranslator.translateDocument({}, err => {
checkRequiredParamsHandling(requiredParams, err, missingParamsMock, createRequestMock);
Expand All @@ -1164,7 +1171,7 @@ describe('translateDocument', () => {

test('should reject promise when required params are not given', done => {
// required parameters for this method
const requiredParams = ['file'];
const requiredParams = ['file', 'filename'];

const translateDocumentPromise = languageTranslator.translateDocument();
expectToBePromise(translateDocumentPromise);
Expand Down
12 changes: 0 additions & 12 deletions test/unit/speech-to-text.v1.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -242,8 +242,6 @@ describe('recognize', () => {
const customization_id = 'fake_customization_id';
const grammar_name = 'fake_grammar_name';
const redaction = 'fake_redaction';
const processing_metrics = 'fake_processing_metrics';
const processing_metrics_interval = 'fake_processing_metrics_interval';
const audio_metrics = 'fake_audio_metrics';
const content_type = 'fake_content_type';
const params = {
Expand All @@ -266,8 +264,6 @@ describe('recognize', () => {
customization_id,
grammar_name,
redaction,
processing_metrics,
processing_metrics_interval,
audio_metrics,
content_type,
};
Expand Down Expand Up @@ -305,8 +301,6 @@ describe('recognize', () => {
expect(options.qs['customization_id']).toEqual(customization_id);
expect(options.qs['grammar_name']).toEqual(grammar_name);
expect(options.qs['redaction']).toEqual(redaction);
expect(options.qs['processing_metrics']).toEqual(processing_metrics);
expect(options.qs['processing_metrics_interval']).toEqual(processing_metrics_interval);
expect(options.qs['audio_metrics']).toEqual(audio_metrics);
});

Expand Down Expand Up @@ -1498,12 +1492,10 @@ describe('trainLanguageModel', () => {
const customization_id = 'fake_customization_id';
const word_type_to_add = 'fake_word_type_to_add';
const customization_weight = 'fake_customization_weight';
const strict = 'fake_strict';
const params = {
customization_id,
word_type_to_add,
customization_weight,
strict,
};

// invoke method
Expand All @@ -1521,7 +1513,6 @@ describe('trainLanguageModel', () => {
checkMediaHeaders(createRequestMock, expectedAccept, expectedContentType);
expect(options.qs['word_type_to_add']).toEqual(word_type_to_add);
expect(options.qs['customization_weight']).toEqual(customization_weight);
expect(options.qs['strict']).toEqual(strict);
expect(options.path['customization_id']).toEqual(customization_id);
});

Expand Down Expand Up @@ -3587,11 +3578,9 @@ describe('trainAcousticModel', () => {
// parameters
const customization_id = 'fake_customization_id';
const custom_language_model_id = 'fake_custom_language_model_id';
const strict = 'fake_strict';
const params = {
customization_id,
custom_language_model_id,
strict,
};

// invoke method
Expand All @@ -3608,7 +3597,6 @@ describe('trainAcousticModel', () => {
const expectedContentType = undefined;
checkMediaHeaders(createRequestMock, expectedAccept, expectedContentType);
expect(options.qs['custom_language_model_id']).toEqual(custom_language_model_id);
expect(options.qs['strict']).toEqual(strict);
expect(options.path['customization_id']).toEqual(customization_id);
});

Expand Down

0 comments on commit 5b09882

Please sign in to comment.