From d64c06ab843c5d91d8b2a3e9d398580aeaa4bd42 Mon Sep 17 00:00:00 2001 From: Dustin Popp Date: Tue, 20 Nov 2018 16:15:39 -0500 Subject: [PATCH 1/3] fix(speech-to-text): `content_type` is no longer a required parameter for `recognize()` or `createJob()` (it is now optional) --- speech-to-text/v1-generated.ts | 12 ++++++------ speech-to-text/v1.ts | 4 ++-- test/unit/speech-helpers.test.js | 1 - test/unit/speech-to-text.v1.test.js | 4 ++-- 4 files changed, 10 insertions(+), 11 deletions(-) diff --git a/speech-to-text/v1-generated.ts b/speech-to-text/v1-generated.ts index a5539f8b80..ccb2872ffa 100644 --- a/speech-to-text/v1-generated.ts +++ b/speech-to-text/v1-generated.ts @@ -206,7 +206,7 @@ class SpeechToTextV1 extends BaseService { * * @param {Object} params - The parameters to send to the service. * @param {NodeJS.ReadableStream|FileObject|Buffer} params.audio - The audio to transcribe. - * @param {string} params.content_type - The type of the input. + * @param {string} [params.content_type] - The type of the input. * @param {string} [params.model] - The identifier of the model that is to be used for the recognition request. * @param {string} [params.language_customization_id] - The customization ID (GUID) of a custom language model that is * to be used with the recognition request. The base model of the specified custom language model must match the model @@ -292,7 +292,7 @@ class SpeechToTextV1 extends BaseService { public recognize(params: SpeechToTextV1.RecognizeParams, callback?: SpeechToTextV1.Callback): NodeJS.ReadableStream | void { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; - const requiredParams = ['audio', 'content_type']; + const requiredParams = ['audio']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { @@ -509,7 +509,7 @@ class SpeechToTextV1 extends BaseService { * * @param {Object} params - The parameters to send to the service. 
* @param {NodeJS.ReadableStream|FileObject|Buffer} params.audio - The audio to transcribe. - * @param {string} params.content_type - The type of the input. + * @param {string} [params.content_type] - The type of the input. * @param {string} [params.model] - The identifier of the model that is to be used for the recognition request. * @param {string} [params.callback_url] - A URL to which callback notifications are to be sent. The URL must already * be successfully white-listed by using the **Register a callback** method. You can include the same callback URL @@ -623,7 +623,7 @@ class SpeechToTextV1 extends BaseService { public createJob(params: SpeechToTextV1.CreateJobParams, callback?: SpeechToTextV1.Callback): NodeJS.ReadableStream | void { const _params = extend({}, params); const _callback = (callback) ? callback : () => { /* noop */ }; - const requiredParams = ['audio', 'content_type']; + const requiredParams = ['audio']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { @@ -2748,7 +2748,7 @@ namespace SpeechToTextV1 { /** The audio to transcribe. */ audio: NodeJS.ReadableStream|FileObject|Buffer; /** The type of the input. */ - content_type: RecognizeConstants.ContentType | string; + content_type?: RecognizeConstants.ContentType | string; /** The identifier of the model that is to be used for the recognition request. */ model?: RecognizeConstants.Model | string; /** The customization ID (GUID) of a custom language model that is to be used with the recognition request. The base model of the specified custom language model must match the model specified with the `model` parameter. You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom language model is used. See [Custom models](https://console.bluemix.net/docs/services/speech-to-text/input.html#custom). **Note:** Use this parameter instead of the deprecated `customization_id` parameter. 
*/ @@ -2842,7 +2842,7 @@ namespace SpeechToTextV1 { /** The audio to transcribe. */ audio: NodeJS.ReadableStream|FileObject|Buffer; /** The type of the input. */ - content_type: CreateJobConstants.ContentType | string; + content_type?: CreateJobConstants.ContentType | string; /** The identifier of the model that is to be used for the recognition request. */ model?: CreateJobConstants.Model | string; /** A URL to which callback notifications are to be sent. The URL must already be successfully white-listed by using the **Register a callback** method. You can include the same callback URL with any number of job creation requests. Omit the parameter to poll the service for job completion and results. Use the `user_token` parameter to specify a unique user-specified string with each job to differentiate the callback notifications for the jobs. */ diff --git a/speech-to-text/v1.ts b/speech-to-text/v1.ts index cc1bbfbfd0..2f3b18e084 100644 --- a/speech-to-text/v1.ts +++ b/speech-to-text/v1.ts @@ -488,7 +488,7 @@ class SpeechToTextV1 extends GeneratedSpeechToTextV1 { * * @param {Object} params The parameters * @param {Stream} params.audio - Audio to be recognized - * @param {String} params.content_type - Content-type + * @param {String} [params.content_type] - Content-type * @param {String} [params.base_model_version] * @param {Number} [params.max_alternatives] * @param {Boolean} [params.timestamps] @@ -508,7 +508,7 @@ class SpeechToTextV1 extends GeneratedSpeechToTextV1 { * @param {function} callback */ recognize(params, callback) { - const missingParams = getMissingParams(params, ['audio', 'content_type']); + const missingParams = getMissingParams(params, ['audio']); if (missingParams) { callback(missingParams); return; diff --git a/test/unit/speech-helpers.test.js b/test/unit/speech-helpers.test.js index bef65301a5..653403de1f 100644 --- a/test/unit/speech-helpers.test.js +++ b/test/unit/speech-helpers.test.js @@ -34,7 +34,6 @@ describe('speech_to_text', function() { 
it('should check no parameters provided', function() { speech_to_text.recognize({}, missingParameter); speech_to_text.recognize(null, missingParameter); - speech_to_text.recognize({ audio: 'foo' }, missingParameter); speech_to_text.recognize({ content_type: 'bar' }, missingParameter); speech_to_text.recognize({ continuous: 'false' }, missingParameter); }); diff --git a/test/unit/speech-to-text.v1.test.js b/test/unit/speech-to-text.v1.test.js index bf251f30dc..b887e50dad 100644 --- a/test/unit/speech-to-text.v1.test.js +++ b/test/unit/speech-to-text.v1.test.js @@ -261,7 +261,7 @@ describe('recognize', () => { test('should enforce required parameters', done => { // required parameters for this method - const requiredParams = ['audio', 'content_type']; + const requiredParams = ['audio']; speech_to_text.recognize({}, err => { checkRequiredParamsHandling(requiredParams, err, missingParamsMock, createRequestMock); @@ -511,7 +511,7 @@ describe('createJob', () => { test('should enforce required parameters', done => { // required parameters for this method - const requiredParams = ['audio', 'content_type']; + const requiredParams = ['audio']; speech_to_text.createJob({}, err => { checkRequiredParamsHandling(requiredParams, err, missingParamsMock, createRequestMock); From d1fb9a96abb128ab1ed4ba03aa4027daf446e1f6 Mon Sep 17 00:00:00 2001 From: Dustin Popp Date: Tue, 20 Nov 2018 17:17:58 -0500 Subject: [PATCH 2/3] fix(discovery): update mis-defined parameters to match the service --- discovery/v1-generated.ts | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/discovery/v1-generated.ts b/discovery/v1-generated.ts index f48a1bc6c9..36ee4e645a 100644 --- a/discovery/v1-generated.ts +++ b/discovery/v1-generated.ts @@ -4784,6 +4784,12 @@ namespace DiscoveryV1 { [propName: string]: any; } + /** An object specifying the concepts enrichment and related parameters. 
*/ + export interface NluEnrichmentConcepts { + /** The maximum number of concepts enrichments to extract from each instance of the specified field. */ + limit?: number; + } + /** An object specifying the emotion detection enrichment and related parameters. */ export interface NluEnrichmentEmotion { /** When `true`, emotion detection is performed on the entire field. */ @@ -4805,7 +4811,7 @@ namespace DiscoveryV1 { /** When `true`, the types of mentions for each idetifieid entity is recorded. The default is `false`. */ mention_types?: boolean; /** When `true`, a list of sentence locations for each instance of each identified entity is recorded. The default is `false`. */ - sentence_location?: boolean; + sentence_locations?: boolean; /** The enrichement model to use with entity extraction. May be a custom model provided by Watson Knowledge Studio, the public model for use with Knowledge Graph `en-news`, or the default public model `alchemy`. */ model?: string; } @@ -4826,6 +4832,8 @@ namespace DiscoveryV1 { semantic_roles?: NluEnrichmentSemanticRoles; /** An object specifying the relations enrichment and related parameters. */ relations?: NluEnrichmentRelations; + /** An object specifying the concepts enrichment and related parameters. */ + concepts?: NluEnrichmentConcepts; } /** An object specifying the Keyword enrichment and related parameters. */ @@ -5207,13 +5215,13 @@ namespace DiscoveryV1 { /** An object defining a single tokenizaion rule. */ export interface TokenDictRule { /** The string to tokenize. */ - text?: string; + text: string; /** Array of tokens that the `text` field is split into when found. */ - tokens?: string[]; + tokens: string[]; /** Array of tokens that represent the content of the `text` field in an alternate character set. */ readings?: string[]; /** The part of speech that the `text` string belongs to. For example `noun`. Custom parts of speech can be specified. 
*/ - part_of_speech?: string; + part_of_speech: string; } /** Object describing the current status of the tokenization dictionary. */ From bf2cd6898ef8f430a618578c058c05951ac0b5ca Mon Sep 17 00:00:00 2001 From: Dustin Popp Date: Tue, 20 Nov 2018 17:34:35 -0500 Subject: [PATCH 3/3] fix(speech-to-text): add support for `language_customization_id` parameter to the WebSockets method, deprecate `customization_id` --- lib/recognize-stream.ts | 12 ++++++++++-- speech-to-text/v1.ts | 1 + 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/lib/recognize-stream.ts b/lib/recognize-stream.ts index 85a528a384..f15b43937a 100644 --- a/lib/recognize-stream.ts +++ b/lib/recognize-stream.ts @@ -43,6 +43,7 @@ const QUERY_PARAMS_ALLOWED = [ 'model', 'X-Watson-Learning-Opt-Out', 'watson-token', + 'language_customization_id', 'customization_id', 'acoustic_customization_id' ]; @@ -111,7 +112,8 @@ class RecognizeStream extends Duplex { * @param {Boolean} [options.objectMode=false] - alias for options.readableObjectMode * @param {Number} [options.X-Watson-Learning-Opt-Out=false] - set to true to opt-out of allowing Watson to use this request to improve it's services * @param {Boolean} [options.smart_formatting=false] - formats numeric values such as dates, times, currency, etc. - * @param {String} [options.customization_id] - Customization ID + * @param {String} [options.language_customization_id] - Language customization ID + * @param {String} [options.customization_id] - Customization ID (DEPRECATED) * @param {String} [options.acoustic_customization_id] - Acoustic customization ID * @param {IamTokenManagerV1} [options.token_manager] - Token manager for authenticating with IAM * @param {string} [options.base_model_version] - The version of the specified base model that is to be used with recognition request or, for the **Create a session** method, with the new session. 
@@ -201,8 +203,14 @@ class RecognizeStream extends Duplex { options['X-Watson-Learning-Opt-Out'] = options['X-WDC-PL-OPT-OUT']; } + // compatibility code for the deprecated param, customization_id + if (options.customization_id && !options.language_customization_id) { + options.language_customization_id = options.customization_id; + delete options.customization_id; + } + const queryParams = extend( - 'customization_id' in options + 'language_customization_id' in options ? pick(options, QUERY_PARAMS_ALLOWED) : { model: 'en-US_BroadbandModel' }, pick(options, QUERY_PARAMS_ALLOWED) diff --git a/speech-to-text/v1.ts b/speech-to-text/v1.ts index 2f3b18e084..dc111943aa 100644 --- a/speech-to-text/v1.ts +++ b/speech-to-text/v1.ts @@ -501,6 +501,7 @@ class SpeechToTextV1 extends GeneratedSpeechToTextV1 { * @param {Number} [params.word_alternatives_threshold] * @param {Boolean} [params.profanity_filter] * @param {Boolean} [params.smart_formatting] + * @param {String} [params.language_customization_id] * @param {String} [params.customization_id] * @param {String} [params.acoustic_customization_id] * @param {Number} [params.customization_weight]