Skip to content

Commit

Permalink
Merge pull request watson-developer-cloud#806 from watson-developer-c…
Browse files Browse the repository at this point in the history
…loud/patch-stt-and-discovery

Patch stt and discovery
  • Loading branch information
dpopp07 authored Nov 26, 2018
2 parents 663a70f + bf2cd68 commit 5f1fab0
Show file tree
Hide file tree
Showing 6 changed files with 33 additions and 17 deletions.
16 changes: 12 additions & 4 deletions discovery/v1-generated.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4784,6 +4784,12 @@ namespace DiscoveryV1 {
[propName: string]: any;
}

/** An object specifying the concepts enrichment and related parameters. */
export interface NluEnrichmentConcepts {
/** The maximum number of concepts enrichments to extract from each instance of the specified field. */
limit?: number;
}

/** An object specifying the emotion detection enrichment and related parameters. */
export interface NluEnrichmentEmotion {
/** When `true`, emotion detection is performed on the entire field. */
Expand All @@ -4805,7 +4811,7 @@ namespace DiscoveryV1 {
/** When `true`, the types of mentions for each identified entity are recorded. The default is `false`. */
mention_types?: boolean;
/** When `true`, a list of sentence locations for each instance of each identified entity is recorded. The default is `false`. */
sentence_location?: boolean;
sentence_locations?: boolean;
/** The enrichment model to use with entity extraction. May be a custom model provided by Watson Knowledge Studio, the public model for use with Knowledge Graph `en-news`, or the default public model `alchemy`. */
model?: string;
}
Expand All @@ -4826,6 +4832,8 @@ namespace DiscoveryV1 {
semantic_roles?: NluEnrichmentSemanticRoles;
/** An object specifying the relations enrichment and related parameters. */
relations?: NluEnrichmentRelations;
/** An object specifying the concepts enrichment and related parameters. */
concepts?: NluEnrichmentConcepts;
}

/** An object specifying the Keyword enrichment and related parameters. */
Expand Down Expand Up @@ -5207,13 +5215,13 @@ namespace DiscoveryV1 {
/** An object defining a single tokenization rule. */
export interface TokenDictRule {
/** The string to tokenize. */
text?: string;
text: string;
/** Array of tokens that the `text` field is split into when found. */
tokens?: string[];
tokens: string[];
/** Array of tokens that represent the content of the `text` field in an alternate character set. */
readings?: string[];
/** The part of speech that the `text` string belongs to. For example `noun`. Custom parts of speech can be specified. */
part_of_speech?: string;
part_of_speech: string;
}

/** Object describing the current status of the tokenization dictionary. */
Expand Down
12 changes: 10 additions & 2 deletions lib/recognize-stream.ts
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@ const QUERY_PARAMS_ALLOWED = [
'model',
'X-Watson-Learning-Opt-Out',
'watson-token',
'language_customization_id',
'customization_id',
'acoustic_customization_id'
];
Expand Down Expand Up @@ -111,7 +112,8 @@ class RecognizeStream extends Duplex {
* @param {Boolean} [options.objectMode=false] - alias for options.readableObjectMode
* @param {Number} [options.X-Watson-Learning-Opt-Out=false] - set to true to opt-out of allowing Watson to use this request to improve its services
* @param {Boolean} [options.smart_formatting=false] - formats numeric values such as dates, times, currency, etc.
* @param {String} [options.customization_id] - Customization ID
* @param {String} [options.language_customization_id] - Language customization ID
* @param {String} [options.customization_id] - Customization ID (DEPRECATED)
* @param {String} [options.acoustic_customization_id] - Acoustic customization ID
* @param {IamTokenManagerV1} [options.token_manager] - Token manager for authenticating with IAM
* @param {string} [options.base_model_version] - The version of the specified base model that is to be used with recognition request or, for the **Create a session** method, with the new session.
Expand Down Expand Up @@ -201,8 +203,14 @@ class RecognizeStream extends Duplex {
options['X-Watson-Learning-Opt-Out'] = options['X-WDC-PL-OPT-OUT'];
}

// compatibility code for the deprecated param, customization_id
if (options.customization_id && !options.language_customization_id) {
options.language_customization_id = options.customization_id;
delete options.customization_id;
}

const queryParams = extend(
'customization_id' in options
'language_customization_id' in options
? pick(options, QUERY_PARAMS_ALLOWED)
: { model: 'en-US_BroadbandModel' },
pick(options, QUERY_PARAMS_ALLOWED)
Expand Down
12 changes: 6 additions & 6 deletions speech-to-text/v1-generated.ts
Original file line number Diff line number Diff line change
Expand Up @@ -206,7 +206,7 @@ class SpeechToTextV1 extends BaseService {
*
* @param {Object} params - The parameters to send to the service.
* @param {NodeJS.ReadableStream|FileObject|Buffer} params.audio - The audio to transcribe.
* @param {string} params.content_type - The type of the input.
* @param {string} [params.content_type] - The type of the input.
* @param {string} [params.model] - The identifier of the model that is to be used for the recognition request.
* @param {string} [params.language_customization_id] - The customization ID (GUID) of a custom language model that is
* to be used with the recognition request. The base model of the specified custom language model must match the model
Expand Down Expand Up @@ -292,7 +292,7 @@ class SpeechToTextV1 extends BaseService {
public recognize(params: SpeechToTextV1.RecognizeParams, callback?: SpeechToTextV1.Callback<SpeechToTextV1.SpeechRecognitionResults>): NodeJS.ReadableStream | void {
const _params = extend({}, params);
const _callback = (callback) ? callback : () => { /* noop */ };
const requiredParams = ['audio', 'content_type'];
const requiredParams = ['audio'];

const missingParams = getMissingParams(_params, requiredParams);
if (missingParams) {
Expand Down Expand Up @@ -509,7 +509,7 @@ class SpeechToTextV1 extends BaseService {
*
* @param {Object} params - The parameters to send to the service.
* @param {NodeJS.ReadableStream|FileObject|Buffer} params.audio - The audio to transcribe.
* @param {string} params.content_type - The type of the input.
* @param {string} [params.content_type] - The type of the input.
* @param {string} [params.model] - The identifier of the model that is to be used for the recognition request.
* @param {string} [params.callback_url] - A URL to which callback notifications are to be sent. The URL must already
* be successfully white-listed by using the **Register a callback** method. You can include the same callback URL
Expand Down Expand Up @@ -623,7 +623,7 @@ class SpeechToTextV1 extends BaseService {
public createJob(params: SpeechToTextV1.CreateJobParams, callback?: SpeechToTextV1.Callback<SpeechToTextV1.RecognitionJob>): NodeJS.ReadableStream | void {
const _params = extend({}, params);
const _callback = (callback) ? callback : () => { /* noop */ };
const requiredParams = ['audio', 'content_type'];
const requiredParams = ['audio'];

const missingParams = getMissingParams(_params, requiredParams);
if (missingParams) {
Expand Down Expand Up @@ -2748,7 +2748,7 @@ namespace SpeechToTextV1 {
/** The audio to transcribe. */
audio: NodeJS.ReadableStream|FileObject|Buffer;
/** The type of the input. */
content_type: RecognizeConstants.ContentType | string;
content_type?: RecognizeConstants.ContentType | string;
/** The identifier of the model that is to be used for the recognition request. */
model?: RecognizeConstants.Model | string;
/** The customization ID (GUID) of a custom language model that is to be used with the recognition request. The base model of the specified custom language model must match the model specified with the `model` parameter. You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom language model is used. See [Custom models](https://console.bluemix.net/docs/services/speech-to-text/input.html#custom). **Note:** Use this parameter instead of the deprecated `customization_id` parameter. */
Expand Down Expand Up @@ -2842,7 +2842,7 @@ namespace SpeechToTextV1 {
/** The audio to transcribe. */
audio: NodeJS.ReadableStream|FileObject|Buffer;
/** The type of the input. */
content_type: CreateJobConstants.ContentType | string;
content_type?: CreateJobConstants.ContentType | string;
/** The identifier of the model that is to be used for the recognition request. */
model?: CreateJobConstants.Model | string;
/** A URL to which callback notifications are to be sent. The URL must already be successfully white-listed by using the **Register a callback** method. You can include the same callback URL with any number of job creation requests. Omit the parameter to poll the service for job completion and results. Use the `user_token` parameter to specify a unique user-specified string with each job to differentiate the callback notifications for the jobs. */
Expand Down
5 changes: 3 additions & 2 deletions speech-to-text/v1.ts
Original file line number Diff line number Diff line change
Expand Up @@ -488,7 +488,7 @@ class SpeechToTextV1 extends GeneratedSpeechToTextV1 {
*
* @param {Object} params The parameters
* @param {Stream} params.audio - Audio to be recognized
* @param {String} params.content_type - Content-type
* @param {String} [params.content_type] - Content-type
* @param {String} [params.base_model_version]
* @param {Number} [params.max_alternatives]
* @param {Boolean} [params.timestamps]
Expand All @@ -501,14 +501,15 @@ class SpeechToTextV1 extends GeneratedSpeechToTextV1 {
* @param {Number} [params.word_alternatives_threshold]
* @param {Boolean} [params.profanity_filter]
* @param {Boolean} [params.smart_formatting]
* @param {String} [params.language_customization_id]
* @param {String} [params.customization_id]
* @param {String} [params.acoustic_customization_id]
* @param {Number} [params.customization_weight]
* @param {Boolean} [params.speaker_labels]
* @param {function} callback
*/
recognize(params, callback) {
const missingParams = getMissingParams(params, ['audio', 'content_type']);
const missingParams = getMissingParams(params, ['audio']);
if (missingParams) {
callback(missingParams);
return;
Expand Down
1 change: 0 additions & 1 deletion test/unit/speech-helpers.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,6 @@ describe('speech_to_text', function() {
it('should check no parameters provided', function() {
speech_to_text.recognize({}, missingParameter);
speech_to_text.recognize(null, missingParameter);
speech_to_text.recognize({ audio: 'foo' }, missingParameter);
speech_to_text.recognize({ content_type: 'bar' }, missingParameter);
speech_to_text.recognize({ continuous: 'false' }, missingParameter);
});
Expand Down
4 changes: 2 additions & 2 deletions test/unit/speech-to-text.v1.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -261,7 +261,7 @@ describe('recognize', () => {

test('should enforce required parameters', done => {
// required parameters for this method
const requiredParams = ['audio', 'content_type'];
const requiredParams = ['audio'];

speech_to_text.recognize({}, err => {
checkRequiredParamsHandling(requiredParams, err, missingParamsMock, createRequestMock);
Expand Down Expand Up @@ -511,7 +511,7 @@ describe('createJob', () => {

test('should enforce required parameters', done => {
// required parameters for this method
const requiredParams = ['audio', 'content_type'];
const requiredParams = ['audio'];

speech_to_text.createJob({}, err => {
checkRequiredParamsHandling(requiredParams, err, missingParamsMock, createRequestMock);
Expand Down

0 comments on commit 5f1fab0

Please sign in to comment.