From c9ee501c1b80965139ee930b3c091ce1a34553b6 Mon Sep 17 00:00:00 2001 From: Ammar Dodin Date: Thu, 30 Nov 2017 19:49:46 -0500 Subject: [PATCH] :unamused: regenerated services and updated tests --- .gitignore | 3 +- conversation/v1-generated.ts | 2802 ++++++++++------- discovery/v1-generated.ts | 1357 ++++---- language-translator/v2-generated.ts | 164 +- natural-language-classifier/v1-generated.ts | 202 +- natural-language-understanding/v1.ts | 281 +- personality-insights/v3-generated.ts | 8 +- personality-insights/v3.ts | 8 +- speech-to-text/v1-generated.ts | 229 +- speech-to-text/v1.ts | 5 +- test/unit/test.adapter.conversation.v1.js | 12 +- .../test.adapter.personality_insights.v3.js | 22 +- test/unit/test.adapter.tone_analyzer.v3.js | 30 +- test/unit/test.conversation.v1.js | 10 - test/unit/test.personality_insights.v3.js | 18 +- test/unit/test.text_to_speech.v1.js | 2 +- test/unit/test.tone_analyzer.v3.js | 32 +- text-to-speech/v1-generated.ts | 940 +++--- text-to-speech/v1.ts | 22 +- tone-analyzer/v3-generated.ts | 154 +- visual-recognition/v3-generated.ts | 230 +- 21 files changed, 4043 insertions(+), 2488 deletions(-) diff --git a/.gitignore b/.gitignore index edc530123f..c5cebd7199 100644 --- a/.gitignore +++ b/.gitignore @@ -26,4 +26,5 @@ tone-analyzer/*.js visual-recognition/*.js text-to-speech/*.js speech-to-text/*.js -authorization/*.js \ No newline at end of file +authorization/*.js +index.js \ No newline at end of file diff --git a/conversation/v1-generated.ts b/conversation/v1-generated.ts index 6083613882..4b79cf84d5 100644 --- a/conversation/v1-generated.ts +++ b/conversation/v1-generated.ts @@ -24,7 +24,7 @@ import { BaseService } from '../lib/base_service'; * The IBM Watson Conversation service combines machine learning, natural language understanding, and integrated dialog tools to create conversation flows between your apps and your users. 
*/ -class ConversationV1 extends BaseService { +class GeneratedConversationV1 extends BaseService { name: string; // set by prototype to 'conversation' version: string; // set by prototype to 'v1' @@ -32,17 +32,17 @@ class ConversationV1 extends BaseService { static VERSION_DATE_2017_05_26: string = '2017-05-26'; static VERSION_DATE_2017_04_21: string = '2017-04-21'; - + static VERSION_DATE_2017_02_03: string = '2017-02-03'; - + static VERSION_DATE_2016_09_20: string = '2016-09-20'; - + static VERSION_DATE_2016_07_11: string = '2016-07-11'; static URL: string = 'https://gateway.watsonplatform.net/conversation/api'; /** - * Construct a ConversationV1 object. + * Construct a GeneratedConversationV1 object. * * @param {Object} options - Options for the service. * @param {String} options.version_date - The API version date to use with the service, in "YYYY-MM-DD" format. Whenever the API is changed in a backwards incompatible way, a new minor version of the API is released. The service uses the API version for the date you specify, or the most recent version before that date. Note that you should not programmatically specify the current date at runtime, in case the API has been updated since your application's release. Instead, specify a version date that is compatible with your application, and don't change it until your application is ready for a later version. @@ -53,148 +53,167 @@ class ConversationV1 extends BaseService { * @param {Object} [options.headers] - Default headers that shall be included with every request to the service. * @param {Object} [options.headers.X-Watson-Learning-Opt-Out] - Set to `true` to opt-out of data collection. By default, all IBM Watson services log requests and their results. Logging is done only to improve the services for future users. The logged data is not shared or made public. 
If you are concerned with protecting the privacy of users' personal information or otherwise do not want your requests to be logged, you can opt out of logging. * @constructor - * @returns {ConversationV1} + * @returns {GeneratedConversationV1} * @throws {Error} */ - constructor(options: ConversationV1.Options) { + constructor(options: GeneratedConversationV1.Options) { super(options); // check if 'version_date' was provided if (typeof this._options.version_date === 'undefined') { - throw new Error('Argument error: version_date was not specified, use ConversationV1.VERSION_DATE_2017_05_26'); + throw new Error('Argument error: version_date was not specified'); } this._options.qs.version = options.version_date; } /************************* - * counterexamples + * workspaces ************************/ /** - * Create counterexample. + * Create workspace. * - * Add a new counterexample to a workspace. Counterexamples are examples that have been marked as irrelevant input. + * Create a workspace based on component objects. You must provide workspace components defining the content of the new workspace. * - * @param {Object} params - The parameters to send to the service. - * @param {string} params.workspace_id - The workspace ID. - * @param {string} params.text - The text of a user input marked as irrelevant input. + * @param {Object} [params] - The parameters to send to the service. + * @param {string} [params.name] - The name of the workspace. + * @param {string} [params.description] - The description of the workspace. + * @param {string} [params.language] - The language of the workspace. + * @param {CreateIntent[]} [params.intents] - An array of objects defining the intents for the workspace. + * @param {CreateEntity[]} [params.entities] - An array of objects defining the entities for the workspace. + * @param {CreateDialogNode[]} [params.dialog_nodes] - An array of objects defining the nodes in the workspace dialog. 
+ * @param {CreateCounterexample[]} [params.counterexamples] - An array of objects defining input examples that have been marked as irrelevant input. + * @param {Object} [params.metadata] - Any metadata related to the workspace. + * @param {boolean} [params.learning_opt_out] - Whether training data from the workspace can be used by IBM for general service improvements. `true` indicates that workspace training data is not to be used. * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - createCounterexample(params: ConversationV1.CreateCounterexampleParams, callback?: ConversationV1.Callback): ReadableStream | void { - const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; - const requiredParams = ['workspace_id', 'text']; - const missingParams = getMissingParams(_params, requiredParams); - if (missingParams) { - return _callback(missingParams); - } - const body = { - text: _params.text - }; - const path = { - workspace_id: _params.workspace_id + createWorkspace( + params?: GeneratedConversationV1.CreateWorkspaceParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { + const _params = + typeof params === 'function' && !callback ? {} : extend({}, params); + const _callback = + typeof params === 'function' && !callback + ? params + : callback ? 
callback : () => {}; + const body = { + name: _params.name, + description: _params.description, + language: _params.language, + intents: _params.intents, + entities: _params.entities, + dialog_nodes: _params.dialog_nodes, + counterexamples: _params.counterexamples, + metadata: _params.metadata, + learning_opt_out: _params.learning_opt_out }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/counterexamples', + url: '/v1/workspaces', method: 'POST', json: true, - body: body, - path: path, + body: body }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', - 'content-type': 'application/json', + Accept: 'application/json', + 'Content-Type': 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** - * Delete counterexample. + * Delete workspace. * - * Delete a counterexample from a workspace. Counterexamples are examples that have been marked as irrelevant input. + * Delete a workspace from the service instance. * * @param {Object} params - The parameters to send to the service. * @param {string} params.workspace_id - The workspace ID. - * @param {string} params.text - The text of a user input counterexample (for example, `What are you wearing?`). * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - deleteCounterexample(params: ConversationV1.DeleteCounterexampleParams, callback?: ConversationV1.Callback): ReadableStream | void { + deleteWorkspace( + params: GeneratedConversationV1.DeleteWorkspaceParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; - const requiredParams = ['workspace_id', 'text']; + const _callback = callback ? 
callback : () => {}; + const requiredParams = ['workspace_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { - workspace_id: _params.workspace_id, - text: _params.text + const path = { + workspace_id: _params.workspace_id }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/counterexamples/{text}', + url: '/v1/workspaces/{workspace_id}', method: 'DELETE', - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', + Accept: 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** - * Get counterexample. + * Get information about a workspace. * - * Get information about a counterexample. Counterexamples are examples that have been marked as irrelevant input. + * Get information about a workspace, optionally including all workspace content. * * @param {Object} params - The parameters to send to the service. * @param {string} params.workspace_id - The workspace ID. - * @param {string} params.text - The text of a user input counterexample (for example, `What are you wearing?`). + * @param {boolean} [params.export] - Whether to include all element content in the returned data. If export=`false`, the returned data includes only information about the element itself. If export=`true`, all content, including subelements, is included. The default value is `false`. * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - getCounterexample(params: ConversationV1.GetCounterexampleParams, callback?: ConversationV1.Callback): ReadableStream | void { + getWorkspace( + params: GeneratedConversationV1.GetWorkspaceParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? 
callback : () => {}; - const requiredParams = ['workspace_id', 'text']; + const _callback = callback ? callback : () => {}; + const requiredParams = ['workspace_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { - workspace_id: _params.workspace_id, - text: _params.text + const query = { + export: _params.export + }; + const path = { + workspace_id: _params.workspace_id }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/counterexamples/{text}', + url: '/v1/workspaces/{workspace_id}', method: 'GET', - path: path, + qs: query, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', + Accept: 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** - * List counterexamples. + * List workspaces. * - * List the counterexamples for a workspace. Counterexamples are examples that have been marked as irrelevant input. + * List the workspaces associated with a Conversation service instance. * - * @param {Object} params - The parameters to send to the service. - * @param {string} params.workspace_id - The workspace ID. + * @param {Object} [params] - The parameters to send to the service. * @param {number} [params.page_limit] - The number of records to return in each page of results. The default page limit is 100. * @param {boolean} [params.include_count] - Whether to include information about the number of records returned. * @param {string} [params.sort] - Sorts the response according to the value of the specified property, in ascending or descending order. @@ -202,491 +221,302 @@ class ConversationV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. 
* @returns {ReadableStream|void} */ - listCounterexamples(params: ConversationV1.ListCounterexamplesParams, callback?: ConversationV1.Callback): ReadableStream | void { - const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; - const requiredParams = ['workspace_id']; - const missingParams = getMissingParams(_params, requiredParams); - if (missingParams) { - return _callback(missingParams); - } - const query = { + listWorkspaces( + params?: GeneratedConversationV1.ListWorkspacesParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { + const _params = + typeof params === 'function' && !callback ? {} : extend({}, params); + const _callback = + typeof params === 'function' && !callback + ? params + : callback ? callback : () => {}; + const query = { page_limit: _params.page_limit, include_count: _params.include_count, sort: _params.sort, cursor: _params.cursor }; - const path = { - workspace_id: _params.workspace_id - }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/counterexamples', + url: '/v1/workspaces', method: 'GET', - qs: query, - path: path, + qs: query }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', + Accept: 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** - * Update counterexample. + * Update workspace. * - * Update the text of a counterexample. Counterexamples are examples that have been marked as irrelevant input. + * Update an existing workspace with new or modified data. You must provide component objects defining the content of the updated workspace. * * @param {Object} params - The parameters to send to the service. * @param {string} params.workspace_id - The workspace ID. - * @param {string} params.text - The text of a user input counterexample (for example, `What are you wearing?`). 
- * @param {string} [params.new_text] - The text of the example to be marked as irrelevant input. + * @param {string} [params.name] - The name of the workspace. + * @param {string} [params.description] - The description of the workspace. + * @param {string} [params.language] - The language of the workspace. + * @param {CreateIntent[]} [params.intents] - An array of objects defining the intents for the workspace. + * @param {CreateEntity[]} [params.entities] - An array of objects defining the entities for the workspace. + * @param {CreateDialogNode[]} [params.dialog_nodes] - An array of objects defining the nodes in the workspace dialog. + * @param {CreateCounterexample[]} [params.counterexamples] - An array of objects defining input examples that have been marked as irrelevant input. + * @param {Object} [params.metadata] - Any metadata related to the workspace. + * @param {boolean} [params.learning_opt_out] - Whether training data from the workspace can be used by IBM for general service improvements. `true` indicates that workspace training data is not to be used. * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - updateCounterexample(params: ConversationV1.UpdateCounterexampleParams, callback?: ConversationV1.Callback): ReadableStream | void { + updateWorkspace( + params: GeneratedConversationV1.UpdateWorkspaceParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; - const requiredParams = ['workspace_id', 'text']; + const _callback = callback ? 
callback : () => {}; + const requiredParams = ['workspace_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const body = { - text: _params.new_text + const body = { + name: _params.name, + description: _params.description, + language: _params.language, + intents: _params.intents, + entities: _params.entities, + dialog_nodes: _params.dialog_nodes, + counterexamples: _params.counterexamples, + metadata: _params.metadata, + learning_opt_out: _params.learning_opt_out }; - const path = { - workspace_id: _params.workspace_id, - text: _params.text + const path = { + workspace_id: _params.workspace_id }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/counterexamples/{text}', + url: '/v1/workspaces/{workspace_id}', method: 'POST', json: true, body: body, - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', - 'content-type': 'application/json', + Accept: 'application/json', + 'Content-Type': 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /************************* - * dialogNodes + * message ************************/ /** - * Create dialog node. - * - * Create a dialog node. + * Get a response to a user's input. * * @param {Object} params - The parameters to send to the service. - * @param {string} params.workspace_id - The workspace ID. - * @param {string} params.dialog_node - The dialog node ID. - * @param {string} [params.description] - The description of the dialog node. - * @param {string} [params.conditions] - The condition that will trigger the dialog node. - * @param {string} [params.parent] - The ID of the parent dialog node (if any). - * @param {string} [params.previous_sibling] - The previous dialog node. - * @param {Object} [params.output] - The output of the dialog node. 
- * @param {Object} [params.context] - The context for the dialog node. - * @param {Object} [params.metadata] - The metadata for the dialog node. - * @param {DialogNodeNextStep} [params.next_step] - The next step to execute following this dialog node. - * @param {DialogNodeAction[]} [params.actions] - The actions for the dialog node. - * @param {string} [params.title] - The alias used to identify the dialog node. - * @param {string} [params.node_type] - How the dialog node is processed. - * @param {string} [params.event_name] - How an `event_handler` node is processed. - * @param {string} [params.variable] - The location in the dialog context where output is stored. + * @param {string} params.workspace_id - Unique identifier of the workspace. + * @param {InputData} [params.input] - An input object that includes the input text. + * @param {boolean} [params.alternate_intents] - Whether to return more than one intent. Set to `true` to return all matching intents. + * @param {Context} [params.context] - State information for the conversation. Continue a conversation by including the context object from the previous response. + * @param {RuntimeEntity[]} [params.entities] - Include the entities from the previous response when they do not need to change and to prevent Watson from trying to identify them. + * @param {RuntimeIntent[]} [params.intents] - An array of name-confidence pairs for the user input. Include the intents from the previous response when they do not need to change and to prevent Watson from trying to identify them. + * @param {OutputData} [params.output] - System output. Include the output from the request when you have several requests within the same Dialog turn to pass back in the intermediate information. * @param {Function} [callback] - The callback that handles the response. 
* @returns {ReadableStream|void} */ - createDialogNode(params: ConversationV1.CreateDialogNodeParams, callback?: ConversationV1.Callback): ReadableStream | void { + message( + params: GeneratedConversationV1.MessageParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; - const requiredParams = ['workspace_id', 'dialog_node']; + const _callback = callback ? callback : () => {}; + const requiredParams = ['workspace_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const body = { - dialog_node: _params.dialog_node, - description: _params.description, - conditions: _params.conditions, - parent: _params.parent, - previous_sibling: _params.previous_sibling, - output: _params.output, + const body = { + input: _params.input, + alternate_intents: _params.alternate_intents, context: _params.context, - metadata: _params.metadata, - next_step: _params.next_step, - actions: _params.actions, - title: _params.title, - type: _params.node_type, - event_name: _params.event_name, - variable: _params.variable + entities: _params.entities, + intents: _params.intents, + output: _params.output }; - const path = { + const path = { workspace_id: _params.workspace_id }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/dialog_nodes', + url: '/v1/workspaces/{workspace_id}/message', method: 'POST', json: true, body: body, - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', - 'content-type': 'application/json', + Accept: 'application/json', + 'Content-Type': 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } + + /************************* + * intents + ************************/ /** - * Delete dialog node. + * Create intent. 
* - * Delete a dialog node from the workspace. + * Create a new intent. * * @param {Object} params - The parameters to send to the service. * @param {string} params.workspace_id - The workspace ID. - * @param {string} params.dialog_node - The dialog node ID (for example, `get_order`). + * @param {string} params.intent - The name of the intent. + * @param {string} [params.description] - The description of the intent. + * @param {CreateExample[]} [params.examples] - An array of user input examples. * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - deleteDialogNode(params: ConversationV1.DeleteDialogNodeParams, callback?: ConversationV1.Callback): ReadableStream | void { + createIntent( + params: GeneratedConversationV1.CreateIntentParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; - const requiredParams = ['workspace_id', 'dialog_node']; + const _callback = callback ? 
callback : () => {}; + const requiredParams = ['workspace_id', 'intent']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { - workspace_id: _params.workspace_id, - dialog_node: _params.dialog_node + const body = { + intent: _params.intent, + description: _params.description, + examples: _params.examples + }; + const path = { + workspace_id: _params.workspace_id }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/dialog_nodes/{dialog_node}', - method: 'DELETE', - path: path, + url: '/v1/workspaces/{workspace_id}/intents', + method: 'POST', + json: true, + body: body, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', + Accept: 'application/json', + 'Content-Type': 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** - * Get dialog node. + * Delete intent. * - * Get information about a dialog node. + * Delete an intent from a workspace. * * @param {Object} params - The parameters to send to the service. * @param {string} params.workspace_id - The workspace ID. - * @param {string} params.dialog_node - The dialog node ID (for example, `get_order`). + * @param {string} params.intent - The intent name (for example, `pizza_order`). * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - getDialogNode(params: ConversationV1.GetDialogNodeParams, callback?: ConversationV1.Callback): ReadableStream | void { + deleteIntent( + params: GeneratedConversationV1.DeleteIntentParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; - const requiredParams = ['workspace_id', 'dialog_node']; + const _callback = callback ? 
callback : () => {}; + const requiredParams = ['workspace_id', 'intent']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { + const path = { workspace_id: _params.workspace_id, - dialog_node: _params.dialog_node + intent: _params.intent }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/dialog_nodes/{dialog_node}', - method: 'GET', - path: path, + url: '/v1/workspaces/{workspace_id}/intents/{intent}', + method: 'DELETE', + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', + Accept: 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** - * List dialog nodes. + * Get intent. * - * List the dialog nodes in the workspace. + * Get information about an intent, optionally including all intent content. * * @param {Object} params - The parameters to send to the service. * @param {string} params.workspace_id - The workspace ID. - * @param {number} [params.page_limit] - The number of records to return in each page of results. The default page limit is 100. - * @param {boolean} [params.include_count] - Whether to include information about the number of records returned. - * @param {string} [params.sort] - Sorts the response according to the value of the specified property, in ascending or descending order. - * @param {string} [params.cursor] - A token identifying the last value from the previous page of results. + * @param {string} params.intent - The intent name (for example, `pizza_order`). + * @param {boolean} [params.export] - Whether to include all element content in the returned data. If export=`false`, the returned data includes only information about the element itself. If export=`true`, all content, including subelements, is included. The default value is `false`. 
* @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - listDialogNodes(params: ConversationV1.ListDialogNodesParams, callback?: ConversationV1.Callback): ReadableStream | void { + getIntent( + params: GeneratedConversationV1.GetIntentParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; - const requiredParams = ['workspace_id']; + const _callback = callback ? callback : () => {}; + const requiredParams = ['workspace_id', 'intent']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const query = { - page_limit: _params.page_limit, - include_count: _params.include_count, - sort: _params.sort, - cursor: _params.cursor + const query = { + export: _params.export }; - const path = { - workspace_id: _params.workspace_id + const path = { + workspace_id: _params.workspace_id, + intent: _params.intent }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/dialog_nodes', + url: '/v1/workspaces/{workspace_id}/intents/{intent}', method: 'GET', qs: query, - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', + Accept: 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** - * Update dialog node. + * List intents. * - * Update information for a dialog node. - * - * @param {Object} params - The parameters to send to the service. - * @param {string} params.workspace_id - The workspace ID. - * @param {string} params.dialog_node - The dialog node ID (for example, `get_order`). - * @param {string} params.new_dialog_node - The dialog node ID. - * @param {string} [params.new_description] - The description of the dialog node. 
- * @param {string} [params.new_conditions] - The condition that will trigger the dialog node. - * @param {string} [params.new_parent] - The ID of the parent dialog node (if any). - * @param {string} [params.new_previous_sibling] - The previous dialog node. - * @param {Object} [params.new_output] - The output of the dialog node. - * @param {Object} [params.new_context] - The context for the dialog node. - * @param {Object} [params.new_metadata] - The metadata for the dialog node. - * @param {DialogNodeNextStep} [params.new_next_step] - The next step to execute following this dialog node. - * @param {string} [params.new_title] - The alias used to identify the dialog node. - * @param {string} [params.new_type] - How the node is processed. - * @param {string} [params.new_event_name] - How an `event_handler` node is processed. - * @param {string} [params.new_variable] - The location in the dialog context where output is stored. - * @param {DialogNodeAction[]} [params.new_actions] - The actions for the dialog node. - * @param {Function} [callback] - The callback that handles the response. - * @returns {ReadableStream|void} - */ - updateDialogNode(params: ConversationV1.UpdateDialogNodeParams, callback?: ConversationV1.Callback): ReadableStream | void { - const _params = extend({}, params); - const _callback = (callback) ? 
callback : () => {}; - const requiredParams = ['workspace_id', 'dialog_node', 'new_dialog_node']; - const missingParams = getMissingParams(_params, requiredParams); - if (missingParams) { - return _callback(missingParams); - } - const body = { - dialog_node: _params.new_dialog_node, - description: _params.new_description, - conditions: _params.new_conditions, - parent: _params.new_parent, - previous_sibling: _params.new_previous_sibling, - output: _params.new_output, - context: _params.new_context, - metadata: _params.new_metadata, - next_step: _params.new_next_step, - title: _params.new_title, - type: _params.new_type, - event_name: _params.new_event_name, - variable: _params.new_variable, - actions: _params.new_actions - }; - const path = { - workspace_id: _params.workspace_id, - dialog_node: _params.dialog_node - }; - const parameters = { - options: { - url: '/v1/workspaces/{workspace_id}/dialog_nodes/{dialog_node}', - method: 'POST', - json: true, - body: body, - path: path, - }, - defaultOptions: extend(true, this._options, { - headers: { - 'accept': 'application/json', - 'content-type': 'application/json', - } - }) - }; - return createRequest(parameters, _callback); - }; - - /************************* - * entities - ************************/ - - /** - * Create entity. - * - * Create a new entity. - * - * @param {Object} params - The parameters to send to the service. - * @param {string} params.workspace_id - The workspace ID. - * @param {string} params.entity - The name of the entity. - * @param {string} [params.description] - The description of the entity. - * @param {Object} [params.metadata] - Any metadata related to the value. - * @param {CreateValue[]} [params.values] - An array of entity values. - * @param {boolean} [params.fuzzy_match] - Whether to use fuzzy matching for the entity. - * @param {Function} [callback] - The callback that handles the response. 
- * @returns {ReadableStream|void} - */ - createEntity(params: ConversationV1.CreateEntityParams, callback?: ConversationV1.Callback): ReadableStream | void { - const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; - const requiredParams = ['workspace_id', 'entity']; - const missingParams = getMissingParams(_params, requiredParams); - if (missingParams) { - return _callback(missingParams); - } - const body = { - entity: _params.entity, - description: _params.description, - metadata: _params.metadata, - values: _params.values, - fuzzy_match: _params.fuzzy_match - }; - const path = { - workspace_id: _params.workspace_id - }; - const parameters = { - options: { - url: '/v1/workspaces/{workspace_id}/entities', - method: 'POST', - json: true, - body: body, - path: path, - }, - defaultOptions: extend(true, this._options, { - headers: { - 'accept': 'application/json', - 'content-type': 'application/json', - } - }) - }; - return createRequest(parameters, _callback); - }; - - /** - * Delete entity. - * - * Delete an entity from a workspace. - * - * @param {Object} params - The parameters to send to the service. - * @param {string} params.workspace_id - The workspace ID. - * @param {string} params.entity - The name of the entity. - * @param {Function} [callback] - The callback that handles the response. - * @returns {ReadableStream|void} - */ - deleteEntity(params: ConversationV1.DeleteEntityParams, callback?: ConversationV1.Callback): ReadableStream | void { - const _params = extend({}, params); - const _callback = (callback) ? 
callback : () => {}; - const requiredParams = ['workspace_id', 'entity']; - const missingParams = getMissingParams(_params, requiredParams); - if (missingParams) { - return _callback(missingParams); - } - const path = { - workspace_id: _params.workspace_id, - entity: _params.entity - }; - const parameters = { - options: { - url: '/v1/workspaces/{workspace_id}/entities/{entity}', - method: 'DELETE', - path: path, - }, - defaultOptions: extend(true, this._options, { - headers: { - 'accept': 'application/json', - } - }) - }; - return createRequest(parameters, _callback); - }; - - /** - * Get entity. - * - * Get information about an entity, optionally including all entity content. - * - * @param {Object} params - The parameters to send to the service. - * @param {string} params.workspace_id - The workspace ID. - * @param {string} params.entity - The name of the entity. - * @param {boolean} [params.export] - Whether to include all element content in the returned data. If export=`false`, the returned data includes only information about the element itself. If export=`true`, all content, including subelements, is included. The default value is `false`. - * @param {Function} [callback] - The callback that handles the response. - * @returns {ReadableStream|void} - */ - getEntity(params: ConversationV1.GetEntityParams, callback?: ConversationV1.Callback): ReadableStream | void { - const _params = extend({}, params); - const _callback = (callback) ? 
callback : () => {}; - const requiredParams = ['workspace_id', 'entity']; - const missingParams = getMissingParams(_params, requiredParams); - if (missingParams) { - return _callback(missingParams); - } - const query = { - export: _params.export - }; - const path = { - workspace_id: _params.workspace_id, - entity: _params.entity - }; - const parameters = { - options: { - url: '/v1/workspaces/{workspace_id}/entities/{entity}', - method: 'GET', - qs: query, - path: path, - }, - defaultOptions: extend(true, this._options, { - headers: { - 'accept': 'application/json', - } - }) - }; - return createRequest(parameters, _callback); - }; - - /** - * List entities. - * - * List the entities for a workspace. + * List the intents for a workspace. * * @param {Object} params - The parameters to send to the service. * @param {string} params.workspace_id - The workspace ID. @@ -698,92 +528,94 @@ class ConversationV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - listEntities(params: ConversationV1.ListEntitiesParams, callback?: ConversationV1.Callback): ReadableStream | void { + listIntents( + params: GeneratedConversationV1.ListIntentsParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; + const _callback = callback ? 
callback : () => {}; const requiredParams = ['workspace_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const query = { + const query = { export: _params.export, page_limit: _params.page_limit, include_count: _params.include_count, sort: _params.sort, cursor: _params.cursor }; - const path = { + const path = { workspace_id: _params.workspace_id }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/entities', + url: '/v1/workspaces/{workspace_id}/intents', method: 'GET', qs: query, - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', + Accept: 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** - * Update entity. + * Update intent. * - * Update an existing entity with new or modified data. + * Update an existing intent with new or modified data. You must provide data defining the content of the updated intent. * * @param {Object} params - The parameters to send to the service. * @param {string} params.workspace_id - The workspace ID. - * @param {string} params.entity - The name of the entity. - * @param {string} [params.new_entity] - The name of the entity. - * @param {string} [params.new_description] - The description of the entity. - * @param {Object} [params.new_metadata] - Any metadata related to the entity. - * @param {boolean} [params.new_fuzzy_match] - Whether to use fuzzy matching for the entity. - * @param {CreateValue[]} [params.new_values] - An array of entity values. + * @param {string} params.intent - The intent name (for example, `pizza_order`). + * @param {string} [params.new_intent] - The name of the intent. + * @param {string} [params.new_description] - The description of the intent. + * @param {CreateExample[]} [params.new_examples] - An array of user input examples for the intent. 
* @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - updateEntity(params: ConversationV1.UpdateEntityParams, callback?: ConversationV1.Callback): ReadableStream | void { + updateIntent( + params: GeneratedConversationV1.UpdateIntentParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; - const requiredParams = ['workspace_id', 'entity']; + const _callback = callback ? callback : () => {}; + const requiredParams = ['workspace_id', 'intent']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const body = { - entity: _params.new_entity, + const body = { + intent: _params.new_intent, description: _params.new_description, - metadata: _params.new_metadata, - fuzzy_match: _params.new_fuzzy_match, - values: _params.new_values + examples: _params.new_examples }; - const path = { + const path = { workspace_id: _params.workspace_id, - entity: _params.entity + intent: _params.intent }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/entities/{entity}', + url: '/v1/workspaces/{workspace_id}/intents/{intent}', method: 'POST', json: true, body: body, - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', - 'content-type': 'application/json', + Accept: 'application/json', + 'Content-Type': 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /************************* * examples @@ -801,18 +633,21 @@ class ConversationV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. 
* @returns {ReadableStream|void} */ - createExample(params: ConversationV1.CreateExampleParams, callback?: ConversationV1.Callback): ReadableStream | void { + createExample( + params: GeneratedConversationV1.CreateExampleParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; + const _callback = callback ? callback : () => {}; const requiredParams = ['workspace_id', 'intent', 'text']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const body = { + const body = { text: _params.text }; - const path = { + const path = { workspace_id: _params.workspace_id, intent: _params.intent }; @@ -822,17 +657,17 @@ class ConversationV1 extends BaseService { method: 'POST', json: true, body: body, - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', - 'content-type': 'application/json', + Accept: 'application/json', + 'Content-Type': 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** * Delete user input example. @@ -846,15 +681,18 @@ class ConversationV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - deleteExample(params: ConversationV1.DeleteExampleParams, callback?: ConversationV1.Callback): ReadableStream | void { + deleteExample( + params: GeneratedConversationV1.DeleteExampleParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; + const _callback = callback ? 
callback : () => {}; const requiredParams = ['workspace_id', 'intent', 'text']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { + const path = { workspace_id: _params.workspace_id, intent: _params.intent, text: _params.text @@ -863,16 +701,16 @@ class ConversationV1 extends BaseService { options: { url: '/v1/workspaces/{workspace_id}/intents/{intent}/examples/{text}', method: 'DELETE', - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', + Accept: 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** * Get user input example. @@ -886,15 +724,18 @@ class ConversationV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - getExample(params: ConversationV1.GetExampleParams, callback?: ConversationV1.Callback): ReadableStream | void { + getExample( + params: GeneratedConversationV1.GetExampleParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; + const _callback = callback ? 
callback : () => {}; const requiredParams = ['workspace_id', 'intent', 'text']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { + const path = { workspace_id: _params.workspace_id, intent: _params.intent, text: _params.text @@ -903,16 +744,16 @@ class ConversationV1 extends BaseService { options: { url: '/v1/workspaces/{workspace_id}/intents/{intent}/examples/{text}', method: 'GET', - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', + Accept: 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** * List user input examples. @@ -929,21 +770,24 @@ class ConversationV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - listExamples(params: ConversationV1.ListExamplesParams, callback?: ConversationV1.Callback): ReadableStream | void { + listExamples( + params: GeneratedConversationV1.ListExamplesParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; + const _callback = callback ? 
callback : () => {}; const requiredParams = ['workspace_id', 'intent']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const query = { + const query = { page_limit: _params.page_limit, include_count: _params.include_count, sort: _params.sort, cursor: _params.cursor }; - const path = { + const path = { workspace_id: _params.workspace_id, intent: _params.intent }; @@ -952,16 +796,16 @@ class ConversationV1 extends BaseService { url: '/v1/workspaces/{workspace_id}/intents/{intent}/examples', method: 'GET', qs: query, - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', + Accept: 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** * Update user input example. @@ -976,18 +820,21 @@ class ConversationV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - updateExample(params: ConversationV1.UpdateExampleParams, callback?: ConversationV1.Callback): ReadableStream | void { + updateExample( + params: GeneratedConversationV1.UpdateExampleParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; + const _callback = callback ? 
callback : () => {}; const requiredParams = ['workspace_id', 'intent', 'text']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const body = { + const body = { text: _params.new_text }; - const path = { + const path = { workspace_id: _params.workspace_id, intent: _params.intent, text: _params.text @@ -998,154 +845,167 @@ class ConversationV1 extends BaseService { method: 'POST', json: true, body: body, - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', - 'content-type': 'application/json', + Accept: 'application/json', + 'Content-Type': 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /************************* - * intents + * entities ************************/ /** - * Create intent. + * Create entity. * - * Create a new intent. + * Create a new entity. * * @param {Object} params - The parameters to send to the service. * @param {string} params.workspace_id - The workspace ID. - * @param {string} params.intent - The name of the intent. - * @param {string} [params.description] - The description of the intent. - * @param {CreateExample[]} [params.examples] - An array of user input examples. + * @param {string} params.entity - The name of the entity. + * @param {string} [params.description] - The description of the entity. + * @param {Object} [params.metadata] - Any metadata related to the value. + * @param {CreateValue[]} [params.values] - An array of entity values. + * @param {boolean} [params.fuzzy_match] - Whether to use fuzzy matching for the entity. * @param {Function} [callback] - The callback that handles the response. 
* @returns {ReadableStream|void} */ - createIntent(params: ConversationV1.CreateIntentParams, callback?: ConversationV1.Callback): ReadableStream | void { + createEntity( + params: GeneratedConversationV1.CreateEntityParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; - const requiredParams = ['workspace_id', 'intent']; + const _callback = callback ? callback : () => {}; + const requiredParams = ['workspace_id', 'entity']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const body = { - intent: _params.intent, + const body = { + entity: _params.entity, description: _params.description, - examples: _params.examples + metadata: _params.metadata, + values: _params.values, + fuzzy_match: _params.fuzzy_match }; - const path = { + const path = { workspace_id: _params.workspace_id }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/intents', + url: '/v1/workspaces/{workspace_id}/entities', method: 'POST', json: true, body: body, - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', - 'content-type': 'application/json', + Accept: 'application/json', + 'Content-Type': 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** - * Delete intent. + * Delete entity. * - * Delete an intent from a workspace. + * Delete an entity from a workspace. * * @param {Object} params - The parameters to send to the service. * @param {string} params.workspace_id - The workspace ID. - * @param {string} params.intent - The intent name (for example, `pizza_order`). + * @param {string} params.entity - The name of the entity. * @param {Function} [callback] - The callback that handles the response. 
* @returns {ReadableStream|void} */ - deleteIntent(params: ConversationV1.DeleteIntentParams, callback?: ConversationV1.Callback): ReadableStream | void { + deleteEntity( + params: GeneratedConversationV1.DeleteEntityParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; - const requiredParams = ['workspace_id', 'intent']; + const _callback = callback ? callback : () => {}; + const requiredParams = ['workspace_id', 'entity']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { + const path = { workspace_id: _params.workspace_id, - intent: _params.intent + entity: _params.entity }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/intents/{intent}', + url: '/v1/workspaces/{workspace_id}/entities/{entity}', method: 'DELETE', - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', + Accept: 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** - * Get intent. + * Get entity. * - * Get information about an intent, optionally including all intent content. + * Get information about an entity, optionally including all entity content. * * @param {Object} params - The parameters to send to the service. * @param {string} params.workspace_id - The workspace ID. - * @param {string} params.intent - The intent name (for example, `pizza_order`). + * @param {string} params.entity - The name of the entity. * @param {boolean} [params.export] - Whether to include all element content in the returned data. If export=`false`, the returned data includes only information about the element itself. If export=`true`, all content, including subelements, is included. The default value is `false`. 
* @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - getIntent(params: ConversationV1.GetIntentParams, callback?: ConversationV1.Callback): ReadableStream | void { + getEntity( + params: GeneratedConversationV1.GetEntityParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; - const requiredParams = ['workspace_id', 'intent']; + const _callback = callback ? callback : () => {}; + const requiredParams = ['workspace_id', 'entity']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const query = { + const query = { export: _params.export }; - const path = { + const path = { workspace_id: _params.workspace_id, - intent: _params.intent + entity: _params.entity }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/intents/{intent}', + url: '/v1/workspaces/{workspace_id}/entities/{entity}', method: 'GET', qs: query, - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', + Accept: 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** - * List intents. + * List entities. * - * List the intents for a workspace. + * List the entities for a workspace. * * @param {Object} params - The parameters to send to the service. * @param {string} params.workspace_id - The workspace ID. @@ -1157,194 +1017,361 @@ class ConversationV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. 
* @returns {ReadableStream|void} */ - listIntents(params: ConversationV1.ListIntentsParams, callback?: ConversationV1.Callback): ReadableStream | void { + listEntities( + params: GeneratedConversationV1.ListEntitiesParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; + const _callback = callback ? callback : () => {}; const requiredParams = ['workspace_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const query = { + const query = { export: _params.export, page_limit: _params.page_limit, include_count: _params.include_count, sort: _params.sort, cursor: _params.cursor }; - const path = { + const path = { workspace_id: _params.workspace_id }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/intents', + url: '/v1/workspaces/{workspace_id}/entities', method: 'GET', qs: query, - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', + Accept: 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** - * Update intent. + * Update entity. * - * Update an existing intent with new or modified data. You must provide data defining the content of the updated intent. + * Update an existing entity with new or modified data. * * @param {Object} params - The parameters to send to the service. * @param {string} params.workspace_id - The workspace ID. - * @param {string} params.intent - The intent name (for example, `pizza_order`). - * @param {string} [params.new_intent] - The name of the intent. - * @param {string} [params.new_description] - The description of the intent. - * @param {CreateExample[]} [params.new_examples] - An array of user input examples for the intent. 
+ * @param {string} params.entity - The name of the entity. + * @param {string} [params.new_entity] - The name of the entity. + * @param {string} [params.new_description] - The description of the entity. + * @param {Object} [params.new_metadata] - Any metadata related to the entity. + * @param {boolean} [params.new_fuzzy_match] - Whether to use fuzzy matching for the entity. + * @param {CreateValue[]} [params.new_values] - An array of entity values. * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - updateIntent(params: ConversationV1.UpdateIntentParams, callback?: ConversationV1.Callback): ReadableStream | void { + updateEntity( + params: GeneratedConversationV1.UpdateEntityParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; - const requiredParams = ['workspace_id', 'intent']; + const _callback = callback ? callback : () => {}; + const requiredParams = ['workspace_id', 'entity']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const body = { - intent: _params.new_intent, + const body = { + entity: _params.new_entity, description: _params.new_description, - examples: _params.new_examples + metadata: _params.new_metadata, + fuzzy_match: _params.new_fuzzy_match, + values: _params.new_values }; - const path = { + const path = { workspace_id: _params.workspace_id, - intent: _params.intent + entity: _params.entity }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/intents/{intent}', + url: '/v1/workspaces/{workspace_id}/entities/{entity}', method: 'POST', json: true, body: body, - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', - 'content-type': 'application/json', + Accept: 
'application/json', + 'Content-Type': 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /************************* - * logs + * values ************************/ /** - * List log events in a workspace. + * Add entity value. * - * List log events in a specific workspace. + * Create a new value for an entity. * * @param {Object} params - The parameters to send to the service. * @param {string} params.workspace_id - The workspace ID. - * @param {string} [params.sort] - Sorts the response according to the value of the specified property, in ascending or descending order. - * @param {string} [params.filter] - A cacheable parameter that limits the results to those matching the specified filter. For more information, see the [documentation](https://console.bluemix.net/docs/services/conversation/filter-reference.html#filter-query-syntax). + * @param {string} params.entity - The name of the entity. + * @param {string} params.value - The text of the entity value. + * @param {Object} [params.metadata] - Any metadata related to the entity value. + * @param {string[]} [params.synonyms] - An array of synonyms for the entity value. + * @param {string[]} [params.patterns] - An array of patterns for the entity value. A pattern is specified as a regular expression. + * @param {string} [params.value_type] - Specifies the type of value (`synonyms` or `patterns`). The default value is `synonyms`. + * @param {Function} [callback] - The callback that handles the response. + * @returns {ReadableStream|void} + */ + createValue( + params: GeneratedConversationV1.CreateValueParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { + const _params = extend({}, params); + const _callback = callback ? 
callback : () => {}; + const requiredParams = ['workspace_id', 'entity', 'value']; + const missingParams = getMissingParams(_params, requiredParams); + if (missingParams) { + return _callback(missingParams); + } + const body = { + value: _params.value, + metadata: _params.metadata, + synonyms: _params.synonyms, + patterns: _params.patterns, + type: _params.value_type + }; + const path = { + workspace_id: _params.workspace_id, + entity: _params.entity + }; + const parameters = { + options: { + url: '/v1/workspaces/{workspace_id}/entities/{entity}/values', + method: 'POST', + json: true, + body: body, + path: path + }, + defaultOptions: extend(true, {}, this._options, { + headers: { + Accept: 'application/json', + 'Content-Type': 'application/json' + } + }) + }; + return createRequest(parameters, _callback); + } + + /** + * Delete entity value. + * + * Delete a value for an entity. + * + * @param {Object} params - The parameters to send to the service. + * @param {string} params.workspace_id - The workspace ID. + * @param {string} params.entity - The name of the entity. + * @param {string} params.value - The text of the entity value. + * @param {Function} [callback] - The callback that handles the response. + * @returns {ReadableStream|void} + */ + deleteValue( + params: GeneratedConversationV1.DeleteValueParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { + const _params = extend({}, params); + const _callback = callback ? 
callback : () => {}; + const requiredParams = ['workspace_id', 'entity', 'value']; + const missingParams = getMissingParams(_params, requiredParams); + if (missingParams) { + return _callback(missingParams); + } + const path = { + workspace_id: _params.workspace_id, + entity: _params.entity, + value: _params.value + }; + const parameters = { + options: { + url: '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}', + method: 'DELETE', + path: path + }, + defaultOptions: extend(true, {}, this._options, { + headers: { + Accept: 'application/json' + } + }) + }; + return createRequest(parameters, _callback); + } + + /** + * Get entity value. + * + * Get information about an entity value. + * + * @param {Object} params - The parameters to send to the service. + * @param {string} params.workspace_id - The workspace ID. + * @param {string} params.entity - The name of the entity. + * @param {string} params.value - The text of the entity value. + * @param {boolean} [params.export] - Whether to include all element content in the returned data. If export=`false`, the returned data includes only information about the element itself. If export=`true`, all content, including subelements, is included. The default value is `false`. + * @param {Function} [callback] - The callback that handles the response. + * @returns {ReadableStream|void} + */ + getValue( + params: GeneratedConversationV1.GetValueParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { + const _params = extend({}, params); + const _callback = callback ? 
callback : () => {}; + const requiredParams = ['workspace_id', 'entity', 'value']; + const missingParams = getMissingParams(_params, requiredParams); + if (missingParams) { + return _callback(missingParams); + } + const query = { + export: _params.export + }; + const path = { + workspace_id: _params.workspace_id, + entity: _params.entity, + value: _params.value + }; + const parameters = { + options: { + url: '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}', + method: 'GET', + qs: query, + path: path + }, + defaultOptions: extend(true, {}, this._options, { + headers: { + Accept: 'application/json' + } + }) + }; + return createRequest(parameters, _callback); + } + + /** + * List entity values. + * + * List the values for an entity. + * + * @param {Object} params - The parameters to send to the service. + * @param {string} params.workspace_id - The workspace ID. + * @param {string} params.entity - The name of the entity. + * @param {boolean} [params.export] - Whether to include all element content in the returned data. If export=`false`, the returned data includes only information about the element itself. If export=`true`, all content, including subelements, is included. The default value is `false`. * @param {number} [params.page_limit] - The number of records to return in each page of results. The default page limit is 100. + * @param {boolean} [params.include_count] - Whether to include information about the number of records returned. + * @param {string} [params.sort] - Sorts the response according to the value of the specified property, in ascending or descending order. * @param {string} [params.cursor] - A token identifying the last value from the previous page of results. * @param {Function} [callback] - The callback that handles the response. 
* @returns {ReadableStream|void} */ - listLogs(params: ConversationV1.ListLogsParams, callback?: ConversationV1.Callback): ReadableStream | void { + listValues( + params: GeneratedConversationV1.ListValuesParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; - const requiredParams = ['workspace_id']; + const _callback = callback ? callback : () => {}; + const requiredParams = ['workspace_id', 'entity']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const query = { - sort: _params.sort, - filter: _params.filter, + const query = { + export: _params.export, page_limit: _params.page_limit, + include_count: _params.include_count, + sort: _params.sort, cursor: _params.cursor }; - const path = { - workspace_id: _params.workspace_id + const path = { + workspace_id: _params.workspace_id, + entity: _params.entity }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/logs', + url: '/v1/workspaces/{workspace_id}/entities/{entity}/values', method: 'GET', qs: query, - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', + Accept: 'application/json' } }) }; return createRequest(parameters, _callback); - }; - - /************************* - * message - ************************/ + } /** - * Get a response to a user's input. + * Update entity value. + * + * Update the content of a value for an entity. * * @param {Object} params - The parameters to send to the service. - * @param {string} params.workspace_id - Unique identifier of the workspace. - * @param {InputData} [params.input] - An input object that includes the input text. - * @param {boolean} [params.alternate_intents] - Whether to return more than one intent. 
Set to `true` to return all matching intents. - * @param {Context} [params.context] - State information for the conversation. Continue a conversation by including the context object from the previous response. - * @param {RuntimeEntity[]} [params.entities] - Include the entities from the previous response when they do not need to change and to prevent Watson from trying to identify them. - * @param {RuntimeIntent[]} [params.intents] - An array of name-confidence pairs for the user input. Include the intents from the previous response when they do not need to change and to prevent Watson from trying to identify them. - * @param {OutputData} [params.output] - System output. Include the output from the request when you have several requests within the same Dialog turn to pass back in the intermediate information. + * @param {string} params.workspace_id - The workspace ID. + * @param {string} params.entity - The name of the entity. + * @param {string} params.value - The text of the entity value. + * @param {string} [params.new_value] - The text of the entity value. + * @param {Object} [params.new_metadata] - Any metadata related to the entity value. + * @param {string} [params.new_type] - Specifies the type of value (`synonyms` or `patterns`). The default value is `synonyms`. + * @param {string[]} [params.new_synonyms] - An array of synonyms for the entity value. + * @param {string[]} [params.new_patterns] - An array of patterns for the entity value. A pattern is specified as a regular expression. * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - message(params: ConversationV1.MessageParams, callback?: ConversationV1.Callback): ReadableStream | void { + updateValue( + params: GeneratedConversationV1.UpdateValueParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? 
callback : () => {}; - const requiredParams = ['workspace_id']; + const _callback = callback ? callback : () => {}; + const requiredParams = ['workspace_id', 'entity', 'value']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const body = { - input: _params.input, - alternate_intents: _params.alternate_intents, - context: _params.context, - entities: _params.entities, - intents: _params.intents, - output: _params.output + const body = { + value: _params.new_value, + metadata: _params.new_metadata, + type: _params.new_type, + synonyms: _params.new_synonyms, + patterns: _params.new_patterns }; - const path = { - workspace_id: _params.workspace_id + const path = { + workspace_id: _params.workspace_id, + entity: _params.entity, + value: _params.value }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/message', + url: '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}', method: 'POST', json: true, body: body, - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', - 'content-type': 'application/json', + Accept: 'application/json', + 'Content-Type': 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /************************* * synonyms @@ -1363,39 +1390,43 @@ class ConversationV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - createSynonym(params: ConversationV1.CreateSynonymParams, callback?: ConversationV1.Callback): ReadableStream | void { + createSynonym( + params: GeneratedConversationV1.CreateSynonymParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; + const _callback = callback ? 
callback : () => {}; const requiredParams = ['workspace_id', 'entity', 'value', 'synonym']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const body = { + const body = { synonym: _params.synonym }; - const path = { + const path = { workspace_id: _params.workspace_id, entity: _params.entity, value: _params.value }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}/synonyms', + url: + '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}/synonyms', method: 'POST', json: true, body: body, - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', - 'content-type': 'application/json', + Accept: 'application/json', + 'Content-Type': 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** * Delete entity value synonym. @@ -1410,15 +1441,18 @@ class ConversationV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - deleteSynonym(params: ConversationV1.DeleteSynonymParams, callback?: ConversationV1.Callback): ReadableStream | void { + deleteSynonym( + params: GeneratedConversationV1.DeleteSynonymParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; + const _callback = callback ? 
callback : () => {}; const requiredParams = ['workspace_id', 'entity', 'value', 'synonym']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { + const path = { workspace_id: _params.workspace_id, entity: _params.entity, value: _params.value, @@ -1426,18 +1460,19 @@ class ConversationV1 extends BaseService { }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}/synonyms/{synonym}', + url: + '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}/synonyms/{synonym}', method: 'DELETE', - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', + Accept: 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** * Get entity value synonym. @@ -1452,15 +1487,18 @@ class ConversationV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - getSynonym(params: ConversationV1.GetSynonymParams, callback?: ConversationV1.Callback): ReadableStream | void { + getSynonym( + params: GeneratedConversationV1.GetSynonymParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; + const _callback = callback ? 
callback : () => {}; const requiredParams = ['workspace_id', 'entity', 'value', 'synonym']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { + const path = { workspace_id: _params.workspace_id, entity: _params.entity, value: _params.value, @@ -1468,18 +1506,19 @@ class ConversationV1 extends BaseService { }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}/synonyms/{synonym}', + url: + '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}/synonyms/{synonym}', method: 'GET', - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', + Accept: 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** * List entity value synonyms. @@ -1497,40 +1536,44 @@ class ConversationV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - listSynonyms(params: ConversationV1.ListSynonymsParams, callback?: ConversationV1.Callback): ReadableStream | void { + listSynonyms( + params: GeneratedConversationV1.ListSynonymsParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; + const _callback = callback ? 
callback : () => {}; const requiredParams = ['workspace_id', 'entity', 'value']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const query = { + const query = { page_limit: _params.page_limit, include_count: _params.include_count, sort: _params.sort, cursor: _params.cursor }; - const path = { + const path = { workspace_id: _params.workspace_id, entity: _params.entity, value: _params.value }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}/synonyms', + url: + '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}/synonyms', method: 'GET', qs: query, - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', + Accept: 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** * Update entity value synonym. @@ -1546,18 +1589,21 @@ class ConversationV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - updateSynonym(params: ConversationV1.UpdateSynonymParams, callback?: ConversationV1.Callback): ReadableStream | void { + updateSynonym( + params: GeneratedConversationV1.UpdateSynonymParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; + const _callback = callback ? 
callback : () => {}; const requiredParams = ['workspace_id', 'entity', 'value', 'synonym']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const body = { + const body = { synonym: _params.new_synonym }; - const path = { + const path = { workspace_id: _params.workspace_id, entity: _params.entity, value: _params.value, @@ -1565,173 +1611,188 @@ class ConversationV1 extends BaseService { }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}/synonyms/{synonym}', + url: + '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}/synonyms/{synonym}', method: 'POST', json: true, body: body, - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', - 'content-type': 'application/json', + Accept: 'application/json', + 'Content-Type': 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /************************* - * values + * dialogNodes ************************/ /** - * Add entity value. + * Create dialog node. * - * Create a new value for an entity. + * Create a dialog node. * * @param {Object} params - The parameters to send to the service. * @param {string} params.workspace_id - The workspace ID. - * @param {string} params.entity - The name of the entity. - * @param {string} params.value - The text of the entity value. - * @param {Object} [params.metadata] - Any metadata related to the entity value. - * @param {string[]} [params.synonyms] - An array of synonyms for the entity value. - * @param {string[]} [params.patterns] - An array of patterns for the entity value. A pattern is specified as a regular expression. - * @param {string} [params.value_type] - Specifies the type of value (`synonyms` or `patterns`). The default value is `synonyms`. 
+ * @param {string} params.dialog_node - The dialog node ID. + * @param {string} [params.description] - The description of the dialog node. + * @param {string} [params.conditions] - The condition that will trigger the dialog node. + * @param {string} [params.parent] - The ID of the parent dialog node (if any). + * @param {string} [params.previous_sibling] - The previous dialog node. + * @param {Object} [params.output] - The output of the dialog node. + * @param {Object} [params.context] - The context for the dialog node. + * @param {Object} [params.metadata] - The metadata for the dialog node. + * @param {DialogNodeNextStep} [params.next_step] - The next step to execute following this dialog node. + * @param {DialogNodeAction[]} [params.actions] - The actions for the dialog node. + * @param {string} [params.title] - The alias used to identify the dialog node. + * @param {string} [params.node_type] - How the dialog node is processed. + * @param {string} [params.event_name] - How an `event_handler` node is processed. + * @param {string} [params.variable] - The location in the dialog context where output is stored. * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - createValue(params: ConversationV1.CreateValueParams, callback?: ConversationV1.Callback): ReadableStream | void { + createDialogNode( + params: GeneratedConversationV1.CreateDialogNodeParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; - const requiredParams = ['workspace_id', 'entity', 'value']; + const _callback = callback ? 
callback : () => {}; + const requiredParams = ['workspace_id', 'dialog_node']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const body = { - value: _params.value, + const body = { + dialog_node: _params.dialog_node, + description: _params.description, + conditions: _params.conditions, + parent: _params.parent, + previous_sibling: _params.previous_sibling, + output: _params.output, + context: _params.context, metadata: _params.metadata, - synonyms: _params.synonyms, - patterns: _params.patterns, - type: _params.value_type + next_step: _params.next_step, + actions: _params.actions, + title: _params.title, + type: _params.node_type, + event_name: _params.event_name, + variable: _params.variable }; - const path = { - workspace_id: _params.workspace_id, - entity: _params.entity + const path = { + workspace_id: _params.workspace_id }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/entities/{entity}/values', + url: '/v1/workspaces/{workspace_id}/dialog_nodes', method: 'POST', json: true, body: body, - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', - 'content-type': 'application/json', + Accept: 'application/json', + 'Content-Type': 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** - * Delete entity value. + * Delete dialog node. * - * Delete a value for an entity. + * Delete a dialog node from the workspace. * * @param {Object} params - The parameters to send to the service. * @param {string} params.workspace_id - The workspace ID. - * @param {string} params.entity - The name of the entity. - * @param {string} params.value - The text of the entity value. + * @param {string} params.dialog_node - The dialog node ID (for example, `get_order`). * @param {Function} [callback] - The callback that handles the response. 
* @returns {ReadableStream|void} */ - deleteValue(params: ConversationV1.DeleteValueParams, callback?: ConversationV1.Callback): ReadableStream | void { + deleteDialogNode( + params: GeneratedConversationV1.DeleteDialogNodeParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; - const requiredParams = ['workspace_id', 'entity', 'value']; + const _callback = callback ? callback : () => {}; + const requiredParams = ['workspace_id', 'dialog_node']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { + const path = { workspace_id: _params.workspace_id, - entity: _params.entity, - value: _params.value + dialog_node: _params.dialog_node }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}', + url: '/v1/workspaces/{workspace_id}/dialog_nodes/{dialog_node}', method: 'DELETE', - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', + Accept: 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** - * Get entity value. + * Get dialog node. * - * Get information about an entity value. + * Get information about a dialog node. * * @param {Object} params - The parameters to send to the service. * @param {string} params.workspace_id - The workspace ID. - * @param {string} params.entity - The name of the entity. - * @param {string} params.value - The text of the entity value. - * @param {boolean} [params.export] - Whether to include all element content in the returned data. If export=`false`, the returned data includes only information about the element itself. If export=`true`, all content, including subelements, is included. The default value is `false`. 
+ * @param {string} params.dialog_node - The dialog node ID (for example, `get_order`). * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - getValue(params: ConversationV1.GetValueParams, callback?: ConversationV1.Callback): ReadableStream | void { + getDialogNode( + params: GeneratedConversationV1.GetDialogNodeParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; - const requiredParams = ['workspace_id', 'entity', 'value']; + const _callback = callback ? callback : () => {}; + const requiredParams = ['workspace_id', 'dialog_node']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const query = { - export: _params.export - }; - const path = { + const path = { workspace_id: _params.workspace_id, - entity: _params.entity, - value: _params.value + dialog_node: _params.dialog_node }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}', + url: '/v1/workspaces/{workspace_id}/dialog_nodes/{dialog_node}', method: 'GET', - qs: query, - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', + Accept: 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** - * List entity values. + * List dialog nodes. * - * List the values for an entity. + * List the dialog nodes in the workspace. * * @param {Object} params - The parameters to send to the service. * @param {string} params.workspace_id - The workspace ID. - * @param {string} params.entity - The name of the entity. - * @param {boolean} [params.export] - Whether to include all element content in the returned data. 
If export=`false`, the returned data includes only information about the element itself. If export=`true`, all content, including subelements, is included. The default value is `false`. * @param {number} [params.page_limit] - The number of records to return in each page of results. The default page limit is 100. * @param {boolean} [params.include_count] - Whether to include information about the number of records returned. * @param {string} [params.sort] - Sorts the response according to the value of the specified property, in ascending or descending order. @@ -1739,232 +1800,354 @@ class ConversationV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - listValues(params: ConversationV1.ListValuesParams, callback?: ConversationV1.Callback): ReadableStream | void { + listDialogNodes( + params: GeneratedConversationV1.ListDialogNodesParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; - const requiredParams = ['workspace_id', 'entity']; + const _callback = callback ? 
callback : () => {}; + const requiredParams = ['workspace_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const query = { - export: _params.export, + const query = { page_limit: _params.page_limit, include_count: _params.include_count, sort: _params.sort, cursor: _params.cursor }; - const path = { - workspace_id: _params.workspace_id, - entity: _params.entity + const path = { + workspace_id: _params.workspace_id }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/entities/{entity}/values', + url: '/v1/workspaces/{workspace_id}/dialog_nodes', method: 'GET', qs: query, - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', + Accept: 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** - * Update entity value. + * Update dialog node. * - * Update the content of a value for an entity. + * Update information for a dialog node. * * @param {Object} params - The parameters to send to the service. * @param {string} params.workspace_id - The workspace ID. - * @param {string} params.entity - The name of the entity. - * @param {string} params.value - The text of the entity value. - * @param {string} [params.new_value] - The text of the entity value. - * @param {Object} [params.new_metadata] - Any metadata related to the entity value. - * @param {string} [params.new_type] - Specifies the type of value (`synonyms` or `patterns`). The default value is `synonyms`. - * @param {string[]} [params.new_synonyms] - An array of synonyms for the entity value. - * @param {string[]} [params.new_patterns] - An array of patterns for the entity value. A pattern is specified as a regular expression. + * @param {string} params.dialog_node - The dialog node ID (for example, `get_order`). 
+ * @param {string} params.new_dialog_node - The dialog node ID. + * @param {string} [params.new_description] - The description of the dialog node. + * @param {string} [params.new_conditions] - The condition that will trigger the dialog node. + * @param {string} [params.new_parent] - The ID of the parent dialog node (if any). + * @param {string} [params.new_previous_sibling] - The previous dialog node. + * @param {Object} [params.new_output] - The output of the dialog node. + * @param {Object} [params.new_context] - The context for the dialog node. + * @param {Object} [params.new_metadata] - The metadata for the dialog node. + * @param {DialogNodeNextStep} [params.new_next_step] - The next step to execute following this dialog node. + * @param {string} [params.new_title] - The alias used to identify the dialog node. + * @param {string} [params.new_type] - How the node is processed. + * @param {string} [params.new_event_name] - How an `event_handler` node is processed. + * @param {string} [params.new_variable] - The location in the dialog context where output is stored. + * @param {DialogNodeAction[]} [params.new_actions] - The actions for the dialog node. * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - updateValue(params: ConversationV1.UpdateValueParams, callback?: ConversationV1.Callback): ReadableStream | void { + updateDialogNode( + params: GeneratedConversationV1.UpdateDialogNodeParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; - const requiredParams = ['workspace_id', 'entity', 'value']; + const _callback = callback ? 
callback : () => {}; + const requiredParams = ['workspace_id', 'dialog_node', 'new_dialog_node']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const body = { - value: _params.new_value, + const body = { + dialog_node: _params.new_dialog_node, + description: _params.new_description, + conditions: _params.new_conditions, + parent: _params.new_parent, + previous_sibling: _params.new_previous_sibling, + output: _params.new_output, + context: _params.new_context, metadata: _params.new_metadata, + next_step: _params.new_next_step, + title: _params.new_title, type: _params.new_type, - synonyms: _params.new_synonyms, - patterns: _params.new_patterns + event_name: _params.new_event_name, + variable: _params.new_variable, + actions: _params.new_actions }; - const path = { + const path = { workspace_id: _params.workspace_id, - entity: _params.entity, - value: _params.value + dialog_node: _params.dialog_node }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}', + url: '/v1/workspaces/{workspace_id}/dialog_nodes/{dialog_node}', method: 'POST', json: true, body: body, - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', - 'content-type': 'application/json', + Accept: 'application/json', + 'Content-Type': 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /************************* - * workspaces + * logs ************************/ /** - * Create workspace. + * List log events in all workspaces. * - * Create a workspace based on component objects. You must provide workspace components defining the content of the new workspace. + * List log events in all workspaces in the service instance. * - * @param {Object} [params] - The parameters to send to the service. 
- * @param {string} [params.name] - The name of the workspace. - * @param {string} [params.description] - The description of the workspace. - * @param {string} [params.language] - The language of the workspace. - * @param {CreateIntent[]} [params.intents] - An array of objects defining the intents for the workspace. - * @param {CreateEntity[]} [params.entities] - An array of objects defining the entities for the workspace. - * @param {CreateDialogNode[]} [params.dialog_nodes] - An array of objects defining the nodes in the workspace dialog. - * @param {CreateCounterexample[]} [params.counterexamples] - An array of objects defining input examples that have been marked as irrelevant input. - * @param {Object} [params.metadata] - Any metadata related to the workspace. - * @param {boolean} [params.learning_opt_out] - Whether training data from the workspace can be used by IBM for general service improvements. `true` indicates that workspace training data is not to be used. + * @param {Object} params - The parameters to send to the service. + * @param {string} [params.sort] - Sorts the response according to the value of the specified property, in ascending or descending order. + * @param {string} params.filter - A cacheable parameter that limits the results to those matching the specified filter. You must specify a filter query that includes a value for `language`, as well as a value for `workspace_id` or `request.context.metadata.deployment`. For more information, see the [documentation](https://console.bluemix.net/docs/services/conversation/filter-reference.html#filter-query-syntax). + * @param {number} [params.page_limit] - The number of records to return in each page of results. The default page limit is 100. + * @param {string} [params.cursor] - A token identifying the last value from the previous page of results. * @param {Function} [callback] - The callback that handles the response. 
* @returns {ReadableStream|void} */ - createWorkspace(params?: ConversationV1.CreateWorkspaceParams, callback?: ConversationV1.Callback): ReadableStream | void { - const _params = (typeof params === 'function' && !callback) ? {} : extend({}, params); - const _callback = (typeof params === 'function' && !callback) ? params : (callback) ? callback : () => {}; - const body = { - name: _params.name, - description: _params.description, - language: _params.language, - intents: _params.intents, - entities: _params.entities, - dialog_nodes: _params.dialog_nodes, - counterexamples: _params.counterexamples, - metadata: _params.metadata, - learning_opt_out: _params.learning_opt_out + listAllLogs( + params: GeneratedConversationV1.ListAllLogsParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { + const _params = extend({}, params); + const _callback = callback ? callback : () => {}; + const requiredParams = ['filter']; + const missingParams = getMissingParams(_params, requiredParams); + if (missingParams) { + return _callback(missingParams); + } + const query = { + sort: _params.sort, + filter: _params.filter, + page_limit: _params.page_limit, + cursor: _params.cursor }; const parameters = { options: { - url: '/v1/workspaces', - method: 'POST', - json: true, - body: body, + url: '/v1/logs', + method: 'GET', + qs: query }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', - 'content-type': 'application/json', + Accept: 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** - * Delete workspace. + * List log events in a workspace. * - * Delete a workspace from the service instance. + * List log events in a specific workspace. * * @param {Object} params - The parameters to send to the service. * @param {string} params.workspace_id - The workspace ID. 
+ * @param {string} [params.sort] - Sorts the response according to the value of the specified property, in ascending or descending order. + * @param {string} [params.filter] - A cacheable parameter that limits the results to those matching the specified filter. For more information, see the [documentation](https://console.bluemix.net/docs/services/conversation/filter-reference.html#filter-query-syntax). + * @param {number} [params.page_limit] - The number of records to return in each page of results. The default page limit is 100. + * @param {string} [params.cursor] - A token identifying the last value from the previous page of results. * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - deleteWorkspace(params: ConversationV1.DeleteWorkspaceParams, callback?: ConversationV1.Callback): ReadableStream | void { + listLogs( + params: GeneratedConversationV1.ListLogsParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; + const _callback = callback ? 
callback : () => {}; const requiredParams = ['workspace_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { + const query = { + sort: _params.sort, + filter: _params.filter, + page_limit: _params.page_limit, + cursor: _params.cursor + }; + const path = { workspace_id: _params.workspace_id }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}', - method: 'DELETE', - path: path, + url: '/v1/workspaces/{workspace_id}/logs', + method: 'GET', + qs: query, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', + Accept: 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } + + /************************* + * counterexamples + ************************/ /** - * Get information about a workspace. + * Create counterexample. * - * Get information about a workspace, optionally including all workspace content. + * Add a new counterexample to a workspace. Counterexamples are examples that have been marked as irrelevant input. * * @param {Object} params - The parameters to send to the service. * @param {string} params.workspace_id - The workspace ID. - * @param {boolean} [params.export] - Whether to include all element content in the returned data. If export=`false`, the returned data includes only information about the element itself. If export=`true`, all content, including subelements, is included. The default value is `false`. + * @param {string} params.text - The text of a user input marked as irrelevant input. * @param {Function} [callback] - The callback that handles the response. 
* @returns {ReadableStream|void} */ - getWorkspace(params: ConversationV1.GetWorkspaceParams, callback?: ConversationV1.Callback): ReadableStream | void { + createCounterexample( + params: GeneratedConversationV1.CreateCounterexampleParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; - const requiredParams = ['workspace_id']; + const _callback = callback ? callback : () => {}; + const requiredParams = ['workspace_id', 'text']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const query = { - export: _params.export + const body = { + text: _params.text }; - const path = { + const path = { workspace_id: _params.workspace_id }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}', + url: '/v1/workspaces/{workspace_id}/counterexamples', + method: 'POST', + json: true, + body: body, + path: path + }, + defaultOptions: extend(true, {}, this._options, { + headers: { + Accept: 'application/json', + 'Content-Type': 'application/json' + } + }) + }; + return createRequest(parameters, _callback); + } + + /** + * Delete counterexample. + * + * Delete a counterexample from a workspace. Counterexamples are examples that have been marked as irrelevant input. + * + * @param {Object} params - The parameters to send to the service. + * @param {string} params.workspace_id - The workspace ID. + * @param {string} params.text - The text of a user input counterexample (for example, `What are you wearing?`). + * @param {Function} [callback] - The callback that handles the response. + * @returns {ReadableStream|void} + */ + deleteCounterexample( + params: GeneratedConversationV1.DeleteCounterexampleParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { + const _params = extend({}, params); + const _callback = callback ? 
callback : () => {}; + const requiredParams = ['workspace_id', 'text']; + const missingParams = getMissingParams(_params, requiredParams); + if (missingParams) { + return _callback(missingParams); + } + const path = { + workspace_id: _params.workspace_id, + text: _params.text + }; + const parameters = { + options: { + url: '/v1/workspaces/{workspace_id}/counterexamples/{text}', + method: 'DELETE', + path: path + }, + defaultOptions: extend(true, {}, this._options, { + headers: { + Accept: 'application/json' + } + }) + }; + return createRequest(parameters, _callback); + } + + /** + * Get counterexample. + * + * Get information about a counterexample. Counterexamples are examples that have been marked as irrelevant input. + * + * @param {Object} params - The parameters to send to the service. + * @param {string} params.workspace_id - The workspace ID. + * @param {string} params.text - The text of a user input counterexample (for example, `What are you wearing?`). + * @param {Function} [callback] - The callback that handles the response. + * @returns {ReadableStream|void} + */ + getCounterexample( + params: GeneratedConversationV1.GetCounterexampleParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { + const _params = extend({}, params); + const _callback = callback ? 
callback : () => {}; + const requiredParams = ['workspace_id', 'text']; + const missingParams = getMissingParams(_params, requiredParams); + if (missingParams) { + return _callback(missingParams); + } + const path = { + workspace_id: _params.workspace_id, + text: _params.text + }; + const parameters = { + options: { + url: '/v1/workspaces/{workspace_id}/counterexamples/{text}', method: 'GET', - qs: query, - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', + Accept: 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** - * List workspaces. + * List counterexamples. * - * List the workspaces associated with a Conversation service instance. + * List the counterexamples for a workspace. Counterexamples are examples that have been marked as irrelevant input. * - * @param {Object} [params] - The parameters to send to the service. + * @param {Object} params - The parameters to send to the service. + * @param {string} params.workspace_id - The workspace ID. * @param {number} [params.page_limit] - The number of records to return in each page of results. The default page limit is 100. * @param {boolean} [params.include_count] - Whether to include information about the number of records returned. * @param {string} [params.sort] - Sorts the response according to the value of the specified property, in ascending or descending order. @@ -1972,104 +2155,100 @@ class ConversationV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - listWorkspaces(params?: ConversationV1.ListWorkspacesParams, callback?: ConversationV1.Callback): ReadableStream | void { - const _params = (typeof params === 'function' && !callback) ? {} : extend({}, params); - const _callback = (typeof params === 'function' && !callback) ? params : (callback) ? 
callback : () => {}; - const query = { + listCounterexamples( + params: GeneratedConversationV1.ListCounterexamplesParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { + const _params = extend({}, params); + const _callback = callback ? callback : () => {}; + const requiredParams = ['workspace_id']; + const missingParams = getMissingParams(_params, requiredParams); + if (missingParams) { + return _callback(missingParams); + } + const query = { page_limit: _params.page_limit, include_count: _params.include_count, sort: _params.sort, cursor: _params.cursor }; + const path = { + workspace_id: _params.workspace_id + }; const parameters = { options: { - url: '/v1/workspaces', + url: '/v1/workspaces/{workspace_id}/counterexamples', method: 'GET', qs: query, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', + Accept: 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** - * Update workspace. + * Update counterexample. * - * Update an existing workspace with new or modified data. You must provide component objects defining the content of the updated workspace. + * Update the text of a counterexample. Counterexamples are examples that have been marked as irrelevant input. * * @param {Object} params - The parameters to send to the service. * @param {string} params.workspace_id - The workspace ID. - * @param {string} [params.name] - The name of the workspace. - * @param {string} [params.description] - The description of the workspace. - * @param {string} [params.language] - The language of the workspace. - * @param {CreateIntent[]} [params.intents] - An array of objects defining the intents for the workspace. - * @param {CreateEntity[]} [params.entities] - An array of objects defining the entities for the workspace. 
- * @param {CreateDialogNode[]} [params.dialog_nodes] - An array of objects defining the nodes in the workspace dialog. - * @param {CreateCounterexample[]} [params.counterexamples] - An array of objects defining input examples that have been marked as irrelevant input. - * @param {Object} [params.metadata] - Any metadata related to the workspace. - * @param {boolean} [params.learning_opt_out] - Whether training data from the workspace can be used by IBM for general service improvements. `true` indicates that workspace training data is not to be used. + * @param {string} params.text - The text of a user input counterexample (for example, `What are you wearing?`). + * @param {string} [params.new_text] - The text of the example to be marked as irrelevant input. * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - updateWorkspace(params: ConversationV1.UpdateWorkspaceParams, callback?: ConversationV1.Callback): ReadableStream | void { + updateCounterexample( + params: GeneratedConversationV1.UpdateCounterexampleParams, + callback?: GeneratedConversationV1.Callback + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; - const requiredParams = ['workspace_id']; + const _callback = callback ? 
callback : () => {}; + const requiredParams = ['workspace_id', 'text']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const body = { - name: _params.name, - description: _params.description, - language: _params.language, - intents: _params.intents, - entities: _params.entities, - dialog_nodes: _params.dialog_nodes, - counterexamples: _params.counterexamples, - metadata: _params.metadata, - learning_opt_out: _params.learning_opt_out + const body = { + text: _params.new_text }; - const path = { - workspace_id: _params.workspace_id + const path = { + workspace_id: _params.workspace_id, + text: _params.text }; const parameters = { options: { - url: '/v1/workspaces/{workspace_id}', + url: '/v1/workspaces/{workspace_id}/counterexamples/{text}', method: 'POST', json: true, body: body, - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', - 'content-type': 'application/json', + Accept: 'application/json', + 'Content-Type': 'application/json' } }) }; return createRequest(parameters, _callback); - }; - + } } -ConversationV1.prototype.name = 'conversation'; -ConversationV1.prototype.version = 'v1'; +GeneratedConversationV1.prototype.name = 'conversation'; +GeneratedConversationV1.prototype.version = 'v1'; /************************* * interfaces ************************/ -namespace ConversationV1 { - - export interface Empty { } - - export type Callback = (error: any, body?: T, response?: RequestResponse) => void; - +namespace GeneratedConversationV1 { + /** Options for the `GeneratedConversationV1` constructor. **/ export type Options = { version_date: string; url?: string; @@ -2077,708 +2256,1207 @@ namespace ConversationV1 { password?: string; use_unauthenticated?: boolean; headers?: object; - } + }; + + /** The callback for a service request. 
**/ + export type Callback = ( + error: any, + body?: T, + response?: RequestResponse + ) => void; + + /** The body of a service request that returns no response data. **/ + export interface Empty {} /************************* * request interfaces ************************/ - export interface CreateCounterexampleParams { - workspace_id: string; - text: string; + /** Parameters for the `createWorkspace` operation. **/ + export interface CreateWorkspaceParams { + /** The name of the workspace. **/ + name?: string; + /** The description of the workspace. **/ + description?: string; + /** The language of the workspace. **/ + language?: string; + /** An array of objects defining the intents for the workspace. **/ + intents?: CreateIntent[]; + /** An array of objects defining the entities for the workspace. **/ + entities?: CreateEntity[]; + /** An array of objects defining the nodes in the workspace dialog. **/ + dialog_nodes?: CreateDialogNode[]; + /** An array of objects defining input examples that have been marked as irrelevant input. **/ + counterexamples?: CreateCounterexample[]; + /** Any metadata related to the workspace. **/ + metadata?: Object; + /** Whether training data from the workspace can be used by IBM for general service improvements. `true` indicates that workspace training data is not to be used. **/ + learning_opt_out?: boolean; } - export interface DeleteCounterexampleParams { + /** Parameters for the `deleteWorkspace` operation. **/ + export interface DeleteWorkspaceParams { + /** The workspace ID. **/ workspace_id: string; - text: string; } - export interface GetCounterexampleParams { + /** Parameters for the `getWorkspace` operation. **/ + export interface GetWorkspaceParams { + /** The workspace ID. **/ workspace_id: string; - text: string; + /** Whether to include all element content in the returned data. If export=`false`, the returned data includes only information about the element itself. 
If export=`true`, all content, including subelements, is included. The default value is `false`. **/ + export?: boolean; } - export interface ListCounterexamplesParams { - workspace_id: string; + /** Parameters for the `listWorkspaces` operation. **/ + export interface ListWorkspacesParams { + /** The number of records to return in each page of results. The default page limit is 100. **/ page_limit?: number; + /** Whether to include information about the number of records returned. **/ include_count?: boolean; + /** Sorts the response according to the value of the specified property, in ascending or descending order. **/ sort?: string; + /** A token identifying the last value from the previous page of results. **/ cursor?: string; } - export interface UpdateCounterexampleParams { - workspace_id: string; - text: string; - new_text?: string; - } - - export interface CreateDialogNodeParams { + /** Parameters for the `updateWorkspace` operation. **/ + export interface UpdateWorkspaceParams { + /** The workspace ID. **/ workspace_id: string; - dialog_node: string; + /** The name of the workspace. **/ + name?: string; + /** The description of the workspace. **/ description?: string; - conditions?: string; - parent?: string; - previous_sibling?: string; - output?: Object; - context?: Object; + /** The language of the workspace. **/ + language?: string; + /** An array of objects defining the intents for the workspace. **/ + intents?: CreateIntent[]; + /** An array of objects defining the entities for the workspace. **/ + entities?: CreateEntity[]; + /** An array of objects defining the nodes in the workspace dialog. **/ + dialog_nodes?: CreateDialogNode[]; + /** An array of objects defining input examples that have been marked as irrelevant input. **/ + counterexamples?: CreateCounterexample[]; + /** Any metadata related to the workspace. 
**/ metadata?: Object; - next_step?: DialogNodeNextStep; - actions?: DialogNodeAction[]; - title?: string; - node_type?: CreateDialogNodeConstants.NodeType | string; - event_name?: CreateDialogNodeConstants.EventName | string; - variable?: string; - } - - export namespace CreateDialogNodeConstants { - export enum NodeType { - STANDARD = 'standard', - EVENT_HANDLER = 'event_handler', - FRAME = 'frame', - SLOT = 'slot', - RESPONSE_CONDITION = 'response_condition', - } - export enum EventName { - FOCUS = 'focus', - INPUT = 'input', - FILLED = 'filled', - VALIDATE = 'validate', - FILLED_MULTIPLE = 'filled_multiple', - GENERIC = 'generic', - NOMATCH = 'nomatch', - NOMATCH_RESPONSES_DEPLETED = 'nomatch_responses_depleted', - } - } - - export interface DeleteDialogNodeParams { - workspace_id: string; - dialog_node: string; - } - - export interface GetDialogNodeParams { - workspace_id: string; - dialog_node: string; - } - - export interface ListDialogNodesParams { - workspace_id: string; - page_limit?: number; - include_count?: boolean; - sort?: string; - cursor?: string; + /** Whether training data from the workspace can be used by IBM for general service improvements. `true` indicates that workspace training data is not to be used. **/ + learning_opt_out?: boolean; } - export interface UpdateDialogNodeParams { + /** Parameters for the `message` operation. **/ + export interface MessageParams { + /** Unique identifier of the workspace. 
**/ workspace_id: string; - dialog_node: string; - new_dialog_node: string; - new_description?: string; - new_conditions?: string; - new_parent?: string; - new_previous_sibling?: string; - new_output?: Object; - new_context?: Object; - new_metadata?: Object; - new_next_step?: DialogNodeNextStep; - new_title?: string; - new_type?: UpdateDialogNodeConstants.NodeType | string; - new_event_name?: UpdateDialogNodeConstants.EventName | string; - new_variable?: string; - new_actions?: DialogNodeAction[]; - } - - export namespace UpdateDialogNodeConstants { - export enum NodeType { - STANDARD = 'standard', - EVENT_HANDLER = 'event_handler', - FRAME = 'frame', - SLOT = 'slot', - RESPONSE_CONDITION = 'response_condition', - } - export enum EventName { - FOCUS = 'focus', - INPUT = 'input', - FILLED = 'filled', - VALIDATE = 'validate', - FILLED_MULTIPLE = 'filled_multiple', - GENERIC = 'generic', - NOMATCH = 'nomatch', - NOMATCH_RESPONSES_DEPLETED = 'nomatch_responses_depleted', - } + /** An input object that includes the input text. **/ + input?: InputData; + /** Whether to return more than one intent. Set to `true` to return all matching intents. **/ + alternate_intents?: boolean; + /** State information for the conversation. Continue a conversation by including the context object from the previous response. **/ + context?: Context; + /** Include the entities from the previous response when they do not need to change and to prevent Watson from trying to identify them. **/ + entities?: RuntimeEntity[]; + /** An array of name-confidence pairs for the user input. Include the intents from the previous response when they do not need to change and to prevent Watson from trying to identify them. **/ + intents?: RuntimeIntent[]; + /** System output. Include the output from the request when you have several requests within the same Dialog turn to pass back in the intermediate information. 
**/ + output?: OutputData; } - export interface CreateEntityParams { + /** Parameters for the `createIntent` operation. **/ + export interface CreateIntentParams { + /** The workspace ID. **/ workspace_id: string; - entity: string; + /** The name of the intent. **/ + intent: string; + /** The description of the intent. **/ description?: string; - metadata?: Object; - values?: CreateValue[]; - fuzzy_match?: boolean; + /** An array of user input examples. **/ + examples?: CreateExample[]; } - export interface DeleteEntityParams { + /** Parameters for the `deleteIntent` operation. **/ + export interface DeleteIntentParams { + /** The workspace ID. **/ workspace_id: string; - entity: string; + /** The intent name (for example, `pizza_order`). **/ + intent: string; } - export interface GetEntityParams { + /** Parameters for the `getIntent` operation. **/ + export interface GetIntentParams { + /** The workspace ID. **/ workspace_id: string; - entity: string; + /** The intent name (for example, `pizza_order`). **/ + intent: string; + /** Whether to include all element content in the returned data. If export=`false`, the returned data includes only information about the element itself. If export=`true`, all content, including subelements, is included. The default value is `false`. **/ export?: boolean; } - export interface ListEntitiesParams { + /** Parameters for the `listIntents` operation. **/ + export interface ListIntentsParams { + /** The workspace ID. **/ workspace_id: string; + /** Whether to include all element content in the returned data. If export=`false`, the returned data includes only information about the element itself. If export=`true`, all content, including subelements, is included. The default value is `false`. **/ export?: boolean; + /** The number of records to return in each page of results. The default page limit is 100. **/ page_limit?: number; + /** Whether to include information about the number of records returned. 
**/ include_count?: boolean; + /** Sorts the response according to the value of the specified property, in ascending or descending order. **/ sort?: string; + /** A token identifying the last value from the previous page of results. **/ cursor?: string; } - export interface UpdateEntityParams { + /** Parameters for the `updateIntent` operation. **/ + export interface UpdateIntentParams { + /** The workspace ID. **/ workspace_id: string; - entity: string; - new_entity?: string; + /** The intent name (for example, `pizza_order`). **/ + intent: string; + /** The name of the intent. **/ + new_intent?: string; + /** The description of the intent. **/ new_description?: string; - new_metadata?: Object; - new_fuzzy_match?: boolean; - new_values?: CreateValue[]; + /** An array of user input examples for the intent. **/ + new_examples?: CreateExample[]; } + /** Parameters for the `createExample` operation. **/ export interface CreateExampleParams { + /** The workspace ID. **/ workspace_id: string; + /** The intent name (for example, `pizza_order`). **/ intent: string; + /** The text of a user input example. **/ text: string; } + /** Parameters for the `deleteExample` operation. **/ export interface DeleteExampleParams { + /** The workspace ID. **/ workspace_id: string; + /** The intent name (for example, `pizza_order`). **/ intent: string; + /** The text of the user input example. **/ text: string; } + /** Parameters for the `getExample` operation. **/ export interface GetExampleParams { + /** The workspace ID. **/ workspace_id: string; + /** The intent name (for example, `pizza_order`). **/ intent: string; + /** The text of the user input example. **/ text: string; } + /** Parameters for the `listExamples` operation. **/ export interface ListExamplesParams { + /** The workspace ID. **/ workspace_id: string; + /** The intent name (for example, `pizza_order`). **/ intent: string; + /** The number of records to return in each page of results. The default page limit is 100. 
**/ page_limit?: number; + /** Whether to include information about the number of records returned. **/ include_count?: boolean; + /** Sorts the response according to the value of the specified property, in ascending or descending order. **/ sort?: string; + /** A token identifying the last value from the previous page of results. **/ cursor?: string; } + /** Parameters for the `updateExample` operation. **/ export interface UpdateExampleParams { + /** The workspace ID. **/ workspace_id: string; + /** The intent name (for example, `pizza_order`). **/ intent: string; + /** The text of the user input example. **/ text: string; + /** The text of the user input example. **/ new_text?: string; } - export interface CreateIntentParams { + /** Parameters for the `createEntity` operation. **/ + export interface CreateEntityParams { + /** The workspace ID. **/ workspace_id: string; - intent: string; + /** The name of the entity. **/ + entity: string; + /** The description of the entity. **/ description?: string; - examples?: CreateExample[]; + /** Any metadata related to the value. **/ + metadata?: Object; + /** An array of entity values. **/ + values?: CreateValue[]; + /** Whether to use fuzzy matching for the entity. **/ + fuzzy_match?: boolean; } - export interface DeleteIntentParams { + /** Parameters for the `deleteEntity` operation. **/ + export interface DeleteEntityParams { + /** The workspace ID. **/ workspace_id: string; - intent: string; + /** The name of the entity. **/ + entity: string; } - export interface GetIntentParams { + /** Parameters for the `getEntity` operation. **/ + export interface GetEntityParams { + /** The workspace ID. **/ workspace_id: string; - intent: string; + /** The name of the entity. **/ + entity: string; + /** Whether to include all element content in the returned data. If export=`false`, the returned data includes only information about the element itself. If export=`true`, all content, including subelements, is included. 
The default value is `false`. **/ export?: boolean; } - export interface ListIntentsParams { + /** Parameters for the `listEntities` operation. **/ + export interface ListEntitiesParams { + /** The workspace ID. **/ workspace_id: string; + /** Whether to include all element content in the returned data. If export=`false`, the returned data includes only information about the element itself. If export=`true`, all content, including subelements, is included. The default value is `false`. **/ export?: boolean; + /** The number of records to return in each page of results. The default page limit is 100. **/ page_limit?: number; + /** Whether to include information about the number of records returned. **/ include_count?: boolean; + /** Sorts the response according to the value of the specified property, in ascending or descending order. **/ sort?: string; + /** A token identifying the last value from the previous page of results. **/ cursor?: string; } - export interface UpdateIntentParams { + /** Parameters for the `updateEntity` operation. **/ + export interface UpdateEntityParams { + /** The workspace ID. **/ workspace_id: string; - intent: string; - new_intent?: string; + /** The name of the entity. **/ + entity: string; + /** The name of the entity. **/ + new_entity?: string; + /** The description of the entity. **/ new_description?: string; - new_examples?: CreateExample[]; + /** Any metadata related to the entity. **/ + new_metadata?: Object; + /** Whether to use fuzzy matching for the entity. **/ + new_fuzzy_match?: boolean; + /** An array of entity values. **/ + new_values?: CreateValue[]; } - export interface ListLogsParams { + /** Parameters for the `createValue` operation. **/ + export interface CreateValueParams { + /** The workspace ID. **/ workspace_id: string; - sort?: string; - filter?: string; + /** The name of the entity. **/ + entity: string; + /** The text of the entity value. **/ + value: string; + /** Any metadata related to the entity value. 
**/ + metadata?: Object; + /** An array of synonyms for the entity value. **/ + synonyms?: string[]; + /** An array of patterns for the entity value. A pattern is specified as a regular expression. **/ + patterns?: string[]; + /** Specifies the type of value (`synonyms` or `patterns`). The default value is `synonyms`. **/ + value_type?: CreateValueConstants.ValueType | string; + } + + /** Constants for the `createValue` operation. **/ + export namespace CreateValueConstants { + /** Specifies the type of value (`synonyms` or `patterns`). The default value is `synonyms`. **/ + export enum ValueType { + SYNONYMS = 'synonyms', + PATTERNS = 'patterns' + } + } + + /** Parameters for the `deleteValue` operation. **/ + export interface DeleteValueParams { + /** The workspace ID. **/ + workspace_id: string; + /** The name of the entity. **/ + entity: string; + /** The text of the entity value. **/ + value: string; + } + + /** Parameters for the `getValue` operation. **/ + export interface GetValueParams { + /** The workspace ID. **/ + workspace_id: string; + /** The name of the entity. **/ + entity: string; + /** The text of the entity value. **/ + value: string; + /** Whether to include all element content in the returned data. If export=`false`, the returned data includes only information about the element itself. If export=`true`, all content, including subelements, is included. The default value is `false`. **/ + export?: boolean; + } + + /** Parameters for the `listValues` operation. **/ + export interface ListValuesParams { + /** The workspace ID. **/ + workspace_id: string; + /** The name of the entity. **/ + entity: string; + /** Whether to include all element content in the returned data. If export=`false`, the returned data includes only information about the element itself. If export=`true`, all content, including subelements, is included. The default value is `false`. **/ + export?: boolean; + /** The number of records to return in each page of results. 
The default page limit is 100. **/ page_limit?: number; + /** Whether to include information about the number of records returned. **/ + include_count?: boolean; + /** Sorts the response according to the value of the specified property, in ascending or descending order. **/ + sort?: string; + /** A token identifying the last value from the previous page of results. **/ cursor?: string; } - export interface MessageParams { + /** Parameters for the `updateValue` operation. **/ + export interface UpdateValueParams { + /** The workspace ID. **/ workspace_id: string; - input?: InputData; - alternate_intents?: boolean; - context?: Context; - entities?: RuntimeEntity[]; - intents?: RuntimeIntent[]; - output?: OutputData; + /** The name of the entity. **/ + entity: string; + /** The text of the entity value. **/ + value: string; + /** The text of the entity value. **/ + new_value?: string; + /** Any metadata related to the entity value. **/ + new_metadata?: Object; + /** Specifies the type of value (`synonyms` or `patterns`). The default value is `synonyms`. **/ + new_type?: UpdateValueConstants.ValueType | string; + /** An array of synonyms for the entity value. **/ + new_synonyms?: string[]; + /** An array of patterns for the entity value. A pattern is specified as a regular expression. **/ + new_patterns?: string[]; + } + + /** Constants for the `updateValue` operation. **/ + export namespace UpdateValueConstants { + /** Specifies the type of value (`synonyms` or `patterns`). The default value is `synonyms`. **/ + export enum ValueType { + SYNONYMS = 'synonyms', + PATTERNS = 'patterns' + } } + /** Parameters for the `createSynonym` operation. **/ export interface CreateSynonymParams { + /** The workspace ID. **/ workspace_id: string; + /** The name of the entity. **/ entity: string; + /** The text of the entity value. **/ value: string; + /** The text of the synonym. **/ synonym: string; } + /** Parameters for the `deleteSynonym` operation. 
**/ export interface DeleteSynonymParams { + /** The workspace ID. **/ workspace_id: string; + /** The name of the entity. **/ entity: string; + /** The text of the entity value. **/ value: string; + /** The text of the synonym. **/ synonym: string; } + /** Parameters for the `getSynonym` operation. **/ export interface GetSynonymParams { + /** The workspace ID. **/ workspace_id: string; + /** The name of the entity. **/ entity: string; + /** The text of the entity value. **/ value: string; + /** The text of the synonym. **/ synonym: string; } + /** Parameters for the `listSynonyms` operation. **/ export interface ListSynonymsParams { + /** The workspace ID. **/ workspace_id: string; + /** The name of the entity. **/ entity: string; + /** The text of the entity value. **/ value: string; + /** The number of records to return in each page of results. The default page limit is 100. **/ page_limit?: number; + /** Whether to include information about the number of records returned. **/ include_count?: boolean; + /** Sorts the response according to the value of the specified property, in ascending or descending order. **/ sort?: string; + /** A token identifying the last value from the previous page of results. **/ cursor?: string; } + /** Parameters for the `updateSynonym` operation. **/ export interface UpdateSynonymParams { + /** The workspace ID. **/ workspace_id: string; + /** The name of the entity. **/ entity: string; + /** The text of the entity value. **/ value: string; + /** The text of the synonym. **/ synonym: string; + /** The text of the synonym. **/ new_synonym?: string; } - export interface CreateValueParams { + /** Parameters for the `createDialogNode` operation. **/ + export interface CreateDialogNodeParams { + /** The workspace ID. **/ workspace_id: string; - entity: string; - value: string; + /** The dialog node ID. **/ + dialog_node: string; + /** The description of the dialog node. 
**/ + description?: string; + /** The condition that will trigger the dialog node. **/ + conditions?: string; + /** The ID of the parent dialog node (if any). **/ + parent?: string; + /** The previous dialog node. **/ + previous_sibling?: string; + /** The output of the dialog node. **/ + output?: Object; + /** The context for the dialog node. **/ + context?: Object; + /** The metadata for the dialog node. **/ metadata?: Object; - synonyms?: string[]; - patterns?: string[]; - value_type?: CreateValueConstants.ValueType | string; + /** The next step to execute following this dialog node. **/ + next_step?: DialogNodeNextStep; + /** The actions for the dialog node. **/ + actions?: DialogNodeAction[]; + /** The alias used to identify the dialog node. **/ + title?: string; + /** How the dialog node is processed. **/ + node_type?: CreateDialogNodeConstants.NodeType | string; + /** How an `event_handler` node is processed. **/ + event_name?: CreateDialogNodeConstants.EventName | string; + /** The location in the dialog context where output is stored. **/ + variable?: string; } - export namespace CreateValueConstants { - export enum ValueType { - SYNONYMS = 'synonyms', - PATTERNS = 'patterns', + /** Constants for the `createDialogNode` operation. **/ + export namespace CreateDialogNodeConstants { + /** How the dialog node is processed. **/ + export enum NodeType { + STANDARD = 'standard', + EVENT_HANDLER = 'event_handler', + FRAME = 'frame', + SLOT = 'slot', + RESPONSE_CONDITION = 'response_condition' + } + /** How an `event_handler` node is processed. **/ + export enum EventName { + FOCUS = 'focus', + INPUT = 'input', + FILLED = 'filled', + VALIDATE = 'validate', + FILLED_MULTIPLE = 'filled_multiple', + GENERIC = 'generic', + NOMATCH = 'nomatch', + NOMATCH_RESPONSES_DEPLETED = 'nomatch_responses_depleted' } } - export interface DeleteValueParams { + /** Parameters for the `deleteDialogNode` operation. **/ + export interface DeleteDialogNodeParams { + /** The workspace ID. 
**/ workspace_id: string; - entity: string; - value: string; + /** The dialog node ID (for example, `get_order`). **/ + dialog_node: string; } - export interface GetValueParams { + /** Parameters for the `getDialogNode` operation. **/ + export interface GetDialogNodeParams { + /** The workspace ID. **/ workspace_id: string; - entity: string; - value: string; - export?: boolean; + /** The dialog node ID (for example, `get_order`). **/ + dialog_node: string; } - export interface ListValuesParams { + /** Parameters for the `listDialogNodes` operation. **/ + export interface ListDialogNodesParams { + /** The workspace ID. **/ workspace_id: string; - entity: string; - export?: boolean; + /** The number of records to return in each page of results. The default page limit is 100. **/ page_limit?: number; + /** Whether to include information about the number of records returned. **/ include_count?: boolean; + /** Sorts the response according to the value of the specified property, in ascending or descending order. **/ sort?: string; + /** A token identifying the last value from the previous page of results. **/ cursor?: string; } - export interface UpdateValueParams { + /** Parameters for the `updateDialogNode` operation. **/ + export interface UpdateDialogNodeParams { + /** The workspace ID. **/ workspace_id: string; - entity: string; - value: string; - new_value?: string; + /** The dialog node ID (for example, `get_order`). **/ + dialog_node: string; + /** The dialog node ID. **/ + new_dialog_node: string; + /** The description of the dialog node. **/ + new_description?: string; + /** The condition that will trigger the dialog node. **/ + new_conditions?: string; + /** The ID of the parent dialog node (if any). **/ + new_parent?: string; + /** The previous dialog node. **/ + new_previous_sibling?: string; + /** The output of the dialog node. **/ + new_output?: Object; + /** The context for the dialog node. 
**/ + new_context?: Object; + /** The metadata for the dialog node. **/ new_metadata?: Object; - new_type?: UpdateValueConstants.ValueType | string; - new_synonyms?: string[]; - new_patterns?: string[]; + /** The next step to execute following this dialog node. **/ + new_next_step?: DialogNodeNextStep; + /** The alias used to identify the dialog node. **/ + new_title?: string; + /** How the node is processed. **/ + new_type?: UpdateDialogNodeConstants.NodeType | string; + /** How an `event_handler` node is processed. **/ + new_event_name?: UpdateDialogNodeConstants.EventName | string; + /** The location in the dialog context where output is stored. **/ + new_variable?: string; + /** The actions for the dialog node. **/ + new_actions?: DialogNodeAction[]; } - export namespace UpdateValueConstants { - export enum ValueType { - SYNONYMS = 'synonyms', - PATTERNS = 'patterns', + /** Constants for the `updateDialogNode` operation. **/ + export namespace UpdateDialogNodeConstants { + /** How the node is processed. **/ + export enum NodeType { + STANDARD = 'standard', + EVENT_HANDLER = 'event_handler', + FRAME = 'frame', + SLOT = 'slot', + RESPONSE_CONDITION = 'response_condition' + } + /** How an `event_handler` node is processed. **/ + export enum EventName { + FOCUS = 'focus', + INPUT = 'input', + FILLED = 'filled', + VALIDATE = 'validate', + FILLED_MULTIPLE = 'filled_multiple', + GENERIC = 'generic', + NOMATCH = 'nomatch', + NOMATCH_RESPONSES_DEPLETED = 'nomatch_responses_depleted' } } - export interface CreateWorkspaceParams { - name?: string; - description?: string; - language?: string; - intents?: CreateIntent[]; - entities?: CreateEntity[]; - dialog_nodes?: CreateDialogNode[]; - counterexamples?: CreateCounterexample[]; - metadata?: Object; - learning_opt_out?: boolean; + /** Parameters for the `listAllLogs` operation. 
**/ + export interface ListAllLogsParams { + /** Sorts the response according to the value of the specified property, in ascending or descending order. **/ + sort?: string; + /** A cacheable parameter that limits the results to those matching the specified filter. You must specify a filter query that includes a value for `language`, as well as a value for `workspace_id` or `request.context.metadata.deployment`. For more information, see the [documentation](https://console.bluemix.net/docs/services/conversation/filter-reference.html#filter-query-syntax). **/ + filter: string; + /** The number of records to return in each page of results. The default page limit is 100. **/ + page_limit?: number; + /** A token identifying the last value from the previous page of results. **/ + cursor?: string; } - export interface DeleteWorkspaceParams { + /** Parameters for the `listLogs` operation. **/ + export interface ListLogsParams { + /** The workspace ID. **/ + workspace_id: string; + /** Sorts the response according to the value of the specified property, in ascending or descending order. **/ + sort?: string; + /** A cacheable parameter that limits the results to those matching the specified filter. For more information, see the [documentation](https://console.bluemix.net/docs/services/conversation/filter-reference.html#filter-query-syntax). **/ + filter?: string; + /** The number of records to return in each page of results. The default page limit is 100. **/ + page_limit?: number; + /** A token identifying the last value from the previous page of results. **/ + cursor?: string; + } + + /** Parameters for the `createCounterexample` operation. **/ + export interface CreateCounterexampleParams { + /** The workspace ID. **/ workspace_id: string; + /** The text of a user input marked as irrelevant input. **/ + text: string; } - export interface GetWorkspaceParams { + /** Parameters for the `deleteCounterexample` operation. 
**/ + export interface DeleteCounterexampleParams { + /** The workspace ID. **/ workspace_id: string; - export?: boolean; + /** The text of a user input counterexample (for example, `What are you wearing?`). **/ + text: string; } - export interface ListWorkspacesParams { + /** Parameters for the `getCounterexample` operation. **/ + export interface GetCounterexampleParams { + /** The workspace ID. **/ + workspace_id: string; + /** The text of a user input counterexample (for example, `What are you wearing?`). **/ + text: string; + } + + /** Parameters for the `listCounterexamples` operation. **/ + export interface ListCounterexamplesParams { + /** The workspace ID. **/ + workspace_id: string; + /** The number of records to return in each page of results. The default page limit is 100. **/ page_limit?: number; + /** Whether to include information about the number of records returned. **/ include_count?: boolean; + /** Sorts the response according to the value of the specified property, in ascending or descending order. **/ sort?: string; + /** A token identifying the last value from the previous page of results. **/ cursor?: string; } - export interface UpdateWorkspaceParams { + /** Parameters for the `updateCounterexample` operation. **/ + export interface UpdateCounterexampleParams { + /** The workspace ID. **/ workspace_id: string; - name?: string; - description?: string; - language?: string; - intents?: CreateIntent[]; - entities?: CreateEntity[]; - dialog_nodes?: CreateDialogNode[]; - counterexamples?: CreateCounterexample[]; - metadata?: Object; - learning_opt_out?: boolean; + /** The text of a user input counterexample (for example, `What are you wearing?`). **/ + text: string; + /** The text of the example to be marked as irrelevant input. **/ + new_text?: string; } /************************* * model interfaces ************************/ + /** Context information for the message. 
Include the context from the previous response to maintain state for the conversation. **/ export interface Context { + /** The unique identifier of the conversation. **/ conversation_id: string; + /** For internal use only. **/ system: SystemResponse; } + /** Counterexample. **/ export interface Counterexample { + /** The text of the counterexample. **/ text: string; + /** The timestamp for creation of the counterexample. **/ created: string; + /** The timestamp for the last update to the counterexample. **/ updated: string; } + /** CounterexampleCollection. **/ export interface CounterexampleCollection { + /** An array of objects describing the examples marked as irrelevant input. **/ counterexamples: Counterexample[]; + /** An object defining the pagination data for the returned objects. **/ pagination: Pagination; } + /** CreateCounterexample. **/ export interface CreateCounterexample { + /** The text of a user input marked as irrelevant input. **/ text: string; } + /** CreateDialogNode. **/ export interface CreateDialogNode { + /** The dialog node ID. **/ dialog_node: string; + /** The description of the dialog node. **/ description?: string; + /** The condition that will trigger the dialog node. **/ conditions?: string; + /** The ID of the parent dialog node (if any). **/ parent?: string; + /** The previous dialog node. **/ previous_sibling?: string; + /** The output of the dialog node. **/ output?: Object; + /** The context for the dialog node. **/ context?: Object; + /** The metadata for the dialog node. **/ metadata?: Object; + /** The next step to execute following this dialog node. **/ next_step?: DialogNodeNextStep; + /** The actions for the dialog node. **/ actions?: DialogNodeAction[]; + /** The alias used to identify the dialog node. **/ title?: string; + /** How the dialog node is processed. **/ node_type?: string; + /** How an `event_handler` node is processed. 
**/ event_name?: string; + /** The location in the dialog context where output is stored. **/ variable?: string; } + /** CreateEntity. **/ export interface CreateEntity { + /** The name of the entity. **/ entity: string; + /** The description of the entity. **/ description?: string; + /** Any metadata related to the value. **/ metadata?: Object; + /** An array of entity values. **/ values?: CreateValue[]; + /** Whether to use fuzzy matching for the entity. **/ fuzzy_match?: boolean; } + /** CreateExample. **/ export interface CreateExample { + /** The text of a user input example. **/ text: string; } + /** CreateIntent. **/ export interface CreateIntent { + /** The name of the intent. **/ intent: string; + /** The description of the intent. **/ description?: string; + /** An array of user input examples. **/ examples?: CreateExample[]; } + /** CreateValue. **/ export interface CreateValue { + /** The text of the entity value. **/ value: string; + /** Any metadata related to the entity value. **/ metadata?: Object; + /** An array of synonyms for the entity value. **/ synonyms?: string[]; + /** An array of patterns for the entity value. A pattern is specified as a regular expression. **/ patterns?: string[]; + /** Specifies the type of value (`synonyms` or `patterns`). The default value is `synonyms`. **/ value_type?: string; } + /** DialogNode. **/ export interface DialogNode { + /** The dialog node ID. **/ dialog_node_id: string; + /** The description of the dialog node. **/ description: string; + /** The condition that triggers the dialog node. **/ conditions: string; + /** The ID of the parent dialog node. **/ parent: string; + /** The ID of the previous sibling dialog node. **/ previous_sibling: string; + /** The output of the dialog node. **/ output: Object; + /** The context (if defined) for the dialog node. **/ context: Object; + /** The metadata (if any) for the dialog node. **/ metadata: Object; + /** The next step to execute following this dialog node. 
**/ next_step: DialogNodeNextStep; + /** The timestamp for creation of the dialog node. **/ created: string; + /** The timestamp for the most recent update to the dialog node. **/ updated?: string; + /** The actions for the dialog node. **/ actions?: DialogNodeAction[]; + /** The alias used to identify the dialog node. **/ title: string; + /** How the dialog node is processed. **/ node_type?: string; + /** How an `event_handler` node is processed. **/ event_name?: string; + /** The location in the dialog context where output is stored. **/ variable?: string; } + /** DialogNodeAction. **/ export interface DialogNodeAction { + /** The name of the action. **/ name: string; + /** The type of action to invoke. **/ action_type?: string; + /** A map of key/value pairs to be provided to the action. **/ parameters?: Object; + /** The location in the dialog context where the result of the action is stored. **/ result_variable: string; + /** The name of the context variable that the client application will use to pass in credentials for the action. **/ credentials?: string; } + /** DialogNodeCollection. **/ export interface DialogNodeCollection { dialog_nodes: DialogNode[]; + /** An object defining the pagination data for the returned objects. **/ pagination: Pagination; } + /** The next step to execute following this dialog node. **/ export interface DialogNodeNextStep { + /** How the `next_step` reference is processed. **/ behavior: string; + /** The ID of the dialog node to process next. **/ dialog_node?: string; + /** Which part of the dialog node to process next. **/ selector?: string; } + /** Entity. **/ export interface Entity { + /** The name of the entity. **/ entity_name: string; + /** The timestamp for creation of the entity. **/ created: string; + /** The timestamp for the last update to the entity. **/ updated: string; + /** The description of the entity. **/ description?: string; + /** Any metadata related to the entity. 
**/ metadata?: Object; + /** Whether fuzzy matching is used for the entity. **/ fuzzy_match?: boolean; } + /** An array of entities. **/ export interface EntityCollection { + /** An array of entities. **/ entities: EntityExport[]; + /** An object defining the pagination data for the returned objects. **/ pagination: Pagination; } + /** Example. **/ export interface Example { + /** The text of the example. **/ example_text: string; + /** The timestamp for creation of the example. **/ created: string; + /** The timestamp for the last update to the example. **/ updated: string; } + /** ExampleCollection. **/ export interface ExampleCollection { + /** An array of Example objects describing the examples defined for the intent. **/ examples: Example[]; + /** An object defining the pagination data for the returned objects. **/ pagination: Pagination; } + /** An object defining the user input. **/ export interface InputData { + /** The text of the user input. **/ text: string; } + /** Intent. **/ export interface Intent { + /** The name of the intent. **/ intent_name: string; + /** The timestamp for creation of the intent. **/ created: string; + /** The timestamp for the last update to the intent. **/ updated: string; + /** The description of the intent. **/ description?: string; } + /** IntentCollection. **/ export interface IntentCollection { + /** An array of intents. **/ intents: IntentExport[]; + /** An object defining the pagination data for the returned objects. **/ pagination: Pagination; } + /** LogCollection. **/ export interface LogCollection { + /** An array of log events. **/ logs: LogExport[]; + /** An object defining the pagination data for the returned objects. **/ pagination: LogPagination; } + /** Log message details. **/ export interface LogMessage { + /** The severity of the message. **/ level: string; + /** The text of the message. **/ msg: string; } + /** The pagination data for the returned objects. 
**/ export interface LogPagination { + /** The URL that will return the next page of results. **/ next_url?: string; + /** Reserved for future use. **/ matched?: number; } + /** An input object that includes the input text. **/ export interface MessageInput { + /** The user's input. **/ text?: string; } + /** A request formatted for the Conversation service. **/ export interface MessageRequest { + /** An input object that includes the input text. **/ input?: InputData; + /** Whether to return more than one intent. Set to `true` to return all matching intents. **/ alternate_intents?: boolean; + /** State information for the conversation. Continue a conversation by including the context object from the previous response. **/ context?: Context; + /** Include the entities from the previous response when they do not need to change and to prevent Watson from trying to identify them. **/ entities?: RuntimeEntity[]; + /** An array of name-confidence pairs for the user input. Include the intents from the previous response when they do not need to change and to prevent Watson from trying to identify them. **/ intents?: RuntimeIntent[]; + /** System output. Include the output from the request when you have several requests within the same Dialog turn to pass back in the intermediate information. **/ output?: OutputData; } + /** An output object that includes the response to the user, the nodes that were hit, and messages from the log. **/ export interface OutputData { + /** Up to 50 messages logged with the request. **/ log_messages: LogMessage[]; + /** An array of responses to the user. **/ text: string[]; + /** An array of the nodes that were triggered to create the response. **/ nodes_visited?: string[]; } + /** The pagination data for the returned objects. **/ export interface Pagination { + /** The URL that will return the same page of results. **/ refresh_url: string; + /** The URL that will return the next page of results. 
**/ next_url?: string; + /** Reserved for future use. **/ total?: number; + /** Reserved for future use. **/ matched?: number; } + /** A term from the request that was identified as an entity. **/ export interface RuntimeEntity { + /** The recognized entity from a term in the input. **/ entity: string; + /** Zero-based character offsets that indicate where the entity value begins and ends in the input text. **/ location: number[]; + /** The term in the input text that was recognized. **/ value: string; + /** A decimal percentage that represents Watson's confidence in the entity. **/ confidence?: number; + /** The metadata for the entity. **/ metadata?: Object; } + /** An intent identified in the user input. **/ export interface RuntimeIntent { + /** The name of the recognized intent. **/ intent: string; + /** A decimal percentage that represents Watson's confidence in the intent. **/ confidence: number; } + /** Synonym. **/ export interface Synonym { + /** The text of the synonym. **/ synonym_text: string; + /** The timestamp for creation of the synonym. **/ created: string; + /** The timestamp for the most recent update to the synonym. **/ updated: string; } + /** SynonymCollection. **/ export interface SynonymCollection { + /** An array of synonyms. **/ synonyms: Synonym[]; + /** An object defining the pagination data for the returned objects. **/ pagination: Pagination; } - export interface SystemResponse { - } + /** For internal use only. **/ + export interface SystemResponse {} + /** Value. **/ export interface Value { + /** The text of the entity value. **/ value_text: string; + /** Any metadata related to the entity value. **/ metadata?: Object; + /** The timestamp for creation of the entity value. **/ created: string; + /** The timestamp for the last update to the entity value. **/ updated: string; + /** An array of synonyms for the entity value. **/ synonyms?: string[]; + /** An array of patterns for the entity value. 
A pattern is specified as a regular expression. **/ patterns?: string[]; + /** Specifies the type of value (`synonyms` or `patterns`). The default value is `synonyms`. **/ value_type: string; } + /** ValueCollection. **/ export interface ValueCollection { + /** An array of entity values. **/ values: ValueExport[]; + /** An object defining the pagination data for the returned objects. **/ pagination: Pagination; } + /** Workspace. **/ export interface Workspace { + /** The name of the workspace. **/ name: string; + /** The language of the workspace. **/ language: string; + /** The timestamp for creation of the workspace. **/ created: string; + /** The timestamp for the last update to the workspace. **/ updated: string; + /** The workspace ID. **/ workspace_id: string; + /** The description of the workspace. **/ description?: string; + /** Any metadata that is required by the workspace. **/ metadata?: Object; + /** Whether training data from the workspace can be used by IBM for general service improvements. `true` indicates that workspace training data is not to be used. **/ learning_opt_out?: boolean; } + /** WorkspaceCollection. **/ export interface WorkspaceCollection { + /** An array of workspaces. **/ workspaces: Workspace[]; + /** An object defining the pagination data for the returned objects. **/ pagination: Pagination; } + /** EntityExport. **/ export interface EntityExport { + /** The name of the entity. **/ entity_name: string; + /** The timestamp for creation of the entity. **/ created: string; + /** The timestamp for the last update to the entity. **/ updated: string; + /** The description of the entity. **/ description?: string; + /** Any metadata related to the entity. **/ metadata?: Object; + /** Whether fuzzy matching is used for the entity. **/ fuzzy_match?: boolean; + /** An array of entity values. **/ values?: ValueExport[]; } + /** IntentExport. **/ export interface IntentExport { + /** The name of the intent. 
**/ intent_name: string; + /** The timestamp for creation of the intent. **/ created: string; + /** The timestamp for the last update to the intent. **/ updated: string; + /** The description of the intent. **/ description?: string; + /** An array of user input examples. **/ examples?: Example[]; } + /** LogExport. **/ export interface LogExport { + /** A request formatted for the Conversation service. **/ request: MessageRequest; + /** A response from the Conversation service. **/ response: MessageResponse; + /** A unique identifier for the logged message. **/ log_id: string; + /** The timestamp for receipt of the message. **/ request_timestamp: string; + /** The timestamp for the system response to the message. **/ response_timestamp: string; + /** The workspace ID. **/ workspace_id: string; + /** The language of the workspace where the message request was made. **/ language: string; } + /** A response from the Conversation service. **/ export interface MessageResponse { + /** The user input from the request. **/ input?: MessageInput; + /** An array of intents recognized in the user input, sorted in descending order of confidence. **/ intents: RuntimeIntent[]; + /** An array of entities identified in the user input. **/ entities: RuntimeEntity[]; + /** Whether to return more than one intent. `true` indicates that all matching intents are returned. **/ alternate_intents?: boolean; + /** State information for the conversation. **/ context: Context; + /** Output from the dialog, including the response to the user, the nodes that were triggered, and log messages. **/ output: OutputData; } + /** ValueExport. **/ export interface ValueExport { + /** The text of the entity value. **/ value_text: string; + /** Any metadata related to the entity value. **/ metadata?: Object; + /** The timestamp for creation of the entity value. **/ created: string; + /** The timestamp for the last update to the entity value. **/ updated: string; + /** An array of synonyms. 
**/ synonyms?: string[]; + /** An array of patterns for the entity value. A pattern is specified as a regular expression. **/ patterns?: string[]; + /** Specifies the type of value (`synonyms` or `patterns`). The default value is `synonyms`. **/ value_type: string; } + /** WorkspaceExport. **/ export interface WorkspaceExport { + /** The name of the workspace. **/ name: string; + /** The description of the workspace. **/ description: string; + /** The language of the workspace. **/ language: string; + /** Any metadata that is required by the workspace. **/ metadata: Object; + /** The timestamp for creation of the workspace. **/ created: string; + /** The timestamp for the last update to the workspace. **/ updated: string; + /** The workspace ID. **/ workspace_id: string; + /** The current status of the workspace. **/ status: string; + /** Whether training data from the workspace can be used by IBM for general service improvements. `true` indicates that workspace training data is not to be used. **/ learning_opt_out: boolean; + /** An array of intents. **/ intents?: IntentExport[]; + /** An array of entities. **/ entities?: EntityExport[]; + /** An array of counterexamples. **/ counterexamples?: Counterexample[]; + /** An array of objects describing the dialog nodes in the workspace. 
**/ dialog_nodes?: DialogNode[]; } - } -export = ConversationV1; +export = GeneratedConversationV1; diff --git a/discovery/v1-generated.ts b/discovery/v1-generated.ts index 07985d59c0..468b2a0ded 100644 --- a/discovery/v1-generated.ts +++ b/discovery/v1-generated.ts @@ -26,17 +26,18 @@ import { FileObject } from '../lib/helper'; */ class GeneratedDiscoveryV1 extends BaseService { + name: string; // set by prototype to 'discovery' version: string; // set by prototype to 'v1' static VERSION_DATE_2017_09_01: string = '2017-09-01'; - + static VERSION_DATE_2017_08_01: string = '2017-08-01'; - + static VERSION_DATE_2017_07_19: string = '2017-07-19'; - + static VERSION_DATE_2017_06_25: string = '2017-06-25'; - + static VERSION_DATE_2016_12_01: string = '2016-12-01'; static URL: string = 'https://gateway.watsonplatform.net/discovery/api'; @@ -60,13 +61,15 @@ class GeneratedDiscoveryV1 extends BaseService { super(options); // check if 'version_date' was provided if (typeof this._options.version_date === 'undefined') { - throw new Error( - 'Argument error: version_date was not specified, use GeneratedDiscoveryV1.VERSION_DATE_2017_09_01' - ); + throw new Error('Argument error: version_date was not specified'); } this._options.qs.version = options.version_date; } + /************************* + * environments + ************************/ + /** * Add an environment. * @@ -79,18 +82,15 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - createEnvironment( - params: GeneratedDiscoveryV1.CreateEnvironmentParams, - callback?: GeneratedDiscoveryV1.Callback - ): ReadableStream | void { + createEnvironment(params: GeneratedDiscoveryV1.CreateEnvironmentParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['name']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const body = { + const body = { name: _params.name, description: _params.description, size: _params.size @@ -100,17 +100,17 @@ class GeneratedDiscoveryV1 extends BaseService { url: '/v1/environments', method: 'POST', json: true, - body: body + body: body, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * Delete environment. @@ -120,37 +120,32 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - deleteEnvironment( - params: GeneratedDiscoveryV1.DeleteEnvironmentParams, - callback?: GeneratedDiscoveryV1.Callback< - GeneratedDiscoveryV1.DeleteEnvironmentResponse - > - ): ReadableStream | void { + deleteEnvironment(params: GeneratedDiscoveryV1.DeleteEnvironmentParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['environment_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { + const path = { environment_id: _params.environment_id }; const parameters = { options: { url: '/v1/environments/{environment_id}', method: 'DELETE', - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * Get environment info. @@ -160,35 +155,32 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - getEnvironment( - params: GeneratedDiscoveryV1.GetEnvironmentParams, - callback?: GeneratedDiscoveryV1.Callback - ): ReadableStream | void { + getEnvironment(params: GeneratedDiscoveryV1.GetEnvironmentParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['environment_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { + const path = { environment_id: _params.environment_id }; const parameters = { options: { url: '/v1/environments/{environment_id}', method: 'GET', - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * List environments. @@ -200,32 +192,27 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - listEnvironments( - params?: GeneratedDiscoveryV1.ListEnvironmentsParams, - callback?: GeneratedDiscoveryV1.Callback< - GeneratedDiscoveryV1.ListEnvironmentsResponse - > - ): ReadableStream | void { - const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; - const query = { + listEnvironments(params?: GeneratedDiscoveryV1.ListEnvironmentsParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { + const _params = (typeof params === 'function' && !callback) ? {} : extend({}, params); + const _callback = (typeof params === 'function' && !callback) ? params : (callback) ? 
callback : () => {}; + const query = { name: _params.name }; const parameters = { options: { url: '/v1/environments', method: 'GET', - qs: query + qs: query, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * List fields in specified collecitons. @@ -238,23 +225,18 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - listFields( - params: GeneratedDiscoveryV1.ListFieldsParams, - callback?: GeneratedDiscoveryV1.Callback< - GeneratedDiscoveryV1.ListCollectionFieldsResponse - > - ): ReadableStream | void { + listFields(params: GeneratedDiscoveryV1.ListFieldsParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['environment_id', 'collection_ids']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const query = { + const query = { collection_ids: _params.collection_ids }; - const path = { + const path = { environment_id: _params.environment_id }; const parameters = { @@ -262,17 +244,17 @@ class GeneratedDiscoveryV1 extends BaseService { url: '/v1/environments/{environment_id}/fields', method: 'GET', qs: query, - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * Update an environment. @@ -286,22 +268,19 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - updateEnvironment( - params: GeneratedDiscoveryV1.UpdateEnvironmentParams, - callback?: GeneratedDiscoveryV1.Callback - ): ReadableStream | void { + updateEnvironment(params: GeneratedDiscoveryV1.UpdateEnvironmentParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['environment_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const body = { + const body = { name: _params.name, description: _params.description }; - const path = { + const path = { environment_id: _params.environment_id }; const parameters = { @@ -310,17 +289,21 @@ class GeneratedDiscoveryV1 extends BaseService { method: 'PUT', json: true, body: body, - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; + + /************************* + * configurations + ************************/ /** * Add configuration. @@ -337,25 +320,22 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - createConfiguration( - params: GeneratedDiscoveryV1.CreateConfigurationParams, - callback?: GeneratedDiscoveryV1.Callback - ): ReadableStream | void { + createConfiguration(params: GeneratedDiscoveryV1.CreateConfigurationParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['environment_id', 'name']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const body = { + const body = { name: _params.name, description: _params.description, conversions: _params.conversions, enrichments: _params.enrichments, normalizations: _params.normalizations }; - const path = { + const path = { environment_id: _params.environment_id }; const parameters = { @@ -364,17 +344,17 @@ class GeneratedDiscoveryV1 extends BaseService { method: 'POST', json: true, body: body, - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * Delete a configuration. @@ -387,39 +367,33 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - deleteConfiguration( - params: GeneratedDiscoveryV1.DeleteConfigurationParams, - callback?: GeneratedDiscoveryV1.Callback< - GeneratedDiscoveryV1.DeleteConfigurationResponse - > - ): ReadableStream | void { + deleteConfiguration(params: GeneratedDiscoveryV1.DeleteConfigurationParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['environment_id', 'configuration_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { + const path = { environment_id: _params.environment_id, configuration_id: _params.configuration_id }; const parameters = { options: { - url: - '/v1/environments/{environment_id}/configurations/{configuration_id}', + url: '/v1/environments/{environment_id}/configurations/{configuration_id}', method: 'DELETE', - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * Get configuration details. @@ -430,37 +404,33 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - getConfiguration( - params: GeneratedDiscoveryV1.GetConfigurationParams, - callback?: GeneratedDiscoveryV1.Callback - ): ReadableStream | void { + getConfiguration(params: GeneratedDiscoveryV1.GetConfigurationParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['environment_id', 'configuration_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { + const path = { environment_id: _params.environment_id, configuration_id: _params.configuration_id }; const parameters = { options: { - url: - '/v1/environments/{environment_id}/configurations/{configuration_id}', + url: '/v1/environments/{environment_id}/configurations/{configuration_id}', method: 'GET', - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * List configurations. @@ -473,23 +443,18 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - listConfigurations( - params: GeneratedDiscoveryV1.ListConfigurationsParams, - callback?: GeneratedDiscoveryV1.Callback< - GeneratedDiscoveryV1.ListConfigurationsResponse - > - ): ReadableStream | void { + listConfigurations(params: GeneratedDiscoveryV1.ListConfigurationsParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['environment_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const query = { + const query = { name: _params.name }; - const path = { + const path = { environment_id: _params.environment_id }; const parameters = { @@ -497,17 +462,17 @@ class GeneratedDiscoveryV1 extends BaseService { url: '/v1/environments/{environment_id}/configurations', method: 'GET', qs: query, - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * Update a configuration. @@ -525,46 +490,46 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - updateConfiguration( - params: GeneratedDiscoveryV1.UpdateConfigurationParams, - callback?: GeneratedDiscoveryV1.Callback - ): ReadableStream | void { + updateConfiguration(params: GeneratedDiscoveryV1.UpdateConfigurationParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['environment_id', 'configuration_id', 'name']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const body = { + const body = { name: _params.name, description: _params.description, conversions: _params.conversions, enrichments: _params.enrichments, normalizations: _params.normalizations }; - const path = { + const path = { environment_id: _params.environment_id, configuration_id: _params.configuration_id }; const parameters = { options: { - url: - '/v1/environments/{environment_id}/configurations/{configuration_id}', + url: '/v1/environments/{environment_id}/configurations/{configuration_id}', method: 'PUT', json: true, body: body, - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; + + /************************* + * testYourConfigurationOnADocument + ************************/ /** * Test configuration. @@ -582,12 +547,9 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - testConfigurationInEnvironment( - params: GeneratedDiscoveryV1.TestConfigurationInEnvironmentParams, - callback?: GeneratedDiscoveryV1.Callback - ): ReadableStream | void { + testConfigurationInEnvironment(params: GeneratedDiscoveryV1.TestConfigurationInEnvironmentParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['environment_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { @@ -595,14 +557,17 @@ class GeneratedDiscoveryV1 extends BaseService { } const formData = { configuration: _params.configuration, - file: { data: _params.file, contentType: _params.file_content_type }, + file: { + data: _params.file, + contentType: _params.file_content_type + }, metadata: _params.metadata }; - const query = { + const query = { step: _params.step, configuration_id: _params.configuration_id }; - const path = { + const path = { environment_id: _params.environment_id }; const parameters = { @@ -613,15 +578,19 @@ class GeneratedDiscoveryV1 extends BaseService { path: path, formData: formData }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'multipart/form-data' + 'Accept': 'application/json', + 'Content-Type': 'multipart/form-data', } }) }; return createRequest(parameters, _callback); - } + }; + + /************************* + * collections + ************************/ /** * Create a collection. @@ -635,24 +604,21 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - createCollection( - params: GeneratedDiscoveryV1.CreateCollectionParams, - callback?: GeneratedDiscoveryV1.Callback - ): ReadableStream | void { + createCollection(params: GeneratedDiscoveryV1.CreateCollectionParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['environment_id', 'name']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const body = { + const body = { name: _params.name, description: _params.description, configuration_id: _params.configuration_id, language: _params.language }; - const path = { + const path = { environment_id: _params.environment_id }; const parameters = { @@ -661,17 +627,17 @@ class GeneratedDiscoveryV1 extends BaseService { method: 'POST', json: true, body: body, - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * Delete a collection. @@ -682,20 +648,15 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - deleteCollection( - params: GeneratedDiscoveryV1.DeleteCollectionParams, - callback?: GeneratedDiscoveryV1.Callback< - GeneratedDiscoveryV1.DeleteCollectionResponse - > - ): ReadableStream | void { + deleteCollection(params: GeneratedDiscoveryV1.DeleteCollectionParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['environment_id', 'collection_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { + const path = { environment_id: _params.environment_id, collection_id: _params.collection_id }; @@ -703,17 +664,17 @@ class GeneratedDiscoveryV1 extends BaseService { options: { url: '/v1/environments/{environment_id}/collections/{collection_id}', method: 'DELETE', - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * Get collection details. @@ -724,18 +685,15 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - getCollection( - params: GeneratedDiscoveryV1.GetCollectionParams, - callback?: GeneratedDiscoveryV1.Callback - ): ReadableStream | void { + getCollection(params: GeneratedDiscoveryV1.GetCollectionParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['environment_id', 'collection_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { + const path = { environment_id: _params.environment_id, collection_id: _params.collection_id }; @@ -743,17 +701,17 @@ class GeneratedDiscoveryV1 extends BaseService { options: { url: '/v1/environments/{environment_id}/collections/{collection_id}', method: 'GET', - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * List unique fields. @@ -766,39 +724,33 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - listCollectionFields( - params: GeneratedDiscoveryV1.ListCollectionFieldsParams, - callback?: GeneratedDiscoveryV1.Callback< - GeneratedDiscoveryV1.ListCollectionFieldsResponse - > - ): ReadableStream | void { + listCollectionFields(params: GeneratedDiscoveryV1.ListCollectionFieldsParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['environment_id', 'collection_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { + const path = { environment_id: _params.environment_id, collection_id: _params.collection_id }; const parameters = { options: { - url: - '/v1/environments/{environment_id}/collections/{collection_id}/fields', + url: '/v1/environments/{environment_id}/collections/{collection_id}/fields', method: 'GET', - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * List collections. @@ -811,23 +763,18 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - listCollections( - params: GeneratedDiscoveryV1.ListCollectionsParams, - callback?: GeneratedDiscoveryV1.Callback< - GeneratedDiscoveryV1.ListCollectionsResponse - > - ): ReadableStream | void { + listCollections(params: GeneratedDiscoveryV1.ListCollectionsParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['environment_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const query = { + const query = { name: _params.name }; - const path = { + const path = { environment_id: _params.environment_id }; const parameters = { @@ -835,17 +782,17 @@ class GeneratedDiscoveryV1 extends BaseService { url: '/v1/environments/{environment_id}/collections', method: 'GET', qs: query, - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * Update a collection. @@ -859,23 +806,20 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - updateCollection( - params: GeneratedDiscoveryV1.UpdateCollectionParams, - callback?: GeneratedDiscoveryV1.Callback - ): ReadableStream | void { + updateCollection(params: GeneratedDiscoveryV1.UpdateCollectionParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['environment_id', 'collection_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const body = { + const body = { name: _params.name, description: _params.description, configuration_id: _params.configuration_id }; - const path = { + const path = { environment_id: _params.environment_id, collection_id: _params.collection_id }; @@ -885,17 +829,21 @@ class GeneratedDiscoveryV1 extends BaseService { method: 'PUT', json: true, body: body, - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; + + /************************* + * documents + ************************/ /** * Add a document. @@ -911,44 +859,41 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - addDocument( - params: GeneratedDiscoveryV1.AddDocumentParams, - callback?: GeneratedDiscoveryV1.Callback< - GeneratedDiscoveryV1.DocumentAccepted - > - ): ReadableStream | void { + addDocument(params: GeneratedDiscoveryV1.AddDocumentParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['environment_id', 'collection_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } const formData = { - file: { data: _params.file, contentType: _params.file_content_type }, + file: { + data: _params.file, + contentType: _params.file_content_type + }, metadata: _params.metadata }; - const path = { + const path = { environment_id: _params.environment_id, collection_id: _params.collection_id }; const parameters = { options: { - url: - '/v1/environments/{environment_id}/collections/{collection_id}/documents', + url: '/v1/environments/{environment_id}/collections/{collection_id}/documents', method: 'POST', path: path, formData: formData }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'multipart/form-data' + 'Accept': 'application/json', + 'Content-Type': 'multipart/form-data', } }) }; return createRequest(parameters, _callback); - } + }; /** * Delete a document. @@ -962,40 +907,34 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - deleteDocument( - params: GeneratedDiscoveryV1.DeleteDocumentParams, - callback?: GeneratedDiscoveryV1.Callback< - GeneratedDiscoveryV1.DeleteDocumentResponse - > - ): ReadableStream | void { + deleteDocument(params: GeneratedDiscoveryV1.DeleteDocumentParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['environment_id', 'collection_id', 'document_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { + const path = { environment_id: _params.environment_id, collection_id: _params.collection_id, document_id: _params.document_id }; const parameters = { options: { - url: - '/v1/environments/{environment_id}/collections/{collection_id}/documents/{document_id}', + url: '/v1/environments/{environment_id}/collections/{collection_id}/documents/{document_id}', method: 'DELETE', - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * Get document details. @@ -1009,40 +948,34 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - getDocumentStatus( - params: GeneratedDiscoveryV1.GetDocumentStatusParams, - callback?: GeneratedDiscoveryV1.Callback< - GeneratedDiscoveryV1.DocumentStatus - > - ): ReadableStream | void { + getDocumentStatus(params: GeneratedDiscoveryV1.GetDocumentStatusParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['environment_id', 'collection_id', 'document_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { + const path = { environment_id: _params.environment_id, collection_id: _params.collection_id, document_id: _params.document_id }; const parameters = { options: { - url: - '/v1/environments/{environment_id}/collections/{collection_id}/documents/{document_id}', + url: '/v1/environments/{environment_id}/collections/{collection_id}/documents/{document_id}', method: 'GET', - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * Update a document. @@ -1059,45 +992,46 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - updateDocument( - params: GeneratedDiscoveryV1.UpdateDocumentParams, - callback?: GeneratedDiscoveryV1.Callback< - GeneratedDiscoveryV1.DocumentAccepted - > - ): ReadableStream | void { + updateDocument(params: GeneratedDiscoveryV1.UpdateDocumentParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['environment_id', 'collection_id', 'document_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } const formData = { - file: { data: _params.file, contentType: _params.file_content_type }, + file: { + data: _params.file, + contentType: _params.file_content_type + }, metadata: _params.metadata }; - const path = { + const path = { environment_id: _params.environment_id, collection_id: _params.collection_id, document_id: _params.document_id }; const parameters = { options: { - url: - '/v1/environments/{environment_id}/collections/{collection_id}/documents/{document_id}', + url: '/v1/environments/{environment_id}/collections/{collection_id}/documents/{document_id}', method: 'POST', path: path, formData: formData }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'multipart/form-data' + 'Accept': 'application/json', + 'Content-Type': 'multipart/form-data', } }) }; return createRequest(parameters, _callback); - } + }; + + /************************* + * queries + ************************/ /** * Query documents in multiple collections. @@ -1121,18 +1055,15 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - federatedQuery( - params: GeneratedDiscoveryV1.FederatedQueryParams, - callback?: GeneratedDiscoveryV1.Callback - ): ReadableStream | void { + federatedQuery(params: GeneratedDiscoveryV1.FederatedQueryParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['environment_id', 'collection_ids']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const query = { + const query = { collection_ids: _params.collection_ids, filter: _params.filter, query: _params.query, @@ -1144,9 +1075,9 @@ class GeneratedDiscoveryV1 extends BaseService { sort: _params.sort, highlight: _params.highlight, deduplicate: _params.deduplicate, - deduplicate_field: _params.deduplicate_field + 'deduplicate.field': _params.deduplicate_field }; - const path = { + const path = { environment_id: _params.environment_id }; const parameters = { @@ -1154,17 +1085,17 @@ class GeneratedDiscoveryV1 extends BaseService { url: '/v1/environments/{environment_id}/query', method: 'GET', qs: query, - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * Query multiple collection system notices. @@ -1187,20 +1118,15 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - federatedQueryNotices( - params: GeneratedDiscoveryV1.FederatedQueryNoticesParams, - callback?: GeneratedDiscoveryV1.Callback< - GeneratedDiscoveryV1.QueryNoticesResponse - > - ): ReadableStream | void { + federatedQueryNotices(params: GeneratedDiscoveryV1.FederatedQueryNoticesParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['environment_id', 'collection_ids']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const query = { + const query = { collection_ids: _params.collection_ids, filter: _params.filter, query: _params.query, @@ -1211,9 +1137,9 @@ class GeneratedDiscoveryV1 extends BaseService { offset: _params.offset, sort: _params.sort, highlight: _params.highlight, - deduplicate_field: _params.deduplicate_field + 'deduplicate.field': _params.deduplicate_field }; - const path = { + const path = { environment_id: _params.environment_id }; const parameters = { @@ -1221,17 +1147,17 @@ class GeneratedDiscoveryV1 extends BaseService { url: '/v1/environments/{environment_id}/notices', method: 'GET', qs: query, - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * Query documents. @@ -1259,18 +1185,15 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - query( - params: GeneratedDiscoveryV1.QueryParams, - callback?: GeneratedDiscoveryV1.Callback - ): ReadableStream | void { + query(params: GeneratedDiscoveryV1.QueryParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['environment_id', 'collection_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const query = { + const query = { filter: _params.filter, query: _params.query, natural_language_query: _params.natural_language_query, @@ -1281,33 +1204,32 @@ class GeneratedDiscoveryV1 extends BaseService { offset: _params.offset, sort: _params.sort, highlight: _params.highlight, - passages_fields: _params.passages_fields, - passages_count: _params.passages_count, - passages_characters: _params.passages_characters, + 'passages.fields': _params.passages_fields, + 'passages.count': _params.passages_count, + 'passages.characters': _params.passages_characters, deduplicate: _params.deduplicate, - deduplicate_field: _params.deduplicate_field + 'deduplicate.field': _params.deduplicate_field }; - const path = { + const path = { environment_id: _params.environment_id, collection_id: _params.collection_id }; const parameters = { options: { - url: - '/v1/environments/{environment_id}/collections/{collection_id}/query', + url: '/v1/environments/{environment_id}/collections/{collection_id}/query', method: 'GET', qs: query, - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * Query system notices. @@ -1334,20 +1256,15 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. 
* @returns {ReadableStream|void} */ - queryNotices( - params: GeneratedDiscoveryV1.QueryNoticesParams, - callback?: GeneratedDiscoveryV1.Callback< - GeneratedDiscoveryV1.QueryNoticesResponse - > - ): ReadableStream | void { + queryNotices(params: GeneratedDiscoveryV1.QueryNoticesParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? callback : () => {}; const requiredParams = ['environment_id', 'collection_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const query = { + const query = { filter: _params.filter, query: _params.query, natural_language_query: _params.natural_language_query, @@ -1358,32 +1275,35 @@ class GeneratedDiscoveryV1 extends BaseService { offset: _params.offset, sort: _params.sort, highlight: _params.highlight, - passages_fields: _params.passages_fields, - passages_count: _params.passages_count, - passages_characters: _params.passages_characters, - deduplicate_field: _params.deduplicate_field + 'passages.fields': _params.passages_fields, + 'passages.count': _params.passages_count, + 'passages.characters': _params.passages_characters, + 'deduplicate.field': _params.deduplicate_field }; - const path = { + const path = { environment_id: _params.environment_id, collection_id: _params.collection_id }; const parameters = { options: { - url: - '/v1/environments/{environment_id}/collections/{collection_id}/notices', + url: '/v1/environments/{environment_id}/collections/{collection_id}/notices', method: 'GET', qs: query, - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return 
createRequest(parameters, _callback); - } + }; + + /************************* + * trainingData + ************************/ /** * @@ -1399,44 +1319,40 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - addTrainingData( - params: GeneratedDiscoveryV1.AddTrainingDataParams, - callback?: GeneratedDiscoveryV1.Callback - ): ReadableStream | void { + addTrainingData(params: GeneratedDiscoveryV1.AddTrainingDataParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? callback : () => {}; const requiredParams = ['environment_id', 'collection_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const body = { + const body = { natural_language_query: _params.natural_language_query, filter: _params.filter, examples: _params.examples }; - const path = { + const path = { environment_id: _params.environment_id, collection_id: _params.collection_id }; const parameters = { options: { - url: - '/v1/environments/{environment_id}/collections/{collection_id}/training_data', + url: '/v1/environments/{environment_id}/collections/{collection_id}/training_data', method: 'POST', json: true, body: body, - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * @@ -1453,47 +1369,41 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. 
* @returns {ReadableStream|void} */ - createTrainingExample( - params: GeneratedDiscoveryV1.CreateTrainingExampleParams, - callback?: GeneratedDiscoveryV1.Callback< - GeneratedDiscoveryV1.TrainingExample - > - ): ReadableStream | void { + createTrainingExample(params: GeneratedDiscoveryV1.CreateTrainingExampleParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? callback : () => {}; const requiredParams = ['environment_id', 'collection_id', 'query_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const body = { + const body = { document_id: _params.document_id, cross_reference: _params.cross_reference, relevance: _params.relevance }; - const path = { + const path = { environment_id: _params.environment_id, collection_id: _params.collection_id, query_id: _params.query_id }; const parameters = { options: { - url: - '/v1/environments/{environment_id}/collections/{collection_id}/training_data/{query_id}/examples', + url: '/v1/environments/{environment_id}/collections/{collection_id}/training_data/{query_id}/examples', method: 'POST', json: true, body: body, - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * @@ -1506,37 +1416,33 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. 
* @returns {ReadableStream|void} */ - deleteAllTrainingData( - params: GeneratedDiscoveryV1.DeleteAllTrainingDataParams, - callback?: GeneratedDiscoveryV1.Callback - ): ReadableStream | void { + deleteAllTrainingData(params: GeneratedDiscoveryV1.DeleteAllTrainingDataParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? callback : () => {}; const requiredParams = ['environment_id', 'collection_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { + const path = { environment_id: _params.environment_id, collection_id: _params.collection_id }; const parameters = { options: { - url: - '/v1/environments/{environment_id}/collections/{collection_id}/training_data', + url: '/v1/environments/{environment_id}/collections/{collection_id}/training_data', method: 'DELETE', - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * @@ -1550,38 +1456,34 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - deleteTrainingData( - params: GeneratedDiscoveryV1.DeleteTrainingDataParams, - callback?: GeneratedDiscoveryV1.Callback - ): ReadableStream | void { + deleteTrainingData(params: GeneratedDiscoveryV1.DeleteTrainingDataParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['environment_id', 'collection_id', 'query_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { + const path = { environment_id: _params.environment_id, collection_id: _params.collection_id, query_id: _params.query_id }; const parameters = { options: { - url: - '/v1/environments/{environment_id}/collections/{collection_id}/training_data/{query_id}', + url: '/v1/environments/{environment_id}/collections/{collection_id}/training_data/{query_id}', method: 'DELETE', - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * @@ -1596,23 +1498,15 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - deleteTrainingExample( - params: GeneratedDiscoveryV1.DeleteTrainingExampleParams, - callback?: GeneratedDiscoveryV1.Callback - ): ReadableStream | void { + deleteTrainingExample(params: GeneratedDiscoveryV1.DeleteTrainingExampleParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; - const requiredParams = [ - 'environment_id', - 'collection_id', - 'query_id', - 'example_id' - ]; + const _callback = (callback) ? 
callback : () => {}; + const requiredParams = ['environment_id', 'collection_id', 'query_id', 'example_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { + const path = { environment_id: _params.environment_id, collection_id: _params.collection_id, query_id: _params.query_id, @@ -1620,20 +1514,19 @@ class GeneratedDiscoveryV1 extends BaseService { }; const parameters = { options: { - url: - '/v1/environments/{environment_id}/collections/{collection_id}/training_data/{query_id}/examples/{example_id}', + url: '/v1/environments/{environment_id}/collections/{collection_id}/training_data/{query_id}/examples/{example_id}', method: 'DELETE', - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * @@ -1647,38 +1540,34 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - getTrainingData( - params: GeneratedDiscoveryV1.GetTrainingDataParams, - callback?: GeneratedDiscoveryV1.Callback - ): ReadableStream | void { + getTrainingData(params: GeneratedDiscoveryV1.GetTrainingDataParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['environment_id', 'collection_id', 'query_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { + const path = { environment_id: _params.environment_id, collection_id: _params.collection_id, query_id: _params.query_id }; const parameters = { options: { - url: - '/v1/environments/{environment_id}/collections/{collection_id}/training_data/{query_id}', + url: '/v1/environments/{environment_id}/collections/{collection_id}/training_data/{query_id}', method: 'GET', - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * @@ -1693,25 +1582,15 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - getTrainingExample( - params: GeneratedDiscoveryV1.GetTrainingExampleParams, - callback?: GeneratedDiscoveryV1.Callback< - GeneratedDiscoveryV1.TrainingExample - > - ): ReadableStream | void { + getTrainingExample(params: GeneratedDiscoveryV1.GetTrainingExampleParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; - const requiredParams = [ - 'environment_id', - 'collection_id', - 'query_id', - 'example_id' - ]; + const _callback = (callback) ? 
callback : () => {}; + const requiredParams = ['environment_id', 'collection_id', 'query_id', 'example_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { + const path = { environment_id: _params.environment_id, collection_id: _params.collection_id, query_id: _params.query_id, @@ -1719,20 +1598,19 @@ class GeneratedDiscoveryV1 extends BaseService { }; const parameters = { options: { - url: - '/v1/environments/{environment_id}/collections/{collection_id}/training_data/{query_id}/examples/{example_id}', + url: '/v1/environments/{environment_id}/collections/{collection_id}/training_data/{query_id}/examples/{example_id}', method: 'GET', - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * @@ -1745,39 +1623,33 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - listTrainingData( - params: GeneratedDiscoveryV1.ListTrainingDataParams, - callback?: GeneratedDiscoveryV1.Callback< - GeneratedDiscoveryV1.TrainingDataSet - > - ): ReadableStream | void { + listTrainingData(params: GeneratedDiscoveryV1.ListTrainingDataParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['environment_id', 'collection_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { + const path = { environment_id: _params.environment_id, collection_id: _params.collection_id }; const parameters = { options: { - url: - '/v1/environments/{environment_id}/collections/{collection_id}/training_data', + url: '/v1/environments/{environment_id}/collections/{collection_id}/training_data', method: 'GET', - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * @@ -1791,40 +1663,34 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - listTrainingExamples( - params: GeneratedDiscoveryV1.ListTrainingExamplesParams, - callback?: GeneratedDiscoveryV1.Callback< - GeneratedDiscoveryV1.TrainingExampleList - > - ): ReadableStream | void { + listTrainingExamples(params: GeneratedDiscoveryV1.ListTrainingExamplesParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = (callback) ? 
callback : () => {}; const requiredParams = ['environment_id', 'collection_id', 'query_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { + const path = { environment_id: _params.environment_id, collection_id: _params.collection_id, query_id: _params.query_id }; const parameters = { options: { - url: - '/v1/environments/{environment_id}/collections/{collection_id}/training_data/{query_id}/examples', + url: '/v1/environments/{environment_id}/collections/{collection_id}/training_data/{query_id}/examples', method: 'GET', - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; /** * @@ -1841,29 +1707,19 @@ class GeneratedDiscoveryV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - updateTrainingExample( - params: GeneratedDiscoveryV1.UpdateTrainingExampleParams, - callback?: GeneratedDiscoveryV1.Callback< - GeneratedDiscoveryV1.TrainingExample - > - ): ReadableStream | void { + updateTrainingExample(params: GeneratedDiscoveryV1.UpdateTrainingExampleParams, callback?: GeneratedDiscoveryV1.Callback): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; - const requiredParams = [ - 'environment_id', - 'collection_id', - 'query_id', - 'example_id' - ]; + const _callback = (callback) ? 
callback : () => {}; + const requiredParams = ['environment_id', 'collection_id', 'query_id', 'example_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const body = { + const body = { cross_reference: _params.cross_reference, relevance: _params.relevance }; - const path = { + const path = { environment_id: _params.environment_id, collection_id: _params.collection_id, query_id: _params.query_id, @@ -1871,36 +1727,34 @@ class GeneratedDiscoveryV1 extends BaseService { }; const parameters = { options: { - url: - '/v1/environments/{environment_id}/collections/{collection_id}/training_data/{query_id}/examples/{example_id}', + url: '/v1/environments/{environment_id}/collections/{collection_id}/training_data/{query_id}/examples/{example_id}', method: 'PUT', json: true, body: body, - path: path + path: path, }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + 'Accept': 'application/json', + 'Content-Type': 'application/json', } }) }; return createRequest(parameters, _callback); - } + }; + } GeneratedDiscoveryV1.prototype.name = 'discovery'; GeneratedDiscoveryV1.prototype.version = 'v1'; -namespace GeneratedDiscoveryV1 { - export interface Empty {} +/************************* + * interfaces + ************************/ - export type Callback = ( - error: any, - body?: T, - response?: RequestResponse - ) => void; +namespace GeneratedDiscoveryV1 { + /** Options for the `GeneratedDiscoveryV1` constructor. **/ export type Options = { version_date: string; url?: string; @@ -1908,119 +1762,179 @@ namespace GeneratedDiscoveryV1 { password?: string; use_unauthenticated?: boolean; headers?: object; - }; + } + + /** The callback for a service request. 
**/ + export type Callback = (error: any, body?: T, response?: RequestResponse) => void; + /** The body of a service request that returns no response data. **/ + export interface Empty { } + + /************************* + * request interfaces + ************************/ + + /** Parameters for the `createEnvironment` operation. **/ export interface CreateEnvironmentParams { + /** Name that identifies the environment. **/ name: string; + /** Description of the environment. **/ description?: string; - size?: CreateEnvironmentConstants.Size | number; - } - - export namespace CreateEnvironmentConstants { - export enum Size { - ONE = 1, - TWO = 2, - THREE = 3 - } + /** **Deprecated**: Size of the environment. **/ + size?: number; } + /** Parameters for the `deleteEnvironment` operation. **/ export interface DeleteEnvironmentParams { + /** The ID of the environment. **/ environment_id: string; } + /** Parameters for the `getEnvironment` operation. **/ export interface GetEnvironmentParams { + /** The ID of the environment. **/ environment_id: string; } + /** Parameters for the `listEnvironments` operation. **/ export interface ListEnvironmentsParams { + /** Show only the environment with the given name. **/ name?: string; } + /** Parameters for the `listFields` operation. **/ export interface ListFieldsParams { + /** The ID of the environment. **/ environment_id: string; + /** A comma-separated list of collection IDs to be queried against. **/ collection_ids: string[]; } + /** Parameters for the `updateEnvironment` operation. **/ export interface UpdateEnvironmentParams { + /** The ID of the environment. **/ environment_id: string; + /** Name that identifies the environment. **/ name?: string; + /** Description of the environment. **/ description?: string; } + /** Parameters for the `createConfiguration` operation. **/ export interface CreateConfigurationParams { + /** The ID of the environment. **/ environment_id: string; + /** The name of the configuration. 
**/ name: string; + /** The description of the configuration, if available. **/ description?: string; + /** The document conversion settings for the configuration. **/ conversions?: Conversions; + /** An array of document enrichment settings for the configuration. **/ enrichments?: Enrichment[]; + /** Defines operations that can be used to transform the final output JSON into a normalized form. Operations are executed in the order that they appear in the array. **/ normalizations?: NormalizationOperation[]; } + /** Parameters for the `deleteConfiguration` operation. **/ export interface DeleteConfigurationParams { + /** The ID of the environment. **/ environment_id: string; + /** The ID of the configuration. **/ configuration_id: string; } + /** Parameters for the `getConfiguration` operation. **/ export interface GetConfigurationParams { + /** The ID of the environment. **/ environment_id: string; + /** The ID of the configuration. **/ configuration_id: string; } + /** Parameters for the `listConfigurations` operation. **/ export interface ListConfigurationsParams { + /** The ID of the environment. **/ environment_id: string; + /** Find configurations with the given name. **/ name?: string; } + /** Parameters for the `updateConfiguration` operation. **/ export interface UpdateConfigurationParams { + /** The ID of the environment. **/ environment_id: string; + /** The ID of the configuration. **/ configuration_id: string; + /** The name of the configuration. **/ name: string; + /** The description of the configuration, if available. **/ description?: string; + /** The document conversion settings for the configuration. **/ conversions?: Conversions; + /** An array of document enrichment settings for the configuration. **/ enrichments?: Enrichment[]; + /** Defines operations that can be used to transform the final output JSON into a normalized form. Operations are executed in the order that they appear in the array. 
**/ normalizations?: NormalizationOperation[]; } + /** Parameters for the `testConfigurationInEnvironment` operation. **/ export interface TestConfigurationInEnvironmentParams { + /** The ID of the environment. **/ environment_id: string; + /** The configuration to use to process the document. If this part is provided, then the provided configuration is used to process the document. If the `configuration_id` is also provided (both are present at the same time), then request is rejected. The maximum supported configuration size is 1 MB. Configuration parts larger than 1 MB are rejected. See the `GET /configurations/{configuration_id}` operation for an example configuration. **/ configuration?: string; + /** Specify to only run the input document through the given step instead of running the input document through the entire ingestion workflow. Valid values are `convert`, `enrich`, and `normalize`. **/ step?: TestConfigurationInEnvironmentConstants.Step | string; + /** The ID of the configuration to use to process the document. If the `configuration` form part is also provided (both are present at the same time), then request will be rejected. **/ configuration_id?: string; - file?: ReadableStream | FileObject | Buffer; + /** The content of the document to ingest. The maximum supported file size is 50 megabytes. Files larger than 50 megabytes is rejected. **/ + file?: ReadableStream|FileObject|Buffer; + /** If you're using the Data Crawler to upload your documents, you can test a document against the type of metadata that the Data Crawler might send. The maximum supported metadata file size is 1 MB. Metadata parts larger than 1 MB are rejected. Example: ``` { "Creator": "Johnny Appleseed", "Subject": "Apples" } ```. **/ metadata?: string; - file_content_type?: - | TestConfigurationInEnvironmentConstants.FileContentType - | string; + /** The content type of file. 
**/ + file_content_type?: TestConfigurationInEnvironmentConstants.FileContentType | string; } + /** Constants for the `testConfigurationInEnvironment` operation. **/ export namespace TestConfigurationInEnvironmentConstants { + /** Specify to only run the input document through the given step instead of running the input document through the entire ingestion workflow. Valid values are `convert`, `enrich`, and `normalize`. **/ export enum Step { HTML_INPUT = 'html_input', HTML_OUTPUT = 'html_output', JSON_OUTPUT = 'json_output', JSON_NORMALIZATIONS_OUTPUT = 'json_normalizations_output', ENRICHMENTS_OUTPUT = 'enrichments_output', - NORMALIZATIONS_OUTPUT = 'normalizations_output' + NORMALIZATIONS_OUTPUT = 'normalizations_output', } + /** The content type of file. **/ export enum FileContentType { APPLICATION_JSON = 'application/json', APPLICATION_MSWORD = 'application/msword', APPLICATION_VND_OPENXMLFORMATS_OFFICEDOCUMENT_WORDPROCESSINGML_DOCUMENT = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', APPLICATION_PDF = 'application/pdf', TEXT_HTML = 'text/html', - APPLICATION_XHTML_XML = 'application/xhtml+xml' + APPLICATION_XHTML_XML = 'application/xhtml+xml', } } + /** Parameters for the `createCollection` operation. **/ export interface CreateCollectionParams { + /** The ID of the environment. **/ environment_id: string; + /** The name of the collection to be created. **/ name: string; + /** A description of the collection. **/ description?: string; + /** The ID of the configuration in which the collection is to be created. **/ configuration_id?: string; + /** The language of the documents stored in the collection, in the form of an ISO 639-1 language code. **/ language?: CreateCollectionConstants.Language | string; } + /** Constants for the `createCollection` operation. **/ export namespace CreateCollectionConstants { + /** The language of the documents stored in the collection, in the form of an ISO 639-1 language code. 
**/ export enum Language { EN = 'en', ES = 'es', @@ -2030,361 +1944,615 @@ namespace GeneratedDiscoveryV1 { IT = 'it', JA = 'ja', KO = 'ko', - PT_BR = 'pt-br' + PT_BR = 'pt-br', } } + /** Parameters for the `deleteCollection` operation. **/ export interface DeleteCollectionParams { + /** The ID of the environment. **/ environment_id: string; + /** The ID of the collection. **/ collection_id: string; } + /** Parameters for the `getCollection` operation. **/ export interface GetCollectionParams { + /** The ID of the environment. **/ environment_id: string; + /** The ID of the collection. **/ collection_id: string; } + /** Parameters for the `listCollectionFields` operation. **/ export interface ListCollectionFieldsParams { + /** The ID of the environment. **/ environment_id: string; + /** The ID of the collection. **/ collection_id: string; } + /** Parameters for the `listCollections` operation. **/ export interface ListCollectionsParams { + /** The ID of the environment. **/ environment_id: string; + /** Find collections with the given name. **/ name?: string; } + /** Parameters for the `updateCollection` operation. **/ export interface UpdateCollectionParams { + /** The ID of the environment. **/ environment_id: string; + /** The ID of the collection. **/ collection_id: string; + /** The name of the collection. **/ name: string; + /** A description of the collection. **/ description?: string; + /** The ID of the configuration in which the collection is to be updated. **/ configuration_id?: string; } + /** Parameters for the `addDocument` operation. **/ export interface AddDocumentParams { + /** The ID of the environment. **/ environment_id: string; + /** The ID of the collection. **/ collection_id: string; - file?: ReadableStream | FileObject | Buffer; + /** The content of the document to ingest. The maximum supported file size is 50 megabytes. Files larger than 50 megabytes is rejected. 
**/ + file?: ReadableStream|FileObject|Buffer; + /** If you're using the Data Crawler to upload your documents, you can test a document against the type of metadata that the Data Crawler might send. The maximum supported metadata file size is 1 MB. Metadata parts larger than 1 MB are rejected. Example: ``` { "Creator": "Johnny Appleseed", "Subject": "Apples" } ```. **/ metadata?: string; + /** The content type of file. **/ file_content_type?: AddDocumentConstants.FileContentType | string; } + /** Constants for the `addDocument` operation. **/ export namespace AddDocumentConstants { + /** The content type of file. **/ export enum FileContentType { APPLICATION_JSON = 'application/json', APPLICATION_MSWORD = 'application/msword', APPLICATION_VND_OPENXMLFORMATS_OFFICEDOCUMENT_WORDPROCESSINGML_DOCUMENT = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', APPLICATION_PDF = 'application/pdf', TEXT_HTML = 'text/html', - APPLICATION_XHTML_XML = 'application/xhtml+xml' + APPLICATION_XHTML_XML = 'application/xhtml+xml', } } + /** Parameters for the `deleteDocument` operation. **/ export interface DeleteDocumentParams { + /** The ID of the environment. **/ environment_id: string; + /** The ID of the collection. **/ collection_id: string; + /** The ID of the document. **/ document_id: string; } + /** Parameters for the `getDocumentStatus` operation. **/ export interface GetDocumentStatusParams { + /** The ID of the environment. **/ environment_id: string; + /** The ID of the collection. **/ collection_id: string; + /** The ID of the document. **/ document_id: string; } + /** Parameters for the `updateDocument` operation. **/ export interface UpdateDocumentParams { + /** The ID of the environment. **/ environment_id: string; + /** The ID of the collection. **/ collection_id: string; + /** The ID of the document. **/ document_id: string; - file?: ReadableStream | FileObject | Buffer; + /** The content of the document to ingest. 
The maximum supported file size is 50 megabytes. Files larger than 50 megabytes are rejected. **/ + file?: ReadableStream|FileObject|Buffer; + /** If you're using the Data Crawler to upload your documents, you can test a document against the type of metadata that the Data Crawler might send. The maximum supported metadata file size is 1 MB. Metadata parts larger than 1 MB are rejected. Example: ``` { "Creator": "Johnny Appleseed", "Subject": "Apples" } ```. **/ metadata?: string; + /** The content type of file. **/ file_content_type?: UpdateDocumentConstants.FileContentType | string; } + /** Constants for the `updateDocument` operation. **/ export namespace UpdateDocumentConstants { + /** The content type of file. **/ export enum FileContentType { APPLICATION_JSON = 'application/json', APPLICATION_MSWORD = 'application/msword', APPLICATION_VND_OPENXMLFORMATS_OFFICEDOCUMENT_WORDPROCESSINGML_DOCUMENT = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', APPLICATION_PDF = 'application/pdf', TEXT_HTML = 'text/html', - APPLICATION_XHTML_XML = 'application/xhtml+xml' + APPLICATION_XHTML_XML = 'application/xhtml+xml', } } + /** Parameters for the `federatedQuery` operation. **/ export interface FederatedQueryParams { + /** The ID of the environment. **/ environment_id: string; + /** A comma-separated list of collection IDs to be queried against. **/ collection_ids: string[]; + /** A cacheable query that limits the documents returned to exclude any documents that don't mention the query content. Filter searches are better for metadata type searches and when you are trying to get a sense of concepts in the data set. **/ filter?: string; + /** A query search returns all documents in your data set with full enrichments and full text, but with the most relevant documents listed first. Use a query search when you want to find the most relevant search results. You cannot use `natural_language_query` and `query` at the same time. 
**/ query?: string; + /** A natural language query that returns relevant documents by utilizing training data and natural language understanding. You cannot use `natural_language_query` and `query` at the same time. **/ natural_language_query?: string; + /** An aggregation search uses combinations of filters and query search to return an exact answer. Aggregations are useful for building applications, because you can use them to build lists, tables, and time series. For a full list of possible aggregrations, see the Query reference. **/ aggregation?: string; + /** Number of documents to return. **/ count?: number; + /** A comma separated list of the portion of the document hierarchy to return. **/ return_fields?: string[]; + /** The number of query results to skip at the beginning. For example, if the total number of results that are returned is 10, and the offset is 8, it returns the last two results. **/ offset?: number; + /** A comma separated list of fields in the document to sort on. You can optionally specify a sort direction by prefixing the field with `-` for descending or `+` for ascending. Ascending is the default sort direction if no prefix is specified. **/ sort?: string[]; + /** When true a highlight field is returned for each result which contains the fields that match the query with `` tags around the matching query terms. Defaults to false. **/ highlight?: boolean; + /** When `true` and used with a Watson Discovery News collection, duplicate results (based on the contents of the `title` field) are removed. Duplicate comparison is limited to the current query only, `offset` is not considered. Defaults to `false`. This parameter is currently Beta functionality. **/ deduplicate?: boolean; + /** When specified, duplicate results based on the field specified are removed from the returned results. Duplicate comparison is limited to the current query only, `offset` is not considered. This parameter is currently Beta functionality. 
**/ deduplicate_field?: string; } + /** Parameters for the `federatedQueryNotices` operation. **/ export interface FederatedQueryNoticesParams { + /** The ID of the environment. **/ environment_id: string; + /** A comma-separated list of collection IDs to be queried against. **/ collection_ids: string[]; + /** A cacheable query that limits the documents returned to exclude any documents that don't mention the query content. Filter searches are better for metadata type searches and when you are trying to get a sense of concepts in the data set. **/ filter?: string; + /** A query search returns all documents in your data set with full enrichments and full text, but with the most relevant documents listed first. Use a query search when you want to find the most relevant search results. You cannot use `natural_language_query` and `query` at the same time. **/ query?: string; + /** A natural language query that returns relevant documents by utilizing training data and natural language understanding. You cannot use `natural_language_query` and `query` at the same time. **/ natural_language_query?: string; + /** An aggregation search uses combinations of filters and query search to return an exact answer. Aggregations are useful for building applications, because you can use them to build lists, tables, and time series. For a full list of possible aggregrations, see the Query reference. **/ aggregation?: string; + /** Number of documents to return. **/ count?: number; + /** A comma separated list of the portion of the document hierarchy to return. **/ return_fields?: string[]; + /** The number of query results to skip at the beginning. For example, if the total number of results that are returned is 10, and the offset is 8, it returns the last two results. **/ offset?: number; + /** A comma separated list of fields in the document to sort on. You can optionally specify a sort direction by prefixing the field with `-` for descending or `+` for ascending. 
Ascending is the default sort direction if no prefix is specified. **/ sort?: string[]; + /** When true a highlight field is returned for each result which contains the fields that match the query with `` tags around the matching query terms. Defaults to false. **/ highlight?: boolean; + /** When specified, duplicate results based on the field specified are removed from the returned results. Duplicate comparison is limited to the current query only, `offset` is not considered. This parameter is currently Beta functionality. **/ deduplicate_field?: string; } + /** Parameters for the `query` operation. **/ export interface QueryParams { + /** The ID of the environment. **/ environment_id: string; + /** The ID of the collection. **/ collection_id: string; + /** A cacheable query that limits the documents returned to exclude any documents that don't mention the query content. Filter searches are better for metadata type searches and when you are trying to get a sense of concepts in the data set. **/ filter?: string; + /** A query search returns all documents in your data set with full enrichments and full text, but with the most relevant documents listed first. Use a query search when you want to find the most relevant search results. You cannot use `natural_language_query` and `query` at the same time. **/ query?: string; + /** A natural language query that returns relevant documents by utilizing training data and natural language understanding. You cannot use `natural_language_query` and `query` at the same time. **/ natural_language_query?: string; + /** A passages query that returns the most relevant passages from the results. **/ passages?: boolean; + /** An aggregation search uses combinations of filters and query search to return an exact answer. Aggregations are useful for building applications, because you can use them to build lists, tables, and time series. For a full list of possible aggregrations, see the Query reference. 
**/ aggregation?: string; + /** Number of documents to return. **/ count?: number; + /** A comma separated list of the portion of the document hierarchy to return_fields. **/ return_fields?: string[]; + /** The number of query results to skip at the beginning. For example, if the total number of results that are returned is 10, and the offset is 8, it returns the last two results. **/ offset?: number; + /** A comma separated list of fields in the document to sort on. You can optionally specify a sort direction by prefixing the field with `-` for descending or `+` for ascending. Ascending is the default sort direction if no prefix is specified. **/ sort?: string[]; + /** When true a highlight field is returned for each result which contains the fields that match the query with `` tags around the matching query terms. Defaults to false. **/ highlight?: boolean; + /** A comma-separated list of fields that passages are drawn from. If this parameter not specified, then all top-level fields are included. **/ passages_fields?: string[]; + /** The maximum number of passages to return. The search returns fewer passages if the requested total is not found. The default is `10`. The maximum is `100`. **/ passages_count?: number; + /** The approximate number of characters that any one passage will have. The default is `400`. The minimum is `50`. The maximum is `2000`. **/ passages_characters?: number; + /** When `true` and used with a Watson Discovery News collection, duplicate results (based on the contents of the `title` field) are removed. Duplicate comparison is limited to the current query only, `offset` is not considered. Defaults to `false`. This parameter is currently Beta functionality. **/ deduplicate?: boolean; + /** When specified, duplicate results based on the field specified are removed from the returned results. Duplicate comparison is limited to the current query only, `offset` is not considered. This parameter is currently Beta functionality. 
**/ deduplicate_field?: string; } + /** Parameters for the `queryNotices` operation. **/ export interface QueryNoticesParams { + /** The ID of the environment. **/ environment_id: string; + /** The ID of the collection. **/ collection_id: string; + /** A cacheable query that limits the documents returned to exclude any documents that don't mention the query content. Filter searches are better for metadata type searches and when you are trying to get a sense of concepts in the data set. **/ filter?: string; + /** A query search returns all documents in your data set with full enrichments and full text, but with the most relevant documents listed first. Use a query search when you want to find the most relevant search results. You cannot use `natural_language_query` and `query` at the same time. **/ query?: string; + /** A natural language query that returns relevant documents by utilizing training data and natural language understanding. You cannot use `natural_language_query` and `query` at the same time. **/ natural_language_query?: string; + /** A passages query that returns the most relevant passages from the results. **/ passages?: boolean; + /** An aggregation search uses combinations of filters and query search to return an exact answer. Aggregations are useful for building applications, because you can use them to build lists, tables, and time series. For a full list of possible aggregrations, see the Query reference. **/ aggregation?: string; + /** Number of documents to return. **/ count?: number; + /** A comma separated list of the portion of the document hierarchy to return. **/ return_fields?: string[]; + /** The number of query results to skip at the beginning. For example, if the total number of results that are returned is 10, and the offset is 8, it returns the last two results. **/ offset?: number; + /** A comma separated list of fields in the document to sort on. 
You can optionally specify a sort direction by prefixing the field with `-` for descending or `+` for ascending. Ascending is the default sort direction if no prefix is specified. **/ sort?: string[]; + /** When true a highlight field is returned for each result which contains the fields that match the query with `` tags around the matching query terms. Defaults to false. **/ highlight?: boolean; + /** A comma-separated list of fields that passages are drawn from. If this parameter not specified, then all top-level fields are included. **/ passages_fields?: string[]; + /** The maximum number of passages to return. The search returns fewer passages if the requested total is not found. The default is `10`. The maximum is `100`. **/ passages_count?: number; + /** The approximate number of characters that any one passage will have. The default is `400`. The minimum is `50`. The maximum is `2000`. **/ passages_characters?: number; + /** When specified, duplicate results based on the field specified are removed from the returned results. Duplicate comparison is limited to the current query only, `offset` is not considered. This parameter is currently Beta functionality. **/ deduplicate_field?: string; } + /** Parameters for the `addTrainingData` operation. **/ export interface AddTrainingDataParams { + /** The ID of the environment. **/ environment_id: string; + /** The ID of the collection. **/ collection_id: string; natural_language_query?: string; filter?: string; examples?: TrainingExample[]; } + /** Parameters for the `createTrainingExample` operation. **/ export interface CreateTrainingExampleParams { + /** The ID of the environment. **/ environment_id: string; + /** The ID of the collection. **/ collection_id: string; + /** The ID of the query used for training. **/ query_id: string; document_id?: string; cross_reference?: string; relevance?: number; } + /** Parameters for the `deleteAllTrainingData` operation. 
**/ export interface DeleteAllTrainingDataParams { + /** The ID of the environment. **/ environment_id: string; + /** The ID of the collection. **/ collection_id: string; } + /** Parameters for the `deleteTrainingData` operation. **/ export interface DeleteTrainingDataParams { + /** The ID of the environment. **/ environment_id: string; + /** The ID of the collection. **/ collection_id: string; + /** The ID of the query used for training. **/ query_id: string; } + /** Parameters for the `deleteTrainingExample` operation. **/ export interface DeleteTrainingExampleParams { + /** The ID of the environment. **/ environment_id: string; + /** The ID of the collection. **/ collection_id: string; + /** The ID of the query used for training. **/ query_id: string; + /** The ID of the document as it is indexed. **/ example_id: string; } + /** Parameters for the `getTrainingData` operation. **/ export interface GetTrainingDataParams { + /** The ID of the environment. **/ environment_id: string; + /** The ID of the collection. **/ collection_id: string; + /** The ID of the query used for training. **/ query_id: string; } + /** Parameters for the `getTrainingExample` operation. **/ export interface GetTrainingExampleParams { + /** The ID of the environment. **/ environment_id: string; + /** The ID of the collection. **/ collection_id: string; + /** The ID of the query used for training. **/ query_id: string; + /** The ID of the document as it is indexed. **/ example_id: string; } + /** Parameters for the `listTrainingData` operation. **/ export interface ListTrainingDataParams { + /** The ID of the environment. **/ environment_id: string; + /** The ID of the collection. **/ collection_id: string; } + /** Parameters for the `listTrainingExamples` operation. **/ export interface ListTrainingExamplesParams { + /** The ID of the environment. **/ environment_id: string; + /** The ID of the collection. **/ collection_id: string; + /** The ID of the query used for training. 
**/ query_id: string; } + /** Parameters for the `updateTrainingExample` operation. **/ export interface UpdateTrainingExampleParams { + /** The ID of the environment. **/ environment_id: string; + /** The ID of the collection. **/ collection_id: string; + /** The ID of the query used for training. **/ query_id: string; + /** The ID of the document as it is indexed. **/ example_id: string; cross_reference?: string; relevance?: number; } + /************************* + * model interfaces + ************************/ + + /** AggregationResult. **/ export interface AggregationResult { + /** Key that matched the aggregation type. **/ key?: string; + /** Number of matching results. **/ matching_results?: number; + /** Aggregations returned in the case of chained aggregations. **/ aggregations?: QueryAggregation[]; } + /** A collection for storing documents. **/ export interface Collection { + /** The unique identifier of the collection. **/ collection_id?: string; + /** The name of the collection. **/ name?: string; + /** The description of the collection. **/ description?: string; + /** The creation date of the collection in the format yyyy-MM-dd'T'HH:mm:ss.SSS'Z'. **/ created?: string; + /** The timestamp of when the collection was last updated in the format yyyy-MM-dd'T'HH:mm:ss.SSS'Z'. **/ updated?: string; + /** The status of the collection. **/ status?: string; + /** The unique identifier of the collection's configuration. **/ configuration_id?: string; + /** The language of the documents stored in the collection. Permitted values include `en_us` (U.S. English), `de` (German), and `es` (Spanish). **/ language?: string; + /** The object providing information about the documents in the collection. Present only when retrieving details of a collection. **/ document_counts?: DocumentCounts; + /** The object providing information about the disk usage of the collection. Present only when retrieving details of a collection. 
**/ disk_usage?: CollectionDiskUsage; + /** Provides information about the status of relevance training for collection. **/ training_status?: TrainingStatus; } + /** Summary of the disk usage statistics for this collection. **/ export interface CollectionDiskUsage { + /** Number of bytes used by the collection. **/ used_bytes?: number; } + /** Summary of the collection usage in the environment. **/ + export interface CollectionUsage { + /** Number of active collections in the environment. **/ + available?: number; + /** Total number of collections allowed in the environment. **/ + maximum_allowed?: number; + } + + /** A custom configuration for the environment. **/ export interface Configuration { + /** The unique identifier of the configuration. **/ configuration_id?: string; + /** The name of the configuration. **/ name: string; + /** The creation date of the configuration in the format yyyy-MM-dd'T'HH:mm:ss.SSS'Z'. **/ created?: string; + /** The timestamp of when the configuration was last updated in the format yyyy-MM-dd'T'HH:mm:ss.SSS'Z'. **/ updated?: string; + /** The description of the configuration, if available. **/ description?: string; + /** The document conversion settings for the configuration. **/ conversions?: Conversions; + /** An array of document enrichment settings for the configuration. **/ enrichments?: Enrichment[]; + /** Defines operations that can be used to transform the final output JSON into a normalized form. Operations are executed in the order that they appear in the array. **/ normalizations?: NormalizationOperation[]; } + /** Document conversion settings. **/ export interface Conversions { + /** A list of PDF conversion settings. **/ pdf?: PdfSettings; + /** A list of Word conversion settings. **/ word?: WordSettings; + /** A list of HTML conversion settings. **/ html?: HtmlSettings; + /** Defines operations that can be used to transform the final output JSON into a normalized form. 
Operations are executed in the order that they appear in the array. **/ json_normalizations?: NormalizationOperation[]; } + /** DeleteCollectionResponse. **/ export interface DeleteCollectionResponse { + /** The unique identifier of the collection that is being deleted. **/ collection_id: string; + /** The status of the collection. The status of a successful deletion operation is `deleted`. **/ status: string; } + /** DeleteConfigurationResponse. **/ export interface DeleteConfigurationResponse { + /** The unique identifier for the configuration. **/ configuration_id: string; + /** Status of the configuration. A deleted configuration has the status deleted. **/ status: string; + /** An array of notice messages, if any. **/ notices?: Notice[]; } + /** DeleteDocumentResponse. **/ export interface DeleteDocumentResponse { + /** The unique identifier of the document. **/ document_id?: string; + /** Status of the document. A deleted document has the status deleted. **/ status?: string; } + /** DeleteEnvironmentResponse. **/ export interface DeleteEnvironmentResponse { + /** The unique identifier for the environment. **/ environment_id: string; + /** Status of the environment. **/ status: string; } + /** Summary of the disk usage statistics for the environment. **/ export interface DiskUsage { + /** Number of bytes used on the environment's disk capacity. **/ used_bytes?: number; + /** Total number of bytes available in the environment's disk capacity. **/ maximum_allowed_bytes?: number; + /** **Deprecated**: Total number of bytes available in the environment's disk capacity. **/ total_bytes?: number; + /** **Deprecated**: Amount of disk capacity used, in KB or GB format. **/ used?: string; + /** **Deprecated**: Total amount of the environment's disk capacity, in KB or GB format. **/ total?: string; + /** **Deprecated**: Percentage of the environment's disk capacity that is being used. **/ percent_used?: number; } + /** DocumentAccepted. 
**/ export interface DocumentAccepted { + /** The unique identifier of the ingested document. **/ document_id?: string; + /** Status of the document in the ingestion process. **/ status?: string; + /** Array of notices produced by the document-ingestion process. **/ notices?: Notice[]; } + /** DocumentCounts. **/ export interface DocumentCounts { + /** The total number of available documents in the collection. **/ available?: number; + /** The number of documents in the collection that are currently being processed. **/ processing?: number; + /** The number of documents in the collection that failed to be ingested. **/ failed?: number; } + /** DocumentSnapshot. **/ export interface DocumentSnapshot { step?: string; snapshot?: Object; } + /** Status information about a submitted document. **/ export interface DocumentStatus { + /** The unique identifier of the document. **/ document_id: string; + /** The unique identifier for the configuration. **/ configuration_id: string; + /** The creation date of the document in the format yyyy-MM-dd'T'HH:mm:ss.SSS'Z'. **/ created: string; + /** Date of the most recent document update, in the format yyyy-MM-dd'T'HH:mm:ss.SSS'Z'. **/ updated: string; + /** Status of the document in the ingestion process. **/ status: string; + /** Description of the document status. **/ status_description: string; + /** Name of the original source file (if available). **/ filename?: string; + /** The type of the original source file. **/ file_type?: string; + /** The SHA-1 hash of the original source file (formatted as a hexadecimal string). **/ sha1?: string; + /** Array of notices produced by the document-ingestion process. **/ notices: Notice[]; } + /** Enrichment. **/ export interface Enrichment { + /** Describes what the enrichment step does. **/ description?: string; + /** Field where enrichments will be stored. This field must already exist or be at most 1 level deeper than an existing field. 
For example, if `text` is a top-level field with no sub-fields, `text.foo` is a valid destination but `text.foo.bar` is not. **/ destination_field: string; + /** Field to be enriched. **/ source_field: string; + /** Indicates that the enrichments will overwrite the destination_field field if it already exists. **/ overwrite?: boolean; + /** Name of the enrichment service to call. Currently the only valid value is `alchemy_language`. **/ enrichment_name: string; + /** If true, then most errors generated during the enrichment process will be treated as warnings and will not cause the document to fail processing. **/ ignore_downstream_errors?: boolean; + /** A list of options specific to the enrichment. **/ options?: EnrichmentOptions; } + /** Details about an environment. **/ export interface Environment { + /** Unique identifier for the environment. **/ environment_id?: string; + /** Name that identifies the environment. **/ name?: string; + /** Description of the environment. **/ description?: string; + /** Creation date of the environment, in the format yyyy-MM-dd'T'HH:mm:ss.SSS'Z'. **/ created?: string; + /** Date of most recent environment update, in the format yyyy-MM-dd'T'HH:mm:ss.SSS'Z'. **/ updated?: string; + /** Status of the environment. **/ status?: string; + /** If true, then the environment contains read-only collections which are maintained by IBM. **/ read_only?: boolean; + /** **Deprecated**: Size of the environment. **/ size?: number; + /** Details about the resource usage and capacity of the environment. **/ index_capacity?: IndexCapacity; } + /** Summary of the document usage statistics for the environment. **/ export interface EnvironmentDocuments { + /** Number of documents indexed for the environment. **/ indexed?: number; + /** Total number of documents allowed in the environment's capacity. **/ maximum_allowed?: number; } + /** Field. **/ export interface Field { + /** The name of the field. 
**/ field_name?: string; + /** The type of the field. **/ field_type?: string; } + /** FontSetting. **/ export interface FontSetting { level?: number; min_size?: number; @@ -2394,6 +2562,7 @@ namespace GeneratedDiscoveryV1 { name?: string; } + /** A list of HTML conversion settings. **/ export interface HtmlSettings { exclude_tags_completely?: string[]; exclude_tags_keep_content?: string[]; @@ -2403,69 +2572,110 @@ namespace GeneratedDiscoveryV1 { exclude_tag_attributes?: string[]; } + /** Details about the resource usage and capacity of the environment. **/ export interface IndexCapacity { + /** Summary of the document usage statistics for the environment. **/ documents?: EnvironmentDocuments; + /** Summary of the disk usage of the environment. **/ disk_usage?: DiskUsage; + /** Summary of the collection usage in the environment. **/ + collections?: CollectionUsage; + /** **Deprecated**: Summary of the memory usage of the environment. **/ memory_usage?: MemoryUsage; } + /** The list of fetched fields. The fields are returned using a fully qualified name format, however, the format differs slightly from that used by the query operations. * Fields which contain nested JSON objects are assigned a type of "nested". * Fields which belong to a nested object are prefixed with `.properties` (for example, `warnings.properties.severity` means that the `warnings` object has a property called `severity`). * Fields returned from the News collection are prefixed with `v{N}-fullnews-t3-{YEAR}.mappings` (for example, `v5-fullnews-t3-2016.mappings.text.properties.author`). **/ export interface ListCollectionFieldsResponse { + /** An array containing information about each field in the collections. **/ fields?: Field[]; } + /** ListCollectionsResponse. **/ export interface ListCollectionsResponse { + /** An array containing information about each collection in the environment. **/ collections?: Collection[]; } + /** ListConfigurationsResponse. 
**/ export interface ListConfigurationsResponse { + /** An array of Configurations that are available for the service instance. **/ configurations?: Configuration[]; } + /** ListEnvironmentsResponse. **/ export interface ListEnvironmentsResponse { + /** An array of [environments] that are available for the service instance. **/ environments?: Environment[]; } + /** **Deprecated**: Summary of the memory usage statistics for this environment. **/ export interface MemoryUsage { + /** **Deprecated**: Number of bytes used in the environment's memory capacity. **/ used_bytes?: number; + /** **Deprecated**: Total number of bytes available in the environment's memory capacity. **/ total_bytes?: number; + /** **Deprecated**: Amount of memory capacity used, in KB or GB format. **/ used?: string; + /** **Deprecated**: Total amount of the environment's memory capacity, in KB or GB format. **/ total?: string; + /** **Deprecated**: Percentage of the environment's memory capacity that is being used. **/ percent_used?: number; } + /** NormalizationOperation. **/ export interface NormalizationOperation { + /** Identifies what type of operation to perform. **copy** - Copies the value of the `source_field` to the `destination_field` field. If the `destination_field` already exists, then the value of the `source_field` overwrites the original value of the `destination_field`. **move** - Renames (moves) the `source_field` to the `destination_field`. If the `destination_field` already exists, then the value of the `source_field` overwrites the original value of the `destination_field`. Rename is identical to copy, except that the `source_field` is removed after the value has been copied to the `destination_field` (it is the same as a _copy_ followed by a _remove_). **merge** - Merges the value of the `source_field` with the value of the `destination_field`. 
The `destination_field` is converted into an array if it is not already an array, and the value of the `source_field` is appended to the array. This operation removes the `source_field` after the merge. If the `source_field` does not exist in the current document, then the `destination_field` is still converted into an array (if it is not an array already). This ensures the type for `destination_field` is consistent across all documents. **remove** - Deletes the `source_field` field. The `destination_field` is ignored for this operation. **remove_nulls** - Removes all nested null (blank) leaf values from the JSON tree. `source_field` and `destination_field` are ignored by this operation because _remove_nulls_ operates on the entire JSON tree. Typically, `remove_nulls` is invoked as the last normalization operation (if it is invoked at all, it can be time-expensive). **/ operation?: string; + /** The source field for the operation. **/ source_field?: string; + /** The destination field for the operation. **/ destination_field?: string; } + /** A notice produced for the collection. **/ export interface Notice { + /** Identifies the notice. Many notices might have the same ID. This field exists so that user applications can programmatically identify a notice and take automatic corrective action. **/ notice_id?: string; + /** The creation date of the collection in the format yyyy-MM-dd'T'HH:mm:ss.SSS'Z'. **/ created?: string; + /** Unique identifier of the document. **/ document_id?: string; + /** Unique identifier of the query used for relevance training. **/ query_id?: string; + /** Severity level of the notice. **/ severity?: string; + /** Ingestion or training step in which the notice occurred. **/ step?: string; + /** The description of the notice. **/ description?: string; } + /** PdfHeadingDetection. **/ export interface PdfHeadingDetection { fonts?: FontSetting[]; } + /** A list of PDF conversion settings. 
**/ export interface PdfSettings { heading?: PdfHeadingDetection; } + /** An aggregation produced by the Discovery service to analyze the input provided. **/ export interface QueryAggregation { + /** The type of aggregation command used. For example: term, filter, max, min, etc. **/ type?: string; + /** The field where the aggregation is located in the document. **/ field?: string; results?: AggregationResult[]; + /** The match the aggregated results queried for. **/ match?: string; + /** Number of matching results. **/ matching_results?: number; + /** Aggregations returned by the Discovery service. **/ aggregations?: QueryAggregation[]; } + /** QueryNoticesResponse. **/ export interface QueryNoticesResponse { matching_results?: number; results?: QueryNoticesResult[]; @@ -2474,15 +2684,23 @@ namespace GeneratedDiscoveryV1 { duplicates_removed?: number; } + /** QueryPassages. **/ export interface QueryPassages { + /** The unique identifier of the document from which the passage has been extracted. **/ document_id?: string; + /** The confidence score of the passage's analysis. A higher score indicates greater confidence. **/ passage_score?: number; + /** The content of the extracted passage. **/ passage_text?: string; + /** The position of the first character of the extracted passage in the originating field. **/ start_offset?: number; + /** The position of the last character of the extracted passage in the originating field. **/ end_offset?: number; + /** The label of the field from which the passage has been extracted. **/ field?: string; } + /** A response containing the documents and aggregations for the query. **/ export interface QueryResponse { matching_results?: number; results?: QueryResult[]; @@ -2491,38 +2709,54 @@ namespace GeneratedDiscoveryV1 { duplicates_removed?: number; } + /** QueryResult. **/ export interface QueryResult { + /** The unique identifier of the document. **/ id?: string; + /** The confidence score of the result's analysis. 
Scores range from 0 to 1, with a higher score indicating greater confidence. **/ score?: number; + /** Metadata of the document. **/ metadata?: Object; + /** The collection ID of the collection containing the document for this result. **/ collection_id?: string; } + /** TestDocument. **/ export interface TestDocument { + /** The unique identifier for the configuration. **/ configuration_id?: string; + /** Status of the preview operation. **/ status?: string; + /** The number of 10-kB chunks of field data that were enriched. This can be used to estimate the cost of running a real ingestion. **/ enriched_field_units?: number; + /** Format of the test document. **/ original_media_type?: string; + /** An array of objects that describe each step in the preview process. **/ snapshots?: DocumentSnapshot[]; + /** An array of notice messages about the preview operation. **/ notices?: Notice[]; } + /** TrainingDataSet. **/ export interface TrainingDataSet { environment_id?: string; collection_id?: string; queries?: TrainingQuery[]; } + /** TrainingExample. **/ export interface TrainingExample { document_id?: string; cross_reference?: string; relevance?: number; } + /** TrainingExampleList. **/ export interface TrainingExampleList { examples?: TrainingExample[]; } + /** TrainingQuery. **/ export interface TrainingQuery { query_id?: string; natural_language_query?: string; @@ -2530,6 +2764,7 @@ namespace GeneratedDiscoveryV1 { examples?: TrainingExample[]; } + /** TrainingStatus. **/ export interface TrainingStatus { total_examples?: number; available?: boolean; @@ -2542,40 +2777,54 @@ namespace GeneratedDiscoveryV1 { data_updated?: string; } + /** WordHeadingDetection. **/ export interface WordHeadingDetection { fonts?: FontSetting[]; styles?: WordStyle[]; } + /** A list of Word conversion settings. **/ export interface WordSettings { heading?: WordHeadingDetection; } + /** WordStyle. **/ export interface WordStyle { level?: number; names?: string[]; } + /** XPathPatterns. 
**/ export interface XPathPatterns { xpaths?: string[]; } + /** Options which are specific to a particular enrichment. **/ export interface EnrichmentOptions { + /** A comma-separated list of analyses that will be applied when using the `alchemy_language` enrichment. See the service documentation for details on each extract option. Possible values include: * entity * keyword * taxonomy * concept * relation * doc-sentiment * doc-emotion * typed-rels. **/ extract?: string[]; sentiment?: boolean; quotations?: boolean; show_source_text?: boolean; hierarchical_typed_relations?: boolean; + /** Required when using the `typed-rels` extract option. Should be set to the ID of a previously published custom Watson Knowledge Studio model. **/ model?: string; + /** If provided, then do not attempt to detect the language of the input document. Instead, assume the language is the one specified in this field. You can set this property to work around `unsupported-text-language` errors. Supported languages include English, German, French, Italian, Portuguese, Russian, Spanish and Swedish. Supported language codes are the ISO-639-1, ISO-639-2, ISO-639-3, and the plain English name of the language (for example "russian"). **/ language?: string; } + /** QueryNoticesResult. **/ export interface QueryNoticesResult { + /** The unique identifier of the document. **/ id?: string; + /** The confidence score of the result's analysis. Scores range from 0 to 1, with a higher score indicating greater confidence. **/ score?: number; + /** Metadata of the document. **/ metadata?: Object; + /** The collection ID of the collection containing the document for this result. 
**/ collection_id?: string; } + } export = GeneratedDiscoveryV1; diff --git a/language-translator/v2-generated.ts b/language-translator/v2-generated.ts index 261f93b3fb..51cb9f2dca 100644 --- a/language-translator/v2-generated.ts +++ b/language-translator/v2-generated.ts @@ -41,13 +41,17 @@ class GeneratedLanguageTranslatorV2 extends BaseService { * @param {Boolean} [options.use_unauthenticated] - Set to `true` to avoid including an authorization header. This option may be useful for requests that are proxied. * @param {Object} [options.headers] - Default headers that shall be included with every request to the service. * @param {Object} [options.headers.X-Watson-Learning-Opt-Out] - Set to `true` to opt-out of data collection. By default, all IBM Watson services log requests and their results. Logging is done only to improve the services for future users. The logged data is not shared or made public. If you are concerned with protecting the privacy of users' personal information or otherwise do not want your requests to be logged, you can opt out of logging. - * @returns {GeneratedLanguageTranslatorV2} * @constructor + * @returns {GeneratedLanguageTranslatorV2} */ constructor(options: GeneratedLanguageTranslatorV2.Options) { super(options); } + /************************* + * translate + ************************/ + /** * Translates the input text from the source language to the target language. * @@ -65,8 +69,8 @@ class GeneratedLanguageTranslatorV2 extends BaseService { GeneratedLanguageTranslatorV2.TranslationResult > ): ReadableStream | void { - const _callback = typeof callback === 'function' ? callback : () => {}; const _params = extend({}, params); + const _callback = callback ? 
callback : () => {}; const requiredParams = ['text']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { @@ -85,16 +89,20 @@ class GeneratedLanguageTranslatorV2 extends BaseService { json: true, body: body }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + Accept: 'application/json', + 'Content-Type': 'application/json' } }) }; return createRequest(parameters, _callback); } + /************************* + * identify + ************************/ + /** * Identifies the language of the input text. * @@ -109,16 +117,14 @@ class GeneratedLanguageTranslatorV2 extends BaseService { GeneratedLanguageTranslatorV2.IdentifiedLanguages > ): ReadableStream | void { - const _callback = typeof callback === 'function' ? callback : () => {}; const _params = extend({}, params); + const _callback = callback ? callback : () => {}; const requiredParams = ['text']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const body = { - text: _params.text - }; + const body = { text: _params.text }; const parameters = { options: { url: '/v2/identify', @@ -126,10 +132,10 @@ class GeneratedLanguageTranslatorV2 extends BaseService { json: true, body: body }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'text/plain' + Accept: 'application/json', + 'Content-Type': 'text/plain' } }) }; @@ -151,22 +157,30 @@ class GeneratedLanguageTranslatorV2 extends BaseService { GeneratedLanguageTranslatorV2.IdentifiableLanguages > ): ReadableStream | void { - const _callback = typeof callback === 'function' ? callback : () => {}; - const _params = extend({}, params); + const _params = + typeof params === 'function' && !callback ? 
{} : extend({}, params); + const _callback = + typeof params === 'function' && !callback + ? params + : callback ? callback : () => {}; const parameters = { options: { url: '/v2/identifiable_languages', method: 'GET' }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json' + Accept: 'application/json' } }) }; return createRequest(parameters, _callback); } + /************************* + * models + ************************/ + /** * Uploads a TMX glossary file on top of a domain to customize a translation model. * @@ -185,8 +199,8 @@ class GeneratedLanguageTranslatorV2 extends BaseService { GeneratedLanguageTranslatorV2.TranslationModel > ): ReadableStream | void { - const _callback = typeof callback === 'function' ? callback : () => {}; const _params = extend({}, params); + const _callback = callback ? callback : () => {}; const requiredParams = ['base_model_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { @@ -217,10 +231,10 @@ class GeneratedLanguageTranslatorV2 extends BaseService { qs: query, formData: formData }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'multipart/form-data' + Accept: 'application/json', + 'Content-Type': 'multipart/form-data' } }) }; @@ -241,8 +255,8 @@ class GeneratedLanguageTranslatorV2 extends BaseService { GeneratedLanguageTranslatorV2.DeleteModelResult > ): ReadableStream | void { - const _callback = typeof callback === 'function' ? callback : () => {}; const _params = extend({}, params); + const _callback = callback ? 
callback : () => {}; const requiredParams = ['model_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { @@ -257,9 +271,9 @@ class GeneratedLanguageTranslatorV2 extends BaseService { method: 'DELETE', path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json' + Accept: 'application/json' } }) }; @@ -280,8 +294,8 @@ class GeneratedLanguageTranslatorV2 extends BaseService { GeneratedLanguageTranslatorV2.TranslationModel > ): ReadableStream | void { - const _callback = typeof callback === 'function' ? callback : () => {}; const _params = extend({}, params); + const _callback = callback ? callback : () => {}; const requiredParams = ['model_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { @@ -296,9 +310,9 @@ class GeneratedLanguageTranslatorV2 extends BaseService { method: 'GET', path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json' + Accept: 'application/json' } }) }; @@ -321,8 +335,12 @@ class GeneratedLanguageTranslatorV2 extends BaseService { GeneratedLanguageTranslatorV2.TranslationModels > ): ReadableStream | void { - const _callback = typeof callback === 'function' ? callback : () => {}; - const _params = extend({}, params); + const _params = + typeof params === 'function' && !callback ? {} : extend({}, params); + const _callback = + typeof params === 'function' && !callback + ? params + : callback ? 
callback : () => {}; const query = { source: _params.source, target: _params.target, @@ -334,10 +352,10 @@ class GeneratedLanguageTranslatorV2 extends BaseService { method: 'GET', qs: query }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/x-www-form-urlencoded' + Accept: 'application/json', + 'Content-Type': 'application/x-www-form-urlencoded' } }) }; @@ -348,15 +366,12 @@ class GeneratedLanguageTranslatorV2 extends BaseService { GeneratedLanguageTranslatorV2.prototype.name = 'language_translator'; GeneratedLanguageTranslatorV2.prototype.version = 'v2'; -namespace GeneratedLanguageTranslatorV2 { - export interface Empty {} - - export type Callback = ( - error: any, - body?: T, - response?: RequestResponse - ) => void; +/************************* + * interfaces + ************************/ +namespace GeneratedLanguageTranslatorV2 { + /** Options for the `GeneratedLanguageTranslatorV2` constructor. **/ export type Options = { url?: string; username?: string; @@ -365,87 +380,158 @@ namespace GeneratedLanguageTranslatorV2 { headers?: object; }; + /** The callback for a service request. **/ + export type Callback = ( + error: any, + body?: T, + response?: RequestResponse + ) => void; + + /** The body of a service request that returns no response data. **/ + export interface Empty {} + + /************************* + * request interfaces + ************************/ + + /** Parameters for the `translate` operation. **/ export interface TranslateParams { + /** Input text in UTF-8 encoding. It is a list so that multiple paragraphs can be submitted. Also accept a single string, instead of an array, as valid input. **/ text: string[]; + /** The unique model_id of the translation model being used to translate text. The model_id inherently specifies source language, target language, and domain. 
If the model_id is specified, there is no need for the source and target parameters and the values are ignored. **/ model_id?: string; + /** Used in combination with target as an alternative way to select the model for translation. When target and source are set, and model_id is not set, the system chooses a default model with the right language pair to translate (usually the model based on the news domain). **/ source?: string; + /** Used in combination with source as an alternative way to select the model for translation. When target and source are set, and model_id is not set, the system chooses a default model with the right language pair to translate (usually the model based on the news domain). **/ target?: string; } + /** Parameters for the `identify` operation. **/ export interface IdentifyParams { + /** Input text in UTF-8 format. **/ text: string; } + /** Parameters for the `listIdentifiableLanguages` operation. **/ export interface ListIdentifiableLanguagesParams {} + /** Parameters for the `createModel` operation. **/ export interface CreateModelParams { + /** Specifies the domain model that is used as the base for the training. To see current supported domain models, use the GET /v2/models parameter. **/ base_model_id: string; + /** The model name. Valid characters are letters, numbers, -, and _. No spaces. **/ name?: string; + /** A TMX file with your customizations. The customizations in the file completely overwrite the domain data translation, including high frequency or high confidence phrase translations. You can upload only one glossary with a file size less than 10 MB per call. **/ forced_glossary?: ReadableStream | FileObject | Buffer; + /** A TMX file that contains entries that are treated as a parallel corpus instead of a glossary. **/ parallel_corpus?: ReadableStream | FileObject | Buffer; + /** A UTF-8 encoded plain text file that is used to customize the target language model. 
**/ monolingual_corpus?: ReadableStream | FileObject | Buffer; } + /** Parameters for the `deleteModel` operation. **/ export interface DeleteModelParams { + /** The model identifier. **/ model_id: string; } + /** Parameters for the `getModel` operation. **/ export interface GetModelParams { + /** Model ID to use. **/ model_id: string; } + /** Parameters for the `listModels` operation. **/ export interface ListModelsParams { + /** Filter models by source language. **/ source?: string; + /** Filter models by target language. **/ target?: string; + /** Valid values are leaving it unset, `true`, and `false`. When `true`, it filters models to return the default_models model or models. When `false`, it returns the non-default_models model or models. If not set, it returns all models, default_models and non-default_models. **/ default_models?: boolean; } + /************************* + * model interfaces + ************************/ + + /** DeleteModelResult. **/ export interface DeleteModelResult { + /** "OK" indicates that the model was successfully deleted. **/ status: string; } + /** IdentifiableLanguage. **/ export interface IdentifiableLanguage { + /** The code for an identifiable language. **/ language: string; + /** The name of the identifiable language. **/ name: string; } + /** IdentifiableLanguages. **/ export interface IdentifiableLanguages { + /** A list of all languages that the service can identify. **/ languages: IdentifiableLanguage[]; } + /** IdentifiedLanguage. **/ export interface IdentifiedLanguage { + /** The code for an identified language. **/ language: string; + /** The confidence score for the identified language. **/ confidence: number; } + /** IdentifiedLanguages. **/ export interface IdentifiedLanguages { + /** A ranking of identified languages with confidence scores. **/ languages: IdentifiedLanguage[]; } + /** Translation. **/ export interface Translation { + /** Translation output in UTF-8. 
**/ translation_output: string; } + /** Response payload for models. **/ export interface TranslationModel { + /** A globally unique string that identifies the underlying model that is used for translation. This string contains all the information about source language, target language, domain, and various other related configurations. **/ model_id: string; + /** If a model is trained by a user, there might be an optional “name” parameter attached during training to help the user identify the model. **/ name?: string; + /** Source language in two letter language code. Use the five letter code when clarifying between multiple supported languages. When model_id is used directly, it will override the source-target language combination. Also, when a two letter language code is used, but no suitable default is found, it returns an error. **/ source?: string; + /** Target language in two letter language code. **/ target?: string; + /** If this model is a custom model, this returns the base model that it is trained on. For a base model, this response value is empty. **/ base_model_id?: string; + /** The domain of the translation model. **/ domain?: string; + /** Whether this model can be used as a base for customization. Customized models are not further customizable, and we don't allow the customization of certain base models. **/ customizable?: boolean; + /** Whether this model is considered a default model and is used when the source and target languages are specified without the model_id. **/ default_model?: boolean; + /** Returns the ID of the Language Translator service instance that created the model, or an empty string if it is a model that is trained by IBM. **/ owner?: string; + /** Availability of a model. **/ status?: string; } + /** The response type for listing existing translation models. **/ export interface TranslationModels { + /** An array of available models. **/ models: TranslationModel[]; } + /** TranslationResult. 
**/ export interface TranslationResult { + /** Number of words of the complete input text. **/ word_count: number; + /** Number of characters of the complete input text. **/ character_count: number; + /** List of translation output in UTF-8, corresponding to the list of input text. **/ translations: Translation[]; } } diff --git a/natural-language-classifier/v1-generated.ts b/natural-language-classifier/v1-generated.ts index cdf67f0453..a4e48ab693 100644 --- a/natural-language-classifier/v1-generated.ts +++ b/natural-language-classifier/v1-generated.ts @@ -20,21 +20,19 @@ import { createRequest } from '../lib/requestwrapper'; import { getMissingParams } from '../lib/helper'; import { BaseService } from '../lib/base_service'; import { FileObject } from '../lib/helper'; -import { buildRequestFileObject } from '../lib/helper'; /** * IBM Watson Natural Language Classifier uses machine learning algorithms to return the top matching predefined classes for short text input. You create and train a classifier to connect predefined classes to example texts so that the service can apply those classes to new inputs. */ -class NaturalLanguageClassifierV1 extends BaseService { - +class GeneratedNaturalLanguageClassifierV1 extends BaseService { name: string; // set by prototype to 'natural_language_classifier' version: string; // set by prototype to 'v1' static URL: string = 'https://gateway.watsonplatform.net/natural-language-classifier/api'; /** - * Construct a NaturalLanguageClassifierV1 object. + * Construct a GeneratedNaturalLanguageClassifierV1 object. * * @param {Object} options - Options for the service. * @param {String} [options.url] - The base url to use when contacting the service (e.g. 'https://gateway.watsonplatform.net/natural-language-classifier/api'). The base url may differ between Bluemix regions. 
@@ -44,20 +42,20 @@ class NaturalLanguageClassifierV1 extends BaseService { * @param {Object} [options.headers] - Default headers that shall be included with every request to the service. * @param {Object} [options.headers.X-Watson-Learning-Opt-Out] - Set to `true` to opt-out of data collection. By default, all IBM Watson services log requests and their results. Logging is done only to improve the services for future users. The logged data is not shared or made public. If you are concerned with protecting the privacy of users' personal information or otherwise do not want your requests to be logged, you can opt out of logging. * @constructor - * @returns {NaturalLanguageClassifierV1} + * @returns {GeneratedNaturalLanguageClassifierV1} */ - constructor(options: NaturalLanguageClassifierV1.Options) { + constructor(options: GeneratedNaturalLanguageClassifierV1.Options) { super(options); } /************************* - * naturalLanguageClassifier + * naturallanguageclassifier ************************/ /** - * Returns label information for the input. + * Classify. * - * The status must be `Available` before you can use the classifier to classify text. Use `Get information about a classifier` to retrieve the status. + * Returns label information for the input. The status must be `Available` before you can use the classifier to classify text. Use `Get information about a classifier` to retrieve the status. * * @param {Object} params - The parameters to send to the service. * @param {string} params.classifier_id - Classifier ID to use. @@ -65,18 +63,23 @@ class NaturalLanguageClassifierV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. 
* @returns {ReadableStream|void} */ - classify(params: NaturalLanguageClassifierV1.ClassifyParams, callback?: NaturalLanguageClassifierV1.Callback): ReadableStream | void { + classify( + params: GeneratedNaturalLanguageClassifierV1.ClassifyParams, + callback?: GeneratedNaturalLanguageClassifierV1.Callback< + GeneratedNaturalLanguageClassifierV1.Classification + > + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; + const _callback = callback ? callback : () => {}; const requiredParams = ['classifier_id', 'text']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const body = { + const body = { text: _params.text }; - const path = { + const path = { classifier_id: _params.classifier_id }; const parameters = { @@ -85,17 +88,17 @@ class NaturalLanguageClassifierV1 extends BaseService { method: 'POST', json: true, body: body, - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', - 'content-type': 'application/json', + Accept: 'application/json', + 'Content-Type': 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** * Create classifier. @@ -108,23 +111,28 @@ class NaturalLanguageClassifierV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - createClassifier(params: NaturalLanguageClassifierV1.CreateClassifierParams, callback?: NaturalLanguageClassifierV1.Callback): ReadableStream | void { + createClassifier( + params: GeneratedNaturalLanguageClassifierV1.CreateClassifierParams, + callback?: GeneratedNaturalLanguageClassifierV1.Callback< + GeneratedNaturalLanguageClassifierV1.Classifier + > + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? 
callback : () => {}; + const _callback = callback ? callback : () => {}; const requiredParams = ['metadata', 'training_data']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const formData: any = { - training_metadata: buildRequestFileObject({ - data: _params.metadata, + const formData = { + training_metadata: { + data: _params.metadata, contentType: 'application/json' - }), - training_data: buildRequestFileObject({ - data: _params.training_data, + }, + training_data: { + data: _params.training_data, contentType: 'text/csv' - }) + } }; const parameters = { options: { @@ -132,15 +140,15 @@ class NaturalLanguageClassifierV1 extends BaseService { method: 'POST', formData: formData }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', - 'content-type': 'multipart/form-data', + Accept: 'application/json', + 'Content-Type': 'multipart/form-data' } }) }; return createRequest(parameters, _callback); - }; + } /** * Delete classifier. @@ -150,32 +158,37 @@ class NaturalLanguageClassifierV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - deleteClassifier(params: NaturalLanguageClassifierV1.DeleteClassifierParams, callback?: NaturalLanguageClassifierV1.Callback): ReadableStream | void { + deleteClassifier( + params: GeneratedNaturalLanguageClassifierV1.DeleteClassifierParams, + callback?: GeneratedNaturalLanguageClassifierV1.Callback< + GeneratedNaturalLanguageClassifierV1.Empty + > + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; + const _callback = callback ? 
callback : () => {}; const requiredParams = ['classifier_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { + const path = { classifier_id: _params.classifier_id }; const parameters = { options: { url: '/v1/classifiers/{classifier_id}', method: 'DELETE', - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', - 'content-type': 'application/json', + Accept: 'application/json', + 'Content-Type': 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** * Get information about a classifier. @@ -187,32 +200,37 @@ class NaturalLanguageClassifierV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - getClassifier(params: NaturalLanguageClassifierV1.GetClassifierParams, callback?: NaturalLanguageClassifierV1.Callback): ReadableStream | void { + getClassifier( + params: GeneratedNaturalLanguageClassifierV1.GetClassifierParams, + callback?: GeneratedNaturalLanguageClassifierV1.Callback< + GeneratedNaturalLanguageClassifierV1.Classifier + > + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; + const _callback = callback ? 
callback : () => {}; const requiredParams = ['classifier_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const path = { + const path = { classifier_id: _params.classifier_id }; const parameters = { options: { url: '/v1/classifiers/{classifier_id}', method: 'GET', - path: path, + path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', - 'content-type': 'application/json', + Accept: 'application/json', + 'Content-Type': 'application/json' } }) }; return createRequest(parameters, _callback); - }; + } /** * List classifiers. @@ -223,103 +241,145 @@ class NaturalLanguageClassifierV1 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - listClassifiers(params?: NaturalLanguageClassifierV1.ListClassifiersParams, callback?: NaturalLanguageClassifierV1.Callback): ReadableStream | void { - const _params = (typeof params === 'function' && !callback) ? {} : extend({}, params); - const _callback = (typeof params === 'function' && !callback) ? params : (callback) ? callback : () => {}; + listClassifiers( + params?: GeneratedNaturalLanguageClassifierV1.ListClassifiersParams, + callback?: GeneratedNaturalLanguageClassifierV1.Callback< + GeneratedNaturalLanguageClassifierV1.ClassifierList + > + ): ReadableStream | void { + const _params = + typeof params === 'function' && !callback ? {} : extend({}, params); + const _callback = + typeof params === 'function' && !callback + ? params + : callback ? 
callback : () => {}; const parameters = { options: { url: '/v1/classifiers', - method: 'GET', + method: 'GET' }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', - 'content-type': 'application/json', + Accept: 'application/json', + 'Content-Type': 'application/json' } }) }; return createRequest(parameters, _callback); - }; - + } } -NaturalLanguageClassifierV1.prototype.name = 'natural_language_classifier'; -NaturalLanguageClassifierV1.prototype.version = 'v1'; +GeneratedNaturalLanguageClassifierV1.prototype.name = 'natural_language_classifier'; +GeneratedNaturalLanguageClassifierV1.prototype.version = 'v1'; /************************* * interfaces ************************/ -namespace NaturalLanguageClassifierV1 { - - export interface Empty { } - - export type Callback = (error: any, body?: T, response?: RequestResponse) => void; - +namespace GeneratedNaturalLanguageClassifierV1 { + /** Options for the `GeneratedNaturalLanguageClassifierV1` constructor. **/ export type Options = { url?: string; username?: string; password?: string; use_unauthenticated?: boolean; headers?: object; - } + }; + + /** The callback for a service request. **/ + export type Callback = ( + error: any, + body?: T, + response?: RequestResponse + ) => void; + + /** The body of a service request that returns no response data. **/ + export interface Empty {} /************************* * request interfaces ************************/ + /** Parameters for the `classify` operation. **/ export interface ClassifyParams { + /** Classifier ID to use. **/ classifier_id: string; + /** The submitted phrase. **/ text: string; } + /** Parameters for the `createClassifier` operation. **/ export interface CreateClassifierParams { - metadata: ReadableStream|FileObject|Buffer; - training_data: ReadableStream|FileObject|Buffer; + /** Metadata in JSON format. 
The metadata identifies the language of the data, and an optional name to identify the classifier. For details, see the [API reference](https://www.ibm.com/watson/developercloud/natural-language-classifier/api/v1/#create_classifier). **/ + metadata: ReadableStream | FileObject | Buffer; + /** Training data in CSV format. Each text value must have at least one class. The data can include up to 15,000 records. For details, see [Using your own data](https://www.ibm.com/watson/developercloud/doc/natural-language-classifier/using-your-data.html). **/ + training_data: ReadableStream | FileObject | Buffer; } + /** Parameters for the `deleteClassifier` operation. **/ export interface DeleteClassifierParams { + /** Classifier ID to delete. **/ classifier_id: string; } + /** Parameters for the `getClassifier` operation. **/ export interface GetClassifierParams { + /** Classifier ID to query. **/ classifier_id: string; } - export interface ListClassifiersParams { - } + /** Parameters for the `listClassifiers` operation. **/ + export interface ListClassifiersParams {} /************************* * model interfaces ************************/ + /** Response from the classifier for a phrase. **/ export interface Classification { + /** Unique identifier for this classifier. **/ classifier_id?: string; + /** Link to the classifier. **/ url?: string; + /** The submitted phrase. **/ text?: string; + /** The class with the highest confidence. **/ top_class?: string; + /** An array of up to ten class-confidence pairs sorted in descending order of confidence. **/ classes?: ClassifiedClass[]; } + /** Class and confidence. **/ export interface ClassifiedClass { + /** A decimal percentage that represents the confidence that Watson has in this class. Higher values represent higher confidences. **/ confidence?: number; + /** Class label. **/ class_name?: string; } + /** A classifier for natural language phrases. **/ export interface Classifier { + /** User-supplied name for the classifier. 
**/ name?: string; + /** Link to the classifier. **/ url: string; + /** The state of the classifier. **/ status?: string; + /** Unique identifier for this classifier. **/ classifier_id: string; + /** Date and time (UTC) the classifier was created. **/ created?: string; + /** Additional detail about the status. **/ status_description?: string; + /** The language used for the classifier. **/ language?: string; } + /** List of available classifiers. **/ export interface ClassifierList { + /** The classifiers available to the user. Returns an empty array if no classifiers are available. **/ classifiers: Classifier[]; } - } -export = NaturalLanguageClassifierV1; +export = GeneratedNaturalLanguageClassifierV1; diff --git a/natural-language-understanding/v1.ts b/natural-language-understanding/v1.ts index db8a5c215f..ea86d2b3d3 100644 --- a/natural-language-understanding/v1.ts +++ b/natural-language-understanding/v1.ts @@ -24,7 +24,7 @@ import { BaseService } from '../lib/base_service'; * Analyze various features of text content at scale. Provide text, raw HTML, or a public URL, and IBM Watson Natural Language Understanding will give you results for the features you request. The service cleans HTML content before analysis by default, so the results can ignore most advertisements and other unwanted content. ### Concepts Identify general concepts that are referenced or alluded to in your content. Concepts that are detected typically have an associated link to a DBpedia resource. ### Entities Detect important people, places, geopolitical entities and other types of entities in your content. Entity detection recognizes consecutive coreferences of each entity. For example, analysis of the following text would count \"Barack Obama\" and \"He\" as the same entity: \"Barack Obama was the 44th President of the United States. He took office in January 2009.\" ### Keywords Determine the most important keywords in your content. 
Keyword phrases are organized by relevance in the results. ### Categories Categorize your content into a hierarchical 5-level taxonomy. For example, \"Leonardo DiCaprio won an Oscar\" returns \"/art and entertainment/movies and tv/movies\" as the most confident classification. ### Sentiment Determine whether your content conveys postive or negative sentiment. Sentiment information can be returned for detected entities, keywords, or user-specified target phrases found in the text. ### Emotion Detect anger, disgust, fear, joy, or sadness that is conveyed by your content. Emotion information can be returned for detected entities, keywords, or user-specified target phrases found in the text. ### Relations Recognize when two entities are related, and identify the type of relation. For example, you can identify an \"awardedTo\" relation between an award and its recipient. ### Semantic Roles Parse sentences into subject-action-object form, and identify entities and keywords that are subjects or objects of an action. ### Metadata Get author information, publication date, and the title of your text/HTML content. */ -class NaturalLanguageUnderstandingV1 extends BaseService { +class GeneratedNaturalLanguageUnderstandingV1 extends BaseService { name: string; // set by prototype to 'natural-language-understanding' version: string; // set by prototype to 'v1' @@ -34,7 +34,7 @@ class NaturalLanguageUnderstandingV1 extends BaseService { static URL: string = 'https://gateway.watsonplatform.net/natural-language-understanding/api'; /** - * Construct a NaturalLanguageUnderstandingV1 object. + * Construct a GeneratedNaturalLanguageUnderstandingV1 object. * * @param {Object} options - Options for the service. * @param {String} options.version_date - The API version date to use with the service, in "YYYY-MM-DD" format. Whenever the API is changed in a backwards incompatible way, a new minor version of the API is released. 
The service uses the API version for the date you specify, or the most recent version before that date. Note that you should not programmatically specify the current date at runtime, in case the API has been updated since your application's release. Instead, specify a version date that is compatible with your application, and don't change it until your application is ready for a later version. @@ -44,21 +44,23 @@ class NaturalLanguageUnderstandingV1 extends BaseService { * @param {Boolean} [options.use_unauthenticated] - Set to `true` to avoid including an authorization header. This option may be useful for requests that are proxied. * @param {Object} [options.headers] - Default headers that shall be included with every request to the service. * @param {Object} [options.headers.X-Watson-Learning-Opt-Out] - Set to `true` to opt-out of data collection. By default, all IBM Watson services log requests and their results. Logging is done only to improve the services for future users. The logged data is not shared or made public. If you are concerned with protecting the privacy of users' personal information or otherwise do not want your requests to be logged, you can opt out of logging. - * @returns {NaturalLanguageUnderstandingV1} - * @throws {Error} * @constructor + * @returns {GeneratedNaturalLanguageUnderstandingV1} + * @throws {Error} */ - constructor(options: NaturalLanguageUnderstandingV1.Options) { + constructor(options: GeneratedNaturalLanguageUnderstandingV1.Options) { super(options); // check if 'version_date' was provided if (typeof this._options.version_date === 'undefined') { - throw new Error( - 'Argument error: version_date was not specified, use NaturalLanguageUnderstandingV1.VERSION_DATE_2017_02_27' - ); + throw new Error('Argument error: version_date was not specified'); } this._options.qs.version = options.version_date; } + /************************* + * analyze + ************************/ + /** * Analyze text, HTML, or a public webpage. 
* @@ -79,13 +81,13 @@ class NaturalLanguageUnderstandingV1 extends BaseService { * @returns {ReadableStream|void} */ analyze( - params: NaturalLanguageUnderstandingV1.AnalyzeParams, - callback?: NaturalLanguageUnderstandingV1.Callback< - NaturalLanguageUnderstandingV1.AnalysisResults + params: GeneratedNaturalLanguageUnderstandingV1.AnalyzeParams, + callback?: GeneratedNaturalLanguageUnderstandingV1.Callback< + GeneratedNaturalLanguageUnderstandingV1.AnalysisResults > ): ReadableStream | void { - const _callback = typeof callback === 'function' ? callback : () => {}; const _params = extend({}, params); + const _callback = callback ? callback : () => {}; const requiredParams = ['features']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { @@ -110,16 +112,20 @@ class NaturalLanguageUnderstandingV1 extends BaseService { json: true, body: body }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + Accept: 'application/json', + 'Content-Type': 'application/json' } }) }; return createRequest(parameters, _callback); } + /************************* + * modelManagement + ************************/ + /** * Delete model. * @@ -131,13 +137,13 @@ class NaturalLanguageUnderstandingV1 extends BaseService { * @returns {ReadableStream|void} */ deleteModel( - params: NaturalLanguageUnderstandingV1.DeleteModelParams, - callback?: NaturalLanguageUnderstandingV1.Callback< - NaturalLanguageUnderstandingV1.InlineResponse200 + params: GeneratedNaturalLanguageUnderstandingV1.DeleteModelParams, + callback?: GeneratedNaturalLanguageUnderstandingV1.Callback< + GeneratedNaturalLanguageUnderstandingV1.InlineResponse200 > ): ReadableStream | void { - const _callback = typeof callback === 'function' ? callback : () => {}; const _params = extend({}, params); + const _callback = callback ? 
callback : () => {}; const requiredParams = ['model_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { @@ -152,10 +158,10 @@ class NaturalLanguageUnderstandingV1 extends BaseService { method: 'DELETE', path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + Accept: 'application/json', + 'Content-Type': 'application/json' } }) }; @@ -172,22 +178,26 @@ class NaturalLanguageUnderstandingV1 extends BaseService { * @returns {ReadableStream|void} */ listModels( - params?: NaturalLanguageUnderstandingV1.ListModelsParams, - callback?: NaturalLanguageUnderstandingV1.Callback< - NaturalLanguageUnderstandingV1.ListModelsResults + params?: GeneratedNaturalLanguageUnderstandingV1.ListModelsParams, + callback?: GeneratedNaturalLanguageUnderstandingV1.Callback< + GeneratedNaturalLanguageUnderstandingV1.ListModelsResults > ): ReadableStream | void { - const _callback = typeof callback === 'function' ? callback : () => {}; - const _params = extend({}, params); + const _params = + typeof params === 'function' && !callback ? {} : extend({}, params); + const _callback = + typeof params === 'function' && !callback + ? params + : callback ? 
callback : () => {}; const parameters = { options: { url: '/v1/models', method: 'GET' }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + Accept: 'application/json', + 'Content-Type': 'application/json' } }) }; @@ -195,19 +205,16 @@ class NaturalLanguageUnderstandingV1 extends BaseService { } } -NaturalLanguageUnderstandingV1.prototype.name = +GeneratedNaturalLanguageUnderstandingV1.prototype.name = 'natural-language-understanding'; -NaturalLanguageUnderstandingV1.prototype.version = 'v1'; - -namespace NaturalLanguageUnderstandingV1 { - export interface Empty {} +GeneratedNaturalLanguageUnderstandingV1.prototype.version = 'v1'; - export type Callback = ( - error: any, - body?: T, - response?: RequestResponse - ) => void; +/************************* + * interfaces + ************************/ +namespace GeneratedNaturalLanguageUnderstandingV1 { + /** Options for the `GeneratedNaturalLanguageUnderstandingV1` constructor. **/ export type Options = { version_date: string; url?: string; @@ -217,254 +224,444 @@ namespace NaturalLanguageUnderstandingV1 { headers?: object; }; + /** The callback for a service request. **/ + export type Callback = ( + error: any, + body?: T, + response?: RequestResponse + ) => void; + + /** The body of a service request that returns no response data. **/ + export interface Empty {} + + /************************* + * request interfaces + ************************/ + + /** Parameters for the `analyze` operation. **/ export interface AnalyzeParams { + /** Specific features to analyze the document for. **/ features: Features; + /** The plain text to analyze. **/ text?: string; + /** The HTML file to analyze. **/ html?: string; + /** The web page to analyze. **/ url?: string; + /** Remove website elements, such as links, ads, etc. **/ clean?: boolean; + /** XPath query for targeting nodes in HTML. 
**/ xpath?: string; + /** Whether to use raw HTML content if text cleaning fails. **/ fallback_to_raw?: boolean; + /** Whether or not to return the analyzed text. **/ return_analyzed_text?: boolean; + /** ISO 639-1 code indicating the language to use in the analysis. **/ language?: string; + /** Sets the maximum number of characters that are processed by the service. **/ limit_text_characters?: number; } + /** Parameters for the `deleteModel` operation. **/ export interface DeleteModelParams { + /** model_id of the model to delete. **/ model_id: string; } + /** Parameters for the `listModels` operation. **/ export interface ListModelsParams {} + /************************* + * model interfaces + ************************/ + + /** The author of the analyzed content. **/ export interface Author { + /** Name of the author. **/ name?: string; } + /** The hierarchical 5-level taxonomy the content is categorized into. **/ export interface CategoriesOptions {} + /** The hierarchical 5-level taxonomy the content is categorized into. **/ export interface CategoriesResult { + /** The path to the category through the taxonomy hierarchy. **/ label?: string; + /** Confidence score for the category classification. Higher values indicate greater confidence. **/ score?: number; } + /** Whether or not to analyze content for general concepts that are referenced or alluded to. **/ export interface ConceptsOptions { + /** Maximum number of concepts to return. **/ limit?: number; } + /** The general concepts referenced or alluded to in the specified content. **/ export interface ConceptsResult { + /** Name of the concept. **/ text?: string; + /** Relevance score between 0 and 1. Higher scores indicate greater relevance. **/ relevance?: number; + /** Link to the corresponding DBpedia resource. **/ dbpedia_resource?: string; } + /** Disambiguation information for the entity. **/ export interface DisambiguationResult { + /** Common entity name. 
**/ name?: string; + /** Link to the corresponding DBpedia resource. **/ dbpedia_resource?: string; + /** Entity subtype information. **/ subtype?: string[]; } + /** An object containing the emotion results of a document. **/ export interface DocumentEmotionResults { + /** An object containing the emotion results for the document. **/ emotion?: EmotionScores; } + /** DocumentSentimentResults. **/ export interface DocumentSentimentResults { + /** Indicates whether the sentiment is positive, neutral, or negative. **/ label?: string; + /** Sentiment score from -1 (negative) to 1 (positive). **/ score?: number; } + /** Whether or not to return emotion analysis of the content. **/ export interface EmotionOptions { + /** Set this to false to hide document-level emotion results. **/ document?: boolean; + /** Emotion results will be returned for each target string that is found in the document. **/ targets?: string[]; } + /** The detected anger, disgust, fear, joy, or sadness that is conveyed by the content. Emotion information can be returned for detected entities, keywords, or user-specified target phrases found in the text. **/ export interface EmotionResult { + /** The returned emotion results across the document. **/ document?: DocumentEmotionResults; + /** The returned emotion results per specified target. **/ targets?: TargetedEmotionResults[]; } + /** EmotionScores. **/ export interface EmotionScores { + /** Anger score from 0 to 1. A higher score means that the text is more likely to convey anger. **/ anger?: number; + /** Disgust score from 0 to 1. A higher score means that the text is more likely to convey disgust. **/ disgust?: number; + /** Fear score from 0 to 1. A higher score means that the text is more likely to convey fear. **/ fear?: number; + /** Joy score from 0 to 1. A higher score means that the text is more likely to convey joy. **/ joy?: number; + /** Sadness score from 0 to 1. A higher score means that the text is more likely to convey sadness. 
**/ sadness?: number; } + /** Whether or not to return important people, places, geopolitical, and other entities detected in the analyzed content. **/ export interface EntitiesOptions { + /** Maximum number of entities to return. **/ limit?: number; + /** Set this to true to return locations of entity mentions. **/ + mentions?: boolean; + /** Enter a custom model ID to override the standard entity detection model. **/ model?: string; + /** Set this to true to return sentiment information for detected entities. **/ sentiment?: boolean; + /** Set this to true to analyze emotion for detected keywords. **/ emotion?: boolean; } + /** The important people, places, geopolitical entities and other types of entities in your content. **/ export interface EntitiesResult { + /** Entity type. **/ type?: string; + /** The name of the entity. **/ + text?: string; + /** Relevance score from 0 to 1. Higher values indicate greater relevance. **/ relevance?: number; + /** Entity mentions and locations. **/ + mentions?: EntityMention[]; + /** How many times the entity was mentioned in the text. **/ count?: number; - text?: string; + /** Emotion analysis results for the entity, enabled with the "emotion" option. **/ emotion?: EmotionScores; + /** Sentiment analysis results for the entity, enabled with the "sentiment" option. **/ sentiment?: FeatureSentimentResults; + /** Disambiguation information for the entity. **/ disambiguation?: DisambiguationResult; } + /** EntityMention. **/ + export interface EntityMention { + /** Entity mention text. **/ + text?: string; + /** Character offsets indicating the beginning and end of the mention in the analyzed text. **/ + location?: number[]; + } + + /** FeatureSentimentResults. **/ export interface FeatureSentimentResults { + /** Sentiment score from -1 (negative) to 1 (positive). **/ score?: number; } + /** Analysis features and options. 
**/ export interface Features { + /** Whether or not to return the concepts that are mentioned in the analyzed text. **/ concepts?: ConceptsOptions; + /** Whether or not to extract the emotions implied in the analyzed text. **/ emotion?: EmotionOptions; + /** Whether or not to extract detected entity objects from the analyzed text. **/ entities?: EntitiesOptions; + /** Whether or not to return the keywords in the analyzed text. **/ keywords?: KeywordsOptions; + /** Whether or not the author, publication date, and title of the analyzed text should be returned. This parameter is only available for URL and HTML input. **/ metadata?: MetadataOptions; + /** Whether or not to return the relationships between detected entities in the analyzed text. **/ relations?: RelationsOptions; + /** Whether or not to return the subject-action-object relations from the analyzed text. **/ semantic_roles?: SemanticRolesOptions; + /** Whether or not to return the overall sentiment of the analyzed text. **/ sentiment?: SentimentOptions; + /** Whether or not to return the high level category the content is categorized as (i.e. news, art). **/ categories?: CategoriesOptions; } + /** InlineResponse200. **/ export interface InlineResponse200 { + /** model_id of the deleted model. **/ deleted?: string; } + /** An option indicating whether or not important keywords from the analyzed content should be returned. **/ export interface KeywordsOptions { + /** Maximum number of keywords to return. **/ limit?: number; + /** Set this to true to return sentiment information for detected keywords. **/ sentiment?: boolean; + /** Set this to true to analyze emotion for detected keywords. **/ emotion?: boolean; } + /** The most important keywords in the content, organized by relevance. **/ export interface KeywordsResult { + /** Relevance score from 0 to 1. Higher values indicate greater relevance. **/ relevance?: number; + /** The keyword text. 
**/ text?: string; + /** Emotion analysis results for the keyword, enabled with the "emotion" option. **/ emotion?: EmotionScores; + /** Sentiment analysis results for the keyword, enabled with the "sentiment" option. **/ sentiment?: FeatureSentimentResults; } + /** Models available for Relations and Entities features. **/ export interface ListModelsResults { models?: Model[]; } + /** The Authors, Publication Date, and Title of the document. Supports URL and HTML input types. **/ export interface MetadataOptions {} + /** The Authors, Publication Date, and Title of the document. Supports URL and HTML input types. **/ export interface MetadataResult { + /** The authors of the document. **/ authors?: Author[]; + /** The publication date in the format ISO 8601. **/ publication_date?: string; + /** The title of the document. **/ title?: string; } + /** Model. **/ export interface Model { + /** Shows as available if the model is ready for use. **/ status?: string; + /** Unique model ID. **/ model_id?: string; + /** ISO 639-1 code indicating the language of the model. **/ language?: string; + /** Model description. **/ description?: string; } + /** RelationArgument. **/ export interface RelationArgument { entities?: RelationEntity[]; + /** Text that corresponds to the argument. **/ text?: string; } + /** The entities extracted from a sentence in a given document. **/ export interface RelationEntity { + /** Text that corresponds to the entity. **/ text?: string; + /** Entity type. **/ type?: string; } + /** An option specifying if the relationships found between entities in the analyzed content should be returned. **/ export interface RelationsOptions { + /** Enter a custom model ID to override the default model. **/ model?: string; } + /** The relations between entities found in the content. **/ export interface RelationsResult { + /** Confidence score for the relation. Higher values indicate greater confidence. 
**/ score?: number; + /** The sentence that contains the relation. **/ sentence?: string; + /** The type of the relation. **/ type?: string; + /** The extracted relation objects from the text. **/ arguments?: RelationArgument[]; } + /** SemanticRolesAction. **/ export interface SemanticRolesAction { + /** Analyzed text that corresponds to the action. **/ text?: string; + /** normalized version of the action. **/ normalized?: string; verb?: SemanticRolesVerb; } + /** SemanticRolesEntity. **/ export interface SemanticRolesEntity { + /** Entity type. **/ type?: string; + /** The entity text. **/ text?: string; } + /** SemanticRolesKeyword. **/ export interface SemanticRolesKeyword { + /** The keyword text. **/ text?: string; } + /** SemanticRolesObject. **/ export interface SemanticRolesObject { + /** Object text. **/ text?: string; keywords?: SemanticRolesKeyword[]; } + /** An option specifying whether or not to identify the subjects, actions, and verbs in the analyzed content. **/ export interface SemanticRolesOptions { + /** Maximum number of semantic_roles results to return. **/ limit?: number; + /** Set this to true to return keyword information for subjects and objects. **/ keywords?: boolean; + /** Set this to true to return entity information for subjects and objects. **/ entities?: boolean; } + /** The object containing the actions and the objects the actions act upon. **/ export interface SemanticRolesResult { + /** Sentence from the source that contains the subject, action, and object. **/ sentence?: string; + /** The extracted subject from the sentence. **/ subject?: SemanticRolesSubject; + /** The extracted action from the sentence. **/ action?: SemanticRolesAction; + /** The extracted object from the sentence. **/ object?: SemanticRolesObject; } + /** SemanticRolesSubject. **/ export interface SemanticRolesSubject { + /** Text that corresponds to the subject role. 
**/ text?: string; entities?: SemanticRolesEntity[]; keywords?: SemanticRolesKeyword[]; } + /** SemanticRolesVerb. **/ export interface SemanticRolesVerb { + /** The keyword text. **/ text?: string; + /** Verb tense. **/ tense?: string; } + /** An option specifying if sentiment of detected entities, keywords, or phrases should be returned. **/ export interface SentimentOptions { + /** Set this to false to hide document-level sentiment results. **/ document?: boolean; + /** Sentiment results will be returned for each target string that is found in the document. **/ targets?: string[]; } + /** The sentiment of the content. **/ export interface SentimentResult { + /** The document level sentiment. **/ document?: DocumentSentimentResults; + /** The targeted sentiment to analyze. **/ targets?: TargetedSentimentResults[]; } + /** An object containing the emotion results for the target. **/ export interface TargetedEmotionResults { + /** Targeted text. **/ text?: string; + /** An object containing the emotion results for the target. **/ emotion?: EmotionScores; } + /** TargetedSentimentResults. **/ export interface TargetedSentimentResults { + /** Targeted text. **/ text?: string; + /** Sentiment score from -1 (negative) to 1 (positive). **/ score?: number; } + /** Usage information. **/ export interface Usage { + /** Number of features used in the API call. **/ features?: number; + /** Number of text characters processed. **/ text_characters?: number; + /** Number of 10,000-character units processed. **/ text_units?: number; } + /** Results of the analysis, organized by feature. **/ export interface AnalysisResults { + /** Language used to analyze the text. **/ language?: string; + /** Text that was used in the analysis. **/ analyzed_text?: string; + /** URL that was used to retrieve HTML content. **/ retrieved_url?: string; + /** API usage information for the request. **/ usage?: Usage; + /** The general concepts referenced or alluded to in the specified content. 
**/ concepts?: ConceptsResult[]; + /** The important entities in the specified content. **/ entities?: EntitiesResult[]; + /** The important keywords in content organized by relevance. **/ keywords?: KeywordsResult[]; + /** The hierarchical 5-level taxonomy the content is categorized into. **/ categories?: CategoriesResult[]; + /** The anger, disgust, fear, joy, or sadness conveyed by the content. **/ emotion?: EmotionResult; + /** The metadata holds author information, publication date and the title of the text/HTML content. **/ metadata?: MetadataResult; + /** The relationships between entities in the content. **/ relations?: RelationsResult[]; + /** The subjects of actions and the objects the actions act upon. **/ semantic_roles?: SemanticRolesResult[]; + /** The sentiment of the content. **/ sentiment?: SentimentResult; } } -export = NaturalLanguageUnderstandingV1; +export = GeneratedNaturalLanguageUnderstandingV1; diff --git a/personality-insights/v3-generated.ts b/personality-insights/v3-generated.ts index 64870c6889..887c145d54 100644 --- a/personality-insights/v3-generated.ts +++ b/personality-insights/v3-generated.ts @@ -99,10 +99,10 @@ class PersonalityInsightsV3 extends BaseService { }, defaultOptions: extend(true, this._options, { headers: { - 'accept': 'application/json', - 'content-type': _params.content_type, - 'content-language': _params.content_language, - 'accept-language': _params.accept_language + 'Accept': 'application/json', + 'Content-Type': _params.content_type, + 'Content-Language': _params.content_language, + 'Accept-Language': _params.accept_language } }) }; diff --git a/personality-insights/v3.ts b/personality-insights/v3.ts index c472fcafb3..ee93af1f89 100644 --- a/personality-insights/v3.ts +++ b/personality-insights/v3.ts @@ -119,10 +119,10 @@ class PersonalityInsightsV3 extends GeneratedPersonalityInsightsV3 { }, defaultOptions: extend(true, this._options, { headers: { - accept: 'text/csv', - 'content-type': _params.content_type, - 
'content-language': _params.content_language, - 'accept-language': _params.accept_language + Accept: 'text/csv', + 'Content-Type': _params.content_type, + 'Content-Language': _params.content_language, + 'Accept-Language': _params.accept_language } }) }; diff --git a/speech-to-text/v1-generated.ts b/speech-to-text/v1-generated.ts index b671604f28..fa9fb1ad79 100644 --- a/speech-to-text/v1-generated.ts +++ b/speech-to-text/v1-generated.ts @@ -64,9 +64,7 @@ class GeneratedSpeechToTextV1 extends BaseService { */ getModel( params: GeneratedSpeechToTextV1.GetModelParams, - callback?: GeneratedSpeechToTextV1.Callback< - GeneratedSpeechToTextV1.SpeechModel - > + callback?: GeneratedSpeechToTextV1.Callback ): ReadableStream | void { const _params = extend({}, params); const _callback = callback ? callback : () => {}; @@ -105,9 +103,7 @@ class GeneratedSpeechToTextV1 extends BaseService { */ listModels( params?: GeneratedSpeechToTextV1.ListModelsParams, - callback?: GeneratedSpeechToTextV1.Callback< - GeneratedSpeechToTextV1.SpeechModels - > + callback?: GeneratedSpeechToTextV1.Callback ): ReadableStream | void { const _params = typeof params === 'function' && !callback ? {} : extend({}, params); @@ -137,7 +133,7 @@ class GeneratedSpeechToTextV1 extends BaseService { /** * Sends audio for speech recognition in sessionless mode. * - * Sends audio and returns transcription results for a sessionless recognition request. Returns only the final results; to enable interim results, use session-based requests or the WebSocket API. The service imposes a data size limit of 100 MB. It automatically detects the endianness of the incoming audio and, for audio that includes multiple channels, downmixes the audio to one-channel mono during transcoding. (For the `audio/l16` format, you can specify the endianness.) 
###Streaming mode For requests to transcribe live audio as it becomes available or to transcribe multiple audio files with multipart requests, you must set the `Transfer-Encoding` header to `chunked` to use streaming mode. In streaming mode, the server closes the connection (status code 408) if the service receives no data chunk for 30 seconds and the service has no audio to transcribe for 30 seconds. The server also closes the connection (status code 400) if no speech is detected for `inactivity_timeout` seconds of audio (not processing time); use the `inactivity_timeout` parameter to change the default of 30 seconds. ###Non-multipart requests For non-multipart requests, you specify all parameters of the request as a collection of request headers and query parameters, and you provide the audio as the body of the request. This is the recommended means of submitting a recognition request. Use the following parameters: * **Required:** `Content-Type` and `body` * **Optional:** `Transfer-Encoding`, `model`, `customization_id`, `acoustic_customization_id`, `customization_weight`, `inactivity_timeout`, `keywords`, `keywords_threshold`, `max_alternatives`, `word_alternatives_threshold`, `word_confidence`, `timestamps`, `profanity_filter`, `smart_formatting`, and `speaker_labels` ###Multipart requests For multipart requests, you specify a few parameters of the request as request headers and query parameters, but you specify most parameters as multipart form data in the form of JSON metadata, in which only `part_content_type` is required. You then specify the audio files for the request as subsequent parts of the form data. Use this approach with browsers that do not support JavaScript or when the parameters of the request are greater than the 8 KB limit imposed by most HTTP servers and proxies. 
Use the following parameters: * **Required:** `Content-Type`, `metadata`, and `upload` * **Optional:** `Transfer-Encoding`, `model`, `customization_id`, `acoustic_customization_id`, and `customization_weight` An example of the multipart metadata for a pair of FLAC files follows. This first part of the request is sent as JSON; the remaining parts are the audio files for the request. `metadata=\"{\\\"part_content_type\\\":\\\"audio/flac\\\",\\\"data_parts_count\\\":2,\\\"inactivity_timeout\\\"=-1}\"` **Note:** You can pass the `interim_results` parameter with a recognition request made with the HTTP sessionless interface, as a query parameter for a non-multipart request or with the `MultipartRecognition` object for a multipart request. However, the service sends all results, both interim and final, at the same time, when the request completes. The service does **not** return interim results as it generates them. **Note about the Try It Out feature:** The `Try it out!` button is **not** supported for use with the the `POST /v1/recognize` method. For examples of calls to the method, see the [Speech to Text API reference](http://www.ibm.com/watson/developercloud/speech-to-text/api/v1/). + * Sends audio and returns transcription results for a sessionless recognition request. Returns only the final results; to enable interim results, use session-based requests or the WebSocket API. The service imposes a data size limit of 100 MB. It automatically detects the endianness of the incoming audio and, for audio that includes multiple channels, downmixes the audio to one-channel mono during transcoding. (For the `audio/l16` format, you can specify the endianness.) ###Streaming mode For requests to transcribe live audio as it becomes available or to transcribe multiple audio files with multipart requests, you must set the `Transfer-Encoding` header to `chunked` to use streaming mode. 
In streaming mode, the server closes the connection (status code 408) if the service receives no data chunk for 30 seconds and the service has no audio to transcribe for 30 seconds. The server also closes the connection (status code 400) if no speech is detected for `inactivity_timeout` seconds of audio (not processing time); use the `inactivity_timeout` parameter to change the default of 30 seconds. ###Non-multipart requests For non-multipart requests, you specify all parameters of the request as a collection of request headers and query parameters, and you provide the audio as the body of the request. This is the recommended means of submitting a recognition request. Use the following parameters: * **Required:** `Content-Type` and `audio` * **Optional:** `Transfer-Encoding`, `model`, `customization_id`, `acoustic_customization_id`, `customization_weight`, `inactivity_timeout`, `keywords`, `keywords_threshold`, `max_alternatives`, `word_alternatives_threshold`, `word_confidence`, `timestamps`, `profanity_filter`, `smart_formatting`, and `speaker_labels` ###Multipart requests For multipart requests, you specify a few parameters of the request as request headers and query parameters, but you specify most parameters as multipart form data in the form of JSON metadata, in which only `part_content_type` is required. You then specify the audio files for the request as subsequent parts of the form data. Use this approach with browsers that do not support JavaScript or when the parameters of the request are greater than the 8 KB limit imposed by most HTTP servers and proxies. Use the following parameters: * **Required:** `Content-Type`, `metadata`, and `upload` * **Optional:** `Transfer-Encoding`, `model`, `customization_id`, `acoustic_customization_id`, and `customization_weight` An example of the multipart metadata for a pair of FLAC files follows. This first part of the request is sent as JSON; the remaining parts are the audio files for the request. 
`metadata=\"{\\\"part_content_type\\\":\\\"audio/flac\\\",\\\"data_parts_count\\\":2,\\\"inactivity_timeout\\\"=-1}\"` **Note:** You can pass the `interim_results` parameter with a recognition request made with the HTTP sessionless interface, as a query parameter for a non-multipart request or with the `MultipartRecognition` object for a multipart request. However, the service sends all results, both interim and final, at the same time, when the request completes. The service does **not** return interim results as it generates them. **Note about the Try It Out feature:** The `Try it out!` button is **not** supported for use with the the `POST /v1/recognize` method. For examples of calls to the method, see the [Speech to Text API reference](http://www.ibm.com/watson/developercloud/speech-to-text/api/v1/). * * @param {Object} [params] - The parameters to send to the service. * @param {string} [params.transfer_encoding] - Set to `chunked` to send the audio in streaming mode; the data does not need to exist fully before being streamed to the service. MULTIPART: You must also set this header for requests with more than one audio part. @@ -165,9 +161,7 @@ class GeneratedSpeechToTextV1 extends BaseService { */ recognizeSessionless( params?: GeneratedSpeechToTextV1.RecognizeSessionlessParams, - callback?: GeneratedSpeechToTextV1.Callback< - GeneratedSpeechToTextV1.SpeechRecognitionResults - > + callback?: GeneratedSpeechToTextV1.Callback ): ReadableStream | void { const _params = typeof params === 'function' && !callback ? {} : extend({}, params); @@ -238,9 +232,7 @@ class GeneratedSpeechToTextV1 extends BaseService { */ createSession( params?: GeneratedSpeechToTextV1.CreateSessionParams, - callback?: GeneratedSpeechToTextV1.Callback< - GeneratedSpeechToTextV1.SpeechSession - > + callback?: GeneratedSpeechToTextV1.Callback ): ReadableStream | void { const _params = typeof params === 'function' && !callback ? 
{} : extend({}, params); @@ -322,9 +314,7 @@ class GeneratedSpeechToTextV1 extends BaseService { */ getSessionStatus( params: GeneratedSpeechToTextV1.GetSessionStatusParams, - callback?: GeneratedSpeechToTextV1.Callback< - GeneratedSpeechToTextV1.SessionStatus - > + callback?: GeneratedSpeechToTextV1.Callback ): ReadableStream | void { const _params = extend({}, params); const _callback = callback ? callback : () => {}; @@ -352,6 +342,136 @@ class GeneratedSpeechToTextV1 extends BaseService { return createRequest(parameters, _callback); } + /** + * Observes results for a recognition task within a session. + * + * Requests results for a recognition task within the specified session. You can submit multiple requests for the same recognition task. To see interim results, set the query parameter `interim_results=true`. The request must pass the cookie that was returned by the `POST /v1/sessions` method. To see results for a specific recognition task, specify a sequence ID (with the `sequence_id` query parameter) that matches the sequence ID of the recognition request. A request with a sequence ID can arrive before, during, or after the matching recognition request, but it must arrive no later than 30 seconds after the recognition completes to avoid a session timeout (response code 408). Send multiple requests for the sequence ID with a maximum gap of 30 seconds to avoid the timeout. Omit the sequence ID to observe results for an ongoing recognition task. If no recognition task is ongoing, the method returns results for the next recognition task regardless of whether it specifies a sequence ID. + * + * @param {Object} params - The parameters to send to the service. + * @param {string} params.session_id - The ID of the session whose results you want to observe. + * @param {number} [params.sequence_id] - The sequence ID of the recognition task whose results you want to observe. 
Omit the parameter to obtain results either for an ongoing recognition, if any, or for the next recognition task regardless of whether it specifies a sequence ID. + * @param {boolean} [params.interim_results] - If `true`, interim results are returned as a stream of JSON `SpeechRecognitionResults` objects. If `false`, the response is a single `SpeechRecognitionResults` object with final results only. + * @param {Function} [callback] - The callback that handles the response. + * @returns {ReadableStream|void} + */ + observeResult( + params: GeneratedSpeechToTextV1.ObserveResultParams, + callback?: GeneratedSpeechToTextV1.Callback + ): ReadableStream | void { + const _params = extend({}, params); + const _callback = callback ? callback : () => {}; + const requiredParams = ['session_id']; + const missingParams = getMissingParams(_params, requiredParams); + if (missingParams) { + return _callback(missingParams); + } + const query = { + sequence_id: _params.sequence_id, + interim_results: _params.interim_results + }; + const path = { + session_id: _params.session_id + }; + const parameters = { + options: { + url: '/v1/sessions/{session_id}/observe_result', + method: 'GET', + qs: query, + path: path + }, + defaultOptions: extend(true, {}, this._options, { + headers: { + Accept: 'application/json', + 'Content-Type': 'application/json' + } + }) + }; + return createRequest(parameters, _callback); + } + + /** + * Sends audio for speech recognition within a session. + * + * Sends audio and returns transcription results for a session-based recognition request. By default, returns only the final transcription results for the request. To see interim results, set the query parameter `interim_results=true` in a `GET` request to the `observe_result` method before this `POST` request finishes. 
To enable polling by the `observe_result` method for large audio requests, specify an integer with the `sequence_id` query parameter for non-multipart requests or with the `sequence_id` parameter of the JSON metadata for multipart requests. The service imposes a data size limit of 100 MB per session. It automatically detects the endianness of the incoming audio and, for audio that includes multiple channels, downmixes the audio to one-channel mono during transcoding. (For the `audio/l16` format, you can specify the endianness.) The request must pass the cookie that was returned by the `POST /v1/sessions` method. ###Streaming mode For requests to transcribe live audio as it becomes available or to transcribe multiple audio files with multipart requests, you must set `Transfer-Encoding` to `chunked` to use streaming mode. In streaming mode, the server closes the session (status code 408) if the service receives no data chunk for 30 seconds and the service has no audio to transcribe for 30 seconds. The server also closes the session (status code 400) if no speech is detected for `inactivity_timeout` seconds of audio (not processing time); use the `inactivity_timeout` parameter to change the default of 30 seconds. For more information, see [Timeouts](https://console.bluemix.net/docs/services/speech-to-text/input.html#timeouts). ###Non-multipart requests For non-multipart requests, you specify all parameters of the request as a path parameter, request headers, and query parameters. You provide the audio as the body of the request. This is the recommended means of submitting a recognition request. 
Use the following parameters: * **Required:** `session_id`, `Content-Type`, and `audio` * **Optional:** `Transfer-Encoding`, `sequence_id`, `inactivity_timeout`, `keywords`, `keywords_threshold`, `max_alternatives`, `word_alternatives_threshold`, `word_confidence`, `timestamps`, `profanity_filter`, `smart_formatting`, and `speaker_labels` ###Multipart requests For multipart requests, you specify a few parameters of the request via a path parameter and as request headers, but you specify most parameters as multipart form data in the form of JSON metadata, in which only `part_content_type` is required. You then specify the audio files for the request as subsequent parts of the form data. Use this approach with browsers that do not support JavaScript or when the parameters of the request are greater than the 8 KB limit imposed by most HTTP servers and proxies. Use the following parameters: * **Required:** `session_id`, `Content-Type`, `metadata`, and `upload` * **Optional:** `Transfer-Encoding` An example of the multipart metadata for a pair of FLAC files follows. This first part of the request is sent as JSON; the remaining parts are the audio files for the request. `metadata=\"{\\\"part_content_type\\\":\\\"audio/flac\\\",\\\"data_parts_count\\\":2,\\\"inactivity_timeout\\\":-1}\"` **Note about the Try It Out feature:** The `Try it out!` button is **not** supported for use with the the `POST /v1/sessions/{session_id}/recognize` method. For examples of calls to the method, see the [Speech to Text API reference](http://www.ibm.com/watson/developercloud/speech-to-text/api/v1/). + * + * @param {Object} params - The parameters to send to the service. + * @param {string} params.session_id - The ID of the session for the recognition task. + * @param {string} [params.transfer_encoding] - Set to `chunked` to send the audio in streaming mode; the data does not need to exist fully before being streamed to the service. 
MULTIPART: You must also set this header for requests with more than one audio part. + * @param {Blob} [params.audio] - NON-MULTIPART ONLY: Audio to transcribe in the format specified by the `Content-Type` header. **Required for a non-multipart request.**. + * @param {string} [params.content_type] - The type of the input: audio/basic, audio/flac, audio/l16, audio/mp3, audio/mpeg, audio/mulaw, audio/ogg, audio/ogg;codecs=opus, audio/ogg;codecs=vorbis, audio/wav, audio/webm, audio/webm;codecs=opus, audio/webm;codecs=vorbis, or multipart/form-data. + * @param {number} [params.sequence_id] - NON-MULTIPART ONLY: Sequence ID of this recognition task in the form of a user-specified integer. If omitted, no sequence ID is associated with the recognition task. + * @param {number} [params.inactivity_timeout] - NON-MULTIPART ONLY: The time in seconds after which, if only silence (no speech) is detected in submitted audio, the connection is closed with a 400 error and with `session_closed` set to `true`. Useful for stopping audio submission from a live microphone when a user simply walks away. Use `-1` for infinity. + * @param {string[]} [params.keywords] - NON-MULTIPART ONLY: Array of keyword strings to spot in the audio. Each keyword string can include one or more tokens. Keywords are spotted only in the final hypothesis, not in interim results (if supported by the method). Omit the parameter or specify an empty array if you do not need to spot keywords. + * @param {number} [params.keywords_threshold] - NON-MULTIPART ONLY: Confidence value that is the lower bound for spotting a keyword. A word is considered to match a keyword if its confidence is greater than or equal to the threshold. Specify a probability between 0 and 1 inclusive. No keyword spotting is performed if you omit the parameter. If you specify a threshold, you must also specify one or more keywords. 
+ * @param {number} [params.max_alternatives] - NON-MULTIPART ONLY: Maximum number of alternative transcripts to be returned. By default, a single transcription is returned. + * @param {number} [params.word_alternatives_threshold] - NON-MULTIPART ONLY: Confidence value that is the lower bound for identifying a hypothesis as a possible word alternative (also known as "Confusion Networks"). An alternative word is considered if its confidence is greater than or equal to the threshold. Specify a probability between 0 and 1 inclusive. No alternative words are computed if you omit the parameter. + * @param {boolean} [params.word_confidence] - NON-MULTIPART ONLY: If `true`, confidence measure per word is returned. + * @param {boolean} [params.timestamps] - NON-MULTIPART ONLY: If `true`, time alignment for each word is returned. + * @param {boolean} [params.profanity_filter] - NON-MULTIPART ONLY: If `true` (the default), filters profanity from all output except for keyword results by replacing inappropriate words with a series of asterisks. Set the parameter to `false` to return results with no censoring. Applies to US English transcription only. + * @param {boolean} [params.smart_formatting] - NON-MULTIPART ONLY: If `true`, converts dates, times, series of digits and numbers, phone numbers, currency values, and Internet addresses into more readable, conventional representations in the final transcript of a recognition request. If `false` (the default), no formatting is performed. Applies to US English transcription only. + * @param {boolean} [params.speaker_labels] - NON-MULTIPART ONLY: Indicates whether labels that identify which words were spoken by which participants in a multi-person exchange are to be included in the response. The default is `false`; no speaker labels are returned. Setting `speaker_labels` to `true` forces the `timestamps` parameter to be `true`, regardless of whether you specify `false` for the parameter. 
To determine whether a language model supports speaker labels, use the `GET /v1/models` method and check that the attribute `speaker_labels` is set to `true`. You can also refer to [Speaker labels](https://console.bluemix.net/docs/services/speech-to-text/output.html#speaker_labels). + * @param {string} [params.metadata] - MULTIPART ONLY: Parameters for the multipart recognition request. This must be the first part of the request and must consist of JSON-formatted data. The information describes the subsequent parts of the request, which pass the audio files to be transcribed. **Required for a multipart request.**. + * @param {ReadableStream|FileObject|Buffer} [params.upload] - MULTIPART ONLY: One or more audio files for the request. For multiple audio files, set `Transfer-Encoding` to `chunked`. **Required for a multipart request.**. + * @param {string} [params.upload_content_type] - The content type of upload. + * @param {Function} [callback] - The callback that handles the response. + * @returns {ReadableStream|void} + */ + recognizeSession( + params: GeneratedSpeechToTextV1.RecognizeSessionParams, + callback?: GeneratedSpeechToTextV1.Callback + ): ReadableStream | void { + const _params = extend({}, params); + const _callback = callback ? 
callback : () => {}; + const requiredParams = ['session_id']; + const missingParams = getMissingParams(_params, requiredParams); + if (missingParams) { + return _callback(missingParams); + } + const formData = { + metadata: _params.metadata, + upload: { + data: _params.upload, + contentType: _params.upload_content_type + } + }; + const body = _params.audio; + const query = { + sequence_id: _params.sequence_id, + inactivity_timeout: _params.inactivity_timeout, + keywords: _params.keywords, + keywords_threshold: _params.keywords_threshold, + max_alternatives: _params.max_alternatives, + word_alternatives_threshold: _params.word_alternatives_threshold, + word_confidence: _params.word_confidence, + timestamps: _params.timestamps, + profanity_filter: _params.profanity_filter, + smart_formatting: _params.smart_formatting, + speaker_labels: _params.speaker_labels + }; + const path = { + session_id: _params.session_id + }; + const parameters = { + options: { + url: '/v1/sessions/{session_id}/recognize', + method: 'POST', + json: _params.content_type === 'application/json', + body: body, + qs: query, + path: path, + formData: formData + }, + defaultOptions: extend(true, {}, this._options, { + headers: { + Accept: 'application/json', + 'Transfer-Encoding': _params.transfer_encoding, + 'Content-Type': _params.content_type + } + }) + }; + return createRequest(parameters, _callback); + } + /************************* * asynchronous ************************/ @@ -368,9 +488,7 @@ class GeneratedSpeechToTextV1 extends BaseService { */ checkJob( params: GeneratedSpeechToTextV1.CheckJobParams, - callback?: GeneratedSpeechToTextV1.Callback< - GeneratedSpeechToTextV1.RecognitionJob - > + callback?: GeneratedSpeechToTextV1.Callback ): ReadableStream | void { const _params = extend({}, params); const _callback = callback ? 
callback : () => {}; @@ -409,9 +527,7 @@ class GeneratedSpeechToTextV1 extends BaseService { */ checkJobs( params?: GeneratedSpeechToTextV1.CheckJobsParams, - callback?: GeneratedSpeechToTextV1.Callback< - GeneratedSpeechToTextV1.RecognitionJobs - > + callback?: GeneratedSpeechToTextV1.Callback ): ReadableStream | void { const _params = typeof params === 'function' && !callback ? {} : extend({}, params); @@ -466,9 +582,7 @@ class GeneratedSpeechToTextV1 extends BaseService { */ createJob( params: GeneratedSpeechToTextV1.CreateJobParams, - callback?: GeneratedSpeechToTextV1.Callback< - GeneratedSpeechToTextV1.RecognitionJob - > + callback?: GeneratedSpeechToTextV1.Callback ): ReadableStream | void { const _params = extend({}, params); const _callback = callback ? callback : () => {}; @@ -570,9 +684,7 @@ class GeneratedSpeechToTextV1 extends BaseService { */ registerCallback( params: GeneratedSpeechToTextV1.RegisterCallbackParams, - callback?: GeneratedSpeechToTextV1.Callback< - GeneratedSpeechToTextV1.RegisterStatus - > + callback?: GeneratedSpeechToTextV1.Callback ): ReadableStream | void { const _params = extend({}, params); const _callback = callback ? callback : () => {}; @@ -651,6 +763,7 @@ class GeneratedSpeechToTextV1 extends BaseService { * Creates a new custom language model for a specified base model. The custom language model can be used only with the base model for which it is created. The model is owned by the instance of the service whose credentials are used to create it. * * @param {Object} params - The parameters to send to the service. + * @param {string} params.content_type - The type of the input. * @param {string} params.name - A user-defined name for the new custom language model. Use a name that is unique among all custom language models that you own. Use a localized name that matches the language of the custom model. Use a name that describes the domain of the custom model, such as `Medical custom model` or `Legal custom model`. 
* @param {string} params.base_model_name - The name of the base language model that is to be customized by the new custom language model. The new custom model can be used only with the base model that it customizes. To determine whether a base model supports language model customization, request information about the base model and check that the attribute `custom_language_model` is set to `true`, or refer to [Language support for customization](https://console.bluemix.net/docs/services/speech-to-text/custom.html#languageSupport). * @param {string} [params.dialect] - The dialect of the specified language that is to be used with the custom language model. The parameter is meaningful only for Spanish models, for which the service creates a custom language model that is suited for speech in one of the following dialects: * `es-ES` for Castilian Spanish (the default) * `es-LA` for Latin American Spanish * `es-US` for North American (Mexican) Spanish A specified dialect must be valid for the base model. By default, the dialect matches the language of the base model; for example, `en-US` for either of the US English language models. @@ -660,13 +773,11 @@ class GeneratedSpeechToTextV1 extends BaseService { */ createLanguageModel( params: GeneratedSpeechToTextV1.CreateLanguageModelParams, - callback?: GeneratedSpeechToTextV1.Callback< - GeneratedSpeechToTextV1.LanguageModel - > + callback?: GeneratedSpeechToTextV1.Callback ): ReadableStream | void { const _params = extend({}, params); const _callback = callback ? 
callback : () => {}; - const requiredParams = ['name', 'base_model_name']; + const requiredParams = ['content_type', 'name', 'base_model_name']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); @@ -746,9 +857,7 @@ class GeneratedSpeechToTextV1 extends BaseService { */ getLanguageModel( params: GeneratedSpeechToTextV1.GetLanguageModelParams, - callback?: GeneratedSpeechToTextV1.Callback< - GeneratedSpeechToTextV1.LanguageModel - > + callback?: GeneratedSpeechToTextV1.Callback ): ReadableStream | void { const _params = extend({}, params); const _callback = callback ? callback : () => {}; @@ -788,9 +897,7 @@ class GeneratedSpeechToTextV1 extends BaseService { */ listLanguageModels( params?: GeneratedSpeechToTextV1.ListLanguageModelsParams, - callback?: GeneratedSpeechToTextV1.Callback< - GeneratedSpeechToTextV1.LanguageModels - > + callback?: GeneratedSpeechToTextV1.Callback ): ReadableStream | void { const _params = typeof params === 'function' && !callback ? {} : extend({}, params); @@ -957,8 +1064,8 @@ class GeneratedSpeechToTextV1 extends BaseService { * @param {string} params.customization_id - The GUID of the custom language model to which a corpus is to be added. You must make the request with service credentials created for the instance of the service that owns the custom model. * @param {string} params.corpus_name - The name of the corpus that is to be added to the custom language model. The name cannot contain spaces and cannot be the string `user`, which is reserved by the service to denote custom words added or modified by the user. Use a localized name that matches the language of the custom model. * @param {boolean} [params.allow_overwrite] - Indicates whether the specified corpus is to overwrite an existing corpus with the same name. If a corpus with the same name already exists, the request fails unless `allow_overwrite` is set to `true`; by default, the parameter is `false`. 
The parameter has no effect if a corpus with the same name does not already exist. - * @param {ReadableStream|FileObject|Buffer} params.body - A plain text file that contains the training data for the corpus. Encode the file in UTF-8 if it contains non-ASCII characters; the service assumes UTF-8 encoding if it encounters non-ASCII characters. With cURL, use the `--data-binary` option to upload the file for the request. - * @param {string} [params.body_content_type] - The content type of body. + * @param {ReadableStream|FileObject|Buffer} params.corpus_file - A plain text file that contains the training data for the corpus. Encode the file in UTF-8 if it contains non-ASCII characters; the service assumes UTF-8 encoding if it encounters non-ASCII characters. With cURL, use the `--data-binary` option to upload the file for the request. + * @param {string} [params.corpus_file_content_type] - The content type of corpus_file. * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ @@ -968,15 +1075,15 @@ class GeneratedSpeechToTextV1 extends BaseService { ): ReadableStream | void { const _params = extend({}, params); const _callback = callback ? 
callback : () => {}; - const requiredParams = ['customization_id', 'corpus_name', 'body']; + const requiredParams = ['customization_id', 'corpus_name', 'corpus_file']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } const formData = { - body: { - data: _params.body, - contentType: _params.body_content_type + corpus_file: { + data: _params.corpus_file, + contentType: _params.corpus_file_content_type } }; const query = { @@ -1382,9 +1489,7 @@ class GeneratedSpeechToTextV1 extends BaseService { */ createAcousticModel( params: GeneratedSpeechToTextV1.CreateAcousticModelParams, - callback?: GeneratedSpeechToTextV1.Callback< - GeneratedSpeechToTextV1.AcousticModel - > + callback?: GeneratedSpeechToTextV1.Callback ): ReadableStream | void { const _params = extend({}, params); const _callback = callback ? callback : () => {}; @@ -1467,9 +1572,7 @@ class GeneratedSpeechToTextV1 extends BaseService { */ getAcousticModel( params: GeneratedSpeechToTextV1.GetAcousticModelParams, - callback?: GeneratedSpeechToTextV1.Callback< - GeneratedSpeechToTextV1.AcousticModel - > + callback?: GeneratedSpeechToTextV1.Callback ): ReadableStream | void { const _params = extend({}, params); const _callback = callback ? callback : () => {}; @@ -1509,9 +1612,7 @@ class GeneratedSpeechToTextV1 extends BaseService { */ listAcousticModels( params?: GeneratedSpeechToTextV1.ListAcousticModelsParams, - callback?: GeneratedSpeechToTextV1.Callback< - GeneratedSpeechToTextV1.AcousticModels - > + callback?: GeneratedSpeechToTextV1.Callback ): ReadableStream | void { const _params = typeof params === 'function' && !callback ? {} : extend({}, params); @@ -1637,7 +1738,7 @@ class GeneratedSpeechToTextV1 extends BaseService { * @param {string} params.audio_name - The name of the audio resource that is to be added to the custom acoustic model. The name cannot contain spaces. 
Use a localized name that matches the language of the custom model. * @param {string} [params.contained_content_type] - For an archive-type resource that contains audio files whose format is not `audio/wav`, specifies the format of the audio files. The header accepts all of the audio formats supported for use with speech recognition and with the `Content-Type` header, including the `rate`, `channels`, and `endianness` parameters that are used with some formats. For a complete list of supported audio formats, see [Audio formats](/docs/services/speech-to-text/input.html#formats). * @param {boolean} [params.allow_overwrite] - Indicates whether the specified audio resource is to overwrite an existing resource with the same name. If a resource with the same name already exists, the request fails unless `allow_overwrite` is set to `true`; by default, the parameter is `false`. The parameter has no effect if a resource with the same name does not already exist. - * @param {ByteArray[]} params.body - The audio resource that is to be added to the custom acoustic model, an individual audio file or an archive file. + * @param {ByteArray[]} params.audio_resource - The audio resource that is to be added to the custom acoustic model, an individual audio file or an archive file. * @param {string} params.content_type - The type of the input: application/zip, application/gzip, audio/basic, audio/flac, audio/l16, audio/mp3, audio/mpeg, audio/mulaw, audio/ogg, audio/ogg;codecs=opus, audio/ogg;codecs=vorbis, audio/wav, audio/webm, audio/webm;codecs=opus, or audio/webm;codecs=vorbis. * @param {Function} [callback] - The callback that handles the response. 
* @returns {ReadableStream|void} @@ -1651,14 +1752,14 @@ class GeneratedSpeechToTextV1 extends BaseService { const requiredParams = [ 'customization_id', 'audio_name', - 'body', + 'audio_resource', 'content_type' ]; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const body = _params.body; + const body = _params.audio_resource; const query = { allow_overwrite: _params.allow_overwrite }; @@ -1743,9 +1844,7 @@ class GeneratedSpeechToTextV1 extends BaseService { */ getAudio( params: GeneratedSpeechToTextV1.GetAudioParams, - callback?: GeneratedSpeechToTextV1.Callback< - GeneratedSpeechToTextV1.AudioListing - > + callback?: GeneratedSpeechToTextV1.Callback ): ReadableStream | void { const _params = extend({}, params); const _callback = callback ? callback : () => {}; @@ -1787,9 +1886,7 @@ class GeneratedSpeechToTextV1 extends BaseService { */ listAudio( params: GeneratedSpeechToTextV1.ListAudioParams, - callback?: GeneratedSpeechToTextV1.Callback< - GeneratedSpeechToTextV1.AudioResources - > + callback?: GeneratedSpeechToTextV1.Callback ): ReadableStream | void { const _params = extend({}, params); const _callback = callback ? callback : () => {}; @@ -2288,9 +2385,9 @@ namespace GeneratedSpeechToTextV1 { /** Indicates whether the specified corpus is to overwrite an existing corpus with the same name. If a corpus with the same name already exists, the request fails unless `allow_overwrite` is set to `true`; by default, the parameter is `false`. The parameter has no effect if a corpus with the same name does not already exist. **/ allow_overwrite?: boolean; /** A plain text file that contains the training data for the corpus. Encode the file in UTF-8 if it contains non-ASCII characters; the service assumes UTF-8 encoding if it encounters non-ASCII characters. With cURL, use the `--data-binary` option to upload the file for the request. 
**/ - body: ReadableStream | FileObject | Buffer; - /** The content type of body. **/ - body_content_type?: string; + corpus_file: ReadableStream | FileObject | Buffer; + /** The content type of corpus_file. **/ + corpus_file_content_type?: string; } /** Parameters for the `deleteCorpus` operation. **/ @@ -2461,7 +2558,7 @@ namespace GeneratedSpeechToTextV1 { /** Indicates whether the specified audio resource is to overwrite an existing resource with the same name. If a resource with the same name already exists, the request fails unless `allow_overwrite` is set to `true`; by default, the parameter is `false`. The parameter has no effect if a resource with the same name does not already exist. **/ allow_overwrite?: boolean; /** The audio resource that is to be added to the custom acoustic model, an individual audio file or an archive file. **/ - body: Buffer[]; + audio_resource: Buffer[]; /** The type of the input: application/zip, application/gzip, audio/basic, audio/flac, audio/l16, audio/mp3, audio/mpeg, audio/mulaw, audio/ogg, audio/ogg;codecs=opus, audio/ogg;codecs=vorbis, audio/wav, audio/webm, audio/webm;codecs=opus, or audio/webm;codecs=vorbis. 
**/ content_type: AddAudioConstants.ContentType | string; } diff --git a/speech-to-text/v1.ts b/speech-to-text/v1.ts index 9bddfff6c0..6cb95ae329 100644 --- a/speech-to-text/v1.ts +++ b/speech-to-text/v1.ts @@ -108,6 +108,9 @@ class SpeechToTextV1 extends GeneratedSpeechToTextV1 { } createCustomization(params, callback) { + if (params && !params.content_type) { + params.content_type = 'application/json'; + } return super.createLanguageModel(params, callback); } @@ -135,7 +138,7 @@ class SpeechToTextV1 extends GeneratedSpeechToTextV1 { params.corpus_name = params.name; } if (params && params.corpus) { - params.body = params.corpus; + params.corpus_file = params.corpus; } return super.addCorpus(params, callback); } diff --git a/test/unit/test.adapter.conversation.v1.js b/test/unit/test.adapter.conversation.v1.js index e991f81592..9b8afbc808 100644 --- a/test/unit/test.adapter.conversation.v1.js +++ b/test/unit/test.adapter.conversation.v1.js @@ -92,16 +92,6 @@ describe('conversation-v1', function() { conversation.message(pick(params, ['workspace_id']), missingParameter); conversation.message(pick(params, ['input']), missingParameter); }); - - it('should generate version_date was not specified (negative test)', function() { - let threw = false; - try { - watson.conversation(service1); - } catch (err) { - threw = true; - assert.equal(err.message, 'Argument error: version_date was not specified, use ConversationV1.VERSION_DATE_2017_05_26'); - } - assert(threw, 'should throw an error'); - }); + }); }); diff --git a/test/unit/test.adapter.personality_insights.v3.js b/test/unit/test.adapter.personality_insights.v3.js index cb0ba120b0..5ddbb8ca33 100644 --- a/test/unit/test.adapter.personality_insights.v3.js +++ b/test/unit/test.adapter.personality_insights.v3.js @@ -72,7 +72,7 @@ describe('personality_insights_v3', function() { assert.equal(req.uri.href, service.url + service_path + '?version=2016-10-19'); assert.equal(body, service_request.text); assert.equal(req.method, 
'POST'); - assert.equal(req.headers['content-type'], 'text/plain'); + assert.equal(req.headers['Content-Type'], 'text/plain'); }); it('should generate a valid payload with contentItems', function() { @@ -81,8 +81,8 @@ describe('personality_insights_v3', function() { assert.equal(req.uri.href, service.url + service_path + '?version=2016-10-19'); assert.equal(body, JSON.stringify(payload)); assert.equal(req.method, 'POST'); - assert.equal(req.headers['content-language'], undefined); // service bug: content-language header overrides the language specified in JSON for each content item, so it must not be set - assert.equal(req.headers['content-type'], 'application/json'); + assert.equal(req.headers['Content-Language'], undefined); // service bug: content-language header overrides the language specified in JSON for each content item, so it must not be set + assert.equal(req.headers['Content-Type'], 'application/json'); }); it('should generate a valid payload with content_items', function() { @@ -92,8 +92,8 @@ describe('personality_insights_v3', function() { assert.equal(req.uri.href, service.url + service_path + '?version=2016-10-19'); assert.equal(body, JSON.stringify(payload)); assert.equal(req.method, 'POST'); - assert.equal(req.headers['content-language'], undefined); // service bug: content-language header overrides the language specified in JSON for each content item, so it must not be set - assert.equal(req.headers['content-type'], 'application/json'); + assert.equal(req.headers['Content-Language'], undefined); // service bug: content-language header overrides the language specified in JSON for each content item, so it must not be set + assert.equal(req.headers['Content-Type'], 'application/json'); }); it('should generate a valid payload with html', function() { @@ -103,7 +103,7 @@ describe('personality_insights_v3', function() { assert.equal(req.uri.href, service.url + service_path + '?version=2016-10-19'); assert.equal(body, html_req.text); 
assert.equal(req.method, 'POST'); - assert.equal(req.headers['content-type'], 'text/html'); + assert.equal(req.headers['Content-Type'], 'text/html'); }); it('should generate a valid payload with raw_scores, accept-Language and content-language', function() { @@ -123,9 +123,9 @@ describe('personality_insights_v3', function() { assert.equal(req.uri.href, service.url + service_path + '?version=2016-10-19&raw_scores=true'); assert.equal(body, JSON.stringify(payload)); assert.equal(req.method, 'POST'); - assert.equal(req.headers['content-type'], 'application/json'); - assert.equal(req.headers['content-language'], 'es'); - assert.equal(req.headers['accept-language'], 'es'); + assert.equal(req.headers['Content-Type'], 'application/json'); + assert.equal(req.headers['Content-Language'], 'es'); + assert.equal(req.headers['Accept-Language'], 'es'); }); it('should generate a valid request with { headers: {Accept: "text/csv"}}', function() { @@ -134,7 +134,7 @@ describe('personality_insights_v3', function() { const body = Buffer.from(req.body).toString('ascii'); assert.equal(req.uri.href, service.url + service_path + '?version=2016-10-19'); assert.equal(body, JSON.stringify(payload)); - assert.equal(req.headers['accept'], 'text/csv'); + assert.equal(req.headers['Accept'], 'text/csv'); }); it('should generate a valid request with {headers: {accept: "text/csv"}, csv_headers: true}', function() { @@ -143,7 +143,7 @@ describe('personality_insights_v3', function() { const body = Buffer.from(req.body).toString('ascii'); assert.equal(req.uri.href, service.url + service_path + '?version=2016-10-19&csv_headers=true'); assert.equal(body, JSON.stringify(payload)); - assert.equal(req.headers['accept'], 'text/csv'); + assert.equal(req.headers['Accept'], 'text/csv'); }); it('should format the response', function(done) { diff --git a/test/unit/test.adapter.tone_analyzer.v3.js b/test/unit/test.adapter.tone_analyzer.v3.js index 7b716449f1..b4d0691a71 100644 --- 
a/test/unit/test.adapter.tone_analyzer.v3.js +++ b/test/unit/test.adapter.tone_analyzer.v3.js @@ -26,7 +26,7 @@ describe('tone_analyzer.v3', function() { }; const service_es = extend(service, { headers: { - 'accept-language': 'es', + 'Accept-Language': 'es', 'x-custom-header': 'foo' } }); @@ -63,8 +63,8 @@ describe('tone_analyzer.v3', function() { assert.equal(req.uri.href, service.url + tone_path + '?version=2016-05-19'); assert.equal(body, tone_request.text); assert.equal(req.method, 'POST'); - assert.equal(req.headers['content-type'], 'text/plain'); - assert.equal(req.headers['accept'], 'application/json'); + assert.equal(req.headers['Content-Type'], 'text/plain'); + assert.equal(req.headers['Accept'], 'application/json'); }); it('tone API should add optional query parameters', function() { @@ -78,8 +78,8 @@ describe('tone_analyzer.v3', function() { assert.equal(req.uri.href, service.url + tone_path + '?version=2016-05-19&sentences=true&tones=emotion'); assert.equal(body, tone_request.text); assert.equal(req.method, 'POST'); - assert.equal(req.headers['content-type'], 'text/plain'); - assert.equal(req.headers['accept'], 'application/json'); + assert.equal(req.headers['Content-Type'], 'text/plain'); + assert.equal(req.headers['Accept'], 'application/json'); }); it('tone API should add optional language parameter', function() { @@ -94,9 +94,9 @@ describe('tone_analyzer.v3', function() { assert.equal(req.uri.href, service.url + tone_path + '?version=2016-05-19&sentences=true&tones=emotion'); assert.equal(body, tone_request.text); assert.equal(req.method, 'POST'); - assert.equal(req.headers['content-type'], 'text/plain'); - assert.equal(req.headers['accept'], 'application/json'); - assert.equal(req.headers['content-language'], 'en'); + assert.equal(req.headers['Content-Type'], 'text/plain'); + assert.equal(req.headers['Accept'], 'application/json'); + assert.equal(req.headers['Content-Language'], 'en'); }); it('tone API should set HTML content-type', function() { @@ 
-106,8 +106,8 @@ describe('tone_analyzer.v3', function() { assert.equal(req.uri.href, service.url + tone_path + '?version=2016-05-19'); assert.equal(body, tone_request.text); assert.equal(req.method, 'POST'); - assert.equal(req.headers['content-type'], 'text/html'); - assert.equal(req.headers['accept'], 'application/json'); + assert.equal(req.headers['Content-Type'], 'text/html'); + assert.equal(req.headers['Accept'], 'application/json'); }); it('tone API should format the response', function(done) { @@ -128,10 +128,10 @@ describe('tone_analyzer.v3', function() { assert.equal(req.uri.href, service.url + tone_path + '?version=2016-05-19'); assert.equal(body, tone_request.text); assert.equal(req.method, 'POST'); - assert.equal(req.headers['content-type'], 'text/html'); - assert.equal(req.headers['accept'], 'application/json'); + assert.equal(req.headers['Content-Type'], 'text/html'); + assert.equal(req.headers['Accept'], 'application/json'); assert.equal(req.headers['x-custom-header'], 'foo'); - assert.equal(req.headers['accept-language'], 'es'); + assert.equal(req.headers['Accept-Language'], 'es'); }); // Tone Chat Endpoint API - test for valid payload @@ -163,8 +163,8 @@ describe('tone_analyzer.v3', function() { assert.equal(req.uri.href.slice(0, url.length), url); assert.equal(req.uri.href, service.url + tone_chat_path + '?version=2016-05-19'); assert.equal(req.method, 'POST'); - assert.equal(req.headers['content-type'], 'application/json'); - assert.equal(req.headers['accept'], 'application/json'); + assert.equal(req.headers['Content-Type'], 'application/json'); + assert.equal(req.headers['Accept'], 'application/json'); assert.ifError(err); assert(expectation.isDone()); done(); diff --git a/test/unit/test.conversation.v1.js b/test/unit/test.conversation.v1.js index e991f81592..3dc52336b8 100644 --- a/test/unit/test.conversation.v1.js +++ b/test/unit/test.conversation.v1.js @@ -93,15 +93,5 @@ describe('conversation-v1', function() { 
conversation.message(pick(params, ['input']), missingParameter); }); - it('should generate version_date was not specified (negative test)', function() { - let threw = false; - try { - watson.conversation(service1); - } catch (err) { - threw = true; - assert.equal(err.message, 'Argument error: version_date was not specified, use ConversationV1.VERSION_DATE_2017_05_26'); - } - assert(threw, 'should throw an error'); - }); }); }); diff --git a/test/unit/test.personality_insights.v3.js b/test/unit/test.personality_insights.v3.js index ee13199672..73286e9914 100644 --- a/test/unit/test.personality_insights.v3.js +++ b/test/unit/test.personality_insights.v3.js @@ -69,7 +69,7 @@ describe('personality_insights_v3', function() { assert.equal(req.uri.href, service.url + service_path + '?version=2016-10-19'); assert.equal(body, params.content); assert.equal(req.method, 'POST'); - assert.equal(req.headers['content-type'], 'text/plain'); + assert.equal(req.headers['Content-Type'], 'text/plain'); }); it('should generate a valid payload with json', function() { @@ -79,8 +79,8 @@ describe('personality_insights_v3', function() { assert.equal(req.uri.href, service.url + service_path + '?version=2016-10-19'); assert.equal(body, JSON.stringify(params.content)); assert.equal(req.method, 'POST'); - assert.equal(req.headers['content-language'], undefined); // service bug: content-language header overrides the language specified in JSON for each content item, so it must not be set - assert.equal(req.headers['content-type'], 'application/json'); + assert.equal(req.headers['Content-Language'], undefined); // service bug: content-language header overrides the language specified in JSON for each content item, so it must not be set + assert.equal(req.headers['Content-Type'], 'application/json'); }); it('should generate a valid payload with html', function() { @@ -90,7 +90,7 @@ describe('personality_insights_v3', function() { assert.equal(req.uri.href, service.url + service_path + 
'?version=2016-10-19'); assert.equal(body, params.content); assert.equal(req.method, 'POST'); - assert.equal(req.headers['content-type'], 'text/html'); + assert.equal(req.headers['Content-Type'], 'text/html'); }); it('should generate a valid payload with all params', function() { @@ -109,9 +109,9 @@ describe('personality_insights_v3', function() { assert.equal(req.uri.href, service.url + service_path + query_string); assert.equal(body, JSON.stringify(params.content)); assert.equal(req.method, 'POST'); - assert.equal(req.headers['content-type'], 'application/json'); - assert.equal(req.headers['content-language'], 'es'); - assert.equal(req.headers['accept-language'], 'es'); + assert.equal(req.headers['Content-Type'], 'application/json'); + assert.equal(req.headers['Content-Language'], 'es'); + assert.equal(req.headers['Accept-Language'], 'es'); }); it('should generate a valid csv request', function() { @@ -120,7 +120,7 @@ describe('personality_insights_v3', function() { const body = Buffer.from(req.body).toString('ascii'); assert.equal(req.uri.href, service.url + service_path + '?version=2016-10-19'); assert.equal(body, JSON.stringify(params.content)); - assert.equal(req.headers['accept'], 'text/csv'); + assert.equal(req.headers['Accept'], 'text/csv'); }); it('should generate a valid csv request with csv headers', function() { @@ -129,7 +129,7 @@ describe('personality_insights_v3', function() { const body = Buffer.from(req.body).toString('ascii'); assert.equal(req.uri.href, service.url + service_path + '?version=2016-10-19&csv_headers=true'); assert.equal(body, JSON.stringify(params.content)); - assert.equal(req.headers['accept'], 'text/csv'); + assert.equal(req.headers['Accept'], 'text/csv'); }); it('should format the response', function(done) { diff --git a/test/unit/test.text_to_speech.v1.js b/test/unit/test.text_to_speech.v1.js index 452da686d6..a59afb691f 100644 --- a/test/unit/test.text_to_speech.v1.js +++ b/test/unit/test.text_to_speech.v1.js @@ -92,7 +92,7 @@ 
describe('text_to_speech', function() { const req = text_to_speech.synthesize(service_request, noop); assert.equal(req.uri.href, service.url + synthesize_request); assert.equal(req.method, 'POST'); - assert.equal(req.headers['content-type'], 'application/json'); + assert.equal(req.headers['Content-Type'], 'application/json'); }); it('should support the customization_id option', function() { diff --git a/test/unit/test.tone_analyzer.v3.js b/test/unit/test.tone_analyzer.v3.js index 2e920b131a..a8e0305f79 100644 --- a/test/unit/test.tone_analyzer.v3.js +++ b/test/unit/test.tone_analyzer.v3.js @@ -24,7 +24,7 @@ describe('tone_analyzer.v3', function() { }; const service_es = extend(service, { headers: { - 'accept-language': 'es', + 'Accept-Language': 'es', 'x-custom-header': 'foo' } }); @@ -62,8 +62,8 @@ describe('tone_analyzer.v3', function() { assert.equal(req.uri.href, service.url + tone_path + '?version=2017-09-21'); assert.equal(body, text); assert.equal(req.method, 'POST'); - assert.equal(req.headers['content-type'], 'text/plain'); - assert.equal(req.headers['accept'], 'application/json'); + assert.equal(req.headers['Content-Type'], 'text/plain'); + assert.equal(req.headers['Accept'], 'application/json'); }); it('tone API should add optional query parameters', function() { @@ -77,8 +77,8 @@ describe('tone_analyzer.v3', function() { assert.equal(req.uri.href, service.url + tone_path + '?version=2017-09-21&sentences=true'); assert.equal(body, text); assert.equal(req.method, 'POST'); - assert.equal(req.headers['content-type'], 'text/plain'); - assert.equal(req.headers['accept'], 'application/json'); + assert.equal(req.headers['Content-Type'], 'text/plain'); + assert.equal(req.headers['Accept'], 'application/json'); }); it('tone API should add optional language parameters', function() { @@ -94,10 +94,10 @@ describe('tone_analyzer.v3', function() { assert.equal(req.uri.href, service.url + tone_path + '?version=2017-09-21&sentences=true'); assert.equal(body, text); 
assert.equal(req.method, 'POST'); - assert.equal(req.headers['content-type'], 'text/plain'); - assert.equal(req.headers['accept'], 'application/json'); - assert.equal(req.headers['content-language'], 'en'); - assert.equal(req.headers['accept-language'], 'en'); + assert.equal(req.headers['Content-Type'], 'text/plain'); + assert.equal(req.headers['Accept'], 'application/json'); + assert.equal(req.headers['Content-Language'], 'en'); + assert.equal(req.headers['Accept-Language'], 'en'); }); it('tone API should set HTML content-type', function() { @@ -107,8 +107,8 @@ describe('tone_analyzer.v3', function() { assert.equal(req.uri.href, service.url + tone_path + '?version=2017-09-21'); assert.equal(body, text); assert.equal(req.method, 'POST'); - assert.equal(req.headers['content-type'], 'text/html'); - assert.equal(req.headers['accept'], 'application/json'); + assert.equal(req.headers['Content-Type'], 'text/html'); + assert.equal(req.headers['Accept'], 'application/json'); }); it('tone API should format the response', function(done) { @@ -130,10 +130,10 @@ describe('tone_analyzer.v3', function() { assert.equal(req.uri.href, service.url + tone_path + '?version=2017-09-21'); assert.equal(body, text); assert.equal(req.method, 'POST'); - assert.equal(req.headers['content-type'], 'text/html'); - assert.equal(req.headers['accept'], 'application/json'); + assert.equal(req.headers['Content-Type'], 'text/html'); + assert.equal(req.headers['Accept'], 'application/json'); assert.equal(req.headers['x-custom-header'], 'foo'); - assert.equal(req.headers['accept-language'], 'es'); + assert.equal(req.headers['Accept-Language'], 'es'); }); // Tone Chat Endpoint API - test for valid payload @@ -165,8 +165,8 @@ describe('tone_analyzer.v3', function() { assert.equal(req.uri.href.slice(0, url.length), url); assert.equal(req.uri.href, service.url + tone_chat_path + '?version=2017-09-21'); assert.equal(req.method, 'POST'); - assert.equal(req.headers['content-type'], 'application/json'); - 
assert.equal(req.headers['accept'], 'application/json'); + assert.equal(req.headers['Content-Type'], 'application/json'); + assert.equal(req.headers['Accept'], 'application/json'); assert.ifError(err); assert(expectation.isDone()); done(); diff --git a/text-to-speech/v1-generated.ts b/text-to-speech/v1-generated.ts index e9668a495d..0707e836fe 100644 --- a/text-to-speech/v1-generated.ts +++ b/text-to-speech/v1-generated.ts @@ -14,14 +14,14 @@ * limitations under the License. */ -import extend = require('extend'); +import * as extend from 'extend'; import { RequestResponse } from 'request'; import { createRequest } from '../lib/requestwrapper'; import { getMissingParams } from '../lib/helper'; import { BaseService } from '../lib/base_service'; /** - * ### Service Overview The IBM Text to Speech service provides a Representational State Transfer (REST) Application Programming Interface (API) that uses IBM's speech-synthesis capabilities to synthesize text into natural-sounding speech in a variety of languages, dialects, and voices. The service currently synthesizes text from US English, UK English, French, German, Italian, Japanese, Spanish, or Brazilian Portuguese into audio spoken in a male or female voice (the service supports only a single gender for some languages). The audio is streamed back to the client with minimal delay. ### API Overview The Text to Speech service consists of the following related endpoints: * `/v1/synthesize` synthesizes written text to audio speech. * `/v1/voices` provides information about the voices available for synthesized speech. * `/v1/pronunciation` returns the pronunciation for a specified word. * `/v1/customizations` and `/v1/customizations/{customization_id}` lets users create custom voice models, which are dictionaries of words and their translations for use in speech synthesis. 
* `/v1/customizations/{customization_id}/words` and `/v1/customizations/{customization_id}/words/{word}` lets users manage the words in a custom voice model. **Note:** The `/v1/pronunciation` and `/v1/customizations` interfaces are currently beta functionality. ### API Usage The following information provides details about using the service to synthesize audio: * **Audio formats:** The service supports a number of audio formats (MIME types). For more information about audio formats and sampling rates, including links to a number of Internet sites that provide technical and usage details about the different formats, see [Specifying an audio format](https://console.bluemix.net/docs/services/text-to-speech/http.html#format). * **SSML:** Many methods refer to the Speech Synthesis Markup Language (SSML), an XML-based markup language that provides annotations of text for speech-synthesis applications; for example, many methods accept or produce translations that use an SSML-based phoneme format. See [Using SSML](https://console.bluemix.net/docs/services/text-to-speech/SSML.html) and [Using IBM SPR](https://console.bluemix.net/docs/services/text-to-speech/SPRs.html). * **Word translations:** Many customization methods accept or return sounds-like or phonetic translations for words. A phonetic translation is based on the SSML format for representing the phonetic string of a word. Phonetic translations can use standard International Phonetic Alphabet (IPA) representation: <phoneme alphabet=\"ipa\" ph=\"təmˈɑto\"></phoneme> or the proprietary IBM Symbolic Phonetic Representation (SPR): <phoneme alphabet=\"ibm\" ph=\"1gAstroEntxrYFXs\"></phoneme> For more information about customization and about sounds-like and phonetic translations, see [Understanding customization](https://console.bluemix.net/docs/services/text-to-speech/custom-intro.html). * **GUIDs:** The pronunciation and customization methods accept or return a Globally Unique Identifier (GUID). 
For example, customization IDs (specified with the `customization_id` parameter) and service credentials are GUIDs. GUIDs are hexadecimal strings that have the format `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`. * **WebSocket interface:** The service also offers a WebSocket interface as an alternative to its HTTP REST interface for speech synthesis. The WebSocket interface supports both plain text and SSML input, including the SSML <mark> element and word timings. See [The WebSocket interface](https://console.bluemix.net/docs/services/text-to-speech/websockets.html). * **Authentication:** You authenticate to the service by using your service credentials. You can use your credentials to authenticate via a proxy server that resides in IBM Cloud, or you can use your credentials to obtain a token and contact the service directly. See [Service credentials for Watson services](https://console.bluemix.net/docs/services/watson/getting-started-credentials.html) and [Tokens for authentication](https://console.bluemix.net/docs/services/watson/getting-started-tokens.html). * **Custom voice model ownership:** In all cases, you must use service credentials created for the instance of the service that owns a custom voice model to use the methods described in this documentation with that model. For more information, see [Ownership of custom voice models](https://console.bluemix.net/docs/services/text-to-speech/custom-models.html#customOwner). * **Request Logging:** By default, all Watson services log requests and their results. Data is collected only to improve the Watson services. If you do not want to share your data, set the header parameter `X-Watson-Learning-Opt-Out` to `true` for each request. Data is collected for any request that omits this header. See [Controlling request logging for Watson services](https://console.bluemix.net/docs/services/watson/getting-started-logging.html). 
The service does not log data (words and translations) that are used to build custom language models; your training data is never used to improve the service's base models. The service does log data when a custom model is used with a synthesize request; you must set the `X-Watson-Learning-Opt-Out` request header to prevent logging for recognition requests. For more information, see [Request logging and data privacy](https://console.bluemix.net/docs/services/text-to-speech/custom-models.html#customLogging). For more information about the service and its various interfaces, see [About Text to Speech](https://console.bluemix.net/docs/services/text-to-speech/index.html). + * ### Service Overview The IBM Text to Speech service provides a Representational State Transfer (REST) Application Programming Interface (API) that uses IBM's speech-synthesis capabilities to synthesize text into natural-sounding speech in a variety of languages, dialects, and voices. The service currently synthesizes text from US English, UK English, French, German, Italian, Japanese, Spanish, or Brazilian Portuguese into audio spoken in a male or female voice (the service supports only a single gender for some languages). The audio is streamed back to the client with minimal delay. ### API Overview The Text to Speech service consists of the following related endpoints: * `/v1/voices` provides information about the voices available for synthesized speech. * `/v1/synthesize` synthesizes written text to audio speech. * `/v1/pronunciation` returns the pronunciation for a specified word. * `/v1/customizations` and `/v1/customizations/{customization_id}` lets users create custom voice models, which are dictionaries of words and their translations for use in speech synthesis. * `/v1/customizations/{customization_id}/words` and `/v1/customizations/{customization_id}/words/{word}` lets users manage the words in a custom voice model. 
**Note:** The `/v1/pronunciation` and `/v1/customizations` interfaces are currently beta functionality. ### API Usage The following information provides details about using the service to synthesize audio: * **Audio formats:** The service supports a number of audio formats (MIME types). For more information about audio formats and sampling rates, including links to a number of Internet sites that provide technical and usage details about the different formats, see [Specifying an audio format](https://console.bluemix.net/docs/services/text-to-speech/http.html#format). * **SSML:** Many methods refer to the Speech Synthesis Markup Language (SSML), an XML-based markup language that provides annotations of text for speech-synthesis applications; for example, many methods accept or produce translations that use an SSML-based phoneme format. See [Using SSML](https://console.bluemix.net/docs/services/text-to-speech/SSML.html) and [Using IBM SPR](https://console.bluemix.net/docs/services/text-to-speech/SPRs.html). * **Word translations:** Many customization methods accept or return sounds-like or phonetic translations for words. A phonetic translation is based on the SSML format for representing the phonetic string of a word. Phonetic translations can use standard International Phonetic Alphabet (IPA) representation: <phoneme alphabet=\"ipa\" ph=\"təmˈɑto\"></phoneme> or the proprietary IBM Symbolic Phonetic Representation (SPR): <phoneme alphabet=\"ibm\" ph=\"1gAstroEntxrYFXs\"></phoneme> For more information about customization and about sounds-like and phonetic translations, see [Understanding customization](https://console.bluemix.net/docs/services/text-to-speech/custom-intro.html). * **GUIDs:** The pronunciation and customization methods accept or return a Globally Unique Identifier (GUID). For example, customization IDs (specified with the `customization_id` parameter) and service credentials are GUIDs. 
GUIDs are hexadecimal strings that have the format `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`. * **WebSocket interface:** The service also offers a WebSocket interface as an alternative to its HTTP REST interface for speech synthesis. The WebSocket interface supports both plain text and SSML input, including the SSML <mark> element and word timings. See [The WebSocket interface](https://console.bluemix.net/docs/services/text-to-speech/websockets.html). * **Authentication:** You authenticate to the service by using your service credentials. You can use your credentials to authenticate via a proxy server that resides in IBM Cloud, or you can use your credentials to obtain a token and contact the service directly. See [Service credentials for Watson services](https://console.bluemix.net/docs/services/watson/getting-started-credentials.html) and [Tokens for authentication](https://console.bluemix.net/docs/services/watson/getting-started-tokens.html). * **Custom voice model ownership:** In all cases, you must use service credentials created for the instance of the service that owns a custom voice model to use the methods described in this documentation with that model. For more information, see [Ownership of custom voice models](https://console.bluemix.net/docs/services/text-to-speech/custom-models.html#customOwner). * **Request Logging:** By default, all Watson services log requests and their results. Data is collected only to improve the Watson services. If you do not want to share your data, set the header parameter `X-Watson-Learning-Opt-Out` to `true` for each request. Data is collected for any request that omits this header. See [Controlling request logging for Watson services](https://console.bluemix.net/docs/services/watson/getting-started-logging.html). The service does not log data (words and translations) that are used to build custom language models; your training data is never used to improve the service's base models. 
The service does log data when a custom model is used with a synthesize request; you must set the `X-Watson-Learning-Opt-Out` request header to prevent logging for recognition requests. For more information, see [Request logging and data privacy](https://console.bluemix.net/docs/services/text-to-speech/custom-models.html#customLogging). For more information about the service and its various interfaces, see [About Text to Speech](https://console.bluemix.net/docs/services/text-to-speech/index.html). */ class GeneratedTextToSpeechV1 extends BaseService { @@ -48,27 +48,65 @@ class GeneratedTextToSpeechV1 extends BaseService { } /************************* - * customModels + * voices ************************/ /** - * Creates a new custom voice model. + * Retrieves a specific voice available for speech synthesis. * - * Creates a new empty custom voice model. The model is owned by the instance of the service whose credentials are used to create it. **Note:** This method is currently a beta release. + * Lists information about the voice specified with the `voice` path parameter. Specify the `customization_id` query parameter to obtain information for that custom voice model of the specified voice. Use the `GET /v1/voices` method to see a list of all available voices. + * + * @param {Object} params - The parameters to send to the service. + * @param {string} params.voice - The voice for which information is to be returned. Retrieve available voices with the `GET /v1/voices` method. + * @param {string} [params.customization_id] - The GUID of a custom voice model for which information is to be returned. You must make the request with service credentials created for the instance of the service that owns the custom model. Omit the parameter to see information about the specified voice with no customization. + * @param {Function} [callback] - The callback that handles the response. 
+ * @returns {ReadableStream|void} + */ + getVoice( + params: GeneratedTextToSpeechV1.GetVoiceParams, + callback?: GeneratedTextToSpeechV1.Callback + ): ReadableStream | void { + const _params = extend({}, params); + const _callback = callback ? callback : () => {}; + const requiredParams = ['voice']; + const missingParams = getMissingParams(_params, requiredParams); + if (missingParams) { + return _callback(missingParams); + } + const query = { + customization_id: _params.customization_id + }; + const path = { + voice: _params.voice + }; + const parameters = { + options: { + url: '/v1/voices/{voice}', + method: 'GET', + qs: query, + path: path + }, + defaultOptions: extend(true, {}, this._options, { + headers: { + Accept: 'application/json' + } + }) + }; + return createRequest(parameters, _callback); + } + + /** + * Retrieves all voices available for speech synthesis. + * + * Lists information about all available voices. To see information about a specific voice, use the `GET /v1/voices/{voice}` method. * * @param {Object} [params] - The parameters to send to the service. - * @param {string} [params.name] - When you create a new custom voice model, you must specify the name of the new custom model. When you update an existing custom model, specify a name only if you want to change the model's name. - * @param {string} [params.language] - When you create a new custom voice model, the language of the new custom model; omit the parameter to use the the default language, `en-US`. **Note:** When you update an existing custom model, you cannot specify a language; you cannot change the language of an existing model. - * @param {string} [params.description] - A description of the custom voice model. When you create a new custom voice model, specifying a description is recommended. When you update an existing custom model, specify a description only if you want to change the model's description. 
- * @param {Word[]} [params.words] - When you update an existing custom voice model, an array of words and their translations to be added to or updated in the custom voice model; pass an empty array to make no additions or updates. **Note:** When you create a new custom model, you cannot specify words for the new model. * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - createCustomization( - params?: GeneratedTextToSpeechV1.CreateCustomizationParams, - callback?: GeneratedTextToSpeechV1.Callback< - GeneratedTextToSpeechV1.Customization - > + listVoices( + params?: GeneratedTextToSpeechV1.ListVoicesParams, + callback?: GeneratedTextToSpeechV1.Callback ): ReadableStream | void { const _params = typeof params === 'function' && !callback ? {} : extend({}, params); @@ -76,11 +114,159 @@ class GeneratedTextToSpeechV1 extends BaseService { typeof params === 'function' && !callback ? params : callback ? callback : () => {}; + const parameters = { + options: { + url: '/v1/voices', + method: 'GET' + }, + defaultOptions: extend(true, {}, this._options, { + headers: { + Accept: 'application/json' + } + }) + }; + return createRequest(parameters, _callback); + } + + /************************* + * synthesize + ************************/ + + /** + * Streaming speech synthesis of the text in the body parameter. + * + * Synthesizes text to spoken audio, returning the synthesized audio stream as an array of bytes. Identical to the `GET` method but passes longer text in the body of the request, not with the URL. Text size is limited to 5 KB. If a request includes invalid query parameters, the service returns a `Warnings` response header that provides messages about the invalid parameters. The warning includes a descriptive message and a list of invalid argument strings. 
For example, a message such as `\"Unknown arguments:\"` or `\"Unknown url query arguments:\"` followed by a list of the form `\"invalid_arg_1, invalid_arg_2.\"` The request succeeds despite the warnings. + * + * @param {Object} params - The parameters to send to the service. + * @param {string} [params.accept] - The requested audio format (MIME type) of the audio. You can use this header or the `accept` query parameter to specify the audio format. (For the `audio/l16` format, you can optionally specify `endianness=big-endian` or `endianness=little-endian`; the default is little endian.). + * @param {string} [params.voice] - The voice to use for synthesis. Retrieve available voices with the `GET /v1/voices` method. + * @param {string} [params.customization_id] - The GUID of a custom voice model to use for the synthesis. If a custom voice model is specified, it is guaranteed to work only if it matches the language of the indicated voice. You must make the request with service credentials created for the instance of the service that owns the custom model. Omit the parameter to use the specified voice with no customization. + * @param {string} params.text - The text to synthesize. + * @param {Function} [callback] - The callback that handles the response. + * @returns {ReadableStream|void} + */ + synthesize( + params: GeneratedTextToSpeechV1.SynthesizeParams, + callback?: GeneratedTextToSpeechV1.Callback + ): ReadableStream | void { + const _params = extend({}, params); + const _callback = callback ? 
callback : () => {}; + const requiredParams = ['text']; + const missingParams = getMissingParams(_params, requiredParams); + if (missingParams) { + return _callback(missingParams); + } + const body = { + text: _params.text + }; + const query = { + accept: _params.accept2, + voice: _params.voice, + customization_id: _params.customization_id + }; + const parameters = { + options: { + url: '/v1/synthesize', + method: 'POST', + json: true, + body: body, + qs: query, + encoding: null + }, + defaultOptions: extend(true, {}, this._options, { + headers: { + Accept: _params.accept || 'audio/basic', + 'Content-Type': 'application/json' + } + }) + }; + return createRequest(parameters, _callback); + } + + /************************* + * pronunciation + ************************/ + + /** + * Gets the pronunciation for a word. + * + * Returns the phonetic pronunciation for the word specified by the `text` parameter. You can request the pronunciation for a specific format. You can also request the pronunciation for a specific voice to see the default translation for the language of that voice or for a specific custom voice model to see the translation for that voice model. **Note:** This method is currently a beta release. + * + * @param {Object} params - The parameters to send to the service. + * @param {string} params.text - The word for which the pronunciation is requested. + * @param {string} [params.voice] - A voice that specifies the language in which the pronunciation is to be returned. All voices for the same language (for example, `en-US`) return the same translation. Retrieve available voices with the `GET /v1/voices` method. + * @param {string} [params.format] - The phoneme format in which to return the pronunciation. Omit the parameter to obtain the pronunciation in the default format. + * @param {string} [params.customization_id] - The GUID of a custom voice model for which the pronunciation is to be returned. 
The language of a specified custom model must match the language of the specified voice. If the word is not defined in the specified custom model, the service returns the default translation for the custom model's language. You must make the request with service credentials created for the instance of the service that owns the custom model. Omit the parameter to see the translation for the specified voice with no customization. + * @param {Function} [callback] - The callback that handles the response. + * @returns {ReadableStream|void} + */ + getPronunciation( + params: GeneratedTextToSpeechV1.GetPronunciationParams, + callback?: GeneratedTextToSpeechV1.Callback< + GeneratedTextToSpeechV1.Pronunciation + > + ): ReadableStream | void { + const _params = extend({}, params); + const _callback = callback ? callback : () => {}; + const requiredParams = ['text']; + const missingParams = getMissingParams(_params, requiredParams); + if (missingParams) { + return _callback(missingParams); + } + const query = { + text: _params.text, + voice: _params.voice, + format: _params.format, + customization_id: _params.customization_id + }; + const parameters = { + options: { + url: '/v1/pronunciation', + method: 'GET', + qs: query + }, + defaultOptions: extend(true, {}, this._options, { + headers: { + Accept: 'application/json' + } + }) + }; + return createRequest(parameters, _callback); + } + + /************************* + * customVoiceModels + ************************/ + + /** + * Creates a new custom voice model. + * + * Creates a new empty custom voice model. The model is owned by the instance of the service whose credentials are used to create it. **Note:** This method is currently a beta release. + * + * @param {Object} params - The parameters to send to the service. + * @param {string} params.name - The name of the new custom voice model. + * @param {string} [params.language] - The language of the new custom voice model. 
Omit the parameter to use the the default language, `en-US`. + * @param {string} [params.description] - A description of the new custom voice model. Specifying a description is recommended. + * @param {Function} [callback] - The callback that handles the response. + * @returns {ReadableStream|void} + */ + createVoiceModel( + params: GeneratedTextToSpeechV1.CreateVoiceModelParams, + callback?: GeneratedTextToSpeechV1.Callback< + GeneratedTextToSpeechV1.VoiceModel + > + ): ReadableStream | void { + const _params = extend({}, params); + const _callback = callback ? callback : () => {}; + const requiredParams = ['name']; + const missingParams = getMissingParams(_params, requiredParams); + if (missingParams) { + return _callback(missingParams); + } const body = { name: _params.name, language: _params.language, - description: _params.description, - words: _params.words + description: _params.description }; const parameters = { options: { @@ -91,8 +277,8 @@ class GeneratedTextToSpeechV1 extends BaseService { }, defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + Accept: 'application/json', + 'Content-Type': 'application/json' } }) }; @@ -105,12 +291,12 @@ class GeneratedTextToSpeechV1 extends BaseService { * Deletes the custom voice model with the specified `customization_id`. You must use credentials for the instance of the service that owns a model to delete it. **Note:** This method is currently a beta release. * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - GUID of the custom voice model to be deleted. You must make the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.customization_id - The GUID of the custom voice model that is to be deleted. 
You must make the request with service credentials created for the instance of the service that owns the custom model. * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - deleteCustomization( - params: GeneratedTextToSpeechV1.DeleteCustomizationParams, + deleteVoiceModel( + params: GeneratedTextToSpeechV1.DeleteVoiceModelParams, callback?: GeneratedTextToSpeechV1.Callback ): ReadableStream | void { const _params = extend({}, params); @@ -142,14 +328,14 @@ class GeneratedTextToSpeechV1 extends BaseService { * Lists all information about the custom voice model with the specified `customization_id`. In addition to metadata such as the name and description of the voice model, the output includes the words in the model and their translations as defined in the model. To see just the metadata for a voice model, use the `GET /v1/customizations` method. You must use credentials for the instance of the service that owns a model to list information about it. **Note:** This method is currently a beta release. * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - GUID of the custom voice model to be queried. You must make the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.customization_id - The GUID of the custom voice model that is to be queried. You must make the request with service credentials created for the instance of the service that owns the custom model. * @param {Function} [callback] - The callback that handles the response. 
* @returns {ReadableStream|void} */ - getCustomization( - params: GeneratedTextToSpeechV1.GetCustomizationParams, + getVoiceModel( + params: GeneratedTextToSpeechV1.GetVoiceModelParams, callback?: GeneratedTextToSpeechV1.Callback< - GeneratedTextToSpeechV1.Customization + GeneratedTextToSpeechV1.VoiceModel > ): ReadableStream | void { const _params = extend({}, params); @@ -170,7 +356,7 @@ class GeneratedTextToSpeechV1 extends BaseService { }, defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json' + Accept: 'application/json' } }) }; @@ -183,14 +369,14 @@ class GeneratedTextToSpeechV1 extends BaseService { * Lists metadata such as the name and description for the custom voice models that you own. Use the `language` query parameter to list the voice models that you own for the specified language only. Omit the parameter to see all voice models that you own for all languages. To see the words in addition to the metadata for a specific voice model, use the `GET /v1/customizations/{customization_id}` method. You must use credentials for the instance of the service that owns a model to list information about it. **Note:** This method is currently a beta release. * * @param {Object} [params] - The parameters to send to the service. - * @param {string} [params.language] - The language for the custom voice models owned by the requesting service credentials that are to be returned. Omit the parameter to see all custom voice models owned by the requester. + * @param {string} [params.language] - The language for which custom voice models that are owned by the requesting service credentials are to be returned. Omit the parameter to see all custom voice models that are owned by the requester. * @param {Function} [callback] - The callback that handles the response. 
* @returns {ReadableStream|void} */ - listCustomizations( - params?: GeneratedTextToSpeechV1.ListCustomizationsParams, + listVoiceModels( + params?: GeneratedTextToSpeechV1.ListVoiceModelsParams, callback?: GeneratedTextToSpeechV1.Callback< - GeneratedTextToSpeechV1.Customizations + GeneratedTextToSpeechV1.VoiceModels > ): ReadableStream | void { const _params = @@ -210,7 +396,7 @@ class GeneratedTextToSpeechV1 extends BaseService { }, defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json' + Accept: 'application/json' } }) }; @@ -223,16 +409,15 @@ class GeneratedTextToSpeechV1 extends BaseService { * Updates information for the custom voice model with the specified `customization_id`. You can update the metadata such as the name and description of the voice model. You can also update the words in the model and their translations. Adding a new translation for a word that already exists in a custom model overwrites the word's existing translation. A custom model can contain no more than 20,000 entries. You must use credentials for the instance of the service that owns a model to update it. **Note:** This method is currently a beta release. * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - GUID of the custom voice model to be updated. You must make the request with service credentials created for the instance of the service that owns the custom model. - * @param {string} [params.name] - When you create a new custom voice model, you must specify the name of the new custom model. When you update an existing custom model, specify a name only if you want to change the model's name. - * @param {string} [params.language] - When you create a new custom voice model, the language of the new custom model; omit the parameter to use the the default language, `en-US`. 
**Note:** When you update an existing custom model, you cannot specify a language; you cannot change the language of an existing model. - * @param {string} [params.description] - A description of the custom voice model. When you create a new custom voice model, specifying a description is recommended. When you update an existing custom model, specify a description only if you want to change the model's description. - * @param {Word[]} [params.words] - When you update an existing custom voice model, an array of words and their translations to be added to or updated in the custom voice model; pass an empty array to make no additions or updates. **Note:** When you create a new custom model, you cannot specify words for the new model. + * @param {string} params.customization_id - The GUID of the custom voice model that is to be updated. You must make the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} [params.name] - A new name for the custom voice model. + * @param {string} [params.description] - A new description for the custom voice model. + * @param {CustomWord[]} [params.words] - An array of words and their translations that are to be added or updated for the custom voice model. Pass an empty array to make no additions or updates. * @param {Function} [callback] - The callback that handles the response. 
* @returns {ReadableStream|void} */ - updateCustomization( - params: GeneratedTextToSpeechV1.UpdateCustomizationParams, + updateVoiceModel( + params: GeneratedTextToSpeechV1.UpdateVoiceModelParams, callback?: GeneratedTextToSpeechV1.Callback ): ReadableStream | void { const _params = extend({}, params); @@ -244,7 +429,6 @@ class GeneratedTextToSpeechV1 extends BaseService { } const body = { name: _params.name, - language: _params.language, description: _params.description, words: _params.words }; @@ -261,7 +445,7 @@ class GeneratedTextToSpeechV1 extends BaseService { }, defaultOptions: extend(true, {}, this._options, { headers: { - 'content-type': 'application/json' + 'Content-Type': 'application/json' } }) }; @@ -278,9 +462,9 @@ class GeneratedTextToSpeechV1 extends BaseService { * Adds a single word and its translation to the custom voice model with the specified `customization_id`. Adding a new translation for a word that already exists in a custom model overwrites the word's existing translation. A custom model can contain no more than 20,000 entries. You must use credentials for the instance of the service that owns a model to add a word to it. **Note:** This method is currently a beta release. * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - GUID of the custom voice model to which to which to add a word. You must make the request with service credentials created for the instance of the service that owns the custom model. - * @param {string} params.word - The word to be added to the custom voice model. - * @param {string} params.translation - Phonetic or sounds-like translation for the word. A phonetic translation is based on the SSML format for representing the phonetic string of a word either as an IPA translation or as an IBM SPR translation. A sounds-like is one or more words that, when combined, sound like the word. 
+ * @param {string} params.customization_id - The GUID of the custom voice model that is to be updated. You must make the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.word - The word that is to be added or updated for the custom voice model. + * @param {string} params.translation - The phonetic or sounds-like translation for the word. A phonetic translation is based on the SSML format for representing the phonetic string of a word either as an IPA translation or as an IBM SPR translation. A sounds-like is one or more words that, when combined, sound like the word. * @param {string} [params.part_of_speech] - **Japanese only.** The part of speech for the word. The service uses the value to produce the correct intonation for the word. You can create only a single entry, with or without a single part of speech, for any word; you cannot create multiple entries with different parts of speech for the same word. For more information, see [Working with Japanese entries](https://console.bluemix.net/docs/services/text-to-speech/custom-rules.html#jaNotes). * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} @@ -314,7 +498,7 @@ class GeneratedTextToSpeechV1 extends BaseService { }, defaultOptions: extend(true, {}, this._options, { headers: { - 'content-type': 'application/json' + 'Content-Type': 'application/json' } }) }; @@ -327,8 +511,8 @@ class GeneratedTextToSpeechV1 extends BaseService { * Adds one or more words and their translations to the custom voice model with the specified `customization_id`. Adding a new translation for a word that already exists in a custom model overwrites the word's existing translation. A custom model can contain no more than 20,000 entries. You must use credentials for the instance of the service that owns a model to add words to it. **Note:** This method is currently a beta release. 
* * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - GUID of the custom voice model to be updated. You must make the request with service credentials created for the instance of the service that owns the custom model. - * @param {Word[]} params.words - When you add words to a custom voice model, you provide the required information for each word. When you list the words from a custom model, the service returns information about all words from the model in alphabetical order, with uppercase letters listed before lowercase letters; the array is empty if the custom model contains no words. + * @param {string} params.customization_id - The GUID of the custom voice model that is to be updated. You must make the request with service credentials created for the instance of the service that owns the custom model. + * @param {CustomWord[]} params.words - An array of `CustomWord` objects that provides information about the words and their translations that are to be added or updated for the custom voice model. * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ @@ -359,7 +543,7 @@ class GeneratedTextToSpeechV1 extends BaseService { }, defaultOptions: extend(true, {}, this._options, { headers: { - 'content-type': 'application/json' + 'Content-Type': 'application/json' } }) }; @@ -372,8 +556,8 @@ class GeneratedTextToSpeechV1 extends BaseService { * Deletes a single word from the custom voice model with the specified `customization_id`. You must use credentials for the instance of the service that owns a model to delete it. **Note:** This method is currently a beta release. * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - GUID of the custom voice model from which to delete a word. You must make the request with service credentials created for the instance of the service that owns the custom model. 
- * @param {string} params.word - The word to be deleted from the custom voice model. + * @param {string} params.customization_id - The GUID of the custom voice model from which to delete a word. You must make the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.word - The word that is to be deleted from the custom voice model. * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ @@ -411,8 +595,8 @@ class GeneratedTextToSpeechV1 extends BaseService { * Returns the translation for a single word from the custom model with the specified `customization_id`. The output shows the translation as it is defined in the model. You must use credentials for the instance of the service that owns a model to query information about its words. **Note:** This method is currently a beta release. * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - GUID of the custom voice model in which to query a word. You must make the request with service credentials created for the instance of the service that owns the custom model. - * @param {string} params.word - The word to be queried from the custom voice model. + * @param {string} params.customization_id - The GUID of the custom voice model from which to query a word. You must make the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.word - The word that is to be queried from the custom voice model. * @param {Function} [callback] - The callback that handles the response. 
* @returns {ReadableStream|void} */ @@ -441,7 +625,7 @@ class GeneratedTextToSpeechV1 extends BaseService { }, defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json' + Accept: 'application/json' } }) }; @@ -454,7 +638,7 @@ class GeneratedTextToSpeechV1 extends BaseService { * Lists all of the words and their translations for the custom voice model with the specified `customization_id`. The output shows the translations as they are defined in the model. You must use credentials for the instance of the service that owns a model to query information about its words. **Note:** This method is currently a beta release. * * @param {Object} params - The parameters to send to the service. - * @param {string} params.customization_id - GUID of the custom voice model to be queried. You must make the request with service credentials created for the instance of the service that owns the custom model. + * @param {string} params.customization_id - The GUID of the custom voice model that is to be queried. You must make the request with service credentials created for the instance of the service that owns the custom model. * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ @@ -480,246 +664,180 @@ class GeneratedTextToSpeechV1 extends BaseService { }, defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json' + Accept: 'application/json' } }) }; return createRequest(parameters, _callback); } +} - /************************* - * pronunciation - ************************/ +GeneratedTextToSpeechV1.prototype.name = 'text_to_speech'; +GeneratedTextToSpeechV1.prototype.version = 'v1'; - /** - * Gets the pronunciation for a word. - * - * Returns the phonetic pronunciation for the word specified by the `text` parameter. You can request the pronunciation for a specific format. 
You can also request the pronunciation for a specific voice to see the default translation for the language of that voice or for a specific custom voice model to see the translation for that voice model. **Note:** This method is currently a beta release. - * - * @param {Object} params - The parameters to send to the service. - * @param {string} params.text - The word for which the pronunciation is requested. - * @param {string} [params.voice] - Specify a voice to obtain the pronunciation for the specified word in the language of that voice. All voices for the same language (for example, `en-US`) return the same translation. Do not specify both a `voice` and a `customization_id`. Retrieve available voices with the `GET /v1/voices` method. - * @param {string} [params.format] - Specify the phoneme set in which to return the pronunciation. Omit the parameter to obtain the pronunciation in the default format. - * @param {string} [params.customization_id] - GUID of a custom voice model for which the pronunciation is to be returned. You must make the request with service credentials created for the instance of the service that owns the custom model. If the word is not defined in the specified voice model, the service returns the default translation for the model's language. Omit the parameter to see the translation for the specified voice with no customization. Do not specify both a `voice` and a `customization_id`. - * @param {Function} [callback] - The callback that handles the response. - * @returns {ReadableStream|void} - */ - pronunciation( - params: GeneratedTextToSpeechV1.PronunciationParams, - callback?: GeneratedTextToSpeechV1.Callback< - GeneratedTextToSpeechV1.Pronunciation - > - ): ReadableStream | void { - const _params = extend({}, params); - const _callback = callback ? 
callback : () => {}; - const requiredParams = ['text']; - const missingParams = getMissingParams(_params, requiredParams); - if (missingParams) { - return _callback(missingParams); - } - const query = { - text: _params.text, - voice: _params.voice, - format: _params.format, - customization_id: _params.customization_id - }; - const parameters = { - options: { - url: '/v1/pronunciation', - method: 'GET', - qs: query - }, - defaultOptions: extend(true, {}, this._options, { - headers: { - accept: 'application/json' - } - }) - }; - return createRequest(parameters, _callback); - } +/************************* + * interfaces + ************************/ + +namespace GeneratedTextToSpeechV1 { + /** Options for the `GeneratedTextToSpeechV1` constructor. **/ + export type Options = { + url?: string; + username?: string; + password?: string; + use_unauthenticated?: boolean; + headers?: object; + }; + + /** The callback for a service request. **/ + export type Callback = ( + error: any, + body?: T, + response?: RequestResponse + ) => void; + + /** The body of a service request that returns no response data. **/ + export interface Empty {} /************************* - * synthesize + * request interfaces ************************/ - /** - * Streaming speech synthesis of the text in the body parameter. - * - * Synthesizes text to spoken audio, returning the synthesized audio stream as an array of bytes. Identical to the `GET` method but passes longer text in the body of the request, not with the URL. Text size is limited to 5 KB. If a request includes invalid query parameters, the service returns a `Warnings` response header that provides messages about the invalid parameters. The warning includes a descriptive message and a list of invalid argument strings. For example, a message such as `\"Unknown arguments:\"` or `\"Unknown url query arguments:\"` followed by a list of the form `\"invalid_arg_1, invalid_arg_2.\"` The request succeeds despite the warnings. 
- * - * @param {Object} params - The parameters to send to the service. - * @param {string} [params.accept] - Requested audio format (MIME type) of the audio. You can optionally specify `endianness=big-endian` or `endianness=little-endian`; the default is little endian. - * @param {string} [params.voice] - Selects a voice to use for synthesis. Retrieve available voices with the `GET /v1/voices` method. - * @param {string} [params.customization_id] - GUID of a custom voice model to be used for the synthesis. If a custom voice model is specified, it is guaranteed to work only if it matches the language of the indicated voice. You must make the request with service credentials created for the instance of the service that owns the custom model. Omit the parameter to use the specified voice with no customization. - * @param {string} params.text - Text to synthesize. - * @param {Function} [callback] - The callback that handles the response. - * @returns {ReadableStream|void} - */ - synthesize( - params: GeneratedTextToSpeechV1.SynthesizeParams, - callback?: GeneratedTextToSpeechV1.Callback - ): ReadableStream | void { - const _params = extend({}, params); - const _callback = callback ? callback : () => {}; - const requiredParams = ['text']; - const missingParams = getMissingParams(_params, requiredParams); - if (missingParams) { - return _callback(missingParams); - } - const body = { - text: _params.text - }; - const query = { - voice: _params.voice, - customization_id: _params.customization_id - }; - const parameters = { - options: { - url: '/v1/synthesize', - method: 'POST', - json: true, - body: body, - qs: query, - encoding: null - }, - defaultOptions: extend(true, {}, this._options, { - headers: { - accept: _params.accept || 'audio/basic', - 'content-type': 'application/json' - } - }) - }; - return createRequest(parameters, _callback); + /** Parameters for the `getVoice` operation. 
**/ + export interface GetVoiceParams { + /** The voice for which information is to be returned. Retrieve available voices with the `GET /v1/voices` method. **/ + voice: GetVoiceConstants.Voice | string; + /** The GUID of a custom voice model for which information is to be returned. You must make the request with service credentials created for the instance of the service that owns the custom model. Omit the parameter to see information about the specified voice with no customization. **/ + customization_id?: string; } - /************************* - * voices - ************************/ - - /** - * Retrieves a specific voice available for speech synthesis. - * - * Lists information about the voice specified with the `voice` path parameter. Specify the `customization_id` query parameter to obtain information for that custom voice model of the specified voice. Use the `GET /v1/voices` method to see a list of all available voices. - * - * @param {Object} params - The parameters to send to the service. - * @param {string} params.voice - The voice for which information is to be returned. Retrieve available voices with the `GET /v1/voices` method. - * @param {string} [params.customization_id] - GUID of the custom voice model for which information is to be returned. You must make the request with service credentials created for the instance of the service that owns the custom model. Omit the parameter to see information about the specified voice with no customization. - * @param {Function} [callback] - The callback that handles the response. - * @returns {ReadableStream|void} - */ - getVoice( - params: GeneratedTextToSpeechV1.GetVoiceParams, - callback?: GeneratedTextToSpeechV1.Callback - ): ReadableStream | void { - const _params = extend({}, params); - const _callback = callback ? 
callback : () => {}; - const requiredParams = ['voice']; - const missingParams = getMissingParams(_params, requiredParams); - if (missingParams) { - return _callback(missingParams); + /** Constants for the `getVoice` operation. **/ + export namespace GetVoiceConstants { + /** The voice for which information is to be returned. Retrieve available voices with the `GET /v1/voices` method. **/ + export enum Voice { + EN_US_ALLISONVOICE = 'en-US_AllisonVoice', + EN_US_LISAVOICE = 'en-US_LisaVoice', + EN_US_MICHAELVOICE = 'en-US_MichaelVoice', + EN_GB_KATEVOICE = 'en-GB_KateVoice', + ES_ES_ENRIQUEVOICE = 'es-ES_EnriqueVoice', + ES_ES_LAURAVOICE = 'es-ES_LauraVoice', + ES_LA_SOFIAVOICE = 'es-LA_SofiaVoice', + ES_US_SOFIAVOICE = 'es-US_SofiaVoice', + DE_DE_DIETERVOICE = 'de-DE_DieterVoice', + DE_DE_BIRGITVOICE = 'de-DE_BirgitVoice', + FR_FR_RENEEVOICE = 'fr-FR_ReneeVoice', + IT_IT_FRANCESCAVOICE = 'it-IT_FrancescaVoice', + JA_JP_EMIVOICE = 'ja-JP_EmiVoice', + PT_BR_ISABELAVOICE = 'pt-BR_IsabelaVoice' } - const query = { - customization_id: _params.customization_id - }; - const path = { - voice: _params.voice - }; - const parameters = { - options: { - url: '/v1/voices/{voice}', - method: 'GET', - qs: query, - path: path - }, - defaultOptions: extend(true, {}, this._options, { - headers: { - accept: 'application/json' - } - }) - }; - return createRequest(parameters, _callback); } - /** - * Retrieves all voices available for speech synthesis. - * - * Lists information about all available voices. To see information about a specific voice, use the `GET /v1/voices/{voice}` method. - * - * @param {Object} [params] - The parameters to send to the service. - * @param {Function} [callback] - The callback that handles the response. - * @returns {ReadableStream|void} - */ - listVoices( - params?: GeneratedTextToSpeechV1.ListVoicesParams, - callback?: GeneratedTextToSpeechV1.Callback - ): ReadableStream | void { - const _params = - typeof params === 'function' && !callback ? 
{} : extend({}, params); - const _callback = - typeof params === 'function' && !callback - ? params - : callback ? callback : () => {}; - const parameters = { - options: { - url: '/v1/voices', - method: 'GET' - }, - defaultOptions: extend(true, {}, this._options, { - headers: { - accept: 'application/json' - } - }) - }; - return createRequest(parameters, _callback); - } -} - -GeneratedTextToSpeechV1.prototype.name = 'text_to_speech'; -GeneratedTextToSpeechV1.prototype.version = 'v1'; + /** Parameters for the `listVoices` operation. **/ + export interface ListVoicesParams {} -/************************* - * interfaces - ************************/ + /** Parameters for the `synthesize` operation. **/ + export interface SynthesizeParams { + /** The requested audio format (MIME type) of the audio. You can use this header or the `accept` query parameter to specify the audio format. (For the `audio/l16` format, you can optionally specify `endianness=big-endian` or `endianness=little-endian`; the default is little endian.). **/ + accept?: SynthesizeConstants.Accept | string; + /** The requested audio format (MIME type) of the audio. You can use this query parameter or the `Accept` header to specify the audio format. (For the `audio/l16` format, you can optionally specify `endianness=big-endian` or `endianness=little-endian`; the default is little endian.). **/ + accept2?: SynthesizeConstants.Accept | string; + /** The voice to use for synthesis. Retrieve available voices with the `GET /v1/voices` method. **/ + voice?: SynthesizeConstants.Voice | string; + /** The GUID of a custom voice model to use for the synthesis. If a custom voice model is specified, it is guaranteed to work only if it matches the language of the indicated voice. You must make the request with service credentials created for the instance of the service that owns the custom model. Omit the parameter to use the specified voice with no customization. 
**/ + customization_id?: string; + /** The text to synthesize. **/ + text: string; + } -namespace GeneratedTextToSpeechV1 { - /** Options for the `GeneratedTextToSpeechV1` constructor. **/ - export type Options = { - url?: string; - username?: string; - password?: string; - use_unauthenticated?: boolean; - headers?: object; - }; + /** Constants for the `synthesize` operation. **/ + export namespace SynthesizeConstants { + /** The requested audio format (MIME type) of the audio. You can use this header or the `accept` query parameter to specify the audio format. (For the `audio/l16` format, you can optionally specify `endianness=big-endian` or `endianness=little-endian`; the default is little endian.). **/ + export enum Accept { + BASIC = 'audio/basic', + FLAC = 'audio/flac', + L16_RATE = 'audio/l16', + OGG = 'audio/ogg', + OGG_CODECS_OPUS = 'audio/ogg;codecs=opus', + OGG_CODECS_VORBIS = 'audio/ogg;codecs=vorbis', + MP3 = 'audio/mp3', + MPEG = 'audio/mpeg', + MULAW_RATE = 'audio/mulaw', + WAV = 'audio/wav', + WEBM = 'audio/webm', + WEBM_CODECS_OPUS = 'audio/webm:codecs=opus', + WEBM_CODECS_VORBIS = 'audio/webm:codecs=vorbis' + } - /** The callback for a service request. **/ - export type Callback = ( - error: any, - body?: T, - response?: RequestResponse - ) => void; + /** The voice to use for synthesis. Retrieve available voices with the `GET /v1/voices` method. 
**/ + export enum Voice { + EN_US_ALLISONVOICE = 'en-US_AllisonVoice', + EN_US_LISAVOICE = 'en-US_LisaVoice', + EN_US_MICHAELVOICE = 'en-US_MichaelVoice', + EN_GB_KATEVOICE = 'en-GB_KateVoice', + ES_ES_ENRIQUEVOICE = 'es-ES_EnriqueVoice', + ES_ES_LAURAVOICE = 'es-ES_LauraVoice', + ES_LA_SOFIAVOICE = 'es-LA_SofiaVoice', + ES_US_SOFIAVOICE = 'es-US_SofiaVoice', + DE_DE_DIETERVOICE = 'de-DE_DieterVoice', + DE_DE_BIRGITVOICE = 'de-DE_BirgitVoice', + FR_FR_RENEEVOICE = 'fr-FR_ReneeVoice', + IT_IT_FRANCESCAVOICE = 'it-IT_FrancescaVoice', + JA_JP_EMIVOICE = 'ja-JP_EmiVoice', + PT_BR_ISABELAVOICE = 'pt-BR_IsabelaVoice' + } + } - /** The body of a service request that returns no response data. **/ - export interface Empty {} + /** Parameters for the `getPronunciation` operation. **/ + export interface GetPronunciationParams { + /** The word for which the pronunciation is requested. **/ + text: string; + /** A voice that specifies the language in which the pronunciation is to be returned. All voices for the same language (for example, `en-US`) return the same translation. Retrieve available voices with the `GET /v1/voices` method. **/ + voice?: GetPronunciationConstants.Voice | string; + /** The phoneme format in which to return the pronunciation. Omit the parameter to obtain the pronunciation in the default format. **/ + format?: GetPronunciationConstants.Format | string; + /** The GUID of a custom voice model for which the pronunciation is to be returned. The language of a specified custom model must match the language of the specified voice. If the word is not defined in the specified custom model, the service returns the default translation for the custom model's language. You must make the request with service credentials created for the instance of the service that owns the custom model. Omit the parameter to see the translation for the specified voice with no customization. 
**/ + customization_id?: string; + } - /************************* - * request interfaces - ************************/ + /** Constants for the `getPronunciation` operation. **/ + export namespace GetPronunciationConstants { + /** A voice that specifies the language in which the pronunciation is to be returned. All voices for the same language (for example, `en-US`) return the same translation. Retrieve available voices with the `GET /v1/voices` method. **/ + export enum Voice { + EN_US_ALLISONVOICE = 'en-US_AllisonVoice', + EN_US_LISAVOICE = 'en-US_LisaVoice', + EN_US_MICHAELVOICE = 'en-US_MichaelVoice', + EN_GB_KATEVOICE = 'en-GB_KateVoice', + ES_ES_ENRIQUEVOICE = 'es-ES_EnriqueVoice', + ES_ES_LAURAVOICE = 'es-ES_LauraVoice', + ES_LA_SOFIAVOICE = 'es-LA_SofiaVoice', + ES_US_SOFIAVOICE = 'es-US_SofiaVoice', + DE_DE_DIETERVOICE = 'de-DE_DieterVoice', + DE_DE_BIRGITVOICE = 'de-DE_BirgitVoice', + FR_FR_RENEEVOICE = 'fr-FR_ReneeVoice', + IT_IT_FRANCESCAVOICE = 'it-IT_FrancescaVoice', + JA_JP_EMIVOICE = 'ja-JP_EmiVoice', + PT_BR_ISABELAVOICE = 'pt-BR_IsabelaVoice' + } + /** The phoneme format in which to return the pronunciation. Omit the parameter to obtain the pronunciation in the default format. **/ + export enum Format { + IPA = 'ipa', + IBM = 'ibm' + } + } - /** Parameters for the `createCustomization` operation. **/ - export interface CreateCustomizationParams { - /** When you create a new custom voice model, you must specify the name of the new custom model. When you update an existing custom model, specify a name only if you want to change the model's name. **/ - name?: string; - /** When you create a new custom voice model, the language of the new custom model; omit the parameter to use the the default language, `en-US`. **Note:** When you update an existing custom model, you cannot specify a language; you cannot change the language of an existing model. 
**/
-    language?: CreateCustomizationConstants.Language | string;
-    /** A description of the custom voice model. When you create a new custom voice model, specifying a description is recommended. When you update an existing custom model, specify a description only if you want to change the model's description. **/
+  /** Parameters for the `createVoiceModel` operation. **/
+  export interface CreateVoiceModelParams {
+    /** The name of the new custom voice model. **/
+    name: string;
+    /** The language of the new custom voice model. Omit the parameter to use the default language, `en-US`. **/
+    language?: CreateVoiceModelConstants.Language | string;
+    /** A description of the new custom voice model. Specifying a description is recommended. **/
     description?: string;
-    /** When you update an existing custom voice model, an array of words and their translations to be added to or updated in the custom voice model; pass an empty array to make no additions or updates. **Note:** When you create a new custom model, you cannot specify words for the new model. **/
-    words?: Word[];
   }
 
-  /** Constants for the `createCustomization` operation. **/
-  export namespace CreateCustomizationConstants {
-    /** When you create a new custom voice model, the language of the new custom model; omit the parameter to use the the default language, `en-US`. **Note:** When you update an existing custom model, you cannot specify a language; you cannot change the language of an existing model. **/
+  /** Constants for the `createVoiceModel` operation. **/
+  export namespace CreateVoiceModelConstants {
+    /** The language of the new custom voice model. Omit the parameter to use the default language, `en-US`. **/
     export enum Language {
       DE_DE = 'de-DE',
       EN_US = 'en-US',
@@ -734,27 +852,27 @@ namespace GeneratedTextToSpeechV1 {
     }
   }
 
-  /** Parameters for the `deleteCustomization` operation. **/
-  export interface DeleteCustomizationParams {
-    /** GUID of the custom voice model to be deleted. 
You must make the request with service credentials created for the instance of the service that owns the custom model. **/ + /** Parameters for the `deleteVoiceModel` operation. **/ + export interface DeleteVoiceModelParams { + /** The GUID of the custom voice model that is to be deleted. You must make the request with service credentials created for the instance of the service that owns the custom model. **/ customization_id: string; } - /** Parameters for the `getCustomization` operation. **/ - export interface GetCustomizationParams { - /** GUID of the custom voice model to be queried. You must make the request with service credentials created for the instance of the service that owns the custom model. **/ + /** Parameters for the `getVoiceModel` operation. **/ + export interface GetVoiceModelParams { + /** The GUID of the custom voice model that is to be queried. You must make the request with service credentials created for the instance of the service that owns the custom model. **/ customization_id: string; } - /** Parameters for the `listCustomizations` operation. **/ - export interface ListCustomizationsParams { - /** The language for the custom voice models owned by the requesting service credentials that are to be returned. Omit the parameter to see all custom voice models owned by the requester. **/ - language?: ListCustomizationsConstants.Language | string; + /** Parameters for the `listVoiceModels` operation. **/ + export interface ListVoiceModelsParams { + /** The language for which custom voice models that are owned by the requesting service credentials are to be returned. Omit the parameter to see all custom voice models that are owned by the requester. **/ + language?: ListVoiceModelsConstants.Language | string; } - /** Constants for the `listCustomizations` operation. **/ - export namespace ListCustomizationsConstants { - /** The language for the custom voice models owned by the requesting service credentials that are to be returned. 
Omit the parameter to see all custom voice models owned by the requester. **/ + /** Constants for the `listVoiceModels` operation. **/ + export namespace ListVoiceModelsConstants { + /** The language for which custom voice models that are owned by the requesting service credentials are to be returned. Omit the parameter to see all custom voice models that are owned by the requester. **/ export enum Language { DE_DE = 'de-DE', EN_US = 'en-US', @@ -769,44 +887,25 @@ namespace GeneratedTextToSpeechV1 { } } - /** Parameters for the `updateCustomization` operation. **/ - export interface UpdateCustomizationParams { - /** GUID of the custom voice model to be updated. You must make the request with service credentials created for the instance of the service that owns the custom model. **/ + /** Parameters for the `updateVoiceModel` operation. **/ + export interface UpdateVoiceModelParams { + /** The GUID of the custom voice model that is to be updated. You must make the request with service credentials created for the instance of the service that owns the custom model. **/ customization_id: string; - /** When you create a new custom voice model, you must specify the name of the new custom model. When you update an existing custom model, specify a name only if you want to change the model's name. **/ + /** A new name for the custom voice model. **/ name?: string; - /** When you create a new custom voice model, the language of the new custom model; omit the parameter to use the the default language, `en-US`. **Note:** When you update an existing custom model, you cannot specify a language; you cannot change the language of an existing model. **/ - language?: UpdateCustomizationConstants.Language | string; - /** A description of the custom voice model. When you create a new custom voice model, specifying a description is recommended. When you update an existing custom model, specify a description only if you want to change the model's description. 
**/ + /** A new description for the custom voice model. **/ description?: string; - /** When you update an existing custom voice model, an array of words and their translations to be added to or updated in the custom voice model; pass an empty array to make no additions or updates. **Note:** When you create a new custom model, you cannot specify words for the new model. **/ - words?: Word[]; - } - - /** Constants for the `updateCustomization` operation. **/ - export namespace UpdateCustomizationConstants { - /** When you create a new custom voice model, the language of the new custom model; omit the parameter to use the the default language, `en-US`. **Note:** When you update an existing custom model, you cannot specify a language; you cannot change the language of an existing model. **/ - export enum Language { - DE_DE = 'de-DE', - EN_US = 'en-US', - EN_GB = 'en-GB', - ES_ES = 'es-ES', - ES_LA = 'es-LA', - ES_US = 'es-US', - FR_FR = 'fr-FR', - IT_IT = 'it-IT', - JA_JP = 'ja-JP', - PT_BR = 'pt-BR' - } + /** An array of words and their translations that are to be added or updated for the custom voice model. Pass an empty array to make no additions or updates. **/ + words?: CustomWord[]; } /** Parameters for the `addWord` operation. **/ export interface AddWordParams { - /** GUID of the custom voice model to which to which to add a word. You must make the request with service credentials created for the instance of the service that owns the custom model. **/ + /** The GUID of the custom voice model that is to be updated. You must make the request with service credentials created for the instance of the service that owns the custom model. **/ customization_id: string; - /** The word to be added to the custom voice model. **/ + /** The word that is to be added or updated for the custom voice model. **/ word: string; - /** Phonetic or sounds-like translation for the word. 
A phonetic translation is based on the SSML format for representing the phonetic string of a word either as an IPA translation or as an IBM SPR translation. A sounds-like is one or more words that, when combined, sound like the word. **/ + /** The phonetic or sounds-like translation for the word. A phonetic translation is based on the SSML format for representing the phonetic string of a word either as an IPA translation or as an IBM SPR translation. A sounds-like is one or more words that, when combined, sound like the word. **/ translation: string; /** **Japanese only.** The part of speech for the word. The service uses the value to produce the correct intonation for the word. You can create only a single entry, with or without a single part of speech, for any word; you cannot create multiple entries with different parts of speech for the same word. For more information, see [Working with Japanese entries](https://console.bluemix.net/docs/services/text-to-speech/custom-rules.html#jaNotes). **/ part_of_speech?: AddWordConstants.PartOfSpeech | string; @@ -838,188 +937,51 @@ namespace GeneratedTextToSpeechV1 { /** Parameters for the `addWords` operation. **/ export interface AddWordsParams { - /** GUID of the custom voice model to be updated. You must make the request with service credentials created for the instance of the service that owns the custom model. **/ + /** The GUID of the custom voice model that is to be updated. You must make the request with service credentials created for the instance of the service that owns the custom model. **/ customization_id: string; - /** When you add words to a custom voice model, you provide the required information for each word. When you list the words from a custom model, the service returns information about all words from the model in alphabetical order, with uppercase letters listed before lowercase letters; the array is empty if the custom model contains no words. 
**/ - words: Word[]; + /** An array of `CustomWord` objects that provides information about the words and their translations that are to be added or updated for the custom voice model. **/ + words: CustomWord[]; } /** Parameters for the `deleteWord` operation. **/ export interface DeleteWordParams { - /** GUID of the custom voice model from which to delete a word. You must make the request with service credentials created for the instance of the service that owns the custom model. **/ + /** The GUID of the custom voice model from which to delete a word. You must make the request with service credentials created for the instance of the service that owns the custom model. **/ customization_id: string; - /** The word to be deleted from the custom voice model. **/ + /** The word that is to be deleted from the custom voice model. **/ word: string; } /** Parameters for the `getWord` operation. **/ export interface GetWordParams { - /** GUID of the custom voice model in which to query a word. You must make the request with service credentials created for the instance of the service that owns the custom model. **/ + /** The GUID of the custom voice model from which to query a word. You must make the request with service credentials created for the instance of the service that owns the custom model. **/ customization_id: string; - /** The word to be queried from the custom voice model. **/ + /** The word that is to be queried from the custom voice model. **/ word: string; } /** Parameters for the `listWords` operation. **/ export interface ListWordsParams { - /** GUID of the custom voice model to be queried. You must make the request with service credentials created for the instance of the service that owns the custom model. **/ + /** The GUID of the custom voice model that is to be queried. You must make the request with service credentials created for the instance of the service that owns the custom model. 
**/ customization_id: string; } - /** Parameters for the `pronunciation` operation. **/ - export interface PronunciationParams { - /** The word for which the pronunciation is requested. **/ - text: string; - /** Specify a voice to obtain the pronunciation for the specified word in the language of that voice. All voices for the same language (for example, `en-US`) return the same translation. Do not specify both a `voice` and a `customization_id`. Retrieve available voices with the `GET /v1/voices` method. **/ - voice?: PronunciationConstants.Voice | string; - /** Specify the phoneme set in which to return the pronunciation. Omit the parameter to obtain the pronunciation in the default format. **/ - format?: PronunciationConstants.Format | string; - /** GUID of a custom voice model for which the pronunciation is to be returned. You must make the request with service credentials created for the instance of the service that owns the custom model. If the word is not defined in the specified voice model, the service returns the default translation for the model's language. Omit the parameter to see the translation for the specified voice with no customization. Do not specify both a `voice` and a `customization_id`. **/ - customization_id?: string; - } - - /** Constants for the `pronunciation` operation. **/ - export namespace PronunciationConstants { - /** Specify a voice to obtain the pronunciation for the specified word in the language of that voice. All voices for the same language (for example, `en-US`) return the same translation. Do not specify both a `voice` and a `customization_id`. Retrieve available voices with the `GET /v1/voices` method. 
**/ - export enum Voice { - EN_US_ALLISONVOICE = 'en-US_AllisonVoice', - EN_US_LISAVOICE = 'en-US_LisaVoice', - EN_US_MICHAELVOICE = 'en-US_MichaelVoice', - EN_GB_KATEVOICE = 'en-GB_KateVoice', - ES_ES_ENRIQUEVOICE = 'es-ES_EnriqueVoice', - ES_ES_LAURAVOICE = 'es-ES_LauraVoice', - ES_LA_SOFIAVOICE = 'es-LA_SofiaVoice', - ES_US_SOFIAVOICE = 'es-US_SofiaVoice', - DE_DE_DIETERVOICE = 'de-DE_DieterVoice', - DE_DE_BIRGITVOICE = 'de-DE_BirgitVoice', - FR_FR_RENEEVOICE = 'fr-FR_ReneeVoice', - IT_IT_FRANCESCAVOICE = 'it-IT_FrancescaVoice', - JA_JP_EMIVOICE = 'ja-JP_EmiVoice', - PT_BR_ISABELAVOICE = 'pt-BR_IsabelaVoice' - } - /** Specify the phoneme set in which to return the pronunciation. Omit the parameter to obtain the pronunciation in the default format. **/ - export enum Format { - IPA = 'ipa', - IBM = 'ibm' - } - } - - /** Parameters for the `synthesize` operation. **/ - export interface SynthesizeParams { - /** Requested audio format (MIME type) of the audio. You can use this header or the `accept` query parameter to specify the audio format. (For the `audio/l16` format, you can optionally specify `endianness=big-endian` or `endianness=little-endian`; the default is little endian.). **/ - accept2?: SynthesizeConstants.Accept | string; - /** Requested audio format (MIME type) of the audio. You can use this query parameter or the `Accept` header to specify the audio format. (For the `audio/l16` format, you can optionally specify `endianness=big-endian` or `endianness=little-endian`; the default is little endian.). **/ - accept?: SynthesizeConstants.Accept | string; - /** Selects a voice to use for synthesis. Retrieve available voices with the `GET /v1/voices` method. **/ - voice?: SynthesizeConstants.Voice | string; - /** GUID of a custom voice model to be used for the synthesis. If a custom voice model is specified, it is guaranteed to work only if it matches the language of the indicated voice. 
You must make the request with service credentials created for the instance of the service that owns the custom model. Omit the parameter to use the specified voice with no customization. **/ - customization_id?: string; - /** Text to synthesize. **/ - text: string; - } - - /** Constants for the `synthesize` operation. **/ - export namespace SynthesizeConstants { - /** Requested audio format (MIME type) of the audio. You can use this header or the `accept` query parameter to specify the audio format. (For the `audio/l16` format, you can optionally specify `endianness=big-endian` or `endianness=little-endian`; the default is little endian.). **/ - export enum Accept { - BASIC = 'audio/basic', - FLAC = 'audio/flac', - L16 = 'audio/l16', - OGG = 'audio/ogg', - OGG_CODECS_OPUS = 'audio/ogg;codecs=opus', - OGG_CODECS_VORBIS = 'audio/ogg;codecs=vorbis', - MP3 = 'audio/mp3', - MPEG = 'audio/mpeg', - MULAW = 'audio/mulaw', - WAV = 'audio/wav', - WEBM = 'audio/webm', - WEBM_CODECS_OPUS = 'audio/webm:codecs=opus', - WEBM_CODECS_VORBIS = 'audio/webm:codecs=vorbis' - } - /** Selects a voice to use for synthesis. Retrieve available voices with the `GET /v1/voices` method. **/ - export enum Voice { - EN_US_ALLISONVOICE = 'en-US_AllisonVoice', - EN_US_LISAVOICE = 'en-US_LisaVoice', - EN_US_MICHAELVOICE = 'en-US_MichaelVoice', - EN_GB_KATEVOICE = 'en-GB_KateVoice', - ES_ES_ENRIQUEVOICE = 'es-ES_EnriqueVoice', - ES_ES_LAURAVOICE = 'es-ES_LauraVoice', - ES_LA_SOFIAVOICE = 'es-LA_SofiaVoice', - ES_US_SOFIAVOICE = 'es-US_SofiaVoice', - DE_DE_DIETERVOICE = 'de-DE_DieterVoice', - DE_DE_BIRGITVOICE = 'de-DE_BirgitVoice', - FR_FR_RENEEVOICE = 'fr-FR_ReneeVoice', - IT_IT_FRANCESCAVOICE = 'it-IT_FrancescaVoice', - JA_JP_EMIVOICE = 'ja-JP_EmiVoice', - PT_BR_ISABELAVOICE = 'pt-BR_IsabelaVoice' - } - } - - /** Parameters for the `getVoice` operation. **/ - export interface GetVoiceParams { - /** The voice for which information is to be returned. 
Retrieve available voices with the `GET /v1/voices` method. **/ - voice: GetVoiceConstants.Voice | string; - /** GUID of the custom voice model for which information is to be returned. You must make the request with service credentials created for the instance of the service that owns the custom model. Omit the parameter to see information about the specified voice with no customization. **/ - customization_id?: string; - } - - /** Constants for the `getVoice` operation. **/ - export namespace GetVoiceConstants { - /** The voice for which information is to be returned. Retrieve available voices with the `GET /v1/voices` method. **/ - export enum Voice { - EN_US_ALLISONVOICE = 'en-US_AllisonVoice', - EN_US_LISAVOICE = 'en-US_LisaVoice', - EN_US_MICHAELVOICE = 'en-US_MichaelVoice', - EN_GB_KATEVOICE = 'en-GB_KateVoice', - ES_ES_ENRIQUEVOICE = 'es-ES_EnriqueVoice', - ES_ES_LAURAVOICE = 'es-ES_LauraVoice', - ES_LA_SOFIAVOICE = 'es-LA_SofiaVoice', - ES_US_SOFIAVOICE = 'es-US_SofiaVoice', - DE_DE_DIETERVOICE = 'de-DE_DieterVoice', - DE_DE_BIRGITVOICE = 'de-DE_BirgitVoice', - FR_FR_RENEEVOICE = 'fr-FR_ReneeVoice', - IT_IT_FRANCESCAVOICE = 'it-IT_FrancescaVoice', - JA_JP_EMIVOICE = 'ja-JP_EmiVoice', - PT_BR_ISABELAVOICE = 'pt-BR_IsabelaVoice' - } - } - - /** Parameters for the `listVoices` operation. **/ - export interface ListVoicesParams {} - /************************* * model interfaces ************************/ - /** Customization. **/ - export interface Customization { - /** GUID of the custom voice model. **Note:** When you create a new custom voice model, the service returns only the GUID of the new custom model; it does not return the other fields of this object. **/ - customization_id: string; - /** Name of the custom voice model. **/ - name?: string; - /** Language of the custom voice model. **/ - language?: string; - /** GUID of the service credentials for the instance of the service that owns the custom voice model. 
**/ - owner?: string; - /** The date and time in Coordinated Universal Time (UTC) at which the custom voice model was created. The value is provided in full ISO 8601 format (1YYYY-MM-DDThh:mm:ss.sTZD`). **/ - created?: string; - /** The date and time in Coordinated Universal Time (UTC) at which the custom voice model was last modified. Equals `created` when a new voice model is first added but has yet to be changed. The value is provided in full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). **/ - last_modified?: string; - /** Description of the custom voice model. **/ - description?: string; - /** An array of words and their translations from the custom voice model. The words are listed in alphabetical order, with uppercase letters listed before lowercase letters. The array is empty if the custom model contains no words. **Note:** This field is returned only when you list information about a specific custom voice model. **/ - words?: Word[]; - } - - /** Customizations. **/ - export interface Customizations { - /** Array of all custom voice models owned by the requesting service credentials. The array is empty if the requester owns no custom models. **/ - customizations: Customization[]; + /** CustomWord. **/ + export interface CustomWord { + /** A word that is to be added or updated for the custom voice model. **/ + word: string; + /** The phonetic or sounds-like translation for the word. A phonetic translation is based on the SSML format for representing the phonetic string of a word either as an IPA or IBM SPR translation. A sounds-like translation consists of one or more words that, when combined, sound like the word. **/ + translation: string; + /** **Japanese only.** The part of speech for the word. The service uses the value to produce the correct intonation for the word. You can create only a single entry, with or without a single part of speech, for any word; you cannot create multiple entries with different parts of speech for the same word. 
For more information, see [Working with Japanese entries](https://console.bluemix.net/docs/services/text-to-speech/custom-rules.html#jaNotes). **/ + part_of_speech?: string; } /** Pronunciation. **/ export interface Pronunciation { - /** Pronunciation of the requested text in the specified voice and format. **/ + /** The pronunciation of the requested text in the specified voice and format. **/ pronunciation: string; } @@ -1033,7 +995,7 @@ namespace GeneratedTextToSpeechV1 { /** Translation. **/ export interface Translation { - /** Phonetic or sounds-like translation for the word. A phonetic translation is based on the SSML format for representing the phonetic string of a word either as an IPA translation or as an IBM SPR translation. A sounds-like is one or more words that, when combined, sound like the word. **/ + /** The phonetic or sounds-like translation for the word. A phonetic translation is based on the SSML format for representing the phonetic string of a word either as an IPA translation or as an IBM SPR translation. A sounds-like is one or more words that, when combined, sound like the word. **/ translation: string; /** **Japanese only.** The part of speech for the word. The service uses the value to produce the correct intonation for the word. You can create only a single entry, with or without a single part of speech, for any word; you cannot create multiple entries with different parts of speech for the same word. For more information, see [Working with Japanese entries](https://console.bluemix.net/docs/services/text-to-speech/custom-rules.html#jaNotes). **/ part_of_speech?: string; @@ -1041,35 +1003,61 @@ namespace GeneratedTextToSpeechV1 { /** Voice. **/ export interface Voice { - /** URI of the voice. **/ + /** The URI of the voice. **/ url: string; - /** Gender of the voice: 'male' or 'female'. **/ + /** The gender of the voice: `male` or `female`. **/ gender: string; - /** Name of the voice. Use this as the voice identifier in all requests. 
**/ + /** The name of the voice. Use this as the voice identifier in all requests. **/ name: string; - /** Language and region of the voice (for example, 'en-US'). **/ + /** The language and region of the voice (for example, `en-US`). **/ language: string; - /** Textual description of the voice. **/ + /** A textual description of the voice. **/ description: string; /** If `true`, the voice can be customized; if `false`, the voice cannot be customized. (Same as `custom_pronunciation`; maintained for backward compatibility.). **/ customizable: boolean; /** Describes the additional service features supported with the voice. **/ supported_features: SupportedFeatures; /** Returns information about a specified custom voice model. **Note:** This field is returned only when you list information about a specific voice and specify the GUID of a custom voice model that is based on that voice. **/ - customization?: Customization; + customization?: VoiceModel; + } + + /** VoiceModel. **/ + export interface VoiceModel { + /** The customization ID (GUID) of the custom voice model. **Note:** When you create a new custom voice model, the service returns only the GUID of the new custom model; it does not return the other fields of this object. **/ + customization_id: string; + /** The name of the custom voice model. **/ + name?: string; + /** The language identifier of the custom voice model (for example, `en-US`). **/ + language?: string; + /** The GUID of the service credentials for the instance of the service that owns the custom voice model. **/ + owner?: string; + /** The date and time in Coordinated Universal Time (UTC) at which the custom voice model was created. The value is provided in full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). **/ + created?: string; + /** The date and time in Coordinated Universal Time (UTC) at which the custom voice model was last modified. Equals `created` when a new voice model is first added but has yet to be updated. 
The value is provided in full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). **/ + last_modified?: string; + /** The description of the custom voice model. **/ + description?: string; + /** An array of words and their translations from the custom voice model. The words are listed in alphabetical order, with uppercase letters listed before lowercase letters. The array is empty if the custom model contains no words. **Note:** This field is returned only when you list information about a specific custom voice model. **/ + words?: Word[]; + } + + /** VoiceModels. **/ + export interface VoiceModels { + /** An array of `VoiceModel` objects that provides information about each available custom voice model. The array is empty if the requesting service credentials own no custom voice models (if no language is specified) or own no custom voice models for the specified language. **/ + customizations: VoiceModel[]; } - /** Description of the available voices. **/ + /** Voices. **/ export interface Voices { - /** List of voices. **/ + /** A list of available voices. **/ voices: Voice[]; } /** Word. **/ export interface Word { - /** Word from the custom voice model. **/ + /** A word from the custom voice model. **/ word: string; - /** Phonetic or sounds-like translation for the word. A phonetic translation is based on the SSML format for representing the phonetic string of a word either as an IPA or IBM SPR translation. A sounds-like translation consists of one or more words that, when combined, sound like the word. **/ + /** The phonetic or sounds-like translation for the word. A phonetic translation is based on the SSML format for representing the phonetic string of a word either as an IPA or IBM SPR translation. A sounds-like translation consists of one or more words that, when combined, sound like the word. **/ translation: string; /** **Japanese only.** The part of speech for the word. The service uses the value to produce the correct intonation for the word. 
You can create only a single entry, with or without a single part of speech, for any word; you cannot create multiple entries with different parts of speech for the same word. For more information, see [Working with Japanese entries](https://console.bluemix.net/docs/services/text-to-speech/custom-rules.html#jaNotes). **/ part_of_speech?: string; @@ -1077,7 +1065,7 @@ namespace GeneratedTextToSpeechV1 { /** Words. **/ export interface Words { - /** When you add words to a custom voice model, you provide the required information for each word. When you list the words from a custom model, the service returns information about all words from the model in alphabetical order, with uppercase letters listed before lowercase letters; the array is empty if the custom model contains no words. **/ + /** An array of words and their translations from the custom voice model. The words are listed in alphabetical order, with uppercase letters listed before lowercase letters. The array is empty if the custom model contains no words. 
**/ words: Word[]; } } diff --git a/text-to-speech/v1.ts b/text-to-speech/v1.ts index 451578169d..78042dc81d 100644 --- a/text-to-speech/v1.ts +++ b/text-to-speech/v1.ts @@ -6,7 +6,23 @@ class TextToSpeechV1 extends GeneratedTextToSpeechV1 { } getCustomizations(params, callback) { - return super.listCustomizations(params, callback); + return super.listVoiceModels(params, callback); + } + + getCustomization(params, callback) { + return super.getVoiceModel(params, callback); + } + + updateCustomization(params, callback) { + return super.updateVoiceModel(params, callback); + } + + deleteCustomization(params, callback) { + return super.deleteVoiceModel(params, callback); + } + + createCustomization(params, callback) { + return super.createVoiceModel(params, callback); } getWords(params, callback) { @@ -21,6 +37,10 @@ class TextToSpeechV1 extends GeneratedTextToSpeechV1 { return super.getVoice(params, callback); } + pronunciation(params, callback) { + return super.getPronunciation(params, callback); + } + /** * Repair the WAV header of an audio/wav file. * diff --git a/tone-analyzer/v3-generated.ts b/tone-analyzer/v3-generated.ts index 30fc752271..470c64cc1c 100644 --- a/tone-analyzer/v3-generated.ts +++ b/tone-analyzer/v3-generated.ts @@ -21,18 +21,17 @@ import { getMissingParams } from '../lib/helper'; import { BaseService } from '../lib/base_service'; /** - * ### Service Overview The IBM Watson Tone Analyzer service uses linguistic analysis to detect emotional and language tones in written text. The service can analyze tone at both the document and sentence levels. You can use the service to understand how your written communications are perceived and then to improve the tone of your communications. Businesses can use the service to learn the tone of their customers' communications and to respond to each customer appropriately, or to understand and improve their customer conversations. 
### API Usage The following information provides details about using the service to analyze tone: * **The tone method:** The service offers `GET` and `POST /v3/tone` methods that use the general purpose endpoint to analyze the tone of input content. The methods accept content in JSON, plain text, or HTML format. * **The tone_chat method:** The service offers a `POST /v3/tone_chat` method that uses the customer engagement endpoint to analyze the tone of customer service and customer support conversations. The method accepts content in JSON format. * **Authentication:** You authenticate to the service by using your service credentials. You can use your credentials to authenticate via a proxy server that resides in Bluemix, or you can use your credentials to obtain a token and contact the service directly. See [Service credentials for Watson services](https://console.bluemix.net/docs/services/watson/getting-started-credentials.html) and [Tokens for authentication](https://console.bluemix.net/docs/services/watson/getting-started-tokens.html). * **Request Logging:** By default, all Watson services log requests and their results. Data is collected only to improve the Watson services. If you do not want to share your data, set the header parameter `X-Watson-Learning-Opt-Out` to `true` for each request. Data is collected for any request that omits this header. See [Controlling request logging for Watson services](https://console.bluemix.net/docs/services/watson/getting-started-logging.html). For more information about the service, see [About Tone Analyzer](https://console.bluemix.net/docs/services/tone-analyzer/index.html). **Note:** Method descriptions apply to the latest version of the interface, `2017-09-21`. Where necessary, parameters and models describe differences between versions `2017-09-21` and `2016-05-19`. + * ### Service Overview The IBM Watson Tone Analyzer service uses linguistic analysis to detect emotional and language tones in written text. 
The service can analyze tone at both the document and sentence levels. You can use the service to understand how your written communications are perceived and then to improve the tone of your communications. Businesses can use the service to learn the tone of their customers' communications and to respond to each customer appropriately, or to understand and improve their customer conversations. ### API Usage The following information provides details about using the service to analyze tone: * **The tone method:** The service offers `GET` and `POST /v3/tone` methods that use the general purpose endpoint to analyze the tone of input content. The methods accept content in JSON, plain text, or HTML format. * **The tone_chat method:** The service offers a `POST /v3/tone_chat` method that uses the customer engagement endpoint to analyze the tone of customer service and customer support conversations. The method accepts content in JSON format. * **Authentication:** You authenticate to the service by using your service credentials. You can use your credentials to authenticate via a proxy server that resides in IBM Cloud, or you can use your credentials to obtain a token and contact the service directly. See [Service credentials for Watson services](https://console.bluemix.net/docs/services/watson/getting-started-credentials.html) and [Tokens for authentication](https://console.bluemix.net/docs/services/watson/getting-started-tokens.html). * **Request Logging:** By default, all Watson services log requests and their results. Data is collected only to improve the Watson services. If you do not want to share your data, set the header parameter `X-Watson-Learning-Opt-Out` to `true` for each request. Data is collected for any request that omits this header. See [Controlling request logging for Watson services](https://console.bluemix.net/docs/services/watson/getting-started-logging.html). 
For more information about the service, see [About Tone Analyzer](https://console.bluemix.net/docs/services/tone-analyzer/index.html). **Note:** Method descriptions apply to the latest version of the interface, `2017-09-21`. Where necessary, parameters and models describe differences between versions `2017-09-21` and `2016-05-19`. */ -class ToneAnalyzerV3 extends BaseService { - +class GeneratedToneAnalyzerV3 extends BaseService { name: string; // set by prototype to 'tone_analyzer' version: string; // set by prototype to 'v3' static URL: string = 'https://gateway.watsonplatform.net/tone-analyzer/api'; /** - * Construct a ToneAnalyzerV3 object. + * Construct a GeneratedToneAnalyzerV3 object. * * @param {Object} options - Options for the service. * @param {String} options.version_date - The API version date to use with the service, in "YYYY-MM-DD" format. Whenever the API is changed in a backwards incompatible way, a new minor version of the API is released. The service uses the API version for the date you specify, or the most recent version before that date. Note that you should not programmatically specify the current date at runtime, in case the API has been updated since your application's release. Instead, specify a version date that is compatible with your application, and don't change it until your application is ready for a later version. @@ -43,10 +42,10 @@ class ToneAnalyzerV3 extends BaseService { * @param {Object} [options.headers] - Default headers that shall be included with every request to the service. * @param {Object} [options.headers.X-Watson-Learning-Opt-Out] - Set to `true` to opt-out of data collection. By default, all IBM Watson services log requests and their results. Logging is done only to improve the services for future users. The logged data is not shared or made public. If you are concerned with protecting the privacy of users' personal information or otherwise do not want your requests to be logged, you can opt out of logging. 
* @constructor - * @returns {ToneAnalyzerV3} + * @returns {GeneratedToneAnalyzerV3} * @throws {Error} */ - constructor(options: ToneAnalyzerV3.Options) { + constructor(options: GeneratedToneAnalyzerV3.Options) { super(options); // check if 'version_date' was provided if (typeof this._options.version_date === 'undefined') { @@ -74,16 +73,21 @@ class ToneAnalyzerV3 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - tone(params: ToneAnalyzerV3.ToneParams, callback?: ToneAnalyzerV3.Callback): ReadableStream | void { + tone( + params: GeneratedToneAnalyzerV3.ToneParams, + callback?: GeneratedToneAnalyzerV3.Callback< + GeneratedToneAnalyzerV3.ToneAnalysis + > + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; + const _callback = callback ? callback : () => {}; const requiredParams = ['tone_input', 'content_type']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } const body = _params.tone_input; - const query = { + const query = { sentences: _params.sentences, tones: _params.tones }; @@ -91,21 +95,21 @@ class ToneAnalyzerV3 extends BaseService { options: { url: '/v3/tone', method: 'POST', - json: (_params.content_type === 'application/json'), + json: _params.content_type === 'application/json', body: body, - qs: query, + qs: query }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', - 'content-type': _params.content_type, - 'content-language': _params.content_language, - 'accept-language': _params.accept_language + Accept: 'application/json', + 'Content-Type': _params.content_type, + 'Content-Language': _params.content_language, + 'Accept-Language': _params.accept_language } }) }; return createRequest(parameters, _callback); - }; + } /** * Analyze customer 
engagement tone. @@ -118,15 +122,20 @@ class ToneAnalyzerV3 extends BaseService { * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ - toneChat(params: ToneAnalyzerV3.ToneChatParams, callback?: ToneAnalyzerV3.Callback): ReadableStream | void { + toneChat( + params: GeneratedToneAnalyzerV3.ToneChatParams, + callback?: GeneratedToneAnalyzerV3.Callback< + GeneratedToneAnalyzerV3.UtteranceAnalyses + > + ): ReadableStream | void { const _params = extend({}, params); - const _callback = (callback) ? callback : () => {}; + const _callback = callback ? callback : () => {}; const requiredParams = ['utterances']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { return _callback(missingParams); } - const body = { + const body = { utterances: _params.utterances }; const parameters = { @@ -134,34 +143,29 @@ class ToneAnalyzerV3 extends BaseService { url: '/v3/tone_chat', method: 'POST', json: true, - body: body, + body: body }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - 'accept': 'application/json', - 'content-type': 'application/json', - 'accept-language': _params.accept_language + Accept: 'application/json', + 'Content-Type': 'application/json', + 'Accept-Language': _params.accept_language } }) }; return createRequest(parameters, _callback); - }; - + } } -ToneAnalyzerV3.prototype.name = 'tone_analyzer'; -ToneAnalyzerV3.prototype.version = 'v3'; +GeneratedToneAnalyzerV3.prototype.name = 'tone_analyzer'; +GeneratedToneAnalyzerV3.prototype.version = 'v3'; /************************* * interfaces ************************/ -namespace ToneAnalyzerV3 { - - export interface Empty { } - - export type Callback = (error: any, body?: T, response?: RequestResponse) => void; - +namespace GeneratedToneAnalyzerV3 { + /** Options for the `GeneratedToneAnalyzerV3` constructor. 
**/ export type Options = { version_date: string; url?: string; @@ -169,31 +173,52 @@ namespace ToneAnalyzerV3 { password?: string; use_unauthenticated?: boolean; headers?: object; - } + }; + + /** The callback for a service request. **/ + export type Callback = ( + error: any, + body?: T, + response?: RequestResponse + ) => void; + + /** The body of a service request that returns no response data. **/ + export interface Empty {} /************************* * request interfaces ************************/ + /** Parameters for the `tone` operation. **/ export interface ToneParams { - tone_input: ToneInput|string; + /** JSON, plain text, or HTML input that contains the content to be analyzed. For JSON input, provide an object of type `ToneInput`. **/ + tone_input: ToneInput | string; + /** The type of the input: application/json, text/plain, or text/html. A character encoding can be specified by including a `charset` parameter. For example, 'text/plain;charset=utf-8'. **/ content_type: ToneConstants.ContentType | string; + /** Indicates whether the service is to return an analysis of each individual sentence in addition to its analysis of the full document. If `true` (the default), the service returns results for each sentence. **/ sentences?: boolean; + /** **`2017-09-21`:** Deprecated. The service continues to accept the parameter for backward-compatibility, but the parameter no longer affects the response. **`2016-05-19`:** A comma-separated list of tones for which the service is to return its analysis of the input; the indicated tones apply both to the full document and to individual sentences of the document. You can specify one or more of the valid values. Omit the parameter to request results for all three tones. **/ tones?: string[]; + /** The language of the input text for the request: English or French. Regional variants are treated as their parent language; for example, `en-US` is interpreted as `en`. The input content must match the specified language. 
Do not submit content that contains both languages. You can specify any combination of languages for `content_language` and `Accept-Language`. * **`2017-09-21`:** Accepts `en` or `fr`. * **`2016-05-19`:** Accepts only `en`. **/ content_language?: ToneConstants.ContentLanguage | string; + /** The desired language of the response. For two-character arguments, regional variants are treated as their parent language; for example, `en-US` is interpreted as `en`. You can specify any combination of languages for `Content-Language` and `accept_language`. **/ accept_language?: ToneConstants.AcceptLanguage | string; } + /** Constants for the `tone` operation. **/ export namespace ToneConstants { + /** The type of the input: application/json, text/plain, or text/html. A character encoding can be specified by including a `charset` parameter. For example, 'text/plain;charset=utf-8'. **/ export enum ContentType { APPLICATION_JSON = 'application/json', TEXT_PLAIN = 'text/plain', - TEXT_HTML = 'text/html', + TEXT_HTML = 'text/html' } + /** The language of the input text for the request: English or French. Regional variants are treated as their parent language; for example, `en-US` is interpreted as `en`. The input content must match the specified language. Do not submit content that contains both languages. You can specify any combination of languages for `content_language` and `Accept-Language`. * **`2017-09-21`:** Accepts `en` or `fr`. * **`2016-05-19`:** Accepts only `en`. **/ export enum ContentLanguage { EN = 'en', - FR = 'fr', + FR = 'fr' } + /** The desired language of the response. For two-character arguments, regional variants are treated as their parent language; for example, `en-US` is interpreted as `en`. You can specify any combination of languages for `Content-Language` and `accept_language`. 
**/ export enum AcceptLanguage { AR = 'ar', DE = 'de', @@ -205,16 +230,21 @@ namespace ToneAnalyzerV3 { KO = 'ko', PT_BR = 'pt-br', ZH_CN = 'zh-cn', - ZH_TW = 'zh-tw', + ZH_TW = 'zh-tw' } } + /** Parameters for the `toneChat` operation. **/ export interface ToneChatParams { + /** An array of `Utterance` objects that provides the input content that the service is to analyze. **/ utterances: Utterance[]; + /** The desired language of the response. For two-character arguments, regional variants are treated as their parent language; for example, `en-US` is interpreted as `en`. **/ accept_language?: ToneChatConstants.AcceptLanguage | string; } + /** Constants for the `toneChat` operation. **/ export namespace ToneChatConstants { + /** The desired language of the response. For two-character arguments, regional variants are treated as their parent language; for example, `en-US` is interpreted as `en`. **/ export enum AcceptLanguage { AR = 'ar', DE = 'de', @@ -226,7 +256,7 @@ namespace ToneAnalyzerV3 { KO = 'ko', PT_BR = 'pt-br', ZH_CN = 'zh-cn', - ZH_TW = 'zh-tw', + ZH_TW = 'zh-tw' } } @@ -234,65 +264,103 @@ namespace ToneAnalyzerV3 { * model interfaces ************************/ + /** DocumentAnalysis. **/ export interface DocumentAnalysis { + /** **`2017-09-21`:** An array of `ToneScore` objects that provides the results of the analysis for each qualifying tone of the document. The array includes results for any tone whose score is at least 0.5. The array is empty if no tone has a score that meets this threshold. **`2016-05-19`:** Not returned. **/ tones?: ToneScore[]; + /** **`2017-09-21`:** Not returned. **`2016-05-19`:** An array of `ToneCategory` objects that provides the results of the tone analysis for the full document of the input content. The service returns results only for the tones specified with the `tones` parameter of the request. 
**/ tone_categories?: ToneCategory[]; + /** **`2017-09-21`:** A warning message if the overall content exceeds 128 KB or contains more than 1000 sentences. The service analyzes only the first 1000 sentences for document-level analysis and the first 100 sentences for sentence-level analysis. **`2016-05-19`:** Not returned. **/ warning?: string; } + /** SentenceAnalysis. **/ export interface SentenceAnalysis { + /** The unique identifier of a sentence of the input content. The first sentence has ID 0, and the ID of each subsequent sentence is incremented by one. **/ sentence_id: number; + /** The text of the input sentence. **/ text: string; + /** **`2017-09-21`:** An array of `ToneScore` objects that provides the results of the analysis for each qualifying tone of the sentence. The array includes results for any tone whose score is at least 0.5. The array is empty if no tone has a score that meets this threshold. **`2016-05-19`:** Not returned. **/ tones?: ToneScore[]; + /** **`2017-09-21`:** Not returned. **`2016-05-19`:** An array of `ToneCategory` objects that provides the results of the tone analysis for the sentence. The service returns results only for the tones specified with the `tones` parameter of the request. **/ tone_categories?: ToneCategory[]; + /** **`2017-09-21`:** Not returned. **`2016-05-19`:** The offset of the first character of the sentence in the overall input content. **/ input_from?: number; + /** **`2017-09-21`:** Not returned. **`2016-05-19`:** The offset of the last character of the sentence in the overall input content. **/ input_to?: number; } + /** ToneAnalysis. **/ export interface ToneAnalysis { + /** An object of type `DocumentAnalysis` that provides the results of the analysis for the full input document. **/ document_tone: DocumentAnalysis; + /** An array of `SentenceAnalysis` objects that provides the results of the analysis for the individual sentences of the input content. 
The service returns results only for the first 100 sentences of the input. The field is omitted if the `sentences` parameter of the request is set to `false`. **/ sentences_tone?: SentenceAnalysis[]; } + /** ToneCategory. **/ export interface ToneCategory { + /** An array of `ToneScore` objects that provides the results for the tones of the category. **/ tones: ToneScore[]; + /** The unique, non-localized identifier of the category for the results. The service can return results for the following category IDs: `emotion_tone`, `language_tone`, and `social_tone`. **/ category_id: string; + /** The user-visible, localized name of the category. **/ category_name: string; } + /** ToneChatScore. **/ export interface ToneChatScore { + /** The score for the tone in the range of 0.5 to 1. A score greater than 0.75 indicates a high likelihood that the tone is perceived in the utterance. **/ score: number; + /** The unique, non-localized identifier of the tone for the results. The service can return results for the following tone IDs: `sad`, `frustrated`, `satisfied`, `excited`, `polite`, `impolite`, and `sympathetic`. The service returns results only for tones whose scores meet a minimum threshold of 0.5. **/ tone_id: string; + /** The user-visible, localized name of the tone. **/ tone_name: string; } + /** ToneInput. **/ export interface ToneInput { + /** The input content that the service is to analyze. **/ text: string; } + /** ToneScore. **/ export interface ToneScore { + /** The score for the tone. * **`2017-09-21`:** The score that is returned lies in the range of 0.5 to 1. A score greater than 0.75 indicates a high likelihood that the tone is perceived in the content. * **`2016-05-19`:** The score that is returned lies in the range of 0 to 1. A score less than 0.5 indicates that the tone is unlikely to be perceived in the content; a score greater than 0.75 indicates a high likelihood that the tone is perceived. 
**/ score: number; + /** The unique, non-localized identifier of the tone. * **`2017-09-21`:** The service can return results for the following tone IDs: `anger`, `fear`, `joy`, and `sadness` (emotional tones); `analytical`, `confident`, and `tentative` (language tones). The service returns results only for tones whose scores meet a minimum threshold of 0.5. * **`2016-05-19`:** The service can return results for the following tone IDs of the different categories: for the `emotion` category: `anger`, `disgust`, `fear`, `joy`, and `sadness`; for the `language` category: `analytical`, `confident`, and `tentative`; for the `social` category: `openness_big5`, `conscientiousness_big5`, `extraversion_big5`, `agreeableness_big5`, and `emotional_range_big5`. The service returns scores for all tones of a category, regardless of their values. **/ tone_id: string; + /** The user-visible, localized name of the tone. **/ tone_name: string; } + /** Utterance. **/ export interface Utterance { + /** An utterance contributed by a user in the conversation that is to be analyzed. The utterance can contain multiple sentences. **/ text: string; + /** A string that identifies the user who contributed the utterance specified by the `text` parameter. **/ user?: string; } + /** UtteranceAnalyses. **/ export interface UtteranceAnalyses { + /** An array of `UtteranceAnalysis` objects that provides the results for each utterance of the input. **/ utterances_tone: UtteranceAnalysis[]; + /** **`2017-09-21`:** A warning message if the content contains more than 50 utterances. The service analyzes only the first 50 utterances. **`2016-05-19`:** Not returned. **/ warning?: string; } + /** UtteranceAnalysis. **/ export interface UtteranceAnalysis { + /** The unique identifier of the utterance. The first utterance has ID 0, and the ID of each subsequent utterance is incremented by one. **/ utterance_id: string; + /** The text of the utterance. 
**/ utterance_text: string; + /** An array of `ToneChatScore` objects that provides results for the most prevalent tones of the utterance. The array includes results for any tone whose score is at least 0.5. The array is empty if no tone has a score that meets this threshold. **/ tones: ToneChatScore[]; + /** **`2017-09-21`:** An error message if the utterance contains more than 500 characters. The service does not analyze the utterance. **`2016-05-19`:** Not returned. **/ error?: string; } - } -export = ToneAnalyzerV3; +export = GeneratedToneAnalyzerV3; diff --git a/visual-recognition/v3-generated.ts b/visual-recognition/v3-generated.ts index f6dfd27c62..fb1c0445d3 100644 --- a/visual-recognition/v3-generated.ts +++ b/visual-recognition/v3-generated.ts @@ -22,11 +22,11 @@ import { BaseService } from '../lib/base_service'; import { FileObject } from '../lib/helper'; /** - * **Important**: As of September 8, 2017, the beta period for Similarity Search is closed. For more information, see [Visual Recognition API – Similarity Search Update](https://www.ibm.com/blogs/bluemix/2017/08/visual-recognition-api-similarity-search-update). The IBM Watson Visual Recognition service uses deep learning algorithms to identify scenes, objects, and faces in images you upload to the service. You can create and train a custom classifier to identify subjects that suit your needs. **Tip**: To test calls to the **Custom classifiers** methods with the API explorer, provide your `api_key` from your Bluemix service instance. + * **Important:** As of September 8, 2017, the beta period for Similarity Search is closed. For more information, see [Visual Recognition API – Similarity Search Update](https://www.ibm.com/blogs/bluemix/2017/08/visual-recognition-api-similarity-search-update). The IBM Watson Visual Recognition service uses deep learning algorithms to identify scenes, objects, and faces in images you upload to the service. 
You can create and train a custom classifier to identify subjects that suit your needs. **Tip:** To test calls to the **Custom classifiers** methods with the API explorer, provide your `api_key` from your IBM® Cloud service instance. */ class GeneratedVisualRecognitionV3 extends BaseService { - name: string; // set by prototype to 'watson_vision_combined' + name: string; // set by prototype to 'visual_recognition' version: string; // set by prototype to 'v3' static VERSION_DATE_2016_05_20: string = '2016-05-20'; @@ -51,16 +51,20 @@ class GeneratedVisualRecognitionV3 extends BaseService { super(options); // check if 'version_date' was provided if (typeof this._options.version_date === 'undefined') { - throw new Error( - 'Argument error: version_date was not specified, use GeneratedVisualRecognitionV3.VERSION_DATE_2016_05_20' - ); + throw new Error('Argument error: version_date was not specified'); } this._options.qs.version = options.version_date; } + /************************* + * classify + ************************/ + /** * Classify images. * + * Classify images with the built-in classes. You can analyze images against the built-in classifiers or against an array of classifier IDs. To identify custom classifiers, include the **classifier_ids** or **owners** parameters. + * * @param {Object} [params] - The parameters to send to the service. * @param {ReadableStream|FileObject|Buffer} [params.images_file] - An image file (.jpg, .png) or .zip file with images. Include no more than 20 images and limit the .zip file to 5 MB. You can also include images with the `url` property in the **parameters** object. * @param {string} [params.parameters] - Specifies input parameters. The parameter can include these inputs in a JSON object: - url: A string with the image URL to analyze. You can also include images in the **images_file** parameter. - classifier_ids: An array of classifier IDs to classify the images against. 
- owners: An array with the values IBM, me, or both to specify which classifiers to run. - threshold: A floating point value that specifies the minimum score a class must have to be displayed in the response. For example: {"url": "...", "classifier_ids": ["...","..."], "owners": ["IBM", "me"], "threshold": 0.4}. @@ -75,8 +79,12 @@ class GeneratedVisualRecognitionV3 extends BaseService { GeneratedVisualRecognitionV3.ClassifiedImages > ): ReadableStream | void { - const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _params = + typeof params === 'function' && !callback ? {} : extend({}, params); + const _callback = + typeof params === 'function' && !callback + ? params + : callback ? callback : () => {}; const formData = { images_file: { data: _params.images_file, @@ -90,11 +98,11 @@ class GeneratedVisualRecognitionV3 extends BaseService { method: 'POST', formData: formData }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'multipart/form-data', - 'accept-language': _params.accept_language + Accept: 'application/json', + 'Content-Type': 'multipart/form-data', + 'Accept-Language': _params.accept_language } }) }; @@ -102,7 +110,9 @@ class GeneratedVisualRecognitionV3 extends BaseService { } /** - * Detect faces in an image. + * Detect faces in images. + * + * Analyze and get data about faces in images. Responses can include estimated age and gender, and the service can identify celebrities. This feature uses a built-in classifier, so you do not train it on custom classifiers. The Detect faces method does not support general biometric facial recognition. * * @param {Object} [params] - The parameters to send to the service. * @param {ReadableStream|FileObject|Buffer} [params.images_file] - An image file (.jpg, .png) or .zip file with images. Include no more than 15 images. 
You can also include images with the `url` property in the **parameters** object. All faces are detected, but if there are more than 10 faces in an image, age and gender confidence scores might return scores of 0. @@ -117,8 +127,12 @@ class GeneratedVisualRecognitionV3 extends BaseService { GeneratedVisualRecognitionV3.DetectedFaces > ): ReadableStream | void { - const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _params = + typeof params === 'function' && !callback ? {} : extend({}, params); + const _callback = + typeof params === 'function' && !callback + ? params + : callback ? callback : () => {}; const formData = { images_file: { data: _params.images_file, @@ -132,22 +146,28 @@ class GeneratedVisualRecognitionV3 extends BaseService { method: 'POST', formData: formData }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'multipart/form-data' + Accept: 'application/json', + 'Content-Type': 'multipart/form-data' } }) }; return createRequest(parameters, _callback); } + /************************* + * customClassifiers + ************************/ + /** * Create a classifier. * + * Train a new multi-faceted classifier on the uploaded image data. Create your custom classifier with positive or negative examples. Include at least two sets of examples, either two positive example files or one positive and one negative file. You can upload a maximum of 256 MB per call. + * * @param {Object} params - The parameters to send to the service. * @param {string} params.name - The name of the new classifier. Cannot contain special characters. - * @param {ReadableStream|FileObject|Buffer} params._positive_examples - A compressed (.zip) file of images that depict the visual subject for a class within the new classifier. Must contain a minimum of 10 images. The swagger limits you to training only one class. 
To train more classes, use the API functionality. + * @param {ReadableStream|FileObject|Buffer} params.classname_positive_examples - A .zip file of images that depict the visual subject of a class in the new classifier. You can include more than one positive example file in a call. Append `_positive_examples` to the form name. The prefix is used as the class name. For example, `goldenretriever_positive_examples` creates the class **goldenretriever**. Include at least 10 images in .jpg or .png format. The minimum recommended image resolution is 32X32 pixels. The maximum number of images is 10,000 images or 100 MB per .zip file. The API explorer limits you to training only one class. To train more classes, use the API functionality. * @param {ReadableStream|FileObject|Buffer} [params.negative_examples] - A compressed (.zip) file of images that do not depict the visual subject of any of the classes of the new classifier. Must contain a minimum of 10 images. * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} @@ -159,7 +179,7 @@ class GeneratedVisualRecognitionV3 extends BaseService { > ): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = callback ? callback : () => {}; const _positive_example_classes = Object.keys(_params).filter(key => { return key.match(/^.+positive_examples$/); }) || ['_positive_examples']; @@ -187,10 +207,10 @@ class GeneratedVisualRecognitionV3 extends BaseService { method: 'POST', formData: formData }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'multipart/form-data' + Accept: 'application/json', + 'Content-Type': 'multipart/form-data' } }) }; @@ -198,7 +218,7 @@ class GeneratedVisualRecognitionV3 extends BaseService { } /** - * Delete a custom classifier. 
+ * Delete a classifier. * * @param {Object} params - The parameters to send to the service. * @param {string} params.classifier_id - The ID of the classifier. @@ -212,7 +232,7 @@ class GeneratedVisualRecognitionV3 extends BaseService { > ): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = callback ? callback : () => {}; const requiredParams = ['classifier_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { @@ -227,10 +247,10 @@ class GeneratedVisualRecognitionV3 extends BaseService { method: 'DELETE', path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + Accept: 'application/json', + 'Content-Type': 'application/json' } }) }; @@ -238,7 +258,9 @@ class GeneratedVisualRecognitionV3 extends BaseService { } /** - * Retrieve information about a custom classifier. + * Retrieve classifier details. + * + * Retrieve information about a user-created classifier. * * @param {Object} params - The parameters to send to the service. * @param {string} params.classifier_id - The ID of the classifier. @@ -252,7 +274,7 @@ class GeneratedVisualRecognitionV3 extends BaseService { > ): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = callback ? 
callback : () => {}; const requiredParams = ['classifier_id']; const missingParams = getMissingParams(_params, requiredParams); if (missingParams) { @@ -267,10 +289,10 @@ class GeneratedVisualRecognitionV3 extends BaseService { method: 'GET', path: path }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + Accept: 'application/json', + 'Content-Type': 'application/json' } }) }; @@ -281,7 +303,7 @@ class GeneratedVisualRecognitionV3 extends BaseService { * Retrieve a list of custom classifiers. * * @param {Object} [params] - The parameters to send to the service. - * @param {boolean} [params.verbose] - Specify true to return classifier details. Omit this parameter to return a brief list of classifiers. + * @param {boolean} [params.verbose] - Specify `true` to return classifier details. Omit this parameter to return a brief list of classifiers. * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} */ @@ -291,8 +313,12 @@ class GeneratedVisualRecognitionV3 extends BaseService { GeneratedVisualRecognitionV3.Classifiers > ): ReadableStream | void { - const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _params = + typeof params === 'function' && !callback ? {} : extend({}, params); + const _callback = + typeof params === 'function' && !callback + ? params + : callback ? 
callback : () => {}; const query = { verbose: _params.verbose }; @@ -302,10 +328,10 @@ class GeneratedVisualRecognitionV3 extends BaseService { method: 'GET', qs: query }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'application/json' + Accept: 'application/json', + 'Content-Type': 'application/json' } }) }; @@ -315,9 +341,11 @@ class GeneratedVisualRecognitionV3 extends BaseService { /** * Update a classifier. * + * Update a custom classifier by adding new positive or negative classes (examples) or by adding new images to existing classes. You must supply at least one set of positive or negative examples. For details, see [Updating custom classifiers](https://console.bluemix.net/docs/services/visual-recognition/customizing.html#updating-custom-classifiers). **Important:** You can't update a custom classifier with a free API key. If you have a free key, you can upgrade to a Standard plan. For details, see [Changing your plan](https://console.bluemix.net/docs/pricing/changing_plan.html). **Tip:** Don't make retraining calls on a classifier until the status is ready. When you submit retraining requests in parallel, the last request overwrites the previous requests. The retrained property shows the last time the classifier retraining finished. + * * @param {Object} params - The parameters to send to the service. * @param {string} params.classifier_id - The ID of the classifier. - * @param {ReadableStream|FileObject|Buffer} [params._positive_examples] - A compressed (.zip) file of images that depict the visual subject for a class within the classifier. Must contain a minimum of 10 images. + * @param {ReadableStream|FileObject|Buffer} [params.classname_positive_examples] - A .zip file of images that depict the visual subject of a class in the classifier. The positive examples create or update classes in the classifier. 
You can include more than one positive example file in a call. Append `_positive_examples` to the form name. The prefix is used to name the class. For example, `goldenretriever_positive_examples` creates the class `goldenretriever`. Include at least 10 images in .jpg or .png format. The minimum recommended image resolution is 32X32 pixels. The maximum number of images is 10,000 images or 100 MB per .zip file. * @param {ReadableStream|FileObject|Buffer} [params.negative_examples] - A compressed (.zip) file of images that do not depict the visual subject of any of the classes of the new classifier. Must contain a minimum of 10 images. * @param {Function} [callback] - The callback that handles the response. * @returns {ReadableStream|void} @@ -329,7 +357,7 @@ class GeneratedVisualRecognitionV3 extends BaseService { > ): ReadableStream | void { const _params = extend({}, params); - const _callback = typeof callback === 'function' ? callback : () => {}; + const _callback = callback ? callback : () => {}; const _positive_example_classes = Object.keys(_params).filter(key => { return key.match(/^.+positive_examples$/); }); @@ -360,10 +388,10 @@ class GeneratedVisualRecognitionV3 extends BaseService { path: path, formData: formData }, - defaultOptions: extend(true, this._options, { + defaultOptions: extend(true, {}, this._options, { headers: { - accept: 'application/json', - 'content-type': 'multipart/form-data' + Accept: 'application/json', + 'Content-Type': 'multipart/form-data' } }) }; @@ -374,15 +402,12 @@ class GeneratedVisualRecognitionV3 extends BaseService { GeneratedVisualRecognitionV3.prototype.name = 'visual_recognition'; GeneratedVisualRecognitionV3.prototype.version = 'v3'; -namespace GeneratedVisualRecognitionV3 { - export interface Empty {} - - export type Callback = ( - error: any, - body?: T, - response?: RequestResponse - ) => void; +/************************* + * interfaces + ************************/ +namespace GeneratedVisualRecognitionV3 { + /** 
Options for the `GeneratedVisualRecognitionV3` constructor. **/ export type Options = { version_date: string; url?: string; @@ -393,14 +418,35 @@ namespace GeneratedVisualRecognitionV3 { headers?: object; }; + /** The callback for a service request. **/ + export type Callback = ( + error: any, + body?: T, + response?: RequestResponse + ) => void; + + /** The body of a service request that returns no response data. **/ + export interface Empty {} + + /************************* + * request interfaces + ************************/ + + /** Parameters for the `classify` operation. **/ export interface ClassifyParams { + /** An image file (.jpg, .png) or .zip file with images. Include no more than 20 images and limit the .zip file to 5 MB. You can also include images with the `url` property in the **parameters** object. **/ images_file?: ReadableStream | FileObject | Buffer; + /** Specifies input parameters. The parameter can include these inputs in a JSON object: - url: A string with the image URL to analyze. You can also include images in the **images_file** parameter. - classifier_ids: An array of classifier IDs to classify the images against. - owners: An array with the values IBM, me, or both to specify which classifiers to run. - threshold: A floating point value that specifies the minimum score a class must have to be displayed in the response. For example: {"url": "...", "classifier_ids": ["...","..."], "owners": ["IBM", "me"], "threshold": 0.4}. **/ parameters?: string; + /** Specifies the language of the output class names. Can be `en` (English), `ar` (Arabic), `de` (German), `es` (Spanish), `it` (Italian), `ja` (Japanese), or `ko` (Korean). Classes for which no translation is available are omitted. The response might not be in the specified language under these conditions: - English is returned when the requested language is not supported. - Classes are not returned when there is no translation for them. 
- Custom classifiers returned with this method return tags in the language of the custom classifier. **/ accept_language?: ClassifyConstants.AcceptLanguage | string; + /** The content type of images_file. **/ images_file_content_type?: string; } + /** Constants for the `classify` operation. **/ export namespace ClassifyConstants { + /** Specifies the language of the output class names. Can be `en` (English), `ar` (Arabic), `de` (German), `es` (Spanish), `it` (Italian), `ja` (Japanese), or `ko` (Korean). Classes for which no translation is available are omitted. The response might not be in the specified language under these conditions: - English is returned when the requested language is not supported. - Classes are not returned when there is no translation for them. - Custom classifiers returned with this method return tags in the language of the custom classifier. **/ export enum AcceptLanguage { EN = 'en', AR = 'ar', @@ -412,92 +458,150 @@ namespace GeneratedVisualRecognitionV3 { } } + /** Parameters for the `detectFaces` operation. **/ export interface DetectFacesParams { + /** An image file (.jpg, .png) or .zip file with images. Include no more than 15 images. You can also include images with the `url` property in the **parameters** object. All faces are detected, but if there are more than 10 faces in an image, age and gender confidence scores might return scores of 0. **/ images_file?: ReadableStream | FileObject | Buffer; + /** A JSON string containing the image URL to analyze. For example: {"url": "..."}. **/ parameters?: string; + /** The content type of images_file. **/ images_file_content_type?: string; } + /** Parameters for the `createClassifier` operation. **/ export interface CreateClassifierParams { + /** The name of the new classifier. Cannot contain special characters. **/ name: string; + /** A .zip file of images that depict the visual subject of a class in the new classifier. You can include more than one positive example file in a call. 
Append `_positive_examples` to the form name. The prefix is used as the class name. For example, `goldenretriever_positive_examples` creates the class **goldenretriever**. Include at least 10 images in .jpg or .png format. The minimum recommended image resolution is 32X32 pixels. The maximum number of images is 10,000 images or 100 MB per .zip file. The API explorer limits you to training only one class. To train more classes, use the API functionality. **/ classname_positive_examples: ReadableStream | FileObject | Buffer; + /** A compressed (.zip) file of images that do not depict the visual subject of any of the classes of the new classifier. Must contain a minimum of 10 images. **/ negative_examples?: ReadableStream | FileObject | Buffer; } + /** Parameters for the `deleteClassifier` operation. **/ export interface DeleteClassifierParams { + /** The ID of the classifier. **/ classifier_id: string; } + /** Parameters for the `getClassifier` operation. **/ export interface GetClassifierParams { + /** The ID of the classifier. **/ classifier_id: string; } + /** Parameters for the `listClassifiers` operation. **/ export interface ListClassifiersParams { + /** Specify `true` to return classifier details. Omit this parameter to return a brief list of classifiers. **/ verbose?: boolean; } + /** Parameters for the `updateClassifier` operation. **/ export interface UpdateClassifierParams { + /** The ID of the classifier. **/ classifier_id: string; + /** A .zip file of images that depict the visual subject of a class in the classifier. The positive examples create or update classes in the classifier. You can include more than one positive example file in a call. Append `_positive_examples` to the form name. The prefix is used to name the class. For example, `goldenretriever_positive_examples` creates the class `goldenretriever`. Include at least 10 images in .jpg or .png format. The minimum recommended image resolution is 32X32 pixels. 
The maximum number of images is 10,000 images or 100 MB per .zip file. **/ classname_positive_examples?: ReadableStream | FileObject | Buffer; + /** A compressed (.zip) file of images that do not depict the visual subject of any of the classes of the new classifier. Must contain a minimum of 10 images. **/ negative_examples?: ReadableStream | FileObject | Buffer; } + /************************* + * model interfaces + ************************/ + + /** A category within a classifier. **/ export interface Class { + /** The name of the class. **/ class_name: string; } + /** Result of a class within a classifier. **/ export interface ClassResult { + /** The name of the class. **/ class_name: string; + /** Confidence score for the property. Scores range from 0-1, with a higher score indicating greater correlation. **/ score?: number; + /** Knowledge graph of the property. For example, `People/Leaders/Presidents/USA/Barack Obama`. Included only if identified. **/ type_hierarchy?: string; } + /** Classifier results for one image. **/ export interface ClassifiedImage { + /** Source of the image before any redirects. Not returned when the image is uploaded. **/ source_url?: string; + /** Fully resolved URL of the image after redirects are followed. Not returned when the image is uploaded. **/ resolved_url?: string; + /** Relative path of the image file if uploaded directly. Not returned when the image is passed by URL. **/ image?: string; error?: ErrorInfo; classifiers: ClassifierResult[]; } + /** Classify results for multiple images. **/ export interface ClassifiedImages { + /** The number of custom classes identified in the images. **/ custom_classes?: number; + /** Number of images processed for the API call. **/ images_processed?: number; + /** The array of classified images. **/ images: ClassifiedImage[]; + /** Information about what might cause less than optimal output. 
For example, a request sent with a corrupt .zip file and a list of image URLs will still complete, but does not return the expected output. Not returned when there is no warning. **/ warnings?: WarningInfo[]; } + /** Information about a classifier. **/ export interface Classifier { + /** The ID of the classifier. **/ classifier_id: string; + /** The name of the classifier. **/ name: string; + /** Unique ID of the account who owns the classifier. **/ owner?: string; + /** The training status of classifier. **/ status?: string; + /** If classifier training has failed, this field may explain why. **/ explanation?: string; + /** The time and date when classifier was created. **/ created?: string; + /** An array of classes that define a classifier. **/ classes?: Class[]; } + /** Classifier and score combination. **/ export interface ClassifierResult { + /** Name of the classifier. **/ name: string; + /** Classifier ID. Only returned if custom classifier. **/ classifier_id: string; + /** An array of classes within a classifier. **/ classes: ClassResult[]; } + /** Verbose list of classifiers retrieved in the GET v3/classifiers call. **/ export interface Classifiers { classifiers: Classifier[]; } + /** DetectedFaces. **/ export interface DetectedFaces { + /** Number of images processed for the API call. **/ images_processed?: number; + /** The array of images. **/ images: ImageWithFaces[]; + /** Information about what might cause less than optimal output. For example, a request sent with a corrupt .zip file and a list of image URLs will still complete, but does not return the expected output. Not returned when there is no warning. **/ warnings?: WarningInfo[]; } + /** Information about what might have caused a failure, such as an image that is too large. Not returned when there is no error. **/ export interface ErrorInfo { + /** Codified error string. For example, `limit_exceeded`. **/ error_id: string; + /** Human-readable error description. 
For example, `File size limit exceeded`. **/ description: string; } + /** Provides information about the face. **/ export interface Face { age?: FaceAge; gender?: FaceGender; @@ -505,40 +609,64 @@ namespace GeneratedVisualRecognitionV3 { identity?: FaceIdentity; } + /** Provides age information about a face. If there are more than 10 faces in an image, the response might return the confidence score `0`. **/ export interface FaceAge { + /** Estimated minimum age. **/ min?: number; + /** Estimated maximum age. **/ max?: number; + /** Confidence score for the property. Scores range from 0-1, with a higher score indicating greater correlation. **/ score?: number; } + /** Provides information about the gender of the face. If there are more than 10 faces in an image, the response might return the confidence score 0. **/ export interface FaceGender { + /** Gender identified by the face. For example, `MALE` or `FEMALE`. **/ gender: string; + /** Confidence score for the property. Scores range from 0-1, with a higher score indicating greater correlation. **/ score?: number; } + /** Provides information about a celebrity who is detected in the image. Not returned when a celebrity is not detected. **/ export interface FaceIdentity { + /** Name of the person. **/ name: string; + /** Confidence score for the property. Scores range from 0-1, with a higher score indicating greater correlation. **/ score?: number; + /** Knowledge graph of the property. For example, `People/Leaders/Presidents/USA/Barack Obama`. Included only if identified. **/ type_hierarchy?: string; } + /** Defines the location of the bounding box around the face. **/ export interface FaceLocation { + /** Width in pixels of face region. **/ width: number; + /** Height in pixels of face region. **/ height: number; + /** X-position of top-left pixel of face region. **/ left: number; + /** Y-position of top-left pixel of face region. **/ top: number; } + /** ImageWithFaces. 
**/ export interface ImageWithFaces { + /** An array of the faces detected in the images. **/ faces: Face[]; + /** Relative path of the image file if uploaded directly. Not returned when the image is passed by URL. **/ image?: string; + /** Source of the image before any redirects. Not returned when the image is uploaded. **/ source_url?: string; + /** Fully resolved URL of the image after redirects are followed. Not returned when the image is uploaded. **/ resolved_url?: string; error?: ErrorInfo; } + /** Information about something that went wrong. **/ export interface WarningInfo { + /** Codified warning string, such as `limit_reached`. **/ warning_id: string; + /** Information about the error. **/ description: string; } }