diff --git a/CODEOWNERS b/CODEOWNERS
new file mode 100644
index 00000000000..134daa4bed4
--- /dev/null
+++ b/CODEOWNERS
@@ -0,0 +1,31 @@
+# Code owners file.
+# This file controls who is tagged for review for any given pull request.
+
+# Luke Sneeringer is the default owner for anything not explicitly taken by
+# someone else.
+* @lukesneeringer
+
+# Dave Gramlich is the primary author of the scripts that generate the
+# documentation and run test scripts.
+docs/* @callmehiphop
+scripts/* @callmehiphop
+
+# Dave Gramlich and Stephen Sawchuk are the primary authors and share
+# responsibility for most libraries and tests.
+packages/* @callmehiphop @stephenplusplus
+system-test/* @callmehiphop @stephenplusplus
+test/* @callmehiphop @stephenplusplus
+
+# @GoogleCloudPlatform/node-team is responsible for the Logging extension
+# libraries.
+packages/error-reporting/* @GoogleCloudPlatform/node-team
+packages/logging-bunyan/* @GoogleCloudPlatform/node-team
+packages/logging-winston/* @GoogleCloudPlatform/node-team
+
+# Luke Sneeringer is the primary author of the ML API clients.
+packages/dlp/* @lukesneeringer
+packages/language/* @lukesneeringer
+packages/monitoring/* @lukesneeringer
+packages/speech/* @lukesneeringer
+packages/video-intelligence/* @lukesneeringer
+packages/vision/* @lukesneeringer
diff --git a/packages/error-reporting/README.md b/packages/error-reporting/README.md
index a47d30dd017..e8529aa573e 100644
--- a/packages/error-reporting/README.md
+++ b/packages/error-reporting/README.md
@@ -116,7 +116,9 @@ Alternatively, you can also follow the instructions for using a service account
## Running Elsewhere
-If your application is running outside of Google Cloud Platform, such as locally, on-premise, or on another cloud provider, you can still use Stackdriver Errors.
+If your application is running outside of Google Cloud Platform, such as locally, on-premise, or on another cloud provider, you can still use Stackdriver Errors either with locally-stored credentials or with an API Key.
+
+### Using Locally-Stored Credentials
1. You will need to specify your project ID when starting the errors agent.
@@ -145,6 +147,25 @@ If your application is running outside of Google Cloud Platform, such as locally
When running on Google Cloud Platform, we handle these for you automatically.
+### Using an API Key
+
+You may use an API key in lieu of locally-stored credentials. Please see [this document][api-key] on how to set up an API key if you do not already have one.
+
+Once you have obtained an API key, you may provide it as part of the Error Reporting instance configuration:
+
+```js
+var errors = require('@google-cloud/error-reporting')({
+ projectId: '{your project ID}',
+ key: '{your api key}'
+});
+```
+
+If a key is provided, the module will not attempt to authenticate using the methods associated with locally-stored credentials as mentioned in the previous section.
+
+We recommend using a file, environment variable, or another mechanism to store the API key rather than hard-coding it into your application's source.
+
+**Note:** The Error Reporting instance will check if the provided API key is invalid shortly after it is instantiated. If the key is invalid, an error-level message will be logged to stdout.
+
## Configuration
The following code snippet lists all available configuration options. All configuration options are optional.
@@ -286,6 +307,7 @@ server.head('/hello/:name', respond);
server.listen(3000);
```
+[api-key]: https://support.google.com/cloud/answer/6158862
[app-default-credentials]: https://developers.google.com/identity/protocols/application-default-credentials
[express-error-docs]: https://expressjs.com/en/guide/error-handling.html
[gcloud-sdk]: https://cloud.google.com/sdk/gcloud/
diff --git a/packages/error-reporting/src/google-apis/auth-client.js b/packages/error-reporting/src/google-apis/auth-client.js
index b7587e4308d..1459e929261 100644
--- a/packages/error-reporting/src/google-apis/auth-client.js
+++ b/packages/error-reporting/src/google-apis/auth-client.js
@@ -27,7 +27,7 @@ var isString = is.string;
var SCOPES = ['https://www.googleapis.com/auth/cloud-platform'];
/* @const {String} Base Error Reporting API */
-var API = 'https://clouderrorreporting.googleapis.com/v1beta1/projects';
+var API = 'https://clouderrorreporting.googleapis.com/v1beta1';
/**
* The RequestHandler constructor initializes several properties on the
@@ -70,6 +70,7 @@ class RequestHandler extends common.Service {
}
return null;
}
+
/**
* No-operation stub function for user callback substitution
* @param {Error|Null} err - the error
@@ -88,26 +89,47 @@ class RequestHandler extends common.Service {
*/
constructor(config, logger) {
var pid = config.getProjectId();
+ // If an API key is provided, do not try to authenticate.
+ var tryAuthenticate = !config.getKey();
super({
packageJson: pkg,
- baseUrl: 'https://clouderrorreporting.googleapis.com/v1beta1/',
+ baseUrl: API,
scopes: SCOPES,
projectId: pid !== null ? pid : undefined,
- projectIdRequired: true
+ projectIdRequired: true,
+ customEndpoint: !tryAuthenticate
}, config);
this._config = config;
this._logger = logger;
var that = this;
- this.authClient.getToken(function(err, token) {
- if (err) {
- that._logger.error([
- 'Unable to find credential information on instance. This library',
- 'will be unable to communicate with the Stackdriver API to save',
- 'errors. Message: ' + err.message
- ].join(' '));
- }
- });
+ if (tryAuthenticate) {
+ this.authClient.getToken(function(err, token) {
+ if (err) {
+ that._logger.error([
+ 'Unable to find credential information on instance. This library',
+ 'will be unable to communicate with the Stackdriver API to save',
+ 'errors. Message: ' + err.message
+ ].join(' '));
+ }
+ });
+ } else {
+ this.request({
+ uri: 'events:report',
+ qs: RequestHandler.manufactureQueryString(this._config.getKey()),
+ method: 'POST',
+ json: {}
+ }, (err, body, response) => {
+      if (err && response && response.statusCode === 400 &&
+          err.message !== 'Message cannot be empty.') {
+ this._logger.error([
+ 'Encountered an error while attempting to validate the provided',
+ 'API key'
+ ].join(' '), err);
+ }
+ });
+ that._logger.info('API key provided; skipping OAuth2 token request.');
+ }
}
/**
* Creates a request options object given the value of the error message and
diff --git a/packages/error-reporting/system-test/testAuthClient.js b/packages/error-reporting/system-test/error-reporting.js
similarity index 94%
rename from packages/error-reporting/system-test/testAuthClient.js
rename to packages/error-reporting/system-test/error-reporting.js
index 251e5cf4ff7..9acb8d90551 100644
--- a/packages/error-reporting/system-test/testAuthClient.js
+++ b/packages/error-reporting/system-test/error-reporting.js
@@ -31,6 +31,7 @@ var forEach = require('lodash.foreach');
var assign = require('lodash.assign');
var pick = require('lodash.pick');
var omitBy = require('lodash.omitby');
+var request = require('request');
var util = require('util');
var path = require('path');
@@ -362,6 +363,41 @@ describe('Expected Behavior', function() {
});
});
+describe('Error Reporting API', function() {
+ [
+ {
+ name: 'when a valid API key is given',
+ getKey: () => env.apiKey,
+ message: 'Message cannot be empty.'
+ },
+ {
+ name: 'when an empty API key is given',
+ getKey: () => '',
+ message: 'The request is missing a valid API key.'
+ },
+ {
+ name: 'when an invalid API key is given',
+ getKey: () => env.apiKey.slice(1) + env.apiKey[0],
+ message: 'API key not valid. Please pass a valid API key.'
+ }
+ ].forEach(function(testCase) {
+ it(`should return an expected message ${testCase.name}`, function(done) {
+ this.timeout(30000);
+ const API = 'https://clouderrorreporting.googleapis.com/v1beta1';
+ const key = testCase.getKey();
+ request.post({
+ url: `${API}/projects/${env.projectId}/events:report?key=${key}`,
+ json: {},
+ }, (err, response, body) => {
+ assert.ok(!err && body.error);
+ assert.strictEqual(response.statusCode, 400);
+ assert.strictEqual(body.error.message, testCase.message);
+ done();
+ });
+ });
+ });
+});
+
describe('error-reporting', function() {
const SRC_ROOT = path.join(__dirname, '..', 'src');
const TIMESTAMP = Date.now();
diff --git a/packages/error-reporting/test/unit/google-apis/auth-client.js b/packages/error-reporting/test/unit/google-apis/auth-client.js
index cda05f05fca..3278a1c6a92 100644
--- a/packages/error-reporting/test/unit/google-apis/auth-client.js
+++ b/packages/error-reporting/test/unit/google-apis/auth-client.js
@@ -20,7 +20,7 @@ var proxyquire = require('proxyquire');
var Configuration = require('../../../src/configuration.js');
-function verifyReportedMessage(errToReturn, expectedMessage) {
+function verifyReportedMessage(config, errToReturn, expectedLogs) {
class ServiceStub {
constructor() {
this.authClient = {
@@ -28,6 +28,7 @@ function verifyReportedMessage(errToReturn, expectedMessage) {
cb(errToReturn);
}
};
+ this.request = function() {};
}
}
@@ -37,28 +38,50 @@ function verifyReportedMessage(errToReturn, expectedMessage) {
}
});
- var message = '';
+ var logs = {};
var logger = {
error: function(text) {
- message += text;
+ if (!logs.error) {
+ logs.error = '';
+ }
+ logs.error += text;
+ },
+ info: function(text) {
+ if (!logs.info) {
+ logs.info = '';
+ }
+ logs.info += text;
}
};
- var config = new Configuration({ ignoreEnvironmentCheck: true }, logger);
+ var config = new Configuration(config, logger);
new RequestHandler(config, logger);
- assert.strictEqual(message, expectedMessage);
+ assert.deepStrictEqual(logs, expectedLogs);
}
-
describe('RequestHandler', function() {
+ it('should not request OAuth2 token if key is provided', function() {
+ var config = {
+ ignoreEnvironmentCheck: true,
+ key: 'key'
+ };
+ var message = 'Made OAuth2 Token Request';
+ verifyReportedMessage(config, new Error(message), {
+ info: 'API key provided; skipping OAuth2 token request.'
+ });
+ });
+
it('should issue a warning if it cannot communicate with the API', function() {
+ var config = { ignoreEnvironmentCheck: true };
var message = 'Test Error';
- verifyReportedMessage(new Error(message),
- 'Unable to find credential information on instance. This library ' +
- 'will be unable to communicate with the Stackdriver API to save ' +
- 'errors. Message: ' + message);
+ verifyReportedMessage(config, new Error(message), {
+ error: 'Unable to find credential information on instance. This ' +
+ 'library will be unable to communicate with the Stackdriver API to ' +
+ 'save errors. Message: ' + message
+ });
});
it('should not issue a warning if it can communicate with the API', function() {
- verifyReportedMessage(null, '');
- verifyReportedMessage(undefined, '');
+ var config = { ignoreEnvironmentCheck: true };
+ verifyReportedMessage(config, null, {});
+ verifyReportedMessage(config, undefined, {});
});
});
diff --git a/packages/language/package.json b/packages/language/package.json
index 1914e3fa009..9225cbcb951 100644
--- a/packages/language/package.json
+++ b/packages/language/package.json
@@ -50,9 +50,8 @@
"Google Cloud Natural Language API"
],
"dependencies": {
- "google-proto-files": "^0.12.0",
- "google-gax": "^0.13.2",
- "extend": "^3.0.0"
+ "extend": "^3.0",
+ "google-gax": "^0.13.5"
},
"devDependencies": {
"mocha": "^3.2.0"
diff --git a/packages/language/protos/google/cloud/language/v1/language_service.proto b/packages/language/protos/google/cloud/language/v1/language_service.proto
new file mode 100644
index 00000000000..6620c2c632f
--- /dev/null
+++ b/packages/language/protos/google/cloud/language/v1/language_service.proto
@@ -0,0 +1,1007 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.language.v1;
+
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/language/v1;language";
+option java_multiple_files = true;
+option java_outer_classname = "LanguageServiceProto";
+option java_package = "com.google.cloud.language.v1";
+
+
+// Provides text analysis operations such as sentiment analysis and entity
+// recognition.
+service LanguageService {
+ // Analyzes the sentiment of the provided text.
+ rpc AnalyzeSentiment(AnalyzeSentimentRequest) returns (AnalyzeSentimentResponse) {
+ option (google.api.http) = { post: "/v1/documents:analyzeSentiment" body: "*" };
+ }
+
+ // Finds named entities (currently proper names and common nouns) in the text
+ // along with entity types, salience, mentions for each entity, and
+ // other properties.
+ rpc AnalyzeEntities(AnalyzeEntitiesRequest) returns (AnalyzeEntitiesResponse) {
+ option (google.api.http) = { post: "/v1/documents:analyzeEntities" body: "*" };
+ }
+
+ // Finds entities, similar to [AnalyzeEntities][google.cloud.language.v1.LanguageService.AnalyzeEntities] in the text and analyzes
+ // sentiment associated with each entity and its mentions.
+ rpc AnalyzeEntitySentiment(AnalyzeEntitySentimentRequest) returns (AnalyzeEntitySentimentResponse) {
+ option (google.api.http) = { post: "/v1/documents:analyzeEntitySentiment" body: "*" };
+ }
+
+ // Analyzes the syntax of the text and provides sentence boundaries and
+ // tokenization along with part of speech tags, dependency trees, and other
+ // properties.
+ rpc AnalyzeSyntax(AnalyzeSyntaxRequest) returns (AnalyzeSyntaxResponse) {
+ option (google.api.http) = { post: "/v1/documents:analyzeSyntax" body: "*" };
+ }
+
+ // A convenience method that provides all the features that analyzeSentiment,
+ // analyzeEntities, and analyzeSyntax provide in one call.
+ rpc AnnotateText(AnnotateTextRequest) returns (AnnotateTextResponse) {
+ option (google.api.http) = { post: "/v1/documents:annotateText" body: "*" };
+ }
+}
+
+// ################################################################ #
+//
+// Represents the input to API methods.
+message Document {
+ // The document types enum.
+ enum Type {
+ // The content type is not specified.
+ TYPE_UNSPECIFIED = 0;
+
+ // Plain text
+ PLAIN_TEXT = 1;
+
+ // HTML
+ HTML = 2;
+ }
+
+ // Required. If the type is not set or is `TYPE_UNSPECIFIED`,
+ // returns an `INVALID_ARGUMENT` error.
+ Type type = 1;
+
+ // The source of the document: a string containing the content or a
+ // Google Cloud Storage URI.
+ oneof source {
+ // The content of the input in string format.
+ string content = 2;
+
+ // The Google Cloud Storage URI where the file content is located.
+ // This URI must be of the form: gs://bucket_name/object_name. For more
+ // details, see https://cloud.google.com/storage/docs/reference-uris.
+ // NOTE: Cloud Storage object versioning is not supported.
+ string gcs_content_uri = 3;
+ }
+
+ // The language of the document (if not specified, the language is
+ // automatically detected). Both ISO and BCP-47 language codes are
+ // accepted.
+ // [Language Support](/natural-language/docs/languages)
+ // lists currently supported languages for each API method.
+ // If the language (either specified by the caller or automatically detected)
+ // is not supported by the called API method, an `INVALID_ARGUMENT` error
+ // is returned.
+ string language = 4;
+}
+
+// Represents a sentence in the input document.
+message Sentence {
+ // The sentence text.
+ TextSpan text = 1;
+
+ // For calls to [AnalyzeSentiment][] or if
+ // [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1.AnnotateTextRequest.Features.extract_document_sentiment] is set to
+ // true, this field will contain the sentiment for the sentence.
+ Sentiment sentiment = 2;
+}
+
+// Represents a phrase in the text that is a known entity, such as
+// a person, an organization, or location. The API associates information, such
+// as salience and mentions, with entities.
+message Entity {
+ // The type of the entity.
+ enum Type {
+ // Unknown
+ UNKNOWN = 0;
+
+ // Person
+ PERSON = 1;
+
+ // Location
+ LOCATION = 2;
+
+ // Organization
+ ORGANIZATION = 3;
+
+ // Event
+ EVENT = 4;
+
+ // Work of art
+ WORK_OF_ART = 5;
+
+ // Consumer goods
+ CONSUMER_GOOD = 6;
+
+ // Other types
+ OTHER = 7;
+ }
+
+ // The representative name for the entity.
+ string name = 1;
+
+ // The entity type.
+ Type type = 2;
+
+ // Metadata associated with the entity.
+ //
+ // Currently, Wikipedia URLs and Knowledge Graph MIDs are provided, if
+ // available. The associated keys are "wikipedia_url" and "mid", respectively.
+  map<string, string> metadata = 3;
+
+ // The salience score associated with the entity in the [0, 1.0] range.
+ //
+ // The salience score for an entity provides information about the
+ // importance or centrality of that entity to the entire document text.
+ // Scores closer to 0 are less salient, while scores closer to 1.0 are highly
+ // salient.
+ float salience = 4;
+
+ // The mentions of this entity in the input document. The API currently
+ // supports proper noun mentions.
+ repeated EntityMention mentions = 5;
+
+ // For calls to [AnalyzeEntitySentiment][] or if
+ // [AnnotateTextRequest.Features.extract_entity_sentiment][google.cloud.language.v1.AnnotateTextRequest.Features.extract_entity_sentiment] is set to
+ // true, this field will contain the aggregate sentiment expressed for this
+ // entity in the provided document.
+ Sentiment sentiment = 6;
+}
+
+// Represents the smallest syntactic building block of the text.
+message Token {
+ // The token text.
+ TextSpan text = 1;
+
+ // Parts of speech tag for this token.
+ PartOfSpeech part_of_speech = 2;
+
+ // Dependency tree parse for this token.
+ DependencyEdge dependency_edge = 3;
+
+ // [Lemma](https://en.wikipedia.org/wiki/Lemma_%28morphology%29) of the token.
+ string lemma = 4;
+}
+
+// Represents the feeling associated with the entire text or entities in
+// the text.
+message Sentiment {
+ // A non-negative number in the [0, +inf) range, which represents
+ // the absolute magnitude of sentiment regardless of score (positive or
+ // negative).
+ float magnitude = 2;
+
+ // Sentiment score between -1.0 (negative sentiment) and 1.0
+ // (positive sentiment).
+ float score = 3;
+}
+
+// Represents part of speech information for a token. Parts of speech
+// are as defined in
+// http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf
+message PartOfSpeech {
+ // The part of speech tags enum.
+ enum Tag {
+ // Unknown
+ UNKNOWN = 0;
+
+ // Adjective
+ ADJ = 1;
+
+ // Adposition (preposition and postposition)
+ ADP = 2;
+
+ // Adverb
+ ADV = 3;
+
+ // Conjunction
+ CONJ = 4;
+
+ // Determiner
+ DET = 5;
+
+ // Noun (common and proper)
+ NOUN = 6;
+
+ // Cardinal number
+ NUM = 7;
+
+ // Pronoun
+ PRON = 8;
+
+ // Particle or other function word
+ PRT = 9;
+
+ // Punctuation
+ PUNCT = 10;
+
+ // Verb (all tenses and modes)
+ VERB = 11;
+
+ // Other: foreign words, typos, abbreviations
+ X = 12;
+
+ // Affix
+ AFFIX = 13;
+ }
+
+ // The characteristic of a verb that expresses time flow during an event.
+ enum Aspect {
+ // Aspect is not applicable in the analyzed language or is not predicted.
+ ASPECT_UNKNOWN = 0;
+
+ // Perfective
+ PERFECTIVE = 1;
+
+ // Imperfective
+ IMPERFECTIVE = 2;
+
+ // Progressive
+ PROGRESSIVE = 3;
+ }
+
+ // The grammatical function performed by a noun or pronoun in a phrase,
+ // clause, or sentence. In some languages, other parts of speech, such as
+ // adjective and determiner, take case inflection in agreement with the noun.
+ enum Case {
+ // Case is not applicable in the analyzed language or is not predicted.
+ CASE_UNKNOWN = 0;
+
+ // Accusative
+ ACCUSATIVE = 1;
+
+ // Adverbial
+ ADVERBIAL = 2;
+
+ // Complementive
+ COMPLEMENTIVE = 3;
+
+ // Dative
+ DATIVE = 4;
+
+ // Genitive
+ GENITIVE = 5;
+
+ // Instrumental
+ INSTRUMENTAL = 6;
+
+ // Locative
+ LOCATIVE = 7;
+
+ // Nominative
+ NOMINATIVE = 8;
+
+ // Oblique
+ OBLIQUE = 9;
+
+ // Partitive
+ PARTITIVE = 10;
+
+ // Prepositional
+ PREPOSITIONAL = 11;
+
+ // Reflexive
+ REFLEXIVE_CASE = 12;
+
+ // Relative
+ RELATIVE_CASE = 13;
+
+ // Vocative
+ VOCATIVE = 14;
+ }
+
+ // Depending on the language, Form can be categorizing different forms of
+ // verbs, adjectives, adverbs, etc. For example, categorizing inflected
+ // endings of verbs and adjectives or distinguishing between short and long
+ // forms of adjectives and participles
+ enum Form {
+ // Form is not applicable in the analyzed language or is not predicted.
+ FORM_UNKNOWN = 0;
+
+ // Adnomial
+ ADNOMIAL = 1;
+
+ // Auxiliary
+ AUXILIARY = 2;
+
+ // Complementizer
+ COMPLEMENTIZER = 3;
+
+ // Final ending
+ FINAL_ENDING = 4;
+
+ // Gerund
+ GERUND = 5;
+
+ // Realis
+ REALIS = 6;
+
+ // Irrealis
+ IRREALIS = 7;
+
+ // Short form
+ SHORT = 8;
+
+ // Long form
+ LONG = 9;
+
+ // Order form
+ ORDER = 10;
+
+ // Specific form
+ SPECIFIC = 11;
+ }
+
+ // Gender classes of nouns reflected in the behaviour of associated words.
+ enum Gender {
+ // Gender is not applicable in the analyzed language or is not predicted.
+ GENDER_UNKNOWN = 0;
+
+ // Feminine
+ FEMININE = 1;
+
+ // Masculine
+ MASCULINE = 2;
+
+ // Neuter
+ NEUTER = 3;
+ }
+
+ // The grammatical feature of verbs, used for showing modality and attitude.
+ enum Mood {
+ // Mood is not applicable in the analyzed language or is not predicted.
+ MOOD_UNKNOWN = 0;
+
+ // Conditional
+ CONDITIONAL_MOOD = 1;
+
+ // Imperative
+ IMPERATIVE = 2;
+
+ // Indicative
+ INDICATIVE = 3;
+
+ // Interrogative
+ INTERROGATIVE = 4;
+
+ // Jussive
+ JUSSIVE = 5;
+
+ // Subjunctive
+ SUBJUNCTIVE = 6;
+ }
+
+ // Count distinctions.
+ enum Number {
+ // Number is not applicable in the analyzed language or is not predicted.
+ NUMBER_UNKNOWN = 0;
+
+ // Singular
+ SINGULAR = 1;
+
+ // Plural
+ PLURAL = 2;
+
+ // Dual
+ DUAL = 3;
+ }
+
+ // The distinction between the speaker, second person, third person, etc.
+ enum Person {
+ // Person is not applicable in the analyzed language or is not predicted.
+ PERSON_UNKNOWN = 0;
+
+ // First
+ FIRST = 1;
+
+ // Second
+ SECOND = 2;
+
+ // Third
+ THIRD = 3;
+
+ // Reflexive
+ REFLEXIVE_PERSON = 4;
+ }
+
+ // This category shows if the token is part of a proper name.
+ enum Proper {
+ // Proper is not applicable in the analyzed language or is not predicted.
+ PROPER_UNKNOWN = 0;
+
+ // Proper
+ PROPER = 1;
+
+ // Not proper
+ NOT_PROPER = 2;
+ }
+
+ // Reciprocal features of a pronoun.
+ enum Reciprocity {
+ // Reciprocity is not applicable in the analyzed language or is not
+ // predicted.
+ RECIPROCITY_UNKNOWN = 0;
+
+ // Reciprocal
+ RECIPROCAL = 1;
+
+ // Non-reciprocal
+ NON_RECIPROCAL = 2;
+ }
+
+ // Time reference.
+ enum Tense {
+ // Tense is not applicable in the analyzed language or is not predicted.
+ TENSE_UNKNOWN = 0;
+
+ // Conditional
+ CONDITIONAL_TENSE = 1;
+
+ // Future
+ FUTURE = 2;
+
+ // Past
+ PAST = 3;
+
+ // Present
+ PRESENT = 4;
+
+ // Imperfect
+ IMPERFECT = 5;
+
+ // Pluperfect
+ PLUPERFECT = 6;
+ }
+
+ // The relationship between the action that a verb expresses and the
+ // participants identified by its arguments.
+ enum Voice {
+ // Voice is not applicable in the analyzed language or is not predicted.
+ VOICE_UNKNOWN = 0;
+
+ // Active
+ ACTIVE = 1;
+
+ // Causative
+ CAUSATIVE = 2;
+
+ // Passive
+ PASSIVE = 3;
+ }
+
+ // The part of speech tag.
+ Tag tag = 1;
+
+ // The grammatical aspect.
+ Aspect aspect = 2;
+
+ // The grammatical case.
+ Case case = 3;
+
+ // The grammatical form.
+ Form form = 4;
+
+ // The grammatical gender.
+ Gender gender = 5;
+
+ // The grammatical mood.
+ Mood mood = 6;
+
+ // The grammatical number.
+ Number number = 7;
+
+ // The grammatical person.
+ Person person = 8;
+
+ // The grammatical properness.
+ Proper proper = 9;
+
+ // The grammatical reciprocity.
+ Reciprocity reciprocity = 10;
+
+ // The grammatical tense.
+ Tense tense = 11;
+
+ // The grammatical voice.
+ Voice voice = 12;
+}
+
+// Represents dependency parse tree information for a token. (For more
+// information on dependency labels, see
+// http://www.aclweb.org/anthology/P13-2017)
+message DependencyEdge {
+ // The parse label enum for the token.
+ enum Label {
+ // Unknown
+ UNKNOWN = 0;
+
+ // Abbreviation modifier
+ ABBREV = 1;
+
+ // Adjectival complement
+ ACOMP = 2;
+
+ // Adverbial clause modifier
+ ADVCL = 3;
+
+ // Adverbial modifier
+ ADVMOD = 4;
+
+ // Adjectival modifier of an NP
+ AMOD = 5;
+
+ // Appositional modifier of an NP
+ APPOS = 6;
+
+ // Attribute dependent of a copular verb
+ ATTR = 7;
+
+ // Auxiliary (non-main) verb
+ AUX = 8;
+
+ // Passive auxiliary
+ AUXPASS = 9;
+
+ // Coordinating conjunction
+ CC = 10;
+
+ // Clausal complement of a verb or adjective
+ CCOMP = 11;
+
+ // Conjunct
+ CONJ = 12;
+
+ // Clausal subject
+ CSUBJ = 13;
+
+ // Clausal passive subject
+ CSUBJPASS = 14;
+
+ // Dependency (unable to determine)
+ DEP = 15;
+
+ // Determiner
+ DET = 16;
+
+ // Discourse
+ DISCOURSE = 17;
+
+ // Direct object
+ DOBJ = 18;
+
+ // Expletive
+ EXPL = 19;
+
+ // Goes with (part of a word in a text not well edited)
+ GOESWITH = 20;
+
+ // Indirect object
+ IOBJ = 21;
+
+ // Marker (word introducing a subordinate clause)
+ MARK = 22;
+
+ // Multi-word expression
+ MWE = 23;
+
+ // Multi-word verbal expression
+ MWV = 24;
+
+ // Negation modifier
+ NEG = 25;
+
+ // Noun compound modifier
+ NN = 26;
+
+ // Noun phrase used as an adverbial modifier
+ NPADVMOD = 27;
+
+ // Nominal subject
+ NSUBJ = 28;
+
+ // Passive nominal subject
+ NSUBJPASS = 29;
+
+ // Numeric modifier of a noun
+ NUM = 30;
+
+ // Element of compound number
+ NUMBER = 31;
+
+ // Punctuation mark
+ P = 32;
+
+ // Parataxis relation
+ PARATAXIS = 33;
+
+ // Participial modifier
+ PARTMOD = 34;
+
+ // The complement of a preposition is a clause
+ PCOMP = 35;
+
+ // Object of a preposition
+ POBJ = 36;
+
+ // Possession modifier
+ POSS = 37;
+
+ // Postverbal negative particle
+ POSTNEG = 38;
+
+ // Predicate complement
+ PRECOMP = 39;
+
+    // Preconjunct
+ PRECONJ = 40;
+
+ // Predeterminer
+ PREDET = 41;
+
+ // Prefix
+ PREF = 42;
+
+ // Prepositional modifier
+ PREP = 43;
+
+ // The relationship between a verb and verbal morpheme
+ PRONL = 44;
+
+ // Particle
+ PRT = 45;
+
+ // Associative or possessive marker
+ PS = 46;
+
+ // Quantifier phrase modifier
+ QUANTMOD = 47;
+
+ // Relative clause modifier
+ RCMOD = 48;
+
+ // Complementizer in relative clause
+ RCMODREL = 49;
+
+ // Ellipsis without a preceding predicate
+ RDROP = 50;
+
+ // Referent
+ REF = 51;
+
+ // Remnant
+ REMNANT = 52;
+
+ // Reparandum
+ REPARANDUM = 53;
+
+ // Root
+ ROOT = 54;
+
+ // Suffix specifying a unit of number
+ SNUM = 55;
+
+ // Suffix
+ SUFF = 56;
+
+ // Temporal modifier
+ TMOD = 57;
+
+ // Topic marker
+ TOPIC = 58;
+
+ // Clause headed by an infinite form of the verb that modifies a noun
+ VMOD = 59;
+
+ // Vocative
+ VOCATIVE = 60;
+
+ // Open clausal complement
+ XCOMP = 61;
+
+ // Name suffix
+ SUFFIX = 62;
+
+ // Name title
+ TITLE = 63;
+
+ // Adverbial phrase modifier
+ ADVPHMOD = 64;
+
+ // Causative auxiliary
+ AUXCAUS = 65;
+
+ // Helper auxiliary
+ AUXVV = 66;
+
+ // Rentaishi (Prenominal modifier)
+ DTMOD = 67;
+
+ // Foreign words
+ FOREIGN = 68;
+
+ // Keyword
+ KW = 69;
+
+ // List for chains of comparable items
+ LIST = 70;
+
+ // Nominalized clause
+ NOMC = 71;
+
+ // Nominalized clausal subject
+ NOMCSUBJ = 72;
+
+ // Nominalized clausal passive
+ NOMCSUBJPASS = 73;
+
+ // Compound of numeric modifier
+ NUMC = 74;
+
+ // Copula
+ COP = 75;
+
+ // Dislocated relation (for fronted/topicalized elements)
+ DISLOCATED = 76;
+
+ // Aspect marker
+ ASP = 77;
+
+ // Genitive modifier
+ GMOD = 78;
+
+ // Genitive object
+ GOBJ = 79;
+
+ // Infinitival modifier
+ INFMOD = 80;
+
+ // Measure
+ MES = 81;
+
+ // Nominal complement of a noun
+ NCOMP = 82;
+ }
+
+ // Represents the head of this token in the dependency tree.
+ // This is the index of the token which has an arc going to this token.
+ // The index is the position of the token in the array of tokens returned
+ // by the API method. If this token is a root token, then the
+ // `head_token_index` is its own index.
+ int32 head_token_index = 1;
+
+ // The parse label for the token.
+ Label label = 2;
+}
+
+// Represents a mention for an entity in the text. Currently, proper noun
+// mentions are supported.
+message EntityMention {
+ // The supported types of mentions.
+ enum Type {
+ // Unknown
+ TYPE_UNKNOWN = 0;
+
+ // Proper name
+ PROPER = 1;
+
+ // Common noun (or noun compound)
+ COMMON = 2;
+ }
+
+ // The mention text.
+ TextSpan text = 1;
+
+ // The type of the entity mention.
+ Type type = 2;
+
+ // For calls to [AnalyzeEntitySentiment][] or if
+ // [AnnotateTextRequest.Features.extract_entity_sentiment][google.cloud.language.v1.AnnotateTextRequest.Features.extract_entity_sentiment] is set to
+ // true, this field will contain the sentiment expressed for this mention of
+ // the entity in the provided document.
+ Sentiment sentiment = 3;
+}
+
+// Represents an output piece of text.
+message TextSpan {
+ // The content of the output text.
+ string content = 1;
+
+ // The API calculates the beginning offset of the content in the original
+ // document according to the [EncodingType][google.cloud.language.v1.EncodingType] specified in the API request.
+ int32 begin_offset = 2;
+}
+
+// The sentiment analysis request message.
+message AnalyzeSentimentRequest {
+ // Input document.
+ Document document = 1;
+
+ // The encoding type used by the API to calculate sentence offsets.
+ EncodingType encoding_type = 2;
+}
+
+// The sentiment analysis response message.
+message AnalyzeSentimentResponse {
+ // The overall sentiment of the input document.
+ Sentiment document_sentiment = 1;
+
+ // The language of the text, which will be the same as the language specified
+ // in the request or, if not specified, the automatically-detected language.
+ // See [Document.language][google.cloud.language.v1.Document.language] field for more details.
+ string language = 2;
+
+ // The sentiment for all the sentences in the document.
+ repeated Sentence sentences = 3;
+}
+
+// The entity-level sentiment analysis request message.
+message AnalyzeEntitySentimentRequest {
+ // Input document.
+ Document document = 1;
+
+ // The encoding type used by the API to calculate offsets.
+ EncodingType encoding_type = 2;
+}
+
+// The entity-level sentiment analysis response message.
+message AnalyzeEntitySentimentResponse {
+ // The recognized entities in the input document with associated sentiments.
+ repeated Entity entities = 1;
+
+ // The language of the text, which will be the same as the language specified
+ // in the request or, if not specified, the automatically-detected language.
+ // See [Document.language][google.cloud.language.v1.Document.language] field for more details.
+ string language = 2;
+}
+
+// The entity analysis request message.
+message AnalyzeEntitiesRequest {
+ // Input document.
+ Document document = 1;
+
+ // The encoding type used by the API to calculate offsets.
+ EncodingType encoding_type = 2;
+}
+
+// The entity analysis response message.
+message AnalyzeEntitiesResponse {
+ // The recognized entities in the input document.
+ repeated Entity entities = 1;
+
+ // The language of the text, which will be the same as the language specified
+ // in the request or, if not specified, the automatically-detected language.
+ // See [Document.language][google.cloud.language.v1.Document.language] field for more details.
+ string language = 2;
+}
+
+// The syntax analysis request message.
+message AnalyzeSyntaxRequest {
+ // Input document.
+ Document document = 1;
+
+ // The encoding type used by the API to calculate offsets.
+ EncodingType encoding_type = 2;
+}
+
+// The syntax analysis response message.
+message AnalyzeSyntaxResponse {
+ // Sentences in the input document.
+ repeated Sentence sentences = 1;
+
+ // Tokens, along with their syntactic information, in the input document.
+ repeated Token tokens = 2;
+
+ // The language of the text, which will be the same as the language specified
+ // in the request or, if not specified, the automatically-detected language.
+ // See [Document.language][google.cloud.language.v1.Document.language] field for more details.
+ string language = 3;
+}
+
+// The request message for the text annotation API, which can perform multiple
+// analysis types (sentiment, entities, and syntax) in one call.
+message AnnotateTextRequest {
+ // All available features for sentiment, syntax, and semantic analysis.
+ // Setting each one to true will enable that specific analysis for the input.
+ message Features {
+ // Extract syntax information.
+ bool extract_syntax = 1;
+
+ // Extract entities.
+ bool extract_entities = 2;
+
+ // Extract document-level sentiment.
+ bool extract_document_sentiment = 3;
+
+ // Extract entities and their associated sentiment.
+ bool extract_entity_sentiment = 4;
+ }
+
+ // Input document.
+ Document document = 1;
+
+ // The enabled features.
+ Features features = 2;
+
+ // The encoding type used by the API to calculate offsets.
+ EncodingType encoding_type = 3;
+}
+
+// The text annotations response message.
+message AnnotateTextResponse {
+ // Sentences in the input document. Populated if the user enables
+ // [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1.AnnotateTextRequest.Features.extract_syntax].
+ repeated Sentence sentences = 1;
+
+ // Tokens, along with their syntactic information, in the input document.
+ // Populated if the user enables
+ // [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1.AnnotateTextRequest.Features.extract_syntax].
+ repeated Token tokens = 2;
+
+ // Entities, along with their semantic information, in the input document.
+ // Populated if the user enables
+ // [AnnotateTextRequest.Features.extract_entities][google.cloud.language.v1.AnnotateTextRequest.Features.extract_entities].
+ repeated Entity entities = 3;
+
+ // The overall sentiment for the document. Populated if the user enables
+ // [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1.AnnotateTextRequest.Features.extract_document_sentiment].
+ Sentiment document_sentiment = 4;
+
+ // The language of the text, which will be the same as the language specified
+ // in the request or, if not specified, the automatically-detected language.
+ // See [Document.language][google.cloud.language.v1.Document.language] field for more details.
+ string language = 5;
+}
+
+// Represents the text encoding that the caller uses to process the output.
+// Providing an `EncodingType` is recommended because the API provides the
+// beginning offsets for various outputs, such as tokens and mentions, and
+// languages that natively use different text encodings may access offsets
+// differently.
+enum EncodingType {
+ // If `EncodingType` is not specified, encoding-dependent information (such as
+ // `begin_offset`) will be set at `-1`.
+ NONE = 0;
+
+ // Encoding-dependent information (such as `begin_offset`) is calculated based
+ // on the UTF-8 encoding of the input. C++ and Go are examples of languages
+ // that use this encoding natively.
+ UTF8 = 1;
+
+ // Encoding-dependent information (such as `begin_offset`) is calculated based
+  // on the UTF-16 encoding of the input. Java and JavaScript are examples of
+ // languages that use this encoding natively.
+ UTF16 = 2;
+
+ // Encoding-dependent information (such as `begin_offset`) is calculated based
+ // on the UTF-32 encoding of the input. Python is an example of a language
+ // that uses this encoding natively.
+ UTF32 = 3;
+}
diff --git a/packages/language/protos/google/cloud/language/v1beta2/language_service.proto b/packages/language/protos/google/cloud/language/v1beta2/language_service.proto
new file mode 100644
index 00000000000..54c6638cd88
--- /dev/null
+++ b/packages/language/protos/google/cloud/language/v1beta2/language_service.proto
@@ -0,0 +1,1040 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.language.v1beta2;
+
+import "google/api/annotations.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/language/v1beta2;language";
+option java_multiple_files = true;
+option java_outer_classname = "LanguageServiceProto";
+option java_package = "com.google.cloud.language.v1beta2";
+
+
+// Provides text analysis operations such as sentiment analysis and entity
+// recognition.
+service LanguageService {
+ // Analyzes the sentiment of the provided text.
+ rpc AnalyzeSentiment(AnalyzeSentimentRequest) returns (AnalyzeSentimentResponse) {
+ option (google.api.http) = { post: "/v1beta2/documents:analyzeSentiment" body: "*" };
+ }
+
+ // Finds named entities (currently proper names and common nouns) in the text
+ // along with entity types, salience, mentions for each entity, and
+ // other properties.
+ rpc AnalyzeEntities(AnalyzeEntitiesRequest) returns (AnalyzeEntitiesResponse) {
+ option (google.api.http) = { post: "/v1beta2/documents:analyzeEntities" body: "*" };
+ }
+
+ // Finds entities, similar to [AnalyzeEntities][google.cloud.language.v1beta2.LanguageService.AnalyzeEntities] in the text and analyzes
+ // sentiment associated with each entity and its mentions.
+ rpc AnalyzeEntitySentiment(AnalyzeEntitySentimentRequest) returns (AnalyzeEntitySentimentResponse) {
+ option (google.api.http) = { post: "/v1beta2/documents:analyzeEntitySentiment" body: "*" };
+ }
+
+ // Analyzes the syntax of the text and provides sentence boundaries and
+ // tokenization along with part of speech tags, dependency trees, and other
+ // properties.
+ rpc AnalyzeSyntax(AnalyzeSyntaxRequest) returns (AnalyzeSyntaxResponse) {
+ option (google.api.http) = { post: "/v1beta2/documents:analyzeSyntax" body: "*" };
+ }
+
+ // Classifies a document into categories.
+ rpc ClassifyText(ClassifyTextRequest) returns (ClassifyTextResponse) {
+ option (google.api.http) = { post: "/v1beta2/documents:classifyText" body: "*" };
+ }
+
+ // A convenience method that provides all syntax, sentiment, entity, and
+ // classification features in one call.
+ rpc AnnotateText(AnnotateTextRequest) returns (AnnotateTextResponse) {
+ option (google.api.http) = { post: "/v1beta2/documents:annotateText" body: "*" };
+ }
+}
+
+// ################################################################ #
+//
+// Represents the input to API methods.
+message Document {
+ // The document types enum.
+ enum Type {
+ // The content type is not specified.
+ TYPE_UNSPECIFIED = 0;
+
+ // Plain text
+ PLAIN_TEXT = 1;
+
+ // HTML
+ HTML = 2;
+ }
+
+ // Required. If the type is not set or is `TYPE_UNSPECIFIED`,
+ // returns an `INVALID_ARGUMENT` error.
+ Type type = 1;
+
+ // The source of the document: a string containing the content or a
+ // Google Cloud Storage URI.
+ oneof source {
+ // The content of the input in string format.
+ string content = 2;
+
+ // The Google Cloud Storage URI where the file content is located.
+ // This URI must be of the form: gs://bucket_name/object_name. For more
+ // details, see https://cloud.google.com/storage/docs/reference-uris.
+ // NOTE: Cloud Storage object versioning is not supported.
+ string gcs_content_uri = 3;
+ }
+
+ // The language of the document (if not specified, the language is
+ // automatically detected). Both ISO and BCP-47 language codes are
+ // accepted.
+ // [Language Support](/natural-language/docs/languages)
+ // lists currently supported languages for each API method.
+ // If the language (either specified by the caller or automatically detected)
+ // is not supported by the called API method, an `INVALID_ARGUMENT` error
+ // is returned.
+ string language = 4;
+}
+
+// Represents a sentence in the input document.
+message Sentence {
+ // The sentence text.
+ TextSpan text = 1;
+
+ // For calls to [AnalyzeSentiment][] or if
+ // [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_document_sentiment] is set to
+ // true, this field will contain the sentiment for the sentence.
+ Sentiment sentiment = 2;
+}
+
+// Represents a phrase in the text that is a known entity, such as
+// a person, an organization, or location. The API associates information, such
+// as salience and mentions, with entities.
+message Entity {
+ // The type of the entity.
+ enum Type {
+ // Unknown
+ UNKNOWN = 0;
+
+ // Person
+ PERSON = 1;
+
+ // Location
+ LOCATION = 2;
+
+ // Organization
+ ORGANIZATION = 3;
+
+ // Event
+ EVENT = 4;
+
+ // Work of art
+ WORK_OF_ART = 5;
+
+ // Consumer goods
+ CONSUMER_GOOD = 6;
+
+ // Other types
+ OTHER = 7;
+ }
+
+ // The representative name for the entity.
+ string name = 1;
+
+ // The entity type.
+ Type type = 2;
+
+ // Metadata associated with the entity.
+ //
+ // Currently, Wikipedia URLs and Knowledge Graph MIDs are provided, if
+ // available. The associated keys are "wikipedia_url" and "mid", respectively.
+ map metadata = 3;
+
+ // The salience score associated with the entity in the [0, 1.0] range.
+ //
+ // The salience score for an entity provides information about the
+ // importance or centrality of that entity to the entire document text.
+ // Scores closer to 0 are less salient, while scores closer to 1.0 are highly
+ // salient.
+ float salience = 4;
+
+ // The mentions of this entity in the input document. The API currently
+ // supports proper noun mentions.
+ repeated EntityMention mentions = 5;
+
+ // For calls to [AnalyzeEntitySentiment][] or if
+ // [AnnotateTextRequest.Features.extract_entity_sentiment][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_entity_sentiment] is set to
+ // true, this field will contain the aggregate sentiment expressed for this
+ // entity in the provided document.
+ Sentiment sentiment = 6;
+}
+
+// Represents the smallest syntactic building block of the text.
+message Token {
+ // The token text.
+ TextSpan text = 1;
+
+ // Parts of speech tag for this token.
+ PartOfSpeech part_of_speech = 2;
+
+ // Dependency tree parse for this token.
+ DependencyEdge dependency_edge = 3;
+
+ // [Lemma](https://en.wikipedia.org/wiki/Lemma_%28morphology%29) of the token.
+ string lemma = 4;
+}
+
+// Represents the feeling associated with the entire text or entities in
+// the text.
+message Sentiment {
+ // A non-negative number in the [0, +inf) range, which represents
+ // the absolute magnitude of sentiment regardless of score (positive or
+ // negative).
+ float magnitude = 2;
+
+ // Sentiment score between -1.0 (negative sentiment) and 1.0
+ // (positive sentiment).
+ float score = 3;
+}
+
+// Represents part of speech information for a token.
+message PartOfSpeech {
+ // The part of speech tags enum.
+ enum Tag {
+ // Unknown
+ UNKNOWN = 0;
+
+ // Adjective
+ ADJ = 1;
+
+ // Adposition (preposition and postposition)
+ ADP = 2;
+
+ // Adverb
+ ADV = 3;
+
+ // Conjunction
+ CONJ = 4;
+
+ // Determiner
+ DET = 5;
+
+ // Noun (common and proper)
+ NOUN = 6;
+
+ // Cardinal number
+ NUM = 7;
+
+ // Pronoun
+ PRON = 8;
+
+ // Particle or other function word
+ PRT = 9;
+
+ // Punctuation
+ PUNCT = 10;
+
+ // Verb (all tenses and modes)
+ VERB = 11;
+
+ // Other: foreign words, typos, abbreviations
+ X = 12;
+
+ // Affix
+ AFFIX = 13;
+ }
+
+ // The characteristic of a verb that expresses time flow during an event.
+ enum Aspect {
+ // Aspect is not applicable in the analyzed language or is not predicted.
+ ASPECT_UNKNOWN = 0;
+
+ // Perfective
+ PERFECTIVE = 1;
+
+ // Imperfective
+ IMPERFECTIVE = 2;
+
+ // Progressive
+ PROGRESSIVE = 3;
+ }
+
+ // The grammatical function performed by a noun or pronoun in a phrase,
+ // clause, or sentence. In some languages, other parts of speech, such as
+ // adjective and determiner, take case inflection in agreement with the noun.
+ enum Case {
+ // Case is not applicable in the analyzed language or is not predicted.
+ CASE_UNKNOWN = 0;
+
+ // Accusative
+ ACCUSATIVE = 1;
+
+ // Adverbial
+ ADVERBIAL = 2;
+
+ // Complementive
+ COMPLEMENTIVE = 3;
+
+ // Dative
+ DATIVE = 4;
+
+ // Genitive
+ GENITIVE = 5;
+
+ // Instrumental
+ INSTRUMENTAL = 6;
+
+ // Locative
+ LOCATIVE = 7;
+
+ // Nominative
+ NOMINATIVE = 8;
+
+ // Oblique
+ OBLIQUE = 9;
+
+ // Partitive
+ PARTITIVE = 10;
+
+ // Prepositional
+ PREPOSITIONAL = 11;
+
+ // Reflexive
+ REFLEXIVE_CASE = 12;
+
+ // Relative
+ RELATIVE_CASE = 13;
+
+ // Vocative
+ VOCATIVE = 14;
+ }
+
+ // Depending on the language, Form can be categorizing different forms of
+ // verbs, adjectives, adverbs, etc. For example, categorizing inflected
+ // endings of verbs and adjectives or distinguishing between short and long
+  // forms of adjectives and participles.
+ enum Form {
+ // Form is not applicable in the analyzed language or is not predicted.
+ FORM_UNKNOWN = 0;
+
+ // Adnomial
+ ADNOMIAL = 1;
+
+ // Auxiliary
+ AUXILIARY = 2;
+
+ // Complementizer
+ COMPLEMENTIZER = 3;
+
+ // Final ending
+ FINAL_ENDING = 4;
+
+ // Gerund
+ GERUND = 5;
+
+ // Realis
+ REALIS = 6;
+
+ // Irrealis
+ IRREALIS = 7;
+
+ // Short form
+ SHORT = 8;
+
+ // Long form
+ LONG = 9;
+
+ // Order form
+ ORDER = 10;
+
+ // Specific form
+ SPECIFIC = 11;
+ }
+
+ // Gender classes of nouns reflected in the behaviour of associated words.
+ enum Gender {
+ // Gender is not applicable in the analyzed language or is not predicted.
+ GENDER_UNKNOWN = 0;
+
+ // Feminine
+ FEMININE = 1;
+
+ // Masculine
+ MASCULINE = 2;
+
+ // Neuter
+ NEUTER = 3;
+ }
+
+ // The grammatical feature of verbs, used for showing modality and attitude.
+ enum Mood {
+ // Mood is not applicable in the analyzed language or is not predicted.
+ MOOD_UNKNOWN = 0;
+
+ // Conditional
+ CONDITIONAL_MOOD = 1;
+
+ // Imperative
+ IMPERATIVE = 2;
+
+ // Indicative
+ INDICATIVE = 3;
+
+ // Interrogative
+ INTERROGATIVE = 4;
+
+ // Jussive
+ JUSSIVE = 5;
+
+ // Subjunctive
+ SUBJUNCTIVE = 6;
+ }
+
+ // Count distinctions.
+ enum Number {
+ // Number is not applicable in the analyzed language or is not predicted.
+ NUMBER_UNKNOWN = 0;
+
+ // Singular
+ SINGULAR = 1;
+
+ // Plural
+ PLURAL = 2;
+
+ // Dual
+ DUAL = 3;
+ }
+
+ // The distinction between the speaker, second person, third person, etc.
+ enum Person {
+ // Person is not applicable in the analyzed language or is not predicted.
+ PERSON_UNKNOWN = 0;
+
+ // First
+ FIRST = 1;
+
+ // Second
+ SECOND = 2;
+
+ // Third
+ THIRD = 3;
+
+ // Reflexive
+ REFLEXIVE_PERSON = 4;
+ }
+
+ // This category shows if the token is part of a proper name.
+ enum Proper {
+ // Proper is not applicable in the analyzed language or is not predicted.
+ PROPER_UNKNOWN = 0;
+
+ // Proper
+ PROPER = 1;
+
+ // Not proper
+ NOT_PROPER = 2;
+ }
+
+ // Reciprocal features of a pronoun.
+ enum Reciprocity {
+ // Reciprocity is not applicable in the analyzed language or is not
+ // predicted.
+ RECIPROCITY_UNKNOWN = 0;
+
+ // Reciprocal
+ RECIPROCAL = 1;
+
+ // Non-reciprocal
+ NON_RECIPROCAL = 2;
+ }
+
+ // Time reference.
+ enum Tense {
+ // Tense is not applicable in the analyzed language or is not predicted.
+ TENSE_UNKNOWN = 0;
+
+ // Conditional
+ CONDITIONAL_TENSE = 1;
+
+ // Future
+ FUTURE = 2;
+
+ // Past
+ PAST = 3;
+
+ // Present
+ PRESENT = 4;
+
+ // Imperfect
+ IMPERFECT = 5;
+
+ // Pluperfect
+ PLUPERFECT = 6;
+ }
+
+ // The relationship between the action that a verb expresses and the
+ // participants identified by its arguments.
+ enum Voice {
+ // Voice is not applicable in the analyzed language or is not predicted.
+ VOICE_UNKNOWN = 0;
+
+ // Active
+ ACTIVE = 1;
+
+ // Causative
+ CAUSATIVE = 2;
+
+ // Passive
+ PASSIVE = 3;
+ }
+
+ // The part of speech tag.
+ Tag tag = 1;
+
+ // The grammatical aspect.
+ Aspect aspect = 2;
+
+ // The grammatical case.
+ Case case = 3;
+
+ // The grammatical form.
+ Form form = 4;
+
+ // The grammatical gender.
+ Gender gender = 5;
+
+ // The grammatical mood.
+ Mood mood = 6;
+
+ // The grammatical number.
+ Number number = 7;
+
+ // The grammatical person.
+ Person person = 8;
+
+ // The grammatical properness.
+ Proper proper = 9;
+
+ // The grammatical reciprocity.
+ Reciprocity reciprocity = 10;
+
+ // The grammatical tense.
+ Tense tense = 11;
+
+ // The grammatical voice.
+ Voice voice = 12;
+}
+
+// Represents dependency parse tree information for a token.
+message DependencyEdge {
+ // The parse label enum for the token.
+ enum Label {
+ // Unknown
+ UNKNOWN = 0;
+
+ // Abbreviation modifier
+ ABBREV = 1;
+
+ // Adjectival complement
+ ACOMP = 2;
+
+ // Adverbial clause modifier
+ ADVCL = 3;
+
+ // Adverbial modifier
+ ADVMOD = 4;
+
+ // Adjectival modifier of an NP
+ AMOD = 5;
+
+ // Appositional modifier of an NP
+ APPOS = 6;
+
+ // Attribute dependent of a copular verb
+ ATTR = 7;
+
+ // Auxiliary (non-main) verb
+ AUX = 8;
+
+ // Passive auxiliary
+ AUXPASS = 9;
+
+ // Coordinating conjunction
+ CC = 10;
+
+ // Clausal complement of a verb or adjective
+ CCOMP = 11;
+
+ // Conjunct
+ CONJ = 12;
+
+ // Clausal subject
+ CSUBJ = 13;
+
+ // Clausal passive subject
+ CSUBJPASS = 14;
+
+ // Dependency (unable to determine)
+ DEP = 15;
+
+ // Determiner
+ DET = 16;
+
+ // Discourse
+ DISCOURSE = 17;
+
+ // Direct object
+ DOBJ = 18;
+
+ // Expletive
+ EXPL = 19;
+
+ // Goes with (part of a word in a text not well edited)
+ GOESWITH = 20;
+
+ // Indirect object
+ IOBJ = 21;
+
+ // Marker (word introducing a subordinate clause)
+ MARK = 22;
+
+ // Multi-word expression
+ MWE = 23;
+
+ // Multi-word verbal expression
+ MWV = 24;
+
+ // Negation modifier
+ NEG = 25;
+
+ // Noun compound modifier
+ NN = 26;
+
+ // Noun phrase used as an adverbial modifier
+ NPADVMOD = 27;
+
+ // Nominal subject
+ NSUBJ = 28;
+
+ // Passive nominal subject
+ NSUBJPASS = 29;
+
+ // Numeric modifier of a noun
+ NUM = 30;
+
+ // Element of compound number
+ NUMBER = 31;
+
+ // Punctuation mark
+ P = 32;
+
+ // Parataxis relation
+ PARATAXIS = 33;
+
+ // Participial modifier
+ PARTMOD = 34;
+
+ // The complement of a preposition is a clause
+ PCOMP = 35;
+
+ // Object of a preposition
+ POBJ = 36;
+
+ // Possession modifier
+ POSS = 37;
+
+ // Postverbal negative particle
+ POSTNEG = 38;
+
+ // Predicate complement
+ PRECOMP = 39;
+
+    // Preconjunct
+ PRECONJ = 40;
+
+ // Predeterminer
+ PREDET = 41;
+
+ // Prefix
+ PREF = 42;
+
+ // Prepositional modifier
+ PREP = 43;
+
+ // The relationship between a verb and verbal morpheme
+ PRONL = 44;
+
+ // Particle
+ PRT = 45;
+
+ // Associative or possessive marker
+ PS = 46;
+
+ // Quantifier phrase modifier
+ QUANTMOD = 47;
+
+ // Relative clause modifier
+ RCMOD = 48;
+
+ // Complementizer in relative clause
+ RCMODREL = 49;
+
+ // Ellipsis without a preceding predicate
+ RDROP = 50;
+
+ // Referent
+ REF = 51;
+
+ // Remnant
+ REMNANT = 52;
+
+ // Reparandum
+ REPARANDUM = 53;
+
+ // Root
+ ROOT = 54;
+
+ // Suffix specifying a unit of number
+ SNUM = 55;
+
+ // Suffix
+ SUFF = 56;
+
+ // Temporal modifier
+ TMOD = 57;
+
+ // Topic marker
+ TOPIC = 58;
+
+ // Clause headed by an infinite form of the verb that modifies a noun
+ VMOD = 59;
+
+ // Vocative
+ VOCATIVE = 60;
+
+ // Open clausal complement
+ XCOMP = 61;
+
+ // Name suffix
+ SUFFIX = 62;
+
+ // Name title
+ TITLE = 63;
+
+ // Adverbial phrase modifier
+ ADVPHMOD = 64;
+
+ // Causative auxiliary
+ AUXCAUS = 65;
+
+ // Helper auxiliary
+ AUXVV = 66;
+
+ // Rentaishi (Prenominal modifier)
+ DTMOD = 67;
+
+ // Foreign words
+ FOREIGN = 68;
+
+ // Keyword
+ KW = 69;
+
+ // List for chains of comparable items
+ LIST = 70;
+
+ // Nominalized clause
+ NOMC = 71;
+
+ // Nominalized clausal subject
+ NOMCSUBJ = 72;
+
+ // Nominalized clausal passive
+ NOMCSUBJPASS = 73;
+
+ // Compound of numeric modifier
+ NUMC = 74;
+
+ // Copula
+ COP = 75;
+
+ // Dislocated relation (for fronted/topicalized elements)
+ DISLOCATED = 76;
+
+ // Aspect marker
+ ASP = 77;
+
+ // Genitive modifier
+ GMOD = 78;
+
+ // Genitive object
+ GOBJ = 79;
+
+ // Infinitival modifier
+ INFMOD = 80;
+
+ // Measure
+ MES = 81;
+
+ // Nominal complement of a noun
+ NCOMP = 82;
+ }
+
+ // Represents the head of this token in the dependency tree.
+ // This is the index of the token which has an arc going to this token.
+ // The index is the position of the token in the array of tokens returned
+ // by the API method. If this token is a root token, then the
+ // `head_token_index` is its own index.
+ int32 head_token_index = 1;
+
+ // The parse label for the token.
+ Label label = 2;
+}
+
+// Represents a mention for an entity in the text. Currently, proper noun
+// mentions are supported.
+message EntityMention {
+ // The supported types of mentions.
+ enum Type {
+ // Unknown
+ TYPE_UNKNOWN = 0;
+
+ // Proper name
+ PROPER = 1;
+
+ // Common noun (or noun compound)
+ COMMON = 2;
+ }
+
+ // The mention text.
+ TextSpan text = 1;
+
+ // The type of the entity mention.
+ Type type = 2;
+
+ // For calls to [AnalyzeEntitySentiment][] or if
+ // [AnnotateTextRequest.Features.extract_entity_sentiment][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_entity_sentiment] is set to
+ // true, this field will contain the sentiment expressed for this mention of
+ // the entity in the provided document.
+ Sentiment sentiment = 3;
+}
+
+// Represents an output piece of text.
+message TextSpan {
+ // The content of the output text.
+ string content = 1;
+
+ // The API calculates the beginning offset of the content in the original
+ // document according to the [EncodingType][google.cloud.language.v1beta2.EncodingType] specified in the API request.
+ int32 begin_offset = 2;
+}
+
+// Represents a category returned from the text classifier.
+message ClassificationCategory {
+ // The name of the category representing the document.
+ string name = 1;
+
+ // The classifier's confidence of the category. Number represents how certain
+ // the classifier is that this category represents the given text.
+ float confidence = 2;
+}
+
+// The sentiment analysis request message.
+message AnalyzeSentimentRequest {
+ // Input document.
+ Document document = 1;
+
+ // The encoding type used by the API to calculate sentence offsets for the
+ // sentence sentiment.
+ EncodingType encoding_type = 2;
+}
+
+// The sentiment analysis response message.
+message AnalyzeSentimentResponse {
+ // The overall sentiment of the input document.
+ Sentiment document_sentiment = 1;
+
+ // The language of the text, which will be the same as the language specified
+ // in the request or, if not specified, the automatically-detected language.
+ // See [Document.language][google.cloud.language.v1beta2.Document.language] field for more details.
+ string language = 2;
+
+ // The sentiment for all the sentences in the document.
+ repeated Sentence sentences = 3;
+}
+
+// The entity-level sentiment analysis request message.
+message AnalyzeEntitySentimentRequest {
+ // Input document.
+ Document document = 1;
+
+ // The encoding type used by the API to calculate offsets.
+ EncodingType encoding_type = 2;
+}
+
+// The entity-level sentiment analysis response message.
+message AnalyzeEntitySentimentResponse {
+ // The recognized entities in the input document with associated sentiments.
+ repeated Entity entities = 1;
+
+ // The language of the text, which will be the same as the language specified
+ // in the request or, if not specified, the automatically-detected language.
+ // See [Document.language][google.cloud.language.v1beta2.Document.language] field for more details.
+ string language = 2;
+}
+
+// The entity analysis request message.
+message AnalyzeEntitiesRequest {
+ // Input document.
+ Document document = 1;
+
+ // The encoding type used by the API to calculate offsets.
+ EncodingType encoding_type = 2;
+}
+
+// The entity analysis response message.
+message AnalyzeEntitiesResponse {
+ // The recognized entities in the input document.
+ repeated Entity entities = 1;
+
+ // The language of the text, which will be the same as the language specified
+ // in the request or, if not specified, the automatically-detected language.
+ // See [Document.language][google.cloud.language.v1beta2.Document.language] field for more details.
+ string language = 2;
+}
+
+// The syntax analysis request message.
+message AnalyzeSyntaxRequest {
+ // Input document.
+ Document document = 1;
+
+ // The encoding type used by the API to calculate offsets.
+ EncodingType encoding_type = 2;
+}
+
+// The syntax analysis response message.
+message AnalyzeSyntaxResponse {
+ // Sentences in the input document.
+ repeated Sentence sentences = 1;
+
+ // Tokens, along with their syntactic information, in the input document.
+ repeated Token tokens = 2;
+
+ // The language of the text, which will be the same as the language specified
+ // in the request or, if not specified, the automatically-detected language.
+ // See [Document.language][google.cloud.language.v1beta2.Document.language] field for more details.
+ string language = 3;
+}
+
+// The document classification request message.
+message ClassifyTextRequest {
+ // Input document.
+ Document document = 1;
+}
+
+// The document classification response message.
+message ClassifyTextResponse {
+ // Categories representing the input document.
+ repeated ClassificationCategory categories = 1;
+}
+
+// The request message for the text annotation API, which can perform multiple
+// analysis types (sentiment, entities, and syntax) in one call.
+message AnnotateTextRequest {
+ // All available features for sentiment, syntax, and semantic analysis.
+ // Setting each one to true will enable that specific analysis for the input.
+ message Features {
+ // Extract syntax information.
+ bool extract_syntax = 1;
+
+ // Extract entities.
+ bool extract_entities = 2;
+
+ // Extract document-level sentiment.
+ bool extract_document_sentiment = 3;
+
+ // Extract entities and their associated sentiment.
+ bool extract_entity_sentiment = 4;
+
+ // Classify the full document into categories.
+ bool classify_text = 6;
+ }
+
+ // Input document.
+ Document document = 1;
+
+ // The enabled features.
+ Features features = 2;
+
+ // The encoding type used by the API to calculate offsets.
+ EncodingType encoding_type = 3;
+}
+
+// The text annotations response message.
+message AnnotateTextResponse {
+ // Sentences in the input document. Populated if the user enables
+ // [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_syntax].
+ repeated Sentence sentences = 1;
+
+ // Tokens, along with their syntactic information, in the input document.
+ // Populated if the user enables
+ // [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_syntax].
+ repeated Token tokens = 2;
+
+ // Entities, along with their semantic information, in the input document.
+ // Populated if the user enables
+ // [AnnotateTextRequest.Features.extract_entities][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_entities].
+ repeated Entity entities = 3;
+
+ // The overall sentiment for the document. Populated if the user enables
+ // [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_document_sentiment].
+ Sentiment document_sentiment = 4;
+
+ // The language of the text, which will be the same as the language specified
+ // in the request or, if not specified, the automatically-detected language.
+ // See [Document.language][google.cloud.language.v1beta2.Document.language] field for more details.
+ string language = 5;
+
+ // Categories identified in the input document.
+ repeated ClassificationCategory categories = 6;
+}
+
+// Represents the text encoding that the caller uses to process the output.
+// Providing an `EncodingType` is recommended because the API provides the
+// beginning offsets for various outputs, such as tokens and mentions, and
+// languages that natively use different text encodings may access offsets
+// differently.
+enum EncodingType {
+ // If `EncodingType` is not specified, encoding-dependent information (such as
+ // `begin_offset`) will be set at `-1`.
+ NONE = 0;
+
+ // Encoding-dependent information (such as `begin_offset`) is calculated based
+ // on the UTF-8 encoding of the input. C++ and Go are examples of languages
+ // that use this encoding natively.
+ UTF8 = 1;
+
+ // Encoding-dependent information (such as `begin_offset`) is calculated based
+  // on the UTF-16 encoding of the input. Java and JavaScript are examples of
+ // languages that use this encoding natively.
+ UTF16 = 2;
+
+ // Encoding-dependent information (such as `begin_offset`) is calculated based
+ // on the UTF-32 encoding of the input. Python is an example of a language
+ // that uses this encoding natively.
+ UTF32 = 3;
+}
diff --git a/packages/language/src/index.js b/packages/language/src/index.js
index 7919b904585..299b668726b 100644
--- a/packages/language/src/index.js
+++ b/packages/language/src/index.js
@@ -14,35 +14,49 @@
* limitations under the License.
*/
-/*!
- * @module language
- * @name Language
- */
-
+/*!
+ * @module language
+ * @name Language
+ */
'use strict';
var extend = require('extend');
var gapic = {
v1: require('./v1'),
- v1beta2: require('./v1beta2')
+ v1beta2: require('./v1beta2'),
};
var gaxGrpc = require('google-gax').grpc();
+var path = require('path');
const VERSION = require('../package.json').version;
/**
- * Create a V1 languageServiceClient with additional helpers for common
+ * Create a languageServiceClient with additional helpers for common
* tasks.
*
* Provides text analysis operations such as sentiment analysis and entity
* recognition.
*
- * @constructor
- * @alias module:language
- *
* @param {object=} options - [Configuration object](#/docs).
+ * @param {object=} options.credentials - Credentials object.
+ * @param {string=} options.credentials.client_email
+ * @param {string=} options.credentials.private_key
+ * @param {string=} options.email - Account email address. Required when using a
+ * .pem or .p12 keyFilename.
+ * @param {string=} options.keyFilename - Full path to a .json, .pem, or
+ * .p12 key downloaded from the Google Developers Console. If you provide
+ * a path to a JSON file, the projectId option above is not necessary.
+ * NOTE: .pem and .p12 require you to specify options.email as well.
* @param {number=} options.port - The port on which to connect to
* the remote host.
+ * @param {string=} options.projectId - The project ID from the Google
+ * Developer's Console, e.g. 'grape-spaceship-123'. We will also check
+ * the environment variable GCLOUD_PROJECT for your project ID. If your
+ * app is running in an environment which supports
+ * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials},
+ * your project ID will be detected automatically.
+ * @param {function=} options.promise - Custom promise module to use instead
+ * of native Promises.
* @param {string=} options.servicePath - The domain name of the
* API remote host.
*/
@@ -58,16 +72,41 @@ function languageV1(options) {
return client;
}
+var v1Protos = {};
+
+extend(v1Protos, gaxGrpc.loadProto(
+ path.join(__dirname, '..', 'protos',
+ 'google/cloud/language/v1/language_service.proto')
+).google.cloud.language.v1);
+
+
/**
- * Create a V1beta2 languageServiceClient with additional helpers for common
+ * Create a languageServiceClient with additional helpers for common
* tasks.
*
* Provides text analysis operations such as sentiment analysis and entity
* recognition.
*
* @param {object=} options - [Configuration object](#/docs).
+ * @param {object=} options.credentials - Credentials object.
+ * @param {string=} options.credentials.client_email
+ * @param {string=} options.credentials.private_key
+ * @param {string=} options.email - Account email address. Required when using a
+ * .pem or .p12 keyFilename.
+ * @param {string=} options.keyFilename - Full path to a .json, .pem, or
+ * .p12 key downloaded from the Google Developers Console. If you provide
+ * a path to a JSON file, the projectId option above is not necessary.
+ * NOTE: .pem and .p12 require you to specify options.email as well.
* @param {number=} options.port - The port on which to connect to
* the remote host.
+ * @param {string=} options.projectId - The project ID from the Google
+ * Developer's Console, e.g. 'grape-spaceship-123'. We will also check
+ * the environment variable GCLOUD_PROJECT for your project ID. If your
+ * app is running in an environment which supports
+ * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials},
+ * your project ID will be detected automatically.
+ * @param {function=} options.promise - Custom promise module to use instead
+ * of native Promises.
* @param {string=} options.servicePath - The domain name of the
* API remote host.
*/
@@ -83,19 +122,13 @@ function languageV1beta2(options) {
return client;
}
-var v1Protos = {};
-
-extend(v1Protos, gaxGrpc.load([{
- root: require('google-proto-files')('..'),
- file: 'google/cloud/language/v1/language_service.proto'
-}]).google.cloud.language.v1);
-
var v1beta2Protos = {};
-extend(v1beta2Protos, gaxGrpc.load([{
- root: require('google-proto-files')('..'),
- file: 'google/cloud/language/v1beta2/language_service.proto'
-}]).google.cloud.language.v1beta2);
+extend(v1beta2Protos, gaxGrpc.loadProto(
+ path.join(__dirname, '..', 'protos',
+ 'google/cloud/language/v1beta2/language_service.proto')
+).google.cloud.language.v1beta2);
+
module.exports = languageV1;
module.exports.types = v1Protos;
diff --git a/packages/language/src/v1/doc/doc_language_service.js b/packages/language/src/v1/doc/doc_language_service.js
index afc86db9e26..9982df7a0e1 100644
--- a/packages/language/src/v1/doc/doc_language_service.js
+++ b/packages/language/src/v1/doc/doc_language_service.js
@@ -134,6 +134,14 @@ var Sentence = {
*
* This object should have the same structure as [EntityMention]{@link EntityMention}
*
+ * @property {Object} sentiment
+ * For calls to {@link AnalyzeEntitySentiment} or if
+ * {@link AnnotateTextRequest.Features.extract_entity_sentiment} is set to
+ * true, this field will contain the aggregate sentiment expressed for this
+ * entity in the provided document.
+ *
+ * This object should have the same structure as [Sentiment]{@link Sentiment}
+ *
* @class
* @see [google.cloud.language.v1.Entity definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/language/v1/language_service.proto}
*/
@@ -1235,7 +1243,37 @@ var DependencyEdge = {
/**
* Dislocated relation (for fronted/topicalized elements)
*/
- DISLOCATED: 76
+ DISLOCATED: 76,
+
+ /**
+ * Aspect marker
+ */
+ ASP: 77,
+
+ /**
+ * Genitive modifier
+ */
+ GMOD: 78,
+
+ /**
+ * Genitive object
+ */
+ GOBJ: 79,
+
+ /**
+ * Infinitival modifier
+ */
+ INFMOD: 80,
+
+ /**
+ * Measure
+ */
+ MES: 81,
+
+ /**
+ * Nominal complement of a noun
+ */
+ NCOMP: 82
}
};
@@ -1253,6 +1291,14 @@ var DependencyEdge = {
*
* The number should be among the values of [Type]{@link Type}
*
+ * @property {Object} sentiment
+ * For calls to {@link AnalyzeEntitySentiment} or if
+ * {@link AnnotateTextRequest.Features.extract_entity_sentiment} is set to
+ * true, this field will contain the sentiment expressed for this mention of
+ * the entity in the provided document.
+ *
+ * This object should have the same structure as [Sentiment]{@link Sentiment}
+ *
* @class
* @see [google.cloud.language.v1.EntityMention definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/language/v1/language_service.proto}
*/
@@ -1345,6 +1391,46 @@ var AnalyzeSentimentResponse = {
// This is for documentation. Actual contents will be loaded by gRPC.
};
+/**
+ * The entity-level sentiment analysis request message.
+ *
+ * @property {Object} document
+ * Input document.
+ *
+ * This object should have the same structure as [Document]{@link Document}
+ *
+ * @property {number} encodingType
+ * The encoding type used by the API to calculate offsets.
+ *
+ * The number should be among the values of [EncodingType]{@link EncodingType}
+ *
+ * @class
+ * @see [google.cloud.language.v1.AnalyzeEntitySentimentRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/language/v1/language_service.proto}
+ */
+var AnalyzeEntitySentimentRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * The entity-level sentiment analysis response message.
+ *
+ * @property {Object[]} entities
+ * The recognized entities in the input document with associated sentiments.
+ *
+ * This object should have the same structure as [Entity]{@link Entity}
+ *
+ * @property {string} language
+ * The language of the text, which will be the same as the language specified
+ * in the request or, if not specified, the automatically-detected language.
+ * See {@link Document.language} field for more details.
+ *
+ * @class
+ * @see [google.cloud.language.v1.AnalyzeEntitySentimentResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/language/v1/language_service.proto}
+ */
+var AnalyzeEntitySentimentResponse = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
/**
* The entity analysis request message.
*
@@ -1468,6 +1554,9 @@ var AnnotateTextRequest = {
* @property {boolean} extractDocumentSentiment
* Extract document-level sentiment.
*
+ * @property {boolean} extractEntitySentiment
+ * Extract entities and their associated sentiment.
+ *
* @class
* @see [google.cloud.language.v1.AnnotateTextRequest.Features definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/language/v1/language_service.proto}
*/
diff --git a/packages/language/src/v1/index.js b/packages/language/src/v1/index.js
index b4f2c574e4e..c7808106836 100644
--- a/packages/language/src/v1/index.js
+++ b/packages/language/src/v1/index.js
@@ -27,8 +27,8 @@ function v1(options) {
return languageServiceClient(gaxGrpc);
}
-v1.GAPIC_VERSION = '0.7.1';
+v1.GAPIC_VERSION = '0.0.5';
v1.SERVICE_ADDRESS = languageServiceClient.SERVICE_ADDRESS;
v1.ALL_SCOPES = languageServiceClient.ALL_SCOPES;
-module.exports = v1;
+module.exports = v1;
\ No newline at end of file
diff --git a/packages/language/src/v1/language_service_client.js b/packages/language/src/v1/language_service_client.js
index faecdc5160f..4e59b4a86e2 100644
--- a/packages/language/src/v1/language_service_client.js
+++ b/packages/language/src/v1/language_service_client.js
@@ -30,12 +30,14 @@
var configData = require('./language_service_client_config');
var extend = require('extend');
var gax = require('google-gax');
+var googleProtoFiles = require('google-proto-files');
+var path = require('path');
var SERVICE_ADDRESS = 'language.googleapis.com';
var DEFAULT_SERVICE_PORT = 443;
-var CODE_GEN_NAME_VERSION = 'gapic/0.7.1';
+var CODE_GEN_NAME_VERSION = 'gapic/0.0.5';
/**
* The scopes needed to make gRPC calls to all of the methods defined in
@@ -52,7 +54,7 @@ var ALL_SCOPES = [
*
* @class
*/
-function LanguageServiceClient(gaxGrpc, grpcClients, opts) {
+function LanguageServiceClient(gaxGrpc, loadedProtos, opts) {
opts = extend({
servicePath: SERVICE_ADDRESS,
port: DEFAULT_SERVICE_PORT,
@@ -81,11 +83,12 @@ function LanguageServiceClient(gaxGrpc, grpcClients, opts) {
this.auth = gaxGrpc.auth;
var languageServiceStub = gaxGrpc.createStub(
- grpcClients.google.cloud.language.v1.LanguageService,
+ loadedProtos.google.cloud.language.v1.LanguageService,
opts);
var languageServiceStubMethods = [
'analyzeSentiment',
'analyzeEntities',
+ 'analyzeEntitySentiment',
'analyzeSyntax',
'annotateText'
];
@@ -178,7 +181,7 @@ LanguageServiceClient.prototype.analyzeSentiment = function(request, options, ca
* Input document.
*
* This object should have the same structure as [Document]{@link Document}
- * @param {number} request.encodingType
+ * @param {number=} request.encodingType
* The encoding type used by the API to calculate offsets.
*
* The number should be among the values of [EncodingType]{@link EncodingType}
@@ -202,12 +205,7 @@ LanguageServiceClient.prototype.analyzeSentiment = function(request, options, ca
* });
*
* var document = {};
- * var encodingType = language.v1.types.EncodingType.NONE;
- * var request = {
- * document: document,
- * encodingType: encodingType
- * };
- * client.analyzeEntities(request).then(function(responses) {
+ * client.analyzeEntities({document: document}).then(function(responses) {
* var response = responses[0];
* // doThingsWith(response)
* })
@@ -227,6 +225,60 @@ LanguageServiceClient.prototype.analyzeEntities = function(request, options, cal
return this._analyzeEntities(request, options, callback);
};
+/**
+ * Finds entities, similar to {@link AnalyzeEntities} in the text and analyzes
+ * sentiment associated with each entity and its mentions.
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {Object} request.document
+ * Input document.
+ *
+ * This object should have the same structure as [Document]{@link Document}
+ * @param {number=} request.encodingType
+ * The encoding type used by the API to calculate offsets.
+ *
+ * The number should be among the values of [EncodingType]{@link EncodingType}
+ * @param {Object=} options
+ * Optional parameters. You can override the default settings for this call, e.g, timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @param {function(?Error, ?Object)=} callback
+ * The function which will be called with the result of the API call.
+ *
+ * The second parameter to the callback is an object representing [AnalyzeEntitySentimentResponse]{@link AnalyzeEntitySentimentResponse}.
+ * @return {Promise} - The promise which resolves to an array.
+ * The first element of the array is an object representing [AnalyzeEntitySentimentResponse]{@link AnalyzeEntitySentimentResponse}.
+ * The promise has a method named "cancel" which cancels the ongoing API call.
+ *
+ * @example
+ *
+ * var language = require('@google-cloud/language');
+ *
+ * var client = language.v1({
+ * // optional auth parameters.
+ * });
+ *
+ * var document = {};
+ * client.analyzeEntitySentiment({document: document}).then(function(responses) {
+ * var response = responses[0];
+ * // doThingsWith(response)
+ * })
+ * .catch(function(err) {
+ * console.error(err);
+ * });
+ */
+LanguageServiceClient.prototype.analyzeEntitySentiment = function(request, options, callback) {
+ if (options instanceof Function && callback === undefined) {
+ callback = options;
+ options = {};
+ }
+ if (options === undefined) {
+ options = {};
+ }
+
+ return this._analyzeEntitySentiment(request, options, callback);
+};
+
/**
* Analyzes the syntax of the text and provides sentence boundaries and
* tokenization along with part of speech tags, dependency trees, and other
@@ -238,7 +290,7 @@ LanguageServiceClient.prototype.analyzeEntities = function(request, options, cal
* Input document.
*
* This object should have the same structure as [Document]{@link Document}
- * @param {number} request.encodingType
+ * @param {number=} request.encodingType
* The encoding type used by the API to calculate offsets.
*
* The number should be among the values of [EncodingType]{@link EncodingType}
@@ -262,12 +314,7 @@ LanguageServiceClient.prototype.analyzeEntities = function(request, options, cal
* });
*
* var document = {};
- * var encodingType = language.v1.types.EncodingType.NONE;
- * var request = {
- * document: document,
- * encodingType: encodingType
- * };
- * client.analyzeSyntax(request).then(function(responses) {
+ * client.analyzeSyntax({document: document}).then(function(responses) {
* var response = responses[0];
* // doThingsWith(response)
* })
@@ -301,7 +348,7 @@ LanguageServiceClient.prototype.analyzeSyntax = function(request, options, callb
* The enabled features.
*
* This object should have the same structure as [Features]{@link Features}
- * @param {number} request.encodingType
+ * @param {number=} request.encodingType
* The encoding type used by the API to calculate offsets.
*
* The number should be among the values of [EncodingType]{@link EncodingType}
@@ -326,11 +373,9 @@ LanguageServiceClient.prototype.analyzeSyntax = function(request, options, callb
*
* var document = {};
* var features = {};
- * var encodingType = language.v1.types.EncodingType.NONE;
* var request = {
* document: document,
- * features: features,
- * encodingType: encodingType
+ * features: features
* };
* client.annotateText(request).then(function(responses) {
* var response = responses[0];
@@ -357,11 +402,9 @@ function LanguageServiceClientBuilder(gaxGrpc) {
return new LanguageServiceClientBuilder(gaxGrpc);
}
- var languageServiceClient = gaxGrpc.load([{
- root: require('google-proto-files')('..'),
- file: 'google/cloud/language/v1/language_service.proto'
- }]);
- extend(this, languageServiceClient.google.cloud.language.v1);
+ var languageServiceStubProtos = gaxGrpc.loadProto(
+ path.join(__dirname, '..', '..', 'protos', 'google/cloud/language/v1/language_service.proto'));
+ extend(this, languageServiceStubProtos.google.cloud.language.v1);
/**
@@ -379,10 +422,10 @@ function LanguageServiceClientBuilder(gaxGrpc) {
* {@link gax.constructSettings} for the format.
*/
this.languageServiceClient = function(opts) {
- return new LanguageServiceClient(gaxGrpc, languageServiceClient, opts);
+ return new LanguageServiceClient(gaxGrpc, languageServiceStubProtos, opts);
};
extend(this.languageServiceClient, LanguageServiceClient);
}
module.exports = LanguageServiceClientBuilder;
module.exports.SERVICE_ADDRESS = SERVICE_ADDRESS;
-module.exports.ALL_SCOPES = ALL_SCOPES;
+module.exports.ALL_SCOPES = ALL_SCOPES;
\ No newline at end of file
diff --git a/packages/language/src/v1/language_service_client_config.json b/packages/language/src/v1/language_service_client_config.json
index 202d5b0d427..7c00b67d111 100644
--- a/packages/language/src/v1/language_service_client_config.json
+++ b/packages/language/src/v1/language_service_client_config.json
@@ -30,6 +30,11 @@
"retry_codes_name": "idempotent",
"retry_params_name": "default"
},
+ "AnalyzeEntitySentiment": {
+ "timeout_millis": 30000,
+ "retry_codes_name": "idempotent",
+ "retry_params_name": "default"
+ },
"AnalyzeSyntax": {
"timeout_millis": 30000,
"retry_codes_name": "idempotent",
diff --git a/packages/language/src/v1beta2/doc/doc_language_service.js b/packages/language/src/v1beta2/doc/doc_language_service.js
index a340b2d45fd..472e10e8be7 100644
--- a/packages/language/src/v1beta2/doc/doc_language_service.js
+++ b/packages/language/src/v1beta2/doc/doc_language_service.js
@@ -1239,7 +1239,37 @@ var DependencyEdge = {
/**
* Dislocated relation (for fronted/topicalized elements)
*/
- DISLOCATED: 76
+ DISLOCATED: 76,
+
+ /**
+ * Aspect marker
+ */
+ ASP: 77,
+
+ /**
+ * Genitive modifier
+ */
+ GMOD: 78,
+
+ /**
+ * Genitive object
+ */
+ GOBJ: 79,
+
+ /**
+ * Infinitival modifier
+ */
+ INFMOD: 80,
+
+ /**
+ * Measure
+ */
+ MES: 81,
+
+ /**
+ * Nominal complement of a noun
+ */
+ NCOMP: 82
}
};
@@ -1312,6 +1342,23 @@ var TextSpan = {
// This is for documentation. Actual contents will be loaded by gRPC.
};
+/**
+ * Represents a category returned from the text classifier.
+ *
+ * @property {string} name
+ * The name of the category representing the document.
+ *
+ * @property {number} confidence
+ * The classifier's confidence of the category. Number represents how certain
+ * the classifier is that this category represents the given text.
+ *
+ * @class
+ * @see [google.cloud.language.v1beta2.ClassificationCategory definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/language/v1beta2/language_service.proto}
+ */
+var ClassificationCategory = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
/**
* The sentiment analysis request message.
*
@@ -1483,6 +1530,36 @@ var AnalyzeSyntaxResponse = {
// This is for documentation. Actual contents will be loaded by gRPC.
};
+/**
+ * The document classification request message.
+ *
+ * @property {Object} document
+ * Input document.
+ *
+ * This object should have the same structure as [Document]{@link Document}
+ *
+ * @class
+ * @see [google.cloud.language.v1beta2.ClassifyTextRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/language/v1beta2/language_service.proto}
+ */
+var ClassifyTextRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * The document classification response message.
+ *
+ * @property {Object[]} categories
+ * Categories representing the input document.
+ *
+ * This object should have the same structure as [ClassificationCategory]{@link ClassificationCategory}
+ *
+ * @class
+ * @see [google.cloud.language.v1beta2.ClassifyTextResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/language/v1beta2/language_service.proto}
+ */
+var ClassifyTextResponse = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
/**
* The request message for the text annotation API, which can perform multiple
* analysis types (sentiment, entities, and syntax) in one call.
@@ -1524,6 +1601,9 @@ var AnnotateTextRequest = {
* @property {boolean} extractEntitySentiment
* Extract entities and their associated sentiment.
*
+ * @property {boolean} classifyText
+ * Classify the full document into categories.
+ *
* @class
* @see [google.cloud.language.v1beta2.AnnotateTextRequest.Features definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/language/v1beta2/language_service.proto}
*/
@@ -1566,6 +1646,11 @@ var AnnotateTextRequest = {
* in the request or, if not specified, the automatically-detected language.
* See {@link Document.language} field for more details.
*
+ * @property {Object[]} categories
+ * Categories identified in the input document.
+ *
+ * This object should have the same structure as [ClassificationCategory]{@link ClassificationCategory}
+ *
* @class
* @see [google.cloud.language.v1beta2.AnnotateTextResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/language/v1beta2/language_service.proto}
*/
diff --git a/packages/language/src/v1beta2/index.js b/packages/language/src/v1beta2/index.js
index f65984ac290..eabebb8f311 100644
--- a/packages/language/src/v1beta2/index.js
+++ b/packages/language/src/v1beta2/index.js
@@ -27,8 +27,8 @@ function v1beta2(options) {
return languageServiceClient(gaxGrpc);
}
-v1beta2.GAPIC_VERSION = '0.7.1';
+v1beta2.GAPIC_VERSION = '0.0.5';
v1beta2.SERVICE_ADDRESS = languageServiceClient.SERVICE_ADDRESS;
v1beta2.ALL_SCOPES = languageServiceClient.ALL_SCOPES;
-module.exports = v1beta2;
+module.exports = v1beta2;
\ No newline at end of file
diff --git a/packages/language/src/v1beta2/language_service_client.js b/packages/language/src/v1beta2/language_service_client.js
index 985c526f05f..6c9e1d52537 100644
--- a/packages/language/src/v1beta2/language_service_client.js
+++ b/packages/language/src/v1beta2/language_service_client.js
@@ -30,12 +30,14 @@
var configData = require('./language_service_client_config');
var extend = require('extend');
var gax = require('google-gax');
+var googleProtoFiles = require('google-proto-files');
+var path = require('path');
var SERVICE_ADDRESS = 'language.googleapis.com';
var DEFAULT_SERVICE_PORT = 443;
-var CODE_GEN_NAME_VERSION = 'gapic/0.7.1';
+var CODE_GEN_NAME_VERSION = 'gapic/0.0.5';
/**
* The scopes needed to make gRPC calls to all of the methods defined in
@@ -52,7 +54,7 @@ var ALL_SCOPES = [
*
* @class
*/
-function LanguageServiceClient(gaxGrpc, grpcClients, opts) {
+function LanguageServiceClient(gaxGrpc, loadedProtos, opts) {
opts = extend({
servicePath: SERVICE_ADDRESS,
port: DEFAULT_SERVICE_PORT,
@@ -81,13 +83,14 @@ function LanguageServiceClient(gaxGrpc, grpcClients, opts) {
this.auth = gaxGrpc.auth;
var languageServiceStub = gaxGrpc.createStub(
- grpcClients.google.cloud.language.v1beta2.LanguageService,
+ loadedProtos.google.cloud.language.v1beta2.LanguageService,
opts);
var languageServiceStubMethods = [
'analyzeSentiment',
'analyzeEntities',
'analyzeEntitySentiment',
'analyzeSyntax',
+ 'classifyText',
'annotateText'
];
languageServiceStubMethods.forEach(function(methodName) {
@@ -180,7 +183,7 @@ LanguageServiceClient.prototype.analyzeSentiment = function(request, options, ca
* Input document.
*
* This object should have the same structure as [Document]{@link Document}
- * @param {number} request.encodingType
+ * @param {number=} request.encodingType
* The encoding type used by the API to calculate offsets.
*
* The number should be among the values of [EncodingType]{@link EncodingType}
@@ -204,12 +207,7 @@ LanguageServiceClient.prototype.analyzeSentiment = function(request, options, ca
* });
*
* var document = {};
- * var encodingType = language.v1beta2.types.EncodingType.NONE;
- * var request = {
- * document: document,
- * encodingType: encodingType
- * };
- * client.analyzeEntities(request).then(function(responses) {
+ * client.analyzeEntities({document: document}).then(function(responses) {
* var response = responses[0];
* // doThingsWith(response)
* })
@@ -239,7 +237,7 @@ LanguageServiceClient.prototype.analyzeEntities = function(request, options, cal
* Input document.
*
* This object should have the same structure as [Document]{@link Document}
- * @param {number} request.encodingType
+ * @param {number=} request.encodingType
* The encoding type used by the API to calculate offsets.
*
* The number should be among the values of [EncodingType]{@link EncodingType}
@@ -263,12 +261,7 @@ LanguageServiceClient.prototype.analyzeEntities = function(request, options, cal
* });
*
* var document = {};
- * var encodingType = language.v1beta2.types.EncodingType.NONE;
- * var request = {
- * document: document,
- * encodingType: encodingType
- * };
- * client.analyzeEntitySentiment(request).then(function(responses) {
+ * client.analyzeEntitySentiment({document: document}).then(function(responses) {
* var response = responses[0];
* // doThingsWith(response)
* })
@@ -299,7 +292,7 @@ LanguageServiceClient.prototype.analyzeEntitySentiment = function(request, optio
* Input document.
*
* This object should have the same structure as [Document]{@link Document}
- * @param {number} request.encodingType
+ * @param {number=} request.encodingType
* The encoding type used by the API to calculate offsets.
*
* The number should be among the values of [EncodingType]{@link EncodingType}
@@ -323,12 +316,7 @@ LanguageServiceClient.prototype.analyzeEntitySentiment = function(request, optio
* });
*
* var document = {};
- * var encodingType = language.v1beta2.types.EncodingType.NONE;
- * var request = {
- * document: document,
- * encodingType: encodingType
- * };
- * client.analyzeSyntax(request).then(function(responses) {
+ * client.analyzeSyntax({document: document}).then(function(responses) {
* var response = responses[0];
* // doThingsWith(response)
* })
@@ -349,8 +337,57 @@ LanguageServiceClient.prototype.analyzeSyntax = function(request, options, callb
};
/**
- * A convenience method that provides all syntax, sentiment, and entity
- * features in one call.
+ * Classifies a document into categories.
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {Object} request.document
+ * Input document.
+ *
+ * This object should have the same structure as [Document]{@link Document}
+ * @param {Object=} options
+ * Optional parameters. You can override the default settings for this call, e.g, timeout,
+ * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+ * @param {function(?Error, ?Object)=} callback
+ * The function which will be called with the result of the API call.
+ *
+ * The second parameter to the callback is an object representing [ClassifyTextResponse]{@link ClassifyTextResponse}.
+ * @return {Promise} - The promise which resolves to an array.
+ * The first element of the array is an object representing [ClassifyTextResponse]{@link ClassifyTextResponse}.
+ * The promise has a method named "cancel" which cancels the ongoing API call.
+ *
+ * @example
+ *
+ * var language = require('@google-cloud/language');
+ *
+ * var client = language.v1beta2({
+ * // optional auth parameters.
+ * });
+ *
+ * var document = {};
+ * client.classifyText({document: document}).then(function(responses) {
+ * var response = responses[0];
+ * // doThingsWith(response)
+ * })
+ * .catch(function(err) {
+ * console.error(err);
+ * });
+ */
+LanguageServiceClient.prototype.classifyText = function(request, options, callback) {
+ if (options instanceof Function && callback === undefined) {
+ callback = options;
+ options = {};
+ }
+ if (options === undefined) {
+ options = {};
+ }
+
+ return this._classifyText(request, options, callback);
+};
+
+/**
+ * A convenience method that provides all syntax, sentiment, entity, and
+ * classification features in one call.
*
* @param {Object} request
* The request object that will be sent.
@@ -362,7 +399,7 @@ LanguageServiceClient.prototype.analyzeSyntax = function(request, options, callb
* The enabled features.
*
* This object should have the same structure as [Features]{@link Features}
- * @param {number} request.encodingType
+ * @param {number=} request.encodingType
* The encoding type used by the API to calculate offsets.
*
* The number should be among the values of [EncodingType]{@link EncodingType}
@@ -387,11 +424,9 @@ LanguageServiceClient.prototype.analyzeSyntax = function(request, options, callb
*
* var document = {};
* var features = {};
- * var encodingType = language.v1beta2.types.EncodingType.NONE;
* var request = {
* document: document,
- * features: features,
- * encodingType: encodingType
+ * features: features
* };
* client.annotateText(request).then(function(responses) {
* var response = responses[0];
@@ -418,11 +453,9 @@ function LanguageServiceClientBuilder(gaxGrpc) {
return new LanguageServiceClientBuilder(gaxGrpc);
}
- var languageServiceClient = gaxGrpc.load([{
- root: require('google-proto-files')('..'),
- file: 'google/cloud/language/v1beta2/language_service.proto'
- }]);
- extend(this, languageServiceClient.google.cloud.language.v1beta2);
+ var languageServiceStubProtos = gaxGrpc.loadProto(
+ path.join(__dirname, '..', '..', 'protos', 'google/cloud/language/v1beta2/language_service.proto'));
+ extend(this, languageServiceStubProtos.google.cloud.language.v1beta2);
/**
@@ -440,10 +473,10 @@ function LanguageServiceClientBuilder(gaxGrpc) {
* {@link gax.constructSettings} for the format.
*/
this.languageServiceClient = function(opts) {
- return new LanguageServiceClient(gaxGrpc, languageServiceClient, opts);
+ return new LanguageServiceClient(gaxGrpc, languageServiceStubProtos, opts);
};
extend(this.languageServiceClient, LanguageServiceClient);
}
module.exports = LanguageServiceClientBuilder;
module.exports.SERVICE_ADDRESS = SERVICE_ADDRESS;
-module.exports.ALL_SCOPES = ALL_SCOPES;
+module.exports.ALL_SCOPES = ALL_SCOPES;
\ No newline at end of file
diff --git a/packages/language/src/v1beta2/language_service_client_config.json b/packages/language/src/v1beta2/language_service_client_config.json
index 8018f8a7bbf..100eba5ffde 100644
--- a/packages/language/src/v1beta2/language_service_client_config.json
+++ b/packages/language/src/v1beta2/language_service_client_config.json
@@ -40,6 +40,11 @@
"retry_codes_name": "idempotent",
"retry_params_name": "default"
},
+ "ClassifyText": {
+ "timeout_millis": 30000,
+ "retry_codes_name": "idempotent",
+ "retry_params_name": "default"
+ },
"AnnotateText": {
"timeout_millis": 30000,
"retry_codes_name": "idempotent",
diff --git a/packages/language/test/gapic-v1.js b/packages/language/test/gapic-v1.js
index dd541d37017..268e21d2fd5 100644
--- a/packages/language/test/gapic-v1.js
+++ b/packages/language/test/gapic-v1.js
@@ -16,7 +16,7 @@
'use strict';
var assert = require('assert');
-var languageV1 = require('../src/v1')();
+var language = require('../src');
var FAKE_STATUS_CODE = 1;
var error = new Error();
@@ -25,7 +25,8 @@ error.code = FAKE_STATUS_CODE;
describe('LanguageServiceClient', function() {
describe('analyzeSentiment', function() {
it('invokes analyzeSentiment without error', function(done) {
- var client = languageV1.languageServiceClient();
+ var client = language.v1();
+
// Mock request
var document = {};
var request = {
@@ -33,9 +34,9 @@ describe('LanguageServiceClient', function() {
};
// Mock response
- var language = 'language-1613589672';
+ var language_ = 'language-1613589672';
var expectedResponse = {
- language : language
+ language : language_
};
// Mock Grpc layer
@@ -49,7 +50,8 @@ describe('LanguageServiceClient', function() {
});
it('invokes analyzeSentiment with error', function(done) {
- var client = languageV1.languageServiceClient();
+ var client = language.v1();
+
// Mock request
var document = {};
var request = {
@@ -69,19 +71,18 @@ describe('LanguageServiceClient', function() {
describe('analyzeEntities', function() {
it('invokes analyzeEntities without error', function(done) {
- var client = languageV1.languageServiceClient();
+ var client = language.v1();
+
// Mock request
var document = {};
- var encodingType = languageV1.EncodingType.NONE;
var request = {
- document : document,
- encodingType : encodingType
+ document : document
};
// Mock response
- var language = 'language-1613589672';
+ var language_ = 'language-1613589672';
var expectedResponse = {
- language : language
+ language : language_
};
// Mock Grpc layer
@@ -95,13 +96,12 @@ describe('LanguageServiceClient', function() {
});
it('invokes analyzeEntities with error', function(done) {
- var client = languageV1.languageServiceClient();
+ var client = language.v1();
+
// Mock request
var document = {};
- var encodingType = languageV1.EncodingType.NONE;
var request = {
- document : document,
- encodingType : encodingType
+ document : document
};
// Mock Grpc layer
@@ -115,21 +115,66 @@ describe('LanguageServiceClient', function() {
});
});
+ describe('analyzeEntitySentiment', function() {
+ it('invokes analyzeEntitySentiment without error', function(done) {
+ var client = language.v1();
+
+ // Mock request
+ var document = {};
+ var request = {
+ document : document
+ };
+
+ // Mock response
+ var language_ = 'language-1613589672';
+ var expectedResponse = {
+ language : language_
+ };
+
+ // Mock Grpc layer
+ client._analyzeEntitySentiment = mockSimpleGrpcMethod(request, expectedResponse);
+
+ client.analyzeEntitySentiment(request, function(err, response) {
+ assert.ifError(err);
+ assert.deepStrictEqual(response, expectedResponse);
+ done();
+ });
+ });
+
+ it('invokes analyzeEntitySentiment with error', function(done) {
+ var client = language.v1();
+
+ // Mock request
+ var document = {};
+ var request = {
+ document : document
+ };
+
+ // Mock Grpc layer
+ client._analyzeEntitySentiment = mockSimpleGrpcMethod(request, null, error);
+
+ client.analyzeEntitySentiment(request, function(err, response) {
+ assert(err instanceof Error);
+ assert.equal(err.code, FAKE_STATUS_CODE);
+ done();
+ });
+ });
+ });
+
describe('analyzeSyntax', function() {
it('invokes analyzeSyntax without error', function(done) {
- var client = languageV1.languageServiceClient();
+ var client = language.v1();
+
// Mock request
var document = {};
- var encodingType = languageV1.EncodingType.NONE;
var request = {
- document : document,
- encodingType : encodingType
+ document : document
};
// Mock response
- var language = 'language-1613589672';
+ var language_ = 'language-1613589672';
var expectedResponse = {
- language : language
+ language : language_
};
// Mock Grpc layer
@@ -143,13 +188,12 @@ describe('LanguageServiceClient', function() {
});
it('invokes analyzeSyntax with error', function(done) {
- var client = languageV1.languageServiceClient();
+ var client = language.v1();
+
// Mock request
var document = {};
- var encodingType = languageV1.EncodingType.NONE;
var request = {
- document : document,
- encodingType : encodingType
+ document : document
};
// Mock Grpc layer
@@ -165,21 +209,20 @@ describe('LanguageServiceClient', function() {
describe('annotateText', function() {
it('invokes annotateText without error', function(done) {
- var client = languageV1.languageServiceClient();
+ var client = language.v1();
+
// Mock request
var document = {};
var features = {};
- var encodingType = languageV1.EncodingType.NONE;
var request = {
document : document,
- features : features,
- encodingType : encodingType
+ features : features
};
// Mock response
- var language = 'language-1613589672';
+ var language_ = 'language-1613589672';
var expectedResponse = {
- language : language
+ language : language_
};
// Mock Grpc layer
@@ -193,15 +236,14 @@ describe('LanguageServiceClient', function() {
});
it('invokes annotateText with error', function(done) {
- var client = languageV1.languageServiceClient();
+ var client = language.v1();
+
// Mock request
var document = {};
var features = {};
- var encodingType = languageV1.EncodingType.NONE;
var request = {
document : document,
- features : features,
- encodingType : encodingType
+ features : features
};
// Mock Grpc layer
diff --git a/packages/language/test/gapic-v1beta2.js b/packages/language/test/gapic-v1beta2.js
index 8c2c944563f..6614ae65865 100644
--- a/packages/language/test/gapic-v1beta2.js
+++ b/packages/language/test/gapic-v1beta2.js
@@ -16,7 +16,7 @@
'use strict';
var assert = require('assert');
-var languageV1beta2 = require('../src/v1beta2')();
+var language = require('../src');
var FAKE_STATUS_CODE = 1;
var error = new Error();
@@ -25,7 +25,8 @@ error.code = FAKE_STATUS_CODE;
describe('LanguageServiceClient', function() {
describe('analyzeSentiment', function() {
it('invokes analyzeSentiment without error', function(done) {
- var client = languageV1beta2.languageServiceClient();
+ var client = language.v1beta2();
+
// Mock request
var document = {};
var request = {
@@ -33,9 +34,9 @@ describe('LanguageServiceClient', function() {
};
// Mock response
- var language = 'language-1613589672';
+ var language_ = 'language-1613589672';
var expectedResponse = {
- language : language
+ language : language_
};
// Mock Grpc layer
@@ -49,7 +50,8 @@ describe('LanguageServiceClient', function() {
});
it('invokes analyzeSentiment with error', function(done) {
- var client = languageV1beta2.languageServiceClient();
+ var client = language.v1beta2();
+
// Mock request
var document = {};
var request = {
@@ -69,19 +71,18 @@ describe('LanguageServiceClient', function() {
describe('analyzeEntities', function() {
it('invokes analyzeEntities without error', function(done) {
- var client = languageV1beta2.languageServiceClient();
+ var client = language.v1beta2();
+
// Mock request
var document = {};
- var encodingType = languageV1beta2.EncodingType.NONE;
var request = {
- document : document,
- encodingType : encodingType
+ document : document
};
// Mock response
- var language = 'language-1613589672';
+ var language_ = 'language-1613589672';
var expectedResponse = {
- language : language
+ language : language_
};
// Mock Grpc layer
@@ -95,13 +96,12 @@ describe('LanguageServiceClient', function() {
});
it('invokes analyzeEntities with error', function(done) {
- var client = languageV1beta2.languageServiceClient();
+ var client = language.v1beta2();
+
// Mock request
var document = {};
- var encodingType = languageV1beta2.EncodingType.NONE;
var request = {
- document : document,
- encodingType : encodingType
+ document : document
};
// Mock Grpc layer
@@ -117,19 +117,18 @@ describe('LanguageServiceClient', function() {
describe('analyzeEntitySentiment', function() {
it('invokes analyzeEntitySentiment without error', function(done) {
- var client = languageV1beta2.languageServiceClient();
+ var client = language.v1beta2();
+
// Mock request
var document = {};
- var encodingType = languageV1beta2.EncodingType.NONE;
var request = {
- document : document,
- encodingType : encodingType
+ document : document
};
// Mock response
- var language = 'language-1613589672';
+ var language_ = 'language-1613589672';
var expectedResponse = {
- language : language
+ language : language_
};
// Mock Grpc layer
@@ -143,13 +142,12 @@ describe('LanguageServiceClient', function() {
});
it('invokes analyzeEntitySentiment with error', function(done) {
- var client = languageV1beta2.languageServiceClient();
+ var client = language.v1beta2();
+
// Mock request
var document = {};
- var encodingType = languageV1beta2.EncodingType.NONE;
var request = {
- document : document,
- encodingType : encodingType
+ document : document
};
// Mock Grpc layer
@@ -165,19 +163,18 @@ describe('LanguageServiceClient', function() {
describe('analyzeSyntax', function() {
it('invokes analyzeSyntax without error', function(done) {
- var client = languageV1beta2.languageServiceClient();
+ var client = language.v1beta2();
+
// Mock request
var document = {};
- var encodingType = languageV1beta2.EncodingType.NONE;
var request = {
- document : document,
- encodingType : encodingType
+ document : document
};
// Mock response
- var language = 'language-1613589672';
+ var language_ = 'language-1613589672';
var expectedResponse = {
- language : language
+ language : language_
};
// Mock Grpc layer
@@ -191,13 +188,12 @@ describe('LanguageServiceClient', function() {
});
it('invokes analyzeSyntax with error', function(done) {
- var client = languageV1beta2.languageServiceClient();
+ var client = language.v1beta2();
+
// Mock request
var document = {};
- var encodingType = languageV1beta2.EncodingType.NONE;
var request = {
- document : document,
- encodingType : encodingType
+ document : document
};
// Mock Grpc layer
@@ -211,23 +207,65 @@ describe('LanguageServiceClient', function() {
});
});
+ describe('classifyText', function() {
+ it('invokes classifyText without error', function(done) {
+ var client = language.v1beta2();
+
+ // Mock request
+ var document = {};
+ var request = {
+ document : document
+ };
+
+ // Mock response
+ var expectedResponse = {};
+
+ // Mock Grpc layer
+ client._classifyText = mockSimpleGrpcMethod(request, expectedResponse);
+
+ client.classifyText(request, function(err, response) {
+ assert.ifError(err);
+ assert.deepStrictEqual(response, expectedResponse);
+ done();
+ });
+ });
+
+ it('invokes classifyText with error', function(done) {
+ var client = language.v1beta2();
+
+ // Mock request
+ var document = {};
+ var request = {
+ document : document
+ };
+
+ // Mock Grpc layer
+ client._classifyText = mockSimpleGrpcMethod(request, null, error);
+
+ client.classifyText(request, function(err, response) {
+ assert(err instanceof Error);
+ assert.equal(err.code, FAKE_STATUS_CODE);
+ done();
+ });
+ });
+ });
+
describe('annotateText', function() {
it('invokes annotateText without error', function(done) {
- var client = languageV1beta2.languageServiceClient();
+ var client = language.v1beta2();
+
// Mock request
var document = {};
var features = {};
- var encodingType = languageV1beta2.EncodingType.NONE;
var request = {
document : document,
- features : features,
- encodingType : encodingType
+ features : features
};
// Mock response
- var language = 'language-1613589672';
+ var language_ = 'language-1613589672';
var expectedResponse = {
- language : language
+ language : language_
};
// Mock Grpc layer
@@ -241,15 +279,14 @@ describe('LanguageServiceClient', function() {
});
it('invokes annotateText with error', function(done) {
- var client = languageV1beta2.languageServiceClient();
+ var client = language.v1beta2();
+
// Mock request
var document = {};
var features = {};
- var encodingType = languageV1beta2.EncodingType.NONE;
var request = {
document : document,
- features : features,
- encodingType : encodingType
+ features : features
};
// Mock Grpc layer