diff --git a/cloud-language/snippets/analyze.v1.js b/cloud-language/snippets/analyze.v1.js index 590ddf30ae..f506f78dfc 100644 --- a/cloud-language/snippets/analyze.v1.js +++ b/cloud-language/snippets/analyze.v1.js @@ -15,7 +15,7 @@ 'use strict'; -function analyzeSentimentOfText(text) { +async function analyzeSentimentOfText(text) { // [START language_sentiment_text] // Imports the Google Cloud client library const language = require('@google-cloud/language'); @@ -35,28 +35,24 @@ function analyzeSentimentOfText(text) { }; // Detects the sentiment of the document - client - .analyzeSentiment({document: document}) - .then(results => { - const sentiment = results[0].documentSentiment; - console.log(`Document sentiment:`); - console.log(` Score: ${sentiment.score}`); - console.log(` Magnitude: ${sentiment.magnitude}`); - - const sentences = results[0].sentences; - sentences.forEach(sentence => { - console.log(`Sentence: ${sentence.text.content}`); - console.log(` Score: ${sentence.sentiment.score}`); - console.log(` Magnitude: ${sentence.sentiment.magnitude}`); - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + const [result] = await client.analyzeSentiment({document}); + + const sentiment = result.documentSentiment; + console.log(`Document sentiment:`); + console.log(` Score: ${sentiment.score}`); + console.log(` Magnitude: ${sentiment.magnitude}`); + + const sentences = result.sentences; + sentences.forEach(sentence => { + console.log(`Sentence: ${sentence.text.content}`); + console.log(` Score: ${sentence.sentiment.score}`); + console.log(` Magnitude: ${sentence.sentiment.magnitude}`); + }); + // [END language_sentiment_text] } -function analyzeSentimentInFile(bucketName, fileName) { +async function analyzeSentimentInFile(bucketName, fileName) { // [START language_sentiment_gcs] // Imports the Google Cloud client library const language = require('@google-cloud/language'); @@ -77,28 +73,23 @@ function analyzeSentimentInFile(bucketName, fileName) { }; // Detects the sentiment of the document - client - .analyzeSentiment({document: document}) - .then(results => { - const sentiment = results[0].documentSentiment; - console.log(`Document sentiment:`); - console.log(` Score: ${sentiment.score}`); - console.log(` Magnitude: ${sentiment.magnitude}`); - - const sentences = results[0].sentences; - sentences.forEach(sentence => { - console.log(`Sentence: ${sentence.text.content}`); - console.log(` Score: ${sentence.sentiment.score}`); - console.log(` Magnitude: ${sentence.sentiment.magnitude}`); - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + const [result] = await client.analyzeSentiment({document}); + + const sentiment = result.documentSentiment; + console.log(`Document sentiment:`); + console.log(` Score: ${sentiment.score}`); + console.log(` Magnitude: ${sentiment.magnitude}`); + + const sentences = result.sentences; + sentences.forEach(sentence => { + console.log(`Sentence: ${sentence.text.content}`); + console.log(` Score: ${sentence.sentiment.score}`); + console.log(` Magnitude: ${sentence.sentiment.magnitude}`); + }); // [END language_sentiment_gcs] } -function analyzeEntitiesOfText(text) { +async function analyzeEntitiesOfText(text) { // [START language_entities_text] // Imports the Google Cloud client library const language = require('@google-cloud/language'); @@ -118,27 +109,22 @@ function analyzeEntitiesOfText(text) { }; // Detects entities in the document - client - .analyzeEntities({document: document}) - .then(results => { - const entities = 
results[0].entities; - - console.log('Entities:'); - entities.forEach(entity => { - console.log(entity.name); - console.log(` - Type: ${entity.type}, Salience: ${entity.salience}`); - if (entity.metadata && entity.metadata.wikipedia_url) { - console.log(` - Wikipedia URL: ${entity.metadata.wikipedia_url}$`); - } - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + const [result] = await client.analyzeEntities({document}); + + const entities = result.entities; + + console.log('Entities:'); + entities.forEach(entity => { + console.log(entity.name); + console.log(` - Type: ${entity.type}, Salience: ${entity.salience}`); + if (entity.metadata && entity.metadata.wikipedia_url) { + console.log(` - Wikipedia URL: ${entity.metadata.wikipedia_url}$`); + } + }); // [END language_entities_text] } -function analyzeEntitiesInFile(bucketName, fileName) { +async function analyzeEntitiesInFile(bucketName, fileName) { // [START language_entities_gcs] // Imports the Google Cloud client library const language = require('@google-cloud/language'); @@ -159,27 +145,22 @@ function analyzeEntitiesInFile(bucketName, fileName) { }; // Detects entities in the document - client - .analyzeEntities({document: document}) - .then(results => { - const entities = results[0].entities; - - console.log('Entities:'); - entities.forEach(entity => { - console.log(entity.name); - console.log(` - Type: ${entity.type}, Salience: ${entity.salience}`); - if (entity.metadata && entity.metadata.wikipedia_url) { - console.log(` - Wikipedia URL: ${entity.metadata.wikipedia_url}$`); - } - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + const [result] = await client.analyzeEntities({document}); + const entities = result.entities; + + console.log('Entities:'); + entities.forEach(entity => { + console.log(entity.name); + console.log(` - Type: ${entity.type}, Salience: ${entity.salience}`); + if (entity.metadata && entity.metadata.wikipedia_url) { + console.log(` - Wikipedia URL: ${entity.metadata.wikipedia_url}$`); + } + }); + // [END language_entities_gcs] } -function analyzeSyntaxOfText(text) { +async function analyzeSyntaxOfText(text) { // [START language_syntax_text] // Imports the Google Cloud client library const language = require('@google-cloud/language'); @@ -199,24 +180,17 @@ function analyzeSyntaxOfText(text) { }; // Detects syntax in the document - client - .analyzeSyntax({document: document}) - .then(results => { - const syntax = results[0]; - - console.log('Tokens:'); - syntax.tokens.forEach(part => { - console.log(`${part.partOfSpeech.tag}: ${part.text.content}`); - console.log(`Morphology:`, part.partOfSpeech); - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + const [syntax] = await client.analyzeSyntax({document}); + + console.log('Tokens:'); + syntax.tokens.forEach(part => { + console.log(`${part.partOfSpeech.tag}: ${part.text.content}`); + console.log(`Morphology:`, part.partOfSpeech); + }); // [END language_syntax_text] } -function analyzeSyntaxInFile(bucketName, fileName) { +async function analyzeSyntaxInFile(bucketName, fileName) { // [START language_syntax_gcs] // Imports the Google Cloud client library const language = require('@google-cloud/language'); @@ -237,24 +211,17 @@ function analyzeSyntaxInFile(bucketName, fileName) { }; // Detects syntax in the document - client - .analyzeSyntax({document: document}) - .then(results => { - const syntax = results[0]; - - console.log('Parts of speech:'); - syntax.tokens.forEach(part => { - 
console.log(`${part.partOfSpeech.tag}: ${part.text.content}`); - console.log(`Morphology:`, part.partOfSpeech); - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + const [syntax] = await client.analyzeSyntax({document}); + + console.log('Parts of speech:'); + syntax.tokens.forEach(part => { + console.log(`${part.partOfSpeech.tag}: ${part.text.content}`); + console.log(`Morphology:`, part.partOfSpeech); + }); // [END language_syntax_gcs] } -function analyzeEntitySentimentOfText(text) { +async function analyzeEntitySentimentOfText(text) { // [START language_entity_sentiment_text] // Imports the Google Cloud client library const language = require('@google-cloud/language'); @@ -274,26 +241,20 @@ function analyzeEntitySentimentOfText(text) { }; // Detects sentiment of entities in the document - client - .analyzeEntitySentiment({document: document}) - .then(results => { - const entities = results[0].entities; - - console.log(`Entities and sentiments:`); - entities.forEach(entity => { - console.log(` Name: ${entity.name}`); - console.log(` Type: ${entity.type}`); - console.log(` Score: ${entity.sentiment.score}`); - console.log(` Magnitude: ${entity.sentiment.magnitude}`); - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + const [result] = await client.analyzeEntitySentiment({document}); + const entities = result.entities; + + console.log(`Entities and sentiments:`); + entities.forEach(entity => { + console.log(` Name: ${entity.name}`); + console.log(` Type: ${entity.type}`); + console.log(` Score: ${entity.sentiment.score}`); + console.log(` Magnitude: ${entity.sentiment.magnitude}`); + }); // [END language_entity_sentiment_text] } -function analyzeEntitySentimentInFile(bucketName, fileName) { +async function analyzeEntitySentimentInFile(bucketName, fileName) { // [START language_entity_sentiment_gcs] // Imports the Google Cloud client library const language = require('@google-cloud/language'); @@ -314,26 +275,20 @@ function analyzeEntitySentimentInFile(bucketName, fileName) { }; // Detects sentiment of entities in the document - client - .analyzeEntitySentiment({document: document}) - .then(results => { - const entities = results[0].entities; - - console.log(`Entities and sentiments:`); - entities.forEach(entity => { - console.log(` Name: ${entity.name}`); - console.log(` Type: ${entity.type}`); - console.log(` Score: ${entity.sentiment.score}`); - console.log(` Magnitude: ${entity.sentiment.magnitude}`); - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + const [result] = await client.analyzeEntitySentiment({document}); + const entities = result.entities; + + console.log(`Entities and sentiments:`); + entities.forEach(entity => { + console.log(` Name: ${entity.name}`); + console.log(` Type: ${entity.type}`); + console.log(` Score: ${entity.sentiment.score}`); + console.log(` Magnitude: ${entity.sentiment.magnitude}`); + }); // [END language_entity_sentiment_gcs] } -function classifyTextOfText(text) { +async function classifyTextOfText(text) { // [START language_classify_text] // Imports the Google Cloud client library const language = require('@google-cloud/language'); @@ -353,25 +308,15 @@ function classifyTextOfText(text) { }; // Classifies text in the document - client - .classifyText({document: document}) - .then(results => { - const classification = results[0]; - - console.log('Categories:'); - classification.categories.forEach(category => { - console.log( - `Name: ${category.name}, Confidence: ${category.confidence}` - ); - }); - }) - 
.catch(err => { - console.error('ERROR:', err); - }); + const [classification] = await client.classifyText({document}); + console.log('Categories:'); + classification.categories.forEach(category => { + console.log(`Name: ${category.name}, Confidence: ${category.confidence}`); + }); // [END language_classify_text] } -function classifyTextInFile(bucketName, fileName) { +async function classifyTextInFile(bucketName, fileName) { // [START language_classify_gcs] // Imports the Google Cloud client library. const language = require('@google-cloud/language'); @@ -392,116 +337,114 @@ function classifyTextInFile(bucketName, fileName) { }; // Classifies text in the document - client - .classifyText({document: document}) - .then(results => { - const classification = results[0]; - - console.log('Categories:'); - classification.categories.forEach(category => { - console.log( - `Name: ${category.name}, Confidence: ${category.confidence}` - ); - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + const [classification] = await client.classifyText({document}); + + console.log('Categories:'); + classification.categories.forEach(category => { + console.log(`Name: ${category.name}, Confidence: ${category.confidence}`); + }); // [END language_classify_gcs] } -require(`yargs`) - .demand(1) - .command( - `sentiment-text `, - `Detects sentiment of a string.`, - {}, - opts => analyzeSentimentOfText(opts.text) - ) - .command( - `sentiment-file `, - `Detects sentiment in a file in Google Cloud Storage.`, - {}, - opts => analyzeSentimentInFile(opts.bucketName, opts.fileName) - ) - .command(`entities-text `, `Detects entities in a string.`, {}, opts => - analyzeEntitiesOfText(opts.text) - ) - .command( - `entities-file `, - `Detects entities in a file in Google Cloud Storage.`, - {}, - opts => analyzeEntitiesInFile(opts.bucketName, opts.fileName) - ) - .command(`syntax-text `, `Detects syntax of a string.`, {}, opts => - analyzeSyntaxOfText(opts.text) - ) - .command( - `syntax-file `, - `Detects syntax in a file in Google Cloud Storage.`, - {}, - opts => analyzeSyntaxInFile(opts.bucketName, opts.fileName) - ) - .command( - `entity-sentiment-text `, - `Detects sentiment of the entities in a string.`, - {}, - opts => analyzeEntitySentimentOfText(opts.text) - ) - .command( - `entity-sentiment-file `, - `Detects sentiment of the entities in a file in Google Cloud Storage.`, - {}, - opts => analyzeEntitySentimentInFile(opts.bucketName, opts.fileName) - ) - .command(`classify-text `, `Classifies text of a string.`, {}, opts => - classifyTextOfText(opts.text) - ) - .command( - `classify-file `, - `Classifies text in a file in Google Cloud Storage.`, - {}, - opts => classifyTextInFile(opts.bucketName, opts.fileName) - ) - .example( - `node $0 sentiment-text "President Obama is speaking at the White House."` - ) - .example( - `node $0 sentiment-file my-bucket file.txt`, - `Detects sentiment in gs://my-bucket/file.txt` - ) - .example( - `node $0 entities-text "President Obama is speaking at the White House."` - ) - .example( - `node $0 entities-file my-bucket file.txt`, - `Detects entities in gs://my-bucket/file.txt` - ) - .example( - `node $0 syntax-text "President Obama is speaking at the White House."` - ) - .example( - `node $0 syntax-file my-bucket file.txt`, - `Detects syntax in gs://my-bucket/file.txt` - ) - .example( - `node $0 entity-sentiment-text "President Obama is speaking at the White House."` - ) - .example( - `node $0 entity-sentiment-file my-bucket file.txt`, - `Detects sentiment of entities in 
gs://my-bucket/file.txt` - ) - .example( - `node $0 classify-text "Android is a mobile operating system developed by Google, based on the Linux kernel and designed primarily for touchscreen mobile devices such as smartphones and tablets."` - ) - .example( - `node $0 classify-file my-bucket android_text.txt`, - `Detects syntax in gs://my-bucket/android_text.txt` - ) - .wrap(120) - .recommendCommands() - .epilogue( - `For more information, see https://cloud.google.com/natural-language/docs` - ) - .help() - .strict().argv; +async function main() { + require(`yargs`) + .demand(1) + .command( + `sentiment-text `, + `Detects sentiment of a string.`, + {}, + opts => analyzeSentimentOfText(opts.text) + ) + .command( + `sentiment-file `, + `Detects sentiment in a file in Google Cloud Storage.`, + {}, + opts => analyzeSentimentInFile(opts.bucketName, opts.fileName) + ) + .command( + `entities-text `, + `Detects entities in a string.`, + {}, + opts => analyzeEntitiesOfText(opts.text) + ) + .command( + `entities-file `, + `Detects entities in a file in Google Cloud Storage.`, + {}, + opts => analyzeEntitiesInFile(opts.bucketName, opts.fileName) + ) + .command(`syntax-text `, `Detects syntax of a string.`, {}, opts => + analyzeSyntaxOfText(opts.text) + ) + .command( + `syntax-file `, + `Detects syntax in a file in Google Cloud Storage.`, + {}, + opts => analyzeSyntaxInFile(opts.bucketName, opts.fileName) + ) + .command( + `entity-sentiment-text `, + `Detects sentiment of the entities in a string.`, + {}, + opts => analyzeEntitySentimentOfText(opts.text) + ) + .command( + `entity-sentiment-file `, + `Detects sentiment of the entities in a file in Google Cloud Storage.`, + {}, + opts => analyzeEntitySentimentInFile(opts.bucketName, opts.fileName) + ) + .command(`classify-text `, `Classifies text of a string.`, {}, opts => + classifyTextOfText(opts.text) + ) + .command( + `classify-file `, + `Classifies text in a file in Google Cloud Storage.`, + {}, + opts => classifyTextInFile(opts.bucketName, opts.fileName) + ) + .example( + `node $0 sentiment-text "President Obama is speaking at the White House."` + ) + .example( + `node $0 sentiment-file my-bucket file.txt`, + `Detects sentiment in gs://my-bucket/file.txt` + ) + .example( + `node $0 entities-text "President Obama is speaking at the White House."` + ) + .example( + `node $0 entities-file my-bucket file.txt`, + `Detects entities in gs://my-bucket/file.txt` + ) + .example( + `node $0 syntax-text "President Obama is speaking at the White House."` + ) + .example( + `node $0 syntax-file my-bucket file.txt`, + `Detects syntax in gs://my-bucket/file.txt` + ) + .example( + `node $0 entity-sentiment-text "President Obama is speaking at the White House."` + ) + .example( + `node $0 entity-sentiment-file my-bucket file.txt`, + `Detects sentiment of entities in gs://my-bucket/file.txt` + ) + .example( + `node $0 classify-text "Android is a mobile operating system developed by Google, based on the Linux kernel and designed primarily for touchscreen mobile devices such as smartphones and tablets."` + ) + .example( + `node $0 classify-file my-bucket android_text.txt`, + `Detects syntax in gs://my-bucket/android_text.txt` + ) + .wrap(120) + .recommendCommands() + .epilogue( + `For more information, see https://cloud.google.com/natural-language/docs` + ) + .help() + .strict().argv; +} + +main().catch(console.error); diff --git a/cloud-language/snippets/analyze.v1beta2.js b/cloud-language/snippets/analyze.v1beta2.js index 18de6fa5ac..c393a165f5 100644 --- 
a/cloud-language/snippets/analyze.v1beta2.js +++ b/cloud-language/snippets/analyze.v1beta2.js @@ -15,7 +15,7 @@ 'use strict'; -function analyzeSentimentOfText(text) { +async function analyzeSentimentOfText(text) { // [START language_sentiment_string] // Imports the Google Cloud client library const language = require('@google-cloud/language').v1beta2; @@ -35,28 +35,22 @@ function analyzeSentimentOfText(text) { }; // Detects the sentiment of the document - client - .analyzeSentiment({document: document}) - .then(results => { - const sentiment = results[0].documentSentiment; - console.log(`Document sentiment:`); - console.log(` Score: ${sentiment.score}`); - console.log(` Magnitude: ${sentiment.magnitude}`); - - const sentences = results[0].sentences; - sentences.forEach(sentence => { - console.log(`Sentence: ${sentence.text.content}`); - console.log(` Score: ${sentence.sentiment.score}`); - console.log(` Magnitude: ${sentence.sentiment.magnitude}`); - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + const [result] = await client.analyzeSentiment({document}); + const sentiment = result.documentSentiment; + console.log(`Document sentiment:`); + console.log(` Score: ${sentiment.score}`); + console.log(` Magnitude: ${sentiment.magnitude}`); + + const sentences = result.sentences; + sentences.forEach(sentence => { + console.log(`Sentence: ${sentence.text.content}`); + console.log(` Score: ${sentence.sentiment.score}`); + console.log(` Magnitude: ${sentence.sentiment.magnitude}`); + }); // [END language_sentiment_string] } -function analyzeSentimentInFile(bucketName, fileName) { +async function analyzeSentimentInFile(bucketName, fileName) { // [START language_sentiment_file] // Imports the Google Cloud client library const language = require('@google-cloud/language').v1beta2; @@ -77,28 +71,23 @@ function analyzeSentimentInFile(bucketName, fileName) { }; // Detects the sentiment of the document - client - .analyzeSentiment({document: document}) - .then(results => { - const sentiment = results[0].documentSentiment; - console.log(`Document sentiment:`); - console.log(` Score: ${sentiment.score}`); - console.log(` Magnitude: ${sentiment.magnitude}`); - - const sentences = results[0].sentences; - sentences.forEach(sentence => { - console.log(`Sentence: ${sentence.text.content}`); - console.log(` Score: ${sentence.sentiment.score}`); - console.log(` Magnitude: ${sentence.sentiment.magnitude}`); - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + const [result] = await client.analyzeSentiment({document}); + const sentiment = result.documentSentiment; + console.log(`Document sentiment:`); + console.log(` Score: ${sentiment.score}`); + console.log(` Magnitude: ${sentiment.magnitude}`); + + const sentences = result.sentences; + sentences.forEach(sentence => { + console.log(`Sentence: ${sentence.text.content}`); + console.log(` Score: ${sentence.sentiment.score}`); + console.log(` Magnitude: ${sentence.sentiment.magnitude}`); + }); + // [END language_sentiment_file] } -function analyzeEntitiesOfText(text) { +async function analyzeEntitiesOfText(text) { // [START language_entities_string] // Imports the Google Cloud client library const language = require('@google-cloud/language').v1beta2; @@ -118,27 +107,22 @@ function analyzeEntitiesOfText(text) { }; // Detects entities in the document - client - .analyzeEntities({document: document}) - .then(results => { - const entities = results[0].entities; - - console.log('Entities:'); - entities.forEach(entity => { - 
console.log(entity.name); - console.log(` - Type: ${entity.type}, Salience: ${entity.salience}`); - if (entity.metadata && entity.metadata.wikipedia_url) { - console.log(` - Wikipedia URL: ${entity.metadata.wikipedia_url}$`); - } - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + const [result] = await client.analyzeEntities({document}); + const entities = result.entities; + + console.log('Entities:'); + entities.forEach(entity => { + console.log(entity.name); + console.log(` - Type: ${entity.type}, Salience: ${entity.salience}`); + if (entity.metadata && entity.metadata.wikipedia_url) { + console.log(` - Wikipedia URL: ${entity.metadata.wikipedia_url}$`); + } + }); + // [END language_entities_string] } -function analyzeEntitiesInFile(bucketName, fileName) { +async function analyzeEntitiesInFile(bucketName, fileName) { // [START language_entities_file] // Imports the Google Cloud client library const language = require('@google-cloud/language').v1beta2; @@ -159,27 +143,22 @@ function analyzeEntitiesInFile(bucketName, fileName) { }; // Detects entities in the document - client - .analyzeEntities({document: document}) - .then(results => { - const entities = results[0].entities; - - console.log('Entities:'); - entities.forEach(entity => { - console.log(entity.name); - console.log(` - Type: ${entity.type}, Salience: ${entity.salience}`); - if (entity.metadata && entity.metadata.wikipedia_url) { - console.log(` - Wikipedia URL: ${entity.metadata.wikipedia_url}$`); - } - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + const [result] = await client.analyzeEntities({document}); + const entities = result.entities; + + console.log('Entities:'); + entities.forEach(entity => { + console.log(entity.name); + console.log(` - Type: ${entity.type}, Salience: ${entity.salience}`); + if (entity.metadata && entity.metadata.wikipedia_url) { + console.log(` - Wikipedia URL: ${entity.metadata.wikipedia_url}$`); + } + }); + // [END language_entities_file] } -function analyzeSyntaxOfText(text) { +async function analyzeSyntaxOfText(text) { // [START language_syntax_string] // Imports the Google Cloud client library const language = require('@google-cloud/language').v1beta2; @@ -199,24 +178,18 @@ function analyzeSyntaxOfText(text) { }; // Detects syntax in the document - client - .analyzeSyntax({document: document}) - .then(results => { - const syntax = results[0]; + const [syntax] = await client.analyzeSyntax({document}); + + console.log('Parts of speech:'); + syntax.tokens.forEach(part => { + console.log(`${part.partOfSpeech.tag}: ${part.text.content}`); + console.log(`Morphology:`, part.partOfSpeech); + }); - console.log('Parts of speech:'); - syntax.tokens.forEach(part => { - console.log(`${part.partOfSpeech.tag}: ${part.text.content}`); - console.log(`Morphology:`, part.partOfSpeech); - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); // [END language_syntax_string] } -function analyzeSyntaxInFile(bucketName, fileName) { +async function analyzeSyntaxInFile(bucketName, fileName) { // [START language_syntax_file] // Imports the Google Cloud client library const language = require('@google-cloud/language').v1beta2; @@ -237,24 +210,17 @@ function analyzeSyntaxInFile(bucketName, fileName) { }; // Detects syntax in the document - client - .analyzeSyntax({document: document}) - .then(results => { - const syntax = results[0]; + const [syntax] = await client.analyzeSyntax({document}); - console.log('Parts of speech:'); - syntax.tokens.forEach(part => { - 
console.log(`${part.partOfSpeech.tag}: ${part.text.content}`); - console.log(`Morphology:`, part.partOfSpeech); - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + console.log('Parts of speech:'); + syntax.tokens.forEach(part => { + console.log(`${part.partOfSpeech.tag}: ${part.text.content}`); + console.log(`Morphology:`, part.partOfSpeech); + }); // [END language_syntax_file] } -function classifyTextOfText(text) { +async function classifyTextOfText(text) { // [START language_classify_string] // Imports the Google Cloud client library const language = require('@google-cloud/language').v1beta2; @@ -274,21 +240,11 @@ function classifyTextOfText(text) { }; // Classifies text in the document - client - .classifyText({document: document}) - .then(results => { - const classification = results[0]; - - console.log('Categories:'); - classification.categories.forEach(category => { - console.log( - `Name: ${category.name}, Confidence: ${category.confidence}` - ); - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + const [classification] = await client.classifyText({document}); + console.log('Categories:'); + classification.categories.forEach(category => { + console.log(`Name: ${category.name}, Confidence: ${category.confidence}`); + }); // [END language_classify_string] } @@ -314,7 +270,7 @@ function classifyTextInFile(bucketName, fileName) { // Classifies text in the document client - .classifyText({document: document}) + .classifyText({document}) .then(results => { const classification = results[0]; @@ -331,79 +287,87 @@ function classifyTextInFile(bucketName, fileName) { // [END language_classify_file] } -require(`yargs`) - .demand(1) - .command( - `sentiment-text `, - `Detects sentiment of a string.`, - {}, - opts => analyzeSentimentOfText(opts.text) - ) - .command( - `sentiment-file `, - `Detects sentiment in a file in Google Cloud Storage.`, - {}, - opts => analyzeSentimentInFile(opts.bucketName, opts.fileName) - ) - .command(`entities-text `, `Detects entities in a string.`, {}, opts => - analyzeEntitiesOfText(opts.text) - ) - .command( - `entities-file `, - `Detects entities in a file in Google Cloud Storage.`, - {}, - opts => analyzeEntitiesInFile(opts.bucketName, opts.fileName) - ) - .command(`syntax-text `, `Detects syntax of a string.`, {}, opts => - analyzeSyntaxOfText(opts.text) - ) - .command( - `syntax-file `, - `Detects syntax in a file in Google Cloud Storage.`, - {}, - opts => analyzeSyntaxInFile(opts.bucketName, opts.fileName) - ) - .command(`classify-text `, `Classifies text of a string.`, {}, opts => - classifyTextOfText(opts.text) - ) - .command( - `classify-file `, - `Classifies text in a file in Google Cloud Storage.`, - {}, - opts => classifyTextInFile(opts.bucketName, opts.fileName) - ) - .example( - `node $0 sentiment-text "President Obama is speaking at the White House."` - ) - .example( - `node $0 sentiment-file my-bucket file.txt`, - `Detects sentiment in gs://my-bucket/file.txt` - ) - .example( - `node $0 entities-text "President Obama is speaking at the White House."` - ) - .example( - `node $0 entities-file my-bucket file.txt`, - `Detects entities in gs://my-bucket/file.txt` - ) - .example( - `node $0 syntax-text "President Obama is speaking at the White House."` - ) - .example( - `node $0 syntax-file my-bucket file.txt`, - `Detects syntax in gs://my-bucket/file.txt` - ) - .example( - `node $0 classify-text "Android is a mobile operating system developed by Google, based on the Linux kernel and designed primarily for touchscreen 
mobile devices such as smartphones and tablets."` - ) - .example( - `node $0 classify-file my-bucket android_text.txt`, - `Detects syntax in gs://my-bucket/android_text.txt` - ) - .wrap(120) - .recommendCommands() - .epilogue( - `For more information, see https://cloud.google.com/natural-language/docs` - ) - .help() - .strict().argv; +async function main() { + require(`yargs`) + .demand(1) + .command( + `sentiment-text `, + `Detects sentiment of a string.`, + {}, + opts => analyzeSentimentOfText(opts.text) + ) + .command( + `sentiment-file `, + `Detects sentiment in a file in Google Cloud Storage.`, + {}, + opts => analyzeSentimentInFile(opts.bucketName, opts.fileName) + ) + .command( + `entities-text `, + `Detects entities in a string.`, + {}, + opts => analyzeEntitiesOfText(opts.text) + ) + .command( + `entities-file `, + `Detects entities in a file in Google Cloud Storage.`, + {}, + opts => analyzeEntitiesInFile(opts.bucketName, opts.fileName) + ) + .command(`syntax-text `, `Detects syntax of a string.`, {}, opts => + analyzeSyntaxOfText(opts.text) + ) + .command( + `syntax-file `, + `Detects syntax in a file in Google Cloud Storage.`, + {}, + opts => analyzeSyntaxInFile(opts.bucketName, opts.fileName) + ) + .command(`classify-text `, `Classifies text of a string.`, {}, opts => + classifyTextOfText(opts.text) + ) + .command( + `classify-file `, + `Classifies text in a file in Google Cloud Storage.`, + {}, + opts => classifyTextInFile(opts.bucketName, opts.fileName) + ) + .example( + `node $0 sentiment-text "President Obama is speaking at the White House."` + ) + .example( + `node $0 sentiment-file my-bucket file.txt`, + `Detects sentiment in gs://my-bucket/file.txt` + ) + .example( + `node $0 entities-text "President Obama is speaking at the White House."` + ) + .example( + `node $0 entities-file my-bucket file.txt`, + `Detects entities in gs://my-bucket/file.txt` + ) + .example( + `node $0 syntax-text "President Obama is speaking at the White House."` + ) + .example( + `node $0 syntax-file my-bucket file.txt`, + `Detects syntax in gs://my-bucket/file.txt` + ) + .example( + `node $0 classify-text "Android is a mobile operating system developed by Google, based on the Linux kernel and designed primarily for touchscreen mobile devices such as smartphones and tablets."` + ) + .example( + `node $0 classify-file my-bucket android_text.txt`, + `Detects syntax in gs://my-bucket/android_text.txt` + ) + .wrap(120) + .recommendCommands() + .epilogue( + `For more information, see https://cloud.google.com/natural-language/docs` + ) + .help() + .strict().argv; +} + +main().catch(console.error); diff --git a/cloud-language/snippets/automl/automlNaturalLanguageDataset.js b/cloud-language/snippets/automl/automlNaturalLanguageDataset.js index 65c4db8b63..b6563d4e4a 100755 --- a/cloud-language/snippets/automl/automlNaturalLanguageDataset.js +++ b/cloud-language/snippets/automl/automlNaturalLanguageDataset.js @@ -23,7 +23,12 @@ `use strict`; -function createDataset(projectId, computeRegion, datasetName, multilabel) { +async function createDataset( + projectId, + computeRegion, + datasetName, + multilabel +) { // [START automl_natural_language_createDataset] const automl = require(`@google-cloud/automl`); @@ -55,31 +60,26 @@ function createDataset(projectId, computeRegion, datasetName, multilabel) { }; // Create a dataset with the dataset metadata in the region.
- client - .createDataset({parent: projectLocation, dataset: myDataset}) - .then(responses => { - const dataset = responses[0]; - - // Display the dataset information. - console.log(`Dataset name: ${dataset.name}`); - console.log(`Dataset id: ${dataset.name.split(`/`).pop(-1)}`); - console.log(`Dataset display name: ${dataset.displayName}`); - console.log(`Dataset example count: ${dataset.exampleCount}`); - console.log(`Text classification type:`); - console.log( - `\t ${dataset.textClassificationDatasetMetadata.classificationType}` - ); - console.log(`Dataset create time:`); - console.log(`\tseconds: ${dataset.createTime.seconds}`); - console.log(`\tnanos: ${dataset.createTime.nanos}`); - }) - .catch(err => { - console.error(err); - }); + const [dataset] = await client.createDataset({ + parent: projectLocation, + dataset: myDataset, + }); + // Display the dataset information. + console.log(`Dataset name: ${dataset.name}`); + console.log(`Dataset id: ${dataset.name.split(`/`).pop(-1)}`); + console.log(`Dataset display name: ${dataset.displayName}`); + console.log(`Dataset example count: ${dataset.exampleCount}`); + console.log(`Text classification type:`); + console.log( + `\t ${dataset.textClassificationDatasetMetadata.classificationType}` + ); + console.log(`Dataset create time:`); + console.log(`\tseconds: ${dataset.createTime.seconds}`); + console.log(`\tnanos: ${dataset.createTime.nanos}`); // [END automl_natural_language_createDataset] } -function listDatasets(projectId, computeRegion, filter) { +async function listDatasets(projectId, computeRegion, filter) { // [START automl_natural_language_listDatasets] const automl = require(`@google-cloud/automl`); @@ -96,35 +96,30 @@ function listDatasets(projectId, computeRegion, filter) { const projectLocation = client.locationPath(projectId, computeRegion); // List all the datasets available in the region by applying filter. - client - .listDatasets({parent: projectLocation, filter: filter}) - .then(responses => { - const datasets = responses[0]; - - // Display the dataset information. - console.log(`List of datasets:`); - datasets.forEach(dataset => { - console.log(`Dataset name: ${dataset.name}`); - console.log(`Dataset id: ${dataset.name.split(`/`).pop(-1)}`); - console.log(`Dataset display name: ${dataset.displayName}`); - console.log(`Dataset example count: ${dataset.exampleCount}`); - console.log(`Text classification type:`); - console.log( - `\t ${dataset.textClassificationDatasetMetadata.classificationType}` - ); - console.log(`Dataset create time: `); - console.log(`\tseconds: ${dataset.createTime.seconds}`); - console.log(`\tnanos: ${dataset.createTime.nanos}`); - console.log(`\n`); - }); - }) - .catch(err => { - console.error(err); - }); + const [datasets] = await client.listDatasets({ + parent: projectLocation, + filter: filter, + }); + // Display the dataset information. 
+ console.log(`List of datasets:`); + datasets.forEach(dataset => { + console.log(`Dataset name: ${dataset.name}`); + console.log(`Dataset id: ${dataset.name.split(`/`).pop(-1)}`); + console.log(`Dataset display name: ${dataset.displayName}`); + console.log(`Dataset example count: ${dataset.exampleCount}`); + console.log(`Text classification type:`); + console.log( + `\t ${dataset.textClassificationDatasetMetadata.classificationType}` + ); + console.log(`Dataset create time: `); + console.log(`\tseconds: ${dataset.createTime.seconds}`); + console.log(`\tnanos: ${dataset.createTime.nanos}`); + console.log(`\n`); + }); // [END automl_natural_language_listDatasets] } -function getDataset(projectId, computeRegion, datasetId) { +async function getDataset(projectId, computeRegion, datasetId) { // [START automl_natural_language_getDataset] const automl = require(`@google-cloud/automl`); @@ -141,32 +136,24 @@ function getDataset(projectId, computeRegion, datasetId) { const datasetFullId = client.datasetPath(projectId, computeRegion, datasetId); // Get complete detail of the dataset. - client - .getDataset({name: datasetFullId}) - .then(responses => { - const dataset = responses[0]; - - // Display the dataset information. - console.log(`Dataset name: ${dataset.name}`); - console.log(`Dataset id: ${dataset.name.split(`/`).pop(-1)}`); - console.log(`Dataset display name: ${dataset.displayName}`); - console.log(`Dataset example count: ${dataset.exampleCount}`); - console.log( - `Text classification type: ${ - dataset.textClassificationDatasetMetadata.classificationType - }` - ); - console.log(`Dataset create time: `); - console.log(`\tseconds: ${dataset.createTime.seconds}`); - console.log(`\tnanos: ${dataset.createTime.nanos}`); - }) - .catch(err => { - console.error(err); - }); + const [dataset] = await client.getDataset({name: datasetFullId}); + // Display the dataset information. + console.log(`Dataset name: ${dataset.name}`); + console.log(`Dataset id: ${dataset.name.split(`/`).pop(-1)}`); + console.log(`Dataset display name: ${dataset.displayName}`); + console.log(`Dataset example count: ${dataset.exampleCount}`); + console.log( + `Text classification type: ${ + dataset.textClassificationDatasetMetadata.classificationType + }` + ); + console.log(`Dataset create time: `); + console.log(`\tseconds: ${dataset.createTime.seconds}`); + console.log(`\tnanos: ${dataset.createTime.nanos}`); // [END automl_natural_language_getDataset] } -function importData(projectId, computeRegion, datasetId, path) { +async function importData(projectId, computeRegion, datasetId, path) { // [START automl_natural_language_importDataset] const automl = require(`@google-cloud/automl`); @@ -192,24 +179,19 @@ function importData(projectId, computeRegion, datasetId, path) { }; // Import the dataset from the input URI. - client - .importData({name: datasetFullId, inputConfig: inputConfig}) - .then(responses => { - const operation = responses[0]; - console.log(`Processing import...`); - return operation.promise(); - }) - .then(responses => { - // The final result of the operation. - if (responses[2].done === true) console.log(`Data imported.`); - }) - .catch(err => { - console.error(err); - }); + const [operation] = await client.importData({ + name: datasetFullId, + inputConfig: inputConfig, + }); + console.log(`Processing import...`); + const response = await operation.promise(); + // The final result of the operation.
+ if (response[2].done === true) console.log(`Data imported.`); + + // [END automl_natural_language_importDataset] } -function exportData(projectId, computeRegion, datasetId, outputUri) { +async function exportData(projectId, computeRegion, datasetId, outputUri) { // [START automl_natural_language_exportDataset] const automl = require(`@google-cloud/automl`); @@ -234,24 +216,18 @@ function exportData(projectId, computeRegion, datasetId, outputUri) { }; // Export the data to the output URI. - client - .exportData({name: datasetFullId, outputConfig: outputConfig}) - .then(responses => { - const operation = responses[0]; - console.log(`Processing export...`); - return operation.promise(); - }) - .then(responses => { - // The final result of the operation. - if (responses[2].done === true) console.log(`Data exported.`); - }) - .catch(err => { - console.error(err); - }); + const [operation] = await client.exportData({ + name: datasetFullId, + outputConfig: outputConfig, + }); + console.log(`Processing export...`); + const response = await operation.promise(); + // The final result of the operation. + if (response[2].done === true) console.log(`Data exported.`); // [END automl_natural_language_exportDataset] } -function deleteDataset(projectId, computeRegion, datasetId) { +async function deleteDataset(projectId, computeRegion, datasetId) { // [START automl_natural_language_deleteDataset] const automl = require(`@google-cloud/automl`); @@ -268,126 +244,121 @@ function deleteDataset(projectId, computeRegion, datasetId) { const datasetFullId = client.datasetPath(projectId, computeRegion, datasetId); // Delete a dataset. - client - .deleteDataset({name: datasetFullId}) - .then(responses => { - const operation = responses[0]; - return operation.promise(); - }) - .then(responses => { - // The final result of the operation. - if (responses[2].done === true) console.log(`Dataset deleted.`); - }) - .catch(err => { - console.error(err); - }); + const [operation] = await client.deleteDataset({name: datasetFullId}); + const response = await operation.promise(); + // The final result of the operation. + if (response[2].done === true) console.log(`Dataset deleted.`); // [END automl_natural_language_deleteDataset] } -require(`yargs`) - .demand(1) - .options({ - computeRegion: { - alias: `c`, - type: `string`, - default: process.env.REGION_NAME, - requiresArg: true, - description: `region name e.g. "us-central1"`, - }, - datasetName: { - alias: `n`, - type: `string`, - default: `testDataSet`, - requiresArg: true, - description: `Name of the Dataset`, - }, - datasetId: { - alias: `i`, - type: `string`, - requiresArg: true, - description: `Id of the dataset`, - }, - filter: { - alias: `f`, - default: `text_classification_dataset_metadata:*`, - type: `string`, - requiresArg: false, - description: `filter expression`, - }, - multilabel: { - alias: `m`, - type: `string`, - default: false, - requiresArg: true, - description: - `Type of the classification problem, ` + - `False - MULTICLASS, True - MULTILABEL.`, - }, - outputUri: { - alias: `o`, - type: `string`, - requiresArg: true, - description: `URI (or local path) to export dataset`, - }, - path: { - alias: `p`, - type: `string`, - global: true, - default: `gs://nodejs-docs-samples-vcm/flowerTraindataMini.csv`, - requiresArg: true, - description: `URI or local path to input .csv, or array of .csv paths`, - }, - projectId: { - alias: `z`, - type: `number`, - default: process.env.GCLOUD_PROJECT, - requiresArg: true, - description: `The GCLOUD_PROJECT string, e.g.
"my-gcloud-project"`, - }, - }) - .command(`create-dataset`, `creates a new Dataset`, {}, opts => - createDataset( - opts.projectId, - opts.computeRegion, - opts.datasetName, - opts.multilabel - ) - ) - .command(`list-datasets`, `list all Datasets`, {}, opts => - listDatasets(opts.projectId, opts.computeRegion, opts.filter) - ) - .command(`get-dataset`, `Get a Dataset`, {}, opts => - getDataset(opts.projectId, opts.computeRegion, opts.datasetId) - ) - .command(`delete-dataset`, `Delete a dataset`, {}, opts => - deleteDataset(opts.projectId, opts.computeRegion, opts.datasetId) - ) - .command(`import-data`, `Import labeled items into dataset`, {}, opts => - importData(opts.projectId, opts.computeRegion, opts.datasetId, opts.path) - ) - .command( - `export-data`, - `Export a dataset to a Google Cloud Storage Bucket`, - {}, - opts => - exportData( +async function main() { + require(`yargs`) + .demand(1) + .options({ + computeRegion: { + alias: `c`, + type: `string`, + default: process.env.REGION_NAME, + requiresArg: true, + description: `region name e.g. "us-central1"`, + }, + datasetName: { + alias: `n`, + type: `string`, + default: `testDataSet`, + requiresArg: true, + description: `Name of the Dataset`, + }, + datasetId: { + alias: `i`, + type: `string`, + requiresArg: true, + description: `Id of the dataset`, + }, + filter: { + alias: `f`, + default: `text_classification_dataset_metadata:*`, + type: `string`, + requiresArg: false, + description: `filter expression`, + }, + multilabel: { + alias: `m`, + type: `string`, + default: false, + requiresArg: true, + description: + `Type of the classification problem, ` + + `False - MULTICLASS, True - MULTILABEL.`, + }, + outputUri: { + alias: `o`, + type: `string`, + requiresArg: true, + description: `URI (or local path) to export dataset`, + }, + path: { + alias: `p`, + type: `string`, + global: true, + default: `gs://nodejs-docs-samples-vcm/flowerTraindataMini.csv`, + requiresArg: true, + description: `URI or local path to input .csv, or array of .csv paths`, + }, + projectId: { + alias: `z`, + type: `number`, + default: process.env.GCLOUD_PROJECT, + requiresArg: true, + description: `The GCLOUD_PROJECT string, e.g. 
"my-gcloud-project"`, + }, + }) + .command(`create-dataset`, `creates a new Dataset`, {}, opts => + createDataset( opts.projectId, opts.computeRegion, - opts.datasetId, - opts.outputUri + opts.datasetName, + opts.multilabel ) - ) - .example(`node $0 create-dataset -n "newDataSet"`) - .example(`node $0 list-datasets -f "imageClassificationDatasetMetadata:*"`) - .example(`node $0 get-dataset -i "DATASETID"`) - .example(`node $0 delete-dataset -i "DATASETID"`) - .example( - `node $0 import-data -i "dataSetId" -p "gs://myproject/mytraindata.csv"` - ) - .example( - `node $0 export-data -i "dataSetId" -o "gs://myproject/outputdestination.csv"` - ) - .wrap(120) - .recommendCommands() - .help() - .strict().argv; + ) + .command(`list-datasets`, `list all Datasets`, {}, opts => + listDatasets(opts.projectId, opts.computeRegion, opts.filter) + ) + .command(`get-dataset`, `Get a Dataset`, {}, opts => + getDataset(opts.projectId, opts.computeRegion, opts.datasetId) + ) + .command(`delete-dataset`, `Delete a dataset`, {}, opts => + deleteDataset(opts.projectId, opts.computeRegion, opts.datasetId) + ) + .command(`import-data`, `Import labeled items into dataset`, {}, opts => + importData(opts.projectId, opts.computeRegion, opts.datasetId, opts.path) + ) + .command( + `export-data`, + `Export a dataset to a Google Cloud Storage Bucket`, + {}, + opts => + exportData( + opts.projectId, + opts.computeRegion, + opts.datasetId, + opts.outputUri + ) + ) + .example(`node $0 create-dataset -n "newDataSet"`) + .example(`node $0 list-datasets -f "imageClassificationDatasetMetadata:*"`) + .example(`node $0 get-dataset -i "DATASETID"`) + .example(`node $0 delete-dataset -i "DATASETID"`) + .example( + `node $0 import-data -i "dataSetId" -p "gs://myproject/mytraindata.csv"` + ) + .example( + `node $0 export-data -i "dataSetId" -o "gs://myproject/outputdestination.csv"` + ) + .wrap(120) + .recommendCommands() + .help() + .strict().argv; +} + +main().catch(console.error); diff --git a/cloud-language/snippets/automl/automlNaturalLanguageModel.js b/cloud-language/snippets/automl/automlNaturalLanguageModel.js index a20472b8c9..96492852c0 100755 --- a/cloud-language/snippets/automl/automlNaturalLanguageModel.js +++ b/cloud-language/snippets/automl/automlNaturalLanguageModel.js @@ -23,7 +23,7 @@ `use strict`; -function createModel(projectId, computeRegion, datasetId, modelName) { +async function createModel(projectId, computeRegion, datasetId, modelName) { // [START automl_natural_language_createModel] const automl = require(`@google-cloud/automl`); @@ -48,44 +48,33 @@ function createModel(projectId, computeRegion, datasetId, modelName) { }; // Create a model with the model metadata in the region. - client - .createModel({parent: projectLocation, model: myModel}) - .then(responses => { - const operation = responses[0]; - const initialApiResponse = responses[1]; - - console.log(`Training operation name: ${initialApiResponse.name}`); - console.log(`Training started...`); - return operation.promise(); - }) - .then(responses => { - // The final result of the operation. - const model = responses[0]; - - // Retrieve deployment state. - let deploymentState = ``; - if (model.deploymentState === 1) { - deploymentState = `deployed`; - } else if (model.deploymentState === 2) { - deploymentState = `undeployed`; - } - - // Display the model information. 
- console.log(`Model name: ${model.name}`); - console.log(`Model id: ${model.name.split(`/`).pop(-1)}`); - console.log(`Model display name: ${model.displayName}`); - console.log(`Model create time:`); - console.log(`\tseconds: ${model.createTime.seconds}`); - console.log(`\tnanos: ${model.createTime.nanos}`); - console.log(`Model deployment state: ${deploymentState}`); - }) - .catch(err => { - console.error(err); - }); + const [operation, initialApiResponse] = await client.createModel({ + parent: projectLocation, + model: myModel, + }); + console.log(`Training operation name: ${initialApiResponse.name}`); + console.log(`Training started...`); + const [model] = await operation.promise(); + // Retrieve deployment state. + let deploymentState = ``; + if (model.deploymentState === 1) { + deploymentState = `deployed`; + } else if (model.deploymentState === 2) { + deploymentState = `undeployed`; + } + + // Display the model information. + console.log(`Model name: ${model.name}`); + console.log(`Model id: ${model.name.split(`/`).pop(-1)}`); + console.log(`Model display name: ${model.displayName}`); + console.log(`Model create time:`); + console.log(`\tseconds: ${model.createTime.seconds}`); + console.log(`\tnanos: ${model.createTime.nanos}`); + console.log(`Model deployment state: ${deploymentState}`); // [END automl_natural_language_createModel] } -function getOperationStatus(operationFullId) { +async function getOperationStatus(operationFullId) { // [START automl_natural_language_getOperationStatus] const automl = require(`@google-cloud/automl`); @@ -98,14 +87,14 @@ function getOperationStatus(operationFullId) { // Get the latest state of a long-running operation. // Get the latest state of a long-running operation. - client.operationsClient.getOperation(operationFullId).then(responses => { - const response = responses[0]; - console.log(`Operation status: ${response}`); - }); + const [response] = await client.operationsClient.getOperation( + operationFullId + ); + console.log(`Operation status: ${response}`); // [END automl_natural_language_getOperationStatus] } -function listModels(projectId, computeRegion, filter) { +async function listModels(projectId, computeRegion, filter) { // [START automl_natural_language_listModels] const automl = require(`@google-cloud/automl`); @@ -123,80 +112,65 @@ function listModels(projectId, computeRegion, filter) { // List all the models available in the region by applying filter. if (filter === ``) filter = `textClassificationModelMetadata:*`; - client - .listModels({ - parent: projectLocation, - filter: filter, - }) - .then(responses => { - const models = responses[0]; - - // Display the model information. 
- console.log(`List of models:`); - models.forEach(model => { - console.log(`Model name: ${model.name}`); - console.log(`Model id: ${model.name.split(`/`).pop(-1)}`); - console.log(`Model display name: ${model.displayName}`); - console.log(`Model dataset id: ${model.datasetId}`); - if (model.modelMetadata === `translationModelMetadata`) { - console.log(`Translation model metadata:`); - console.log( - `\tBase model: ${model.translationModelMetadata.baseModel}` - ); - console.log( - `\tSource language code: ${ - model.translationModelMetadata.sourceLanguageCode - }` - ); - console.log( - `\tTarget language code: ${ - model.translationModelMetadata.targetLanguageCode - }` - ); - } else if (model.modelMetadata === `textClassificationModelMetadata`) { - console.log( - `Text classification model metadata: ${ - model.textClassificationModelMetadata - }` - ); - } else if (model.modelMetadata === `imageClassificationModelMetadata`) { - console.log(`Image classification model metadata:`); - console.log( - `\tBase model id: ${ - model.imageClassificationModelMetadata.baseModelId - }` - ); - console.log( - `\tTrain budget: ${ - model.imageClassificationModelMetadata.trainBudget - }` - ); - console.log( - `\tTrain cost: ${model.imageClassificationModelMetadata.trainCost}` - ); - console.log( - `\tStop reason: ${ - model.imageClassificationModelMetadata.stopReason - }` - ); - } - console.log(`Model create time:`); - console.log(`\tseconds: ${model.createTime.seconds}`); - console.log(`\tnanos: ${model.createTime.nanos}`); - console.log(`Model update time:`); - console.log(`\tseconds: ${model.updateTime.seconds}`); - console.log(`\tnanos: ${model.updateTime.nanos}`); - console.log(`Model deployment state: ${model.deploymentState}`); - console.log(`\n`); - }); - }) - .catch(err => { - console.error(err); - }); + const [models] = await client.listModels({ + parent: projectLocation, + filter: filter, + }); + + // Display the model information. 
+ console.log(`List of models:`); + models.forEach(model => { + console.log(`Model name: ${model.name}`); + console.log(`Model id: ${model.name.split(`/`).pop(-1)}`); + console.log(`Model display name: ${model.displayName}`); + console.log(`Model dataset id: ${model.datasetId}`); + if (model.modelMetadata === `translationModelMetadata`) { + console.log(`Translation model metadata:`); + console.log(`\tBase model: ${model.translationModelMetadata.baseModel}`); + console.log( + `\tSource language code: ${ + model.translationModelMetadata.sourceLanguageCode + }` + ); + console.log( + `\tTarget language code: ${ + model.translationModelMetadata.targetLanguageCode + }` + ); + } else if (model.modelMetadata === `textClassificationModelMetadata`) { + console.log( + `Text classification model metadata: ${ + model.textClassificationModelMetadata + }` + ); + } else if (model.modelMetadata === `imageClassificationModelMetadata`) { + console.log(`Image classification model metadata:`); + console.log( + `\tBase model id: ${model.imageClassificationModelMetadata.baseModelId}` + ); + console.log( + `\tTrain budget: ${model.imageClassificationModelMetadata.trainBudget}` + ); + console.log( + `\tTrain cost: ${model.imageClassificationModelMetadata.trainCost}` + ); + console.log( + `\tStop reason: ${model.imageClassificationModelMetadata.stopReason}` + ); + } + console.log(`Model create time:`); + console.log(`\tseconds: ${model.createTime.seconds}`); + console.log(`\tnanos: ${model.createTime.nanos}`); + console.log(`Model update time:`); + console.log(`\tseconds: ${model.updateTime.seconds}`); + console.log(`\tnanos: ${model.updateTime.nanos}`); + console.log(`Model deployment state: ${model.deploymentState}`); + console.log(`\n`); + }); // [END automl_natural_language_listModels] } -function getModel(projectId, computeRegion, modelId) { +async function getModel(projectId, computeRegion, modelId) { // [START automl_natural_language_getModel] const automl = require(`@google-cloud/automl`); @@ -213,71 +187,63 @@ function getModel(projectId, computeRegion, modelId) { const modelFullId = client.modelPath(projectId, computeRegion, modelId); // Get complete detail of the model. - client - .getModel({name: modelFullId}) - .then(responses => { - const model = responses[0]; - - // Display the model information. 
- console.log(`Model name: ${model.name}`); - console.log(`Model id: ${model.name.split(`/`).pop(-1)}`); - console.log(`Model display name: ${model.displayName}`); - console.log(`Model dataset id: ${model.datasetId}`); - if (model.modelMetadata === `translationModelMetadata`) { - console.log(`Translation model metadata:`); - console.log( - `\tBase model: ${model.translationModelMetadata.baseModel}` - ); - console.log( - `\tSource language code: ${ - model.translationModelMetadata.sourceLanguageCode - }` - ); - console.log( - `\tTarget language code: ${ - model.translationModelMetadata.targetLanguageCode - }` - ); - } else if (model.modelMetadata === `textClassificationModelMetadata`) { - console.log( - `Text classification model metadata: ${ - model.textClassificationModelMetadata - }` - ); - } else if (model.modelMetadata === `imageClassificationModelMetadata`) { - console.log(`Image classification model metadata:`); - console.log( - `\tBase model id: ${ - model.imageClassificationModelMetadata.baseModelId - }` - ); - console.log( - `\tTrain budget: ${ - model.imageClassificationModelMetadata.trainBudget - }` - ); - console.log( - `\tTrain cost: ${model.imageClassificationModelMetadata.trainCost}` - ); - console.log( - `\tStop reason: ${model.imageClassificationModelMetadata.stopReason}` - ); - } - console.log(`Model create time:`); - console.log(`\tseconds: ${model.createTime.seconds}`); - console.log(`\tnanos: ${model.createTime.nanos}`); - console.log(`Model update time:`); - console.log(`\tseconds: ${model.updateTime.seconds}`); - console.log(`\tnanos: ${model.updateTime.nanos}`); - console.log(`Model deployment state: ${model.deploymentState}`); - }) - .catch(err => { - console.error(err); - }); + const [model] = await client.getModel({name: modelFullId}); + + // Display the model information. 
+ console.log(`Model name: ${model.name}`); + console.log(`Model id: ${model.name.split(`/`).pop(-1)}`); + console.log(`Model display name: ${model.displayName}`); + console.log(`Model dataset id: ${model.datasetId}`); + if (model.modelMetadata === `translationModelMetadata`) { + console.log(`Translation model metadata:`); + console.log(`\tBase model: ${model.translationModelMetadata.baseModel}`); + console.log( + `\tSource language code: ${ + model.translationModelMetadata.sourceLanguageCode + }` + ); + console.log( + `\tTarget language code: ${ + model.translationModelMetadata.targetLanguageCode + }` + ); + } else if (model.modelMetadata === `textClassificationModelMetadata`) { + console.log( + `Text classification model metadata: ${ + model.textClassificationModelMetadata + }` + ); + } else if (model.modelMetadata === `imageClassificationModelMetadata`) { + console.log(`Image classification model metadata:`); + console.log( + `\tBase model id: ${model.imageClassificationModelMetadata.baseModelId}` + ); + console.log( + `\tTrain budget: ${model.imageClassificationModelMetadata.trainBudget}` + ); + console.log( + `\tTrain cost: ${model.imageClassificationModelMetadata.trainCost}` + ); + console.log( + `\tStop reason: ${model.imageClassificationModelMetadata.stopReason}` + ); + } + console.log(`Model create time:`); + console.log(`\tseconds: ${model.createTime.seconds}`); + console.log(`\tnanos: ${model.createTime.nanos}`); + console.log(`Model update time:`); + console.log(`\tseconds: ${model.updateTime.seconds}`); + console.log(`\tnanos: ${model.updateTime.nanos}`); + console.log(`Model deployment state: ${model.deploymentState}`); // [END automl_natural_language_getModel] } -function listModelEvaluations(projectId, computeRegion, modelId, filter_) { +async function listModelEvaluations( + projectId, + computeRegion, + modelId, + filter_ +) { // [START automl_natural_language_listModelEvaluations] const automl = require(`@google-cloud/automl`); const util = require(`util`); @@ -296,22 +262,19 @@ function listModelEvaluations(projectId, computeRegion, modelId, filter_) { const modelFullId = client.modelPath(projectId, computeRegion, modelId); // List all the model evaluations in the model by applying filter. - client - .listModelEvaluations({parent: modelFullId, filter: filter_}) - .then(responses => { - const elements = responses[0]; - console.log(`List of model evaluations:`); - elements.forEach(element => { - console.log(util.inspect(element, false, null)); - }); - }) - .catch(err => { - console.error(err); - }); + const [elements] = await client.listModelEvaluations({ + parent: modelFullId, + filter: filter_, + }); + console.log(`List of model evaluations:`); + elements.forEach(element => { + console.log(util.inspect(element, false, null)); + }); + // [END automl_natural_language_listModelEvaluations] } -function getModelEvaluation( +async function getModelEvaluation( projectId, computeRegion, modelId, @@ -340,19 +303,14 @@ function getModelEvaluation( ); // Get complete detail of the model evaluation. 
-  client
-    .getModelEvaluation({name: modelEvaluationFullId})
-    .then(responses => {
-      const response = responses[0];
-      console.log(util.inspect(response, false, null));
-    })
-    .catch(err => {
-      console.error(err);
-    });
+  const [response] = await client.getModelEvaluation({
+    name: modelEvaluationFullId,
+  });
+  console.log(util.inspect(response, false, null));
   // [END automl_natural_language_getModelEvaluation]
 }
 
-function displayEvaluation(projectId, computeRegion, modelId, filter) {
+async function displayEvaluation(projectId, computeRegion, modelId, filter) {
   // [START automl_natural_language_displayEvaluation]
   const automl = require(`@google-cloud/automl`);
   const math = require(`mathjs`);
@@ -371,83 +329,72 @@ function displayEvaluation(projectId, computeRegion, modelId, filter) {
   const modelFullId = client.modelPath(projectId, computeRegion, modelId);
 
   // List all the model evaluations in the model by applying filter.
-  client
-    .listModelEvaluations({parent: modelFullId, filter: filter})
-    .then(respond => {
-      const response = respond[0];
-      response.forEach(element => {
-        // There is evaluation for each class in a model and for overall model.
-        // Get only the evaluation of overall model.
-        if (!element.annotationSpecId) {
-          const modelEvaluationId = element.name.split(`/`).pop(-1);
-
-          // Resource name for the model evaluation.
-          const modelEvaluationFullId = client.modelEvaluationPath(
-            projectId,
-            computeRegion,
-            modelId,
-            modelEvaluationId
-          );
+  const [response] = await client.listModelEvaluations({
+    parent: modelFullId,
+    filter: filter,
+  });
+  // Note: forEach does not await its async callback, so the per-evaluation
+  // lookups below run concurrently and may log out of order.
+  response.forEach(async element => {
+    // There is evaluation for each class in a model and for overall model.
+    // Get only the evaluation of overall model.
+    if (!element.annotationSpecId) {
+      const modelEvaluationId = element.name.split(`/`).pop();
+
+      // Resource name for the model evaluation.
+      const modelEvaluationFullId = client.modelEvaluationPath(
+        projectId,
+        computeRegion,
+        modelId,
+        modelEvaluationId
+      );
+
+      // Get a model evaluation.
+      const [modelEvaluation] = await client.getModelEvaluation({
+        name: modelEvaluationFullId,
+      });
+
+      const classMetrics = modelEvaluation.classificationEvaluationMetrics;
+
+      const confidenceMetricsEntries = classMetrics.confidenceMetricsEntry;
 
-          // Get a model evaluation.
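+      // Each confidenceMetricsEntry reports metrics at one confidence
+      // threshold; the loop below picks out the entry for 0.5.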
-          client
-            .getModelEvaluation({name: modelEvaluationFullId})
-            .then(responses => {
-              const modelEvaluation = responses[0];
-
-              const classMetrics =
-                modelEvaluation.classificationEvaluationMetrics;
-
-              const confidenceMetricsEntries =
-                classMetrics.confidenceMetricsEntry;
-
-              // Showing model score based on threshold of 0.5
-              confidenceMetricsEntries.forEach(confidenceMetricsEntry => {
-                if (confidenceMetricsEntry.confidenceThreshold === 0.5) {
-                  console.log(
-                    `Precision and recall are based on a score threshold of 0.5`
-                  );
-                  console.log(
-                    `Model Precision: `,
-                    math.round(confidenceMetricsEntry.precision * 100, 2) + `%`
-                  );
-                  console.log(
-                    `Model Recall: `,
-                    math.round(confidenceMetricsEntry.recall * 100, 2) + `%`
-                  );
-                  console.log(
-                    `Model F1 score: `,
-                    math.round(confidenceMetricsEntry.f1Score * 100, 2) + `%`
-                  );
-                  console.log(
-                    `Model Precision@1: `,
-                    math.round(confidenceMetricsEntry.precisionAt1 * 100, 2) +
-                      `%`
-                  );
-                  console.log(
-                    `Model Recall@1: `,
-                    math.round(confidenceMetricsEntry.recallAt1 * 100, 2) + `%`
-                  );
-                  console.log(
-                    `Model F1 score@1: `,
-                    math.round(confidenceMetricsEntry.f1ScoreAt1 * 100, 2) + `%`
-                  );
-                }
-              });
-            })
-            .catch(err => {
-              console.error(err);
-            });
+      // Showing model score based on threshold of 0.5
+      confidenceMetricsEntries.forEach(confidenceMetricsEntry => {
+        if (confidenceMetricsEntry.confidenceThreshold === 0.5) {
+          console.log(
+            `Precision and recall are based on a score threshold of 0.5`
+          );
+          console.log(
+            `Model Precision: `,
+            math.round(confidenceMetricsEntry.precision * 100, 2) + `%`
+          );
+          console.log(
+            `Model Recall: `,
+            math.round(confidenceMetricsEntry.recall * 100, 2) + `%`
+          );
+          console.log(
+            `Model F1 score: `,
+            math.round(confidenceMetricsEntry.f1Score * 100, 2) + `%`
+          );
+          console.log(
+            `Model Precision@1: `,
+            math.round(confidenceMetricsEntry.precisionAt1 * 100, 2) + `%`
+          );
+          console.log(
+            `Model Recall@1: `,
+            math.round(confidenceMetricsEntry.recallAt1 * 100, 2) + `%`
+          );
+          console.log(
+            `Model F1 score@1: `,
+            math.round(confidenceMetricsEntry.f1ScoreAt1 * 100, 2) + `%`
+          );
+        }
+      });
-    })
-    .catch(err => {
-      console.error(err);
-    });
+    }
+  });
   // [END automl_natural_language_displayEvaluation]
 }
 
-function deleteModel(projectId, computeRegion, modelId) {
+async function deleteModel(projectId, computeRegion, modelId) {
   // [START automl_natural_language_deleteModel]
   const automl = require(`@google-cloud/automl`);
 
@@ -464,145 +411,141 @@ function deleteModel(projectId, computeRegion, modelId) {
   const modelFullId = client.modelPath(projectId, computeRegion, modelId);
 
   // Delete a model.
-  client
-    .deleteModel({name: modelFullId})
-    .then(responses => {
-      const operation = responses[0];
-      return operation.promise();
-    })
-    .then(responses => {
-      // The final result of the operation.
-      if (responses[2].done === true) console.log(`Model deleted.`);
-    })
-    .catch(err => {
-      console.error(err);
-    });
+  // deleteModel returns a long-running operation.
+  const [operation] = await client.deleteModel({name: modelFullId});
+  const response = await operation.promise();
+  // The final result of the operation.
+  if (response[2].done === true) console.log(`Model deleted.`);
+
   // [END automl_natural_language_deleteModel]
 }
 
-require(`yargs`)
-  .demand(1)
-  .options({
-    computeRegion: {
-      alias: `c`,
-      type: `string`,
-      default: process.env.REGION_NAME,
-      requiresArg: true,
-      description: `region name e.g. "us-central1"`,
"us-central1"`, - }, - datasetId: { - alias: `i`, - type: `string`, - requiresArg: true, - description: `Id of the dataset`, - }, - filter: { - alias: `f`, - default: ``, - type: `string`, - requiresArg: true, - description: `Name of the Dataset to search for`, - }, - modelName: { - alias: `m`, - type: `string`, - default: false, - requiresArg: true, - description: `Name of the model`, - }, - modelId: { - alias: `a`, - type: `string`, - default: ``, - requiresArg: true, - description: `Id of the model`, - }, - modelEvaluationId: { - alias: `e`, - type: `string`, - default: ``, - requiresArg: true, - description: `Id of the model evaluation`, - }, - operationFullId: { - alias: `o`, - type: `string`, - default: ``, - requiresArg: true, - description: `Full name of an operation`, - }, - projectId: { - alias: `z`, - type: `number`, - default: process.env.GCLOUD_PROJECT, - requiresArg: true, - description: `The GCLOUD_PROJECT string, e.g. "my-gcloud-project"`, - }, - trainBudget: { - alias: `t`, - type: `string`, - default: ``, - requiresArg: true, - description: `Budget for training the model`, - }, - }) - .command(`create-model`, `creates a new Model`, {}, opts => - createModel( - opts.projectId, - opts.computeRegion, - opts.datasetId, - opts.modelName, - opts.trainBudget +async function main() { + require(`yargs`) + .demand(1) + .options({ + computeRegion: { + alias: `c`, + type: `string`, + default: process.env.REGION_NAME, + requiresArg: true, + description: `region name e.g. "us-central1"`, + }, + datasetId: { + alias: `i`, + type: `string`, + requiresArg: true, + description: `Id of the dataset`, + }, + filter: { + alias: `f`, + default: ``, + type: `string`, + requiresArg: true, + description: `Name of the Dataset to search for`, + }, + modelName: { + alias: `m`, + type: `string`, + default: false, + requiresArg: true, + description: `Name of the model`, + }, + modelId: { + alias: `a`, + type: `string`, + default: ``, + requiresArg: true, + description: `Id of the model`, + }, + modelEvaluationId: { + alias: `e`, + type: `string`, + default: ``, + requiresArg: true, + description: `Id of the model evaluation`, + }, + operationFullId: { + alias: `o`, + type: `string`, + default: ``, + requiresArg: true, + description: `Full name of an operation`, + }, + projectId: { + alias: `z`, + type: `number`, + default: process.env.GCLOUD_PROJECT, + requiresArg: true, + description: `The GCLOUD_PROJECT string, e.g. 
"my-gcloud-project"`, + }, + trainBudget: { + alias: `t`, + type: `string`, + default: ``, + requiresArg: true, + description: `Budget for training the model`, + }, + }) + .command(`create-model`, `creates a new Model`, {}, opts => + createModel( + opts.projectId, + opts.computeRegion, + opts.datasetId, + opts.modelName, + opts.trainBudget + ) + ) + .command( + `get-operation-status`, + `Gets status of current operation`, + {}, + opts => getOperationStatus(opts.operationFullId) + ) + .command(`list-models`, `list all Models`, {}, opts => + listModels(opts.projectId, opts.computeRegion, opts.filter) ) - ) - .command( - `get-operation-status`, - `Gets status of current operation`, - {}, - opts => getOperationStatus(opts.operationFullId) - ) - .command(`list-models`, `list all Models`, {}, opts => - listModels(opts.projectId, opts.computeRegion, opts.filter) - ) - .command(`get-model`, `Get a Model`, {}, opts => - getModel(opts.projectId, opts.computeRegion, opts.modelId) - ) - .command(`list-model-evaluations`, `List model evaluations`, {}, opts => - listModelEvaluations( - opts.projectId, - opts.computeRegion, - opts.modelId, - opts.filter + .command(`get-model`, `Get a Model`, {}, opts => + getModel(opts.projectId, opts.computeRegion, opts.modelId) ) - ) - .command(`get-model-evaluation`, `Get model evaluation`, {}, opts => - getModelEvaluation( - opts.projectId, - opts.computeRegion, - opts.modelId, - opts.modelEvaluationId + .command(`list-model-evaluations`, `List model evaluations`, {}, opts => + listModelEvaluations( + opts.projectId, + opts.computeRegion, + opts.modelId, + opts.filter + ) ) - ) - .command(`display-evaluation`, `Display evaluation`, {}, opts => - displayEvaluation( - opts.projectId, - opts.computeRegion, - opts.modelId, - opts.filter + .command(`get-model-evaluation`, `Get model evaluation`, {}, opts => + getModelEvaluation( + opts.projectId, + opts.computeRegion, + opts.modelId, + opts.modelEvaluationId + ) ) - ) - .command(`delete-model`, `Delete a Model`, {}, opts => - deleteModel(opts.projectId, opts.computeRegion, opts.modelId) - ) - .example(`node $0 create-model -i "DatasetID" -m "myModelName" -t "2"`) - .example(`node $0 get-operation-status -i "datasetId" -o "OperationFullID"`) - .example(`node $0 list-models -f "textClassificationModelMetadata:*"`) - .example(`node $0 get-model -a "ModelID"`) - .example(`node $0 list-model-evaluations -a "ModelID"`) - .example(`node $0 get-model-evaluation -a "ModelId" -e "ModelEvaluationID"`) - .example(`node $0 display-evaluation -a "ModelId"`) - .example(`node $0 delete-model -a "ModelID"`) - .wrap(120) - .recommendCommands() - .help() - .strict().argv; + .command(`display-evaluation`, `Display evaluation`, {}, opts => + displayEvaluation( + opts.projectId, + opts.computeRegion, + opts.modelId, + opts.filter + ) + ) + .command(`delete-model`, `Delete a Model`, {}, opts => + deleteModel(opts.projectId, opts.computeRegion, opts.modelId) + ) + .example(`node $0 create-model -i "DatasetID" -m "myModelName" -t "2"`) + .example(`node $0 get-operation-status -i "datasetId" -o "OperationFullID"`) + .example(`node $0 list-models -f "textClassificationModelMetadata:*"`) + .example(`node $0 get-model -a "ModelID"`) + .example(`node $0 list-model-evaluations -a "ModelID"`) + .example(`node $0 get-model-evaluation -a "ModelId" -e "ModelEvaluationID"`) + .example(`node $0 display-evaluation -a "ModelId"`) + .example(`node $0 delete-model -a "ModelID"`) + .wrap(120) + .recommendCommands() + .help() + .strict().argv; +} + 
+main().catch(console.error);
diff --git a/cloud-language/snippets/automl/automlNaturalLanguagePredict.js b/cloud-language/snippets/automl/automlNaturalLanguagePredict.js
index bf0395e3e7..4a6fff3122 100755
--- a/cloud-language/snippets/automl/automlNaturalLanguagePredict.js
+++ b/cloud-language/snippets/automl/automlNaturalLanguagePredict.js
@@ -23,7 +23,7 @@
 `use strict`;
 
-function predict(projectId, computeRegion, modelId, filePath) {
+async function predict(projectId, computeRegion, modelId, filePath) {
   // [START automl_natural_language_predict]
   const automl = require(`@google-cloud/automl`);
   const fs = require(`fs`);
@@ -55,72 +55,74 @@ function predict(projectId, computeRegion, modelId, filePath) {
   // Params is additional domain-specific parameters.
   // Currently there is no additional parameters supported.
-  client
-    .predict({name: modelFullId, payload: payload, params: {}})
-    .then(responses => {
-      console.log(`Prediction results:`);
-      responses[0].payload.forEach(result => {
-        console.log(`Predicted class name: ${result.displayName}`);
-        console.log(`Predicted class score: ${result.classification.score}`);
-      });
-    })
-    .catch(err => {
-      console.error(err);
-    });
+  const [response] = await client.predict({
+    name: modelFullId,
+    payload: payload,
+    params: {},
+  });
+  console.log(`Prediction results:`);
+  response.payload.forEach(result => {
+    console.log(`Predicted class name: ${result.displayName}`);
+    console.log(`Predicted class score: ${result.classification.score}`);
+  });
   // [END automl_natural_language_predict]
 }
 
-require(`yargs`)
-  .demand(1)
-  .options({
-    computeRegion: {
-      alias: `c`,
-      type: `string`,
-      default: process.env.REGION_NAME,
-      requiresArg: true,
-      description: `region name e.g. "us-central1"`,
-    },
-    filePath: {
-      alias: `f`,
-      default: `./resources/test.txt`,
-      type: `string`,
-      requiresArg: true,
-      description: `local text file path of the content to be classified`,
-    },
-    modelId: {
-      alias: `i`,
-      type: `string`,
-      requiresArg: true,
-      description: `Id of the model which will be used for text classification`,
-    },
-    projectId: {
-      alias: `z`,
-      type: `number`,
-      default: process.env.GCLOUD_PROJECT,
-      requiresArg: true,
-      description: `The GCLOUD_PROJECT string, e.g. "my-gcloud-project"`,
-    },
-    scoreThreshold: {
-      alias: `s`,
-      type: `string`,
-      default: `0.5`,
-      requiresArg: true,
-      description:
-        `A value from 0.0 to 1.0. When the model makes predictions for an image it will` +
-        `only produce results that have at least this confidence score threshold. Default is .5`,
-    },
-  })
-  .command(`predict`, `classify the content`, {}, opts =>
-    predict(
-      opts.projectId,
-      opts.computeRegion,
-      opts.modelId,
-      opts.filePath,
-      opts.scoreThreshold
+async function main() {
+  require(`yargs`)
+    .demand(1)
+    .options({
+      computeRegion: {
+        alias: `c`,
+        type: `string`,
+        default: process.env.REGION_NAME,
+        requiresArg: true,
+        description: `region name e.g. "us-central1"`,
+      },
+      filePath: {
+        alias: `f`,
+        default: `./resources/test.txt`,
+        type: `string`,
+        requiresArg: true,
+        description: `local text file path of the content to be classified`,
+      },
+      modelId: {
+        alias: `i`,
+        type: `string`,
+        requiresArg: true,
+        description: `Id of the model which will be used for text classification`,
+      },
+      projectId: {
+        alias: `z`,
+        type: `string`,
+        default: process.env.GCLOUD_PROJECT,
+        requiresArg: true,
+        description: `The GCLOUD_PROJECT string, e.g. "my-gcloud-project"`,
"my-gcloud-project"`, + }, + scoreThreshold: { + alias: `s`, + type: `string`, + default: `0.5`, + requiresArg: true, + description: + `A value from 0.0 to 1.0. When the model makes predictions for an image it will` + + `only produce results that have at least this confidence score threshold. Default is .5`, + }, + }) + .command(`predict`, `classify the content`, {}, opts => + predict( + opts.projectId, + opts.computeRegion, + opts.modelId, + opts.filePath, + opts.scoreThreshold + ) ) - ) - .example(`node $0 predict -i "modelId" -f "./resources/test.txt" -s "0.5"`) - .wrap(120) - .recommendCommands() - .help() - .strict().argv; + .example(`node $0 predict -i "modelId" -f "./resources/test.txt" -s "0.5"`) + .wrap(120) + .recommendCommands() + .help() + .strict().argv; +} + +main().catch(console.error); diff --git a/cloud-language/snippets/quickstart.js b/cloud-language/snippets/quickstart.js index b1b5683bb9..eaae8c5ecc 100644 --- a/cloud-language/snippets/quickstart.js +++ b/cloud-language/snippets/quickstart.js @@ -16,31 +16,29 @@ 'use strict'; // [START language_quickstart] -// Imports the Google Cloud client library -const language = require('@google-cloud/language'); - -// Instantiates a client -const client = new language.LanguageServiceClient(); - -// The text to analyze -const text = 'Hello, world!'; - -const document = { - content: text, - type: 'PLAIN_TEXT', -}; - -// Detects the sentiment of the text -client - .analyzeSentiment({document: document}) - .then(results => { - const sentiment = results[0].documentSentiment; - - console.log(`Text: ${text}`); - console.log(`Sentiment score: ${sentiment.score}`); - console.log(`Sentiment magnitude: ${sentiment.magnitude}`); - }) - .catch(err => { - console.error('ERROR:', err); - }); +async function main() { + // Imports the Google Cloud client library + const language = require('@google-cloud/language'); + + // Instantiates a client + const client = new language.LanguageServiceClient(); + + // The text to analyze + const text = 'Hello, world!'; + + const document = { + content: text, + type: 'PLAIN_TEXT', + }; + + // Detects the sentiment of the text + const [result] = await client.analyzeSentiment({document: document}); + const sentiment = result.documentSentiment; + + console.log(`Text: ${text}`); + console.log(`Sentiment score: ${sentiment.score}`); + console.log(`Sentiment magnitude: ${sentiment.magnitude}`); +} + +main().catch(console.error); // [END language_quickstart]