From 7cb1e7517035c72c0f57dc1bb9b08fe00cdaa0e7 Mon Sep 17 00:00:00 2001
From: Justin Beckwith
Date: Fri, 21 Dec 2018 14:03:22 -0800
Subject: [PATCH] refactor: modernize the sample tests (#185)

* refactor: modernize the sample tests

* lintalicious
---
 cloud-language/snippets/.eslintrc.yml         |   1 +
 cloud-language/snippets/analyze.v1.js         | 197 +++++++-------
 cloud-language/snippets/analyze.v1beta2.js    | 182 ++++++-------
 cloud-language/snippets/automl/.eslintrc.yml  |   4 -
 .../automl/automlNaturalLanguageDataset.js    | 212 +++++++--------
 .../automl/automlNaturalLanguageModel.js      | 248 +++++++++--------
 .../automl/automlNaturalLanguagePredict.js    | 110 ++++----
 cloud-language/snippets/automl/package.json   |  31 ---
 .../snippets/automl/system-test/.eslintrc.yml |   3 -
 .../system-test/automlNaturalLanguage.test.js | 140 ----------
 .../snippets/automl/system-test/mocha.opts    |   2 -
 cloud-language/snippets/package.json          |   5 +-
 cloud-language/snippets/quickstart.js         |   6 +-
 .../snippets/test/analyze.v1.test.js          | 253 +++++++++---------
 .../snippets/test/analyze.v1beta2.test.js     | 189 ++++++-------
 .../test/automlNaturalLanguage.test.js        | 139 ++++++++++
 cloud-language/snippets/test/mocha.opts       |   2 -
 .../snippets/test/quickstart.test.js          |  23 +-
 18 files changed, 819 insertions(+), 928 deletions(-)
 delete mode 100644 cloud-language/snippets/automl/.eslintrc.yml
 delete mode 100644 cloud-language/snippets/automl/package.json
 delete mode 100644 cloud-language/snippets/automl/system-test/.eslintrc.yml
 delete mode 100644 cloud-language/snippets/automl/system-test/automlNaturalLanguage.test.js
 delete mode 100644 cloud-language/snippets/automl/system-test/mocha.opts
 create mode 100644 cloud-language/snippets/test/automlNaturalLanguage.test.js
 delete mode 100644 cloud-language/snippets/test/mocha.opts

diff --git a/cloud-language/snippets/.eslintrc.yml b/cloud-language/snippets/.eslintrc.yml
index 282535f55f..0aa37ac630 100644
--- a/cloud-language/snippets/.eslintrc.yml
+++ b/cloud-language/snippets/.eslintrc.yml
@@ -1,3 +1,4 @@
 ---
 rules:
   no-console: off
+  node/no-missing-require: off
diff --git a/cloud-language/snippets/analyze.v1.js b/cloud-language/snippets/analyze.v1.js
index f506f78dfc..83fb5b1947 100644
--- a/cloud-language/snippets/analyze.v1.js
+++ b/cloud-language/snippets/analyze.v1.js
@@ -346,105 +346,98 @@ async function classifyTextInFile(bucketName, fileName) {
   // [END language_classify_gcs]
 }
 
-async function main() {
-  require(`yargs`)
-    .demand(1)
-    .command(
-      `sentiment-text <text>`,
-      `Detects sentiment of a string.`,
-      {},
-      opts => analyzeSentimentOfText(opts.text)
-    )
-    .command(
-      `sentiment-file <bucketName> <fileName>`,
-      `Detects sentiment in a file in Google Cloud Storage.`,
-      {},
-      opts => analyzeSentimentInFile(opts.bucketName, opts.fileName)
-    )
-    .command(
-      `entities-text <text>`,
-      `Detects entities in a string.`,
-      {},
-      opts => analyzeEntitiesOfText(opts.text)
-    )
-    .command(
-      `entities-file <bucketName> <fileName>`,
-      `Detects entities in a file in Google Cloud Storage.`,
-      {},
-      opts => analyzeEntitiesInFile(opts.bucketName, opts.fileName)
-    )
-    .command(`syntax-text <text>`, `Detects syntax of a string.`, {}, opts =>
-      analyzeSyntaxOfText(opts.text)
-    )
-    .command(
-      `syntax-file <bucketName> <fileName>`,
-      `Detects syntax in a file in Google Cloud Storage.`,
-      {},
-      opts => analyzeSyntaxInFile(opts.bucketName, opts.fileName)
-    )
-    .command(
-      `entity-sentiment-text <text>`,
-      `Detects sentiment of the entities in a string.`,
-      {},
-      opts => analyzeEntitySentimentOfText(opts.text)
-    )
-    .command(
-      `entity-sentiment-file <bucketName> <fileName>`,
-      `Detects sentiment of the entities in a file in Google Cloud Storage.`,
-      {},
-      opts => analyzeEntitySentimentInFile(opts.bucketName, opts.fileName)
-    )
-    .command(`classify-text <text>`, `Classifies text of a string.`, {}, opts =>
-      classifyTextOfText(opts.text)
-    )
-    .command(
-      `classify-file <bucketName> <fileName>`,
-      `Classifies text in a file in Google Cloud Storage.`,
-      {},
-      opts => classifyTextInFile(opts.bucketName, opts.fileName)
-    )
-    .example(
-      `node $0 sentiment-text "President Obama is speaking at the White House."`
-    )
-    .example(
-      `node $0 sentiment-file my-bucket file.txt`,
-      `Detects sentiment in gs://my-bucket/file.txt`
-    )
-    .example(
-      `node $0 entities-text "President Obama is speaking at the White House."`
-    )
-    .example(
-      `node $0 entities-file my-bucket file.txt`,
-      `Detects entities in gs://my-bucket/file.txt`
-    )
-    .example(
-      `node $0 syntax-text "President Obama is speaking at the White House."`
-    )
-    .example(
-      `node $0 syntax-file my-bucket file.txt`,
-      `Detects syntax in gs://my-bucket/file.txt`
-    )
-    .example(
-      `node $0 entity-sentiment-text "President Obama is speaking at the White House."`
-    )
-    .example(
-      `node $0 entity-sentiment-file my-bucket file.txt`,
-      `Detects sentiment of entities in gs://my-bucket/file.txt`
-    )
-    .example(
-      `node $0 classify-text "Android is a mobile operating system developed by Google, based on the Linux kernel and designed primarily for touchscreen mobile devices such as smartphones and tablets."`
-    )
-    .example(
-      `node $0 classify-file my-bucket android_text.txt`,
-      `Detects syntax in gs://my-bucket/android_text.txt`
-    )
-    .wrap(120)
-    .recommendCommands()
-    .epilogue(
-      `For more information, see https://cloud.google.com/natural-language/docs`
-    )
-    .help()
-    .strict().argv;
-}
-
-main().catch(console.error);
+require(`yargs`)
+  .demand(1)
+  .command(
+    `sentiment-text <text>`,
+    `Detects sentiment of a string.`,
+    {},
+    opts => analyzeSentimentOfText(opts.text)
+  )
+  .command(
+    `sentiment-file <bucketName> <fileName>`,
+    `Detects sentiment in a file in Google Cloud Storage.`,
+    {},
+    opts => analyzeSentimentInFile(opts.bucketName, opts.fileName)
+  )
+  .command(`entities-text <text>`, `Detects entities in a string.`, {}, opts =>
+    analyzeEntitiesOfText(opts.text)
+  )
+  .command(
+    `entities-file <bucketName> <fileName>`,
+    `Detects entities in a file in Google Cloud Storage.`,
+    {},
+    opts => analyzeEntitiesInFile(opts.bucketName, opts.fileName)
+  )
+  .command(`syntax-text <text>`, `Detects syntax of a string.`, {}, opts =>
+    analyzeSyntaxOfText(opts.text)
+  )
+  .command(
+    `syntax-file <bucketName> <fileName>`,
+    `Detects syntax in a file in Google Cloud Storage.`,
+    {},
+    opts => analyzeSyntaxInFile(opts.bucketName, opts.fileName)
+  )
+  .command(
+    `entity-sentiment-text <text>`,
+    `Detects sentiment of the entities in a string.`,
+    {},
+    opts => analyzeEntitySentimentOfText(opts.text)
+  )
+  .command(
+    `entity-sentiment-file <bucketName> <fileName>`,
+    `Detects sentiment of the entities in a file in Google Cloud Storage.`,
+    {},
+    opts => analyzeEntitySentimentInFile(opts.bucketName, opts.fileName)
+  )
+  .command(`classify-text <text>`, `Classifies text of a string.`, {}, opts =>
+    classifyTextOfText(opts.text)
+  )
+  .command(
+    `classify-file <bucketName> <fileName>`,
+    `Classifies text in a file in Google Cloud Storage.`,
+    {},
+    opts => classifyTextInFile(opts.bucketName, opts.fileName)
+  )
+  .example(
+    `node $0 sentiment-text "President Obama is speaking at the White House."`
+  )
+  .example(
+    `node $0 sentiment-file my-bucket file.txt`,
+    `Detects sentiment in gs://my-bucket/file.txt`
+  )
+  .example(
+    `node $0 entities-text "President Obama is speaking at the White House."`
+  )
+  .example(
+    `node $0 entities-file my-bucket file.txt`,
+    `Detects entities in gs://my-bucket/file.txt`
+  )
+  .example(
+    `node $0 syntax-text "President Obama is speaking at the White House."`
+  )
+  .example(
+    `node $0 syntax-file my-bucket file.txt`,
+    `Detects syntax in gs://my-bucket/file.txt`
+  )
+  .example(
+    `node $0 entity-sentiment-text "President Obama is speaking at the White House."`
+  )
+  .example(
+    `node $0 entity-sentiment-file my-bucket file.txt`,
+    `Detects sentiment of entities in gs://my-bucket/file.txt`
+  )
+  .example(
+    `node $0 classify-text "Android is a mobile operating system developed by Google, based on the Linux kernel and designed primarily for touchscreen mobile devices such as smartphones and tablets."`
+  )
+  .example(
+    `node $0 classify-file my-bucket android_text.txt`,
+    `Detects syntax in gs://my-bucket/android_text.txt`
+  )
+  .wrap(120)
+  .recommendCommands()
+  .epilogue(
+    `For more information, see https://cloud.google.com/natural-language/docs`
+  )
+  .help()
+  .strict().argv;
diff --git a/cloud-language/snippets/analyze.v1beta2.js b/cloud-language/snippets/analyze.v1beta2.js
index c393a165f5..f798b016ce 100644
--- a/cloud-language/snippets/analyze.v1beta2.js
+++ b/cloud-language/snippets/analyze.v1beta2.js
@@ -248,7 +248,7 @@ async function classifyTextOfText(text) {
   // [END language_classify_string]
 }
 
-function classifyTextInFile(bucketName, fileName) {
+async function classifyTextInFile(bucketName, fileName) {
   // [START language_classify_file]
   // Imports the Google Cloud client library
   const language = require('@google-cloud/language').v1beta2;
@@ -269,105 +269,87 @@ function classifyTextInFile(bucketName, fileName) {
   };
 
   // Classifies text in the document
-  client
-    .classifyText({document})
-    .then(results => {
-      const classification = results[0];
-
-      console.log('Categories:');
-      classification.categories.forEach(category => {
-        console.log(
-          `Name: ${category.name}, Confidence: ${category.confidence}`
-        );
-      });
-    })
-    .catch(err => {
-      console.error('ERROR:', err);
-    });
+  const [classification] = await client.classifyText({document});
+  console.log('Categories:');
+  classification.categories.forEach(category => {
+    console.log(`Name: ${category.name}, Confidence: ${category.confidence}`);
+  });
   // [END language_classify_file]
 }
 
-async function main() {
-  require(`yargs`)
-    .demand(1)
-    .command(
-      `sentiment-text <text>`,
-      `Detects sentiment of a string.`,
-      {},
-      opts => analyzeSentimentOfText(opts.text)
-    )
-    .command(
-      `sentiment-file <bucketName> <fileName>`,
-      `Detects sentiment in a file in Google Cloud Storage.`,
-      {},
-      opts => analyzeSentimentInFile(opts.bucketName, opts.fileName)
-    )
-    .command(
-      `entities-text <text>`,
-      `Detects entities in a string.`,
-      {},
-      opts => analyzeEntitiesOfText(opts.text)
-    )
-    .command(
-      `entities-file <bucketName> <fileName>`,
-      `Detects entities in a file in Google Cloud Storage.`,
-      {},
-      opts => analyzeEntitiesInFile(opts.bucketName, opts.fileName)
-    )
-    .command(`syntax-text <text>`, `Detects syntax of a string.`, {}, opts =>
-      analyzeSyntaxOfText(opts.text)
-    )
-    .command(
-      `syntax-file <bucketName> <fileName>`,
-      `Detects syntax in a file in Google Cloud Storage.`,
-      {},
-      opts => analyzeSyntaxInFile(opts.bucketName, opts.fileName)
-    )
-    .command(`classify-text <text>`, `Classifies text of a string.`, {}, opts =>
-      classifyTextOfText(opts.text)
-    )
-    .command(
-      `classify-file <bucketName> <fileName>`,
-      `Classifies text in a file in Google Cloud Storage.`,
-      {},
-      opts =>
-        classifyTextInFile(opts.bucketName, opts.fileName).catch(console.error)
-    )
-    .example(
-      `node $0 sentiment-text "President Obama is speaking at the White House."`
-    )
-    .example(
-      `node $0 sentiment-file my-bucket file.txt`,
-      `Detects sentiment in gs://my-bucket/file.txt`
-    )
-    .example(
-      `node $0 entities-text "President Obama is speaking at the White House."`
-    )
-    .example(
-      `node $0 entities-file my-bucket file.txt`,
-      `Detects entities in gs://my-bucket/file.txt`
-    )
-    .example(
-      `node $0 syntax-text "President Obama is speaking at the White House."`
-    )
-    .example(
-      `node $0 syntax-file my-bucket file.txt`,
-      `Detects syntax in gs://my-bucket/file.txt`
-    )
-    .example(
-      `node $0 classify-text "Android is a mobile operating system developed by Google, based on the Linux kernel and designed primarily for touchscreen mobile devices such as smartphones and tablets."`
-    )
-    .example(
-      `node $0 classify-file my-bucket android_text.txt`,
-      `Detects syntax in gs://my-bucket/android_text.txt`
-    )
-    .wrap(120)
-    .recommendCommands()
-    .epilogue(
-      `For more information, see https://cloud.google.com/natural-language/docs`
-    )
-    .help()
-    .strict().argv;
-}
-
-main().catch(console.error);
+require(`yargs`)
+  .demand(1)
+  .command(
+    `sentiment-text <text>`,
+    `Detects sentiment of a string.`,
+    {},
+    opts => analyzeSentimentOfText(opts.text)
+  )
+  .command(
+    `sentiment-file <bucketName> <fileName>`,
+    `Detects sentiment in a file in Google Cloud Storage.`,
+    {},
+    opts => analyzeSentimentInFile(opts.bucketName, opts.fileName)
+  )
+  .command(`entities-text <text>`, `Detects entities in a string.`, {}, opts =>
+    analyzeEntitiesOfText(opts.text)
+  )
+  .command(
+    `entities-file <bucketName> <fileName>`,
+    `Detects entities in a file in Google Cloud Storage.`,
+    {},
+    opts => analyzeEntitiesInFile(opts.bucketName, opts.fileName)
+  )
+  .command(`syntax-text <text>`, `Detects syntax of a string.`, {}, opts =>
+    analyzeSyntaxOfText(opts.text)
+  )
+  .command(
+    `syntax-file <bucketName> <fileName>`,
+    `Detects syntax in a file in Google Cloud Storage.`,
+    {},
+    opts => analyzeSyntaxInFile(opts.bucketName, opts.fileName)
+  )
+  .command(`classify-text <text>`, `Classifies text of a string.`, {}, opts =>
+    classifyTextOfText(opts.text)
+  )
+  .command(
+    `classify-file <bucketName> <fileName>`,
+    `Classifies text in a file in Google Cloud Storage.`,
+    {},
+    opts => classifyTextInFile(opts.bucketName, opts.fileName)
+  )
+  .example(
+    `node $0 sentiment-text "President Obama is speaking at the White House."`
+  )
+  .example(
+    `node $0 sentiment-file my-bucket file.txt`,
+    `Detects sentiment in gs://my-bucket/file.txt`
+  )
+  .example(
+    `node $0 entities-text "President Obama is speaking at the White House."`
+  )
+  .example(
+    `node $0 entities-file my-bucket file.txt`,
+    `Detects entities in gs://my-bucket/file.txt`
+  )
+  .example(
+    `node $0 syntax-text "President Obama is speaking at the White House."`
+  )
+  .example(
+    `node $0 syntax-file my-bucket file.txt`,
+    `Detects syntax in gs://my-bucket/file.txt`
+  )
+  .example(
+    `node $0 classify-text "Android is a mobile operating system developed by Google, based on the Linux kernel and designed primarily for touchscreen mobile devices such as smartphones and tablets."`
+  )
+  .example(
+    `node $0 classify-file my-bucket android_text.txt`,
+    `Detects syntax in gs://my-bucket/android_text.txt`
+  )
+  .wrap(120)
+  .recommendCommands()
+  .epilogue(
+    `For more information, see https://cloud.google.com/natural-language/docs`
+  )
+  .help()
+  .strict().argv;
diff --git a/cloud-language/snippets/automl/.eslintrc.yml b/cloud-language/snippets/automl/.eslintrc.yml
deleted file mode 100644
index 0aa37ac630..0000000000
--- a/cloud-language/snippets/automl/.eslintrc.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-rules:
-  no-console: off
-  node/no-missing-require: off
diff --git a/cloud-language/snippets/automl/automlNaturalLanguageDataset.js b/cloud-language/snippets/automl/automlNaturalLanguageDataset.js
index b6563d4e4a..139efc0ef9 100755
--- a/cloud-language/snippets/automl/automlNaturalLanguageDataset.js
+++ b/cloud-language/snippets/automl/automlNaturalLanguageDataset.js
@@ -251,114 +251,110 @@ async function deleteDataset(projectId, computeRegion, datasetId) {
   // [END automl_natural_language_deleteDataset]
 }
 
-async function main() {
-  require(`yargs`)
-    .demand(1)
-    .options({
-      computeRegion: {
-        alias: `c`,
-        type: `string`,
-        default: process.env.REGION_NAME,
-        requiresArg: true,
-        description: `region name e.g. "us-central1"`,
-      },
-      datasetName: {
-        alias: `n`,
-        type: `string`,
-        default: `testDataSet`,
-        requiresArg: true,
-        description: `Name of the Dataset`,
-      },
-      datasetId: {
-        alias: `i`,
-        type: `string`,
-        requiresArg: true,
-        description: `Id of the dataset`,
-      },
-      filter: {
-        alias: `f`,
-        default: `text_classification_dataset_metadata:*`,
-        type: `string`,
-        requiresArg: false,
-        description: `filter expression`,
-      },
-      multilabel: {
-        alias: `m`,
-        type: `string`,
-        default: false,
-        requiresArg: true,
-        description:
-          `Type of the classification problem, ` +
-          `False - MULTICLASS, True - MULTILABEL.`,
-      },
-      outputUri: {
-        alias: `o`,
-        type: `string`,
-        requiresArg: true,
-        description: `URI (or local path) to export dataset`,
-      },
-      path: {
-        alias: `p`,
-        type: `string`,
-        global: true,
-        default: `gs://nodejs-docs-samples-vcm/flowerTraindataMini.csv`,
-        requiresArg: true,
-        description: `URI or local path to input .csv, or array of .csv paths`,
-      },
-      projectId: {
-        alias: `z`,
-        type: `number`,
-        default: process.env.GCLOUD_PROJECT,
-        requiresArg: true,
-        description: `The GCLOUD_PROJECT string, e.g. "my-gcloud-project"`,
-      },
-    })
-    .command(`create-dataset`, `creates a new Dataset`, {}, opts =>
-      createDataset(
-        opts.projectId,
-        opts.computeRegion,
-        opts.datasetName,
-        opts.multilabel
-      )
-    )
-    .command(`list-datasets`, `list all Datasets`, {}, opts =>
-      listDatasets(opts.projectId, opts.computeRegion, opts.filter)
-    )
-    .command(`get-dataset`, `Get a Dataset`, {}, opts =>
-      getDataset(opts.projectId, opts.computeRegion, opts.datasetId)
-    )
-    .command(`delete-dataset`, `Delete a dataset`, {}, opts =>
-      deleteDataset(opts.projectId, opts.computeRegion, opts.datasetId)
-    )
-    .command(`import-data`, `Import labeled items into dataset`, {}, opts =>
-      importData(opts.projectId, opts.computeRegion, opts.datasetId, opts.path)
-    )
-    .command(
-      `export-data`,
-      `Export a dataset to a Google Cloud Storage Bucket`,
-      {},
-      opts =>
-        exportData(
-          opts.projectId,
-          opts.computeRegion,
-          opts.datasetId,
-          opts.outputUri
-        )
-    )
-    .example(`node $0 create-dataset -n "newDataSet"`)
-    .example(`node $0 list-datasets -f "imageClassificationDatasetMetadata:*"`)
-    .example(`node $0 get-dataset -i "DATASETID"`)
-    .example(`node $0 delete-dataset -i "DATASETID"`)
-    .example(
-      `node $0 import-data -i "dataSetId" -p "gs://myproject/mytraindata.csv"`
-    )
-    .example(
-      `node $0 export-data -i "dataSetId" -o "gs://myproject/outputdestination.csv"`
-    )
-    .wrap(120)
-    .recommendCommands()
-    .help()
-    .strict().argv;
-}
-
-main().catch(console.error);
+require(`yargs`)
+  .demand(1)
+  .options({
+    computeRegion: {
+      alias: `c`,
+      type: `string`,
+      default: process.env.REGION_NAME,
+      requiresArg: true,
+      description: `region name e.g. "us-central1"`,
+    },
+    datasetName: {
+      alias: `n`,
+      type: `string`,
+      default: `testDataSet`,
+      requiresArg: true,
+      description: `Name of the Dataset`,
+    },
+    datasetId: {
+      alias: `i`,
+      type: `string`,
+      requiresArg: true,
+      description: `Id of the dataset`,
+    },
+    filter: {
+      alias: `f`,
+      default: `text_classification_dataset_metadata:*`,
+      type: `string`,
+      requiresArg: false,
+      description: `filter expression`,
+    },
+    multilabel: {
+      alias: `m`,
+      type: `string`,
+      default: false,
+      requiresArg: true,
+      description:
+        `Type of the classification problem, ` +
+        `False - MULTICLASS, True - MULTILABEL.`,
+    },
+    outputUri: {
+      alias: `o`,
+      type: `string`,
+      requiresArg: true,
+      description: `URI (or local path) to export dataset`,
+    },
+    path: {
+      alias: `p`,
+      type: `string`,
+      global: true,
+      default: `gs://nodejs-docs-samples-vcm/flowerTraindataMini.csv`,
+      requiresArg: true,
+      description: `URI or local path to input .csv, or array of .csv paths`,
+    },
+    projectId: {
+      alias: `z`,
+      type: `number`,
+      default: process.env.GCLOUD_PROJECT,
+      requiresArg: true,
+      description: `The GCLOUD_PROJECT string, e.g. "my-gcloud-project"`,
"my-gcloud-project"`, + }, + }) + .command(`create-dataset`, `creates a new Dataset`, {}, opts => + createDataset( + opts.projectId, + opts.computeRegion, + opts.datasetName, + opts.multilabel + ) + ) + .command(`list-datasets`, `list all Datasets`, {}, opts => + listDatasets(opts.projectId, opts.computeRegion, opts.filter) + ) + .command(`get-dataset`, `Get a Dataset`, {}, opts => + getDataset(opts.projectId, opts.computeRegion, opts.datasetId) + ) + .command(`delete-dataset`, `Delete a dataset`, {}, opts => + deleteDataset(opts.projectId, opts.computeRegion, opts.datasetId) + ) + .command(`import-data`, `Import labeled items into dataset`, {}, opts => + importData(opts.projectId, opts.computeRegion, opts.datasetId, opts.path) + ) + .command( + `export-data`, + `Export a dataset to a Google Cloud Storage Bucket`, + {}, + opts => + exportData( opts.projectId, opts.computeRegion, - opts.datasetName, - opts.multilabel + opts.datasetId, + opts.outputUri ) - ) - .command(`list-datasets`, `list all Datasets`, {}, opts => - listDatasets(opts.projectId, opts.computeRegion, opts.filter) - ) - .command(`get-dataset`, `Get a Dataset`, {}, opts => - getDataset(opts.projectId, opts.computeRegion, opts.datasetId) - ) - .command(`delete-dataset`, `Delete a dataset`, {}, opts => - deleteDataset(opts.projectId, opts.computeRegion, opts.datasetId) - ) - .command(`import-data`, `Import labeled items into dataset`, {}, opts => - importData(opts.projectId, opts.computeRegion, opts.datasetId, opts.path) - ) - .command( - `export-data`, - `Export a dataset to a Google Cloud Storage Bucket`, - {}, - opts => - exportData( - opts.projectId, - opts.computeRegion, - opts.datasetId, - opts.outputUri - ) - ) - .example(`node $0 create-dataset -n "newDataSet"`) - .example(`node $0 list-datasets -f "imageClassificationDatasetMetadata:*"`) - .example(`node $0 get-dataset -i "DATASETID"`) - .example(`node $0 delete-dataset -i "DATASETID"`) - .example( - `node $0 import-data -i "dataSetId" -p "gs://myproject/mytraindata.csv"` - ) - .example( - `node $0 export-data -i "dataSetId" -o "gs://myproject/outputdestination.csv"` - ) - .wrap(120) - .recommendCommands() - .help() - .strict().argv; -} - -main().catch(console.error); + ) + .example(`node $0 create-dataset -n "newDataSet"`) + .example(`node $0 list-datasets -f "imageClassificationDatasetMetadata:*"`) + .example(`node $0 get-dataset -i "DATASETID"`) + .example(`node $0 delete-dataset -i "DATASETID"`) + .example( + `node $0 import-data -i "dataSetId" -p "gs://myproject/mytraindata.csv"` + ) + .example( + `node $0 export-data -i "dataSetId" -o "gs://myproject/outputdestination.csv"` + ) + .wrap(120) + .recommendCommands() + .help() + .strict().argv; diff --git a/cloud-language/snippets/automl/automlNaturalLanguageModel.js b/cloud-language/snippets/automl/automlNaturalLanguageModel.js index 96492852c0..747f141c1d 100755 --- a/cloud-language/snippets/automl/automlNaturalLanguageModel.js +++ b/cloud-language/snippets/automl/automlNaturalLanguageModel.js @@ -419,133 +419,129 @@ async function deleteModel(projectId, computeRegion, modelId) { // [END automl_natural_language_deleteModel] } -async function main() { - require(`yargs`) - .demand(1) - .options({ - computeRegion: { - alias: `c`, - type: `string`, - default: process.env.REGION_NAME, - requiresArg: true, - description: `region name e.g. 
"us-central1"`, - }, - datasetId: { - alias: `i`, - type: `string`, - requiresArg: true, - description: `Id of the dataset`, - }, - filter: { - alias: `f`, - default: ``, - type: `string`, - requiresArg: true, - description: `Name of the Dataset to search for`, - }, - modelName: { - alias: `m`, - type: `string`, - default: false, - requiresArg: true, - description: `Name of the model`, - }, - modelId: { - alias: `a`, - type: `string`, - default: ``, - requiresArg: true, - description: `Id of the model`, - }, - modelEvaluationId: { - alias: `e`, - type: `string`, - default: ``, - requiresArg: true, - description: `Id of the model evaluation`, - }, - operationFullId: { - alias: `o`, - type: `string`, - default: ``, - requiresArg: true, - description: `Full name of an operation`, - }, - projectId: { - alias: `z`, - type: `number`, - default: process.env.GCLOUD_PROJECT, - requiresArg: true, - description: `The GCLOUD_PROJECT string, e.g. "my-gcloud-project"`, - }, - trainBudget: { - alias: `t`, - type: `string`, - default: ``, - requiresArg: true, - description: `Budget for training the model`, - }, - }) - .command(`create-model`, `creates a new Model`, {}, opts => - createModel( - opts.projectId, - opts.computeRegion, - opts.datasetId, - opts.modelName, - opts.trainBudget - ) +require(`yargs`) + .demand(1) + .options({ + computeRegion: { + alias: `c`, + type: `string`, + default: process.env.REGION_NAME, + requiresArg: true, + description: `region name e.g. "us-central1"`, + }, + datasetId: { + alias: `i`, + type: `string`, + requiresArg: true, + description: `Id of the dataset`, + }, + filter: { + alias: `f`, + default: ``, + type: `string`, + requiresArg: true, + description: `Name of the Dataset to search for`, + }, + modelName: { + alias: `m`, + type: `string`, + default: false, + requiresArg: true, + description: `Name of the model`, + }, + modelId: { + alias: `a`, + type: `string`, + default: ``, + requiresArg: true, + description: `Id of the model`, + }, + modelEvaluationId: { + alias: `e`, + type: `string`, + default: ``, + requiresArg: true, + description: `Id of the model evaluation`, + }, + operationFullId: { + alias: `o`, + type: `string`, + default: ``, + requiresArg: true, + description: `Full name of an operation`, + }, + projectId: { + alias: `z`, + type: `number`, + default: process.env.GCLOUD_PROJECT, + requiresArg: true, + description: `The GCLOUD_PROJECT string, e.g. 
"my-gcloud-project"`, + }, + trainBudget: { + alias: `t`, + type: `string`, + default: ``, + requiresArg: true, + description: `Budget for training the model`, + }, + }) + .command(`create-model`, `creates a new Model`, {}, opts => + createModel( + opts.projectId, + opts.computeRegion, + opts.datasetId, + opts.modelName, + opts.trainBudget ) - .command( - `get-operation-status`, - `Gets status of current operation`, - {}, - opts => getOperationStatus(opts.operationFullId) + ) + .command( + `get-operation-status`, + `Gets status of current operation`, + {}, + opts => getOperationStatus(opts.operationFullId) + ) + .command(`list-models`, `list all Models`, {}, opts => + listModels(opts.projectId, opts.computeRegion, opts.filter) + ) + .command(`get-model`, `Get a Model`, {}, opts => + getModel(opts.projectId, opts.computeRegion, opts.modelId) + ) + .command(`list-model-evaluations`, `List model evaluations`, {}, opts => + listModelEvaluations( + opts.projectId, + opts.computeRegion, + opts.modelId, + opts.filter ) - .command(`list-models`, `list all Models`, {}, opts => - listModels(opts.projectId, opts.computeRegion, opts.filter) + ) + .command(`get-model-evaluation`, `Get model evaluation`, {}, opts => + getModelEvaluation( + opts.projectId, + opts.computeRegion, + opts.modelId, + opts.modelEvaluationId ) - .command(`get-model`, `Get a Model`, {}, opts => - getModel(opts.projectId, opts.computeRegion, opts.modelId) + ) + .command(`display-evaluation`, `Display evaluation`, {}, opts => + displayEvaluation( + opts.projectId, + opts.computeRegion, + opts.modelId, + opts.filter ) - .command(`list-model-evaluations`, `List model evaluations`, {}, opts => - listModelEvaluations( - opts.projectId, - opts.computeRegion, - opts.modelId, - opts.filter - ) - ) - .command(`get-model-evaluation`, `Get model evaluation`, {}, opts => - getModelEvaluation( - opts.projectId, - opts.computeRegion, - opts.modelId, - opts.modelEvaluationId - ) - ) - .command(`display-evaluation`, `Display evaluation`, {}, opts => - displayEvaluation( - opts.projectId, - opts.computeRegion, - opts.modelId, - opts.filter - ) - ) - .command(`delete-model`, `Delete a Model`, {}, opts => - deleteModel(opts.projectId, opts.computeRegion, opts.modelId) - ) - .example(`node $0 create-model -i "DatasetID" -m "myModelName" -t "2"`) - .example(`node $0 get-operation-status -i "datasetId" -o "OperationFullID"`) - .example(`node $0 list-models -f "textClassificationModelMetadata:*"`) - .example(`node $0 get-model -a "ModelID"`) - .example(`node $0 list-model-evaluations -a "ModelID"`) - .example(`node $0 get-model-evaluation -a "ModelId" -e "ModelEvaluationID"`) - .example(`node $0 display-evaluation -a "ModelId"`) - .example(`node $0 delete-model -a "ModelID"`) - .wrap(120) - .recommendCommands() - .help() - .strict().argv; -} - -main().catch(console.error); + ) + .command(`delete-model`, `Delete a Model`, {}, opts => + deleteModel(opts.projectId, opts.computeRegion, opts.modelId) + ) + .example(`node $0 create-model -i "DatasetID" -m "myModelName" -t "2"`) + .example(`node $0 get-operation-status -i "datasetId" -o "OperationFullID"`) + .example(`node $0 list-models -f "textClassificationModelMetadata:*"`) + .example(`node $0 get-model -a "ModelID"`) + .example(`node $0 list-model-evaluations -a "ModelID"`) + .example(`node $0 get-model-evaluation -a "ModelId" -e "ModelEvaluationID"`) + .example(`node $0 display-evaluation -a "ModelId"`) + .example(`node $0 delete-model -a "ModelID"`) + .wrap(120) + .recommendCommands() + .help() + 
diff --git a/cloud-language/snippets/automl/automlNaturalLanguagePredict.js b/cloud-language/snippets/automl/automlNaturalLanguagePredict.js
index 4a6fff3122..77b45fd725 100755
--- a/cloud-language/snippets/automl/automlNaturalLanguagePredict.js
+++ b/cloud-language/snippets/automl/automlNaturalLanguagePredict.js
@@ -68,61 +68,57 @@ async function predict(projectId, computeRegion, modelId, filePath) {
   // [END automl_natural_language_predict]
 }
 
-async function main() {
-  require(`yargs`)
-    .demand(1)
-    .options({
-      computeRegion: {
-        alias: `c`,
-        type: `string`,
-        default: process.env.REGION_NAME,
-        requiresArg: true,
-        description: `region name e.g. "us-central1"`,
-      },
-      filePath: {
-        alias: `f`,
-        default: `./resources/test.txt`,
-        type: `string`,
-        requiresArg: true,
-        description: `local text file path of the content to be classified`,
-      },
-      modelId: {
-        alias: `i`,
-        type: `string`,
-        requiresArg: true,
-        description: `Id of the model which will be used for text classification`,
-      },
-      projectId: {
-        alias: `z`,
-        type: `number`,
-        default: process.env.GCLOUD_PROJECT,
-        requiresArg: true,
-        description: `The GCLOUD_PROJECT string, e.g. "my-gcloud-project"`,
-      },
-      scoreThreshold: {
-        alias: `s`,
-        type: `string`,
-        default: `0.5`,
-        requiresArg: true,
-        description:
-          `A value from 0.0 to 1.0. When the model makes predictions for an image it will` +
-          `only produce results that have at least this confidence score threshold. Default is .5`,
-      },
-    })
-    .command(`predict`, `classify the content`, {}, opts =>
-      predict(
-        opts.projectId,
-        opts.computeRegion,
-        opts.modelId,
-        opts.filePath,
-        opts.scoreThreshold
-      )
-    )
-    .example(`node $0 predict -i "modelId" -f "./resources/test.txt" -s "0.5"`)
-    .wrap(120)
-    .recommendCommands()
-    .help()
-    .strict().argv;
-}
-
-main().catch(console.error);
+require(`yargs`)
+  .demand(1)
+  .options({
+    computeRegion: {
+      alias: `c`,
+      type: `string`,
+      default: process.env.REGION_NAME,
+      requiresArg: true,
+      description: `region name e.g. "us-central1"`,
+    },
+    filePath: {
+      alias: `f`,
+      default: `./resources/test.txt`,
+      type: `string`,
+      requiresArg: true,
+      description: `local text file path of the content to be classified`,
+    },
+    modelId: {
+      alias: `i`,
+      type: `string`,
+      requiresArg: true,
+      description: `Id of the model which will be used for text classification`,
+    },
+    projectId: {
+      alias: `z`,
+      type: `number`,
+      default: process.env.GCLOUD_PROJECT,
+      requiresArg: true,
+      description: `The GCLOUD_PROJECT string, e.g. "my-gcloud-project"`,
+    },
+    scoreThreshold: {
+      alias: `s`,
+      type: `string`,
+      default: `0.5`,
+      requiresArg: true,
+      description:
+        `A value from 0.0 to 1.0. When the model makes predictions for an image it will` +
+        `only produce results that have at least this confidence score threshold. Default is .5`,
+    },
+  })
+  .command(`predict`, `classify the content`, {}, opts =>
+    predict(
+      opts.projectId,
+      opts.computeRegion,
+      opts.modelId,
+      opts.filePath,
+      opts.scoreThreshold
+    )
+  )
+  .example(`node $0 predict -i "modelId" -f "./resources/test.txt" -s "0.5"`)
+  .wrap(120)
+  .recommendCommands()
+  .help()
+  .strict().argv;
diff --git a/cloud-language/snippets/automl/package.json b/cloud-language/snippets/automl/package.json
deleted file mode 100644
index 94c46794f1..0000000000
--- a/cloud-language/snippets/automl/package.json
+++ /dev/null
@@ -1,31 +0,0 @@
-{
-  "name": "automl",
-  "version": "1.0.0",
-  "description": "",
-  "main": "automl_natural_language_dataset.js",
-  "files": [
-    "*.js",
-    "resources"
-  ],
-  "scripts": {
-    "test": "mocha system-test/*.test.js --opts system-test/mocha.opts"
-  },
-  "engines": {
-    "node": ">=8.0.0"
-  },
-  "author": "",
-  "license": "ISC",
-  "dependencies": {
-    "@google-cloud/automl": "^0.1.1",
-    "@google-cloud/language": "^2.0.0",
-    "mathjs": "^5.0.4",
-    "util": "^0.11.0",
-    "yargs": "^12.0.1"
-  },
-  "devDependencies": {
-    "@google-cloud/nodejs-repo-tools": "^3.0.0",
-    "mocha": "^5.2.0",
-    "proxyquire": "^2.0.1",
-    "sinon": "^7.0.0"
-  }
-}
diff --git a/cloud-language/snippets/automl/system-test/.eslintrc.yml b/cloud-language/snippets/automl/system-test/.eslintrc.yml
deleted file mode 100644
index 6db2a46c53..0000000000
--- a/cloud-language/snippets/automl/system-test/.eslintrc.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-env:
-  mocha: true
diff --git a/cloud-language/snippets/automl/system-test/automlNaturalLanguage.test.js b/cloud-language/snippets/automl/system-test/automlNaturalLanguage.test.js
deleted file mode 100644
index 33cf32d3f9..0000000000
--- a/cloud-language/snippets/automl/system-test/automlNaturalLanguage.test.js
+++ /dev/null
@@ -1,140 +0,0 @@
-/**
- * Copyright 2018, Google, LLC.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-'use strict';
-
-const assert = require(`assert`);
-const tools = require(`@google-cloud/nodejs-repo-tools`);
-
-const cmdDataset = `node automlNaturalLanguageDataset.js`;
-const cmdModel = `node automlNaturalLanguageModel.js`;
-const cmdPredict = `node automlNaturalLanguagePredict.js`;
-
-const testDataSetName = `testDataset`;
-const dummyDataSet = `dummyDataset`;
-const testModelName = `dummyModel`;
-const sampleText = `./resources/test.txt`;
-
-// Skipped because it's been taking too long to delete datasets
-it.skip(`should create a create, list, and delete a dataset`, async () => {
-  // Check to see that this dataset does not yet exist
-  let output = await tools.runAsync(`${cmdDataset} list-datasets`);
-  //t.false(output.includes(testDataSetName));
-  assert.notStrictEqual(RegExp(`testDataset`).test(output));
-
-  // Create dataset
-  output = await tools.runAsync(
-    `${cmdDataset} create-dataset -n "${testDataSetName}"`
-  );
-  const parsedOut = output.split(`\n`);
-  const dataSetId = parsedOut[1].split(`:`)[1].trim();
-  assert(RegExp(`Dataset display name: testDataset`).test(output));
-
-  // Delete dataset
-  output = await tools.runAsync(
-    `${cmdDataset} delete-dataset -i "${dataSetId}"`
-  );
-  assert(RegExp(`Dataset deleted.`).test(output));
-});
-
-// See : https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/NaturalLanguage/automl/model_test.py
-// We make two models running this test, see hard-coded workaround below
-it.skip(`should create a dataset, import data, and start making a model`, async () => {
-  // Check to see that this dataset does not yet exist
-  let output = await tools.runAsync(`${cmdDataset} list-datasets`);
-  assert.notStrictEqual(RegExp(`dummyDataset`).test(output));
-
-  // Create dataset
-  output = await tools.runAsync(
-    `${cmdDataset} create-dataset -n "${dummyDataSet}"`
-  );
-
-  const dataSetId = output
-    .split(`\n`)[1]
-    .split(`:`)[1]
-    .trim();
-  assert(RegExp(`Dataset display name: dummyDataSet`).test(output));
-
-  // Import Data
-  output = await tools.runAsync(
-    `${cmdDataset} import-data -i "${dataSetId}" -p "gs://nodejs-docs-samples-vcm/happiness.csv"`
-  );
-  assert(RegExp(`Data imported.`).test(output));
-
-  // Check to make sure model doesn't already exist
-  output = await tools.runAsync(`${cmdModel} list-models`);
-  assert.notStrictEqual(RegExp(`dummyModel`).test(output));
-
-  // Begin training dataset, getting operation ID for next operation
-  output = await tools.runAsync(
-    `${cmdModel} create-model -i "${dataSetId}" -m "${testModelName}" -t "2"`
-  );
-  const operationName = output
-    .split(`\n`)[0]
-    .split(`:`)[1]
-    .trim();
-  assert(RegExp(`Training started...`).test(output));
-
-  // Poll operation status, here confirming that operation is not complete yet
-  output = await tools.runAsync(
-    `${cmdModel} get-operation-status -i "${dataSetId}" -o "${operationName}"`
-  );
-  assert(RegExp(`done: false`).test(output));
-});
-
-it(`should display evaluation from prexisting model`, async () => {
-  const donotdeleteModelId = `TCN4740161257642267869`;
-
-  // Confirm dataset exists
-  let output = await tools.runAsync(`${cmdDataset} list-datasets`);
-  assert(RegExp(`dummyDb`).test(output));
-
-  // List model evaluations, confirm model exists
-  output = await tools.runAsync(
-    `${cmdModel} list-model-evaluations -a "${donotdeleteModelId}"`
-  );
-
-  // Display evaluation
-  output = await tools.runAsync(
-    `${cmdModel} display-evaluation -a "${donotdeleteModelId}"`
-  );
-  assert(RegExp(`Model Precision:`).test(output));
-});
-
-it(`should run Prediction from prexisting model`, async () => {
-  const donotdeleteModelId = `TCN4740161257642267869`;
-
-  // Confirm dataset exists
-  let output = await tools.runAsync(`${cmdDataset} list-datasets`);
-  assert(RegExp(`do_not_delete_me`).test(output));
-
-  // List model evaluations, confirm model exists
-  output = await tools.runAsync(
-    `${cmdModel} list-model-evaluations -a "${donotdeleteModelId}"`
-  );
-  assert(RegExp(`classificationEvaluationMetrics:`).test(output));
-
-  // Run prediction on 'test.txt' in resources folder
-  output = await tools.runAsync(
-    `${cmdPredict} predict -i "${donotdeleteModelId}" -f "${sampleText}" -s "0.5"`
-  );
-  assert(RegExp(`Firm_Cheese`).test(output));
-});
-
-// List datasets
-it(`should list datasets`, async () => {
-  const output = await tools.runAsync(`${cmdDataset} list-datasets`);
-  assert(RegExp(`List of datasets:`).test(output));
-});
diff --git a/cloud-language/snippets/automl/system-test/mocha.opts b/cloud-language/snippets/automl/system-test/mocha.opts
deleted file mode 100644
index f30d8e67d4..0000000000
--- a/cloud-language/snippets/automl/system-test/mocha.opts
+++ /dev/null
@@ -1,2 +0,0 @@
---timeout 20000
---throw-deprecation
\ No newline at end of file
diff --git a/cloud-language/snippets/package.json b/cloud-language/snippets/package.json
index 63c02e645a..aa2c4cb28a 100644
--- a/cloud-language/snippets/package.json
+++ b/cloud-language/snippets/package.json
@@ -12,7 +12,7 @@
     "resources"
   ],
   "scripts": {
-    "test": "mocha"
+    "test": "mocha --timeout 60000"
   },
   "dependencies": {
     "@google-cloud/automl": "^0.1.1",
@@ -22,7 +22,8 @@
     "yargs": "^12.0.0"
   },
   "devDependencies": {
-    "@google-cloud/nodejs-repo-tools": "^3.0.0",
+    "chai": "^4.2.0",
+    "execa": "^1.0.0",
     "mocha": "^5.2.0",
    "uuid": "^3.2.1"
   }
diff --git a/cloud-language/snippets/quickstart.js b/cloud-language/snippets/quickstart.js
index eaae8c5ecc..858c30b011 100644
--- a/cloud-language/snippets/quickstart.js
+++ b/cloud-language/snippets/quickstart.js
@@ -16,7 +16,7 @@
 'use strict';
 
 // [START language_quickstart]
-async function main() {
+async function quickstart() {
   // Imports the Google Cloud client library
   const language = require('@google-cloud/language');
 
@@ -39,6 +39,6 @@ async function main() {
   console.log(`Sentiment score: ${sentiment.score}`);
   console.log(`Sentiment magnitude: ${sentiment.magnitude}`);
 }
-
-main().catch(console.error);
 // [END language_quickstart]
+
+quickstart().catch(console.error);
diff --git a/cloud-language/snippets/test/analyze.v1.test.js b/cloud-language/snippets/test/analyze.v1.test.js
index 90c6c3f5ad..95e3e2c28e 100644
--- a/cloud-language/snippets/test/analyze.v1.test.js
+++ b/cloud-language/snippets/test/analyze.v1.test.js
@@ -15,137 +15,124 @@
 'use strict';
 
-const fs = require(`fs`);
-const path = require(`path`);
-const {Storage} = require(`@google-cloud/storage`);
-const storage = new Storage();
-const assert = require(`assert`);
-const tools = require(`@google-cloud/nodejs-repo-tools`);
-const uuid = require(`uuid`);
-
-const cmd = `node analyze.v1.js`;
-const cwd = path.join(__dirname, `..`);
-const bucketName = `nodejs-docs-samples-test-${uuid.v4()}`;
-const fileName = `text.txt`;
-const fileName2 = `android_text.txt`;
-const localFilePath = path.join(__dirname, `../resources/${fileName}`);
-const localFilePath2 = path.join(__dirname, `../resources/${fileName2}`);
-const text = fs.readFileSync(localFilePath, 'utf-8');
-const text2 = fs.readFileSync(localFilePath2, 'utf-8');
-
-before(async () => {
-  tools.checkCredentials();
-  const [bucket] = await storage.createBucket(bucketName);
-  await bucket.upload(localFilePath);
-  await bucket.upload(localFilePath2);
-});
-
-after(async () => {
-  const bucket = storage.bucket(bucketName);
-  await bucket.deleteFiles({force: true});
-  await bucket.deleteFiles({force: true}); // Try a second time...
-  await bucket.delete();
-});
-
-beforeEach(async () => tools.stubConsole);
-afterEach(async () => tools.restoreConsole);
-
-it(`should analyze sentiment in text`, async () => {
-  const output = await tools.runAsync(`${cmd} sentiment-text "${text}"`, cwd);
-  assert(RegExp(`Document sentiment:`).test(output));
-  assert(RegExp(`Sentence: ${text}`).test(output));
-  assert(RegExp(`Score: 0`).test(output));
-  assert(RegExp(`Magnitude: 0`).test(output));
-});
-
-it(`should analyze sentiment in a file`, async () => {
-  const output = await tools.runAsync(
-    `${cmd} sentiment-file ${bucketName} ${fileName}`,
-    cwd
-  );
-  assert(output, new RegExp(`Document sentiment:`).test(output));
-  assert(RegExp(`Sentence: ${text}`).test(output));
-  assert(RegExp(`Score: 0`).test(output));
-  assert(RegExp(`Magnitude: 0`).test(output));
-});
-
-it(`should analyze entities in text`, async () => {
-  const output = await tools.runAsync(`${cmd} entities-text "${text}"`, cwd);
-  assert(RegExp(`Obama`).test(output));
-  assert(RegExp(`Type: PERSON`).test(output));
-  assert(RegExp(`White House`).test(output));
-  assert(RegExp(`Type: LOCATION`).test(output));
-});
-
-it('should analyze entities in a file', async () => {
-  const output = await tools.runAsync(
-    `${cmd} entities-file ${bucketName} ${fileName}`,
-    cwd
-  );
-  assert(RegExp(`Entities:`).test(output));
-  assert(RegExp(`Obama`).test(output));
-  assert(RegExp(`Type: PERSON`).test(output));
-  assert(RegExp(`White House`).test(output));
-  assert(RegExp(`Type: LOCATION`).test(output));
-});
-
-it(`should analyze syntax in text`, async () => {
-  const output = await tools.runAsync(`${cmd} syntax-text "${text}"`, cwd);
-  assert(RegExp(`Tokens:`).test(output));
-  assert(RegExp(`NOUN:`).test(output));
-  assert(RegExp(`President`).test(output));
-  assert(RegExp(`Obama`).test(output));
-  assert(RegExp(`Morphology:`).test(output));
-  assert(RegExp(`tag: 'NOUN'`).test(output));
-});
-
-it('should analyze syntax in a file', async () => {
-  const output = await tools.runAsync(
-    `${cmd} syntax-file ${bucketName} ${fileName}`,
-    cwd
-  );
-  assert(RegExp(`NOUN:`).test(output));
-  assert(RegExp(`President`).test(output));
-  assert(RegExp(`Obama`).test(output));
-  assert(RegExp(`Morphology:`).test(output));
-  assert(RegExp(`tag: 'NOUN'`).test(output));
-});
-
-it(`should analyze entity sentiment in text`, async () => {
-  const output = await tools.runAsync(
-    `${cmd} entity-sentiment-text "${text}"`,
-    cwd
-  );
-  assert(RegExp(`Entities and sentiments:`).test(output));
-  assert(RegExp(`Obama`).test(output));
-  assert(RegExp(`PERSON`).test(output));
-  assert(RegExp(`Score: 0`).test(output));
-  assert(RegExp(`Magnitude: 0`).test(output));
-});
-
-it('should analyze entity sentiment in a file', async () => {
-  const output = await tools.runAsync(
-    `${cmd} entity-sentiment-file ${bucketName} ${fileName}`,
-    cwd
-  );
-  assert(RegExp(`Entities and sentiments:`).test(output));
-  assert(RegExp(`Obama`).test(output));
-  assert(RegExp(`PERSON`).test(output));
-  assert(RegExp(`Score: 0`).test(output));
-  assert(RegExp(`Magnitude: 0`).test(output));
-});
-
-it('should classify text in a file', async () => {
-  const output = await tools.runAsync(
-    `${cmd} classify-file ${bucketName} ${fileName2}`,
-    cwd
-  );
-  assert(RegExp(`Name:`).test(output));
-  assert(RegExp(`Computers & Electronics`).test(output));
-});
-
-it('should classify text in text', async () => {
-  const output = await tools.runAsync(`${cmd} classify-text "${text2}"`, cwd);
-  assert(RegExp(`Name:`).test(output));
-  assert(RegExp(`Computers & Electronics`).test(output));
+const fs = require('fs');
+const path = require('path');
+const {Storage} = require('@google-cloud/storage');
+const {assert} = require('chai');
+const execa = require('execa');
+const uuid = require('uuid');
+
+const exec = async cmd => (await execa.shell(cmd)).stdout;
+
+describe('analyze.v1', () => {
+  const storage = new Storage();
+  const cmd = 'node analyze.v1.js';
+  const bucketName = `nodejs-docs-samples-test-${uuid.v4()}`;
+  const fileName = `text.txt`;
+  const fileName2 = `android_text.txt`;
+  const localFilePath = path.join(__dirname, `../resources/${fileName}`);
+  const localFilePath2 = path.join(__dirname, `../resources/${fileName2}`);
+  const text = fs.readFileSync(localFilePath, 'utf-8');
+  const text2 = fs.readFileSync(localFilePath2, 'utf-8');
+
+  before(async () => {
+    const [bucket] = await storage.createBucket(bucketName);
+    await bucket.upload(localFilePath);
+    await bucket.upload(localFilePath2);
+  });
+
+  after(async () => {
+    const bucket = storage.bucket(bucketName);
+    await bucket.deleteFiles({force: true});
+    await bucket.deleteFiles({force: true}); // Try a second time...
+    await bucket.delete();
+  });
+
+  it('should analyze sentiment in text', async () => {
+    const output = await exec(`${cmd} sentiment-text "${text}"`);
+    assert.match(output, /Document sentiment:/);
+    assert.match(output, new RegExp(`Sentence: ${text}`));
+    assert.match(output, /Score: 0/);
+    assert.match(output, /Magnitude: 0/);
+  });
+
+  it('should analyze sentiment in a file', async () => {
+    const output = await exec(
+      `${cmd} sentiment-file ${bucketName} ${fileName}`
+    );
+    assert(output, /Document sentiment:/);
+    assert.match(output, new RegExp(`Sentence: ${text}`));
+    assert.match(output, /Score: 0/);
+    assert.match(output, /Magnitude: 0/);
+  });
+
+  it('should analyze entities in text', async () => {
+    const output = await exec(`${cmd} entities-text "${text}"`);
+    assert.match(output, /Obama/);
+    assert.match(output, /Type: PERSON/);
+    assert.match(output, /White House/);
+    assert.match(output, /Type: LOCATION/);
+  });
+
+  it('should analyze entities in a file', async () => {
+    const output = await exec(`${cmd} entities-file ${bucketName} ${fileName}`);
+    assert.match(output, /Entities:/);
+    assert.match(output, /Obama/);
+    assert.match(output, /Type: PERSON/);
+    assert.match(output, /White House/);
+    assert.match(output, /Type: LOCATION/);
+  });
+
+  it('should analyze syntax in text', async () => {
+    const output = await exec(`${cmd} syntax-text "${text}"`);
+    assert.match(output, /Tokens:/);
+    assert.match(output, /NOUN:/);
+    assert.match(output, /President/);
+    assert.match(output, /Obama/);
+    assert.match(output, /Morphology:/);
+    assert.match(output, /tag: 'NOUN'/);
+  });
+
+  it('should analyze syntax in a file', async () => {
+    const output = await exec(`${cmd} syntax-file ${bucketName} ${fileName}`);
+    assert.match(output, /NOUN:/);
+    assert.match(output, /President/);
+    assert.match(output, /Obama/);
+    assert.match(output, /Morphology:/);
+    assert.match(output, /tag: 'NOUN'/);
+  });
+
+  it('should analyze entity sentiment in text', async () => {
+    const output = await exec(`${cmd} entity-sentiment-text "${text}"`);
"${text}"`); + assert.match(output, /Entities and sentiments:/); + assert.match(output, /Obama/); + assert.match(output, /PERSON/); + assert.match(output, /Score: 0/); + assert.match(output, /Magnitude: 0/); + }); + + it('should analyze entity sentiment in a file', async () => { + const output = await exec( + `${cmd} entity-sentiment-file ${bucketName} ${fileName}` + ); + assert.match(output, /Entities and sentiments:/); + assert.match(output, /Obama/); + assert.match(output, /PERSON/); + assert.match(output, /Score: 0/); + assert.match(output, /Magnitude: 0/); + }); + + it('should classify text in a file', async () => { + const output = await exec( + `${cmd} classify-file ${bucketName} ${fileName2}` + ); + assert.match(output, /Name:/); + assert.match(output, /Computers & Electronics/); + }); + + it('should classify text in text', async () => { + const output = await exec(`${cmd} classify-text "${text2}"`); + assert.match(output, /Name:/); + assert.match(output, /Computers & Electronics/); + }); }); diff --git a/cloud-language/snippets/test/analyze.v1beta2.test.js b/cloud-language/snippets/test/analyze.v1beta2.test.js index 3602d9c861..9277805284 100644 --- a/cloud-language/snippets/test/analyze.v1beta2.test.js +++ b/cloud-language/snippets/test/analyze.v1beta2.test.js @@ -15,16 +15,16 @@ 'use strict'; -const fs = require(`fs`); -const path = require(`path`); -const {Storage} = require(`@google-cloud/storage`); -const storage = new Storage(); -const assert = require('assert'); -const tools = require(`@google-cloud/nodejs-repo-tools`); -const uuid = require(`uuid`); +const fs = require('fs'); +const path = require('path'); +const {Storage} = require('@google-cloud/storage'); +const {assert} = require('chai'); +const execa = require('execa'); +const uuid = require('uuid'); -const cmd = `node analyze.v1beta2.js`; -const cwd = path.join(__dirname, `..`); +const exec = async cmd => (await execa.shell(cmd)).stdout; +const storage = new Storage(); +const cmd = 'node analyze.v1beta2.js'; const bucketName = `nodejs-docs-samples-test-${uuid.v4()}`; const fileName = `text.txt`; const fileName2 = `android_text.txt`; @@ -32,107 +32,94 @@ const localFilePath = path.join(__dirname, `../resources/${fileName}`); const localFilePath2 = path.join(__dirname, `../resources/${fileName2}`); const text = fs.readFileSync(localFilePath, 'utf-8'); const text2 = fs.readFileSync(localFilePath2, 'utf-8'); -const germanText = `Willkommen bei München`; +const germanText = 'Willkommen bei München'; -before(async () => { - tools.checkCredentials(); - const [bucket] = await storage.createBucket(bucketName); - await bucket.upload(localFilePath); - await bucket.upload(localFilePath2); -}); +describe('analyze.v1beta2', () => { + before(async () => { + const [bucket] = await storage.createBucket(bucketName); + await bucket.upload(localFilePath); + await bucket.upload(localFilePath2); + }); -after(async () => { - const bucket = storage.bucket(bucketName); - await bucket.deleteFiles({force: true}); - await bucket.deleteFiles({force: true}); // Try a second time... - await bucket.delete(); -}); + after(async () => { + const bucket = storage.bucket(bucketName); + await bucket.deleteFiles({force: true}); + await bucket.deleteFiles({force: true}); // Try a second time... 
+    await bucket.delete();
+  });
 
-beforeEach(async () => tools.stubConsole);
-afterEach(async () => tools.restoreConsole);
+  it(`should analyze sentiment in text`, async () => {
+    const output = await exec(`${cmd} sentiment-text "${text}"`);
+    assert.match(output, /Document sentiment:/);
+    assert.match(output, new RegExp(`Sentence: ${text}`));
+    assert.match(output, /Score: 0/);
+    assert.match(output, /Magnitude: 0/);
+  });
 
-it(`should analyze sentiment in text`, async () => {
-  const output = await tools.runAsync(`${cmd} sentiment-text "${text}"`, cwd);
-  assert(RegExp(`Document sentiment:`).test(output));
-  assert(RegExp(`Sentence: ${text}`).test(output));
-  assert(RegExp(`Score: 0`).test(output));
-  assert(RegExp(`Magnitude: 0`).test(output));
-});
+  it(`should analyze sentiment in a file`, async () => {
+    const output = await exec(
+      `${cmd} sentiment-file ${bucketName} ${fileName}`
+    );
+    assert.match(output, /Document sentiment:/);
+    assert.match(output, new RegExp(`Sentence: ${text}`));
+    assert.match(output, /Score: 0/);
+    assert.match(output, /Magnitude: 0/);
+  });
 
-it(`should analyze sentiment in a file`, async () => {
-  const output = await tools.runAsync(
-    `${cmd} sentiment-file ${bucketName} ${fileName}`,
-    cwd
-  );
-  assert(RegExp(`Document sentiment:`).test(output));
-  assert(RegExp(`Sentence: ${text}`).test(output));
-  assert(RegExp(`Score: 0`).test(output));
-  assert(RegExp(`Magnitude: 0`).test(output));
-});
+  it(`should analyze entities in text`, async () => {
+    const output = await exec(`${cmd} entities-text "${text}"`);
+    assert.match(output, /Obama/);
+    assert.match(output, /Type: PERSON/);
+    assert.match(output, /White House/);
+    assert.match(output, /Type: LOCATION/);
+  });
 
-it(`should analyze entities in text`, async () => {
-  const output = await tools.runAsync(`${cmd} entities-text "${text}"`, cwd);
-  assert(RegExp(`Obama`).test(output));
-  assert(RegExp(`Type: PERSON`).test(output));
-  assert(RegExp(`White House`).test(output));
-  assert(RegExp(`Type: LOCATION`).test(output));
-});
+  it('should analyze entities in a file', async () => {
+    const output = await exec(`${cmd} entities-file ${bucketName} ${fileName}`);
+    assert.match(output, /Entities:/);
+    assert.match(output, /Type: PERSON/);
+    assert.match(output, /White House/);
+    assert.match(output, /Type: LOCATION/);
+  });
 
-it('should analyze entities in a file', async () => {
-  const output = await tools.runAsync(
-    `${cmd} entities-file ${bucketName} ${fileName}`,
-    cwd
-  );
-  assert(RegExp(`Entities:`).test(output));
-  assert(RegExp(`Type: PERSON`).test(output));
-  assert(RegExp(`White House`).test(output));
-  assert(RegExp(`Type: LOCATION`).test(output));
-});
-
-it(`should analyze syntax in text`, async () => {
-  const output = await tools.runAsync(`${cmd} syntax-text "${text}"`, cwd);
-  assert(RegExp(`Parts of speech:`).test(output));
-  assert(RegExp(`NOUN:`).test(output));
-  assert(RegExp(`President`).test(output));
-  assert(RegExp(`Obama`).test(output));
-  assert(RegExp(`Morphology:`).test(output));
-  assert(RegExp(`tag: 'NOUN'`).test(output));
-});
+  it(`should analyze syntax in text`, async () => {
+    const output = await exec(`${cmd} syntax-text "${text}"`);
+    assert.match(output, /Parts of speech:/);
+    assert.match(output, /NOUN:/);
+    assert.match(output, /President/);
+    assert.match(output, /Obama/);
+    assert.match(output, /Morphology:/);
+    assert.match(output, /tag: 'NOUN'/);
+  });
 
-it('should analyze syntax in a file', async () => {
-  const output = await tools.runAsync(
-    `${cmd} syntax-file ${bucketName} ${fileName}`,
-    cwd
-  );
-  assert(RegExp(`NOUN:`).test(output));
-  assert(RegExp(`President`).test(output));
-  assert(RegExp(`Obama`).test(output));
-  assert(RegExp(`Morphology:`).test(output));
-  assert(RegExp(`tag: 'NOUN'`).test(output));
-});
+  it('should analyze syntax in a file', async () => {
+    const output = await exec(`${cmd} syntax-file ${bucketName} ${fileName}`);
+    assert.match(output, /NOUN:/);
+    assert.match(output, /President/);
+    assert.match(output, /Obama/);
+    assert.match(output, /Morphology:/);
+    assert.match(output, /tag: 'NOUN'/);
+  });
 
-it('should analyze syntax in a 1.1 language (German)', async () => {
-  const output = await tools.runAsync(
-    `${cmd} syntax-text "${germanText}"`,
-    cwd
-  );
-  assert(RegExp(`Parts of speech:`).test(output));
-  assert(RegExp(`ADV: Willkommen`).test(output));
-  assert(RegExp(`ADP: bei`).test(output));
-  assert(RegExp(`NOUN: München`).test(output));
-});
+  it('should analyze syntax in a 1.1 language (German)', async () => {
+    const output = await exec(`${cmd} syntax-text "${germanText}"`);
+    assert.match(output, /Parts of speech:/);
+    assert.match(output, /ADV: Willkommen/);
+    assert.match(output, /ADP: bei/);
+    assert.match(output, /NOUN: München/);
+  });
 
-it('should classify text in a file', async () => {
-  const output = await tools.runAsync(
-    `${cmd} classify-file ${bucketName} ${fileName2}`,
-    cwd
-  );
-  assert(RegExp(`Name:`).test(output));
-  assert(RegExp(`Computers & Electronics`).test(output));
-});
+  it('should classify text in a file', async () => {
+    const output = await exec(
+      `${cmd} classify-file ${bucketName} ${fileName2}`
+    );
+    assert.match(output, /Name:/);
+    assert.match(output, /Computers & Electronics/);
+  });
 
-it('should classify text in text', async () => {
-  const output = await tools.runAsync(`${cmd} classify-text "${text2}"`, cwd);
-  assert(RegExp(`Name:`).test(output));
-  assert(RegExp(`Computers & Electronics`).test(output));
+  it('should classify text in text', async () => {
+    const output = await exec(`${cmd} classify-text "${text2}"`);
+    assert.match(output, /Name:/);
+    assert.match(output, /Computers & Electronics/);
+  });
 });
diff --git a/cloud-language/snippets/test/automlNaturalLanguage.test.js b/cloud-language/snippets/test/automlNaturalLanguage.test.js
new file mode 100644
index 0000000000..d4829a2209
--- /dev/null
+++ b/cloud-language/snippets/test/automlNaturalLanguage.test.js
@@ -0,0 +1,139 @@
+/**
+ * Copyright 2018, Google, LLC.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+'use strict';
+
+const {assert} = require('chai');
+const execa = require('execa');
+
+const cmdDataset = 'node automl/automlNaturalLanguageDataset.js';
+const cmdModel = 'node automl/automlNaturalLanguageModel.js';
+const cmdPredict = 'node automl/automlNaturalLanguagePredict.js';
+
+const testDataSetName = 'testDataset';
+const dummyDataSet = 'dummyDataset';
+const testModelName = 'dummyModel';
+const sampleText = './resources/test.txt';
+const projectId = process.env.GCLOUD_PROJECT;
+
+const exec = async cmd => (await execa.shell(cmd)).stdout;
+
+describe.skip('automl', () => {
+  // Skipped because it's been taking too long to delete datasets
+  it('should create a create, list, and delete a dataset', async () => {
+    // Check to see that this dataset does not yet exist
+    let output = await exec(`${cmdDataset} list-datasets`);
+    //t.false(output.includes(testDataSetName));
+    assert.notMatch(output, /testDataset/);
+
+    // Create dataset
+    output = await exec(`${cmdDataset} create-dataset -n "${testDataSetName}"`);
+    const parsedOut = output.split('\n');
+    const dataSetId = parsedOut[1].split(':')[1].trim();
+    assert.match(output, /Dataset display name: {2}testDataset/);
+
+    // Delete dataset
+    output = await exec(`${cmdDataset} delete-dataset -i "${dataSetId}"`);
+    assert.match(output, /Dataset deleted./);
+  });
+
+  // See : https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/NaturalLanguage/automl/model_test.py
+  // We make two models running this test, see hard-coded workaround below
+  it('should create a dataset, import data, and start making a model', async () => {
+    // Check to see that this dataset does not yet exist
+    let output = await exec(`${cmdDataset} list-datasets`);
+    assert.notMatch(output, /dummyDataset/);
+
+    // Create dataset
+    output = await exec(`${cmdDataset} create-dataset -n "${dummyDataSet}"`);
+
+    const dataSetId = output
+      .split('\n')[1]
+      .split(':')[1]
+      .trim();
+    assert.match(output, /Dataset display name: {2}dummyDataSet/);
+
+    // Import Data
+    output = await exec(
+      `${cmdDataset} import-data -i "${dataSetId}" -p "gs://nodejs-docs-samples-vcm/happiness.csv"`
+    );
+    assert.match(output, /Data imported./);
+
+    // Check to make sure model doesn't already exist
+    output = await exec(`${cmdModel} list-models`);
+    assert.notMatch(output, /dummyModel/);
+
+    // Begin training dataset, getting operation ID for next operation
+    output = await exec(
+      `${cmdModel} create-model -i "${dataSetId}" -m "${testModelName}" -t "2"`
+    );
+    const operationName = output
+      .split('\n')[0]
+      .split(':')[1]
+      .trim();
+    assert.match(output, /Training started.../);
+
+    // Poll operation status, here confirming that operation is not complete yet
+    output = await exec(
+      `${cmdModel} get-operation-status -i "${dataSetId}" -o "${operationName}"`
+    );
+    assert.match(output, /done: false/);
+  });
+
+  it('should display evaluation from prexisting model', async () => {
+    const donotdeleteModelId = `TCN4740161257642267869`;
+
+    // Confirm dataset exists
+    let output = await exec(`${cmdDataset} list-datasets`);
+    assert.match(output, /dummyDb/);
+
+    // List model evaluations, confirm model exists
+    output = await exec(
+      `${cmdModel} list-model-evaluations -a "${donotdeleteModelId}"`
+    );
+
+    // Display evaluation
+    output = await exec(
+      `${cmdModel} display-evaluation -a "${donotdeleteModelId}"`
+    );
+    assert.match(output, /Model Precision:/);
+  });
+
+  it('should run Prediction from prexisting model', async () => {
+    const donotdeleteModelId = `TCN4740161257642267869`;
+
+    // Confirm dataset exists
+    let output = await exec(`${cmdDataset} list-datasets`);
+    assert.match(output, /do_not_delete_me/);
+
+    // List model evaluations, confirm model exists
+    output = await exec(
+      `${cmdModel} list-model-evaluations -a "${donotdeleteModelId}"`
+    );
+    assert.match(output, /classificationEvaluationMetrics:/);
+
+    // Run prediction on 'test.txt' in resources folder
+    output = await exec(
+      `${cmdPredict} predict -i "${donotdeleteModelId}" -f "${sampleText}" -s "0.5"`
+    );
+    assert.match(output, /Firm_Cheese/);
+  });
+
+  // List datasets
+  it('should list datasets', async () => {
+    const output = await exec(`${cmdDataset} list-datasets ${projectId}`);
+    assert.match(output, /List of datasets:/);
+  });
+});
diff --git a/cloud-language/snippets/test/mocha.opts b/cloud-language/snippets/test/mocha.opts
deleted file mode 100644
index 2a50a27188..0000000000
--- a/cloud-language/snippets/test/mocha.opts
+++ /dev/null
@@ -1,2 +0,0 @@
---timeout 20000
---throw-deprecation
diff --git a/cloud-language/snippets/test/quickstart.test.js b/cloud-language/snippets/test/quickstart.test.js
index 12dd4f193d..e4fa6f8bd1 100644
--- a/cloud-language/snippets/test/quickstart.test.js
+++ b/cloud-language/snippets/test/quickstart.test.js
@@ -15,19 +15,14 @@
 'use strict';
 
-const path = require(`path`);
-const assert = require('assert');
-const tools = require(`@google-cloud/nodejs-repo-tools`);
+const {assert} = require('chai');
+const execa = require('execa');
 
-const cmd = `node quickstart.js`;
-const cwd = path.join(__dirname, `..`);
-
-beforeEach(async () => tools.stubConsole);
-afterEach(async () => tools.restoreConsole);
-
-it(`should analyze sentiment in text`, async () => {
-  const output = await tools.runAsync(cmd, cwd);
-  assert(RegExp('Text: Hello, world!').test(output));
-  assert(RegExp('Sentiment score: ').test(output));
-  assert(RegExp('Sentiment magnitude: ').test(output));
+describe('quickstart', () => {
+  it('should analyze sentiment in text', async () => {
+    const {stdout} = await execa.shell('node quickstart.js');
+    assert(stdout, /Text: Hello, world!/);
+    assert(stdout, /Sentiment score: /);
+    assert(stdout, /Sentiment magnitude: /);
+  });
 });