diff --git a/vision/samples/detect.js b/vision/samples/detect.js
index 398c040d95..e636d5d9fd 100644
--- a/vision/samples/detect.js
+++ b/vision/samples/detect.js
@@ -821,7 +821,7 @@ function detectPdfText(bucketName, fileName) {
       return operation.promise();
     })
     .then(filesResponse => {
-      let destinationUri =
+      const destinationUri =
         filesResponse[0].responses[0].outputConfig.gcsDestination.uri;
       console.log('Json saved to: ' + destinationUri);
     })
diff --git a/vision/samples/faceDetection.js b/vision/samples/faceDetection.js
index d9c75e15c8..45921271bd 100644
--- a/vision/samples/faceDetection.js
+++ b/vision/samples/faceDetection.js
@@ -20,13 +20,13 @@
 // specified by the GOOGLE_APPLICATION_CREDENTIALS environment variable and use
 // the project specified by the GCLOUD_PROJECT environment variable. See
 // https://googlecloudplatform.github.io/gcloud-node/#/docs/google-cloud/latest/guides/authentication
-let vision = require('@google-cloud/vision');
+const vision = require('@google-cloud/vision');
 // [END vision_face_detection_tutorial_imports]
 // [START vision_face_detection_tutorial_client]
 // Creates a client
-let client = new vision.ImageAnnotatorClient();
+const client = new vision.ImageAnnotatorClient();
 
-let fs = require('fs');
+const fs = require('fs');
 // [END vision_face_detection_tutorial_client]
 
 /**
@@ -40,7 +40,7 @@ function detectFaces(inputFile, callback) {
     .faceDetection(request)
     .then(results => {
       const faces = results[0].faceAnnotations;
-      let numFaces = faces.length;
+      const numFaces = faces.length;
       console.log('Found ' + numFaces + (numFaces === 1 ? ' face' : ' faces'));
       callback(null, faces);
     })
@@ -61,12 +61,12 @@ function highlightFaces(inputFile, faces, outputFile, Canvas, callback) {
       return callback(err);
     }
 
-    let Image = Canvas.Image;
+    const Image = Canvas.Image;
     // Open the original image into a canvas
-    let img = new Image();
+    const img = new Image();
     img.src = image;
-    let canvas = new Canvas(img.width, img.height);
-    let context = canvas.getContext('2d');
+    const canvas = new Canvas(img.width, img.height);
+    const context = canvas.getContext('2d');
     context.drawImage(img, 0, 0, img.width, img.height);
 
     // Now draw boxes around all the faces
@@ -90,8 +90,8 @@ function highlightFaces(inputFile, faces, outputFile, Canvas, callback) {
 
     // Write the result to a file
    console.log('Writing to file ' + outputFile);
-    let writeStream = fs.createWriteStream(outputFile);
-    let pngStream = canvas.pngStream();
+    const writeStream = fs.createWriteStream(outputFile);
+    const pngStream = canvas.pngStream();
 
     pngStream.on('data', chunk => {
       writeStream.write(chunk);
@@ -131,7 +131,7 @@ if (module === require.main) {
     // eslint-disable-next-line no-process-exit
     process.exit(1);
   }
-  let inputFile = process.argv[2];
-  let outputFile = process.argv[3];
+  const inputFile = process.argv[2];
+  const outputFile = process.argv[3];
   exports.main(inputFile, outputFile, require('canvas'), console.log);
 }
diff --git a/vision/samples/productSearch/importProductSets.v1p3beta1.js b/vision/samples/productSearch/importProductSets.v1p3beta1.js
index 24b057610f..12b58bf0a5 100644
--- a/vision/samples/productSearch/importProductSets.v1p3beta1.js
+++ b/vision/samples/productSearch/importProductSets.v1p3beta1.js
@@ -53,7 +53,7 @@ function importProductSets(projectId, location, gcsUri) {
       console.log('Processing done.');
       console.log('Results of the processing:');
 
-      for (let i in responses[0].statuses) {
+      for (const i in responses[0].statuses) {
         console.log(
           'Status of processing ',
           i,
diff --git a/vision/samples/system-test/faceDetection.test.js b/vision/samples/system-test/faceDetection.test.js
index d9c1ba3cb7..811e7f620b 100644
--- a/vision/samples/system-test/faceDetection.test.js
+++ b/vision/samples/system-test/faceDetection.test.js
@@ -59,7 +59,7 @@ test.after.always(tools.restoreConsole);
 
 test.cb(`should detect faces`, t => {
   let done = false;
-  let timeout = setTimeout(() => {
+  const timeout = setTimeout(() => {
     if (!done) {
       console.warn('Face detection timed out!');
       t.end();
diff --git a/vision/samples/textDetection.js b/vision/samples/textDetection.js
index f46e81877c..18dfffcc00 100644
--- a/vision/samples/textDetection.js
+++ b/vision/samples/textDetection.js
@@ -15,27 +15,27 @@
 
 'use strict';
 
-let async = require('async');
-let fs = require('fs');
-let path = require('path');
+const async = require('async');
+const fs = require('fs');
+const path = require('path');
 
 // By default, the client will authenticate using the service account file
 // specified by the GOOGLE_APPLICATION_CREDENTIALS environment variable and use
 // the project specified by the GCLOUD_PROJECT environment variable. See
 // https://googlecloudplatform.github.io/gcloud-node/#/docs/google-cloud/latest/guides/authentication
-let vision = require('@google-cloud/vision');
-let natural = require('natural');
-let redis = require('redis');
+const vision = require('@google-cloud/vision');
+const natural = require('natural');
+const redis = require('redis');
 
 // Instantiate a vision client
-let client = new vision.ImageAnnotatorClient();
+const client = new vision.ImageAnnotatorClient();
 
 function Index() {
   // Connect to a redis server.
-  let TOKEN_DB = 0;
-  let DOCS_DB = 1;
-  let PORT = process.env.REDIS_PORT || '6379';
-  let HOST = process.env.REDIS_HOST || '127.0.0.1';
+  const TOKEN_DB = 0;
+  const DOCS_DB = 1;
+  const PORT = process.env.REDIS_PORT || '6379';
+  const HOST = process.env.REDIS_HOST || '127.0.0.1';
 
   this.tokenClient = redis
     .createClient(PORT, HOST, {
@@ -59,12 +59,12 @@ Index.prototype.quit = function() {
 };
 
 Index.prototype.add = function(filename, document, callback) {
-  let self = this;
-  let PUNCTUATION = ['.', ',', ':', ''];
-  let tokenizer = new natural.WordTokenizer();
-  let tokens = tokenizer.tokenize(document);
+  const self = this;
+  const PUNCTUATION = ['.', ',', ':', ''];
+  const tokenizer = new natural.WordTokenizer();
+  const tokens = tokenizer.tokenize(document);
 
-  let tasks = tokens
+  const tasks = tokens
     .filter(function(token) {
       return PUNCTUATION.indexOf(token) === -1;
     })
@@ -82,8 +82,8 @@ Index.prototype.add = function(filename, document, callback) {
 };
 
 Index.prototype.lookup = function(words, callback) {
-  let self = this;
-  let tasks = words.map(function(word) {
+  const self = this;
+  const tasks = words.map(function(word) {
     word = word.toLowerCase();
     return function(cb) {
       self.tokenClient.smembers(word, cb);
@@ -114,7 +114,7 @@ Index.prototype.setContainsNoText = function(filename, callback) {
 };
 
 function lookup(words, callback) {
-  let index = new Index();
+  const index = new Index();
   index.lookup(words, function(err, hits) {
     index.quit();
     if (err) {
@@ -146,9 +146,9 @@ function extractDescriptions(filename, index, response, callback) {
 
 function getTextFromFiles(index, inputFiles, callback) {
   // Make a call to the Vision API to detect text
-  let requests = [];
+  const requests = [];
   inputFiles.forEach(filename => {
-    let request = {
+    const request = {
       image: {content: fs.readFileSync(filename).toString('base64')},
       features: [{type: 'TEXT_DETECTION'}],
     };
@@ -157,11 +157,11 @@ function getTextFromFiles(index, inputFiles, callback) {
   client
     .batchAnnotateImages({requests: requests})
     .then(results => {
-      let detections = results[0].responses;
-      let textResponse = {};
-      let tasks = [];
+      const detections = results[0].responses;
+      const textResponse = {};
+      const tasks = [];
       inputFiles.forEach(function(filename, i) {
-        let response = detections[i];
+        const response = detections[i];
         if (response.error) {
           console.log('API Error for ' + filename, response.error);
           return;
@@ -186,7 +186,7 @@ function getTextFromFiles(index, inputFiles, callback) {
 
 // Run the example
 function main(inputDir, callback) {
-  let index = new Index();
+  const index = new Index();
 
   async.waterfall(
     [
@@ -198,7 +198,7 @@
       function(files, cb) {
         async.parallel(
           files.map(function(file) {
-            let filename = path.join(inputDir, file);
+            const filename = path.join(inputDir, file);
             return function(cb) {
               fs.stat(filename, function(err, stats) {
                 if (err) {
@@ -216,7 +216,7 @@
       },
       // Figure out which files have already been processed
       function(allImageFiles, cb) {
-        let tasks = allImageFiles
+        const tasks = allImageFiles
          .filter(function(filename) {
            return filename;
          })
@@ -256,7 +256,7 @@
 }
 
 if (module === require.main) {
-  let generalError =
+  const generalError =
     'Usage: node textDetection <command> <arg> ...\n\n' +
     '\tCommands: analyze, lookup';
   if (process.argv.length < 3) {
@@ -264,8 +264,8 @@ if (module === require.main) {
     // eslint-disable-next-line no-process-exit
     process.exit(1);
   }
-  let args = process.argv.slice(2);
-  let command = args.shift();
+  const args = process.argv.slice(2);
+  const command = args.shift();
   if (command === 'analyze') {
     if (!args.length) {
       console.log('Usage: node textDetection analyze <dir>');
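
Note on the pattern (this sketch is not part of the patch): every change above applies one rule, declare with const unless the binding is later reassigned. Two cases are worth calling out. In faceDetection.test.js, "let done = false;" stays let because a callback reassigns it, while "timeout" is never reassigned and becomes const. In importProductSets.v1p3beta1.js, "for (const i in ...)" is valid because for...in (like for...of) creates a fresh binding on each iteration. A minimal standalone illustration of both cases, with names that are illustrative rather than taken from the samples:

    'use strict';

    // Reassigned binding: must remain `let`.
    let finished = false;
    const timer = setTimeout(() => {
      finished = true; // this reassignment is why `finished` cannot be `const`
    }, 100);
    clearTimeout(timer);

    // Loop binding: `const` is fine because each iteration of
    // for...in gets a fresh `key` binding.
    const statuses = {0: 'SUCCESS', 1: 'FAILED'};
    for (const key in statuses) {
      console.log('Status of processing', key, statuses[key]);
    }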