diff --git a/package.json b/package.json
index e969b3ae..6bbdaf66 100644
--- a/package.json
+++ b/package.json
@@ -57,7 +57,7 @@
     "@google-cloud/common": "^0.7.0",
     "events-intercept": "^2.0.0",
     "extend": "^3.0.0",
-    "google-gax": "^0.7.0",
+    "google-gax": "^0.8.1",
     "google-proto-files": "^0.8.0",
     "is": "^3.1.0",
     "modelo": "^4.2.0",
diff --git a/src/index.js b/src/index.js
index ec6f9c7a..f3c7e446 100644
--- a/src/index.js
+++ b/src/index.js
@@ -659,7 +659,10 @@ Speech.prototype.recognize = function(file, config, callback) {
       return;
     }
 
-    self.api.Speech.syncRecognize(config, foundFile, function(err, resp) {
+    self.api.Speech.syncRecognize({
+      config: config,
+      audio: foundFile
+    }, function(err, resp) {
       if (err) {
         callback(err, null, resp);
         return;
@@ -799,7 +802,10 @@ Speech.prototype.startRecognition = function(file, config, callback) {
       return;
     }
 
-    self.api.Speech.asyncRecognize(config, foundFile, function(err, resp) {
+    self.api.Speech.asyncRecognize({
+      config: config,
+      audio: foundFile
+    }, function(err, resp) {
       if (err) {
         callback(err, null, resp);
         return;
diff --git a/src/v1beta1/speech_api.js b/src/v1beta1/speech_api.js
index ebc4582a..b6e3b1f2 100644
--- a/src/v1beta1/speech_api.js
+++ b/src/v1beta1/speech_api.js
@@ -108,12 +108,14 @@ function SpeechApi(gaxGrpc, grpcClients, opts) {
  * Perform synchronous speech-recognition: receive results after all audio
  * has been sent and processed.
  *
- * @param {Object} config
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {Object} request.config
  *   [Required] The `config` message provides information to the recognizer
  *   that specifies how to process the request.
  *
  *   This object should have the same structure as [RecognitionConfig]{@link RecognitionConfig}
- * @param {Object} audio
+ * @param {Object} request.audio
  *   [Required] The audio data to be recognized.
  *
  *   This object should have the same structure as [RecognitionAudio]{@link RecognitionAudio}
@@ -124,27 +126,25 @@ function SpeechApi(gaxGrpc, grpcClients, opts) {
  *   The function which will be called with the result of the API call.
  *
  *   The second parameter to the callback is an object representing [SyncRecognizeResponse]{@link SyncRecognizeResponse}
- * @returns {gax.EventEmitter} - the event emitter to handle the call
- *   status.
+ * @returns {Promise} - The promise which resolves to the response object.
+ *   The promise has a method named "cancel" which cancels the ongoing API call.
  *
  * @example
  *
  * var api = speechV1beta1.speechApi();
  * var config = {};
  * var audio = {};
- * api.syncRecognize(config, audio, function(err, response) {
- *     if (err) {
- *         console.error(err);
- *         return;
- *     }
+ * var request = {
+ *     config: config,
+ *     audio: audio
+ * };
+ * api.syncRecognize(request).then(function(response) {
  *     // doThingsWith(response)
+ * }).catch(function(err) {
+ *     console.error(err);
  * });
  */
-SpeechApi.prototype.syncRecognize = function syncRecognize(
-    config,
-    audio,
-    options,
-    callback) {
+SpeechApi.prototype.syncRecognize = function(request, options, callback) {
   if (options instanceof Function && callback === undefined) {
     callback = options;
     options = {};
@@ -152,11 +152,7 @@ SpeechApi.prototype.syncRecognize = function syncRecognize(
   if (options === undefined) {
     options = {};
   }
-  var req = {
-    config: config,
-    audio: audio
-  };
-  return this._syncRecognize(req, options, callback);
+  return this._syncRecognize(request, options, callback);
 };
 
 /**
@@ -165,12 +161,14 @@ SpeechApi.prototype.syncRecognize = function syncRecognize(
  * `Operation.error` or an `Operation.response` which contains
  * an `AsyncRecognizeResponse` message.
  *
- * @param {Object} config
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {Object} request.config
  *   [Required] The `config` message provides information to the recognizer
  *   that specifies how to process the request.
  *
  *   This object should have the same structure as [RecognitionConfig]{@link RecognitionConfig}
- * @param {Object} audio
+ * @param {Object} request.audio
  *   [Required] The audio data to be recognized.
  *
  *   This object should have the same structure as [RecognitionAudio]{@link RecognitionAudio}
@@ -181,27 +179,25 @@ SpeechApi.prototype.syncRecognize = function syncRecognize(
  *   The function which will be called with the result of the API call.
  *
  *   The second parameter to the callback is an object representing [google.longrunning.Operation]{@link external:"google.longrunning.Operation"}
- * @returns {gax.EventEmitter} - the event emitter to handle the call
- *   status.
+ * @returns {Promise} - The promise which resolves to the response object.
+ *   The promise has a method named "cancel" which cancels the ongoing API call.
  *
  * @example
  *
  * var api = speechV1beta1.speechApi();
  * var config = {};
  * var audio = {};
- * api.asyncRecognize(config, audio, function(err, response) {
- *     if (err) {
- *         console.error(err);
- *         return;
- *     }
+ * var request = {
+ *     config: config,
+ *     audio: audio
+ * };
+ * api.asyncRecognize(request).then(function(response) {
  *     // doThingsWith(response)
+ * }).catch(function(err) {
+ *     console.error(err);
  * });
  */
-SpeechApi.prototype.asyncRecognize = function asyncRecognize(
-    config,
-    audio,
-    options,
-    callback) {
+SpeechApi.prototype.asyncRecognize = function(request, options, callback) {
   if (options instanceof Function && callback === undefined) {
     callback = options;
     options = {};
@@ -209,11 +205,7 @@ SpeechApi.prototype.asyncRecognize = function asyncRecognize(
   if (options === undefined) {
     options = {};
   }
-  var req = {
-    config: config,
-    audio: audio
-  };
-  return this._asyncRecognize(req, options, callback);
+  return this._asyncRecognize(request, options, callback);
 };
 
 function SpeechApiBuilder(gaxGrpc) {
diff --git a/test/index.js b/test/index.js
index e1b414f1..f5b991a3 100644
--- a/test/index.js
+++ b/test/index.js
@@ -647,13 +647,13 @@ describe('Speech', function() {
 
     it('should make the correct request', function(done) {
       speech.api.Speech = {
-        syncRecognize: function(config, file) {
+        syncRecognize: function(reqOpts) {
           var expectedConfig = extend({}, CONFIG, {
             encoding: DETECTED_ENCODING
           });
 
-          assert.deepEqual(config, expectedConfig);
-          assert.strictEqual(file, FOUND_FILE);
+          assert.deepEqual(reqOpts.config, expectedConfig);
+          assert.strictEqual(reqOpts.audio, FOUND_FILE);
 
           done();
         }
@@ -672,8 +672,8 @@ describe('Speech', function() {
       };
 
       speech.api.Speech = {
-        syncRecognize: function(config_) {
-          assert.strictEqual(config_.encoding, config.encoding);
+        syncRecognize: function(reqOpts) {
+          assert.strictEqual(reqOpts.config.encoding, config.encoding);
           done();
         }
       };
@@ -690,8 +690,8 @@ describe('Speech', function() {
       };
 
       speech.api.Speech = {
-        syncRecognize: function(config) {
-          assert.strictEqual(config.encoding, expectedEncoding);
+        syncRecognize: function(reqOpts) {
+          assert.strictEqual(reqOpts.config.encoding, expectedEncoding);
           done();
         }
       };
@@ -718,7 +718,7 @@ describe('Speech', function() {
 
       beforeEach(function() {
         speech.api.Speech = {
-          syncRecognize: function(config, file, callback) {
+          syncRecognize: function(reqOpts, callback) {
             callback(error, apiResponse);
           }
         };
@@ -757,7 +757,7 @@ describe('Speech', function() {
       };
 
       speech.api.Speech = {
-        syncRecognize: function(config, file, callback) {
+        syncRecognize: function(reqOpts, callback) {
           callback(null, apiResponse);
         }
       };
@@ -815,8 +815,8 @@ describe('Speech', function() {
 
     it('should delete verbose option from request object', function(done) {
       speech.api.Speech = {
-        syncRecognize: function(config) {
-          assert.strictEqual(config.verbose, undefined);
+        syncRecognize: function(reqOpts) {
+          assert.strictEqual(reqOpts.config.verbose, undefined);
           done();
         }
       };
@@ -861,13 +861,13 @@ describe('Speech', function() {
 
     it('should make the correct request', function(done) {
       speech.api.Speech = {
-        asyncRecognize: function(config, file) {
+        asyncRecognize: function(reqOpts) {
           var expectedConfig = extend({}, CONFIG, {
             encoding: DETECTED_ENCODING
           });
 
-          assert.deepEqual(config, expectedConfig);
-          assert.strictEqual(file, FOUND_FILE);
+          assert.deepEqual(reqOpts.config, expectedConfig);
+          assert.strictEqual(reqOpts.audio, FOUND_FILE);
 
           done();
         }
@@ -886,8 +886,8 @@ describe('Speech', function() {
       };
 
       speech.api.Speech = {
-        asyncRecognize: function(config_) {
-          assert.strictEqual(config_.encoding, config.encoding);
+        asyncRecognize: function(reqOpts) {
+          assert.strictEqual(reqOpts.config.encoding, config.encoding);
           done();
         }
       };
@@ -904,8 +904,8 @@ describe('Speech', function() {
       };
 
       speech.api.Speech = {
-        asyncRecognize: function(config) {
-          assert.strictEqual(config.encoding, expectedEncoding);
+        asyncRecognize: function(reqOpts) {
+          assert.strictEqual(reqOpts.config.encoding, expectedEncoding);
           done();
         }
       };
@@ -932,7 +932,7 @@ describe('Speech', function() {
 
       beforeEach(function() {
         speech.api.Speech = {
-          asyncRecognize: function(config, file, callback) {
+          asyncRecognize: function(reqOpts, callback) {
             callback(error, apiResponse);
           }
         };
@@ -978,7 +978,7 @@ describe('Speech', function() {
       };
 
      speech.api.Speech = {
-        asyncRecognize: function(config, file, callback) {
+        asyncRecognize: function(reqOpts, callback) {
          callback(null, apiResponse);
        }
      };
@@ -1058,8 +1058,8 @@ describe('Speech', function() {
 
    it('should delete verbose option from request object', function(done) {
      speech.api.Speech = {
-        asyncRecognize: function(config) {
-          assert.strictEqual(config.verbose, undefined);
+        asyncRecognize: function(reqOpts) {
+          assert.strictEqual(reqOpts.config.verbose, undefined);
          done();
        }
      };
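
For reference, a minimal usage sketch of the request-object convention this change introduces, following the updated JSDoc examples above. The `speechV1beta1` reference is taken from those examples, and the RecognitionConfig/RecognitionAudio field values below are illustrative placeholders, not part of this patch.

  // Assumes `speechV1beta1` is the generated v1beta1 module, as in the JSDoc examples.
  var api = speechV1beta1.speechApi();

  // Both fields are required; the values here are placeholders.
  var request = {
    config: {
      encoding: 'LINEAR16',
      sampleRate: 16000
    },
    audio: {
      uri: 'gs://my-bucket/audio.raw'
    }
  };

  // The method now takes a single request object and, per the updated @returns
  // doc, returns a promise that resolves to the response and exposes cancel();
  // a Node-style callback may still be passed instead, as src/index.js does.
  api.syncRecognize(request).then(function(response) {
    // doThingsWith(response)
  }).catch(function(err) {
    console.error(err);
  });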