diff --git a/quickstart/java/jre/virtual-assistant/src/com/speechsdk/quickstart/Main.java b/quickstart/java/jre/virtual-assistant/src/com/speechsdk/quickstart/Main.java
index 31237f78d..ed3a188bb 100644
--- a/quickstart/java/jre/virtual-assistant/src/com/speechsdk/quickstart/Main.java
+++ b/quickstart/java/jre/virtual-assistant/src/com/speechsdk/quickstart/Main.java
@@ -7,7 +7,7 @@
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.datatype.joda.JodaModule;
-import com.microsoft.bot.schema.models.Activity;
+import com.microsoft.bot.schema.Activity;
import com.microsoft.cognitiveservices.speech.audio.AudioConfig;
import com.microsoft.cognitiveservices.speech.audio.PullAudioOutputStream;
import com.microsoft.cognitiveservices.speech.dialog.BotFrameworkConfig;
@@ -146,10 +146,10 @@ private static void registerEventListeners(final DialogServiceConnector dialogSe
try {
Activity activity = mapper.readValue(act, Activity.class);
- if (StringUtils.isNotBlank(activity.text()) || StringUtils.isNotBlank(activity.speak())) {
+ if (StringUtils.isNotBlank(activity.getText()) || StringUtils.isNotBlank(activity.getSpeak())) {
receivedResponse = true;
System.out.println(String.format("Response: \n\t Text: %s \n\t Speech: %s",
- activity.text(), activity.speak()));
+ activity.getText(), activity.getSpeak()));
}
} catch (IOException e) {
log.error("IO exception thrown when deserializing the bot response. ErrorMessage:", e.getMessage(), e);
diff --git a/quickstart/javascript/browser/from-file/README.md b/quickstart/javascript/browser/from-file/README.md
new file mode 100644
index 000000000..bf2c37b52
--- /dev/null
+++ b/quickstart/javascript/browser/from-file/README.md
@@ -0,0 +1,45 @@
+# Quickstart: Recognize speech in JavaScript on a Web Browser.
+
+These samples demonstrate how to recognize speech from a file using the Speech SDK for JavaScript in a web browser such as Microsoft Edge or Chrome.
+* See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstarts/speech-to-text-from-file?pivots=programming-language-javascript) on the SDK documentation page which describes how to build this sample from scratch in your favorite editor.
+
+## Prerequisites
+
+* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
+* A PC or Mac, with a working microphone.
+* A text editor.
+* Optionally, a web server that supports hosting PHP scripts.
+
+## Build the sample
+
+> Note: more detailed step-by-step instructions are available [here](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstarts/speech-to-text-from-file?pivots=programming-language-javascript).
+
+* **By downloading the Microsoft Cognitive Services Speech SDK when building this sample, you acknowledge its license; see the [Speech SDK license agreement](https://docs.microsoft.com/azure/cognitive-services/speech-service/license).**
+* [Download the sample code to your development PC.](../../../README.md#get-the-samples)
+* From the [Speech SDK for JavaScript .zip package](https://aka.ms/csspeech/jsbrowserpackage) extract the file
+ `microsoft.cognitiveservices.speech.sdk.bundle.js` and place it into the folder that contains this quickstart.
+
+If you want to host the sample on a web server:
+
+* The web server must be secure (HTTPS).
+* Edit the `token.php` source:
+ * Replace the string `YourServiceRegion` with the service region of your subscription.
+ For example, replace with `westus` if you are using the 30-day free trial subscription.
+ * Replace the string `YourSubscriptionKey` with your own subscription key.
+* Edit the `index.html` source:
+  * Replace the value for the variable `authorizationEndpoint` with the full URL where you can access the token.php resource (a sketch of this token exchange follows this list).
+* Deploy all files to your web server.
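+
+If it helps to see what the hosted page does with `token.php`, here is a minimal sketch of the token exchange, assuming the SDK bundle is loaded as the global `SpeechSDK`; the function name and parameters here are illustrative, not the sample's actual code:
+
+```javascript
+// Fetch a short-lived authorization token from token.php, then build a
+// SpeechConfig from it so the page never embeds the subscription key.
+function createConfigFromTokenService(authorizationEndpoint, serviceRegion, onReady) {
+    var request = new XMLHttpRequest();
+    request.open("GET", authorizationEndpoint, true);
+    request.onload = function () {
+        var token = request.responseText;
+        var speechConfig = SpeechSDK.SpeechConfig.fromAuthorizationToken(token, serviceRegion);
+        speechConfig.speechRecognitionLanguage = "en-US";
+        onReady(speechConfig);
+    };
+    request.send();
+}
+```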
+
+## Run the `index.html` sample
+
+* If you are running the sample from your local computer, open `index.html` in a JavaScript-capable browser from the location where you downloaded this quickstart.
+* Use the input fields to set your `subscription key` and `service region`.
+* Press the `Start recognition` button to start recognizing speech (see the sketch below for roughly what this triggers).
+
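+A minimal sketch of what pressing the button triggers, assuming the SDK bundle is loaded as the global `SpeechSDK` and that `subscriptionKey`, `serviceRegion`, and `audioFile` (a `File` object selected on the page) are illustrative names for the values the page collects:
+
+```javascript
+// Recognize a single utterance from a WAV file in the browser.
+var speechConfig = SpeechSDK.SpeechConfig.fromSubscription(subscriptionKey, serviceRegion);
+speechConfig.speechRecognitionLanguage = "en-US";
+var audioConfig = SpeechSDK.AudioConfig.fromWavFileInput(audioFile);
+var recognizer = new SpeechSDK.SpeechRecognizer(speechConfig, audioConfig);
+recognizer.recognizeOnceAsync(
+    function (result) {
+        console.log("Recognized: " + result.text);
+        recognizer.close();
+    },
+    function (err) {
+        console.log("Error: " + err);
+        recognizer.close();
+    });
+```
+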
+## Running .html samples
+* If you are hosting the sample on a web server, open a web browser and navigate to the full URL where you host the sample.
+
+## References
+
+* [Quickstart article on the SDK documentation site](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstarts/speech-to-text-from-file?pivots=programming-language-javascript)
+* [Speech SDK API reference for JavaScript](https://aka.ms/csspeech/javascriptref)
diff --git a/quickstart/javascript/browser/from-file/index.html b/quickstart/javascript/browser/from-file/index.html
new file mode 100644
index 000000000..a3505c3b6
--- /dev/null
+++ b/quickstart/javascript/browser/from-file/index.html
@@ -0,0 +1,135 @@
+<!-- HTML source elided by extraction (135 lines). Recoverable content: page title
+"Microsoft Cognitive Services Speech SDK JavaScript Quickstart" and the fallback notice
+"Speech Recognition - Speech SDK not found (microsoft.cognitiveservices.speech.sdk.bundle.js missing)." -->
diff --git a/quickstart/javascript/browser/token.php b/quickstart/javascript/browser/from-file/token.php
similarity index 100%
rename from quickstart/javascript/browser/token.php
rename to quickstart/javascript/browser/from-file/token.php
diff --git a/quickstart/javascript/browser/from-microphone/README.md b/quickstart/javascript/browser/from-microphone/README.md
new file mode 100644
index 000000000..8b190e46d
--- /dev/null
+++ b/quickstart/javascript/browser/from-microphone/README.md
@@ -0,0 +1,39 @@
+# Quickstart: Recognize speech in JavaScript on a Web Browser.
+
+These samples demonstrate how to recognize speech from a microphone using the Speech SDK for JavaScript in a web browser such as Microsoft Edge or Chrome.
+
+## Prerequisites
+
+* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
+* A PC or Mac, with a working microphone.
+* A text editor.
+* Optionally, a web server that supports hosting PHP scripts.
+
+## Build the sample
+
+* **By downloading the Microsoft Cognitive Services Speech SDK when building this sample, you acknowledge its license; see the [Speech SDK license agreement](https://docs.microsoft.com/azure/cognitive-services/speech-service/license).**
+* [Download the sample code to your development PC.](../../../README.md#get-the-samples)
+* From the [Speech SDK for JavaScript .zip package](https://aka.ms/csspeech/jsbrowserpackage) extract the file
+ `microsoft.cognitiveservices.speech.sdk.bundle.js` and place it into the folder that contains this quickstart.
+
+If you want to host the sample on a web server:
+
+* Edit the `token.php` source:
+ * Replace the string `YourServiceRegion` with the service region of your subscription.
+ For example, replace with `westus` if you are using the 30-day free trial subscription.
+ * Replace the string `YourSubscriptionKey` with your own subscription key.
+* Edit the `index.html` source:
+ * Replace the value for the variable `authorizationEndpoint` with the full URL where you can access the token.php resource.
+* Deploy all files to your web server.
+
+## Run the `index.html` sample
+
+* If you are running the sample from your local computer, open `index.html` in a JavaScript-capable browser from the location where you downloaded this quickstart.
+  * Use the input fields to set your `subscription key` and `service region`.
+  * Press the `Start recognition` button to start recognizing speech from your microphone (see the sketch below).
+
+* If you are hosting the sample on a web server, open a web browser and navigate to the full URL where you host the sample.
+
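+A minimal sketch of the recognition call behind the button, assuming the SDK bundle is loaded as the global `SpeechSDK` and that `subscriptionKey`/`serviceRegion` are illustrative names for the values you entered:
+
+```javascript
+// Recognize a single utterance from the default microphone.
+var speechConfig = SpeechSDK.SpeechConfig.fromSubscription(subscriptionKey, serviceRegion);
+speechConfig.speechRecognitionLanguage = "en-US";
+var audioConfig = SpeechSDK.AudioConfig.fromDefaultMicrophoneInput();
+var recognizer = new SpeechSDK.SpeechRecognizer(speechConfig, audioConfig);
+recognizer.recognizeOnceAsync(function (result) {
+    console.log("Recognized: " + result.text);
+    recognizer.close();
+});
+```
+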
+## References
+
+* [Speech SDK API reference for JavaScript](https://aka.ms/csspeech/javascriptref)
\ No newline at end of file
diff --git a/quickstart/javascript/browser/index.html b/quickstart/javascript/browser/from-microphone/index.html
similarity index 100%
rename from quickstart/javascript/browser/index.html
rename to quickstart/javascript/browser/from-microphone/index.html
diff --git a/quickstart/javascript/browser/multi-device-conversation.html b/quickstart/javascript/browser/from-microphone/multi-device-conversation.html
similarity index 100%
rename from quickstart/javascript/browser/multi-device-conversation.html
rename to quickstart/javascript/browser/from-microphone/multi-device-conversation.html
diff --git a/quickstart/javascript/browser/from-microphone/token.php b/quickstart/javascript/browser/from-microphone/token.php
new file mode 100644
index 000000000..a98e5fb26
--- /dev/null
+++ b/quickstart/javascript/browser/from-microphone/token.php
@@ -0,0 +1,15 @@
+[PHP source elided (15 lines): token service that exchanges the subscription key for an authorization token]
diff --git a/quickstart/javascript/browser/intent-recognition/README.md b/quickstart/javascript/browser/intent-recognition/README.md
new file mode 100644
index 000000000..dcb9fde08
--- /dev/null
+++ b/quickstart/javascript/browser/intent-recognition/README.md
@@ -0,0 +1,53 @@
+# Quickstart: Using the Speech Service to Recognize intent in JavaScript on a Web Browser.
+
+This sample shows how to recognize intent with the Speech Service using the Speech SDK for JavaScript in a web browser such as Microsoft Edge or Chrome. It illustrates how the SDK can be used to recognize speech and match it against the intents defined in a LUIS app.
+
+* See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-intent-recognition-js-browser) on the SDK documentation page for step-by-step instructions.
+* See the [overview article](https://docs.microsoft.com/azure/cognitive-services/speech-service/intent-recognition) on the SDK documentation page to learn more about intent recognition using the Speech Service.
+
+## Prerequisites
+
+* A LUIS account. You can get one for free through the [LUIS portal](https://www.luis.ai/home).
+* A new or existing LUIS app - [create LUIS app](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstarts/intent-recognition#create-a-luis-app-for-intent-recognition)
+* A PC or Mac, with a working microphone.
+* A text editor.
+* Optionally, a web server that supports hosting PHP scripts.
+
+## Build the sample
+
+> Note: more detailed step-by-step instructions are available [here](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-js-browser).
+
+* **By downloading the Microsoft Cognitive Services Speech SDK when building this sample, you acknowledge its license; see the [Speech SDK license agreement](https://docs.microsoft.com/azure/cognitive-services/speech-service/license).**
+* [Download the sample code to your development PC.](../../../README.md#get-the-samples)
+* From the [Speech SDK for JavaScript .zip package](https://aka.ms/csspeech/jsbrowserpackage) extract the file
+ `microsoft.cognitiveservices.speech.sdk.bundle.js` and place it into the folder that contains this quickstart.
+
+If you want to host the sample on a web server:
+
+* The web server must be secure (HTTPS).
+* Edit the `token.php` source:
+ * Replace the string `YourServiceRegion` with the service region of your subscription.
+ For example, replace with `westus` if you are using the 30-day free trial subscription.
+ * Replace the string `YourSubscriptionKey` with your own subscription key.
+* Edit the `index.html` source:
+ * Replace the value for the variable `authorizationEndpoint` with the full URL where you can access the token.php resource.
+* Deploy all files to your web server.
+
+## Run the `index.html` sample
+
+* If you are running the sample from your local computer, open `index.html` in a JavaScript-capable browser from the location where you downloaded this quickstart.
+* To run this sample, you will need a new or existing LUIS app - [create a LUIS app](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstarts/intent-recognition#create-a-luis-app-for-intent-recognition).
+* Use the input fields to set your `LUIS Primary key` and `LUIS Location`.
+* From the LUIS app portal, in the Manage menu, copy the App ID from the Application Settings screen into the `LUIS App ID` field.
+* Press the `Start Intent Recognition` button to start recognizing speech intents from your microphone (see the sketch below).
+
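+A minimal sketch of the intent recognition behind the button, assuming the SDK bundle is loaded as the global `SpeechSDK` and that `luisPrimaryKey`, `luisLocation`, and `luisAppId` are illustrative names for the values you entered:
+
+```javascript
+// Recognize one utterance from the microphone and match it against the LUIS app's intents.
+var speechConfig = SpeechSDK.SpeechConfig.fromSubscription(luisPrimaryKey, luisLocation);
+speechConfig.speechRecognitionLanguage = "en-US";
+var recognizer = new SpeechSDK.IntentRecognizer(speechConfig, SpeechSDK.AudioConfig.fromDefaultMicrophoneInput());
+var model = SpeechSDK.LanguageUnderstandingModel.fromAppId(luisAppId);
+recognizer.addAllIntents(model);
+recognizer.recognizeOnceAsync(function (result) {
+    console.log("Text: " + result.text + "  Intent: " + result.intentId);
+    recognizer.close();
+});
+```
+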
+## Running .html samples
+* If you are hosting the sample on a web server, open a web browser and navigate to the full URL where you host the sample.
+
+> Note: On Safari, the sample web page needs to be hosted on a web server; Safari doesn't allow websites loaded from a local file to use the microphone.
+
+## References
+
+* [Quickstart article on the SDK documentation site](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-js-browser)
+* [Speech SDK API reference for JavaScript](https://aka.ms/csspeech/javascriptref)
+* [Speech SDK Intent Recognition](https://docs.microsoft.com/azure/cognitive-services/speech-service/intent-recognition)
diff --git a/quickstart/javascript/browser/intent-recognition/index.html b/quickstart/javascript/browser/intent-recognition/index.html
new file mode 100644
index 000000000..d3c84a299
--- /dev/null
+++ b/quickstart/javascript/browser/intent-recognition/index.html
@@ -0,0 +1,186 @@
+<!-- HTML source elided by extraction (186 lines). Recoverable content: page title
+"Microsoft Cognitive Services Speech SDK JavaScript Quickstart" and the fallback notice
+"Speech Recognition - Speech SDK not found (microsoft.cognitiveservices.speech.sdk.bundle.js missing)." -->
diff --git a/quickstart/javascript/browser/intent-recognition/token.php b/quickstart/javascript/browser/intent-recognition/token.php
new file mode 100644
index 000000000..a98e5fb26
--- /dev/null
+++ b/quickstart/javascript/browser/intent-recognition/token.php
@@ -0,0 +1,15 @@
+[PHP source elided (15 lines): token service that exchanges the subscription key for an authorization token]
diff --git a/quickstart/javascript/browser/README.md b/quickstart/javascript/browser/multi-device-conversation/README.md
similarity index 86%
rename from quickstart/javascript/browser/README.md
rename to quickstart/javascript/browser/multi-device-conversation/README.md
index d370ead56..cf9bfc882 100644
--- a/quickstart/javascript/browser/README.md
+++ b/quickstart/javascript/browser/multi-device-conversation/README.md
@@ -1,7 +1,6 @@
# Quickstart: Recognize speech in JavaScript on a Web Browser.
These samples demonstrate how to recognize speech, and how to create or join a multi-device conversation with real-time transcriptions and translations, using the Speech SDK for JavaScript on a web browser, like Microsoft Edge, or Chrome.
-* See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-js-browser) on the SDK documentation page which describes how to build this sample from scratch in your favorite editor.
* See the [overview article](https://docs.microsoft.com/azure/cognitive-services/speech-service/multi-device-conversation) on the SDK documentation page to learn more about Multi-device Conversation (Preview).
## Prerequisites
@@ -27,15 +26,9 @@ If you want to host the sample on a web server:
* Replace the string `YourServiceRegion` with the service region of your subscription.
For example, replace with `westus` if you are using the 30-day free trial subscription.
* Replace the string `YourSubscriptionKey` with your own subscription key.
-* Edit the `index.html` or `multi-device-conversation.html` source:
+* Edit the `multi-device-conversation.html` source:
* Replace the value for the variable `authorizationEndpoint` with the full URL where you can access the token.php resource.
* Deploy all files to your web server.
-
-## Run the `index.html` sample
-
-* In case you are running the sample from your local computer, open `index.html` from the location where you have downloaded this quickstart with a JavaScript capable browser.
-* Use the input fields to set your `subscription key` and `service region`.
-* Press the `Start recognition` button to start recognizing speech.
## Run the `multi-device-conversation.html` sample
@@ -66,7 +59,6 @@ If you want to host the sample on a web server:
## References
-* [Quickstart article on the SDK documentation site](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-js-browser)
* [Speech SDK API reference for JavaScript](https://aka.ms/csspeech/javascriptref)
* [Speech SDK Multi-device Conversation (Preview)](https://docs.microsoft.com/azure/cognitive-services/speech-service/multi-device-conversation)
* [Language support for Multi-device conversation (Preview)](https://docs.microsoft.com/azure/cognitive-services/speech-service/multi-device-conversation#language-support)
\ No newline at end of file
diff --git a/quickstart/javascript/browser/multi-device-conversation/multi-device-conversation.html b/quickstart/javascript/browser/multi-device-conversation/multi-device-conversation.html
new file mode 100644
index 000000000..c9fc26385
--- /dev/null
+++ b/quickstart/javascript/browser/multi-device-conversation/multi-device-conversation.html
@@ -0,0 +1,745 @@
+<!-- HTML source elided by extraction (745 lines). Recoverable content: page title
+"Microsoft Cognitive Services Speech SDK JavaScript Quickstart for Multi-Device Conversation (Preview)"
+and the fallback notice "Speech Recognition - Speech SDK not found (microsoft.cognitiveservices.speech.sdk.bundle.js missing)." -->
diff --git a/quickstart/javascript/browser/multi-device-conversation/token.php b/quickstart/javascript/browser/multi-device-conversation/token.php
new file mode 100644
index 000000000..a98e5fb26
--- /dev/null
+++ b/quickstart/javascript/browser/multi-device-conversation/token.php
@@ -0,0 +1,15 @@
+[PHP source elided (15 lines): token service that exchanges the subscription key for an authorization token]
diff --git a/quickstart/javascript/browser/text-to-speech/README.md b/quickstart/javascript/browser/text-to-speech/README.md
new file mode 100644
index 000000000..a794ee94b
--- /dev/null
+++ b/quickstart/javascript/browser/text-to-speech/README.md
@@ -0,0 +1,51 @@
+# Quickstart: Using the Speech Service in JavaScript on a Web Browser.
+
+This sample shows how to use the Speech Service with the Speech SDK for JavaScript in a web browser such as Microsoft Edge or Chrome. It illustrates how the SDK can be used to synthesize speech to speaker output.
+
+* See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-text-to-speech-js-browser) on the SDK documentation page for step-by-step instructions.
+* See the [overview article](https://docs.microsoft.com/azure/cognitive-services/speech-service/text-to-speech) on the SDK documentation page to learn more about Text to Speech.
+
+## Prerequisites
+
+* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
+* A PC or Mac, with a working speaker.
+* A text editor.
+* Optionally, a web server that supports hosting PHP scripts.
+
+## Build the sample
+
+> Note: more detailed step-by-step instructions are available [here](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-js-browser).
+
+* **By downloading the Microsoft Cognitive Services Speech SDK when building this sample, you acknowledge its license; see the [Speech SDK license agreement](https://docs.microsoft.com/azure/cognitive-services/speech-service/license).**
+* [Download the sample code to your development PC.](../../../README.md#get-the-samples)
+* From the [Speech SDK for JavaScript .zip package](https://aka.ms/csspeech/jsbrowserpackage) extract the file
+ `microsoft.cognitiveservices.speech.sdk.bundle.js` and place it into the folder that contains this quickstart.
+
+If you want to host the sample on a web server:
+
+* The web server must be secure (HTTPS).
+* Edit the `token.php` source:
+ * Replace the string `YourServiceRegion` with the service region of your subscription.
+ For example, replace with `westus` if you are using the 30-day free trial subscription.
+ * Replace the string `YourSubscriptionKey` with your own subscription key.
+* Edit the `index.html` source:
+ * Replace the value for the variable `authorizationEndpoint` with the full URL where you can access the token.php resource.
+* Deploy all files to your web server.
+
+## Run the `index.html` sample
+
+* If you are running the sample from your local computer, open `index.html` in a JavaScript-capable browser from the location where you downloaded this quickstart.
+* Use the input fields to set your `subscription key` and `service region`.
+* Enter the text to synthesize in the `Input Text` field.
+* Press the `Start Text to Speech` button to start synthesizing speech from the entered text (see the sketch below).
+
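+A minimal sketch of the synthesis call behind the button, assuming the SDK bundle is loaded as the global `SpeechSDK` and that `subscriptionKey`, `serviceRegion`, and `inputText` are illustrative names for the values from the page:
+
+```javascript
+// Synthesize the entered text to the default speaker.
+var speechConfig = SpeechSDK.SpeechConfig.fromSubscription(subscriptionKey, serviceRegion);
+var synthesizer = new SpeechSDK.SpeechSynthesizer(speechConfig);
+synthesizer.speakTextAsync(inputText,
+    function (result) {
+        console.log("Synthesis finished, reason: " + result.reason);
+        synthesizer.close();
+    },
+    function (err) {
+        console.log("Error: " + err);
+        synthesizer.close();
+    });
+```
+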
+## Running .html samples
+* If you are hosting the sample on a web server, open a web browser and navigate to the full URL where you host the sample.
+
+> Note: On Safari, the sample web page needs to be hosted on a web server; Safari doesn't allow websites loaded from a local file to use the microphone.
+
+## References
+
+* [Quickstart article on the SDK documentation site](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-js-browser)
+* [Speech SDK API reference for JavaScript](https://aka.ms/csspeech/javascriptref)
+* [Speech SDK Text to Speech](https://docs.microsoft.com/azure/cognitive-services/speech-service/text-to-speech)
\ No newline at end of file
diff --git a/quickstart/javascript/browser/text-to-speech/index.html b/quickstart/javascript/browser/text-to-speech/index.html
new file mode 100644
index 000000000..478a51ffa
--- /dev/null
+++ b/quickstart/javascript/browser/text-to-speech/index.html
@@ -0,0 +1,171 @@
+<!-- HTML source elided by extraction (171 lines). Recoverable content: page title
+"Microsoft Cognitive Services Speech SDK JavaScript Quickstart" and the fallback notice
+"Speech Recognition - Speech SDK not found (microsoft.cognitiveservices.speech.sdk.bundle.js missing)." -->
diff --git a/quickstart/javascript/browser/text-to-speech/token.php b/quickstart/javascript/browser/text-to-speech/token.php
new file mode 100644
index 000000000..a98e5fb26
--- /dev/null
+++ b/quickstart/javascript/browser/text-to-speech/token.php
@@ -0,0 +1,15 @@
+[PHP source elided (15 lines): token service that exchanges the subscription key for an authorization token]
diff --git a/quickstart/javascript/browser/translate-speech-to-text-multiple-languages/README.md b/quickstart/javascript/browser/translate-speech-to-text-multiple-languages/README.md
new file mode 100644
index 000000000..5f722dc44
--- /dev/null
+++ b/quickstart/javascript/browser/translate-speech-to-text-multiple-languages/README.md
@@ -0,0 +1,52 @@
+# Quickstart: Using the Speech Service to Translate Speech to multiple languages in JavaScript on a Web Browser.
+
+This sample shows how to translate speech into multiple languages with the Speech Service using the Speech SDK for JavaScript in a web browser such as Microsoft Edge or Chrome. It illustrates how the SDK can be used to translate speech in one language to text in multiple target languages.
+
+* See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-translate-speech-to-text-multiple-languages-js-browser) on the SDK documentation page for step-by-step instructions.
+* See the [overview article](https://docs.microsoft.com/azure/cognitive-services/speech-service/speech-translation) on the SDK documentation page to learn more about translation using the Speech Service.
+
+## Prerequisites
+
+* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
+* A PC or Mac, with a working microphone.
+* A text editor.
+* Optionally, a web server that supports hosting PHP scripts.
+
+## Build the sample
+
+> Note: more detailed step-by-step instructions are available [here](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-js-browser).
+
+* **By downloading the Microsoft Cognitive Services Speech SDK when building this sample, you acknowledge its license; see the [Speech SDK license agreement](https://docs.microsoft.com/azure/cognitive-services/speech-service/license).**
+* [Download the sample code to your development PC.](../../../README.md#get-the-samples)
+* From the [Speech SDK for JavaScript .zip package](https://aka.ms/csspeech/jsbrowserpackage) extract the file
+ `microsoft.cognitiveservices.speech.sdk.bundle.js` and place it into the folder that contains this quickstart.
+
+If you want to host the sample on a web server:
+
+* The web server must be secure (HTTPS).
+* Edit the `token.php` source:
+ * Replace the string `YourServiceRegion` with the service region of your subscription.
+ For example, replace with `westus` if you are using the 30-day free trial subscription.
+ * Replace the string `YourSubscriptionKey` with your own subscription key.
+* Edit the `index.html` source:
+ * Replace the value for the variable `authorizationEndpoint` with the full URL where you can access the token.php resource.
+* Deploy all files to your web server.
+
+## Run the `index.html` sample
+
+* If you are running the sample from your local computer, open `index.html` in a JavaScript-capable browser from the location where you downloaded this quickstart.
+* Use the input fields to set your `subscription key` and `service region`.
+* Select the source language from the `Source Language` dropdown.
+* Select the two target output languages from the `Target Language` dropdowns.
+* Press the `Start recognition` button to start recognizing speech from your microphone (see the sketch below).
+
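+A minimal sketch of the two-target translation behind the button, assuming the SDK bundle is loaded as the global `SpeechSDK` and that `subscriptionKey`/`serviceRegion` and the "de"/"fr" targets are illustrative values:
+
+```javascript
+// Translate one utterance from English into two target languages at once.
+var translationConfig = SpeechSDK.SpeechTranslationConfig.fromSubscription(subscriptionKey, serviceRegion);
+translationConfig.speechRecognitionLanguage = "en-US";
+translationConfig.addTargetLanguage("de");
+translationConfig.addTargetLanguage("fr");
+var recognizer = new SpeechSDK.TranslationRecognizer(translationConfig, SpeechSDK.AudioConfig.fromDefaultMicrophoneInput());
+recognizer.recognizeOnceAsync(function (result) {
+    console.log("Recognized: " + result.text);
+    console.log("German: " + result.translations.get("de"));
+    console.log("French: " + result.translations.get("fr"));
+    recognizer.close();
+});
+```
+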
+## Running .html samples
+* If you are hosting the sample on a web server, open a web browser and navigate to the full URL where you host the sample.
+
+> Note: On Safari, the sample web page needs to be hosted on a web server; Safari doesn't allow websites loaded from a local file to use the microphone.
+
+## References
+
+* [Quickstart article on the SDK documentation site](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-js-browser)
+* [Speech SDK API reference for JavaScript](https://aka.ms/csspeech/javascriptref)
+* [Speech SDK Speech Translation](https://docs.microsoft.com/azure/cognitive-services/speech-service/speech-translation)
\ No newline at end of file
diff --git a/quickstart/javascript/browser/translate-speech-to-text-multiple-languages/index.html b/quickstart/javascript/browser/translate-speech-to-text-multiple-languages/index.html
new file mode 100644
index 000000000..8957e55a5
--- /dev/null
+++ b/quickstart/javascript/browser/translate-speech-to-text-multiple-languages/index.html
@@ -0,0 +1,221 @@
+<!-- HTML source elided by extraction (221 lines). Recoverable content: page title
+"Microsoft Cognitive Services Speech SDK JavaScript Quickstart" and the fallback notice
+"Speech Recognition - Speech SDK not found (microsoft.cognitiveservices.speech.sdk.bundle.js missing)." -->
diff --git a/quickstart/javascript/browser/translate-speech-to-text-multiple-languages/token.php b/quickstart/javascript/browser/translate-speech-to-text-multiple-languages/token.php
new file mode 100644
index 000000000..a98e5fb26
--- /dev/null
+++ b/quickstart/javascript/browser/translate-speech-to-text-multiple-languages/token.php
@@ -0,0 +1,15 @@
+[PHP source elided (15 lines): token service that exchanges the subscription key for an authorization token]
diff --git a/quickstart/javascript/browser/translate-speech-to-text/README.md b/quickstart/javascript/browser/translate-speech-to-text/README.md
new file mode 100644
index 000000000..a01855e19
--- /dev/null
+++ b/quickstart/javascript/browser/translate-speech-to-text/README.md
@@ -0,0 +1,51 @@
+# Quickstart: Using the Speech Service to Translate Speech in JavaScript on a Web Browser.
+
+This sample shows how to translate speech with the Speech Service using the Speech SDK for JavaScript in a web browser such as Microsoft Edge or Chrome. It illustrates how the SDK can be used to translate speech in one language to text in a different language.
+
+* See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-translate-speech-to-text-js-browser) on the SDK documentation page for step-by-step instructions.
+* See the [overview article](https://docs.microsoft.com/azure/cognitive-services/speech-service/speech-translation) on the SDK documentation page to learn more about translation using the Speech Service.
+
+## Prerequisites
+
+* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
+* A PC or Mac, with a working microphone.
+* A text editor.
+* Optionally, a web server that supports hosting PHP scripts.
+
+## Build the sample
+
+> Note: more detailed step-by-step instructions are available [here](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-js-browser).
+
+* **By downloading the Microsoft Cognitive Services Speech SDK when building this sample, you acknowledge its license; see the [Speech SDK license agreement](https://docs.microsoft.com/azure/cognitive-services/speech-service/license).**
+* [Download the sample code to your development PC.](../../../README.md#get-the-samples)
+* From the [Speech SDK for JavaScript .zip package](https://aka.ms/csspeech/jsbrowserpackage) extract the file
+ `microsoft.cognitiveservices.speech.sdk.bundle.js` and place it into the folder that contains this quickstart.
+
+If you want to host the sample on a web server:
+
+* The web server must be secure (HTTPS).
+* Edit the `token.php` source:
+ * Replace the string `YourServiceRegion` with the service region of your subscription.
+ For example, replace with `westus` if you are using the 30-day free trial subscription.
+ * Replace the string `YourSubscriptionKey` with your own subscription key.
+* Edit the `index.html` source:
+ * Replace the value for the variable `authorizationEndpoint` with the full URL where you can access the token.php resource.
+* Deploy all files to your web server.
+
+## Run the `index.html` sample
+
+* If you are running the sample from your local computer, open `index.html` in a JavaScript-capable browser from the location where you downloaded this quickstart.
+* Use the input fields to set your `subscription key` and `service region`.
+* Select the source and target languages from the `Source Language` and `Target Language` dropdowns.
+* Press the `Start recognition` button to start recognizing speech from your microphone (see the sketch below).
+
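+A minimal sketch of continuous translation with incremental results, assuming the SDK bundle is loaded as the global `SpeechSDK` and that `subscriptionKey`/`serviceRegion` and the "de" target are illustrative values:
+
+```javascript
+// Translate English speech to German text, printing partial results as they arrive.
+var translationConfig = SpeechSDK.SpeechTranslationConfig.fromSubscription(subscriptionKey, serviceRegion);
+translationConfig.speechRecognitionLanguage = "en-US";
+translationConfig.addTargetLanguage("de");
+var audioConfig = SpeechSDK.AudioConfig.fromDefaultMicrophoneInput();
+var recognizer = new SpeechSDK.TranslationRecognizer(translationConfig, audioConfig);
+recognizer.recognizing = function (s, e) {
+    console.log("Partial: " + e.result.text + " => " + e.result.translations.get("de"));
+};
+recognizer.recognized = function (s, e) {
+    if (e.result.reason === SpeechSDK.ResultReason.TranslatedSpeech) {
+        console.log("Final: " + e.result.translations.get("de"));
+    }
+};
+recognizer.startContinuousRecognitionAsync();
+```
+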
+## Running .html samples
+* If you are hosting the sample on a web server, open a web browser and navigate to the full URL where you host the sample.
+
+> Note: On Safari, the sample web page needs to be hosted on a web server; Safari doesn't allow websites loaded from a local file to use the microphone.
+
+## References
+
+* [Quickstart article on the SDK documentation site](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-js-browser)
+* [Speech SDK API reference for JavaScript](https://aka.ms/csspeech/javascriptref)
+* [Speech SDK Speech Translation](https://docs.microsoft.com/azure/cognitive-services/speech-service/speech-translation)
\ No newline at end of file
diff --git a/quickstart/javascript/browser/translate-speech-to-text/index.html b/quickstart/javascript/browser/translate-speech-to-text/index.html
new file mode 100644
index 000000000..1e3932d6a
--- /dev/null
+++ b/quickstart/javascript/browser/translate-speech-to-text/index.html
@@ -0,0 +1,184 @@
+<!-- HTML source elided by extraction (184 lines). Recoverable content: page title
+"Microsoft Cognitive Services Speech SDK JavaScript Quickstart" and the fallback notice
+"Speech Recognition - Speech SDK not found (microsoft.cognitiveservices.speech.sdk.bundle.js missing)." -->
diff --git a/quickstart/javascript/browser/translate-speech-to-text/token.php b/quickstart/javascript/browser/translate-speech-to-text/token.php
new file mode 100644
index 000000000..a98e5fb26
--- /dev/null
+++ b/quickstart/javascript/browser/translate-speech-to-text/token.php
@@ -0,0 +1,15 @@
+[PHP source elided (15 lines): token service that exchanges the subscription key for an authorization token]
diff --git a/quickstart/javascript/node/from-blob/README.md b/quickstart/javascript/node/from-blob/README.md
new file mode 100644
index 000000000..4cbf308cd
--- /dev/null
+++ b/quickstart/javascript/node/from-blob/README.md
@@ -0,0 +1,31 @@
+# Quickstart: Recognize speech in JavaScript on Node.js.
+
+This sample demonstrates how to transcribe an audio file stored in blob storage with the Speech Services batch transcription REST API, using JavaScript on Node.js.
+See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstarts/from-blob?pivots=programming-language-javascript) on the SDK documentation page which describes how to build this sample from scratch in your favorite editor.
+
+## Prerequisites
+
+* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
+* A [Node.js](https://nodejs.org) compatible device.
+
+## Prepare the sample
+
+> Note: more detailed step-by-step instructions are available [here](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstarts/from-blob?pivots=programming-language-javascript).
+
+* [Download the sample code to your development PC.](/README.md#get-the-samples)
+* Open a command prompt at the quickstart directory, and run `npm install` to install the dependencies of the quickstart.
+  (This sample calls the batch transcription REST API directly over `https`, so it does not download the Speech SDK itself.)
+* Update the `from-blob.js` file with your configuration:
+ * Replace the string `YourSubscriptionKey` with your own subscription key.
+ * Replace the string `YourServiceRegion` with the service region of your subscription.
+ For example, replace with `westus` if you are using the 30-day free trial subscription.
+  * Replace the string `YourAudioFile.wav` with a SAS URL for the WAV file to be transcribed. **(Required format: 16 kHz sample rate, 16-bit samples, mono/single channel.)**
+
+## Run the sample
+
+Execute `node from-blob.js` from the location where you have downloaded this quickstart.
+
+## References
+
+* [Quickstart article on the SDK documentation site](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstarts/from-blob?pivots=programming-language-javascript)
+* [Speech SDK API reference for JavaScript](https://aka.ms/csspeech/javascriptref)
diff --git a/quickstart/javascript/node/from-blob/from-blob.js b/quickstart/javascript/node/from-blob/from-blob.js
new file mode 100644
index 000000000..ab54baa99
--- /dev/null
+++ b/quickstart/javascript/node/from-blob/from-blob.js
@@ -0,0 +1,178 @@
+
+(function () {
+ "use strict";
+ var https = require("https");
+
+ // Replace with your subscription key
+ var SubscriptionKey = "YourSubscriptionKey";
+
+ // Update with your service region
+ var Region = "YourServiceRegion";
+ var Port = 443;
+
+ // Recordings and locale
+ var Locale = "en-US";
+ var RecordingsBlobUri = "YourAudioFile.wav";
+
+ // Name and description
+ var Name = "Simple transcription";
+ var Description = "Simple transcription description";
+
+ var SpeechToTextBasePath = "/api/speechtotext/v2.0/";
+
+ // These classes show the properties on JSON objects returned by the Speech Service or sent to it.
+ /*
+ class ModelIdentity {
+ id;
+ }
+
+ class Transcription {
+ Name;
+ Description;
+ Locale;
+ RecordingsUrl;
+ ResultsUrls;
+ Id;
+ CreatedDateTime;
+ LastActionDateTime;
+ Status;
+ StatusMessage;
+ }
+
+ class TranscriptionDefinition {
+ Name;
+ Description;
+ RecordingsUrl;
+ Locale;
+ Models;
+ Properties;
+ }
+ */
+
+ var ts = {
+ Name: Name,
+ Description: Description,
+ Locale: Locale,
+ RecordingsUrl: RecordingsBlobUri,
+ Properties: {
+ "PunctuationMode": "DictatedAndAutomatic",
+ "ProfanityFilterMode": "Masked",
+ "AddWordLevelTimestamps": "True"
+ },
+ Models: []
+ };
+
+ var postPayload = JSON.stringify(ts);
+
+ var startOptions = {
+ hostname: Region + ".cris.ai",
+ port: Port,
+ path: SpeechToTextBasePath + "Transcriptions/",
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ "Content-Length": Buffer.byteLength(postPayload),
+ "Ocp-Apim-Subscription-Key": SubscriptionKey
+ }
+ }
+
+ function PrintResults(resultUrl) {
+ var fetchOptions = {
+ headers: {
+ "Ocp-Apim-Subscription-Key": SubscriptionKey
+ }
+ }
+
+ var fetchRequest = https.get(new URL(resultUrl), fetchOptions, function (response) {
+ if (response.statusCode !== 200) {
+ console.info("Error retrieving status: " + response.statusCode);
+ } else {
+ var responseText = '';
+ response.setEncoding('utf8');
+ response.on("data", function (chunk) {
+ responseText += chunk;
+ });
+
+ response.on("end", function () {
+ console.info("Transcription Results:");
+ console.info(responseText);
+ });
+ }
+ });
+ }
+
+ function CheckTranscriptionStatus(statusUrl) {
+ var fetchOptions = {
+ headers: {
+ "Ocp-Apim-Subscription-Key": SubscriptionKey
+ }
+ }
+
+ var fetchRequest = https.get(new URL(statusUrl), fetchOptions, function (response) {
+ if (response.statusCode !== 200) {
+ console.info("Error retrieving status: " + response.statusCode);
+ } else {
+ var responseText = '';
+ response.setEncoding('utf8');
+ response.on("data", function (chunk) {
+ responseText += chunk;
+ });
+
+ response.on("end", function () {
+ var statusObject = JSON.parse(responseText);
+
+ var done = false;
+ switch (statusObject.status) {
+ case "Failed":
+ console.info("Transcription failed. Status: " + statusObject.statusMessage);
+ done = true;
+ break;
+ case "Succeeded":
+ done = true;
+ PrintResults(statusObject.resultsUrls["channel_0"]);
+ break;
+ case "Running":
+ console.info("Transcription is still running.");
+ break;
+ case "NotStarted":
+ console.info("Transcription has not started.");
+ break;
+ }
+
+ if (!done) {
+ setTimeout(function () {
+ CheckTranscriptionStatus(statusUrl);
+ }, (5000));
+ }
+ });
+ }
+ });
+
+ fetchRequest.on("error", function (error) {
+ console.error(error);
+ });
+ }
+
+ var request = https.request(startOptions, function (response) {
+ if (response.statusCode !== 202) {
+ console.error("Error, status code " + response.statusCode);
+ } else {
+
+ var transcriptionLocation = response.headers.location;
+
+ console.info("Created transcription at location " + transcriptionLocation);
+ console.info("Checking status.");
+
+ CheckTranscriptionStatus(transcriptionLocation);
+ }
+ });
+
+ request.on("error", function (error) {
+ console.error(error);
+ });
+
+ request.write(postPayload);
+ request.end();
+
+}());
diff --git a/quickstart/javascript/node/from-blob/package-lock.json b/quickstart/javascript/node/from-blob/package-lock.json
new file mode 100644
index 000000000..0f54cdcb6
--- /dev/null
+++ b/quickstart/javascript/node/from-blob/package-lock.json
@@ -0,0 +1,131 @@
+{
+ "name": "speech-sdk-quickstart-node",
+ "version": "1.0.0",
+ "lockfileVersion": 1,
+ "requires": true,
+ "dependencies": {
+ "agent-base": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-4.3.0.tgz",
+ "integrity": "sha512-salcGninV0nPrwpGNn4VTXBb1SOuXQBiqbrNXoeizJsHrsL6ERFM2Ne3JUSBWRE6aeNJI2ROP/WEEIDUiDe3cg==",
+ "requires": {
+ "es6-promisify": "^5.0.0"
+ }
+ },
+ "asn1.js": {
+ "version": "5.3.0",
+ "resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-5.3.0.tgz",
+ "integrity": "sha512-WHnQJFcOrIWT1RLOkFFBQkFVvyt9BPOOrH+Dp152Zk4R993rSzXUGPmkybIcUFhHE2d/iHH+nCaOWVCDbO8fgA==",
+ "requires": {
+ "bn.js": "^4.0.0",
+ "inherits": "^2.0.1",
+ "minimalistic-assert": "^1.0.0",
+ "safer-buffer": "^2.1.0"
+ }
+ },
+ "asn1.js-rfc2560": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/asn1.js-rfc2560/-/asn1.js-rfc2560-5.0.1.tgz",
+ "integrity": "sha512-1PrVg6kuBziDN3PGFmRk3QrjpKvP9h/Hv5yMrFZvC1kpzP6dQRzf5BpKstANqHBkaOUmTpakJWhicTATOA/SbA==",
+ "requires": {
+ "asn1.js-rfc5280": "^3.0.0"
+ }
+ },
+ "asn1.js-rfc5280": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/asn1.js-rfc5280/-/asn1.js-rfc5280-3.0.0.tgz",
+ "integrity": "sha512-Y2LZPOWeZ6qehv698ZgOGGCZXBQShObWnGthTrIFlIQjuV1gg2B8QOhWFRExq/MR1VnPpIIe7P9vX2vElxv+Pg==",
+ "requires": {
+ "asn1.js": "^5.0.0"
+ }
+ },
+ "bn.js": {
+ "version": "4.11.8",
+ "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.8.tgz",
+ "integrity": "sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA=="
+ },
+ "debug": {
+ "version": "3.2.6",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.6.tgz",
+ "integrity": "sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ==",
+ "requires": {
+ "ms": "^2.1.1"
+ }
+ },
+ "es6-promise": {
+ "version": "4.2.8",
+ "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.8.tgz",
+ "integrity": "sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w=="
+ },
+ "es6-promisify": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/es6-promisify/-/es6-promisify-5.0.0.tgz",
+ "integrity": "sha1-UQnWLz5W6pZ8S2NQWu8IKRyKUgM=",
+ "requires": {
+ "es6-promise": "^4.0.3"
+ }
+ },
+ "https-proxy-agent": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-3.0.0.tgz",
+ "integrity": "sha512-y4jAxNEihqvBI5F3SaO2rtsjIOnnNA8sEbuiP+UhJZJHeM2NRm6c09ax2tgqme+SgUUvjao2fJXF4h3D6Cb2HQ==",
+ "requires": {
+ "agent-base": "^4.3.0",
+ "debug": "^3.1.0"
+ }
+ },
+ "inherits": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
+ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
+ },
+ "microsoft-cognitiveservices-speech-sdk": {
+ "version": "1.12.0",
+ "resolved": "https://registry.npmjs.org/microsoft-cognitiveservices-speech-sdk/-/microsoft-cognitiveservices-speech-sdk-1.12.0.tgz",
+ "integrity": "sha512-03nPTggWCjkvVvJ6Y795UUKkFhdVn32yYqIpI/vEvAQAssffbcP5syU8JfUzN9TF4UfYvUFKlnyNgGj7y1DyAw==",
+ "requires": {
+ "asn1.js-rfc2560": "^5.0.0",
+ "asn1.js-rfc5280": "^3.0.0",
+ "https-proxy-agent": "^3.0.1",
+ "simple-lru-cache": "0.0.2",
+ "ws": "^7.2.0"
+ },
+ "dependencies": {
+ "https-proxy-agent": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-3.0.1.tgz",
+ "integrity": "sha512-+ML2Rbh6DAuee7d07tYGEKOEi2voWPUGan+ExdPbPW6Z3svq+JCqr0v8WmKPOkz1vOVykPCBSuobe7G8GJUtVg==",
+ "requires": {
+ "agent-base": "^4.3.0",
+ "debug": "^3.1.0"
+ }
+ }
+ }
+ },
+ "minimalistic-assert": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz",
+ "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A=="
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
+ },
+ "safer-buffer": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
+ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
+ },
+ "simple-lru-cache": {
+ "version": "0.0.2",
+ "resolved": "https://registry.npmjs.org/simple-lru-cache/-/simple-lru-cache-0.0.2.tgz",
+ "integrity": "sha1-1ZzDoZPBpdAyD4Tucy9uRxPlEd0="
+ },
+ "ws": {
+ "version": "7.2.3",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-7.2.3.tgz",
+ "integrity": "sha512-HTDl9G9hbkNDk98naoR/cHDws7+EyYMOdL1BmjsZXRUjf7d+MficC4B7HLUPlSiho0vg+CWKrGIt/VJBd1xunQ=="
+ }
+ }
+}
diff --git a/quickstart/javascript/node/from-blob/package.json b/quickstart/javascript/node/from-blob/package.json
new file mode 100644
index 000000000..568b2803a
--- /dev/null
+++ b/quickstart/javascript/node/from-blob/package.json
@@ -0,0 +1,14 @@
+{
+ "private": true,
+ "name": "speech-sdk-quickstart-node",
+ "version": "1.0.0",
+ "description": "Quickstart for the Microsoft Speech SDK on Node.js",
+ "main": "from-blob.js",
+ "scripts": {
+ "test": "echo \"Error: no test specified\" && exit 1"
+ },
+ "author": "Microsoft",
+ "license": "MIT",
+ "dependencies": {
+ }
+}
diff --git a/quickstart/javascript/node/from-file/package-lock.json b/quickstart/javascript/node/from-file/package-lock.json
index 2a8d5677f..0f54cdcb6 100644
--- a/quickstart/javascript/node/from-file/package-lock.json
+++ b/quickstart/javascript/node/from-file/package-lock.json
@@ -80,8 +80,8 @@
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
},
"microsoft-cognitiveservices-speech-sdk": {
- "version": "1.11.0",
- "resolved": "https://registry.npmjs.org/microsoft-cognitiveservices-speech-sdk/-/microsoft-cognitiveservices-speech-sdk-1.11.0.tgz",
+ "version": "1.12.0",
+ "resolved": "https://registry.npmjs.org/microsoft-cognitiveservices-speech-sdk/-/microsoft-cognitiveservices-speech-sdk-1.12.0.tgz",
"integrity": "sha512-03nPTggWCjkvVvJ6Y795UUKkFhdVn32yYqIpI/vEvAQAssffbcP5syU8JfUzN9TF4UfYvUFKlnyNgGj7y1DyAw==",
"requires": {
"asn1.js-rfc2560": "^5.0.0",
diff --git a/quickstart/javascript/node/from-file/package.json b/quickstart/javascript/node/from-file/package.json
index b88458491..ffcaed405 100644
--- a/quickstart/javascript/node/from-file/package.json
+++ b/quickstart/javascript/node/from-file/package.json
@@ -11,6 +11,6 @@
"license": "MIT",
"dependencies": {
"https-proxy-agent": "^3.0.0",
- "microsoft-cognitiveservices-speech-sdk": "^1.11.0"
+ "microsoft-cognitiveservices-speech-sdk": "^1.12.0"
}
}
diff --git a/quickstart/javascript/node/text-to-speech/package-lock.json b/quickstart/javascript/node/text-to-speech/package-lock.json
index af6585600..ba93e8210 100644
--- a/quickstart/javascript/node/text-to-speech/package-lock.json
+++ b/quickstart/javascript/node/text-to-speech/package-lock.json
@@ -80,8 +80,8 @@
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
},
"microsoft-cognitiveservices-speech-sdk": {
- "version": "1.11.0",
- "resolved": "https://registry.npmjs.org/microsoft-cognitiveservices-speech-sdk/-/microsoft-cognitiveservices-speech-sdk-1.11.0.tgz",
+ "version": "1.12.0",
+ "resolved": "https://registry.npmjs.org/microsoft-cognitiveservices-speech-sdk/-/microsoft-cognitiveservices-speech-sdk-1.12.0.tgz",
"integrity": "sha512-03nPTggWCjkvVvJ6Y795UUKkFhdVn32yYqIpI/vEvAQAssffbcP5syU8JfUzN9TF4UfYvUFKlnyNgGj7y1DyAw==",
"requires": {
"asn1.js-rfc2560": "^5.0.0",
diff --git a/quickstart/javascript/node/text-to-speech/package.json b/quickstart/javascript/node/text-to-speech/package.json
index 95985d536..c5fc3ccfc 100644
--- a/quickstart/javascript/node/text-to-speech/package.json
+++ b/quickstart/javascript/node/text-to-speech/package.json
@@ -11,7 +11,7 @@
"license": "MIT",
"dependencies": {
"https-proxy-agent": "^3.0.0",
- "microsoft-cognitiveservices-speech-sdk": "^1.11.0",
+ "microsoft-cognitiveservices-speech-sdk": "^1.12.0",
"readline": "^1.3.0"
}
}
diff --git a/quickstart/objectivec/macos/from-microphone/helloworld/Podfile b/quickstart/objectivec/macos/from-microphone/helloworld/Podfile
index 64a3589ad..12f998590 100644
--- a/quickstart/objectivec/macos/from-microphone/helloworld/Podfile
+++ b/quickstart/objectivec/macos/from-microphone/helloworld/Podfile
@@ -1,4 +1,4 @@
target 'helloworld' do
platform :osx, '10.13'
- pod 'MicrosoftCognitiveServicesSpeech-macOS', '~> 1.11.0'
+ pod 'MicrosoftCognitiveServicesSpeech-macOS', '~> 1.12.0'
end
diff --git a/quickstart/objectivec/macos/text-to-speech/helloworld/Podfile b/quickstart/objectivec/macos/text-to-speech/helloworld/Podfile
index 64a3589ad..12f998590 100644
--- a/quickstart/objectivec/macos/text-to-speech/helloworld/Podfile
+++ b/quickstart/objectivec/macos/text-to-speech/helloworld/Podfile
@@ -1,4 +1,4 @@
target 'helloworld' do
platform :osx, '10.13'
- pod 'MicrosoftCognitiveServicesSpeech-macOS', '~> 1.11.0'
+ pod 'MicrosoftCognitiveServicesSpeech-macOS', '~> 1.12.0'
end
diff --git a/samples/cpp/windows/console/samples/packages.config b/samples/cpp/windows/console/samples/packages.config
index 88ad9f2c0..27bae9541 100644
--- a/samples/cpp/windows/console/samples/packages.config
+++ b/samples/cpp/windows/console/samples/packages.config
@@ -1,4 +1,4 @@
-[XML elided: packages.config entry for Microsoft.CognitiveServices.Speech 1.11.0]
+[XML elided: packages.config entry for Microsoft.CognitiveServices.Speech 1.12.0]
\ No newline at end of file
diff --git a/samples/cpp/windows/console/samples/samples.vcxproj b/samples/cpp/windows/console/samples/samples.vcxproj
index 3d57232ee..2b71267ee 100644
--- a/samples/cpp/windows/console/samples/samples.vcxproj
+++ b/samples/cpp/windows/console/samples/samples.vcxproj
@@ -56,7 +56,7 @@
-[XML elided: build element referencing the 1.11.0 NuGet package]
+[XML elided: build element referencing the 1.12.0 NuGet package]
@@ -190,6 +190,6 @@
This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.
-[XML elided: NuGet restore check for the 1.11.0 package]
+[XML elided: NuGet restore check for the 1.12.0 package]
\ No newline at end of file
diff --git a/samples/csharp/dotnet-windows/console/samples/packages.config b/samples/csharp/dotnet-windows/console/samples/packages.config
index 84d65f50c..abc85e499 100644
--- a/samples/csharp/dotnet-windows/console/samples/packages.config
+++ b/samples/csharp/dotnet-windows/console/samples/packages.config
@@ -1,4 +1,4 @@
-[XML elided: packages.config entry for Microsoft.CognitiveServices.Speech 1.11.0]
+[XML elided: packages.config entry for Microsoft.CognitiveServices.Speech 1.12.0]
\ No newline at end of file
diff --git a/samples/csharp/dotnet-windows/console/samples/samples.csproj b/samples/csharp/dotnet-windows/console/samples/samples.csproj
index 956cad9a3..7905b4a1c 100644
--- a/samples/csharp/dotnet-windows/console/samples/samples.csproj
+++ b/samples/csharp/dotnet-windows/console/samples/samples.csproj
@@ -59,8 +59,8 @@
win-x64
-[XML elided: assembly reference for the 1.11.0 package]
- ..\packages\Microsoft.CognitiveServices.Speech.1.11.0\lib\net461\Microsoft.CognitiveServices.Speech.csharp.dll
+[XML elided: assembly reference for the 1.12.0 package]
+ ..\packages\Microsoft.CognitiveServices.Speech.1.12.0\lib\net461\Microsoft.CognitiveServices.Speech.csharp.dll
@@ -120,11 +120,11 @@
-
+
This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.
-
+
\ No newline at end of file
diff --git a/samples/csharp/dotnet-windows/speechtotext-wpf/speechtotext-wpf/packages.config b/samples/csharp/dotnet-windows/speechtotext-wpf/speechtotext-wpf/packages.config
index 84d65f50c..abc85e499 100644
--- a/samples/csharp/dotnet-windows/speechtotext-wpf/speechtotext-wpf/packages.config
+++ b/samples/csharp/dotnet-windows/speechtotext-wpf/speechtotext-wpf/packages.config
@@ -1,4 +1,4 @@
-[XML elided: packages.config entry for Microsoft.CognitiveServices.Speech 1.11.0]
+[XML elided: packages.config entry for Microsoft.CognitiveServices.Speech 1.12.0]
\ No newline at end of file
diff --git a/samples/csharp/dotnet-windows/speechtotext-wpf/speechtotext-wpf/speechtotext-wpf.csproj b/samples/csharp/dotnet-windows/speechtotext-wpf/speechtotext-wpf/speechtotext-wpf.csproj
index 948465b3c..e15df2ad0 100644
--- a/samples/csharp/dotnet-windows/speechtotext-wpf/speechtotext-wpf/speechtotext-wpf.csproj
+++ b/samples/csharp/dotnet-windows/speechtotext-wpf/speechtotext-wpf/speechtotext-wpf.csproj
@@ -62,8 +62,8 @@
-[XML elided: assembly reference for the 1.11.0 package]
- ..\packages\Microsoft.CognitiveServices.Speech.1.11.0\lib\net461\Microsoft.CognitiveServices.Speech.csharp.dll
+[XML elided: assembly reference for the 1.12.0 package]
+ ..\packages\Microsoft.CognitiveServices.Speech.1.12.0\lib\net461\Microsoft.CognitiveServices.Speech.csharp.dll
True
@@ -86,11 +86,11 @@
-[XML elided: NuGet restore check for the 1.11.0 package]
+[XML elided: NuGet restore check for the 1.12.0 package]
This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.
-[XML elided: targets import for the 1.11.0 package]
+[XML elided: targets import for the 1.12.0 package]
\ No newline at end of file
diff --git a/samples/csharp/dotnet-windows/translation-wpf/translation-wpf/packages.config b/samples/csharp/dotnet-windows/translation-wpf/translation-wpf/packages.config
index 0bc466c34..17de81558 100644
--- a/samples/csharp/dotnet-windows/translation-wpf/translation-wpf/packages.config
+++ b/samples/csharp/dotnet-windows/translation-wpf/translation-wpf/packages.config
@@ -1,4 +1,4 @@
-[XML elided: packages.config entry for Microsoft.CognitiveServices.Speech 1.11.0]
+[XML elided: packages.config entry for Microsoft.CognitiveServices.Speech 1.12.0]
diff --git a/samples/csharp/dotnet-windows/translation-wpf/translation-wpf/translation-wpf.csproj b/samples/csharp/dotnet-windows/translation-wpf/translation-wpf/translation-wpf.csproj
index 8eb0de989..f10c9448c 100644
--- a/samples/csharp/dotnet-windows/translation-wpf/translation-wpf/translation-wpf.csproj
+++ b/samples/csharp/dotnet-windows/translation-wpf/translation-wpf/translation-wpf.csproj
@@ -78,8 +78,8 @@
-[XML elided: assembly reference for the 1.11.0 package]
- ..\packages\Microsoft.CognitiveServices.Speech.1.11.0\lib\net461\Microsoft.CognitiveServices.Speech.csharp.dll
+[XML elided: assembly reference for the 1.12.0 package]
+ ..\packages\Microsoft.CognitiveServices.Speech.1.12.0\lib\net461\Microsoft.CognitiveServices.Speech.csharp.dll
@@ -99,11 +99,11 @@
-[XML elided: NuGet restore check for the 1.11.0 package]
+[XML elided: NuGet restore check for the 1.12.0 package]
This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.
-[XML elided: targets import for the 1.11.0 package]
+[XML elided: targets import for the 1.12.0 package]
diff --git a/samples/csharp/dotnetcore/console/samples/samples.csproj b/samples/csharp/dotnetcore/console/samples/samples.csproj
index ba533b246..1ca95148f 100644
--- a/samples/csharp/dotnetcore/console/samples/samples.csproj
+++ b/samples/csharp/dotnetcore/console/samples/samples.csproj
@@ -28,7 +28,7 @@
-[XML elided: PackageReference to Microsoft.CognitiveServices.Speech 1.11.0]
+[XML elided: PackageReference to Microsoft.CognitiveServices.Speech 1.12.0]
diff --git a/samples/csharp/sharedcontent/console/Program.cs b/samples/csharp/sharedcontent/console/Program.cs
index d9b34acd2..b8307ef31 100644
--- a/samples/csharp/sharedcontent/console/Program.cs
+++ b/samples/csharp/sharedcontent/console/Program.cs
@@ -45,6 +45,9 @@ static void Main(string[] args)
Console.WriteLine("R. Speech synthesis using authorization token.");
Console.WriteLine("S. Speech synthesis in server scenario.");
Console.WriteLine("T. Speech recognition with compressed input pull audio stream.");
+ Console.WriteLine("U. Speech recognition with compressed input push audio stream.");
+ Console.WriteLine("V. Translation with compressed input push audio stream.");
+ Console.WriteLine("W. Keyword recognizer.");
Console.Write(prompt);
@@ -142,6 +145,17 @@ static void Main(string[] args)
case ConsoleKey.T:
SpeechRecognitionSamples.SpeechRecognitionWithCompressedInputPullStreamAudio().Wait();
break;
+ case ConsoleKey.U:
+ SpeechRecognitionSamples.SpeechRecognitionWithCompressedInputPushStreamAudio().Wait();
+ break;
+ case ConsoleKey.V:
+ TranslationSamples.TranslationWithFileCompressedInputAsync().Wait();
+ break;
+
+ case ConsoleKey.W:
+ SpeechRecognitionSamples.KeywordRecognizer().Wait();
+ break;
+
case ConsoleKey.D0:
Console.WriteLine("Exiting...");
break;
diff --git a/samples/csharp/sharedcontent/console/helper.cs b/samples/csharp/sharedcontent/console/helper.cs
index ca27b948a..d6845b714 100644
--- a/samples/csharp/sharedcontent/console/helper.cs
+++ b/samples/csharp/sharedcontent/console/helper.cs
@@ -34,6 +34,12 @@ public static BinaryAudioStreamReader CreateWavReader(string filename)
return new BinaryAudioStreamReader(reader);
}
+ /// <summary>
+ /// Creates a reader that returns the file's bytes as-is, without parsing a WAV
+ /// header; used for compressed inputs such as MP3.
+ /// </summary>
+ public static BinaryAudioStreamReader CreateBinaryFileReader(string filename)
+ {
+ BinaryReader reader = new BinaryReader(File.OpenRead(filename));
+ return new BinaryAudioStreamReader(reader);
+ }
+
public static AudioStreamFormat readWaveHeader(BinaryReader reader)
{
// Tag "RIFF"
diff --git a/samples/csharp/sharedcontent/console/speech_recognition_samples.cs b/samples/csharp/sharedcontent/console/speech_recognition_samples.cs
index 24068c355..ace5f56eb 100644
--- a/samples/csharp/sharedcontent/console/speech_recognition_samples.cs
+++ b/samples/csharp/sharedcontent/console/speech_recognition_samples.cs
@@ -32,9 +32,9 @@ public static async Task RecognitionWithMicrophoneAsync()
// Starts speech recognition, and returns after a single utterance is recognized. The end of a
// single utterance is determined by listening for silence at the end or until a maximum of 15
- // seconds of audio is processed. The task returns the recognition text as result.
+ // seconds of audio is processed. The task returns the recognition text as result.
// Note: Since RecognizeOnceAsync() returns only a single utterance, it is suitable only for single
- // shot recognition like command or query.
+ // shot recognition like command or query.
// For long-running multi-utterance recognition, use StartContinuousRecognitionAsync() instead.
var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);
@@ -83,9 +83,9 @@ public static async Task RecognitionWithLanguageAndDetailedOutputAsync()
// Starts speech recognition, and returns after a single utterance is recognized. The end of a
// single utterance is determined by listening for silence at the end or until a maximum of 15
- // seconds of audio is processed. The task returns the recognition text as result.
+ // seconds of audio is processed. The task returns the recognition text as result.
// Note: Since RecognizeOnceAsync() returns only a single utterance, it is suitable only for single
- // shot recognition like command or query.
+ // shot recognition like command or query.
// For long-running multi-utterance recognition, use StartContinuousRecognitionAsync() instead.
var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);
@@ -137,9 +137,9 @@ public static async Task RecognitionUsingCustomizedModelAsync()
// Starts speech recognition, and returns after a single utterance is recognized. The end of a
// single utterance is determined by listening for silence at the end or until a maximum of 15
- // seconds of audio is processed. The task returns the recognition text as result.
+ // seconds of audio is processed. The task returns the recognition text as result.
// Note: Since RecognizeOnceAsync() returns only a single utterance, it is suitable only for single
- // shot recognition like command or query.
+ // shot recognition like command or query.
// For long-running multi-utterance recognition, use StartContinuousRecognitionAsync() instead.
var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);
@@ -244,6 +244,7 @@ public static async Task ContinuousRecognitionWithFileAsync()
public static async Task SpeechRecognitionWithCompressedInputPullStreamAudio()
{
+ //
// Creates an instance of a speech config with specified subscription key and service region.
// Replace with your own subscription key and service region (e.g., "westus").
var config = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");
@@ -315,6 +316,97 @@ public static async Task SpeechRecognitionWithCompressedInputPullStreamAudio()
await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
}
}
+ //
+ }
+
+ public static async Task SpeechRecognitionWithCompressedInputPushStreamAudio()
+ {
+ //
+ // Creates an instance of a speech config with specified subscription key and service region.
+ // Replace with your own subscription key and service region (e.g., "westus").
+ var config = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");
+
+ var stopRecognition = new TaskCompletionSource<int>();
+
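+ // Note: decoding compressed input such as MP3 requires GStreamer to be installed and
+ // discoverable by the Speech SDK; see the Speech SDK documentation on compressed audio.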
+ using (var pushStream = AudioInputStream.CreatePushStream(AudioStreamFormat.GetCompressedFormat(AudioStreamContainerFormat.MP3)))
+ {
+ using (var audioInput = AudioConfig.FromStreamInput(pushStream))
+ {
+ // Creates a speech recognizer using audio stream input.
+ using (var recognizer = new SpeechRecognizer(config, audioInput))
+ {
+ // Subscribes to events.
+ recognizer.Recognizing += (s, e) =>
+ {
+ Console.WriteLine($"RECOGNIZING: Text={e.Result.Text}");
+ };
+
+ recognizer.Recognized += (s, e) =>
+ {
+ if (e.Result.Reason == ResultReason.RecognizedSpeech)
+ {
+ Console.WriteLine($"RECOGNIZED: Text={e.Result.Text}");
+ }
+ else if (e.Result.Reason == ResultReason.NoMatch)
+ {
+ Console.WriteLine($"NOMATCH: Speech could not be recognized.");
+ }
+ };
+
+ recognizer.Canceled += (s, e) =>
+ {
+ Console.WriteLine($"CANCELED: Reason={e.Reason}");
+
+ if (e.Reason == CancellationReason.Error)
+ {
+ Console.WriteLine($"CANCELED: ErrorCode={e.ErrorCode}");
+ Console.WriteLine($"CANCELED: ErrorDetails={e.ErrorDetails}");
+ Console.WriteLine($"CANCELED: Did you update the subscription info?");
+ }
+
+ stopRecognition.TrySetResult(0);
+ };
+
+ recognizer.SessionStarted += (s, e) =>
+ {
+ Console.WriteLine("\nSession started event.");
+ };
+
+ recognizer.SessionStopped += (s, e) =>
+ {
+ Console.WriteLine("\nSession stopped event.");
+ Console.WriteLine("\nStop recognition.");
+ stopRecognition.TrySetResult(0);
+ };
+
+ // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
+ await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);
+
+ using (BinaryAudioStreamReader reader = Helper.CreateBinaryFileReader(@"whatstheweatherlike.mp3"))
+ {
+ byte[] buffer = new byte[1000];
+ while (true)
+ {
+ var readSamples = reader.Read(buffer, (uint)buffer.Length);
+ if (readSamples == 0)
+ {
+ break;
+ }
+ pushStream.Write(buffer, readSamples);
+ }
+ }
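+ // Closing the push stream signals end-of-stream, letting the service finalize recognition.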
+ pushStream.Close();
+
+ // Waits for completion.
+ // Use Task.WaitAny to keep the task rooted.
+ Task.WaitAny(new[] { stopRecognition.Task });
+
+ // Stops recognition.
+ await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
+ }
+ }
+ }
+ //
}
// Speech recognition with audio stream
@@ -824,5 +916,32 @@ public static async Task RecognitionWithAutoDetectSourceLanguageAndCustomModelAs
}
}
}
+
+ public static async Task KeywordRecognizer()
+ {
+ Console.WriteLine("say something ...");
+ using (var audioInput = AudioConfig.FromDefaultMicrophoneInput())
+ {
+ using (var recognizer = new KeywordRecognizer(audioInput))
+ {
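+ // Replace with the path to your own keyword recognition model
+ // (e.g., a .table file generated with the Custom Keyword tooling).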
+ var model = KeywordRecognitionModel.FromFile("YourKeywordModelFilename.");
+ var result = await recognizer.RecognizeOnceAsync(model).ConfigureAwait(false);
+ Console.WriteLine($"got result reason as {result.Reason}");
+ if(result.Reason == ResultReason.RecognizedKeyword)
+ {
+ var stream = AudioDataStream.FromResult(result);
+
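+ // Let the stream buffer roughly two seconds of audio captured after the keyword
+ // before detaching the input and saving it.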
+ await Task.Delay(2000);
+
+ stream.DetachInput();
+ await stream.SaveToWaveFileAsync("AudioFromRecognizedKeyword.wav");
+ }
+ else
+ {
+ Console.WriteLine($"got result reason as {result.Reason}. You can't get audio when no keyword is recognized.");
+ }
+ }
+ }
+ }
}
}
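Beyond the standalone `KeywordRecognizer` above, the SDK also supports keyword-triggered speech recognition on a `SpeechRecognizer`. The following is a minimal sketch of that pattern, not part of this commit; the model file name and the 30-second listening window are placeholders.

```csharp
// Minimal sketch: keyword-triggered continuous recognition with SpeechRecognizer.
// "YourKeywordModel.table" is a placeholder for your own model file.
using System;
using System.Threading.Tasks;
using Microsoft.CognitiveServices.Speech;
using Microsoft.CognitiveServices.Speech.Audio;

public static class KeywordSpottingSketch
{
    public static async Task RunAsync()
    {
        var config = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");
        using (var audioInput = AudioConfig.FromDefaultMicrophoneInput())
        using (var recognizer = new SpeechRecognizer(config, audioInput))
        {
            var model = KeywordRecognitionModel.FromFile("YourKeywordModel.table");

            recognizer.Recognized += (s, e) =>
            {
                if (e.Result.Reason == ResultReason.RecognizedKeyword)
                {
                    Console.WriteLine($"RECOGNIZED KEYWORD: {e.Result.Text}");
                }
                else if (e.Result.Reason == ResultReason.RecognizedSpeech)
                {
                    Console.WriteLine($"RECOGNIZED: {e.Result.Text}");
                }
            };

            // Recognition stays idle until the keyword is spotted; speech following
            // the keyword is then recognized.
            await recognizer.StartKeywordRecognitionAsync(model).ConfigureAwait(false);
            await Task.Delay(TimeSpan.FromSeconds(30)).ConfigureAwait(false);
            await recognizer.StopKeywordRecognitionAsync().ConfigureAwait(false);
        }
    }
}
```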
diff --git a/samples/csharp/sharedcontent/console/speech_synthesis_server_scenario_sample.cs b/samples/csharp/sharedcontent/console/speech_synthesis_server_scenario_sample.cs
index 25a37efcb..ec78b11bf 100644
--- a/samples/csharp/sharedcontent/console/speech_synthesis_server_scenario_sample.cs
+++ b/samples/csharp/sharedcontent/console/speech_synthesis_server_scenario_sample.cs
@@ -15,21 +15,21 @@
namespace MicrosoftSpeechSDKSamples
{
// For server scenario synthesizing with high concurrency, we recommend two methods to reduce the latency.
- // Firstly, reuse the synthesizers (e.g. use a synthesizer pool )to reduce the connection establish latency;
+ // Firstly, reuse the synthesizers (e.g., use a synthesizer pool) to reduce the connection-establishment latency;
// secondly, use AudioOutputStream or the Synthesizing event to receive the synthesized audio as a stream, which lowers the first-byte latency.
public class SynthesizerPool : IDisposable
{
private readonly Func<SpeechSynthesizer> _synthesizerGenerator;
- private readonly ConcurrentBag<SpeechSynthesizer> _synthesizerBag;
+ private readonly ConcurrentStack<SpeechSynthesizer> _synthesizerStack;
private readonly int _initialCapacity;
private readonly int _maximumRetainedCapacity;
public SynthesizerPool(Func<SpeechSynthesizer> synthesizerGenerator, int initialCapacity = 2, int maximumRetainedCapacity = 64)
{
_synthesizerGenerator = synthesizerGenerator;
- _synthesizerBag = new ConcurrentBag<SpeechSynthesizer>();
+ _synthesizerStack = new ConcurrentStack<SpeechSynthesizer>();
_initialCapacity = initialCapacity;
_maximumRetainedCapacity = maximumRetainedCapacity;
for (var i = 0; i < initialCapacity; i++)
@@ -40,7 +40,7 @@ public SynthesizerPool(Func<SpeechSynthesizer> synthesizerGenerator, int initial
public void Dispose()
{
- foreach (var synthesizer in _synthesizerBag)
+ foreach (var synthesizer in _synthesizerStack)
{
synthesizer.Dispose();
}
@@ -48,14 +48,14 @@ public void Dispose()
public SpeechSynthesizer Get()
{
- return _synthesizerBag.TryTake(out SpeechSynthesizer item) ? item : _synthesizerGenerator();
+ return _synthesizerStack.TryPop(out SpeechSynthesizer item) ? item : _synthesizerGenerator();
}
public void Put(SpeechSynthesizer item)
{
- if (_synthesizerBag.Count < _maximumRetainedCapacity)
+ if (_synthesizerStack.Count < _maximumRetainedCapacity)
{
- _synthesizerBag.Add(item);
+ _synthesizerStack.Push(item);
}
else
{
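Switching from `ConcurrentBag` to `ConcurrentStack` is deliberate: LIFO hands back the most recently returned synthesizer, whose underlying connection is most likely still warm, while a bag may hand back one that has sat idle. A minimal usage sketch of the pool follows; the subscription values are placeholders, and passing a null audio config keeps the synthesized audio in the result instead of playing it.

```csharp
using System;
using System.Threading.Tasks;
using Microsoft.CognitiveServices.Speech;
using Microsoft.CognitiveServices.Speech.Audio;

public static class SynthesizerPoolUsageSketch
{
    public static async Task SpeakAsync(string text)
    {
        var config = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");
        using (var pool = new SynthesizerPool(() => new SpeechSynthesizer(config, (AudioConfig)null)))
        {
            var synthesizer = pool.Get(); // pops the most recently used synthesizer, or creates one
            using (var result = await synthesizer.SpeakTextAsync(text).ConfigureAwait(false))
            {
                Console.WriteLine($"Synthesis finished with reason: {result.Reason}");
            }
            pool.Put(synthesizer); // returns it to the top of the stack for the next caller
        }
    }
}
```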
diff --git a/samples/csharp/sharedcontent/console/translation_samples.cs b/samples/csharp/sharedcontent/console/translation_samples.cs
index 91518bdc0..18344573b 100644
--- a/samples/csharp/sharedcontent/console/translation_samples.cs
+++ b/samples/csharp/sharedcontent/console/translation_samples.cs
@@ -233,6 +233,131 @@ public static async Task TranslationWithFileAsync()
//
}
+ // Translation using compressed file input.
+ public static async Task TranslationWithFileCompressedInputAsync()
+ {
+ //
+ // Translation source language with compressed format.
+ // Replace with a language of your choice.
+ string fromLanguage = "en-US";
+
+ // Creates an instance of a speech translation config with specified subscription key and service region.
+ // Replace with your own subscription key and service region (e.g., "westus").
+ var config = SpeechTranslationConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");
+ config.SpeechRecognitionLanguage = fromLanguage;
+
+ // Translation target language(s).
+ // Replace with language(s) of your choice.
+ config.AddTargetLanguage("de");
+ config.AddTargetLanguage("fr");
+
+ var stopTranslation = new TaskCompletionSource<int>();
+
+ // Creates a translation recognizer using file as audio input.
+ using (var pushStream = AudioInputStream.CreatePushStream(AudioStreamFormat.GetCompressedFormat(AudioStreamContainerFormat.MP3)))
+ {
+ using (var audioInput = AudioConfig.FromStreamInput(pushStream))
+ {
+ using (var recognizer = new TranslationRecognizer(config, audioInput))
+ {
+ // Subscribes to events.
+ recognizer.Recognizing += (s, e) =>
+ {
+ Console.WriteLine($"RECOGNIZING in '{fromLanguage}': Text={e.Result.Text}");
+ foreach (var element in e.Result.Translations)
+ {
+ Console.WriteLine($" TRANSLATING into '{element.Key}': {element.Value}");
+ }
+ };
+
+ recognizer.Recognized += (s, e) =>
+ {
+ if (e.Result.Reason == ResultReason.TranslatedSpeech)
+ {
+ Console.WriteLine($"RECOGNIZED in '{fromLanguage}': Text={e.Result.Text}");
+ foreach (var element in e.Result.Translations)
+ {
+ Console.WriteLine($" TRANSLATED into '{element.Key}': {element.Value}");
+ }
+ }
+ else if (e.Result.Reason == ResultReason.RecognizedSpeech)
+ {
+ Console.WriteLine($"RECOGNIZED: Text={e.Result.Text}");
+ Console.WriteLine($" Speech not translated.");
+ }
+ else if (e.Result.Reason == ResultReason.NoMatch)
+ {
+ Console.WriteLine($"NOMATCH: Speech could not be recognized.");
+ }
+ };
+
+ recognizer.Canceled += (s, e) =>
+ {
+ Console.WriteLine($"CANCELED: Reason={e.Reason}");
+
+ if (e.Reason == CancellationReason.Error)
+ {
+ Console.WriteLine($"CANCELED: ErrorCode={e.ErrorCode}");
+ Console.WriteLine($"CANCELED: ErrorDetails={e.ErrorDetails}");
+ Console.WriteLine($"CANCELED: Did you update the subscription info?");
+ }
+
+ stopTranslation.TrySetResult(0);
+ };
+
+ recognizer.SpeechStartDetected += (s, e) =>
+ {
+ Console.WriteLine("\nSpeech start detected event.");
+ };
+
+ recognizer.SpeechEndDetected += (s, e) =>
+ {
+ Console.WriteLine("\nSpeech end detected event.");
+ };
+
+ recognizer.SessionStarted += (s, e) =>
+ {
+ Console.WriteLine("\nSession started event.");
+ };
+
+ recognizer.SessionStopped += (s, e) =>
+ {
+ Console.WriteLine("\nSession stopped event.");
+ Console.WriteLine($"\nStop translation.");
+ stopTranslation.TrySetResult(0);
+ };
+
+ // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
+ Console.WriteLine("Start translation...");
+ await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);
+
+ // Replace with your own audio file name.
+ using (BinaryAudioStreamReader reader = Helper.CreateBinaryFileReader(@"whatstheweatherlike.mp3"))
+ {
+ byte[] buffer = new byte[1000];
+ while (true)
+ {
+ var readSamples = reader.Read(buffer, (uint)buffer.Length);
+ if (readSamples == 0)
+ {
+ break;
+ }
+ pushStream.Write(buffer, readSamples);
+ }
+ }
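+ // Closing the push stream signals end-of-stream so the service can finish translating.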
+ pushStream.Close();
+ // Waits for completion.
+ // Use Task.WaitAny to keep the task rooted.
+ Task.WaitAny(new[] { stopTranslation.Task });
+
+ // Stops translation.
+ await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
+ }
+ }
+ }
+ //
+ }
+
// Translation using audio stream.
public static async Task TranslationWithAudioStreamAsync()
{
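When the translated text should also be heard, the `TranslationRecognizer` can synthesize the translation directly via the `Synthesizing` event. A minimal sketch, not part of this commit, assuming the voice name `de-DE-Hedda` is valid for your region:

```csharp
using System;
using Microsoft.CognitiveServices.Speech;
using Microsoft.CognitiveServices.Speech.Translation;

public static class TranslationSynthesisSketch
{
    public static TranslationRecognizer Create()
    {
        var config = SpeechTranslationConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");
        config.SpeechRecognitionLanguage = "en-US";
        config.AddTargetLanguage("de");
        config.VoiceName = "de-DE-Hedda"; // requests synthesized audio for the translation

        var recognizer = new TranslationRecognizer(config);
        recognizer.Synthesizing += (s, e) =>
        {
            // GetAudio() returns the synthesized translation; an empty array marks the end.
            var audio = e.Result.GetAudio();
            Console.WriteLine(audio.Length != 0
                ? $"Received {audio.Length} bytes of synthesized audio."
                : "Synthesis complete.");
        };
        return recognizer;
    }
}
```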
diff --git a/samples/csharp/uwp/speechtotext-uwp/speechtotext-uwp/speechtotext-uwp.csproj b/samples/csharp/uwp/speechtotext-uwp/speechtotext-uwp/speechtotext-uwp.csproj
index ecc477400..748d08d47 100644
--- a/samples/csharp/uwp/speechtotext-uwp/speechtotext-uwp/speechtotext-uwp.csproj
+++ b/samples/csharp/uwp/speechtotext-uwp/speechtotext-uwp/speechtotext-uwp.csproj
@@ -108,7 +108,7 @@
- 1.11.0
+ 1.12.0
6.2.8
diff --git a/samples/csharp/uwp/texttospeech-uwp/texttospeech-uwp/texttospeech-uwp.csproj b/samples/csharp/uwp/texttospeech-uwp/texttospeech-uwp/texttospeech-uwp.csproj
index 70cf201a2..fc8e819fd 100644
--- a/samples/csharp/uwp/texttospeech-uwp/texttospeech-uwp/texttospeech-uwp.csproj
+++ b/samples/csharp/uwp/texttospeech-uwp/texttospeech-uwp/texttospeech-uwp.csproj
@@ -107,7 +107,7 @@
- 1.11.0
+ 1.12.0
6.2.8
diff --git a/samples/csharp/uwp/virtualassistant-uwp/VirtualAssistantPreview.csproj b/samples/csharp/uwp/virtualassistant-uwp/VirtualAssistantPreview.csproj
index 31abd42c3..436a4db66 100644
--- a/samples/csharp/uwp/virtualassistant-uwp/VirtualAssistantPreview.csproj
+++ b/samples/csharp/uwp/virtualassistant-uwp/VirtualAssistantPreview.csproj
@@ -165,7 +165,7 @@
4.3.2
- 1.11.0
+ 1.12.0
6.2.8
diff --git a/samples/csharp/xamarin/kws-xamarin/kws-xamarin/kws-xamarin.Android/kws-xamarin.Android.csproj b/samples/csharp/xamarin/kws-xamarin/kws-xamarin/kws-xamarin.Android/kws-xamarin.Android.csproj
index f60e1828c..e701f5e26 100644
--- a/samples/csharp/xamarin/kws-xamarin/kws-xamarin/kws-xamarin.Android/kws-xamarin.Android.csproj
+++ b/samples/csharp/xamarin/kws-xamarin/kws-xamarin/kws-xamarin.Android/kws-xamarin.Android.csproj
@@ -52,7 +52,7 @@
- 1.11.0
+ 1.12.0
diff --git a/samples/csharp/xamarin/kws-xamarin/kws-xamarin/kws-xamarin.UWP/kws-xamarin.UWP.csproj b/samples/csharp/xamarin/kws-xamarin/kws-xamarin/kws-xamarin.UWP/kws-xamarin.UWP.csproj
index e600f3710..c95eecdfd 100644
--- a/samples/csharp/xamarin/kws-xamarin/kws-xamarin/kws-xamarin.UWP/kws-xamarin.UWP.csproj
+++ b/samples/csharp/xamarin/kws-xamarin/kws-xamarin/kws-xamarin.UWP/kws-xamarin.UWP.csproj
@@ -147,7 +147,7 @@
- 1.11.0
+ 1.12.0
diff --git a/samples/csharp/xamarin/kws-xamarin/kws-xamarin/kws-xamarin.iOS/kws-xamarin.iOS.csproj b/samples/csharp/xamarin/kws-xamarin/kws-xamarin/kws-xamarin.iOS/kws-xamarin.iOS.csproj
index eb1feb9a0..9dd40993f 100644
--- a/samples/csharp/xamarin/kws-xamarin/kws-xamarin/kws-xamarin.iOS/kws-xamarin.iOS.csproj
+++ b/samples/csharp/xamarin/kws-xamarin/kws-xamarin/kws-xamarin.iOS/kws-xamarin.iOS.csproj
@@ -124,7 +124,7 @@
- 1.11.0
+ 1.12.0
diff --git a/samples/csharp/xamarin/kws-xamarin/kws-xamarin/kws-xamarin/kws-xamarin.csproj b/samples/csharp/xamarin/kws-xamarin/kws-xamarin/kws-xamarin/kws-xamarin.csproj
index 902a5fb54..ef70067b9 100644
--- a/samples/csharp/xamarin/kws-xamarin/kws-xamarin/kws-xamarin/kws-xamarin.csproj
+++ b/samples/csharp/xamarin/kws-xamarin/kws-xamarin/kws-xamarin/kws-xamarin.csproj
@@ -10,7 +10,7 @@
-
+
diff --git a/samples/java/android/compressed-input/app/build.gradle b/samples/java/android/compressed-input/app/build.gradle
index 2d8ea0a31..0209ad0f8 100644
--- a/samples/java/android/compressed-input/app/build.gradle
+++ b/samples/java/android/compressed-input/app/build.gradle
@@ -25,7 +25,7 @@ dependencies {
implementation fileTree(include: ['*.jar'], dir: 'libs')
// Speech SDK
- implementation 'com.microsoft.cognitiveservices.speech:client-sdk:1.11.0'
+ implementation 'com.microsoft.cognitiveservices.speech:client-sdk:1.12.0'
implementation 'com.android.support:appcompat-v7:27.1.1'
implementation 'com.android.support.constraint:constraint-layout:1.1.2'
diff --git a/samples/java/android/compressed-input/app/src/main/java/com/microsoft/cognitiveservices/speech/samples/compressedinput/MainActivity.java b/samples/java/android/compressed-input/app/src/main/java/com/microsoft/cognitiveservices/speech/samples/compressedinput/MainActivity.java
index 88e205d87..a4f73721c 100644
--- a/samples/java/android/compressed-input/app/src/main/java/com/microsoft/cognitiveservices/speech/samples/compressedinput/MainActivity.java
+++ b/samples/java/android/compressed-input/app/src/main/java/com/microsoft/cognitiveservices/speech/samples/compressedinput/MainActivity.java
@@ -20,7 +20,7 @@
import com.microsoft.cognitiveservices.speech.audio.AudioInputStream;
import com.microsoft.cognitiveservices.speech.audio.AudioStreamFormat;
import com.microsoft.cognitiveservices.speech.audio.PullAudioInputStream;
-import com.microsoft.cognitiveservices.speech.internal.AudioStreamContainerFormat;
+import com.microsoft.cognitiveservices.speech.audio.AudioStreamContainerFormat;
import java.util.concurrent.Future;
diff --git a/samples/java/android/sdkdemo/app/build.gradle b/samples/java/android/sdkdemo/app/build.gradle
index 332e6a4b8..087f04a3f 100644
--- a/samples/java/android/sdkdemo/app/build.gradle
+++ b/samples/java/android/sdkdemo/app/build.gradle
@@ -25,7 +25,7 @@ dependencies {
implementation fileTree(include: ['*.jar'], dir: 'libs')
// Speech SDK
- implementation 'com.microsoft.cognitiveservices.speech:client-sdk:1.11.0'
+ implementation 'com.microsoft.cognitiveservices.speech:client-sdk:1.12.0'
implementation 'com.android.support:appcompat-v7:27.1.1'
implementation 'com.android.support.constraint:constraint-layout:1.1.2'
diff --git a/samples/java/jre/console/pom.xml b/samples/java/jre/console/pom.xml
index 2964bae2f..1f86cc0b7 100644
--- a/samples/java/jre/console/pom.xml
+++ b/samples/java/jre/console/pom.xml
@@ -27,7 +27,7 @@
com.microsoft.cognitiveservices.speech
client-sdk
- 1.11.0
+ 1.12.0
org.apache.commons
diff --git a/samples/js/browser/README.md b/samples/js/browser/README.md
index 5e94fd6e5..5d94026f6 100644
--- a/samples/js/browser/README.md
+++ b/samples/js/browser/README.md
@@ -1,6 +1,6 @@
-# JavaScript Speech Recongition and Translation Sample for the Web Browser
+# JavaScript Speech Recognition, Synthesis, and Translation Sample for the Web Browser
-This sample demonstrates how to recognize speech with the Speech SDK for JavaScript on a web browser, like Microsoft Edge, or Chrome.
+This sample demonstrates how to recognize, synthesize, and translate speech with the Speech SDK for JavaScript on a web browser, such as Microsoft Edge or Chrome.
See [this article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-js-browser) for introductory information on the Speech SDK for JavaScript.
## Prerequisites
@@ -26,11 +26,13 @@ If you want to host the sample on a web server:
* Replace the string `YourSubscriptionKey` with your own subscription key.
* Edit the `index.html` source:
* Replace the value for the variable `authorizationEndpoint` with the full URL where you can access the token.php resource.
+* For synthesis, edit the `synthesis.html` source:
+ * Replace the value for the variable `authorizationEndpoint` with the full URL where you can access the token.php resource.
* Deploy all files to your web server.
## Run the sample
-* In case you are running the sample from your local computer, open index.html from the location where you have downloaded this sample with a JavaScript capable browser.
+* If you are running the sample from your local computer, open `index.html` or `synthesis.html` in a JavaScript-capable browser from the location where you downloaded this sample.
Use the input fields to set your subscription key and service region.
* In case you are hosting the sample on a web server, open a web browser and navigate to the full URL where you host the sample.
diff --git a/samples/js/browser/synthesis.html b/samples/js/browser/synthesis.html
new file mode 100644
index 000000000..3e8c5fa31
--- /dev/null
+++ b/samples/js/browser/synthesis.html
@@ -0,0 +1,432 @@
+ [New 432-line HTML page for speech synthesis in the browser. The markup was lost during extraction; the recoverable content is the page title "Microsoft Cognitive Services Speech SDK JavaScript Sample for Speech Synthesis" and a warning shown when microsoft.cognitiveservices.speech.sdk.bundle.js is missing ("Speech SDK not found").]
diff --git a/samples/js/node/package-lock.json b/samples/js/node/package-lock.json
index 1d9e5f47e..0ae8b1794 100644
--- a/samples/js/node/package-lock.json
+++ b/samples/js/node/package-lock.json
@@ -80,8 +80,8 @@
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
},
"microsoft-cognitiveservices-speech-sdk": {
- "version": "1.11.0",
- "resolved": "https://registry.npmjs.org/microsoft-cognitiveservices-speech-sdk/-/microsoft-cognitiveservices-speech-sdk-1.11.0.tgz",
+ "version": "1.12.0",
+ "resolved": "https://registry.npmjs.org/microsoft-cognitiveservices-speech-sdk/-/microsoft-cognitiveservices-speech-sdk-1.12.0.tgz",
"integrity": "sha512-03nPTggWCjkvVvJ6Y795UUKkFhdVn32yYqIpI/vEvAQAssffbcP5syU8JfUzN9TF4UfYvUFKlnyNgGj7y1DyAw==",
"requires": {
"asn1.js-rfc2560": "^5.0.0",
diff --git a/samples/js/node/package.json b/samples/js/node/package.json
index 8994a7628..d297a1fdf 100644
--- a/samples/js/node/package.json
+++ b/samples/js/node/package.json
@@ -11,7 +11,7 @@
"license": "MIT",
"dependencies": {
"https-proxy-agent": "^3.0.0",
- "microsoft-cognitiveservices-speech-sdk": "^1.11.0",
+ "microsoft-cognitiveservices-speech-sdk": "^1.12.0",
"readline": "^1.3.0"
}
}
diff --git a/samples/objective-c/ios/speech-samples/speech-samples/ViewController.m b/samples/objective-c/ios/speech-samples/speech-samples/ViewController.m
index 933d28b73..22ac82b4b 100644
--- a/samples/objective-c/ios/speech-samples/speech-samples/ViewController.m
+++ b/samples/objective-c/ios/speech-samples/speech-samples/ViewController.m
@@ -17,6 +17,7 @@ @interface ViewController () {
@property (strong, nonatomic) IBOutlet UIButton *recognizeWithPhraseHintButton;
@property (strong, nonatomic) IBOutlet UIButton *recognizeWithPushStreamButton;
@property (strong, nonatomic) IBOutlet UIButton *recognizeWithPullStreamButton;
+@property (strong, nonatomic) IBOutlet UIButton *recognizeWithAutoLanguageDetectionButton;
@property (strong, nonatomic) IBOutlet UILabel *recognitionResultLabel;
@@ -25,6 +26,7 @@ - (IBAction)recognizeFromMicButtonTapped:(UIButton *)sender;
- (IBAction)recognizeWithPhraseHintButtonTapped:(UIButton *)sender;
- (IBAction)recognizeWithPushStreamButtonTapped:(UIButton *)sender;
- (IBAction)recognizeWithPullStreamButtonTapped:(UIButton *)sender;
+- (IBAction)recognizeWithAutoLanguageDetectionButtonTapped:(UIButton *)sender;
@end
@implementation ViewController
@@ -71,6 +73,13 @@ - (void)viewDidLoad {
self.recognizeWithPullStreamButton.accessibilityIdentifier = @"recognize_pull_stream_button";
[self.view addSubview:self.recognizeWithPullStreamButton];
+ self.recognizeWithAutoLanguageDetectionButton = [UIButton buttonWithType:UIButtonTypeSystem];
+ [self.recognizeWithAutoLanguageDetectionButton addTarget:self action:@selector(recognizeWithAutoLanguageDetectionButtonTapped:) forControlEvents:UIControlEventTouchUpInside];
+ [self.recognizeWithAutoLanguageDetectionButton setTitle:@"Start rec with auto language detection" forState:UIControlStateNormal];
+ [self.recognizeWithAutoLanguageDetectionButton setFrame:CGRectMake(50.0, 350.0, 300.0, 50.0)];
+ self.recognizeWithAutoLanguageDetectionButton.accessibilityIdentifier = @"recognize_language_detection_button";
+ [self.view addSubview:self.recognizeWithAutoLanguageDetectionButton];
+
self.recognitionResultLabel = [[UILabel alloc] initWithFrame:CGRectMake(50.0, 350.0, 300.0, 400.0)];
self.recognitionResultLabel.lineBreakMode = NSLineBreakByWordWrapping;
self.recognitionResultLabel.numberOfLines = 0;
@@ -110,6 +119,12 @@ - (IBAction)recognizeWithPullStreamButtonTapped:(UIButton *)sender {
});
}
+- (IBAction)recognizeWithAutoLanguageDetectionButtonTapped:(UIButton *)sender {
+ dispatch_async(dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0), ^{
+ [self recognizeWithAutoLanguageDetection];
+ });
+}
+
/*
* Performs speech recognition from a RIFF wav file.
*/
@@ -473,6 +488,63 @@ - (void)recognizeWithPullStream {
[speechRecognizer stopContinuousRecognition];
}
+/*
+ * Performs speech recognition with auto source language detection
+ */
+- (void)recognizeWithAutoLanguageDetection {
+ NSBundle *mainBundle = [NSBundle mainBundle];
+ NSString *weatherFile = [mainBundle pathForResource: @"whatstheweatherlike" ofType:@"wav"];
+ NSLog(@"weatherFile path: %@", weatherFile);
+ if (!weatherFile) {
+ NSLog(@"Cannot find audio file!");
+ [self updateRecognitionErrorText:(@"Cannot find audio file")];
+ return;
+ }
+
+ SPXAudioConfiguration* weatherAudioSource = [[SPXAudioConfiguration alloc] initWithWavFileInput:weatherFile];
+ if (!weatherAudioSource) {
+ NSLog(@"Loading audio file failed!");
+ [self updateRecognitionErrorText:(@"Audio Error")];
+ return;
+ }
+
+ SPXSpeechConfiguration *speechConfig = [[SPXSpeechConfiguration alloc] initWithSubscription:speechKey region:serviceRegion];
+ if (!speechConfig) {
+ NSLog(@"Could not load speech config");
+ [self updateRecognitionErrorText:(@"Speech Config Error")];
+ return;
+ }
+
+ NSArray *languages = @[@"zh-CN", @"en-US"];
+ SPXAutoDetectSourceLanguageConfiguration* autoDetectSourceLanguageConfig = [[SPXAutoDetectSourceLanguageConfiguration alloc]init:languages];
+
+ [self updateRecognitionStatusText:(@"Recognizing...")];
+
+ SPXSpeechRecognizer* speechRecognizer = [[SPXSpeechRecognizer alloc] initWithSpeechConfiguration:speechConfig
+ autoDetectSourceLanguageConfiguration:autoDetectSourceLanguageConfig
+ audioConfiguration:weatherAudioSource];
+ if (!speechRecognizer) {
+ NSLog(@"Could not create speech recognizer");
+ [self updateRecognitionResultText:(@"Speech Recognition Error")];
+ return;
+ }
+
+ SPXSpeechRecognitionResult *speechResult = [speechRecognizer recognizeOnce];
+ if (SPXResultReason_Canceled == speechResult.reason) {
+ SPXCancellationDetails *details = [[SPXCancellationDetails alloc] initFromCanceledRecognitionResult:speechResult];
+ NSLog(@"Speech recognition was canceled: %@. Did you pass the correct key/region combination?", details.errorDetails);
+ [self updateRecognitionErrorText:([NSString stringWithFormat:@"Canceled: %@", details.errorDetails ])];
+ } else if (SPXResultReason_RecognizedSpeech == speechResult.reason) {
+ SPXAutoDetectSourceLanguageResult *languageResult = [[SPXAutoDetectSourceLanguageResult alloc] init:speechResult];
+ NSLog(@"Speech recognition result received: %@ in language %@", speechResult.text, [languageResult language]);
+ NSString *resultText = [NSString stringWithFormat:@"Language: %@, %@", [languageResult language], speechResult.text];
+ [self updateRecognitionResultText:(resultText)];
+ } else {
+ NSLog(@"There was an error.");
+ [self updateRecognitionErrorText:(@"Speech Recognition Error")];
+ }
+}
+
- (void)updateRecognitionResultText:(NSString *) resultText {
dispatch_async(dispatch_get_main_queue(), ^{
self.recognitionResultLabel.textColor = UIColor.blackColor;
diff --git a/samples/objective-c/ios/speech-samples/speech-samplesUITests/speech-samplesUITests.m b/samples/objective-c/ios/speech-samples/speech-samplesUITests/speech-samplesUITests.m
index acd7280d1..fcfa90df1 100644
--- a/samples/objective-c/ios/speech-samples/speech-samplesUITests/speech-samplesUITests.m
+++ b/samples/objective-c/ios/speech-samples/speech-samplesUITests/speech-samplesUITests.m
@@ -107,4 +107,22 @@ - (void)testRecognizeFromPullStream {
[self waitForExpectationsWithTimeout:20 handler:nil];
}
+- (void)testRecognizeWithLanguageDetection {
+ // sleep to make sure elements are there
+ [NSThread sleepForTimeInterval:1];
+ XCUIElement * reco_button = app.buttons[@"recognize_language_detection_button"];
+ XCTAssert(reco_button.exists);
+
+ XCUIElement * result_label = app.staticTexts[@"result_label"];
+ XCTAssert(result_label.exists);
+
+ NSPredicate *pred = [NSPredicate predicateWithFormat:@"label == %@", [NSString stringWithFormat:@"Language: en-US, %@", weatherText]];
+
+ [self expectationForPredicate:pred evaluatedWithObject:result_label handler:nil];
+
+ [reco_button tap];
+
+ [self waitForExpectationsWithTimeout:20 handler:nil];
+}
+
@end